base.cc revision 12276
/*
 * Copyright (c) 2011-2012,2016-2017 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2011 Regents of the University of California
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Nathan Binkert
 *          Rick Strong
 */

#include "cpu/base.hh"

#include <iostream>
#include <sstream>
#include <string>

#include "arch/tlb.hh"
#include "base/cprintf.hh"
#include "base/loader/symtab.hh"
#include "base/misc.hh"
#include "base/output.hh"
#include "base/trace.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/cpuevent.hh"
#include "cpu/profile.hh"
#include "cpu/thread_context.hh"
#include "debug/Mwait.hh"
#include "debug/SyscallVerbose.hh"
#include "mem/page_table.hh"
#include "params/BaseCPU.hh"
#include "sim/clocked_object.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"
#include "sim/sim_events.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

// Hack
#include "sim/stat_control.hh"

using namespace std;

vector<BaseCPU *> BaseCPU::cpuList;

// This variable reflects the max number of threads in any CPU.  Be
// careful to only use it once all the CPUs that you care about have
// been initialized
int maxThreadsPerCPU = 1;

CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
    : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0),
      cpu(_cpu), _repeatEvent(true)
{
    if (_interval)
        cpu->schedule(this, curTick() + _interval);
}

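// Sample the CPU's committed-op count, report progress since the last
// sample, and reschedule the event if it repeats.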
void
CPUProgressEvent::process()
{
    Counter temp = cpu->totalOps();

    if (_repeatEvent)
        cpu->schedule(this, curTick() + _interval);

    if (cpu->switchedOut()) {
        return;
    }

#ifndef NDEBUG
    double ipc = double(temp - lastNumInst) / (_interval / cpu->clockPeriod());

    DPRINTFN("%s progress event, total committed:%i, progress insts committed: "
             "%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst,
             ipc);
    ipc = 0.0;
#else
    cprintf("%lli: %s progress event, total committed:%i, progress insts "
            "committed: %lli\n", curTick(), cpu->name(), temp,
            temp - lastNumInst);
#endif
    lastNumInst = temp;
}

const char *
CPUProgressEvent::description() const
{
    return "CPU Progress";
}

BaseCPU::BaseCPU(Params *p, bool is_checker)
    : MemObject(p), instCnt(0), _cpuId(p->cpu_id), _socketId(p->socket_id),
      _instMasterId(p->system->getMasterId(name() + ".inst")),
      _dataMasterId(p->system->getMasterId(name() + ".data")),
      _taskId(ContextSwitchTaskId::Unknown), _pid(invldPid),
      _switchedOut(p->switched_out), _cacheLineSize(p->system->cacheLineSize()),
      interrupts(p->interrupts), profileEvent(NULL),
      numThreads(p->numThreads), system(p->system),
      functionTraceStream(nullptr), currentFunctionStart(0),
      currentFunctionEnd(0), functionEntryTick(0),
      addressMonitor(p->numThreads),
      syscallRetryLatency(p->syscallRetryLatency),
      pwrGatingLatency(p->pwr_gating_latency),
      enterPwrGatingEvent([this]{ enterPwrGating(); }, name())
{
    // if Python did not provide a valid ID, do it here
    if (_cpuId == -1) {
        _cpuId = cpuList.size();
    }

    // add self to global list of CPUs
    cpuList.push_back(this);

    DPRINTF(SyscallVerbose, "Constructing CPU with id %d, socket id %d\n",
                _cpuId, _socketId);

    if (numThreads > maxThreadsPerCPU)
        maxThreadsPerCPU = numThreads;

    // allocate per-thread instruction-based event queues
    comInstEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comInstEventQueue[tid] =
            new EventQueue("instruction-based event queue");

    //
    // set up instruction-count-based termination events, if any
    //
    if (p->max_insts_any_thread != 0) {
        const char *cause = "a thread reached the max instruction count";
        for (ThreadID tid = 0; tid < numThreads; ++tid)
            scheduleInstStop(tid, p->max_insts_any_thread, cause);
    }

    // Set up instruction-count-based termination events for SimPoints.
    // Typically there is more than one action point; Simulation.py is
    // responsible for taking the necessary actions upon exiting the
    // simulation loop.
    if (!p->simpoint_start_insts.empty()) {
        const char *cause = "simpoint starting point found";
        for (size_t i = 0; i < p->simpoint_start_insts.size(); ++i)
            scheduleInstStop(0, p->simpoint_start_insts[i], cause);
    }

    if (p->max_insts_all_threads != 0) {
        const char *cause = "all threads reached the max instruction count";

        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comInstEventQueue[tid]->schedule(event, p->max_insts_all_threads);
        }
    }

    // allocate per-thread load-based event queues
    comLoadEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comLoadEventQueue[tid] = new EventQueue("load-based event queue");

    //
    // set up load-count-based termination events, if any
    //
    if (p->max_loads_any_thread != 0) {
        const char *cause = "a thread reached the max load count";
        for (ThreadID tid = 0; tid < numThreads; ++tid)
            scheduleLoadStop(tid, p->max_loads_any_thread, cause);
    }

    if (p->max_loads_all_threads != 0) {
        const char *cause = "all threads reached the max load count";
        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comLoadEventQueue[tid]->schedule(event, p->max_loads_all_threads);
        }
    }

    functionTracingEnabled = false;
    if (p->function_trace) {
        const string fname = csprintf("ftrace.%s", name());
        functionTraceStream = simout.findOrCreate(fname)->stream();

        currentFunctionStart = currentFunctionEnd = 0;
        functionEntryTick = p->function_trace_start;

        if (p->function_trace_start == 0) {
            functionTracingEnabled = true;
        } else {
            Event *event = new EventFunctionWrapper(
                [this]{ enableFunctionTrace(); }, name(), true);
            schedule(event, p->function_trace_start);
        }
    }

    // The interrupt controllers should always be present unless this
    // CPU is switched in later or is a checker CPU
    if (!params()->switched_out && !is_checker) {
        fatal_if(interrupts.size() != numThreads,
                 "CPU %s has %i interrupt controllers, but is expecting one "
                 "per thread (%i)\n",
                 name(), interrupts.size(), numThreads);
        for (ThreadID tid = 0; tid < numThreads; tid++)
            interrupts[tid]->setCPU(this);
    }

    if (FullSystem) {
        if (params()->profile)
            profileEvent = new EventFunctionWrapper(
                [this]{ processProfileEvent(); },
                name());
    }
    tracer = params()->tracer;

    if (params()->isa.size() != numThreads) {
        fatal("Number of ISAs (%i) assigned to the CPU does not equal number "
              "of threads (%i).\n", params()->isa.size(), numThreads);
    }
}

void
BaseCPU::enableFunctionTrace()
{
    functionTracingEnabled = true;
}

BaseCPU::~BaseCPU()
{
    delete profileEvent;
    delete[] comLoadEventQueue;
    delete[] comInstEventQueue;
}

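// Arm the per-thread address monitor on a virtual address; the physical
// line address is filled in later, by mwait()/mwaitAtomic(), once a
// translation is available.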
void
BaseCPU::armMonitor(ThreadID tid, Addr address)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    monitor.armed = true;
    monitor.vAddr = address;
    monitor.pAddr = 0x0;
    DPRINTF(Mwait, "[tid:%d] Armed monitor (vAddr=0x%lx)\n", tid, address);
}

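// Returns true if the thread should suspend (the monitor is now waiting
// on the cache line addressed by the translated packet); returns false
// if a wakeup already arrived, consuming the wakeup in the process.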
bool
BaseCPU::mwait(ThreadID tid, PacketPtr pkt)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    if (!monitor.gotWakeup) {
        int block_size = cacheLineSize();
        uint64_t mask = ~((uint64_t)(block_size - 1));

        assert(pkt->req->hasPaddr());
        monitor.pAddr = pkt->getAddr() & mask;
        monitor.waiting = true;

        DPRINTF(Mwait, "[tid:%d] mwait called (vAddr=0x%lx, "
                "line's paddr=0x%lx)\n", tid, monitor.vAddr, monitor.pAddr);
        return true;
    } else {
        monitor.gotWakeup = false;
        return false;
    }
}

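// Atomic-mode variant of mwait(): no translated packet is available
// here, so the monitored virtual address is translated through the DTB
// before the monitor is armed on the containing cache line.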
void
BaseCPU::mwaitAtomic(ThreadID tid, ThreadContext *tc, TheISA::TLB *dtb)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    Request req;
    Addr addr = monitor.vAddr;
    int block_size = cacheLineSize();
    uint64_t mask = ~((uint64_t)(block_size - 1));
    int size = block_size;

    // The address of the next line if it crosses a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, block_size);

    if (secondAddr > addr)
        size = secondAddr - addr;

    req.setVirt(0, addr, size, 0x0, dataMasterId(), tc->instAddr());

    // translate to physical address
    Fault fault = dtb->translateAtomic(&req, tc, BaseTLB::Read);
    assert(fault == NoFault);

    monitor.pAddr = req.getPaddr() & mask;
    monitor.waiting = true;

    DPRINTF(Mwait, "[tid:%d] mwait called (vAddr=0x%lx, line's paddr=0x%lx)\n",
            tid, monitor.vAddr, monitor.pAddr);
}

void
BaseCPU::init()
{
    if (!params()->switched_out) {
        registerThreadContexts();

        verifyMemoryMode();
    }
}

void
BaseCPU::startup()
{
    if (FullSystem) {
        if (!params()->switched_out && profileEvent)
            schedule(profileEvent, curTick());
    }

    if (params()->progress_interval) {
        new CPUProgressEvent(this, params()->progress_interval);
    }

    if (_switchedOut)
        ClockedObject::pwrState(Enums::PwrState::OFF);

    // Assumption: the CPU starts to operate instantaneously, without
    // any startup latency
    if (ClockedObject::pwrState() == Enums::PwrState::UNDEFINED)
        ClockedObject::pwrState(Enums::PwrState::ON);
}

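// Convenience helper: allocate a PMU probe point registered with this
// CPU's probe manager.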
ProbePoints::PMUUPtr
BaseCPU::pmuProbePoint(const char *name)
{
    ProbePoints::PMUUPtr ptr;
    ptr.reset(new ProbePoints::PMU(getProbeManager(), name));

    return ptr;
}

void
BaseCPU::regProbePoints()
{
    ppCycles = pmuProbePoint("Cycles");

    ppRetiredInsts = pmuProbePoint("RetiredInsts");
    ppRetiredLoads = pmuProbePoint("RetiredLoads");
    ppRetiredStores = pmuProbePoint("RetiredStores");
    ppRetiredBranches = pmuProbePoint("RetiredBranches");
}

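// Notify PMU probe points for a committed instruction. Micro-ops only
// count towards RetiredInsts on their last micro-op, so each macro-op
// is counted exactly once.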
void
BaseCPU::probeInstCommit(const StaticInstPtr &inst)
{
    if (!inst->isMicroop() || inst->isLastMicroop())
        ppRetiredInsts->notify(1);

    if (inst->isLoad())
        ppRetiredLoads->notify(1);

    if (inst->isStore())
        ppRetiredStores->notify(1);

    if (inst->isControl())
        ppRetiredBranches->notify(1);
}

void
BaseCPU::regStats()
{
    MemObject::regStats();

    using namespace Stats;

    numCycles
        .name(name() + ".numCycles")
        .desc("number of cpu cycles simulated")
        ;

    numWorkItemsStarted
        .name(name() + ".numWorkItemsStarted")
        .desc("number of work items this cpu started")
        ;

    numWorkItemsCompleted
        .name(name() + ".numWorkItemsCompleted")
        .desc("number of work items this cpu completed")
        ;

    int size = threadContexts.size();
    if (size > 1) {
        for (int i = 0; i < size; ++i) {
            stringstream namestr;
            ccprintf(namestr, "%s.ctx%d", name(), i);
            threadContexts[i]->regStats(namestr.str());
        }
    } else if (size == 1)
        threadContexts[0]->regStats(name());
}

BaseMasterPort &
BaseCPU::getMasterPort(const string &if_name, PortID idx)
{
    // Get the right port based on name. This applies to all the
    // subclasses of the base CPU and relies on their implementation
    // of getDataPort and getInstPort. In all cases these methods
    // return a MasterPort pointer.
    if (if_name == "dcache_port")
        return getDataPort();
    else if (if_name == "icache_port")
        return getInstPort();
    else
        return MemObject::getMasterPort(if_name, idx);
}

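// Register this CPU's thread contexts with the system. In multi-threaded
// mode the system assigns the context id; otherwise the CPU id is used,
// so the context id matches the CPU id. In SE mode, each context is also
// assigned to its owning process.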
void
BaseCPU::registerThreadContexts()
{
    assert(system->multiThread || numThreads == 1);

    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        ThreadContext *tc = threadContexts[tid];

        if (system->multiThread) {
            tc->setContextId(system->registerThreadContext(tc));
        } else {
            tc->setContextId(system->registerThreadContext(tc, _cpuId));
        }

        if (!FullSystem)
            tc->getProcessPtr()->assignThreadContext(tc->contextId());
    }
}

void
BaseCPU::deschedulePowerGatingEvent()
{
    if (enterPwrGatingEvent.scheduled()) {
        deschedule(enterPwrGatingEvent);
    }
}

void
BaseCPU::schedulePowerGatingEvent()
{
    for (auto tc : threadContexts) {
        if (tc->status() == ThreadContext::Active)
            return;
    }

    if (ClockedObject::pwrState() == Enums::PwrState::CLK_GATED) {
        assert(!enterPwrGatingEvent.scheduled());
        // Schedule a power gating event when clock gated for the specified
        // amount of time
        schedule(enterPwrGatingEvent, clockEdge(pwrGatingLatency));
    }
}

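// Return the thread id of the given context on this CPU, or 0 if the
// context does not belong to this CPU.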
505BaseCPU::findContext(ThreadContext *tc)
506{
507    ThreadID size = threadContexts.size();
508    for (ThreadID tid = 0; tid < size; ++tid) {
509        if (tc == threadContexts[tid])
510            return tid;
511    }
512    return 0;
513}
514
void
BaseCPU::activateContext(ThreadID thread_num)
{
    // Squash any pending power gating event while the CPU is activated
    if (enterPwrGatingEvent.scheduled())
        deschedule(enterPwrGatingEvent);

    // For any active thread running, update the CPU power state to active (ON)
    ClockedObject::pwrState(Enums::PwrState::ON);
}

void
BaseCPU::suspendContext(ThreadID thread_num)
{
    // Check if all threads are suspended
    for (auto t : threadContexts) {
        if (t->status() != ThreadContext::Suspended) {
            return;
        }
    }

    // All CPU threads suspended, enter a lower power state for the CPU
    ClockedObject::pwrState(Enums::PwrState::CLK_GATED);

    // Schedule the power gating event once the CPU has been clock-gated
    // for a configurable number of cycles
    schedule(enterPwrGatingEvent, clockEdge(pwrGatingLatency));
}

void
BaseCPU::enterPwrGating(void)
{
    ClockedObject::pwrState(Enums::PwrState::OFF);
}

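// Prepare this CPU to be swapped out of the simulation: stop profiling,
// flush the TLBs, and move to the power-gated (OFF) state.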
void
BaseCPU::switchOut()
{
    assert(!_switchedOut);
    _switchedOut = true;
    if (profileEvent && profileEvent->scheduled())
        deschedule(profileEvent);

    // Flush all TLBs in the CPU to avoid having stale translations if
    // it gets switched in later.
    flushTLBs();

    // Go to the power gating state
    ClockedObject::pwrState(Enums::PwrState::OFF);
}

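// Take over state from a previously running CPU of the same shape:
// thread contexts, TLB and table-walker ports (including any checker
// CPUs), interrupt controllers, and finally the instruction and data
// ports themselves.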
void
BaseCPU::takeOverFrom(BaseCPU *oldCPU)
{
    assert(threadContexts.size() == oldCPU->threadContexts.size());
    assert(_cpuId == oldCPU->cpuId());
    assert(_switchedOut);
    assert(oldCPU != this);
    _pid = oldCPU->getPid();
    _taskId = oldCPU->taskId();
    // Take over the power state of the switched-out CPU
    ClockedObject::pwrState(oldCPU->pwrState());
    _switchedOut = false;

    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *newTC = threadContexts[i];
        ThreadContext *oldTC = oldCPU->threadContexts[i];

        newTC->takeOverFrom(oldTC);

        CpuEvent::replaceThreadContext(oldTC, newTC);

        assert(newTC->contextId() == oldTC->contextId());
        assert(newTC->threadId() == oldTC->threadId());
        system->replaceThreadContext(newTC, newTC->contextId());

        /* This code no longer works since the zero register (e.g.,
         * r31 on Alpha) doesn't necessarily contain zero at this
         * point.
           if (DTRACE(Context))
            ThreadContext::compare(oldTC, newTC);
        */

        BaseMasterPort *old_itb_port = oldTC->getITBPtr()->getMasterPort();
        BaseMasterPort *old_dtb_port = oldTC->getDTBPtr()->getMasterPort();
        BaseMasterPort *new_itb_port = newTC->getITBPtr()->getMasterPort();
        BaseMasterPort *new_dtb_port = newTC->getDTBPtr()->getMasterPort();

        // Move over any table walker ports if they exist
        if (new_itb_port) {
            assert(!new_itb_port->isConnected());
            assert(old_itb_port);
            assert(old_itb_port->isConnected());
            BaseSlavePort &slavePort = old_itb_port->getSlavePort();
            old_itb_port->unbind();
            new_itb_port->bind(slavePort);
        }
        if (new_dtb_port) {
            assert(!new_dtb_port->isConnected());
            assert(old_dtb_port);
            assert(old_dtb_port->isConnected());
            BaseSlavePort &slavePort = old_dtb_port->getSlavePort();
            old_dtb_port->unbind();
            new_dtb_port->bind(slavePort);
        }
        newTC->getITBPtr()->takeOverFrom(oldTC->getITBPtr());
        newTC->getDTBPtr()->takeOverFrom(oldTC->getDTBPtr());

        // Check whether or not we have to transfer CheckerCPU
        // objects over in the switch
        CheckerCPU *oldChecker = oldTC->getCheckerCpuPtr();
        CheckerCPU *newChecker = newTC->getCheckerCpuPtr();
        if (oldChecker && newChecker) {
            BaseMasterPort *old_checker_itb_port =
                oldChecker->getITBPtr()->getMasterPort();
            BaseMasterPort *old_checker_dtb_port =
                oldChecker->getDTBPtr()->getMasterPort();
            BaseMasterPort *new_checker_itb_port =
                newChecker->getITBPtr()->getMasterPort();
            BaseMasterPort *new_checker_dtb_port =
                newChecker->getDTBPtr()->getMasterPort();

            newChecker->getITBPtr()->takeOverFrom(oldChecker->getITBPtr());
            newChecker->getDTBPtr()->takeOverFrom(oldChecker->getDTBPtr());

            // Move over any table walker ports if they exist for the checker
            if (new_checker_itb_port) {
                assert(!new_checker_itb_port->isConnected());
                assert(old_checker_itb_port);
                assert(old_checker_itb_port->isConnected());
                BaseSlavePort &slavePort =
                    old_checker_itb_port->getSlavePort();
                old_checker_itb_port->unbind();
                new_checker_itb_port->bind(slavePort);
            }
            if (new_checker_dtb_port) {
                assert(!new_checker_dtb_port->isConnected());
                assert(old_checker_dtb_port);
                assert(old_checker_dtb_port->isConnected());
                BaseSlavePort &slavePort =
                    old_checker_dtb_port->getSlavePort();
                old_checker_dtb_port->unbind();
                new_checker_dtb_port->bind(slavePort);
            }
        }
    }

    interrupts = oldCPU->interrupts;
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        interrupts[tid]->setCPU(this);
    }
    oldCPU->interrupts.clear();

    if (FullSystem) {
        for (ThreadID i = 0; i < size; ++i)
            threadContexts[i]->profileClear();

        if (profileEvent)
            schedule(profileEvent, curTick());
    }

    // All CPUs have an instruction and a data port, and the new CPU's
    // ports are dangling while the old CPU has its ports connected
    // already. Unbind the old CPU and then bind the ports of the one
    // we are switching to.
    assert(!getInstPort().isConnected());
    assert(oldCPU->getInstPort().isConnected());
    BaseSlavePort &inst_peer_port = oldCPU->getInstPort().getSlavePort();
    oldCPU->getInstPort().unbind();
    getInstPort().bind(inst_peer_port);

    assert(!getDataPort().isConnected());
    assert(oldCPU->getDataPort().isConnected());
    BaseSlavePort &data_peer_port = oldCPU->getDataPort().getSlavePort();
    oldCPU->getDataPort().unbind();
    getDataPort().bind(data_peer_port);
}

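// Flush the instruction and data TLBs of every thread context, plus
// those of any associated checker CPU.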
void
BaseCPU::flushTLBs()
{
    for (ThreadID i = 0; i < threadContexts.size(); ++i) {
        ThreadContext &tc(*threadContexts[i]);
        CheckerCPU *checker(tc.getCheckerCpuPtr());

        tc.getITBPtr()->flushAll();
        tc.getDTBPtr()->flushAll();
        if (checker) {
            checker->getITBPtr()->flushAll();
            checker->getDTBPtr()->flushAll();
        }
    }
}

void
BaseCPU::processProfileEvent()
{
    ThreadID size = threadContexts.size();

    for (ThreadID i = 0; i < size; ++i)
        threadContexts[i]->profileSample();

    schedule(profileEvent, curTick() + params()->profile);
}

void
BaseCPU::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        /* Unlike _pid, _taskId is not serialized, as task ids are
         * dynamically assigned unique ids that are only meaningful for
         * the duration of a specific run. Restoring them would require
         * serializing the entire taskMap in system. */
        SERIALIZE_SCALAR(_pid);

        // Serialize the threads; this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i) {
            ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
            interrupts[i]->serialize(cp);
            serializeThread(cp, i);
        }
    }
}

void
BaseCPU::unserialize(CheckpointIn &cp)
{
    UNSERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        UNSERIALIZE_SCALAR(_pid);

        // Unserialize the threads; this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i) {
            ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
            interrupts[i]->unserialize(cp);
            unserializeThread(cp, i);
        }
    }
}

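// Schedule a simulation-exit event on the thread's instruction-count
// event queue; that queue advances by committed instructions, so the
// event fires once `insts` more instructions have committed.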
void
BaseCPU::scheduleInstStop(ThreadID tid, Counter insts, const char *cause)
{
    const Tick now(comInstEventQueue[tid]->getCurTick());
    Event *event(new LocalSimLoopExitEvent(cause, 0));

    comInstEventQueue[tid]->schedule(event, now + insts);
}

uint64_t
BaseCPU::getCurrentInstCount(ThreadID tid)
{
    return Tick(comInstEventQueue[tid]->getCurTick());
}

AddressMonitor::AddressMonitor()
{
    armed = false;
    waiting = false;
    gotWakeup = false;
}

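// Called for incoming invalidating accesses: if an armed monitor is
// waiting on the line addressed by this packet, clear the wait and
// return true so the caller can wake the core.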
bool
AddressMonitor::doMonitor(PacketPtr pkt)
{
    assert(pkt->req->hasPaddr());
    if (armed && waiting) {
        if (pAddr == pkt->getAddr()) {
            DPRINTF(Mwait, "pAddr=0x%lx invalidated: waking up core\n",
                    pkt->getAddr());
            waiting = false;
            return true;
        }
    }
    return false;
}

void
BaseCPU::scheduleLoadStop(ThreadID tid, Counter loads, const char *cause)
{
    const Tick now(comLoadEventQueue[tid]->getCurTick());
    Event *event(new LocalSimLoopExitEvent(cause, 0));

    comLoadEventQueue[tid]->schedule(event, now + loads);
}

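// Function tracing: when the PC leaves the address range of the current
// symbol, log the time spent in it and the name of the new symbol (or
// the raw address if no symbol is found).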
void
BaseCPU::traceFunctionsInternal(Addr pc)
{
    if (!debugSymbolTable)
        return;

    // If the PC enters a different function, print the new function
    // symbol and update the saved range.  Otherwise do nothing.
    if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
        string sym_str;
        bool found = debugSymbolTable->findNearestSymbol(pc, sym_str,
                                                         currentFunctionStart,
                                                         currentFunctionEnd);

        if (!found) {
            // no symbol found: use the address as the label
            sym_str = csprintf("0x%x", pc);
            currentFunctionStart = pc;
            currentFunctionEnd = pc + 1;
        }

        ccprintf(*functionTraceStream, " (%d)\n%d: %s",
                 curTick() - functionEntryTick, curTick(), sym_str);
        functionEntryTick = curTick();
    }
}

bool
BaseCPU::waitForRemoteGDB() const
{
    return params()->wait_for_remote_gdb;
}