base.cc revision 10643
/*
 * Copyright (c) 2011-2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2011 Regents of the University of California
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Nathan Binkert
 *          Rick Strong
 */

#include <iostream>
#include <sstream>
#include <string>

#include "arch/tlb.hh"
#include "base/loader/symtab.hh"
#include "base/cprintf.hh"
#include "base/misc.hh"
#include "base/output.hh"
#include "base/trace.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/base.hh"
#include "cpu/cpuevent.hh"
#include "cpu/profile.hh"
#include "cpu/thread_context.hh"
#include "debug/Mwait.hh"
#include "debug/SyscallVerbose.hh"
#include "mem/page_table.hh"
#include "params/BaseCPU.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"
#include "sim/sim_events.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

// Hack
#include "sim/stat_control.hh"

using namespace std;

vector<BaseCPU *> BaseCPU::cpuList;

// This variable reflects the max number of threads in any CPU.  Be
// careful to only use it once all the CPUs that you care about have
// been initialized.
int maxThreadsPerCPU = 1;

CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
    : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0),
      cpu(_cpu), _repeatEvent(true)
{
    if (_interval)
        cpu->schedule(this, curTick() + _interval);
}

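// Report progress for this CPU: the number of instructions committed
// since the last report and, in debug builds, the IPC over the reporting
// interval; then reschedule if this is a repeating event.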
void
CPUProgressEvent::process()
{
    Counter temp = cpu->totalOps();
#ifndef NDEBUG
    double ipc = double(temp - lastNumInst) / (_interval / cpu->clockPeriod());

    DPRINTFN("%s progress event, total committed:%i, progress insts committed: "
             "%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst,
             ipc);
    ipc = 0.0;
#else
    cprintf("%lli: %s progress event, total committed:%i, progress insts "
            "committed: %lli\n", curTick(), cpu->name(), temp,
            temp - lastNumInst);
#endif
    lastNumInst = temp;

    if (_repeatEvent)
        cpu->schedule(this, curTick() + _interval);
}

const char *
CPUProgressEvent::description() const
{
    return "CPU Progress";
}

BaseCPU::BaseCPU(Params *p, bool is_checker)
    : MemObject(p), instCnt(0), _cpuId(p->cpu_id), _socketId(p->socket_id),
      _instMasterId(p->system->getMasterId(name() + ".inst")),
      _dataMasterId(p->system->getMasterId(name() + ".data")),
      _taskId(ContextSwitchTaskId::Unknown), _pid(Request::invldPid),
      _switchedOut(p->switched_out), _cacheLineSize(p->system->cacheLineSize()),
      interrupts(p->interrupts), profileEvent(NULL),
      numThreads(p->numThreads), system(p->system),
      functionTraceStream(nullptr), currentFunctionStart(0),
      currentFunctionEnd(0), functionEntryTick(0),
      addressMonitor()
{
    // if Python did not provide a valid ID, assign one here
    if (_cpuId == -1) {
        _cpuId = cpuList.size();
    }

    // add self to global list of CPUs
    cpuList.push_back(this);

    DPRINTF(SyscallVerbose, "Constructing CPU with id %d, socket id %d\n",
                _cpuId, _socketId);

    if (numThreads > maxThreadsPerCPU)
        maxThreadsPerCPU = numThreads;

    // allocate per-thread instruction-based event queues
    comInstEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comInstEventQueue[tid] =
            new EventQueue("instruction-based event queue");

    //
    // set up instruction-count-based termination events, if any
    //
    if (p->max_insts_any_thread != 0) {
        const char *cause = "a thread reached the max instruction count";
        for (ThreadID tid = 0; tid < numThreads; ++tid)
            scheduleInstStop(tid, p->max_insts_any_thread, cause);
    }

    // Set up instruction-count-based termination events for SimPoints.
    // Typically there is more than one action point.
    // Simulation.py is responsible for taking the necessary actions upon
    // exiting the simulation loop.
    if (!p->simpoint_start_insts.empty()) {
        const char *cause = "simpoint starting point found";
        for (size_t i = 0; i < p->simpoint_start_insts.size(); ++i)
            scheduleInstStop(0, p->simpoint_start_insts[i], cause);
    }

    if (p->max_insts_all_threads != 0) {
        const char *cause = "all threads reached the max instruction count";

        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comInstEventQueue[tid]->schedule(event, p->max_insts_all_threads);
        }
    }

    // allocate per-thread load-based event queues
    comLoadEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comLoadEventQueue[tid] = new EventQueue("load-based event queue");

    //
    // set up load-count-based termination events, if any
    //
    if (p->max_loads_any_thread != 0) {
        const char *cause = "a thread reached the max load count";
        for (ThreadID tid = 0; tid < numThreads; ++tid)
            scheduleLoadStop(tid, p->max_loads_any_thread, cause);
    }

    if (p->max_loads_all_threads != 0) {
        const char *cause = "all threads reached the max load count";
        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comLoadEventQueue[tid]->schedule(event, p->max_loads_all_threads);
        }
    }

    functionTracingEnabled = false;
    if (p->function_trace) {
        const string fname = csprintf("ftrace.%s", name());
        functionTraceStream = simout.find(fname);
        if (!functionTraceStream)
            functionTraceStream = simout.create(fname);

        currentFunctionStart = currentFunctionEnd = 0;
        functionEntryTick = p->function_trace_start;

        if (p->function_trace_start == 0) {
            functionTracingEnabled = true;
        } else {
            typedef EventWrapper<BaseCPU, &BaseCPU::enableFunctionTrace> wrap;
            Event *event = new wrap(this, true);
            schedule(event, p->function_trace_start);
        }
    }

    // The interrupt controller should always be present unless this CPU
    // is switched in later, or unless it is a checker CPU
    if (!params()->switched_out && !is_checker) {
        if (interrupts) {
            interrupts->setCPU(this);
        } else {
            fatal("CPU %s has no interrupt controller.\n"
                  "Ensure createInterruptController() is called.\n", name());
        }
    }

    if (FullSystem) {
        if (params()->profile)
            profileEvent = new ProfileEvent(this, params()->profile);
    }
    tracer = params()->tracer;

    if (params()->isa.size() != numThreads) {
        fatal("Number of ISAs (%i) assigned to the CPU does not equal number "
              "of threads (%i).\n", params()->isa.size(), numThreads);
    }
}

void
BaseCPU::enableFunctionTrace()
{
    functionTracingEnabled = true;
}

BaseCPU::~BaseCPU()
{
    delete profileEvent;
    delete[] comLoadEventQueue;
    delete[] comInstEventQueue;
}

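// Arm the address monitor on a virtual address. The corresponding
// physical address is filled in later, either by mwait() from the
// request packet or by mwaitAtomic() via a TLB translation.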
void
BaseCPU::armMonitor(Addr address)
{
    addressMonitor.armed = true;
    addressMonitor.vAddr = address;
    addressMonitor.pAddr = 0x0;
    DPRINTF(Mwait, "Armed monitor (vAddr=0x%lx)\n", address);
}

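// Timing-mode mwait (modeling, e.g., the x86 MWAIT idiom): if no wakeup
// is pending, latch the cache-line-aligned physical address of the
// monitored location and start waiting; return true so the caller can
// suspend the core. A pending wakeup is consumed instead, and false is
// returned so execution continues.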
bool
BaseCPU::mwait(PacketPtr pkt)
{
    if (!addressMonitor.gotWakeup) {
        int block_size = cacheLineSize();
        uint64_t mask = ~((uint64_t)(block_size - 1));

        assert(pkt->req->hasPaddr());
        addressMonitor.pAddr = pkt->getAddr() & mask;
        addressMonitor.waiting = true;

        DPRINTF(Mwait, "mwait called (vAddr=0x%lx, line's paddr=0x%lx)\n",
                addressMonitor.vAddr, addressMonitor.pAddr);
        return true;
    } else {
        addressMonitor.gotWakeup = false;
        return false;
    }
}

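// Atomic-mode variant of mwait: no packet carries a physical address
// here, so translate the armed virtual address directly, clamping the
// access so it does not cross a cache-line boundary, and latch the
// line-aligned physical address before waiting.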
void
BaseCPU::mwaitAtomic(ThreadContext *tc, TheISA::TLB *dtb)
{
    Request req;
    Addr addr = addressMonitor.vAddr;
    int block_size = cacheLineSize();
    uint64_t mask = ~((uint64_t)(block_size - 1));
    int size = block_size;

    // The address of the next line if it crosses a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, block_size);

    if (secondAddr > addr)
        size = secondAddr - addr;

    req.setVirt(0, addr, size, 0x0, dataMasterId(), tc->instAddr());

    // translate to physical address
    Fault fault = dtb->translateAtomic(&req, tc, BaseTLB::Read);
    assert(fault == NoFault);

    addressMonitor.pAddr = req.getPaddr() & mask;
    addressMonitor.waiting = true;

    DPRINTF(Mwait, "mwait called (vAddr=0x%lx, line's paddr=0x%lx)\n",
            addressMonitor.vAddr, addressMonitor.pAddr);
}

void
BaseCPU::init()
{
    if (!params()->switched_out) {
        registerThreadContexts();

        verifyMemoryMode();
    }
}

void
BaseCPU::startup()
{
    if (FullSystem) {
        if (!params()->switched_out && profileEvent)
            schedule(profileEvent, curTick());
    }

    if (params()->progress_interval) {
        new CPUProgressEvent(this, params()->progress_interval);
    }
}

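// Helper that creates a PMU probe point with the given name and
// registers it with this CPU's probe manager.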
ProbePoints::PMUUPtr
BaseCPU::pmuProbePoint(const char *name)
{
    ProbePoints::PMUUPtr ptr;
    ptr.reset(new ProbePoints::PMU(getProbeManager(), name));

    return ptr;
}

void
BaseCPU::regProbePoints()
{
    ppCycles = pmuProbePoint("Cycles");

    ppRetiredInsts = pmuProbePoint("RetiredInsts");
    ppRetiredLoads = pmuProbePoint("RetiredLoads");
    ppRetiredStores = pmuProbePoint("RetiredStores");
    ppRetiredBranches = pmuProbePoint("RetiredBranches");
}

369BaseCPU::probeInstCommit(const StaticInstPtr &inst)
370{
371    if (!inst->isMicroop() || inst->isLastMicroop())
372        ppRetiredInsts->notify(1);
373
374
375    if (inst->isLoad())
376        ppRetiredLoads->notify(1);
377
378    if (inst->isStore())
379        ppRetiredStores->notify(1);
380
381    if (inst->isControl())
382        ppRetiredBranches->notify(1);
383}
384
385void
386BaseCPU::regStats()
387{
388    using namespace Stats;
389
390    numCycles
391        .name(name() + ".numCycles")
392        .desc("number of cpu cycles simulated")
393        ;
394
395    numWorkItemsStarted
396        .name(name() + ".numWorkItemsStarted")
397        .desc("number of work items this cpu started")
398        ;
399
400    numWorkItemsCompleted
401        .name(name() + ".numWorkItemsCompleted")
402        .desc("number of work items this cpu completed")
403        ;
404
405    int size = threadContexts.size();
406    if (size > 1) {
407        for (int i = 0; i < size; ++i) {
408            stringstream namestr;
409            ccprintf(namestr, "%s.ctx%d", name(), i);
410            threadContexts[i]->regStats(namestr.str());
411        }
412    } else if (size == 1)
413        threadContexts[0]->regStats(name());
414}
415
416BaseMasterPort &
417BaseCPU::getMasterPort(const string &if_name, PortID idx)
418{
419    // Get the right port based on name. This applies to all the
420    // subclasses of the base CPU and relies on their implementation
421    // of getDataPort and getInstPort. In all cases there methods
422    // return a MasterPort pointer.
423    if (if_name == "dcache_port")
424        return getDataPort();
425    else if (if_name == "icache_port")
426        return getInstPort();
427    else
428        return MemObject::getMasterPort(if_name, idx);
429}
430
431void
432BaseCPU::registerThreadContexts()
433{
434    ThreadID size = threadContexts.size();
435    for (ThreadID tid = 0; tid < size; ++tid) {
436        ThreadContext *tc = threadContexts[tid];
437
438        /** This is so that contextId and cpuId match where there is a
439         * 1cpu:1context relationship.  Otherwise, the order of registration
440         * could affect the assignment and cpu 1 could have context id 3, for
441         * example.  We may even want to do something like this for SMT so that
442         * cpu 0 has the lowest thread contexts and cpu N has the highest, but
443         * I'll just do this for now
444         */
445        if (numThreads == 1)
446            tc->setContextId(system->registerThreadContext(tc, _cpuId));
447        else
448            tc->setContextId(system->registerThreadContext(tc));
449
450        if (!FullSystem)
451            tc->getProcessPtr()->assignThreadContext(tc->contextId());
452    }
453}
454
455
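// Linear search for tc among this CPU's thread contexts. Note that this
// returns 0, i.e. the first thread, if the context is not found.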
int
BaseCPU::findContext(ThreadContext *tc)
{
    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        if (tc == threadContexts[tid])
            return tid;
    }
    return 0;
}

void
BaseCPU::switchOut()
{
    assert(!_switchedOut);
    _switchedOut = true;
    if (profileEvent && profileEvent->scheduled())
        deschedule(profileEvent);

    // Flush all TLBs in the CPU to avoid having stale translations if
    // it gets switched in later.
    flushTLBs();
}

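// Take over state from a previously switched-out CPU: migrate each
// thread context, any table walker and checker TLB ports, and the
// interrupt controller, then rebind the instruction and data ports.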
void
BaseCPU::takeOverFrom(BaseCPU *oldCPU)
{
    assert(threadContexts.size() == oldCPU->threadContexts.size());
    assert(_cpuId == oldCPU->cpuId());
    assert(_switchedOut);
    assert(oldCPU != this);
    _pid = oldCPU->getPid();
    _taskId = oldCPU->taskId();
    _switchedOut = false;

    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *newTC = threadContexts[i];
        ThreadContext *oldTC = oldCPU->threadContexts[i];

        newTC->takeOverFrom(oldTC);

        CpuEvent::replaceThreadContext(oldTC, newTC);

        assert(newTC->contextId() == oldTC->contextId());
        assert(newTC->threadId() == oldTC->threadId());
        system->replaceThreadContext(newTC, newTC->contextId());

        /* This code no longer works since the zero register (e.g.,
         * r31 on Alpha) doesn't necessarily contain zero at this
         * point.
           if (DTRACE(Context))
            ThreadContext::compare(oldTC, newTC);
        */

        BaseMasterPort *old_itb_port = oldTC->getITBPtr()->getMasterPort();
        BaseMasterPort *old_dtb_port = oldTC->getDTBPtr()->getMasterPort();
        BaseMasterPort *new_itb_port = newTC->getITBPtr()->getMasterPort();
        BaseMasterPort *new_dtb_port = newTC->getDTBPtr()->getMasterPort();

        // Move over any table walker ports if they exist
        if (new_itb_port) {
            assert(!new_itb_port->isConnected());
            assert(old_itb_port);
            assert(old_itb_port->isConnected());
            BaseSlavePort &slavePort = old_itb_port->getSlavePort();
            old_itb_port->unbind();
            new_itb_port->bind(slavePort);
        }
        if (new_dtb_port) {
            assert(!new_dtb_port->isConnected());
            assert(old_dtb_port);
            assert(old_dtb_port->isConnected());
            BaseSlavePort &slavePort = old_dtb_port->getSlavePort();
            old_dtb_port->unbind();
            new_dtb_port->bind(slavePort);
        }
        newTC->getITBPtr()->takeOverFrom(oldTC->getITBPtr());
        newTC->getDTBPtr()->takeOverFrom(oldTC->getDTBPtr());

        // Check whether or not we have to transfer CheckerCPU
        // objects over in the switch
        CheckerCPU *oldChecker = oldTC->getCheckerCpuPtr();
        CheckerCPU *newChecker = newTC->getCheckerCpuPtr();
        if (oldChecker && newChecker) {
            BaseMasterPort *old_checker_itb_port =
                oldChecker->getITBPtr()->getMasterPort();
            BaseMasterPort *old_checker_dtb_port =
                oldChecker->getDTBPtr()->getMasterPort();
            BaseMasterPort *new_checker_itb_port =
                newChecker->getITBPtr()->getMasterPort();
            BaseMasterPort *new_checker_dtb_port =
                newChecker->getDTBPtr()->getMasterPort();

            newChecker->getITBPtr()->takeOverFrom(oldChecker->getITBPtr());
            newChecker->getDTBPtr()->takeOverFrom(oldChecker->getDTBPtr());

            // Move over any table walker ports if they exist for checker
            if (new_checker_itb_port) {
                assert(!new_checker_itb_port->isConnected());
                assert(old_checker_itb_port);
                assert(old_checker_itb_port->isConnected());
                BaseSlavePort &slavePort =
                    old_checker_itb_port->getSlavePort();
                old_checker_itb_port->unbind();
                new_checker_itb_port->bind(slavePort);
            }
            if (new_checker_dtb_port) {
                assert(!new_checker_dtb_port->isConnected());
                assert(old_checker_dtb_port);
                assert(old_checker_dtb_port->isConnected());
                BaseSlavePort &slavePort =
                    old_checker_dtb_port->getSlavePort();
                old_checker_dtb_port->unbind();
                new_checker_dtb_port->bind(slavePort);
            }
        }
    }

    interrupts = oldCPU->interrupts;
    interrupts->setCPU(this);
    oldCPU->interrupts = NULL;

    if (FullSystem) {
        for (ThreadID i = 0; i < size; ++i)
            threadContexts[i]->profileClear();

        if (profileEvent)
            schedule(profileEvent, curTick());
    }

    // All CPUs have an instruction and a data port, and the new CPU's
    // ports are dangling while the old CPU has its ports connected
    // already. Unbind the old CPU and then bind the ports of the one
    // we are switching to.
    assert(!getInstPort().isConnected());
    assert(oldCPU->getInstPort().isConnected());
    BaseSlavePort &inst_peer_port = oldCPU->getInstPort().getSlavePort();
    oldCPU->getInstPort().unbind();
    getInstPort().bind(inst_peer_port);

    assert(!getDataPort().isConnected());
    assert(oldCPU->getDataPort().isConnected());
    BaseSlavePort &data_peer_port = oldCPU->getDataPort().getSlavePort();
    oldCPU->getDataPort().unbind();
    getDataPort().bind(data_peer_port);
}

void
BaseCPU::flushTLBs()
{
    for (ThreadID i = 0; i < threadContexts.size(); ++i) {
        ThreadContext &tc(*threadContexts[i]);
        CheckerCPU *checker(tc.getCheckerCpuPtr());

        tc.getITBPtr()->flushAll();
        tc.getDTBPtr()->flushAll();
        if (checker) {
            checker->getITBPtr()->flushAll();
            checker->getDTBPtr()->flushAll();
        }
    }
}


BaseCPU::ProfileEvent::ProfileEvent(BaseCPU *_cpu, Tick _interval)
    : cpu(_cpu), interval(_interval)
{ }

void
BaseCPU::ProfileEvent::process()
{
    ThreadID size = cpu->threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = cpu->threadContexts[i];
        tc->profileSample();
    }

    cpu->schedule(this, curTick() + interval);
}

void
BaseCPU::serialize(std::ostream &os)
{
    SERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        /* Unlike _pid, _taskId is not serialized, as task ids are
         * dynamically assigned unique ids that are only meaningful for
         * the duration of a specific run. We will need to serialize the
         * entire taskMap in system. */
        SERIALIZE_SCALAR(_pid);

        interrupts->serialize(os);

        // Serialize the threads, this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i) {
            nameOut(os, csprintf("%s.xc.%i", name(), i));
            serializeThread(os, i);
        }
    }
}

void
BaseCPU::unserialize(Checkpoint *cp, const std::string &section)
{
    UNSERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        UNSERIALIZE_SCALAR(_pid);
        interrupts->unserialize(cp, section);

        // Unserialize the threads, this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i)
            unserializeThread(cp, csprintf("%s.xc.%i", section, i), i);
    }
}

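// Schedule an exit event on this thread's instruction-based event queue,
// to fire once the thread has committed `insts` more instructions.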
void
BaseCPU::scheduleInstStop(ThreadID tid, Counter insts, const char *cause)
{
    const Tick now(comInstEventQueue[tid]->getCurTick());
    Event *event(new LocalSimLoopExitEvent(cause, 0));

    comInstEventQueue[tid]->schedule(event, now + insts);
}

AddressMonitor::AddressMonitor()
{
    armed = false;
    waiting = false;
    gotWakeup = false;
}

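// Called for observed (snooped) accesses: if the monitor is armed and
// waiting, and the packet's address matches the monitored line-aligned
// physical address, stop waiting and return true so the caller can wake
// up the core.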
bool
AddressMonitor::doMonitor(PacketPtr pkt)
{
    assert(pkt->req->hasPaddr());
    if (armed && waiting) {
        if (pAddr == pkt->getAddr()) {
            DPRINTF(Mwait, "pAddr=0x%lx invalidated: waking up core\n",
                    pkt->getAddr());
            waiting = false;
            return true;
        }
    }
    return false;
}

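// Same as scheduleInstStop(), but counted in committed loads on the
// thread's load-based event queue.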
void
BaseCPU::scheduleLoadStop(ThreadID tid, Counter loads, const char *cause)
{
    const Tick now(comLoadEventQueue[tid]->getCurTick());
    Event *event(new LocalSimLoopExitEvent(cause, 0));

    comLoadEventQueue[tid]->schedule(event, now + loads);
}

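// Function-call tracing: whenever the PC leaves the address range of the
// function it was last in, emit a trace record with the time spent in
// the previous function and the symbol of the new one.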
void
BaseCPU::traceFunctionsInternal(Addr pc)
{
    if (!debugSymbolTable)
        return;

    // If the PC has entered a different function, print the new function
    // symbol and update the saved range.  Otherwise do nothing.
    if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
        string sym_str;
        bool found = debugSymbolTable->findNearestSymbol(pc, sym_str,
                                                         currentFunctionStart,
                                                         currentFunctionEnd);

        if (!found) {
            // no symbol found: use addr as label
            sym_str = csprintf("0x%x", pc);
            currentFunctionStart = pc;
            currentFunctionEnd = pc + 1;
        }

        ccprintf(*functionTraceStream, " (%d)\n%d: %s",
                 curTick() - functionEntryTick, curTick(), sym_str);
        functionEntryTick = curTick();
    }
}