base.cc: comparison of revisions 11325:67cc559d513a and 11359:b0b976a1ceda
1/*
2 * Copyright (c) 2011-2012 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2011 Regents of the University of California
16 * Copyright (c) 2013 Advanced Micro Devices, Inc.
17 * Copyright (c) 2013 Mark D. Hill and David A. Wood
18 * All rights reserved.
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions are
22 * met: redistributions of source code must retain the above copyright
23 * notice, this list of conditions and the following disclaimer;
24 * redistributions in binary form must reproduce the above copyright
25 * notice, this list of conditions and the following disclaimer in the
26 * documentation and/or other materials provided with the distribution;
27 * neither the name of the copyright holders nor the names of its
28 * contributors may be used to endorse or promote products derived from
29 * this software without specific prior written permission.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
36 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
37 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
38 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
39 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
41 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 *
43 * Authors: Steve Reinhardt
44 * Nathan Binkert
45 * Rick Strong
46 */
47
48#include <iostream>
49#include <sstream>
50#include <string>
51
52#include "arch/tlb.hh"
53#include "base/loader/symtab.hh"
54#include "base/cprintf.hh"
55#include "base/misc.hh"
56#include "base/output.hh"
57#include "base/trace.hh"
58#include "cpu/checker/cpu.hh"
59#include "cpu/base.hh"
60#include "cpu/cpuevent.hh"
61#include "cpu/profile.hh"
62#include "cpu/thread_context.hh"
63#include "debug/Mwait.hh"
64#include "debug/SyscallVerbose.hh"
65#include "mem/page_table.hh"
66#include "params/BaseCPU.hh"
67#include "sim/full_system.hh"
68#include "sim/process.hh"
69#include "sim/sim_events.hh"
70#include "sim/sim_exit.hh"
71#include "sim/system.hh"
72
73// Hack
74#include "sim/stat_control.hh"
75
76using namespace std;
77
78vector<BaseCPU *> BaseCPU::cpuList;
79
80// This variable reflects the max number of threads in any CPU. Be
81// careful to only use it once all the CPUs that you care about have
82// been initialized
83int maxThreadsPerCPU = 1;
84
85CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
86 : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0),
87 cpu(_cpu), _repeatEvent(true)
88{
89 if (_interval)
90 cpu->schedule(this, curTick() + _interval);
91}
92
93void
94CPUProgressEvent::process()
95{
96 Counter temp = cpu->totalOps();
97
98 if (_repeatEvent)
99 cpu->schedule(this, curTick() + _interval);
100
101 if (cpu->switchedOut()) {
102 return;
103 }
104
105#ifndef NDEBUG
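// Interval IPC below: _interval is expressed in ticks, so dividing it by
// cpu->clockPeriod() converts it to CPU cycles; the ops committed since the
// previous progress event divided by that cycle count gives the IPC.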
106 double ipc = double(temp - lastNumInst) / (_interval / cpu->clockPeriod());
107
108 DPRINTFN("%s progress event, total committed:%i, progress insts committed: "
109 "%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst,
110 ipc);
111 ipc = 0.0;
112#else
113 cprintf("%lli: %s progress event, total committed:%i, progress insts "
114 "committed: %lli\n", curTick(), cpu->name(), temp,
115 temp - lastNumInst);
116#endif
117 lastNumInst = temp;
118}
119
120const char *
121CPUProgressEvent::description() const
122{
123 return "CPU Progress";
124}
125
126BaseCPU::BaseCPU(Params *p, bool is_checker)
127 : MemObject(p), instCnt(0), _cpuId(p->cpu_id), _socketId(p->socket_id),
128 _instMasterId(p->system->getMasterId(name() + ".inst")),
129 _dataMasterId(p->system->getMasterId(name() + ".data")),
130 _taskId(ContextSwitchTaskId::Unknown), _pid(invldPid),
131 _switchedOut(p->switched_out), _cacheLineSize(p->system->cacheLineSize()),
132 interrupts(p->interrupts), profileEvent(NULL),
133 numThreads(p->numThreads), system(p->system),
134 functionTraceStream(nullptr), currentFunctionStart(0),
135 currentFunctionEnd(0), functionEntryTick(0),
136 addressMonitor(p->numThreads)
137{
138 // if Python did not provide a valid ID, do it here
139 if (_cpuId == -1 ) {
140 _cpuId = cpuList.size();
141 }
142
143 // add self to global list of CPUs
144 cpuList.push_back(this);
145
146 DPRINTF(SyscallVerbose, "Constructing CPU with id %d, socket id %d\n",
147 _cpuId, _socketId);
148
149 if (numThreads > maxThreadsPerCPU)
150 maxThreadsPerCPU = numThreads;
151
152 // allocate per-thread instruction-based event queues
153 comInstEventQueue = new EventQueue *[numThreads];
154 for (ThreadID tid = 0; tid < numThreads; ++tid)
155 comInstEventQueue[tid] =
156 new EventQueue("instruction-based event queue");
157
158 //
159 // set up instruction-count-based termination events, if any
160 //
161 if (p->max_insts_any_thread != 0) {
162 const char *cause = "a thread reached the max instruction count";
163 for (ThreadID tid = 0; tid < numThreads; ++tid)
164 scheduleInstStop(tid, p->max_insts_any_thread, cause);
165 }
166
167 // Set up instruction-count-based termination events for SimPoints.
168 // Typically there is more than one action point.
169 // Simulation.py is responsible for taking the necessary actions upon
170 // exiting the simulation loop.
171 if (!p->simpoint_start_insts.empty()) {
172 const char *cause = "simpoint starting point found";
173 for (size_t i = 0; i < p->simpoint_start_insts.size(); ++i)
174 scheduleInstStop(0, p->simpoint_start_insts[i], cause);
175 }
176
177 if (p->max_insts_all_threads != 0) {
178 const char *cause = "all threads reached the max instruction count";
179
180 // allocate & initialize shared downcounter: each event will
181 // decrement this when triggered; simulation will terminate
182 // when counter reaches 0
183 int *counter = new int;
184 *counter = numThreads;
185 for (ThreadID tid = 0; tid < numThreads; ++tid) {
186 Event *event = new CountedExitEvent(cause, *counter);
187 comInstEventQueue[tid]->schedule(event, p->max_insts_all_threads);
188 }
189 }
190
191 // allocate per-thread load-based event queues
192 comLoadEventQueue = new EventQueue *[numThreads];
193 for (ThreadID tid = 0; tid < numThreads; ++tid)
194 comLoadEventQueue[tid] = new EventQueue("load-based event queue");
195
196 //
197 // set up load-count-based termination events, if any
198 //
199 if (p->max_loads_any_thread != 0) {
200 const char *cause = "a thread reached the max load count";
201 for (ThreadID tid = 0; tid < numThreads; ++tid)
202 scheduleLoadStop(tid, p->max_loads_any_thread, cause);
203 }
204
205 if (p->max_loads_all_threads != 0) {
206 const char *cause = "all threads reached the max load count";
207 // allocate & initialize shared downcounter: each event will
208 // decrement this when triggered; simulation will terminate
209 // when counter reaches 0
210 int *counter = new int;
211 *counter = numThreads;
212 for (ThreadID tid = 0; tid < numThreads; ++tid) {
213 Event *event = new CountedExitEvent(cause, *counter);
214 comLoadEventQueue[tid]->schedule(event, p->max_loads_all_threads);
215 }
216 }
217
218 functionTracingEnabled = false;
219 if (p->function_trace) {
220 const string fname = csprintf("ftrace.%s", name());
// removed in revision 11359:b0b976a1ceda:
221 functionTraceStream = simout.find(fname);
222 if (!functionTraceStream)
223 functionTraceStream = simout.create(fname);
// added in revision 11359:b0b976a1ceda:
221 functionTraceStream = simout.findOrCreate(fname)->stream();
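// Note on the change between the two revisions above: the older code looked
// the trace file up with simout.find() and fell back to simout.create() when
// it did not exist yet; the newer code folds both steps into a single
// simout.findOrCreate(fname) call, whose ->stream() presumably hands back the
// underlying output stream pointer that functionTraceStream holds.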
224
225 currentFunctionStart = currentFunctionEnd = 0;
226 functionEntryTick = p->function_trace_start;
227
228 if (p->function_trace_start == 0) {
229 functionTracingEnabled = true;
230 } else {
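// Tracing is requested to start later: arm a one-shot event that simply
// flips functionTracingEnabled once simulation reaches function_trace_start.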
231 typedef EventWrapper<BaseCPU, &BaseCPU::enableFunctionTrace> wrap;
232 Event *event = new wrap(this, true);
233 schedule(event, p->function_trace_start);
234 }
235 }
236
237 // The interrupts should always be present unless this CPU is
238 // switched in later or in case it is a checker CPU
239 if (!params()->switched_out && !is_checker) {
240 fatal_if(interrupts.size() != numThreads,
241 "CPU %s has %i interrupt controllers, but is expecting one "
242 "per thread (%i)\n",
243 name(), interrupts.size(), numThreads);
244 for (ThreadID tid = 0; tid < numThreads; tid++)
245 interrupts[tid]->setCPU(this);
246 }
247
248 if (FullSystem) {
249 if (params()->profile)
250 profileEvent = new ProfileEvent(this, params()->profile);
251 }
252 tracer = params()->tracer;
253
254 if (params()->isa.size() != numThreads) {
255 fatal("Number of ISAs (%i) assigned to the CPU does not equal number "
256 "of threads (%i).\n", params()->isa.size(), numThreads);
257 }
258}
259
260void
261BaseCPU::enableFunctionTrace()
262{
263 functionTracingEnabled = true;
264}
265
266BaseCPU::~BaseCPU()
267{
268 delete profileEvent;
269 delete[] comLoadEventQueue;
270 delete[] comInstEventQueue;
271}
272
273void
274BaseCPU::armMonitor(ThreadID tid, Addr address)
275{
276 assert(tid < numThreads);
277 AddressMonitor &monitor = addressMonitor[tid];
278
279 monitor.armed = true;
280 monitor.vAddr = address;
281 monitor.pAddr = 0x0;
282 DPRINTF(Mwait,"[tid:%d] Armed monitor (vAddr=0x%lx)\n", tid, address);
283}
284
285bool
286BaseCPU::mwait(ThreadID tid, PacketPtr pkt)
287{
288 assert(tid < numThreads);
289 AddressMonitor &monitor = addressMonitor[tid];
290
291 if (!monitor.gotWakeup) {
292 int block_size = cacheLineSize();
293 uint64_t mask = ~((uint64_t)(block_size - 1));
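// Illustrative example (assuming a 64-byte cache line): block_size - 1 is
// 0x3f, so mask is ~0x3f = 0xffffffffffffffc0, and pkt->getAddr() & mask
// below yields the base physical address of the monitored line.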
294
295 assert(pkt->req->hasPaddr());
296 monitor.pAddr = pkt->getAddr() & mask;
297 monitor.waiting = true;
298
299 DPRINTF(Mwait,"[tid:%d] mwait called (vAddr=0x%lx, "
300 "line's paddr=0x%lx)\n", tid, monitor.vAddr, monitor.pAddr);
301 return true;
302 } else {
303 monitor.gotWakeup = false;
304 return false;
305 }
306}
307
308void
309BaseCPU::mwaitAtomic(ThreadID tid, ThreadContext *tc, TheISA::TLB *dtb)
310{
311 assert(tid < numThreads);
312 AddressMonitor &monitor = addressMonitor[tid];
313
314 Request req;
315 Addr addr = monitor.vAddr;
316 int block_size = cacheLineSize();
317 uint64_t mask = ~((uint64_t)(block_size - 1));
318 int size = block_size;
319
320 // The address of the next cache line, if the access crosses a line boundary.
321 Addr secondAddr = roundDown(addr + size - 1, block_size);
322
323 if (secondAddr > addr)
324 size = secondAddr - addr;
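// If the monitored address is not line-aligned, secondAddr is the start of
// the following line and the access size is truncated at that boundary, so
// the translation below never spans two cache lines.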
325
326 req.setVirt(0, addr, size, 0x0, dataMasterId(), tc->instAddr());
327
328 // translate to physical address
329 Fault fault = dtb->translateAtomic(&req, tc, BaseTLB::Read);
330 assert(fault == NoFault);
331
332 monitor.pAddr = req.getPaddr() & mask;
333 monitor.waiting = true;
334
335 DPRINTF(Mwait,"[tid:%d] mwait called (vAddr=0x%lx, line's paddr=0x%lx)\n",
336 tid, monitor.vAddr, monitor.pAddr);
337}
338
339void
340BaseCPU::init()
341{
342 if (!params()->switched_out) {
343 registerThreadContexts();
344
345 verifyMemoryMode();
346 }
347}
348
349void
350BaseCPU::startup()
351{
352 if (FullSystem) {
353 if (!params()->switched_out && profileEvent)
354 schedule(profileEvent, curTick());
355 }
356
357 if (params()->progress_interval) {
358 new CPUProgressEvent(this, params()->progress_interval);
359 }
360}
361
362ProbePoints::PMUUPtr
363BaseCPU::pmuProbePoint(const char *name)
364{
365 ProbePoints::PMUUPtr ptr;
366 ptr.reset(new ProbePoints::PMU(getProbeManager(), name));
367
368 return ptr;
369}
370
371void
372BaseCPU::regProbePoints()
373{
374 ppCycles = pmuProbePoint("Cycles");
375
376 ppRetiredInsts = pmuProbePoint("RetiredInsts");
377 ppRetiredLoads = pmuProbePoint("RetiredLoads");
378 ppRetiredStores = pmuProbePoint("RetiredStores");
379 ppRetiredBranches = pmuProbePoint("RetiredBranches");
380}
381
382void
383BaseCPU::probeInstCommit(const StaticInstPtr &inst)
384{
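// Count a committed instruction once per macro-op: either the instruction
// is not micro-coded at all, or this is its final micro-op.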
385 if (!inst->isMicroop() || inst->isLastMicroop())
386 ppRetiredInsts->notify(1);
387
388
389 if (inst->isLoad())
390 ppRetiredLoads->notify(1);
391
392 if (inst->isStore())
393 ppRetiredStores->notify(1);
394
395 if (inst->isControl())
396 ppRetiredBranches->notify(1);
397}
398
399void
400BaseCPU::regStats()
401{
402 using namespace Stats;
403
404 numCycles
405 .name(name() + ".numCycles")
406 .desc("number of cpu cycles simulated")
407 ;
408
409 numWorkItemsStarted
410 .name(name() + ".numWorkItemsStarted")
411 .desc("number of work items this cpu started")
412 ;
413
414 numWorkItemsCompleted
415 .name(name() + ".numWorkItemsCompleted")
416 .desc("number of work items this cpu completed")
417 ;
418
419 int size = threadContexts.size();
420 if (size > 1) {
421 for (int i = 0; i < size; ++i) {
422 stringstream namestr;
423 ccprintf(namestr, "%s.ctx%d", name(), i);
424 threadContexts[i]->regStats(namestr.str());
425 }
426 } else if (size == 1)
427 threadContexts[0]->regStats(name());
428}
429
430BaseMasterPort &
431BaseCPU::getMasterPort(const string &if_name, PortID idx)
432{
433 // Get the right port based on name. This applies to all the
434 // subclasses of the base CPU and relies on their implementation
435 // of getDataPort and getInstPort. In all cases these methods
436 // return a MasterPort pointer.
437 if (if_name == "dcache_port")
438 return getDataPort();
439 else if (if_name == "icache_port")
440 return getInstPort();
441 else
442 return MemObject::getMasterPort(if_name, idx);
443}
444
445void
446BaseCPU::registerThreadContexts()
447{
448 assert(system->multiThread || numThreads == 1);
449
450 ThreadID size = threadContexts.size();
451 for (ThreadID tid = 0; tid < size; ++tid) {
452 ThreadContext *tc = threadContexts[tid];
453
454 if (system->multiThread) {
455 tc->setContextId(system->registerThreadContext(tc));
456 } else {
457 tc->setContextId(system->registerThreadContext(tc, _cpuId));
458 }
459
460 if (!FullSystem)
461 tc->getProcessPtr()->assignThreadContext(tc->contextId());
462 }
463}
464
465
466int
467BaseCPU::findContext(ThreadContext *tc)
468{
469 ThreadID size = threadContexts.size();
470 for (ThreadID tid = 0; tid < size; ++tid) {
471 if (tc == threadContexts[tid])
472 return tid;
473 }
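// Context not found: fall back to thread 0.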
474 return 0;
475}
476
477void
478BaseCPU::switchOut()
479{
480 assert(!_switchedOut);
481 _switchedOut = true;
482 if (profileEvent && profileEvent->scheduled())
483 deschedule(profileEvent);
484
485 // Flush all TLBs in the CPU to avoid having stale translations if
486 // it gets switched in later.
487 flushTLBs();
488}
489
490void
491BaseCPU::takeOverFrom(BaseCPU *oldCPU)
492{
493 assert(threadContexts.size() == oldCPU->threadContexts.size());
494 assert(_cpuId == oldCPU->cpuId());
495 assert(_switchedOut);
496 assert(oldCPU != this);
497 _pid = oldCPU->getPid();
498 _taskId = oldCPU->taskId();
499 _switchedOut = false;
500
501 ThreadID size = threadContexts.size();
502 for (ThreadID i = 0; i < size; ++i) {
503 ThreadContext *newTC = threadContexts[i];
504 ThreadContext *oldTC = oldCPU->threadContexts[i];
505
506 newTC->takeOverFrom(oldTC);
507
508 CpuEvent::replaceThreadContext(oldTC, newTC);
509
510 assert(newTC->contextId() == oldTC->contextId());
511 assert(newTC->threadId() == oldTC->threadId());
512 system->replaceThreadContext(newTC, newTC->contextId());
513
514 /* This code no longer works since the zero register (e.g.,
515 * r31 on Alpha) doesn't necessarily contain zero at this
516 * point.
517 if (DTRACE(Context))
518 ThreadContext::compare(oldTC, newTC);
519 */
520
521 BaseMasterPort *old_itb_port = oldTC->getITBPtr()->getMasterPort();
522 BaseMasterPort *old_dtb_port = oldTC->getDTBPtr()->getMasterPort();
523 BaseMasterPort *new_itb_port = newTC->getITBPtr()->getMasterPort();
524 BaseMasterPort *new_dtb_port = newTC->getDTBPtr()->getMasterPort();
525
526 // Move over any table walker ports if they exist
527 if (new_itb_port) {
528 assert(!new_itb_port->isConnected());
529 assert(old_itb_port);
530 assert(old_itb_port->isConnected());
531 BaseSlavePort &slavePort = old_itb_port->getSlavePort();
532 old_itb_port->unbind();
533 new_itb_port->bind(slavePort);
534 }
535 if (new_dtb_port) {
536 assert(!new_dtb_port->isConnected());
537 assert(old_dtb_port);
538 assert(old_dtb_port->isConnected());
539 BaseSlavePort &slavePort = old_dtb_port->getSlavePort();
540 old_dtb_port->unbind();
541 new_dtb_port->bind(slavePort);
542 }
543 newTC->getITBPtr()->takeOverFrom(oldTC->getITBPtr());
544 newTC->getDTBPtr()->takeOverFrom(oldTC->getDTBPtr());
545
546 // Check whether or not we have to transfer CheckerCPU
547 // objects over in the switch
548 CheckerCPU *oldChecker = oldTC->getCheckerCpuPtr();
549 CheckerCPU *newChecker = newTC->getCheckerCpuPtr();
550 if (oldChecker && newChecker) {
551 BaseMasterPort *old_checker_itb_port =
552 oldChecker->getITBPtr()->getMasterPort();
553 BaseMasterPort *old_checker_dtb_port =
554 oldChecker->getDTBPtr()->getMasterPort();
555 BaseMasterPort *new_checker_itb_port =
556 newChecker->getITBPtr()->getMasterPort();
557 BaseMasterPort *new_checker_dtb_port =
558 newChecker->getDTBPtr()->getMasterPort();
559
560 newChecker->getITBPtr()->takeOverFrom(oldChecker->getITBPtr());
561 newChecker->getDTBPtr()->takeOverFrom(oldChecker->getDTBPtr());
562
563 // Move over any table walker ports if they exist for checker
564 if (new_checker_itb_port) {
565 assert(!new_checker_itb_port->isConnected());
566 assert(old_checker_itb_port);
567 assert(old_checker_itb_port->isConnected());
568 BaseSlavePort &slavePort =
569 old_checker_itb_port->getSlavePort();
570 old_checker_itb_port->unbind();
571 new_checker_itb_port->bind(slavePort);
572 }
573 if (new_checker_dtb_port) {
574 assert(!new_checker_dtb_port->isConnected());
575 assert(old_checker_dtb_port);
576 assert(old_checker_dtb_port->isConnected());
577 BaseSlavePort &slavePort =
578 old_checker_dtb_port->getSlavePort();
579 old_checker_dtb_port->unbind();
580 new_checker_dtb_port->bind(slavePort);
581 }
582 }
583 }
584
585 interrupts = oldCPU->interrupts;
586 for (ThreadID tid = 0; tid < numThreads; tid++) {
587 interrupts[tid]->setCPU(this);
588 }
589 oldCPU->interrupts.clear();
590
591 if (FullSystem) {
592 for (ThreadID i = 0; i < size; ++i)
593 threadContexts[i]->profileClear();
594
595 if (profileEvent)
596 schedule(profileEvent, curTick());
597 }
598
599 // All CPUs have an instruction and a data port, and the new CPU's
600 // ports are dangling while the old CPU has its ports connected
601 // already. Unbind the old CPU and then bind the ports of the one
602 // we are switching to.
603 assert(!getInstPort().isConnected());
604 assert(oldCPU->getInstPort().isConnected());
605 BaseSlavePort &inst_peer_port = oldCPU->getInstPort().getSlavePort();
606 oldCPU->getInstPort().unbind();
607 getInstPort().bind(inst_peer_port);
608
609 assert(!getDataPort().isConnected());
610 assert(oldCPU->getDataPort().isConnected());
611 BaseSlavePort &data_peer_port = oldCPU->getDataPort().getSlavePort();
612 oldCPU->getDataPort().unbind();
613 getDataPort().bind(data_peer_port);
614}
615
616void
617BaseCPU::flushTLBs()
618{
619 for (ThreadID i = 0; i < threadContexts.size(); ++i) {
620 ThreadContext &tc(*threadContexts[i]);
621 CheckerCPU *checker(tc.getCheckerCpuPtr());
622
623 tc.getITBPtr()->flushAll();
624 tc.getDTBPtr()->flushAll();
625 if (checker) {
626 checker->getITBPtr()->flushAll();
627 checker->getDTBPtr()->flushAll();
628 }
629 }
630}
631
632
633BaseCPU::ProfileEvent::ProfileEvent(BaseCPU *_cpu, Tick _interval)
634 : cpu(_cpu), interval(_interval)
635{ }
636
637void
638BaseCPU::ProfileEvent::process()
639{
640 ThreadID size = cpu->threadContexts.size();
641 for (ThreadID i = 0; i < size; ++i) {
642 ThreadContext *tc = cpu->threadContexts[i];
643 tc->profileSample();
644 }
645
646 cpu->schedule(this, curTick() + interval);
647}
648
649void
650BaseCPU::serialize(CheckpointOut &cp) const
651{
652 SERIALIZE_SCALAR(instCnt);
653
654 if (!_switchedOut) {
655 /* Unlike _pid, _taskId is not serialized, as task ids are dynamically
656 * assigned and only meaningful for the duration of a specific run. We
657 * would need to serialize the entire taskMap in the system to restore
658 * them. */
659 SERIALIZE_SCALAR(_pid);
660
661 // Serialize the threads, this is done by the CPU implementation.
662 for (ThreadID i = 0; i < numThreads; ++i) {
663 ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
664 interrupts[i]->serialize(cp);
665 serializeThread(cp, i);
666 }
667 }
668}
669
670void
671BaseCPU::unserialize(CheckpointIn &cp)
672{
673 UNSERIALIZE_SCALAR(instCnt);
674
675 if (!_switchedOut) {
676 UNSERIALIZE_SCALAR(_pid);
677
678 // Unserialize the threads, this is done by the CPU implementation.
679 for (ThreadID i = 0; i < numThreads; ++i) {
680 ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
681 interrupts[i]->unserialize(cp);
682 unserializeThread(cp, i);
683 }
684 }
685}
686
687void
688BaseCPU::scheduleInstStop(ThreadID tid, Counter insts, const char *cause)
689{
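// comInstEventQueue is the per-thread instruction-based queue, so its "ticks"
// are committed-instruction counts; scheduling at now + insts makes the exit
// event fire after this thread commits insts more instructions.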
690 const Tick now(comInstEventQueue[tid]->getCurTick());
691 Event *event(new LocalSimLoopExitEvent(cause, 0));
692
693 comInstEventQueue[tid]->schedule(event, now + insts);
694}
695
696AddressMonitor::AddressMonitor() {
697 armed = false;
698 waiting = false;
699 gotWakeup = false;
700}
701
702bool AddressMonitor::doMonitor(PacketPtr pkt) {
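// Wake the core only if the monitor is armed and waiting and the observed
// packet's address matches the monitored line's physical address (pAddr was
// line-aligned when the monitor was set up).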
703 assert(pkt->req->hasPaddr());
704 if (armed && waiting) {
705 if (pAddr == pkt->getAddr()) {
706 DPRINTF(Mwait,"pAddr=0x%lx invalidated: waking up core\n",
707 pkt->getAddr());
708 waiting = false;
709 return true;
710 }
711 }
712 return false;
713}
714
715void
716BaseCPU::scheduleLoadStop(ThreadID tid, Counter loads, const char *cause)
717{
718 const Tick now(comLoadEventQueue[tid]->getCurTick());
719 Event *event(new LocalSimLoopExitEvent(cause, 0));
720
721 comLoadEventQueue[tid]->schedule(event, now + loads);
722}
723
724
725void
726BaseCPU::traceFunctionsInternal(Addr pc)
727{
728 if (!debugSymbolTable)
729 return;
730
731 // if the pc has entered a different function, print the new function's
732 // symbol and update the saved range. Otherwise do nothing.
733 if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
734 string sym_str;
735 bool found = debugSymbolTable->findNearestSymbol(pc, sym_str,
736 currentFunctionStart,
737 currentFunctionEnd);
738
739 if (!found) {
740 // no symbol found: use addr as label
741 sym_str = csprintf("0x%x", pc);
742 currentFunctionStart = pc;
743 currentFunctionEnd = pc + 1;
744 }
745
746 ccprintf(*functionTraceStream, " (%d)\n%d: %s",
747 curTick() - functionEntryTick, curTick(), sym_str);
748 functionEntryTick = curTick();
749 }
750}