// base.cc (5715:e8c1d4e669a7 -> 5718:323cfbfec1a4)
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Nathan Binkert
 */

#include <iostream>
#include <sstream>
#include <string>

#include "base/cprintf.hh"
#include "base/loader/symtab.hh"
#include "base/misc.hh"
#include "base/output.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/cpuevent.hh"
#include "cpu/profile.hh"
#include "cpu/thread_context.hh"
#include "params/BaseCPU.hh"
#include "sim/process.hh"
#include "sim/sim_events.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

// Hack
#include "sim/stat_control.hh"

using namespace std;

vector<BaseCPU *> BaseCPU::cpuList;

// This variable reflects the max number of threads in any CPU.  Be
// careful to only use it once all the CPUs that you care about have
// been initialized.
int maxThreadsPerCPU = 1;

CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
    : Event(Event::Progress_Event_Pri), interval(ival), lastNumInst(0),
      cpu(_cpu)
{
    if (interval)
        cpu->schedule(this, curTick + interval);
}

void
CPUProgressEvent::process()
{
    Counter temp = cpu->totalInstructions();
#ifndef NDEBUG
    // interval / cpu->ticks(1) is the number of CPU cycles in the
    // interval, so ipc is instructions committed per cycle
    double ipc = double(temp - lastNumInst) / (interval / cpu->ticks(1));

    DPRINTFN("%s progress event, instructions committed: %lli, IPC: %0.8f\n",
             cpu->name(), temp - lastNumInst, ipc);
    ipc = 0.0; // keeps ipc "used" when DPRINTFN compiles away
#else
    cprintf("%lli: %s progress event, instructions committed: %lli\n",
            curTick, cpu->name(), temp - lastNumInst);
#endif
    lastNumInst = temp;
    cpu->schedule(this, curTick + interval);
}

const char *
CPUProgressEvent::description() const
{
    return "CPU Progress";
}

#if FULL_SYSTEM
BaseCPU::BaseCPU(Params *p)
    : MemObject(p), clock(p->clock), instCnt(0), _cpuId(p->cpu_id),
      interrupts(p->interrupts),
      number_of_threads(p->numThreads), system(p->system),
      phase(p->phase)
#else
BaseCPU::BaseCPU(Params *p)
    : MemObject(p), clock(p->clock), _cpuId(p->cpu_id),
      number_of_threads(p->numThreads), system(p->system),
      phase(p->phase)
#endif
{
//    currentTick = curTick;

    // if Python did not provide a valid ID, do it here
    if (_cpuId == -1) {
        _cpuId = cpuList.size();
    }

    // add self to global list of CPUs
    cpuList.push_back(this);

    DPRINTF(SyscallVerbose, "Constructing CPU with id %d\n", _cpuId);

    if (number_of_threads > maxThreadsPerCPU)
        maxThreadsPerCPU = number_of_threads;

    // allocate per-thread instruction-based event queues
    comInstEventQueue = new EventQueue *[number_of_threads];
    for (int i = 0; i < number_of_threads; ++i)
        comInstEventQueue[i] = new EventQueue("instruction-based event queue");

    //
    // set up instruction-count-based termination events, if any
    //
    if (p->max_insts_any_thread != 0) {
        const char *cause = "a thread reached the max instruction count";
        for (int i = 0; i < number_of_threads; ++i) {
            Event *event = new SimLoopExitEvent(cause, 0);
            comInstEventQueue[i]->schedule(event, p->max_insts_any_thread);
        }
    }
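
    // Note: comInstEventQueue is keyed by per-thread committed-instruction
    // counts rather than ticks; the CPU model is expected to service each
    // thread's queue as that thread's instruction count advances, so the
    // SimLoopExitEvent above fires once any one thread commits
    // max_insts_any_thread instructions.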

    if (p->max_insts_all_threads != 0) {
        const char *cause = "all threads reached the max instruction count";

        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = number_of_threads;
        for (int i = 0; i < number_of_threads; ++i) {
            Event *event = new CountedExitEvent(cause, *counter);
            comInstEventQueue[i]->schedule(event, p->max_insts_all_threads);
        }
    }
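
    // A minimal sketch of the shared-downcounter idiom that
    // CountedExitEvent presumably implements (illustrative, not the
    // actual gem5 definition):
    //
    //     void CountedExitEvent::process() {
    //         if (--downCounter == 0)      // one counter shared by all
    //             exitSimLoop(cause, 0);   // last thread ends the run
    //     }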

    // allocate per-thread load-based event queues
    comLoadEventQueue = new EventQueue *[number_of_threads];
    for (int i = 0; i < number_of_threads; ++i)
        comLoadEventQueue[i] = new EventQueue("load-based event queue");

    //
    // set up load-count-based termination events, if any
    //
    if (p->max_loads_any_thread != 0) {
        const char *cause = "a thread reached the max load count";
        for (int i = 0; i < number_of_threads; ++i) {
            Event *event = new SimLoopExitEvent(cause, 0);
            comLoadEventQueue[i]->schedule(event, p->max_loads_any_thread);
        }
    }

    if (p->max_loads_all_threads != 0) {
        const char *cause = "all threads reached the max load count";
        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = number_of_threads;
        for (int i = 0; i < number_of_threads; ++i) {
            Event *event = new CountedExitEvent(cause, *counter);
            comLoadEventQueue[i]->schedule(event, p->max_loads_all_threads);
        }
    }

    functionTracingEnabled = false;
    if (p->function_trace) {
        functionTraceStream = simout.find(csprintf("ftrace.%s", name()));
        currentFunctionStart = currentFunctionEnd = 0;
        functionEntryTick = p->function_trace_start;

        if (p->function_trace_start == 0) {
            functionTracingEnabled = true;
        } else {
            // wrap enableFunctionTrace() in a one-shot event that fires
            // at function_trace_start ('true' makes the event
            // self-deleting after it is processed)
            typedef EventWrapper<BaseCPU, &BaseCPU::enableFunctionTrace> wrap;
            Event *event = new wrap(this, true);
            schedule(event, p->function_trace_start);
        }
    }
#if FULL_SYSTEM
    profileEvent = NULL;
    if (params()->profile)
        profileEvent = new ProfileEvent(this, params()->profile);
#endif
    tracer = params()->tracer;
}

void
BaseCPU::enableFunctionTrace()
{
    functionTracingEnabled = true;
}

BaseCPU::~BaseCPU()
{
}

void
BaseCPU::init()
{
    if (!params()->defer_registration)
        registerThreadContexts();
}

void
BaseCPU::startup()
{
#if FULL_SYSTEM
    if (!params()->defer_registration && profileEvent)
        schedule(profileEvent, curTick);
#endif

    if (params()->progress_interval) {
        Tick num_ticks = ticks(params()->progress_interval);
        Event *event = new CPUProgressEvent(this, num_ticks);
        schedule(event, curTick + num_ticks);
    }
}


void
BaseCPU::regStats()
{
    using namespace Stats;

    numCycles
        .name(name() + ".numCycles")
        .desc("number of cpu cycles simulated")
        ;

    int size = threadContexts.size();
    if (size > 1) {
        for (int i = 0; i < size; ++i) {
            stringstream namestr;
            ccprintf(namestr, "%s.ctx%d", name(), i);
            threadContexts[i]->regStats(namestr.str());
        }
    } else if (size == 1)
        threadContexts[0]->regStats(name());

}

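// Worked example for the cycle-alignment math below (values are
// hypothetical): with clock = 500 and phase = 100, cycle boundaries
// fall at ticks 100, 600, 1100, 1600, ...  For curTick = 1700,
// next_tick = 1700 - 100 + 499 = 2099; 2099 - (2099 % 500) = 2000;
// 2000 + 100 = 2100, the next boundary.  If curTick is already on a
// boundary, nextCycle() returns curTick itself.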
Tick
BaseCPU::nextCycle()
{
    Tick next_tick = curTick - phase + clock - 1;
    next_tick -= (next_tick % clock);
    next_tick += phase;
    return next_tick;
}

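// For the variant below (same hypothetical clock = 500, phase = 100):
// begin_tick = 1234 is first rounded up to the next multiple of clock,
// 1234 - 234 + 500 = 1500, and the phase offset is added afterwards,
// giving 1600.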
Tick
BaseCPU::nextCycle(Tick begin_tick)
{
    Tick next_tick = begin_tick;
    if (next_tick % clock != 0)
        next_tick = next_tick - (next_tick % clock) + clock;
    next_tick += phase;

    assert(next_tick >= curTick);
    return next_tick;
}

void
BaseCPU::registerThreadContexts()
{
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];

        /** This is so that contextId and cpuId match where there is a
         * 1cpu:1context relationship.  Otherwise, the order of registration
         * could affect the assignment and cpu 1 could have context id 3, for
         * example.  (E.g., with four single-threaded CPUs, cpu 2 always gets
         * context id 2 regardless of construction order.)  We may even want
         * to do something like this for SMT so that cpu 0 has the lowest
         * thread contexts and cpu N has the highest, but I'll just do this
         * for now.
         */
        if (number_of_threads == 1)
            tc->setContextId(system->registerThreadContext(tc, _cpuId));
        else
            tc->setContextId(system->registerThreadContext(tc));
#if !FULL_SYSTEM
        tc->getProcessPtr()->assignThreadContext(tc->contextId());
#endif
    }
}


int
BaseCPU::findContext(ThreadContext *tc)
{
    for (int i = 0; i < threadContexts.size(); ++i) {
        if (tc == threadContexts[i])
            return i;
    }
    return 0; // not found: default to context 0
}

void
BaseCPU::switchOut()
{
//    panic("This CPU doesn't support sampling!");
#if FULL_SYSTEM
    if (profileEvent && profileEvent->scheduled())
        deschedule(profileEvent);
#endif
}

void
BaseCPU::takeOverFrom(BaseCPU *oldCPU, Port *ic, Port *dc)
{
    assert(threadContexts.size() == oldCPU->threadContexts.size());

    _cpuId = oldCPU->cpuId();

    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *newTC = threadContexts[i];
        ThreadContext *oldTC = oldCPU->threadContexts[i];

        newTC->takeOverFrom(oldTC);

        CpuEvent::replaceThreadContext(oldTC, newTC);

        assert(newTC->contextId() == oldTC->contextId());
        assert(newTC->threadId() == oldTC->threadId());
        system->replaceThreadContext(newTC, newTC->contextId());

        if (DTRACE(Context))
            ThreadContext::compare(oldTC, newTC);
    }

#if FULL_SYSTEM
    interrupts = oldCPU->interrupts;

    for (int i = 0; i < threadContexts.size(); ++i)
        threadContexts[i]->profileClear();

    if (profileEvent)
        schedule(profileEvent, curTick);
#endif

    // Connect new CPU to old CPU's memory only if new CPU isn't
    // connected to anything.  Also connect old CPU's memory to new
    // CPU.
    if (!ic->isConnected()) {
        Port *peer = oldCPU->getPort("icache_port")->getPeer();
        ic->setPeer(peer);
        peer->setPeer(ic);
    }

    if (!dc->isConnected()) {
        Port *peer = oldCPU->getPort("dcache_port")->getPeer();
        dc->setPeer(peer);
        peer->setPeer(dc);
    }
}


#if FULL_SYSTEM
BaseCPU::ProfileEvent::ProfileEvent(BaseCPU *_cpu, Tick _interval)
    : cpu(_cpu), interval(_interval)
{ }

void
BaseCPU::ProfileEvent::process()
{
    for (int i = 0, size = cpu->threadContexts.size(); i < size; ++i) {
        ThreadContext *tc = cpu->threadContexts[i];
        tc->profileSample();
    }

    cpu->schedule(this, curTick + interval);
}

void
BaseCPU::postInterrupt(int int_num, int index)
{
    interrupts->post(int_num, index);
}

void
BaseCPU::clearInterrupt(int int_num, int index)
{
    interrupts->clear(int_num, index);
}

void
BaseCPU::clearInterrupts()
{
    interrupts->clearAll();
}

void
BaseCPU::serialize(std::ostream &os)
{
    SERIALIZE_SCALAR(instCnt);
    interrupts->serialize(os);
}

void
BaseCPU::unserialize(Checkpoint *cp, const std::string &section)
{
    UNSERIALIZE_SCALAR(instCnt);
    interrupts->unserialize(cp, section);
}

#endif // FULL_SYSTEM

void
BaseCPU::traceFunctionsInternal(Addr pc)
{
    if (!debugSymbolTable)
        return;

    // If pc enters a different function, print the new function symbol
    // and update the saved range.  Otherwise do nothing.
    if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
        string sym_str;
        bool found = debugSymbolTable->findNearestSymbol(pc, sym_str,
                                                         currentFunctionStart,
                                                         currentFunctionEnd);

        if (!found) {
            // no symbol found: use addr as label
            sym_str = csprintf("0x%x", pc);
            currentFunctionStart = pc;
            currentFunctionEnd = pc + 1;
        }

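        // Each transition appends the time spent in the previous
        // function, then starts a new line for the one just entered, so
        // the trace reads like (ticks and symbols illustrative only):
        //     1000: main (250)
        //     1250: memcpy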
        ccprintf(*functionTraceStream, " (%d)\n%d: %s",
                 curTick - functionEntryTick, curTick, sym_str);
        functionEntryTick = curTick;
    }
}