base.cc (2654:9559cfa91b9d -> 2665:a124942bacb8)
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Nathan Binkert
 */

#include <iostream>
#include <string>
#include <sstream>

#include "base/cprintf.hh"
#include "base/loader/symtab.hh"
#include "base/misc.hh"
#include "base/output.hh"
#include "cpu/base.hh"
#include "cpu/cpuevent.hh"
#include "cpu/exec_context.hh"
#include "cpu/profile.hh"
#include "cpu/sampler/sampler.hh"
#include "sim/param.hh"
#include "sim/process.hh"
#include "sim/sim_events.hh"
#include "sim/system.hh"

#include "base/trace.hh"

#if FULL_SYSTEM
#include "kern/kernel_stats.hh"
#endif

using namespace std;

vector<BaseCPU *> BaseCPU::cpuList;

// This variable reflects the max number of threads in any CPU.  Be
// careful to only use it once all the CPUs that you care about have
// been initialized
int maxThreadsPerCPU = 1;

#if FULL_SYSTEM
BaseCPU::BaseCPU(Params *p)
    : SimObject(p->name), clock(p->clock), checkInterrupts(true),
      params(p), number_of_threads(p->numberOfThreads), system(p->system)
#else
BaseCPU::BaseCPU(Params *p)
    : SimObject(p->name), clock(p->clock), params(p),
      number_of_threads(p->numberOfThreads), system(p->system)
#endif
{
    DPRINTF(FullCPU, "BaseCPU: Creating object, mem address %#x.\n", this);

    // add self to global list of CPUs
    cpuList.push_back(this);

    DPRINTF(FullCPU, "BaseCPU: CPU added to cpuList, mem address %#x.\n",
            this);

    if (number_of_threads > maxThreadsPerCPU)
        maxThreadsPerCPU = number_of_threads;

    // allocate per-thread instruction-based event queues
    comInstEventQueue = new EventQueue *[number_of_threads];
    for (int i = 0; i < number_of_threads; ++i)
        comInstEventQueue[i] = new EventQueue("instruction-based event queue");

    //
    // set up instruction-count-based termination events, if any
    //
    if (p->max_insts_any_thread != 0)
        for (int i = 0; i < number_of_threads; ++i)
            new SimExitEvent(comInstEventQueue[i], p->max_insts_any_thread,
                             "a thread reached the max instruction count");

    if (p->max_insts_all_threads != 0) {
        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = number_of_threads;
        for (int i = 0; i < number_of_threads; ++i)
            new CountedExitEvent(comInstEventQueue[i],
                "all threads reached the max instruction count",
                p->max_insts_all_threads, *counter);
    }

    // allocate per-thread load-based event queues
    comLoadEventQueue = new EventQueue *[number_of_threads];
    for (int i = 0; i < number_of_threads; ++i)
        comLoadEventQueue[i] = new EventQueue("load-based event queue");

    //
    // set up load-count-based termination events, if any
    //
    if (p->max_loads_any_thread != 0)
        for (int i = 0; i < number_of_threads; ++i)
            new SimExitEvent(comLoadEventQueue[i], p->max_loads_any_thread,
                             "a thread reached the max load count");

    if (p->max_loads_all_threads != 0) {
        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = number_of_threads;
        for (int i = 0; i < number_of_threads; ++i)
            new CountedExitEvent(comLoadEventQueue[i],
                "all threads reached the max load count",
                p->max_loads_all_threads, *counter);
    }

#if FULL_SYSTEM
    memset(interrupts, 0, sizeof(interrupts));
    intstatus = 0;
#endif

    functionTracingEnabled = false;
    if (p->functionTrace) {
        functionTraceStream = simout.find(csprintf("ftrace.%s", name()));
        currentFunctionStart = currentFunctionEnd = 0;
        functionEntryTick = p->functionTraceStart;

        if (p->functionTraceStart == 0) {
            functionTracingEnabled = true;
        } else {
            Event *e =
                new EventWrapper<BaseCPU, &BaseCPU::enableFunctionTrace>(this,
                                                                         true);
            e->schedule(p->functionTraceStart);
        }
    }
#if FULL_SYSTEM
    profileEvent = NULL;
    if (params->profile)
        profileEvent = new ProfileEvent(this, params->profile);

    kernelStats = new Kernel::Statistics(system);
#endif

}

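// Illustrative sketch (not part of the original file): the per-thread
// comInstEventQueue / comLoadEventQueue queues built in the constructor
// only fire if a concrete CPU model advances them as it commits work.
// A derived model would typically do something along the lines of
//
//     // after committing an instruction on thread `tid`:
//     comInstEventQueue[tid]->serviceEvents(numInst);
//
//     // after completing a load on thread `tid`:
//     comLoadEventQueue[tid]->serviceEvents(numLoad);
//
// where `tid`, `numInst`, and `numLoad` are the model's own bookkeeping;
// the SimExitEvent / CountedExitEvent objects scheduled above then end
// the simulation once the max_insts_* / max_loads_* limits are reached.
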
BaseCPU::Params::Params()
{
#if FULL_SYSTEM
    profile = false;
#endif
}

void
BaseCPU::enableFunctionTrace()
{
    functionTracingEnabled = true;
}

BaseCPU::~BaseCPU()
{
#if FULL_SYSTEM
    if (kernelStats)
        delete kernelStats;
#endif
}

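// init() registers this CPU's execution contexts with the enclosing
// system (full system) or with their owning processes (syscall
// emulation) unless registration is deferred; deferring is presumably
// what a CPU that starts switched out (e.g. under the Sampler) relies
// on so that it only becomes visible once it takes over from another
// CPU.
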
void
BaseCPU::init()
{
    if (!params->deferRegistration)
        registerExecContexts();
}

void
BaseCPU::startup()
{
#if FULL_SYSTEM
    if (!params->deferRegistration && profileEvent)
        profileEvent->schedule(curTick);
#endif
}


void
BaseCPU::regStats()
{
    using namespace Stats;

    numCycles
        .name(name() + ".numCycles")
        .desc("number of cpu cycles simulated")
        ;

    int size = execContexts.size();
    if (size > 1) {
        for (int i = 0; i < size; ++i) {
            stringstream namestr;
            ccprintf(namestr, "%s.ctx%d", name(), i);
            execContexts[i]->regStats(namestr.str());
        }
    } else if (size == 1)
        execContexts[0]->regStats(name());

#if FULL_SYSTEM
    if (kernelStats)
        kernelStats->regStats(name() + ".kern");
#endif
}


void
BaseCPU::registerExecContexts()
{
    for (int i = 0; i < execContexts.size(); ++i) {
        ExecContext *xc = execContexts[i];

#if FULL_SYSTEM
        int id = params->cpu_id;
        if (id != -1)
            id += i;

        xc->setCpuId(system->registerExecContext(xc, id));
#else
        xc->setCpuId(xc->getProcessPtr()->registerExecContext(xc));
#endif
    }
}


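// CPU model switching: the Sampler asks the outgoing CPU to switchOut()
// and then hands its state to the incoming CPU via takeOverFrom(), which
// copies per-context state, re-registers the new contexts, and (in full
// system) carries the pending interrupt state across.  BaseCPU itself
// only implements the takeOverFrom() half; a model that can be switched
// out must override switchOut().
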
void
BaseCPU::switchOut(Sampler *sampler)
{
    panic("This CPU doesn't support sampling!");
}

void
BaseCPU::takeOverFrom(BaseCPU *oldCPU)
{
    assert(execContexts.size() == oldCPU->execContexts.size());

    for (int i = 0; i < execContexts.size(); ++i) {
        ExecContext *newXC = execContexts[i];
        ExecContext *oldXC = oldCPU->execContexts[i];

        newXC->takeOverFrom(oldXC);

        CpuEvent::replaceExecContext(oldXC, newXC);

        assert(newXC->readCpuId() == oldXC->readCpuId());
#if FULL_SYSTEM
        system->replaceExecContext(newXC, newXC->readCpuId());
#else
        assert(newXC->getProcessPtr() == oldXC->getProcessPtr());
        newXC->getProcessPtr()->replaceExecContext(newXC, newXC->readCpuId());
#endif
    }

#if FULL_SYSTEM
    for (int i = 0; i < TheISA::NumInterruptLevels; ++i)
        interrupts[i] = oldCPU->interrupts[i];
    intstatus = oldCPU->intstatus;

    for (int i = 0; i < execContexts.size(); ++i)
        execContexts[i]->profileClear();

    if (profileEvent)
        profileEvent->schedule(curTick);
#endif
}


#if FULL_SYSTEM
BaseCPU::ProfileEvent::ProfileEvent(BaseCPU *_cpu, int _interval)
    : Event(&mainEventQueue), cpu(_cpu), interval(_interval)
{ }

void
BaseCPU::ProfileEvent::process()
{
    for (int i = 0, size = cpu->execContexts.size(); i < size; ++i) {
        ExecContext *xc = cpu->execContexts[i];
        xc->profileSample();
    }

    schedule(curTick + interval);
}

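// Interrupt bookkeeping: interrupts[level] holds a bitmask of the
// indexes currently posted at that interrupt level, and intstatus keeps
// one summary bit per level, so a caller can test for any pending
// interrupt by checking intstatus against zero.
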
void
BaseCPU::post_interrupt(int int_num, int index)
{
    DPRINTF(Interrupt, "Interrupt %d:%d posted\n", int_num, index);

    if (int_num < 0 || int_num >= TheISA::NumInterruptLevels)
        panic("int_num out of bounds\n");

    if (index < 0 || index >= sizeof(uint64_t) * 8)
        panic("index out of bounds\n");

    checkInterrupts = true;
    interrupts[int_num] |= 1 << index;
    intstatus |= (ULL(1) << int_num);
}

void
BaseCPU::clear_interrupt(int int_num, int index)
{
    DPRINTF(Interrupt, "Interrupt %d:%d cleared\n", int_num, index);

    if (int_num < 0 || int_num >= TheISA::NumInterruptLevels)
        panic("int_num out of bounds\n");

    if (index < 0 || index >= sizeof(uint64_t) * 8)
        panic("index out of bounds\n");

    interrupts[int_num] &= ~(1 << index);
    if (interrupts[int_num] == 0)
        intstatus &= ~(ULL(1) << int_num);
}

void
BaseCPU::clear_interrupts()
{
    DPRINTF(Interrupt, "Interrupts all cleared\n");

    memset(interrupts, 0, sizeof(interrupts));
    intstatus = 0;
}


void
BaseCPU::serialize(std::ostream &os)
{
    SERIALIZE_ARRAY(interrupts, TheISA::NumInterruptLevels);
    SERIALIZE_SCALAR(intstatus);

#if FULL_SYSTEM
    if (kernelStats)
        kernelStats->serialize(os);
#endif

}

void
BaseCPU::unserialize(Checkpoint *cp, const std::string &section)
{
    UNSERIALIZE_ARRAY(interrupts, TheISA::NumInterruptLevels);
    UNSERIALIZE_SCALAR(intstatus);

#if FULL_SYSTEM
    if (kernelStats)
        kernelStats->unserialize(cp, section);
#endif
}

#endif // FULL_SYSTEM

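// Function-trace output format (as produced by the ccprintf call below):
// when the PC first leaves the current symbol's address range, the time
// spent there is appended to that symbol's line as " (<ticks>)", and a
// new line "<current tick>: <symbol or raw address>" is started for the
// function just entered.
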
void
BaseCPU::traceFunctionsInternal(Addr pc)
{
    if (!debugSymbolTable)
        return;

    // if pc enters different function, print new function symbol and
    // update saved range.  Otherwise do nothing.
    if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
        string sym_str;
        bool found = debugSymbolTable->findNearestSymbol(pc, sym_str,
                                                         currentFunctionStart,
                                                         currentFunctionEnd);

        if (!found) {
            // no symbol found: use addr as label
            sym_str = csprintf("0x%x", pc);
            currentFunctionStart = pc;
            currentFunctionEnd = pc + 1;
        }

        ccprintf(*functionTraceStream, " (%d)\n%d: %s",
                 curTick - functionEntryTick, curTick, sym_str);
        functionEntryTick = curTick;
    }
}


DEFINE_SIM_OBJECT_CLASS_NAME("BaseCPU", BaseCPU)