// base.cc -- merged diff view of revisions 8795:0909f8ed7aa0 and 8796:a2ae5c378d0a
1/*
2 * Copyright (c) 2002-2005 The Regents of The University of Michigan
3 * Copyright (c) 2011 Regents of the University of California
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * Authors: Steve Reinhardt
30 * Nathan Binkert
31 * Rick Strong
32 */
33
34#include <iostream>
35#include <sstream>
36#include <string>
37
38#include "arch/tlb.hh"
39#include "base/loader/symtab.hh"
40#include "base/cprintf.hh"
41#include "base/misc.hh"
42#include "base/output.hh"
43#include "base/trace.hh"
44#include "cpu/base.hh"
45#include "cpu/cpuevent.hh"
46#include "cpu/profile.hh"
47#include "cpu/thread_context.hh"
48#include "debug/SyscallVerbose.hh"
49#include "params/BaseCPU.hh"
50#include "sim/full_system.hh"
51#include "sim/process.hh"
52#include "sim/sim_events.hh"
53#include "sim/sim_exit.hh"
54#include "sim/system.hh"
55
56// Hack
57#include "sim/stat_control.hh"
58
59using namespace std;
60
61vector<BaseCPU *> BaseCPU::cpuList;
62
63// This variable reflects the max number of threads in any CPU. Be
64// careful to only use it once all the CPUs that you care about have
65// been initialized
66int maxThreadsPerCPU = 1;
67
68CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
69 : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0),
70 cpu(_cpu), _repeatEvent(true)
71{
72 if (_interval)
73 cpu->schedule(this, curTick() + _interval);
74}
75
76void
77CPUProgressEvent::process()
78{
79 Counter temp = cpu->totalInstructions();
80#ifndef NDEBUG
81 double ipc = double(temp - lastNumInst) / (_interval / cpu->ticks(1));
82
83 DPRINTFN("%s progress event, total committed:%i, progress insts committed: "
84 "%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst,
85 ipc);
86 ipc = 0.0;
87#else
88 cprintf("%lli: %s progress event, total committed:%i, progress insts "
89 "committed: %lli\n", curTick(), cpu->name(), temp,
90 temp - lastNumInst);
91#endif
92 lastNumInst = temp;
93
94 if (_repeatEvent)
95 cpu->schedule(this, curTick() + _interval);
96}
97
98const char *
99CPUProgressEvent::description() const
100{
101 return "CPU Progress";
102}
103
104BaseCPU::BaseCPU(Params *p)
105 : MemObject(p), clock(p->clock), instCnt(0), _cpuId(p->cpu_id),
106 interrupts(p->interrupts),
107 numThreads(p->numThreads), system(p->system),
108 phase(p->phase)
109{
110// currentTick = curTick();
111
112 // if Python did not provide a valid ID, do it here
113 if (_cpuId == -1 ) {
114 _cpuId = cpuList.size();
115 }
116
117 // add self to global list of CPUs
118 cpuList.push_back(this);
119
120 DPRINTF(SyscallVerbose, "Constructing CPU with id %d\n", _cpuId);
121
122 if (numThreads > maxThreadsPerCPU)
123 maxThreadsPerCPU = numThreads;
124
125 // allocate per-thread instruction-based event queues
126 comInstEventQueue = new EventQueue *[numThreads];
127 for (ThreadID tid = 0; tid < numThreads; ++tid)
128 comInstEventQueue[tid] =
129 new EventQueue("instruction-based event queue");
130
131 //
132 // set up instruction-count-based termination events, if any
133 //
134 if (p->max_insts_any_thread != 0) {
135 const char *cause = "a thread reached the max instruction count";
136 for (ThreadID tid = 0; tid < numThreads; ++tid) {
137 Event *event = new SimLoopExitEvent(cause, 0);
138 comInstEventQueue[tid]->schedule(event, p->max_insts_any_thread);
139 }
140 }
141
142 if (p->max_insts_all_threads != 0) {
143 const char *cause = "all threads reached the max instruction count";
144
145 // allocate & initialize shared downcounter: each event will
146 // decrement this when triggered; simulation will terminate
147 // when counter reaches 0
148 int *counter = new int;
149 *counter = numThreads;
150 for (ThreadID tid = 0; tid < numThreads; ++tid) {
151 Event *event = new CountedExitEvent(cause, *counter);
152 comInstEventQueue[tid]->schedule(event, p->max_insts_all_threads);
153 }
154 }
155
156 // allocate per-thread load-based event queues
157 comLoadEventQueue = new EventQueue *[numThreads];
158 for (ThreadID tid = 0; tid < numThreads; ++tid)
159 comLoadEventQueue[tid] = new EventQueue("load-based event queue");
160
161 //
162 // set up instruction-count-based termination events, if any
163 //
164 if (p->max_loads_any_thread != 0) {
165 const char *cause = "a thread reached the max load count";
166 for (ThreadID tid = 0; tid < numThreads; ++tid) {
167 Event *event = new SimLoopExitEvent(cause, 0);
168 comLoadEventQueue[tid]->schedule(event, p->max_loads_any_thread);
169 }
170 }
171
172 if (p->max_loads_all_threads != 0) {
173 const char *cause = "all threads reached the max load count";
174 // allocate & initialize shared downcounter: each event will
175 // decrement this when triggered; simulation will terminate
176 // when counter reaches 0
177 int *counter = new int;
178 *counter = numThreads;
179 for (ThreadID tid = 0; tid < numThreads; ++tid) {
180 Event *event = new CountedExitEvent(cause, *counter);
181 comLoadEventQueue[tid]->schedule(event, p->max_loads_all_threads);
182 }
183 }
184
185 functionTracingEnabled = false;
186 if (p->function_trace) {
1/*
2 * Copyright (c) 2002-2005 The Regents of The University of Michigan
3 * Copyright (c) 2011 Regents of the University of California
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * Authors: Steve Reinhardt
30 * Nathan Binkert
31 * Rick Strong
32 */
33
34#include <iostream>
35#include <sstream>
36#include <string>
37
38#include "arch/tlb.hh"
39#include "base/loader/symtab.hh"
40#include "base/cprintf.hh"
41#include "base/misc.hh"
42#include "base/output.hh"
43#include "base/trace.hh"
44#include "cpu/base.hh"
45#include "cpu/cpuevent.hh"
46#include "cpu/profile.hh"
47#include "cpu/thread_context.hh"
48#include "debug/SyscallVerbose.hh"
49#include "params/BaseCPU.hh"
50#include "sim/full_system.hh"
51#include "sim/process.hh"
52#include "sim/sim_events.hh"
53#include "sim/sim_exit.hh"
54#include "sim/system.hh"
55
56// Hack
57#include "sim/stat_control.hh"
58
59using namespace std;
60
61vector<BaseCPU *> BaseCPU::cpuList;
62
63// This variable reflects the max number of threads in any CPU. Be
64// careful to only use it once all the CPUs that you care about have
65// been initialized
66int maxThreadsPerCPU = 1;
67
68CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
69 : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0),
70 cpu(_cpu), _repeatEvent(true)
71{
72 if (_interval)
73 cpu->schedule(this, curTick() + _interval);
74}
75
76void
77CPUProgressEvent::process()
78{
79 Counter temp = cpu->totalInstructions();
80#ifndef NDEBUG
81 double ipc = double(temp - lastNumInst) / (_interval / cpu->ticks(1));
82
83 DPRINTFN("%s progress event, total committed:%i, progress insts committed: "
84 "%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst,
85 ipc);
86 ipc = 0.0;
87#else
88 cprintf("%lli: %s progress event, total committed:%i, progress insts "
89 "committed: %lli\n", curTick(), cpu->name(), temp,
90 temp - lastNumInst);
91#endif
92 lastNumInst = temp;
93
94 if (_repeatEvent)
95 cpu->schedule(this, curTick() + _interval);
96}
97
98const char *
99CPUProgressEvent::description() const
100{
101 return "CPU Progress";
102}
103
104BaseCPU::BaseCPU(Params *p)
105 : MemObject(p), clock(p->clock), instCnt(0), _cpuId(p->cpu_id),
106 interrupts(p->interrupts),
107 numThreads(p->numThreads), system(p->system),
108 phase(p->phase)
109{
110// currentTick = curTick();
111
112 // if Python did not provide a valid ID, do it here
113 if (_cpuId == -1 ) {
114 _cpuId = cpuList.size();
115 }
116
117 // add self to global list of CPUs
118 cpuList.push_back(this);
119
120 DPRINTF(SyscallVerbose, "Constructing CPU with id %d\n", _cpuId);
121
122 if (numThreads > maxThreadsPerCPU)
123 maxThreadsPerCPU = numThreads;
124
125 // allocate per-thread instruction-based event queues
126 comInstEventQueue = new EventQueue *[numThreads];
127 for (ThreadID tid = 0; tid < numThreads; ++tid)
128 comInstEventQueue[tid] =
129 new EventQueue("instruction-based event queue");
130
131 //
132 // set up instruction-count-based termination events, if any
133 //
134 if (p->max_insts_any_thread != 0) {
135 const char *cause = "a thread reached the max instruction count";
136 for (ThreadID tid = 0; tid < numThreads; ++tid) {
137 Event *event = new SimLoopExitEvent(cause, 0);
138 comInstEventQueue[tid]->schedule(event, p->max_insts_any_thread);
139 }
140 }
141
142 if (p->max_insts_all_threads != 0) {
143 const char *cause = "all threads reached the max instruction count";
144
145 // allocate & initialize shared downcounter: each event will
146 // decrement this when triggered; simulation will terminate
147 // when counter reaches 0
148 int *counter = new int;
149 *counter = numThreads;
150 for (ThreadID tid = 0; tid < numThreads; ++tid) {
151 Event *event = new CountedExitEvent(cause, *counter);
152 comInstEventQueue[tid]->schedule(event, p->max_insts_all_threads);
153 }
154 }
155
156 // allocate per-thread load-based event queues
157 comLoadEventQueue = new EventQueue *[numThreads];
158 for (ThreadID tid = 0; tid < numThreads; ++tid)
159 comLoadEventQueue[tid] = new EventQueue("load-based event queue");
160
161 //
162 // set up instruction-count-based termination events, if any
163 //
164 if (p->max_loads_any_thread != 0) {
165 const char *cause = "a thread reached the max load count";
166 for (ThreadID tid = 0; tid < numThreads; ++tid) {
167 Event *event = new SimLoopExitEvent(cause, 0);
168 comLoadEventQueue[tid]->schedule(event, p->max_loads_any_thread);
169 }
170 }
171
172 if (p->max_loads_all_threads != 0) {
173 const char *cause = "all threads reached the max load count";
174 // allocate & initialize shared downcounter: each event will
175 // decrement this when triggered; simulation will terminate
176 // when counter reaches 0
177 int *counter = new int;
178 *counter = numThreads;
179 for (ThreadID tid = 0; tid < numThreads; ++tid) {
180 Event *event = new CountedExitEvent(cause, *counter);
181 comLoadEventQueue[tid]->schedule(event, p->max_loads_all_threads);
182 }
183 }
184
185 functionTracingEnabled = false;
186 if (p->function_trace) {
187 functionTraceStream = simout.find(csprintf("ftrace.%s", name()));
187 const string fname = csprintf("ftrace.%s", name());
188 functionTraceStream = simout.find(fname);
189 if (!functionTraceStream)
190 functionTraceStream = simout.create(fname);
191
188 currentFunctionStart = currentFunctionEnd = 0;
189 functionEntryTick = p->function_trace_start;
190
191 if (p->function_trace_start == 0) {
192 functionTracingEnabled = true;
193 } else {
194 typedef EventWrapper<BaseCPU, &BaseCPU::enableFunctionTrace> wrap;
195 Event *event = new wrap(this, true);
196 schedule(event, p->function_trace_start);
197 }
198 }
199 interrupts->setCPU(this);
200
201 if (FullSystem) {
202 profileEvent = NULL;
203 if (params()->profile)
204 profileEvent = new ProfileEvent(this, params()->profile);
205 }
206 tracer = params()->tracer;
207}
208
209void
210BaseCPU::enableFunctionTrace()
211{
212 functionTracingEnabled = true;
213}
214
215BaseCPU::~BaseCPU()
216{
217}
218
219void
220BaseCPU::init()
221{
222 if (!params()->defer_registration)
223 registerThreadContexts();
224}
225
226void
227BaseCPU::startup()
228{
229 if (FullSystem) {
230 if (!params()->defer_registration && profileEvent)
231 schedule(profileEvent, curTick());
232 }
233
234 if (params()->progress_interval) {
235 Tick num_ticks = ticks(params()->progress_interval);
236
237 new CPUProgressEvent(this, num_ticks);
238 }
239}
240
241
242void
243BaseCPU::regStats()
244{
245 using namespace Stats;
246
247 numCycles
248 .name(name() + ".numCycles")
249 .desc("number of cpu cycles simulated")
250 ;
251
252 numWorkItemsStarted
253 .name(name() + ".numWorkItemsStarted")
254 .desc("number of work items this cpu started")
255 ;
256
257 numWorkItemsCompleted
258 .name(name() + ".numWorkItemsCompleted")
259 .desc("number of work items this cpu completed")
260 ;
261
262 int size = threadContexts.size();
263 if (size > 1) {
264 for (int i = 0; i < size; ++i) {
265 stringstream namestr;
266 ccprintf(namestr, "%s.ctx%d", name(), i);
267 threadContexts[i]->regStats(namestr.str());
268 }
269 } else if (size == 1)
270 threadContexts[0]->regStats(name());
271}
272
273Tick
274BaseCPU::nextCycle()
275{
276 Tick next_tick = curTick() - phase + clock - 1;
277 next_tick -= (next_tick % clock);
278 next_tick += phase;
279 return next_tick;
280}
281
282Tick
283BaseCPU::nextCycle(Tick begin_tick)
284{
285 Tick next_tick = begin_tick;
286 if (next_tick % clock != 0)
287 next_tick = next_tick - (next_tick % clock) + clock;
288 next_tick += phase;
289
290 assert(next_tick >= curTick());
291 return next_tick;
292}
293
294void
295BaseCPU::registerThreadContexts()
296{
297 ThreadID size = threadContexts.size();
298 for (ThreadID tid = 0; tid < size; ++tid) {
299 ThreadContext *tc = threadContexts[tid];
300
301 /** This is so that contextId and cpuId match where there is a
302 * 1cpu:1context relationship. Otherwise, the order of registration
303 * could affect the assignment and cpu 1 could have context id 3, for
304 * example. We may even want to do something like this for SMT so that
305 * cpu 0 has the lowest thread contexts and cpu N has the highest, but
306 * I'll just do this for now
307 */
308 if (numThreads == 1)
309 tc->setContextId(system->registerThreadContext(tc, _cpuId));
310 else
311 tc->setContextId(system->registerThreadContext(tc));
312
313 if (!FullSystem)
314 tc->getProcessPtr()->assignThreadContext(tc->contextId());
315 }
316}
317
318
319int
320BaseCPU::findContext(ThreadContext *tc)
321{
322 ThreadID size = threadContexts.size();
323 for (ThreadID tid = 0; tid < size; ++tid) {
324 if (tc == threadContexts[tid])
325 return tid;
326 }
327 return 0;
328}
329
330void
331BaseCPU::switchOut()
332{
333 if (profileEvent && profileEvent->scheduled())
334 deschedule(profileEvent);
335}
336
337void
338BaseCPU::takeOverFrom(BaseCPU *oldCPU, Port *ic, Port *dc)
339{
340 assert(threadContexts.size() == oldCPU->threadContexts.size());
341
342 _cpuId = oldCPU->cpuId();
343
344 ThreadID size = threadContexts.size();
345 for (ThreadID i = 0; i < size; ++i) {
346 ThreadContext *newTC = threadContexts[i];
347 ThreadContext *oldTC = oldCPU->threadContexts[i];
348
349 newTC->takeOverFrom(oldTC);
350
351 CpuEvent::replaceThreadContext(oldTC, newTC);
352
353 assert(newTC->contextId() == oldTC->contextId());
354 assert(newTC->threadId() == oldTC->threadId());
355 system->replaceThreadContext(newTC, newTC->contextId());
356
357 /* This code no longer works since the zero register (e.g.,
358 * r31 on Alpha) doesn't necessarily contain zero at this
359 * point.
360 if (DTRACE(Context))
361 ThreadContext::compare(oldTC, newTC);
362 */
363
364 Port *old_itb_port, *old_dtb_port, *new_itb_port, *new_dtb_port;
365 old_itb_port = oldTC->getITBPtr()->getPort();
366 old_dtb_port = oldTC->getDTBPtr()->getPort();
367 new_itb_port = newTC->getITBPtr()->getPort();
368 new_dtb_port = newTC->getDTBPtr()->getPort();
369
370 // Move over any table walker ports if they exist
371 if (new_itb_port && !new_itb_port->isConnected()) {
372 assert(old_itb_port);
373 Port *peer = old_itb_port->getPeer();;
374 new_itb_port->setPeer(peer);
375 peer->setPeer(new_itb_port);
376 }
377 if (new_dtb_port && !new_dtb_port->isConnected()) {
378 assert(old_dtb_port);
379 Port *peer = old_dtb_port->getPeer();;
380 new_dtb_port->setPeer(peer);
381 peer->setPeer(new_dtb_port);
382 }
383 }
384
385 interrupts = oldCPU->interrupts;
386 interrupts->setCPU(this);
387
388 if (FullSystem) {
389 for (ThreadID i = 0; i < size; ++i)
390 threadContexts[i]->profileClear();
391
392 if (profileEvent)
393 schedule(profileEvent, curTick());
394 }
395
396 // Connect new CPU to old CPU's memory only if new CPU isn't
397 // connected to anything. Also connect old CPU's memory to new
398 // CPU.
399 if (!ic->isConnected()) {
400 Port *peer = oldCPU->getPort("icache_port")->getPeer();
401 ic->setPeer(peer);
402 peer->setPeer(ic);
403 }
404
405 if (!dc->isConnected()) {
406 Port *peer = oldCPU->getPort("dcache_port")->getPeer();
407 dc->setPeer(peer);
408 peer->setPeer(dc);
409 }
410}
411
412
413BaseCPU::ProfileEvent::ProfileEvent(BaseCPU *_cpu, Tick _interval)
414 : cpu(_cpu), interval(_interval)
415{ }
416
417void
418BaseCPU::ProfileEvent::process()
419{
420 ThreadID size = cpu->threadContexts.size();
421 for (ThreadID i = 0; i < size; ++i) {
422 ThreadContext *tc = cpu->threadContexts[i];
423 tc->profileSample();
424 }
425
426 cpu->schedule(this, curTick() + interval);
427}
428
429void
430BaseCPU::serialize(std::ostream &os)
431{
432 SERIALIZE_SCALAR(instCnt);
433 interrupts->serialize(os);
434}
435
436void
437BaseCPU::unserialize(Checkpoint *cp, const std::string &section)
438{
439 UNSERIALIZE_SCALAR(instCnt);
440 interrupts->unserialize(cp, section);
441}
442
443void
444BaseCPU::traceFunctionsInternal(Addr pc)
445{
446 if (!debugSymbolTable)
447 return;
448
449 // if pc enters different function, print new function symbol and
450 // update saved range. Otherwise do nothing.
451 if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
452 string sym_str;
453 bool found = debugSymbolTable->findNearestSymbol(pc, sym_str,
454 currentFunctionStart,
455 currentFunctionEnd);
456
457 if (!found) {
458 // no symbol found: use addr as label
459 sym_str = csprintf("0x%x", pc);
460 currentFunctionStart = pc;
461 currentFunctionEnd = pc + 1;
462 }
463
464 ccprintf(*functionTraceStream, " (%d)\n%d: %s",
465 curTick() - functionEntryTick, curTick(), sym_str);
466 functionEntryTick = curTick();
467 }
468}
192 currentFunctionStart = currentFunctionEnd = 0;
193 functionEntryTick = p->function_trace_start;
194
195 if (p->function_trace_start == 0) {
196 functionTracingEnabled = true;
197 } else {
198 typedef EventWrapper<BaseCPU, &BaseCPU::enableFunctionTrace> wrap;
199 Event *event = new wrap(this, true);
200 schedule(event, p->function_trace_start);
201 }
202 }
203 interrupts->setCPU(this);
204
205 if (FullSystem) {
206 profileEvent = NULL;
207 if (params()->profile)
208 profileEvent = new ProfileEvent(this, params()->profile);
209 }
210 tracer = params()->tracer;
211}
212
213void
214BaseCPU::enableFunctionTrace()
215{
216 functionTracingEnabled = true;
217}
218
219BaseCPU::~BaseCPU()
220{
221}
222
223void
224BaseCPU::init()
225{
226 if (!params()->defer_registration)
227 registerThreadContexts();
228}
229
230void
231BaseCPU::startup()
232{
233 if (FullSystem) {
234 if (!params()->defer_registration && profileEvent)
235 schedule(profileEvent, curTick());
236 }
237
238 if (params()->progress_interval) {
239 Tick num_ticks = ticks(params()->progress_interval);
240
241 new CPUProgressEvent(this, num_ticks);
242 }
243}
244
245
246void
247BaseCPU::regStats()
248{
249 using namespace Stats;
250
251 numCycles
252 .name(name() + ".numCycles")
253 .desc("number of cpu cycles simulated")
254 ;
255
256 numWorkItemsStarted
257 .name(name() + ".numWorkItemsStarted")
258 .desc("number of work items this cpu started")
259 ;
260
261 numWorkItemsCompleted
262 .name(name() + ".numWorkItemsCompleted")
263 .desc("number of work items this cpu completed")
264 ;
265
266 int size = threadContexts.size();
267 if (size > 1) {
268 for (int i = 0; i < size; ++i) {
269 stringstream namestr;
270 ccprintf(namestr, "%s.ctx%d", name(), i);
271 threadContexts[i]->regStats(namestr.str());
272 }
273 } else if (size == 1)
274 threadContexts[0]->regStats(name());
275}
276
277Tick
278BaseCPU::nextCycle()
279{
280 Tick next_tick = curTick() - phase + clock - 1;
281 next_tick -= (next_tick % clock);
282 next_tick += phase;
283 return next_tick;
284}
285
286Tick
287BaseCPU::nextCycle(Tick begin_tick)
288{
289 Tick next_tick = begin_tick;
290 if (next_tick % clock != 0)
291 next_tick = next_tick - (next_tick % clock) + clock;
292 next_tick += phase;
293
294 assert(next_tick >= curTick());
295 return next_tick;
296}
297
298void
299BaseCPU::registerThreadContexts()
300{
301 ThreadID size = threadContexts.size();
302 for (ThreadID tid = 0; tid < size; ++tid) {
303 ThreadContext *tc = threadContexts[tid];
304
305 /** This is so that contextId and cpuId match where there is a
306 * 1cpu:1context relationship. Otherwise, the order of registration
307 * could affect the assignment and cpu 1 could have context id 3, for
308 * example. We may even want to do something like this for SMT so that
309 * cpu 0 has the lowest thread contexts and cpu N has the highest, but
310 * I'll just do this for now
311 */
312 if (numThreads == 1)
313 tc->setContextId(system->registerThreadContext(tc, _cpuId));
314 else
315 tc->setContextId(system->registerThreadContext(tc));
316
317 if (!FullSystem)
318 tc->getProcessPtr()->assignThreadContext(tc->contextId());
319 }
320}
321
322
323int
324BaseCPU::findContext(ThreadContext *tc)
325{
326 ThreadID size = threadContexts.size();
327 for (ThreadID tid = 0; tid < size; ++tid) {
328 if (tc == threadContexts[tid])
329 return tid;
330 }
331 return 0;
332}
333
334void
335BaseCPU::switchOut()
336{
337 if (profileEvent && profileEvent->scheduled())
338 deschedule(profileEvent);
339}
340
341void
342BaseCPU::takeOverFrom(BaseCPU *oldCPU, Port *ic, Port *dc)
343{
344 assert(threadContexts.size() == oldCPU->threadContexts.size());
345
346 _cpuId = oldCPU->cpuId();
347
348 ThreadID size = threadContexts.size();
349 for (ThreadID i = 0; i < size; ++i) {
350 ThreadContext *newTC = threadContexts[i];
351 ThreadContext *oldTC = oldCPU->threadContexts[i];
352
353 newTC->takeOverFrom(oldTC);
354
355 CpuEvent::replaceThreadContext(oldTC, newTC);
356
357 assert(newTC->contextId() == oldTC->contextId());
358 assert(newTC->threadId() == oldTC->threadId());
359 system->replaceThreadContext(newTC, newTC->contextId());
360
361 /* This code no longer works since the zero register (e.g.,
362 * r31 on Alpha) doesn't necessarily contain zero at this
363 * point.
364 if (DTRACE(Context))
365 ThreadContext::compare(oldTC, newTC);
366 */
367
368 Port *old_itb_port, *old_dtb_port, *new_itb_port, *new_dtb_port;
369 old_itb_port = oldTC->getITBPtr()->getPort();
370 old_dtb_port = oldTC->getDTBPtr()->getPort();
371 new_itb_port = newTC->getITBPtr()->getPort();
372 new_dtb_port = newTC->getDTBPtr()->getPort();
373
374 // Move over any table walker ports if they exist
375 if (new_itb_port && !new_itb_port->isConnected()) {
376 assert(old_itb_port);
377 Port *peer = old_itb_port->getPeer();;
378 new_itb_port->setPeer(peer);
379 peer->setPeer(new_itb_port);
380 }
381 if (new_dtb_port && !new_dtb_port->isConnected()) {
382 assert(old_dtb_port);
383 Port *peer = old_dtb_port->getPeer();;
384 new_dtb_port->setPeer(peer);
385 peer->setPeer(new_dtb_port);
386 }
387 }
388
389 interrupts = oldCPU->interrupts;
390 interrupts->setCPU(this);
391
392 if (FullSystem) {
393 for (ThreadID i = 0; i < size; ++i)
394 threadContexts[i]->profileClear();
395
396 if (profileEvent)
397 schedule(profileEvent, curTick());
398 }
399
400 // Connect new CPU to old CPU's memory only if new CPU isn't
401 // connected to anything. Also connect old CPU's memory to new
402 // CPU.
403 if (!ic->isConnected()) {
404 Port *peer = oldCPU->getPort("icache_port")->getPeer();
405 ic->setPeer(peer);
406 peer->setPeer(ic);
407 }
408
409 if (!dc->isConnected()) {
410 Port *peer = oldCPU->getPort("dcache_port")->getPeer();
411 dc->setPeer(peer);
412 peer->setPeer(dc);
413 }
414}
415
416
417BaseCPU::ProfileEvent::ProfileEvent(BaseCPU *_cpu, Tick _interval)
418 : cpu(_cpu), interval(_interval)
419{ }
420
421void
422BaseCPU::ProfileEvent::process()
423{
424 ThreadID size = cpu->threadContexts.size();
425 for (ThreadID i = 0; i < size; ++i) {
426 ThreadContext *tc = cpu->threadContexts[i];
427 tc->profileSample();
428 }
429
430 cpu->schedule(this, curTick() + interval);
431}
432
433void
434BaseCPU::serialize(std::ostream &os)
435{
436 SERIALIZE_SCALAR(instCnt);
437 interrupts->serialize(os);
438}
439
440void
441BaseCPU::unserialize(Checkpoint *cp, const std::string &section)
442{
443 UNSERIALIZE_SCALAR(instCnt);
444 interrupts->unserialize(cp, section);
445}
446
447void
448BaseCPU::traceFunctionsInternal(Addr pc)
449{
450 if (!debugSymbolTable)
451 return;
452
453 // if pc enters different function, print new function symbol and
454 // update saved range. Otherwise do nothing.
455 if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
456 string sym_str;
457 bool found = debugSymbolTable->findNearestSymbol(pc, sym_str,
458 currentFunctionStart,
459 currentFunctionEnd);
460
461 if (!found) {
462 // no symbol found: use addr as label
463 sym_str = csprintf("0x%x", pc);
464 currentFunctionStart = pc;
465 currentFunctionEnd = pc + 1;
466 }
467
468 ccprintf(*functionTraceStream, " (%d)\n%d: %s",
469 curTick() - functionEntryTick, curTick(), sym_str);
470 functionEntryTick = curTick();
471 }
472}