base.cc (8745:575cab0db076) -> base.cc (8779:2a590c51adb1)
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2011 Regents of the University of California
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Nathan Binkert
 *          Rick Strong
 */

#include <iostream>
#include <sstream>
#include <string>

#include "arch/tlb.hh"
#include "base/loader/symtab.hh"
#include "base/cprintf.hh"
#include "base/misc.hh"
#include "base/output.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/cpuevent.hh"
#include "cpu/profile.hh"
#include "cpu/thread_context.hh"
#include "debug/SyscallVerbose.hh"
#include "params/BaseCPU.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"
#include "sim/sim_events.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

// Hack
#include "sim/stat_control.hh"

using namespace std;

vector<BaseCPU *> BaseCPU::cpuList;

// This variable reflects the max number of threads in any CPU.  Be
// careful to only use it once all the CPUs that you care about have
// been initialized
int maxThreadsPerCPU = 1;

CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
    : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0),
      cpu(_cpu), _repeatEvent(true)
{
    if (_interval)
        cpu->schedule(this, curTick() + _interval);
}

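// In debug builds (NDEBUG undefined), process() below reports progress
// through the DPRINTFN trace machinery and includes a rough IPC figure
// (instructions committed this interval divided by cycles per interval);
// optimized builds fall back to a plain cprintf without IPC, presumably
// because the tracing infrastructure is compiled out in that configuration.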
void
CPUProgressEvent::process()
{
    Counter temp = cpu->totalInstructions();
#ifndef NDEBUG
    double ipc = double(temp - lastNumInst) / (_interval / cpu->ticks(1));

    DPRINTFN("%s progress event, total committed:%i, progress insts committed: "
             "%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst,
             ipc);
    ipc = 0.0;
#else
    cprintf("%lli: %s progress event, total committed:%i, progress insts "
            "committed: %lli\n", curTick(), cpu->name(), temp,
            temp - lastNumInst);
#endif
    lastNumInst = temp;

    if (_repeatEvent)
        cpu->schedule(this, curTick() + _interval);
}

const char *
CPUProgressEvent::description() const
{
    return "CPU Progress";
}

BaseCPU::BaseCPU(Params *p)
    : MemObject(p), clock(p->clock), instCnt(0), _cpuId(p->cpu_id),
      interrupts(p->interrupts),
      numThreads(p->numThreads), system(p->system),
      phase(p->phase)
{
//    currentTick = curTick();

    // if Python did not provide a valid ID, do it here
    if (_cpuId == -1) {
        _cpuId = cpuList.size();
    }

    // add self to global list of CPUs
    cpuList.push_back(this);

    DPRINTF(SyscallVerbose, "Constructing CPU with id %d\n", _cpuId);

    if (numThreads > maxThreadsPerCPU)
        maxThreadsPerCPU = numThreads;

    // allocate per-thread instruction-based event queues
    comInstEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comInstEventQueue[tid] =
            new EventQueue("instruction-based event queue");

    //
    // set up instruction-count-based termination events, if any
    //
    if (p->max_insts_any_thread != 0) {
        const char *cause = "a thread reached the max instruction count";
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new SimLoopExitEvent(cause, 0);
            comInstEventQueue[tid]->schedule(event, p->max_insts_any_thread);
        }
    }

    if (p->max_insts_all_threads != 0) {
        const char *cause = "all threads reached the max instruction count";

        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comInstEventQueue[tid]->schedule(event, p->max_insts_all_threads);
        }
    }
156 // allocate per-thread load-based event queues
157 comLoadEventQueue = new EventQueue *[numThreads];
158 for (ThreadID tid = 0; tid < numThreads; ++tid)
159 comLoadEventQueue[tid] = new EventQueue("load-based event queue");
160
    //
    // set up load-count-based termination events, if any
    //
    if (p->max_loads_any_thread != 0) {
        const char *cause = "a thread reached the max load count";
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new SimLoopExitEvent(cause, 0);
            comLoadEventQueue[tid]->schedule(event, p->max_loads_any_thread);
        }
    }

    if (p->max_loads_all_threads != 0) {
        const char *cause = "all threads reached the max load count";
        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comLoadEventQueue[tid]->schedule(event, p->max_loads_all_threads);
        }
    }

    functionTracingEnabled = false;
    if (p->function_trace) {
        functionTraceStream = simout.find(csprintf("ftrace.%s", name()));
        currentFunctionStart = currentFunctionEnd = 0;
        functionEntryTick = p->function_trace_start;

        if (p->function_trace_start == 0) {
            functionTracingEnabled = true;
        } else {
            typedef EventWrapper<BaseCPU, &BaseCPU::enableFunctionTrace> wrap;
            Event *event = new wrap(this, true);
            schedule(event, p->function_trace_start);
        }
    }
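    // When tracing should not begin at tick 0, the EventWrapper scheduled
    // above simply invokes enableFunctionTrace() when it fires at
    // function_trace_start, flipping functionTracingEnabled at that point
    // in simulated time.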
    interrupts->setCPU(this);

    if (FullSystem) {
#if FULL_SYSTEM
        profileEvent = NULL;
        if (params()->profile)
            profileEvent = new ProfileEvent(this, params()->profile);
#endif
    }
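    // FullSystem here is the runtime flag this revision introduces (note
    // the new sim/full_system.hh include), while FULL_SYSTEM is the older
    // compile-time switch; the #if apparently remains because profiling
    // support is still only compiled into full-system builds at this stage
    // of the transition.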
    tracer = params()->tracer;
}

void
BaseCPU::enableFunctionTrace()
{
    functionTracingEnabled = true;
}

BaseCPU::~BaseCPU()
{
}

void
BaseCPU::init()
{
    if (!params()->defer_registration)
        registerThreadContexts();
}

void
BaseCPU::startup()
{
    if (FullSystem) {
        if (!params()->defer_registration && profileEvent)
            schedule(profileEvent, curTick());
    }

    if (params()->progress_interval) {
        Tick num_ticks = ticks(params()->progress_interval);

        Event *event;
        event = new CPUProgressEvent(this, num_ticks);
    }
}
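
// The CPUProgressEvent above is fire-and-forget: its constructor schedules
// it when the interval is non-zero and, with _repeatEvent set, process()
// keeps rescheduling it, so the local pointer is never used again.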


void
BaseCPU::regStats()
{
    using namespace Stats;

    numCycles
        .name(name() + ".numCycles")
        .desc("number of cpu cycles simulated")
        ;

    numWorkItemsStarted
        .name(name() + ".numWorkItemsStarted")
        .desc("number of work items this cpu started")
        ;

    numWorkItemsCompleted
        .name(name() + ".numWorkItemsCompleted")
        .desc("number of work items this cpu completed")
        ;

    int size = threadContexts.size();
    if (size > 1) {
        for (int i = 0; i < size; ++i) {
            stringstream namestr;
            ccprintf(namestr, "%s.ctx%d", name(), i);
            threadContexts[i]->regStats(namestr.str());
        }
    } else if (size == 1)
        threadContexts[0]->regStats(name());
}

Tick
BaseCPU::nextCycle()
{
    Tick next_tick = curTick() - phase + clock - 1;
    next_tick -= (next_tick % clock);
    next_tick += phase;
    return next_tick;
}
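
// Worked example (illustrative values): with clock = 500 ticks, phase = 0,
// and curTick() = 1250, the arithmetic gives 1250 + 499 = 1749, which rounds
// down to 1500, the next clock edge. If curTick() is already on an edge,
// say 1500, the same math returns 1500 unchanged.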

Tick
BaseCPU::nextCycle(Tick begin_tick)
{
    Tick next_tick = begin_tick;
    if (next_tick % clock != 0)
        next_tick = next_tick - (next_tick % clock) + clock;
    next_tick += phase;

    assert(next_tick >= curTick());
    return next_tick;
}

void
BaseCPU::registerThreadContexts()
{
    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        ThreadContext *tc = threadContexts[tid];

        /** This is so that contextId and cpuId match where there is a
         * 1cpu:1context relationship.  Otherwise, the order of registration
         * could affect the assignment and cpu 1 could have context id 3, for
         * example.  We may even want to do something like this for SMT so
         * that cpu 0 has the lowest thread contexts and cpu N has the
         * highest, but I'll just do this for now
         */
        if (numThreads == 1)
            tc->setContextId(system->registerThreadContext(tc, _cpuId));
        else
            tc->setContextId(system->registerThreadContext(tc));
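
        // In syscall-emulation mode (FullSystem false), each hardware
        // context is also registered with the software Process it runs,
        // so per-process state can be looked up from a context id.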
        if (!FullSystem)
            tc->getProcessPtr()->assignThreadContext(tc->contextId());
    }
}


int
BaseCPU::findContext(ThreadContext *tc)
{
    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        if (tc == threadContexts[tid])
            return tid;
    }
    return 0;
}
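
// Note that findContext() falls back to returning 0 when tc is not one of
// this CPU's contexts, which is indistinguishable from a match on thread 0;
// callers presumably only pass contexts that belong to this CPU.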

void
BaseCPU::switchOut()
{
    if (profileEvent && profileEvent->scheduled())
        deschedule(profileEvent);
}

void
BaseCPU::takeOverFrom(BaseCPU *oldCPU, Port *ic, Port *dc)
{
    assert(threadContexts.size() == oldCPU->threadContexts.size());

    _cpuId = oldCPU->cpuId();

    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *newTC = threadContexts[i];
        ThreadContext *oldTC = oldCPU->threadContexts[i];

        newTC->takeOverFrom(oldTC);

        CpuEvent::replaceThreadContext(oldTC, newTC);

        assert(newTC->contextId() == oldTC->contextId());
        assert(newTC->threadId() == oldTC->threadId());
        system->replaceThreadContext(newTC, newTC->contextId());

        /* This code no longer works since the zero register (e.g.,
         * r31 on Alpha) doesn't necessarily contain zero at this
         * point.
           if (DTRACE(Context))
               ThreadContext::compare(oldTC, newTC);
         */

        Port *old_itb_port, *old_dtb_port, *new_itb_port, *new_dtb_port;
        old_itb_port = oldTC->getITBPtr()->getPort();
        old_dtb_port = oldTC->getDTBPtr()->getPort();
        new_itb_port = newTC->getITBPtr()->getPort();
        new_dtb_port = newTC->getDTBPtr()->getPort();

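        // Splicing works by rewiring peers on both ends: pointing the new
        // port at the old port's peer, then pointing that peer back at the
        // new port, leaves the old port dangling and the new port connected
        // in its place.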
        // Move over any table walker ports if they exist
        if (new_itb_port && !new_itb_port->isConnected()) {
            assert(old_itb_port);
            Port *peer = old_itb_port->getPeer();
            new_itb_port->setPeer(peer);
            peer->setPeer(new_itb_port);
        }
        if (new_dtb_port && !new_dtb_port->isConnected()) {
            assert(old_dtb_port);
            Port *peer = old_dtb_port->getPeer();
            new_dtb_port->setPeer(peer);
            peer->setPeer(new_dtb_port);
        }
    }

    interrupts = oldCPU->interrupts;
    interrupts->setCPU(this);

    if (FullSystem) {
        for (ThreadID i = 0; i < size; ++i)
            threadContexts[i]->profileClear();

        if (profileEvent)
            schedule(profileEvent, curTick());
    }

    // Connect new CPU to old CPU's memory only if new CPU isn't
    // connected to anything. Also connect old CPU's memory to new
    // CPU.
    if (!ic->isConnected()) {
        Port *peer = oldCPU->getPort("icache_port")->getPeer();
        ic->setPeer(peer);
        peer->setPeer(ic);
    }

    if (!dc->isConnected()) {
        Port *peer = oldCPU->getPort("dcache_port")->getPeer();
        dc->setPeer(peer);
        peer->setPeer(dc);
    }
}


BaseCPU::ProfileEvent::ProfileEvent(BaseCPU *_cpu, Tick _interval)
    : cpu(_cpu), interval(_interval)
{ }

void
BaseCPU::ProfileEvent::process()
{
    ThreadID size = cpu->threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = cpu->threadContexts[i];
        tc->profileSample();
    }

    cpu->schedule(this, curTick() + interval);
}

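// Unlike CPUProgressEvent, ProfileEvent reschedules itself unconditionally,
// so once started from startup() it samples every context each interval
// until the event is descheduled (e.g., in switchOut()).
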
void
BaseCPU::serialize(std::ostream &os)
{
    SERIALIZE_SCALAR(instCnt);
    interrupts->serialize(os);
}

void
BaseCPU::unserialize(Checkpoint *cp, const std::string &section)
{
    UNSERIALIZE_SCALAR(instCnt);
    interrupts->unserialize(cp, section);
}

void
BaseCPU::traceFunctionsInternal(Addr pc)
{
    if (!debugSymbolTable)
        return;

    // if pc enters different function, print new function symbol and
    // update saved range.  Otherwise do nothing.
    if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
        string sym_str;
        bool found = debugSymbolTable->findNearestSymbol(pc, sym_str,
                                                         currentFunctionStart,
                                                         currentFunctionEnd);

        if (!found) {
            // no symbol found: use addr as label
            sym_str = csprintf("0x%x", pc);
            currentFunctionStart = pc;
            currentFunctionEnd = pc + 1;
        }

        ccprintf(*functionTraceStream, " (%d)\n%d: %s",
                 curTick() - functionEntryTick, curTick(), sym_str);
        functionEntryTick = curTick();
    }
}
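
// The ftrace output format above is slightly non-obvious: each transition
// first appends the elapsed time " (delta)" to the previous line, then
// starts a new line "tick: symbol" for the function just entered, so a
// function's duration appears at the end of its own trace line.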