base.cc (8737:770ccf3af571 vs. 8745:575cab0db076)
/*
 * Copyright (c) 2011 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2011 Regents of the University of California
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Nathan Binkert
 *          Rick Strong
 */

#include <iostream>
#include <sstream>
#include <string>

#include "arch/tlb.hh"
#include "base/loader/symtab.hh"
#include "base/cprintf.hh"
#include "base/misc.hh"
#include "base/output.hh"
#include "base/trace.hh"
#include "config/use_checker.hh"
#include "cpu/base.hh"
#include "cpu/cpuevent.hh"
#include "cpu/profile.hh"
#include "cpu/thread_context.hh"
#include "debug/SyscallVerbose.hh"
#include "params/BaseCPU.hh"
#include "sim/process.hh"
#include "sim/sim_events.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

#if USE_CHECKER
#include "cpu/checker/cpu.hh"
#endif

// Hack
#include "sim/stat_control.hh"

using namespace std;

vector<BaseCPU *> BaseCPU::cpuList;

// This variable reflects the max number of threads in any CPU.  Be
// careful to only use it once all the CPUs that you care about have
// been initialized
int maxThreadsPerCPU = 1;
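
// CPUProgressEvent periodically reports how many instructions a CPU has
// committed since its last firing (and, in debug builds, an IPC figure).
// It reschedules itself every _interval ticks for as long as _repeatEvent
// is set.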
CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
    : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0),
      cpu(_cpu), _repeatEvent(true)
{
    if (_interval)
        cpu->schedule(this, curTick() + _interval);
}

void
CPUProgressEvent::process()
{
    Counter temp = cpu->totalInstructions();
#ifndef NDEBUG
    double ipc = double(temp - lastNumInst) / (_interval / cpu->ticks(1));

    DPRINTFN("%s progress event, total committed:%i, progress insts committed: "
             "%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst,
             ipc);
    ipc = 0.0;
#else
    cprintf("%lli: %s progress event, total committed:%i, progress insts "
            "committed: %lli\n", curTick(), cpu->name(), temp,
            temp - lastNumInst);
#endif
    lastNumInst = temp;

    if (_repeatEvent)
        cpu->schedule(this, curTick() + _interval);
}

const char *
CPUProgressEvent::description() const
{
    return "CPU Progress";
}

#if FULL_SYSTEM
BaseCPU::BaseCPU(Params *p)
    : MemObject(p), clock(p->clock), instCnt(0), _cpuId(p->cpu_id),
      interrupts(p->interrupts),
      numThreads(p->numThreads), system(p->system),
      phase(p->phase)
#else
BaseCPU::BaseCPU(Params *p)
    : MemObject(p), clock(p->clock), _cpuId(p->cpu_id),
      numThreads(p->numThreads), system(p->system),
      phase(p->phase)
#endif
{
//    currentTick = curTick();

    // if Python did not provide a valid ID, do it here
    if (_cpuId == -1) {
        _cpuId = cpuList.size();
    }

    // add self to global list of CPUs
    cpuList.push_back(this);

    DPRINTF(SyscallVerbose, "Constructing CPU with id %d\n", _cpuId);

    if (numThreads > maxThreadsPerCPU)
        maxThreadsPerCPU = numThreads;

    // allocate per-thread instruction-based event queues
    comInstEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comInstEventQueue[tid] =
            new EventQueue("instruction-based event queue");

    //
    // set up instruction-count-based termination events, if any
    //
    if (p->max_insts_any_thread != 0) {
        const char *cause = "a thread reached the max instruction count";
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new SimLoopExitEvent(cause, 0);
            comInstEventQueue[tid]->schedule(event, p->max_insts_any_thread);
        }
    }

    if (p->max_insts_all_threads != 0) {
        const char *cause = "all threads reached the max instruction count";

        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comInstEventQueue[tid]->schedule(event, p->max_insts_all_threads);
        }
    }

    // allocate per-thread load-based event queues
    comLoadEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comLoadEventQueue[tid] = new EventQueue("load-based event queue");

    //
    // set up load-count-based termination events, if any
    //
    if (p->max_loads_any_thread != 0) {
        const char *cause = "a thread reached the max load count";
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new SimLoopExitEvent(cause, 0);
            comLoadEventQueue[tid]->schedule(event, p->max_loads_any_thread);
        }
    }

    if (p->max_loads_all_threads != 0) {
        const char *cause = "all threads reached the max load count";
        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comLoadEventQueue[tid]->schedule(event, p->max_loads_all_threads);
        }
    }

    functionTracingEnabled = false;
    if (p->function_trace) {
        const string fname = csprintf("ftrace.%s", name());
        functionTraceStream = simout.find(fname);
        if (!functionTraceStream)
            functionTraceStream = simout.create(fname);

        currentFunctionStart = currentFunctionEnd = 0;
        functionEntryTick = p->function_trace_start;

        if (p->function_trace_start == 0) {
            functionTracingEnabled = true;
        } else {
            typedef EventWrapper<BaseCPU, &BaseCPU::enableFunctionTrace> wrap;
            Event *event = new wrap(this, true);
            schedule(event, p->function_trace_start);
        }
    }
#if FULL_SYSTEM
    // Check if the CPU model has interrupts connected; the CheckerCPU,
    // for example, cannot take interrupts directly.
    if (interrupts)
        interrupts->setCPU(this);

    profileEvent = NULL;
    if (params()->profile)
        profileEvent = new ProfileEvent(this, params()->profile);
#endif
    tracer = params()->tracer;
}

void
BaseCPU::enableFunctionTrace()
{
    functionTracingEnabled = true;
}

BaseCPU::~BaseCPU()
{
}

void
BaseCPU::init()
{
    if (!params()->defer_registration)
        registerThreadContexts();
}

void
BaseCPU::startup()
{
#if FULL_SYSTEM
    if (!params()->defer_registration && profileEvent)
        schedule(profileEvent, curTick());
#endif

    if (params()->progress_interval) {
        Tick num_ticks = ticks(params()->progress_interval);

        new CPUProgressEvent(this, num_ticks);
    }
}

void
BaseCPU::regStats()
{
    using namespace Stats;

    numCycles
        .name(name() + ".numCycles")
        .desc("number of cpu cycles simulated")
        ;

    numWorkItemsStarted
        .name(name() + ".numWorkItemsStarted")
        .desc("number of work items this cpu started")
        ;

    numWorkItemsCompleted
        .name(name() + ".numWorkItemsCompleted")
        .desc("number of work items this cpu completed")
        ;

    int size = threadContexts.size();
    if (size > 1) {
        for (int i = 0; i < size; ++i) {
            stringstream namestr;
            ccprintf(namestr, "%s.ctx%d", name(), i);
            threadContexts[i]->regStats(namestr.str());
        }
    } else if (size == 1)
        threadContexts[0]->regStats(name());

#if FULL_SYSTEM
#endif
}

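// Return the tick of the next clock edge at or after curTick().  Clock
// edges fall at phase, phase + clock, phase + 2 * clock, and so on.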
Tick
BaseCPU::nextCycle()
{
    Tick next_tick = curTick() - phase + clock - 1;
    next_tick -= (next_tick % clock);
    next_tick += phase;
    return next_tick;
}

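// Round begin_tick up to the next clock edge, honoring phase.  For
// example, with clock == 500 and phase == 0, nextCycle(1200) returns
// 1500, while nextCycle(1500) returns 1500 unchanged.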
Tick
BaseCPU::nextCycle(Tick begin_tick)
{
    Tick next_tick = begin_tick;
    if (next_tick % clock != 0)
        next_tick = next_tick - (next_tick % clock) + clock;
    next_tick += phase;

    assert(next_tick >= curTick());
    return next_tick;
}

void
BaseCPU::registerThreadContexts()
{
    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        ThreadContext *tc = threadContexts[tid];

        /** This is so that contextId and cpuId match where there is a
         * 1cpu:1context relationship.  Otherwise, the order of registration
         * could affect the assignment and cpu 1 could have context id 3, for
         * example.  We may even want to do something like this for SMT so
         * that cpu 0 has the lowest thread contexts and cpu N has the
         * highest, but I'll just do this for now
         */
        if (numThreads == 1)
            tc->setContextId(system->registerThreadContext(tc, _cpuId));
        else
            tc->setContextId(system->registerThreadContext(tc));
#if !FULL_SYSTEM
        tc->getProcessPtr()->assignThreadContext(tc->contextId());
#endif
    }
}

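// Return the thread ID of the given context on this CPU.  Note that an
// unknown context also yields 0, which callers cannot distinguish from
// thread 0.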
int
BaseCPU::findContext(ThreadContext *tc)
{
    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        if (tc == threadContexts[tid])
            return tid;
    }
    return 0;
}

void
BaseCPU::switchOut()
{
//    panic("This CPU doesn't support sampling!");
#if FULL_SYSTEM
    if (profileEvent && profileEvent->scheduled())
        deschedule(profileEvent);
#endif
}

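// Assume the place of oldCPU in the system: migrate every thread context,
// re-point CPU events and the system's context table at the new contexts,
// and splice this CPU's unconnected ports (caches, TLB table walkers and,
// when built with the checker, its walkers too) into the peers oldCPU was
// using.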
void
BaseCPU::takeOverFrom(BaseCPU *oldCPU)
{
    Port *ic = getPort("icache_port");
    Port *dc = getPort("dcache_port");
    assert(threadContexts.size() == oldCPU->threadContexts.size());

    _cpuId = oldCPU->cpuId();

    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *newTC = threadContexts[i];
        ThreadContext *oldTC = oldCPU->threadContexts[i];

        newTC->takeOverFrom(oldTC);

        CpuEvent::replaceThreadContext(oldTC, newTC);

        assert(newTC->contextId() == oldTC->contextId());
        assert(newTC->threadId() == oldTC->threadId());
        system->replaceThreadContext(newTC, newTC->contextId());

        /* This code no longer works since the zero register (e.g.,
         * r31 on Alpha) doesn't necessarily contain zero at this
         * point.
           if (DTRACE(Context))
            ThreadContext::compare(oldTC, newTC);
        */

        Port *old_itb_port, *old_dtb_port, *new_itb_port, *new_dtb_port;
        old_itb_port = oldTC->getITBPtr()->getPort();
        old_dtb_port = oldTC->getDTBPtr()->getPort();
        new_itb_port = newTC->getITBPtr()->getPort();
        new_dtb_port = newTC->getDTBPtr()->getPort();

        // Move over any table walker ports if they exist
        if (new_itb_port && !new_itb_port->isConnected()) {
            assert(old_itb_port);
            Port *peer = old_itb_port->getPeer();
            new_itb_port->setPeer(peer);
            peer->setPeer(new_itb_port);
        }
        if (new_dtb_port && !new_dtb_port->isConnected()) {
            assert(old_dtb_port);
            Port *peer = old_dtb_port->getPeer();
            new_dtb_port->setPeer(peer);
            peer->setPeer(new_dtb_port);
        }

#if USE_CHECKER
        Port *old_checker_itb_port, *old_checker_dtb_port;
        Port *new_checker_itb_port, *new_checker_dtb_port;

        CheckerCPU *oldChecker =
            dynamic_cast<CheckerCPU*>(oldTC->getCheckerCpuPtr());
        CheckerCPU *newChecker =
            dynamic_cast<CheckerCPU*>(newTC->getCheckerCpuPtr());
        old_checker_itb_port = oldChecker->getITBPtr()->getPort();
        old_checker_dtb_port = oldChecker->getDTBPtr()->getPort();
        new_checker_itb_port = newChecker->getITBPtr()->getPort();
        new_checker_dtb_port = newChecker->getDTBPtr()->getPort();

        // Move over any table walker ports if they exist for checker
        if (new_checker_itb_port && !new_checker_itb_port->isConnected()) {
            assert(old_checker_itb_port);
            Port *peer = old_checker_itb_port->getPeer();
            new_checker_itb_port->setPeer(peer);
            peer->setPeer(new_checker_itb_port);
        }
        if (new_checker_dtb_port && !new_checker_dtb_port->isConnected()) {
            assert(old_checker_dtb_port);
            Port *peer = old_checker_dtb_port->getPeer();
            new_checker_dtb_port->setPeer(peer);
            peer->setPeer(new_checker_dtb_port);
        }
#endif

    }

#if FULL_SYSTEM
    interrupts = oldCPU->interrupts;
    interrupts->setCPU(this);

    for (ThreadID i = 0; i < size; ++i)
        threadContexts[i]->profileClear();

    if (profileEvent)
        schedule(profileEvent, curTick());
#endif

    // Connect new CPU to old CPU's memory only if new CPU isn't
    // connected to anything.  Also connect old CPU's memory to new
    // CPU.
    if (!ic->isConnected()) {
        Port *peer = oldCPU->getPort("icache_port")->getPeer();
        ic->setPeer(peer);
        peer->setPeer(ic);
    }

    if (!dc->isConnected()) {
        Port *peer = oldCPU->getPort("dcache_port")->getPeer();
        dc->setPeer(peer);
        peer->setPeer(dc);
    }
}


#if FULL_SYSTEM
BaseCPU::ProfileEvent::ProfileEvent(BaseCPU *_cpu, Tick _interval)
    : cpu(_cpu), interval(_interval)
{ }

void
BaseCPU::ProfileEvent::process()
{
    ThreadID size = cpu->threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = cpu->threadContexts[i];
        tc->profileSample();
    }

    cpu->schedule(this, curTick() + interval);
}

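// Checkpointing: BaseCPU itself saves only the instruction count and the
// interrupt controller state; architectural and thread state are
// serialized elsewhere.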
void
BaseCPU::serialize(std::ostream &os)
{
    SERIALIZE_SCALAR(instCnt);
    interrupts->serialize(os);
}

void
BaseCPU::unserialize(Checkpoint *cp, const std::string &section)
{
    UNSERIALIZE_SCALAR(instCnt);
    interrupts->unserialize(cp, section);
}

#endif // FULL_SYSTEM

void
BaseCPU::traceFunctionsInternal(Addr pc)
{
    if (!debugSymbolTable)
        return;

    // if pc enters different function, print new function symbol and
    // update saved range.  Otherwise do nothing.
    if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
        string sym_str;
        bool found = debugSymbolTable->findNearestSymbol(pc, sym_str,
                                                         currentFunctionStart,
                                                         currentFunctionEnd);

        if (!found) {
            // no symbol found: use addr as label
            sym_str = csprintf("0x%x", pc);
            currentFunctionStart = pc;
            currentFunctionEnd = pc + 1;
        }

        ccprintf(*functionTraceStream, " (%d)\n%d: %s",
                 curTick() - functionEntryTick, curTick(), sym_str);
        functionEntryTick = curTick();
    }
}
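
// Default CpuPort callbacks: a CPU's ports only originate requests, so
// unexpected timing, retry, and atomic callbacks panic, while functional
// accesses and range changes are simply ignored.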
bool
BaseCPU::CpuPort::recvTiming(PacketPtr pkt)
{
    panic("BaseCPU doesn't expect recvTiming callback!");
    return true;
}

void
BaseCPU::CpuPort::recvRetry()
{
    panic("BaseCPU doesn't expect recvRetry callback!");
}

Tick
BaseCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    panic("BaseCPU doesn't expect recvAtomic callback!");
    return curTick();
}

void
BaseCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    // No internal storage to update (in the general case). In the
    // long term this should never be called, but that assumed a split
    // into master/slave and request/response.
}

void
BaseCPU::CpuPort::recvRangeChange()
{
}