base.cc (12127:4207df055b0d → 12276:22c220be30c5)
/*
 * Copyright (c) 2011-2012,2016-2017 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2011 Regents of the University of California
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Nathan Binkert
 *          Rick Strong
 */

#include "cpu/base.hh"

#include <iostream>
#include <sstream>
#include <string>

#include "arch/tlb.hh"
#include "base/cprintf.hh"
#include "base/loader/symtab.hh"
#include "base/misc.hh"
#include "base/output.hh"
#include "base/trace.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/cpuevent.hh"
#include "cpu/profile.hh"
#include "cpu/thread_context.hh"
#include "debug/Mwait.hh"
#include "debug/SyscallVerbose.hh"
#include "mem/page_table.hh"
#include "params/BaseCPU.hh"
#include "sim/clocked_object.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"
#include "sim/sim_events.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

// Hack
#include "sim/stat_control.hh"

using namespace std;

vector<BaseCPU *> BaseCPU::cpuList;

// This variable reflects the max number of threads in any CPU. Be
// careful to use it only once all the CPUs that you care about have
// been initialized.
int maxThreadsPerCPU = 1;

CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
    : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0),
      cpu(_cpu), _repeatEvent(true)
{
    if (_interval)
        cpu->schedule(this, curTick() + _interval);
}

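// process() reports forward progress for this CPU: it samples the
// committed-op count, prints the delta since the last report (plus a rough
// IPC figure in debug builds), and reschedules itself every _interval
// ticks while _repeatEvent is set.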
void
CPUProgressEvent::process()
{
    Counter temp = cpu->totalOps();

    if (_repeatEvent)
        cpu->schedule(this, curTick() + _interval);

    if (cpu->switchedOut()) {
        return;
    }

#ifndef NDEBUG
    double ipc = double(temp - lastNumInst) / (_interval / cpu->clockPeriod());

    DPRINTFN("%s progress event, total committed:%i, progress insts committed: "
             "%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst,
             ipc);
    ipc = 0.0;
#else
    cprintf("%lli: %s progress event, total committed:%i, progress insts "
            "committed: %lli\n", curTick(), cpu->name(), temp,
            temp - lastNumInst);
#endif
    lastNumInst = temp;
}

const char *
CPUProgressEvent::description() const
{
    return "CPU Progress";
}

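// The constructor registers this CPU in the global cpuList, allocates the
// per-thread instruction- and load-based event queues, arms any
// instruction/load-count termination and SimPoint events requested by the
// parameters, and sets up function tracing and the per-thread interrupt
// controllers.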
BaseCPU::BaseCPU(Params *p, bool is_checker)
    : MemObject(p), instCnt(0), _cpuId(p->cpu_id), _socketId(p->socket_id),
      _instMasterId(p->system->getMasterId(name() + ".inst")),
      _dataMasterId(p->system->getMasterId(name() + ".data")),
      _taskId(ContextSwitchTaskId::Unknown), _pid(invldPid),
      _switchedOut(p->switched_out), _cacheLineSize(p->system->cacheLineSize()),
      interrupts(p->interrupts), profileEvent(NULL),
      numThreads(p->numThreads), system(p->system),
      functionTraceStream(nullptr), currentFunctionStart(0),
      currentFunctionEnd(0), functionEntryTick(0),
      addressMonitor(p->numThreads),
      syscallRetryLatency(p->syscallRetryLatency),
      pwrGatingLatency(p->pwr_gating_latency),
      enterPwrGatingEvent([this]{ enterPwrGating(); }, name())
{
    // if Python did not provide a valid ID, do it here
    if (_cpuId == -1) {
        _cpuId = cpuList.size();
    }

    // add self to global list of CPUs
    cpuList.push_back(this);

    DPRINTF(SyscallVerbose, "Constructing CPU with id %d, socket id %d\n",
            _cpuId, _socketId);

    if (numThreads > maxThreadsPerCPU)
        maxThreadsPerCPU = numThreads;

    // allocate per-thread instruction-based event queues
    comInstEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comInstEventQueue[tid] =
            new EventQueue("instruction-based event queue");

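    // Note: these queues are not advanced in ticks; CPU models service
    // them against each thread's committed-instruction count, so an event
    // scheduled at "time" N fires when the thread commits its Nth
    // instruction.
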
    //
    // set up instruction-count-based termination events, if any
    //
    if (p->max_insts_any_thread != 0) {
        const char *cause = "a thread reached the max instruction count";
        for (ThreadID tid = 0; tid < numThreads; ++tid)
            scheduleInstStop(tid, p->max_insts_any_thread, cause);
    }

    // Set up instruction-count-based termination events for SimPoints.
    // Typically there is more than one action point.
    // Simulation.py is responsible for taking the necessary actions upon
    // exiting the simulation loop.
    if (!p->simpoint_start_insts.empty()) {
        const char *cause = "simpoint starting point found";
        for (size_t i = 0; i < p->simpoint_start_insts.size(); ++i)
            scheduleInstStop(0, p->simpoint_start_insts[i], cause);
    }

    if (p->max_insts_all_threads != 0) {
        const char *cause = "all threads reached the max instruction count";

        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comInstEventQueue[tid]->schedule(event, p->max_insts_all_threads);
        }
    }

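    // Worked example: with numThreads == 2 and max_insts_all_threads set
    // to 1M, each thread gets a CountedExitEvent at instruction 1M on its
    // own queue; the first one to fire drops the shared counter to 1, the
    // second drops it to 0 and exits the simulation loop.
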
    // allocate per-thread load-based event queues
    comLoadEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comLoadEventQueue[tid] = new EventQueue("load-based event queue");

    //
    // set up load-count-based termination events, if any
    //
    if (p->max_loads_any_thread != 0) {
        const char *cause = "a thread reached the max load count";
        for (ThreadID tid = 0; tid < numThreads; ++tid)
            scheduleLoadStop(tid, p->max_loads_any_thread, cause);
    }

    if (p->max_loads_all_threads != 0) {
        const char *cause = "all threads reached the max load count";
        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comLoadEventQueue[tid]->schedule(event, p->max_loads_all_threads);
        }
    }

    functionTracingEnabled = false;
    if (p->function_trace) {
        const string fname = csprintf("ftrace.%s", name());
        functionTraceStream = simout.findOrCreate(fname)->stream();

        currentFunctionStart = currentFunctionEnd = 0;
        functionEntryTick = p->function_trace_start;

        if (p->function_trace_start == 0) {
            functionTracingEnabled = true;
        } else {
            Event *event = new EventFunctionWrapper(
                [this]{ enableFunctionTrace(); }, name(), true);
            schedule(event, p->function_trace_start);
        }
    }
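    // The trace lands in ftrace.<cpu name> under the simulation output
    // directory; when function_trace_start is non-zero, tracing is enabled
    // lazily by the one-shot event scheduled above.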

    // The interrupts should always be present unless this CPU is
    // switched in later or in case it is a checker CPU
    if (!params()->switched_out && !is_checker) {
        fatal_if(interrupts.size() != numThreads,
                 "CPU %s has %i interrupt controllers, but is expecting one "
                 "per thread (%i)\n",
                 name(), interrupts.size(), numThreads);
        for (ThreadID tid = 0; tid < numThreads; tid++)
            interrupts[tid]->setCPU(this);
    }

    if (FullSystem) {
        if (params()->profile)
            profileEvent = new EventFunctionWrapper(
                [this]{ processProfileEvent(); },
                name());
    }
    tracer = params()->tracer;

    if (params()->isa.size() != numThreads) {
        fatal("Number of ISAs (%i) assigned to the CPU does not equal number "
              "of threads (%i).\n", params()->isa.size(), numThreads);
    }
}

void
BaseCPU::enableFunctionTrace()
{
    functionTracingEnabled = true;
}

BaseCPU::~BaseCPU()
{
    delete profileEvent;
    delete[] comLoadEventQueue;
    delete[] comInstEventQueue;
}

void
BaseCPU::armMonitor(ThreadID tid, Addr address)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    monitor.armed = true;
    monitor.vAddr = address;
    monitor.pAddr = 0x0;
    DPRINTF(Mwait, "[tid:%d] Armed monitor (vAddr=0x%lx)\n", tid, address);
}
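
// armMonitor()/mwait() implement a MONITOR/MWAIT-style protocol: a thread
// first arms the monitor on a virtual address, then mwait() records the
// armed cache line's physical address and puts the thread into a waiting
// state until AddressMonitor::doMonitor() observes an access to that line.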
bool
BaseCPU::mwait(ThreadID tid, PacketPtr pkt)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    if (!monitor.gotWakeup) {
        int block_size = cacheLineSize();
        uint64_t mask = ~((uint64_t)(block_size - 1));

        assert(pkt->req->hasPaddr());
        monitor.pAddr = pkt->getAddr() & mask;
        monitor.waiting = true;

        DPRINTF(Mwait, "[tid:%d] mwait called (vAddr=0x%lx, "
                "line's paddr=0x%lx)\n", tid, monitor.vAddr, monitor.pAddr);
        return true;
    } else {
        monitor.gotWakeup = false;
        return false;
    }
}

void
BaseCPU::mwaitAtomic(ThreadID tid, ThreadContext *tc, TheISA::TLB *dtb)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    Request req;
    Addr addr = monitor.vAddr;
    int block_size = cacheLineSize();
    uint64_t mask = ~((uint64_t)(block_size - 1));
    int size = block_size;

    // The address of the next line if it crosses a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, block_size);

    if (secondAddr > addr)
        size = secondAddr - addr;

    req.setVirt(0, addr, size, 0x0, dataMasterId(), tc->instAddr());

    // translate to physical address
    Fault fault = dtb->translateAtomic(&req, tc, BaseTLB::Read);
    assert(fault == NoFault);

    monitor.pAddr = req.getPaddr() & mask;
    monitor.waiting = true;

    DPRINTF(Mwait, "[tid:%d] mwait called (vAddr=0x%lx, line's paddr=0x%lx)\n",
            tid, monitor.vAddr, monitor.pAddr);
}

void
BaseCPU::init()
{
    if (!params()->switched_out) {
        registerThreadContexts();

        verifyMemoryMode();
    }
}

void
BaseCPU::startup()
{
    if (FullSystem) {
        if (!params()->switched_out && profileEvent)
            schedule(profileEvent, curTick());
    }

    if (params()->progress_interval) {
        new CPUProgressEvent(this, params()->progress_interval);
    }

    if (_switchedOut)
        ClockedObject::pwrState(Enums::PwrState::OFF);

    // Assumption: the CPU starts operating instantaneously, without any
    // power-up latency.
    if (ClockedObject::pwrState() == Enums::PwrState::UNDEFINED)
        ClockedObject::pwrState(Enums::PwrState::ON);
}

ProbePoints::PMUUPtr
BaseCPU::pmuProbePoint(const char *name)
{
    ProbePoints::PMUUPtr ptr;
    ptr.reset(new ProbePoints::PMU(getProbeManager(), name));

    return ptr;
}

void
BaseCPU::regProbePoints()
{
    ppCycles = pmuProbePoint("Cycles");

    ppRetiredInsts = pmuProbePoint("RetiredInsts");
    ppRetiredLoads = pmuProbePoint("RetiredLoads");
    ppRetiredStores = pmuProbePoint("RetiredStores");
    ppRetiredBranches = pmuProbePoint("RetiredBranches");
}
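
// These probe points expose microarchitectural events through the probe
// manager, so external listeners (for instance a PMU model) can subscribe
// by name, e.g. "RetiredInsts", without the CPU knowing about them.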

void
BaseCPU::probeInstCommit(const StaticInstPtr &inst)
{
    if (!inst->isMicroop() || inst->isLastMicroop())
        ppRetiredInsts->notify(1);

    if (inst->isLoad())
        ppRetiredLoads->notify(1);

    if (inst->isStore())
        ppRetiredStores->notify(1);

    if (inst->isControl())
        ppRetiredBranches->notify(1);
}

void
BaseCPU::regStats()
{
    MemObject::regStats();

    using namespace Stats;

    numCycles
        .name(name() + ".numCycles")
        .desc("number of cpu cycles simulated")
        ;

    numWorkItemsStarted
        .name(name() + ".numWorkItemsStarted")
        .desc("number of work items this cpu started")
        ;

    numWorkItemsCompleted
        .name(name() + ".numWorkItemsCompleted")
        .desc("number of work items this cpu completed")
        ;

    int size = threadContexts.size();
    if (size > 1) {
        for (int i = 0; i < size; ++i) {
            stringstream namestr;
            ccprintf(namestr, "%s.ctx%d", name(), i);
            threadContexts[i]->regStats(namestr.str());
        }
    } else if (size == 1)
        threadContexts[0]->regStats(name());
}

BaseMasterPort &
BaseCPU::getMasterPort(const string &if_name, PortID idx)
{
    // Get the right port based on name. This applies to all the
    // subclasses of the base CPU and relies on their implementation
    // of getDataPort and getInstPort. In all cases these methods
    // return a MasterPort pointer.
    if (if_name == "dcache_port")
        return getDataPort();
    else if (if_name == "icache_port")
        return getInstPort();
    else
        return MemObject::getMasterPort(if_name, idx);
}
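
// The port names above are what the Python configuration uses when wiring
// up the CPU, e.g. (hypothetical config sketch):
//
//     system.cpu.icache_port = system.membus.slave
//     system.cpu.dcache_port = system.membus.slave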

void
BaseCPU::registerThreadContexts()
{
    assert(system->multiThread || numThreads == 1);

    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        ThreadContext *tc = threadContexts[tid];

        if (system->multiThread) {
            tc->setContextId(system->registerThreadContext(tc));
        } else {
            tc->setContextId(system->registerThreadContext(tc, _cpuId));
        }

        if (!FullSystem)
            tc->getProcessPtr()->assignThreadContext(tc->contextId());
    }
}

void
BaseCPU::deschedulePowerGatingEvent()
{
    if (enterPwrGatingEvent.scheduled()) {
        deschedule(enterPwrGatingEvent);
    }
}

void
BaseCPU::schedulePowerGatingEvent()
{
    for (auto tc : threadContexts) {
        if (tc->status() == ThreadContext::Active)
            return;
    }

    if (ClockedObject::pwrState() == Enums::PwrState::CLK_GATED) {
        assert(!enterPwrGatingEvent.scheduled());
        // Schedule a power gating event when clock gated for the specified
        // amount of time
        schedule(enterPwrGatingEvent, clockEdge(pwrGatingLatency));
    }
}

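// Power-state flow implemented here: the CPU is ON while any thread is
// active, drops to CLK_GATED when every thread suspends, and moves to OFF
// once enterPwrGatingEvent fires after pwrGatingLatency cycles of clock
// gating; activating a context squashes the pending event and returns to ON.
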
int
BaseCPU::findContext(ThreadContext *tc)
{
    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        if (tc == threadContexts[tid])
            return tid;
    }
    return 0;
}

void
BaseCPU::activateContext(ThreadID thread_num)
{
    // Squash the enter-power-gating event while the CPU is being activated
    if (enterPwrGatingEvent.scheduled())
        deschedule(enterPwrGatingEvent);

    // For any active thread running, update CPU power state to active (ON)
    ClockedObject::pwrState(Enums::PwrState::ON);
}

void
BaseCPU::suspendContext(ThreadID thread_num)
{
    // Check if all threads are suspended
    for (auto t : threadContexts) {
        if (t->status() != ThreadContext::Suspended) {
            return;
        }
    }

    // All CPU threads suspended, enter a lower power state for the CPU
    ClockedObject::pwrState(Enums::PwrState::CLK_GATED);

    // Schedule the power gating event after the CPU has been clock gated
    // for a configurable number of cycles
    schedule(enterPwrGatingEvent, clockEdge(pwrGatingLatency));
}

void
BaseCPU::enterPwrGating(void)
{
    ClockedObject::pwrState(Enums::PwrState::OFF);
}

void
BaseCPU::switchOut()
{
    assert(!_switchedOut);
    _switchedOut = true;
    if (profileEvent && profileEvent->scheduled())
        deschedule(profileEvent);

    // Flush all TLBs in the CPU to avoid having stale translations if
    // it gets switched in later.
    flushTLBs();

    // Go to the power gating state
    ClockedObject::pwrState(Enums::PwrState::OFF);
}

void
BaseCPU::takeOverFrom(BaseCPU *oldCPU)
{
    assert(threadContexts.size() == oldCPU->threadContexts.size());
    assert(_cpuId == oldCPU->cpuId());
    assert(_switchedOut);
    assert(oldCPU != this);
    _pid = oldCPU->getPid();
    _taskId = oldCPU->taskId();
    // Take over the power state of the switched-out CPU
    ClockedObject::pwrState(oldCPU->pwrState());
    _switchedOut = false;

    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *newTC = threadContexts[i];
        ThreadContext *oldTC = oldCPU->threadContexts[i];

        newTC->takeOverFrom(oldTC);

        CpuEvent::replaceThreadContext(oldTC, newTC);

        assert(newTC->contextId() == oldTC->contextId());
        assert(newTC->threadId() == oldTC->threadId());
        system->replaceThreadContext(newTC, newTC->contextId());

        /* This code no longer works since the zero register (e.g.,
         * r31 on Alpha) doesn't necessarily contain zero at this
         * point.
           if (DTRACE(Context))
               ThreadContext::compare(oldTC, newTC);
        */

        BaseMasterPort *old_itb_port = oldTC->getITBPtr()->getMasterPort();
        BaseMasterPort *old_dtb_port = oldTC->getDTBPtr()->getMasterPort();
        BaseMasterPort *new_itb_port = newTC->getITBPtr()->getMasterPort();
        BaseMasterPort *new_dtb_port = newTC->getDTBPtr()->getMasterPort();

        // Move over any table walker ports if they exist
        if (new_itb_port) {
            assert(!new_itb_port->isConnected());
            assert(old_itb_port);
            assert(old_itb_port->isConnected());
            BaseSlavePort &slavePort = old_itb_port->getSlavePort();
            old_itb_port->unbind();
            new_itb_port->bind(slavePort);
        }
        if (new_dtb_port) {
            assert(!new_dtb_port->isConnected());
            assert(old_dtb_port);
            assert(old_dtb_port->isConnected());
            BaseSlavePort &slavePort = old_dtb_port->getSlavePort();
            old_dtb_port->unbind();
            new_dtb_port->bind(slavePort);
        }
        newTC->getITBPtr()->takeOverFrom(oldTC->getITBPtr());
        newTC->getDTBPtr()->takeOverFrom(oldTC->getDTBPtr());

        // Check whether or not we have to transfer CheckerCPU
        // objects over in the switch
        CheckerCPU *oldChecker = oldTC->getCheckerCpuPtr();
        CheckerCPU *newChecker = newTC->getCheckerCpuPtr();
        if (oldChecker && newChecker) {
            BaseMasterPort *old_checker_itb_port =
                oldChecker->getITBPtr()->getMasterPort();
            BaseMasterPort *old_checker_dtb_port =
                oldChecker->getDTBPtr()->getMasterPort();
            BaseMasterPort *new_checker_itb_port =
                newChecker->getITBPtr()->getMasterPort();
            BaseMasterPort *new_checker_dtb_port =
                newChecker->getDTBPtr()->getMasterPort();

            newChecker->getITBPtr()->takeOverFrom(oldChecker->getITBPtr());
            newChecker->getDTBPtr()->takeOverFrom(oldChecker->getDTBPtr());

            // Move over any table walker ports if they exist for checker
            if (new_checker_itb_port) {
                assert(!new_checker_itb_port->isConnected());
                assert(old_checker_itb_port);
                assert(old_checker_itb_port->isConnected());
                BaseSlavePort &slavePort =
                    old_checker_itb_port->getSlavePort();
                old_checker_itb_port->unbind();
                new_checker_itb_port->bind(slavePort);
            }
            if (new_checker_dtb_port) {
                assert(!new_checker_dtb_port->isConnected());
                assert(old_checker_dtb_port);
                assert(old_checker_dtb_port->isConnected());
                BaseSlavePort &slavePort =
                    old_checker_dtb_port->getSlavePort();
                old_checker_dtb_port->unbind();
                new_checker_dtb_port->bind(slavePort);
            }
        }
    }

    interrupts = oldCPU->interrupts;
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        interrupts[tid]->setCPU(this);
    }
    oldCPU->interrupts.clear();

    if (FullSystem) {
        for (ThreadID i = 0; i < size; ++i)
            threadContexts[i]->profileClear();

        if (profileEvent)
            schedule(profileEvent, curTick());
    }

    // All CPUs have an instruction and a data port, and the new CPU's
    // ports are dangling while the old CPU has its ports connected
    // already. Unbind the old CPU and then bind the ports of the one
    // we are switching to.
    assert(!getInstPort().isConnected());
    assert(oldCPU->getInstPort().isConnected());
    BaseSlavePort &inst_peer_port = oldCPU->getInstPort().getSlavePort();
    oldCPU->getInstPort().unbind();
    getInstPort().bind(inst_peer_port);

    assert(!getDataPort().isConnected());
    assert(oldCPU->getDataPort().isConnected());
    BaseSlavePort &data_peer_port = oldCPU->getDataPort().getSlavePort();
    oldCPU->getDataPort().unbind();
    getDataPort().bind(data_peer_port);
}
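
// takeOverFrom() is the CPU-switching hook: the Python side typically
// drains the system and then calls m5.switchCpus(), which invokes this on
// the incoming CPU with the outgoing one as oldCPU.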

void
BaseCPU::flushTLBs()
{
    for (ThreadID i = 0; i < threadContexts.size(); ++i) {
        ThreadContext &tc(*threadContexts[i]);
        CheckerCPU *checker(tc.getCheckerCpuPtr());

        tc.getITBPtr()->flushAll();
        tc.getDTBPtr()->flushAll();
        if (checker) {
            checker->getITBPtr()->flushAll();
            checker->getDTBPtr()->flushAll();
        }
    }
}

void
BaseCPU::processProfileEvent()
{
    ThreadID size = threadContexts.size();

    for (ThreadID i = 0; i < size; ++i)
        threadContexts[i]->profileSample();

    schedule(profileEvent, curTick() + params()->profile);
}

void
BaseCPU::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        /* Unlike _pid, _taskId is not serialized, as they are dynamically
         * assigned unique ids that are only meaningful for the duration of
         * a specific run. We will need to serialize the entire taskMap in
         * system. */
        SERIALIZE_SCALAR(_pid);

        // Serialize the threads, this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i) {
            ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
            interrupts[i]->serialize(cp);
            serializeThread(cp, i);
        }
    }
}

void
BaseCPU::unserialize(CheckpointIn &cp)
{
    UNSERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        UNSERIALIZE_SCALAR(_pid);

        // Unserialize the threads, this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i) {
            ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
            interrupts[i]->unserialize(cp);
            unserializeThread(cp, i);
        }
    }
}
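
// Each thread's state is nested in its own "xc.<i>" checkpoint section, so
// serialize() and unserialize() walk the sections in the same order and the
// per-thread layout is delegated to serializeThread()/unserializeThread().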

void
BaseCPU::scheduleInstStop(ThreadID tid, Counter insts, const char *cause)
{
    const Tick now(comInstEventQueue[tid]->getCurTick());
    Event *event(new LocalSimLoopExitEvent(cause, 0));

    comInstEventQueue[tid]->schedule(event, now + insts);
}

uint64_t
BaseCPU::getCurrentInstCount(ThreadID tid)
{
    return Tick(comInstEventQueue[tid]->getCurTick());
}

AddressMonitor::AddressMonitor()
{
    armed = false;
    waiting = false;
    gotWakeup = false;
}

bool
AddressMonitor::doMonitor(PacketPtr pkt)
{
    assert(pkt->req->hasPaddr());
    if (armed && waiting) {
        if (pAddr == pkt->getAddr()) {
            DPRINTF(Mwait, "pAddr=0x%lx invalidated: waking up core\n",
                    pkt->getAddr());
            waiting = false;
            return true;
        }
    }
    return false;
}

void
BaseCPU::scheduleLoadStop(ThreadID tid, Counter loads, const char *cause)
{
    const Tick now(comLoadEventQueue[tid]->getCurTick());
    Event *event(new LocalSimLoopExitEvent(cause, 0));

    comLoadEventQueue[tid]->schedule(event, now + loads);
}

void
BaseCPU::traceFunctionsInternal(Addr pc)
{
    if (!debugSymbolTable)
        return;

    // If the PC enters a different function, print the new function's
    // symbol and update the saved range; otherwise do nothing.
    if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
        string sym_str;
        bool found = debugSymbolTable->findNearestSymbol(pc, sym_str,
                                                         currentFunctionStart,
                                                         currentFunctionEnd);

        if (!found) {
            // no symbol found: use addr as label
            sym_str = csprintf("0x%x", pc);
            currentFunctionStart = pc;
            currentFunctionEnd = pc + 1;
        }

        ccprintf(*functionTraceStream, " (%d)\n%d: %s",
                 curTick() - functionEntryTick, curTick(), sym_str);
        functionEntryTick = curTick();
    }
}

bool
BaseCPU::waitForRemoteGDB() const
{
    return params()->wait_for_remote_gdb;
}