base.hh (gem5 revision 12276:22c220be30c5; previous revision 12127:4207df055b0d)
/*
 * Copyright (c) 2011-2013, 2017 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2011 Regents of the University of California
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Nathan Binkert
 *          Rick Strong
 */

#ifndef __CPU_BASE_HH__
#define __CPU_BASE_HH__

#include <vector>

// Before we do anything else, check if this build is the NULL ISA,
// and if so stop here
#include "config/the_isa.hh"
#if THE_ISA == NULL_ISA
#include "arch/null/cpu_dummy.hh"
#else
#include "arch/interrupts.hh"
#include "arch/isa_traits.hh"
#include "arch/microcode_rom.hh"
#include "base/statistics.hh"
#include "mem/mem_object.hh"
#include "sim/eventq.hh"
#include "sim/full_system.hh"
#include "sim/insttracer.hh"
#include "sim/probe/pmu.hh"
#include "sim/system.hh"
#include "debug/Mwait.hh"

class BaseCPU;
struct BaseCPUParams;
class CheckerCPU;
class ThreadContext;

struct AddressMonitor
{
    AddressMonitor();
    bool doMonitor(PacketPtr pkt);

    bool armed;
    Addr vAddr;
    Addr pAddr;
    uint64_t val;
    bool waiting;   // 0=normal, 1=mwaiting
    bool gotWakeup;
};

class CPUProgressEvent : public Event
{
  protected:
    Tick _interval;
    Counter lastNumInst;
    BaseCPU *cpu;
    bool _repeatEvent;

  public:
    CPUProgressEvent(BaseCPU *_cpu, Tick ival = 0);

    void process();

    void interval(Tick ival) { _interval = ival; }
    Tick interval() { return _interval; }

    void repeatEvent(bool repeat) { _repeatEvent = repeat; }

    virtual const char *description() const;
};
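
// Usage note (illustrative, not part of the original header): a minimal
// sketch of how a progress event might be set up, assuming `cpu` is a
// BaseCPU* and `interval_ticks` is a caller-chosen Tick value. BaseCPU
// itself normally creates this event from its own parameters.
//
//     CPUProgressEvent *ev = new CPUProgressEvent(cpu, interval_ticks);
//     ev->repeatEvent(true);                        // re-arm after each report
//     cpu->schedule(*ev, curTick() + interval_ticks);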

class BaseCPU : public MemObject
{
  protected:

    /// Instruction count used for SPARC misc register
    /// @todo unify this with the counters that cpus individually keep
    Tick instCnt;

    // Every cpu has an id, put it in the base cpu.
    // Set at initialization; the only time a cpuId might change is during a
    // takeover (which should be done from within the BaseCPU anyway,
    // therefore no setCpuId() method is provided).
    int _cpuId;

    /** Each cpu will have a socket ID that corresponds to its physical
     * location in the system. This is usually used to bucket cpu cores under
     * a single DVFS domain. This information may also be required by the OS
     * to identify the cpu core grouping (as in the case of ARM via the MPIDR
     * register)
     */
    const uint32_t _socketId;

    /** instruction side request id that must be placed in all requests */
    MasterID _instMasterId;

    /** data side request id that must be placed in all requests */
    MasterID _dataMasterId;

    /** An internal representation of a task identifier within gem5. This is
     * used so the CPU can add the taskId (an internal representation of the
     * OS process ID) to each request so components in the memory system
     * can track which process IDs are ultimately interacting with them
     */
    uint32_t _taskId;

    /** The current OS process ID that is executing on this processor. This is
     * used to generate a taskId */
    uint32_t _pid;

    /** Is the CPU switched out or active? */
    bool _switchedOut;

    /** Cache the cache line size that we get from the system */
    const unsigned int _cacheLineSize;

  public:

    /**
     * Pure virtual method that returns a reference to the data
     * port. All subclasses must implement this method.
     *
     * @return a reference to the data port
     */
    virtual MasterPort &getDataPort() = 0;

    /**
     * Pure virtual method that returns a reference to the instruction
     * port. All subclasses must implement this method.
     *
     * @return a reference to the instruction port
     */
    virtual MasterPort &getInstPort() = 0;

    /** Reads this CPU's ID. */
    int cpuId() const { return _cpuId; }

    /** Reads this CPU's Socket ID. */
    uint32_t socketId() const { return _socketId; }

    /** Reads this CPU's unique data requestor ID */
    MasterID dataMasterId() { return _dataMasterId; }
    /** Reads this CPU's unique instruction requestor ID */
    MasterID instMasterId() { return _instMasterId; }

    /**
     * Get a master port on this CPU. All CPUs have a data and
     * instruction port, and this method uses getDataPort and
     * getInstPort of the subclasses to resolve the two ports.
     *
     * @param if_name the port name
     * @param idx ignored index
     *
     * @return a reference to the port with the given name
     */
    BaseMasterPort &getMasterPort(const std::string &if_name,
                                  PortID idx = InvalidPortID) override;
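
    // Illustrative only (not from the original header): a subclass would
    // typically satisfy the two pure virtual accessors above by returning
    // its own port members; the names `dcachePort` and `icachePort` below
    // are assumptions, not required by BaseCPU.
    //
    //     MasterPort &getDataPort() override { return dcachePort; }
    //     MasterPort &getInstPort() override { return icachePort; }
    //
    // getMasterPort() then resolves a port name from the configuration to
    // one of these two references.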

    /** Get cpu task id */
    uint32_t taskId() const { return _taskId; }
    /** Set cpu task id */
    void taskId(uint32_t id) { _taskId = id; }

    uint32_t getPid() const { return _pid; }
    void setPid(uint32_t pid) { _pid = pid; }

    inline void workItemBegin() { numWorkItemsStarted++; }
    inline void workItemEnd() { numWorkItemsCompleted++; }
    // @todo remove me after debugging with legion done
    Tick instCount() { return instCnt; }

    TheISA::MicrocodeRom microcodeRom;

  protected:
    std::vector<TheISA::Interrupts*> interrupts;

  public:
    TheISA::Interrupts *
    getInterruptController(ThreadID tid)
    {
        if (interrupts.empty())
            return NULL;

        assert(interrupts.size() > tid);
        return interrupts[tid];
    }

    virtual void wakeup(ThreadID tid) = 0;

    void
    postInterrupt(ThreadID tid, int int_num, int index)
    {
        interrupts[tid]->post(int_num, index);
        if (FullSystem)
            wakeup(tid);
    }

    void
    clearInterrupt(ThreadID tid, int int_num, int index)
    {
        interrupts[tid]->clear(int_num, index);
    }

    void
    clearInterrupts(ThreadID tid)
    {
        interrupts[tid]->clearAll();
    }

    bool
    checkInterrupts(ThreadContext *tc) const
    {
        return FullSystem && interrupts[tc->threadId()]->checkInterrupts(tc);
    }
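
    // Usage sketch (illustrative, not part of the original header): an
    // interrupt source such as a platform device model would typically raise
    // and lower a line on a given thread through these helpers; `tid`,
    // `int_num` and `index` below are placeholder values.
    //
    //     cpu->postInterrupt(tid, int_num, index);   // assert the line
    //     cpu->clearInterrupt(tid, int_num, index);  // deassert it again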

    void processProfileEvent();
    EventFunctionWrapper * profileEvent;

  protected:
    std::vector<ThreadContext *> threadContexts;

    Trace::InstTracer * tracer;

  public:


    /** Invalid or unknown Pid. Possible when the operating system is not
     * present or has not assigned a pid yet */
    static const uint32_t invldPid = std::numeric_limits<uint32_t>::max();

    // Mask to align PCs to MachInst sized boundaries
    static const Addr PCMask = ~((Addr)sizeof(TheISA::MachInst) - 1);

    /// Provide access to the tracer pointer
    Trace::InstTracer * getTracer() { return tracer; }

    /// Notify the CPU that the indicated context is now active.
    virtual void activateContext(ThreadID thread_num);

    /// Notify the CPU that the indicated context is now suspended.
    /// Check if it is possible to enter a lower power state.
    virtual void suspendContext(ThreadID thread_num);

    /// Notify the CPU that the indicated context is now halted.
    virtual void haltContext(ThreadID thread_num) {}

    /// Given a Thread Context pointer return the thread num
    int findContext(ThreadContext *tc);

    /// Given a thread num get the thread context for it
    virtual ThreadContext *getContext(int tn) { return threadContexts[tn]; }

    /// Get the number of thread contexts available
    unsigned numContexts() { return threadContexts.size(); }

    /// Convert ContextID to threadID
    ThreadID contextToThread(ContextID cid)
    { return static_cast<ThreadID>(cid - threadContexts[0]->contextId()); }

  public:
    typedef BaseCPUParams Params;
    const Params *params() const
    { return reinterpret_cast<const Params *>(_params); }
    BaseCPU(Params *params, bool is_checker = false);
    virtual ~BaseCPU();

    void init() override;
    void startup() override;
    void regStats() override;

    void regProbePoints() override;

    void registerThreadContexts();

    // Functions to deschedule and reschedule the events to enter the
    // power gating sleep before and after checkpointing respectively.
    void deschedulePowerGatingEvent();
    void schedulePowerGatingEvent();

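    // Usage note (illustrative, an assumption about the intended flow rather
    // than code from this header): a draining/checkpointing sequence is
    // expected to call these around serialization so a pending power-gating
    // event is neither written into nor lost from the checkpoint.
    //
    //     cpu->deschedulePowerGatingEvent();   // before serialize()
    //     // ... write checkpoint ...
    //     cpu->schedulePowerGatingEvent();     // after resuming
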
    /**
     * Prepare for another CPU to take over execution.
     *
     * When this method exits, all internal state should have been
     * flushed. After the method returns, the simulator calls
     * takeOverFrom() on the new CPU with this CPU as its parameter.
     */
    virtual void switchOut();

    /**
     * Load the state of a CPU from the previous CPU object, invoked
     * on all new CPUs that are about to be switched in.
     *
     * A CPU model implementing this method is expected to initialize
     * its state from the old CPU and connect its memory (unless they
     * are already connected) to the memories connected to the old
     * CPU.
     *
     * @param cpu CPU to read and initialize state from.
     */
    virtual void takeOverFrom(BaseCPU *cpu);
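
    // Illustrative switch sequence (a sketch of typical use, based on the
    // documentation above rather than code in this header):
    //
    //     oldCPU->switchOut();
    //     newCPU->takeOverFrom(oldCPU);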

    /**
     * Flush all TLBs in the CPU.
     *
     * This method is mainly used to flush stale translations when
     * switching CPUs. It is also exported to the Python world to
     * allow it to request a TLB flush after draining the CPU to make
     * it easier to compare traces when debugging
     * handover/checkpointing.
     */
    void flushTLBs();

    /**
     * Determine if the CPU is switched out.
     *
     * @return True if the CPU is switched out, false otherwise.
     */
    bool switchedOut() const { return _switchedOut; }

    /**
     * Verify that the system is in a memory mode supported by the
     * CPU.
     *
     * Implementations are expected to query the system for the
     * current memory mode and ensure that it is what the CPU model
     * expects. If the check fails, the implementation should
     * terminate the simulation using fatal().
     */
    virtual void verifyMemoryMode() const { };

    /**
     * Number of threads we're actually simulating (<= SMT_MAX_THREADS).
     * This is a constant for the duration of the simulation.
     */
    ThreadID numThreads;

    /**
     * Vector of per-thread instruction-based event queues. Used for
     * scheduling events based on number of instructions committed by
     * a particular thread.
     */
    EventQueue **comInstEventQueue;

    /**
     * Vector of per-thread load-based event queues. Used for
     * scheduling events based on number of loads committed by
     * a particular thread.
     */
    EventQueue **comLoadEventQueue;

    System *system;

    /**
     * Get the cache line size of the system.
     */
    inline unsigned int cacheLineSize() const { return _cacheLineSize; }

    /**
     * Serialize this object to the given output stream.
     *
     * @note CPU models should normally overload the serializeThread()
     * method instead of the serialize() method as this provides a
     * uniform data format for all CPU models and promotes better code
     * reuse.
     *
     * @param os The stream to serialize to.
     */
    void serialize(CheckpointOut &cp) const override;

    /**
     * Reconstruct the state of this object from a checkpoint.
     *
     * @note CPU models should normally overload the
     * unserializeThread() method instead of the unserialize() method
     * as this provides a uniform data format for all CPU models and
     * promotes better code reuse.
     *
     * @param cp The checkpoint to use.
     * @param section The section name of this object.
     */
    void unserialize(CheckpointIn &cp) override;

    /**
     * Serialize a single thread.
     *
     * @param os The stream to serialize to.
     * @param tid ID of the current thread.
     */
    virtual void serializeThread(CheckpointOut &cp, ThreadID tid) const {};

    /**
     * Unserialize one thread.
     *
     * @param cp The checkpoint to use.
     * @param section The section name of this thread.
     * @param tid ID of the current thread.
     */
    virtual void unserializeThread(CheckpointIn &cp, ThreadID tid) {};
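
    // Illustrative only (an assumption about the intended pattern, not code
    // from this header): a CPU model overrides the per-thread hooks and lets
    // the base serialize()/unserialize() drive them once per thread, e.g.
    //
    //     void
    //     MyCPU::serializeThread(CheckpointOut &cp, ThreadID tid) const
    //     {
    //         thread[tid]->serialize(cp);  // `MyCPU` and `thread` are
    //                                      // hypothetical names
    //     }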

    virtual Counter totalInsts() const = 0;

    virtual Counter totalOps() const = 0;

    /**
     * Schedule an event that exits the simulation loops after a
     * predefined number of instructions.
     *
     * This method is usually called from the configuration script to
     * get an exit event some time in the future. It is typically used
     * when the script wants to simulate for a specific number of
     * instructions rather than ticks.
     *
     * @param tid Thread to monitor.
     * @param insts Number of instructions into the future.
     * @param cause Cause to signal in the exit event.
     */
    void scheduleInstStop(ThreadID tid, Counter insts, const char *cause);
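
    // Usage sketch (illustrative; the thread id, count and cause string are
    // made up): request an exit event once thread 0 has committed another
    // million instructions.
    //
    //     cpu->scheduleInstStop(0, 1000000, "max instruction count reached");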

    /**
     * Schedule an event that exits the simulation loops after a
     * predefined number of load operations.
     *
     * This method is usually called from the configuration script to
     * get an exit event some time in the future. It is typically used
     * when the script wants to simulate for a specific number of
     * loads rather than ticks.
     *
     * @param tid Thread to monitor.
     * @param loads Number of load instructions into the future.
     * @param cause Cause to signal in the exit event.
     */
    void scheduleLoadStop(ThreadID tid, Counter loads, const char *cause);

    /**
     * Get the number of instructions executed by the specified thread
     * on this CPU. Used by Python to control simulation.
     *
     * @param tid Thread to query.
     * @return Number of instructions executed.
     */
    uint64_t getCurrentInstCount(ThreadID tid);

  public:
    /**
     * @{
     * @name PMU Probe points.
     */

    /**
     * Helper method to trigger PMU probes for a committed
     * instruction.
     *
     * @param inst Instruction that just committed
     */
    virtual void probeInstCommit(const StaticInstPtr &inst);

    /**
     * Helper method to instantiate probe points belonging to this
     * object.
     *
     * @param name Name of the probe point.
     * @return A unique_ptr to the new probe point.
     */
    ProbePoints::PMUUPtr pmuProbePoint(const char *name);
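
    // Illustrative only (a sketch of how the helper is meant to be used;
    // the probe name below is an assumption): regProbePoints() can create
    // each PMU probe point through this factory, e.g.
    //
    //     ppRetiredInsts = pmuProbePoint("RetiredInsts");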

    /** CPU cycle counter */
    ProbePoints::PMUUPtr ppCycles;

    /**
     * Instruction commit probe point.
     *
     * This probe point is triggered whenever one or more instructions
     * are committed. It is normally triggered once for every
     * instruction. However, CPU models committing bundles of
     * instructions may call notify once for the entire bundle.
     */
    ProbePoints::PMUUPtr ppRetiredInsts;

    /** Retired load instructions */
    ProbePoints::PMUUPtr ppRetiredLoads;
    /** Retired store instructions */
    ProbePoints::PMUUPtr ppRetiredStores;

    /** Retired branches (any type) */
    ProbePoints::PMUUPtr ppRetiredBranches;

    /** @} */


    // Function tracing
  private:
    bool functionTracingEnabled;
    std::ostream *functionTraceStream;
    Addr currentFunctionStart;
    Addr currentFunctionEnd;
    Tick functionEntryTick;
    void enableFunctionTrace();
    void traceFunctionsInternal(Addr pc);

  private:
    static std::vector<BaseCPU *> cpuList;   //!< Static global cpu list

  public:
    void traceFunctions(Addr pc)
    {
        if (functionTracingEnabled)
            traceFunctionsInternal(pc);
    }

    static int numSimulatedCPUs() { return cpuList.size(); }
    static Counter numSimulatedInsts()
    {
        Counter total = 0;

        int size = cpuList.size();
        for (int i = 0; i < size; ++i)
            total += cpuList[i]->totalInsts();

        return total;
    }

    static Counter numSimulatedOps()
    {
        Counter total = 0;

        int size = cpuList.size();
        for (int i = 0; i < size; ++i)
            total += cpuList[i]->totalOps();

        return total;
    }

  public:
    // Number of CPU cycles simulated
    Stats::Scalar numCycles;
    Stats::Scalar numWorkItemsStarted;
    Stats::Scalar numWorkItemsCompleted;

  private:
    std::vector<AddressMonitor> addressMonitor;

  public:
    void armMonitor(ThreadID tid, Addr address);
    bool mwait(ThreadID tid, PacketPtr pkt);
    void mwaitAtomic(ThreadID tid, ThreadContext *tc, TheISA::TLB *dtb);
    AddressMonitor *getCpuAddrMonitor(ThreadID tid)
    {
        assert(tid < numThreads);
        return &addressMonitor[tid];
    }
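
    // Usage sketch (illustrative, not from this header): an ISA's
    // mwait-style instruction would first arm the per-thread monitor on the
    // address of interest and later wait on it; `tid`, `addr`, `tc` and
    // `dtb` below are placeholders.
    //
    //     cpu->armMonitor(tid, addr);      // remember the watched address
    //     cpu->mwaitAtomic(tid, tc, dtb);  // wait (atomic-mode variant)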

    bool waitForRemoteGDB() const;

    Cycles syscallRetryLatency;

    // Enables CPU to enter power gating on a configurable cycle count
  protected:
    const Cycles pwrGatingLatency;
    void enterPwrGating();
    EventFunctionWrapper enterPwrGatingEvent;
};

#endif // THE_ISA == NULL_ISA

#endif // __CPU_BASE_HH__