base.hh (14016:265e8272c728 → 14197:26cca0c29be6)
1/*
2 * Copyright (c) 2011-2013, 2017 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2011 Regents of the University of California
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Steve Reinhardt
42 * Nathan Binkert
43 * Rick Strong
44 */
45
46#ifndef __CPU_BASE_HH__
47#define __CPU_BASE_HH__
48
49#include <vector>
50
51// Before we do anything else, check if this build is the NULL ISA,
52// and if so stop here
53#include "config/the_isa.hh"
54#if THE_ISA == NULL_ISA
55#include "arch/null/cpu_dummy.hh"
56#else
57#include "arch/interrupts.hh"
58#include "arch/isa_traits.hh"
59#include "arch/microcode_rom.hh"
60#include "base/statistics.hh"
61#include "sim/clocked_object.hh"
62#include "sim/eventq.hh"
63#include "sim/full_system.hh"
64#include "sim/insttracer.hh"
65#include "sim/probe/pmu.hh"
66#include "sim/probe/probe.hh"
67#include "sim/system.hh"
68#include "debug/Mwait.hh"
69
70class BaseCPU;
71struct BaseCPUParams;
72class CheckerCPU;
73class ThreadContext;
74
75struct AddressMonitor
76{
77 AddressMonitor();
78 bool doMonitor(PacketPtr pkt);
79
80 bool armed;
81 Addr vAddr;
82 Addr pAddr;
83 uint64_t val;
84 bool waiting; // 0=normal, 1=mwaiting
85 bool gotWakeup;
86};
87
88class CPUProgressEvent : public Event
89{
90 protected:
91 Tick _interval;
92 Counter lastNumInst;
93 BaseCPU *cpu;
94 bool _repeatEvent;
95
96 public:
97 CPUProgressEvent(BaseCPU *_cpu, Tick ival = 0);
98
99 void process();
100
101 void interval(Tick ival) { _interval = ival; }
102 Tick interval() { return _interval; }
103
104 void repeatEvent(bool repeat) { _repeatEvent = repeat; }
105
106 virtual const char *description() const;
107};
108
109class BaseCPU : public ClockedObject
110{
111 protected:
112
113 /// Instruction count used for SPARC misc register
114 /// @todo unify this with the counters that cpus individually keep
115 Tick instCnt;
116
117 // every cpu has an id, put it in the base cpu
 118    // Set at initialization; the only time a cpuId might change is during a
 119    // takeover (which should be done from within the BaseCPU anyway,
 120    // therefore no setCpuId() method is provided).
121 int _cpuId;
122
123 /** Each cpu will have a socket ID that corresponds to its physical location
 124     * in the system. This is usually used to bucket cpu cores under a single DVFS
125 * domain. This information may also be required by the OS to identify the
126 * cpu core grouping (as in the case of ARM via MPIDR register)
127 */
128 const uint32_t _socketId;
129
130 /** instruction side request id that must be placed in all requests */
131 MasterID _instMasterId;
132
133 /** data side request id that must be placed in all requests */
134 MasterID _dataMasterId;
135
 136    /** An internal representation of a task identifier within gem5. This is
 137     * used so the CPU can tag each request with its taskId (an internal
 138     * representation of the OS process ID) so components in the memory system
139 * can track which process IDs are ultimately interacting with them
140 */
141 uint32_t _taskId;
142
143 /** The current OS process ID that is executing on this processor. This is
144 * used to generate a taskId */
145 uint32_t _pid;
146
147 /** Is the CPU switched out or active? */
148 bool _switchedOut;
149
150 /** Cache the cache line size that we get from the system */
151 const unsigned int _cacheLineSize;
152
153 public:
154
155 /**
156 * Purely virtual method that returns a reference to the data
157 * port. All subclasses must implement this method.
158 *
159 * @return a reference to the data port
160 */
161 virtual MasterPort &getDataPort() = 0;
162
163 /**
164 * Returns a sendFunctional delegate for use with port proxies.
165 */
166 virtual PortProxy::SendFunctionalFunc
167 getSendFunctional()
168 {
169 MasterPort &port = getDataPort();
170 return [&port](PacketPtr pkt)->void { port.sendFunctional(pkt); };
171 }
172
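    // Illustrative sketch (not part of the original header): the delegate
    // returned by getSendFunctional() can seed a PortProxy so functional
    // accesses go through this CPU's data port. This assumes a PortProxy
    // constructor taking a SendFunctionalFunc and a cache line size; `addr`
    // and `buf` are hypothetical.
    //
    //     PortProxy proxy(getSendFunctional(), cacheLineSize());
    //     uint8_t buf[64];
    //     proxy.readBlob(addr, buf, sizeof(buf));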
173 /**
174 * Purely virtual method that returns a reference to the instruction
175 * port. All subclasses must implement this method.
176 *
177 * @return a reference to the instruction port
178 */
179 virtual MasterPort &getInstPort() = 0;
180
181 /** Reads this CPU's ID. */
182 int cpuId() const { return _cpuId; }
183
184 /** Reads this CPU's Socket ID. */
185 uint32_t socketId() const { return _socketId; }
186
187 /** Reads this CPU's unique data requestor ID */
188 MasterID dataMasterId() const { return _dataMasterId; }
189 /** Reads this CPU's unique instruction requestor ID */
190 MasterID instMasterId() const { return _instMasterId; }
191
192 /**
193 * Get a port on this CPU. All CPUs have a data and
194 * instruction port, and this method uses getDataPort and
195 * getInstPort of the subclasses to resolve the two ports.
196 *
197 * @param if_name the port name
198 * @param idx ignored index
199 *
200 * @return a reference to the port with the given name
201 */
202 Port &getPort(const std::string &if_name,
203 PortID idx=InvalidPortID) override;
204
205 /** Get cpu task id */
206 uint32_t taskId() const { return _taskId; }
207 /** Set cpu task id */
208 void taskId(uint32_t id) { _taskId = id; }
209
210 uint32_t getPid() const { return _pid; }
211 void setPid(uint32_t pid) { _pid = pid; }
212
213 inline void workItemBegin() { numWorkItemsStarted++; }
214 inline void workItemEnd() { numWorkItemsCompleted++; }
215 // @todo remove me after debugging with legion done
216 Tick instCount() { return instCnt; }
217
218 TheISA::MicrocodeRom microcodeRom;
219
220 protected:
221 std::vector<TheISA::Interrupts*> interrupts;
222
223 public:
224 TheISA::Interrupts *
225 getInterruptController(ThreadID tid)
226 {
227 if (interrupts.empty())
228 return NULL;
229
230 assert(interrupts.size() > tid);
231 return interrupts[tid];
232 }
233
234 virtual void wakeup(ThreadID tid) = 0;
235
236 void
237 postInterrupt(ThreadID tid, int int_num, int index)
238 {
239 interrupts[tid]->post(int_num, index);
240 if (FullSystem)
241 wakeup(tid);
242 }
243
244 void
245 clearInterrupt(ThreadID tid, int int_num, int index)
246 {
247 interrupts[tid]->clear(int_num, index);
248 }
249
250 void
251 clearInterrupts(ThreadID tid)
252 {
253 interrupts[tid]->clearAll();
254 }
255
256 bool
257 checkInterrupts(ThreadContext *tc) const
258 {
259 return FullSystem && interrupts[tc->threadId()]->checkInterrupts(tc);
260 }
261
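    // Illustrative sketch (not from the original source): how platform or
    // ISA code might deliver and later clear an interrupt on thread 0 using
    // the helpers above. The int_num/index values are hypothetical and
    // ISA-specific.
    //
    //     cpu->postInterrupt(/* tid */ 0, /* int_num */ 1, /* index */ 0);
    //     ...
    //     cpu->clearInterrupt(0, 1, 0);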
262 void processProfileEvent();
263 EventFunctionWrapper * profileEvent;
264
265 protected:
266 std::vector<ThreadContext *> threadContexts;
267
268 Trace::InstTracer * tracer;
269
270 public:
271
272
273 /** Invalid or unknown Pid. Possible when operating system is not present
274 * or has not assigned a pid yet */
275 static const uint32_t invldPid = std::numeric_limits<uint32_t>::max();
276
277 // Mask to align PCs to MachInst sized boundaries
278 static const Addr PCMask = ~((Addr)sizeof(TheISA::MachInst) - 1);
279
280 /// Provide access to the tracer pointer
281 Trace::InstTracer * getTracer() { return tracer; }
282
283 /// Notify the CPU that the indicated context is now active.
284 virtual void activateContext(ThreadID thread_num);
285
286 /// Notify the CPU that the indicated context is now suspended.
287 /// Check if possible to enter a lower power state
288 virtual void suspendContext(ThreadID thread_num);
289
290 /// Notify the CPU that the indicated context is now halted.
291 virtual void haltContext(ThreadID thread_num);
292
293 /// Given a Thread Context pointer return the thread num
294 int findContext(ThreadContext *tc);
295
 296    /// Given a thread num, get the thread context for it
297 virtual ThreadContext *getContext(int tn) { return threadContexts[tn]; }
298
299 /// Get the number of thread contexts available
300 unsigned numContexts() {
301 return static_cast<unsigned>(threadContexts.size());
302 }
303
304 /// Convert ContextID to threadID
305 ThreadID contextToThread(ContextID cid)
306 { return static_cast<ThreadID>(cid - threadContexts[0]->contextId()); }
307
308 public:
309 typedef BaseCPUParams Params;
310 const Params *params() const
311 { return reinterpret_cast<const Params *>(_params); }
312 BaseCPU(Params *params, bool is_checker = false);
313 virtual ~BaseCPU();
314
315 void init() override;
316 void startup() override;
317 void regStats() override;
318
319 void regProbePoints() override;
320
321 void registerThreadContexts();
322
323 // Functions to deschedule and reschedule the events to enter the
 324    // power gating sleep before and after checkpointing respectively.
325 void deschedulePowerGatingEvent();
326 void schedulePowerGatingEvent();
327
328 /**
329 * Prepare for another CPU to take over execution.
330 *
331 * When this method exits, all internal state should have been
332 * flushed. After the method returns, the simulator calls
333 * takeOverFrom() on the new CPU with this CPU as its parameter.
334 */
335 virtual void switchOut();
336
337 /**
338 * Load the state of a CPU from the previous CPU object, invoked
339 * on all new CPUs that are about to be switched in.
340 *
341 * A CPU model implementing this method is expected to initialize
342 * its state from the old CPU and connect its memory (unless they
343 * are already connected) to the memories connected to the old
344 * CPU.
345 *
346 * @param cpu CPU to initialize read state from.
347 */
348 virtual void takeOverFrom(BaseCPU *cpu);
349
350 /**
351 * Flush all TLBs in the CPU.
352 *
353 * This method is mainly used to flush stale translations when
354 * switching CPUs. It is also exported to the Python world to
355 * allow it to request a TLB flush after draining the CPU to make
356 * it easier to compare traces when debugging
357 * handover/checkpointing.
358 */
359 void flushTLBs();
360
361 /**
362 * Determine if the CPU is switched out.
363 *
364 * @return True if the CPU is switched out, false otherwise.
365 */
366 bool switchedOut() const { return _switchedOut; }
367
368 /**
369 * Verify that the system is in a memory mode supported by the
370 * CPU.
371 *
372 * Implementations are expected to query the system for the
373 * current memory mode and ensure that it is what the CPU model
374 * expects. If the check fails, the implementation should
375 * terminate the simulation using fatal().
376 */
377 virtual void verifyMemoryMode() const { };
378
379 /**
380 * Number of threads we're actually simulating (<= SMT_MAX_THREADS).
381 * This is a constant for the duration of the simulation.
382 */
383 ThreadID numThreads;
384
385 /**
386 * Vector of per-thread instruction-based event queues. Used for
387 * scheduling events based on number of instructions committed by
388 * a particular thread.
389 */
390 EventQueue **comInstEventQueue;
391
392 /**
393 * Vector of per-thread load-based event queues. Used for
394 * scheduling events based on number of loads committed by
 395     * a particular thread.
396 */
397 EventQueue **comLoadEventQueue;
398
399 System *system;
400
401 /**
402 * Get the cache line size of the system.
403 */
404 inline unsigned int cacheLineSize() const { return _cacheLineSize; }
405
406 /**
407 * Serialize this object to the given output stream.
408 *
409 * @note CPU models should normally overload the serializeThread()
410 * method instead of the serialize() method as this provides a
411 * uniform data format for all CPU models and promotes better code
412 * reuse.
413 *
414 * @param cp The stream to serialize to.
415 */
416 void serialize(CheckpointOut &cp) const override;
417
418 /**
419 * Reconstruct the state of this object from a checkpoint.
420 *
421 * @note CPU models should normally overload the
422 * unserializeThread() method instead of the unserialize() method
423 * as this provides a uniform data format for all CPU models and
424 * promotes better code reuse.
 425     *
 426     * @param cp The checkpoint to use.
427 */
428 void unserialize(CheckpointIn &cp) override;
429
430 /**
431 * Serialize a single thread.
432 *
433 * @param cp The stream to serialize to.
434 * @param tid ID of the current thread.
435 */
436 virtual void serializeThread(CheckpointOut &cp, ThreadID tid) const {};
437
438 /**
439 * Unserialize one thread.
440 *
 441     * @param cp The checkpoint to use.
442 * @param tid ID of the current thread.
443 */
444 virtual void unserializeThread(CheckpointIn &cp, ThreadID tid) {};
445
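    // Illustrative sketch (hypothetical DerivedCPU): per the notes above,
    // CPU models normally override serializeThread()/unserializeThread()
    // rather than serialize() itself; `thread` is a hypothetical per-thread
    // state object.
    //
    //     void
    //     DerivedCPU::serializeThread(CheckpointOut &cp, ThreadID tid) const
    //     {
    //         thread[tid]->serialize(cp);
    //     }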
446 virtual Counter totalInsts() const = 0;
447
448 virtual Counter totalOps() const = 0;
449
450 /**
451 * Schedule an event that exits the simulation loops after a
452 * predefined number of instructions.
453 *
454 * This method is usually called from the configuration script to
455 * get an exit event some time in the future. It is typically used
456 * when the script wants to simulate for a specific number of
457 * instructions rather than ticks.
458 *
 459     * @param tid Thread to monitor.
460 * @param insts Number of instructions into the future.
461 * @param cause Cause to signal in the exit event.
462 */
463 void scheduleInstStop(ThreadID tid, Counter insts, const char *cause);
464
465 /**
466 * Schedule an event that exits the simulation loops after a
467 * predefined number of load operations.
468 *
469 * This method is usually called from the configuration script to
470 * get an exit event some time in the future. It is typically used
471 * when the script wants to simulate for a specific number of
472 * loads rather than ticks.
473 *
 474     * @param tid Thread to monitor.
475 * @param loads Number of load instructions into the future.
476 * @param cause Cause to signal in the exit event.
477 */
478 void scheduleLoadStop(ThreadID tid, Counter loads, const char *cause);
479
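    // Illustrative sketch (not from the original source): requesting exit
    // events after a fixed instruction or load count on thread 0; the
    // counts and cause strings are hypothetical.
    //
    //     cpu->scheduleInstStop(0, 1000000, "reached instruction limit");
    //     cpu->scheduleLoadStop(0, 500000, "reached load limit");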
480 /**
481 * Get the number of instructions executed by the specified thread
482 * on this CPU. Used by Python to control simulation.
483 *
 484     * @param tid Thread to monitor
485 * @return Number of instructions executed
486 */
487 uint64_t getCurrentInstCount(ThreadID tid);
488
489 public:
490 /**
491 * @{
492 * @name PMU Probe points.
493 */
494
495 /**
496 * Helper method to trigger PMU probes for a committed
497 * instruction.
498 *
499 * @param inst Instruction that just committed
500 * @param pc PC of the instruction that just committed
501 */
502 virtual void probeInstCommit(const StaticInstPtr &inst, Addr pc);
503
504 protected:
505 /**
506 * Helper method to instantiate probe points belonging to this
507 * object.
508 *
509 * @param name Name of the probe point.
510 * @return A unique_ptr to the new probe point.
511 */
512 ProbePoints::PMUUPtr pmuProbePoint(const char *name);
513
514 /**
515 * Instruction commit probe point.
516 *
517 * This probe point is triggered whenever one or more instructions
518 * are committed. It is normally triggered once for every
519 * instruction. However, CPU models committing bundles of
520 * instructions may call notify once for the entire bundle.
521 */
522 ProbePoints::PMUUPtr ppRetiredInsts;
523 ProbePoints::PMUUPtr ppRetiredInstsPC;
524
525 /** Retired load instructions */
526 ProbePoints::PMUUPtr ppRetiredLoads;
527 /** Retired store instructions */
528 ProbePoints::PMUUPtr ppRetiredStores;
529
530 /** Retired branches (any type) */
531 ProbePoints::PMUUPtr ppRetiredBranches;
532
 533    /** CPU cycle counter; counts even when thread contexts are suspended */
534 ProbePoints::PMUUPtr ppAllCycles;
535
 536    /** CPU cycle counter; only counts when at least one thread context is active */
537 ProbePoints::PMUUPtr ppActiveCycles;
538
539 /**
540 * ProbePoint that signals transitions of threadContexts sets.
 541     * The ProbePoint reports information through its bool parameter.
542 * - If the parameter is true then the last enabled threadContext of the
543 * CPU object was disabled.
 544     * - If the parameter is false then a threadContext was enabled while all
 545     *   the remaining threadContexts were disabled.
546 */
547 ProbePointArg<bool> *ppSleeping;
548 /** @} */
549
550 enum CPUState {
551 CPU_STATE_ON,
552 CPU_STATE_SLEEP,
553 CPU_STATE_WAKEUP
554 };
555
556 Cycles previousCycle;
557 CPUState previousState;
558
 559    /** Base method keeping track of cycle progression. */
560 inline void updateCycleCounters(CPUState state)
561 {
562 uint32_t delta = curCycle() - previousCycle;
563
564 if (previousState == CPU_STATE_ON) {
565 ppActiveCycles->notify(delta);
566 }
567
568 switch (state)
569 {
570 case CPU_STATE_WAKEUP:
571 ppSleeping->notify(false);
572 break;
573 case CPU_STATE_SLEEP:
574 ppSleeping->notify(true);
575 break;
576 default:
577 break;
578 }
579
580 ppAllCycles->notify(delta);
581
582 previousCycle = curCycle();
583 previousState = state;
584 }
585
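    // Illustrative sketch (not from the original source): a CPU model would
    // typically call updateCycleCounters(CPU_STATE_ON) from its tick/cycle
    // handler while running, and with CPU_STATE_SLEEP / CPU_STATE_WAKEUP when
    // suspending or resuming; the exact call sites are model-specific and
    // DerivedCPU is hypothetical.
    //
    //     void DerivedCPU::tick() {
    //         updateCycleCounters(CPU_STATE_ON);
    //         ...
    //     }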
586 // Function tracing
587 private:
588 bool functionTracingEnabled;
589 std::ostream *functionTraceStream;
590 Addr currentFunctionStart;
591 Addr currentFunctionEnd;
592 Tick functionEntryTick;
593 void enableFunctionTrace();
594 void traceFunctionsInternal(Addr pc);
595
596 private:
597 static std::vector<BaseCPU *> cpuList; //!< Static global cpu list
598
599 public:
600 void traceFunctions(Addr pc)
601 {
602 if (functionTracingEnabled)
603 traceFunctionsInternal(pc);
604 }
605
606 static int numSimulatedCPUs() { return cpuList.size(); }
607 static Counter numSimulatedInsts()
608 {
609 Counter total = 0;
610
611 int size = cpuList.size();
612 for (int i = 0; i < size; ++i)
613 total += cpuList[i]->totalInsts();
614
615 return total;
616 }
617
618 static Counter numSimulatedOps()
619 {
620 Counter total = 0;
621
622 int size = cpuList.size();
623 for (int i = 0; i < size; ++i)
624 total += cpuList[i]->totalOps();
625
626 return total;
627 }
628
629 public:
630 // Number of CPU cycles simulated
631 Stats::Scalar numCycles;
632 Stats::Scalar numWorkItemsStarted;
633 Stats::Scalar numWorkItemsCompleted;
634
635 private:
636 std::vector<AddressMonitor> addressMonitor;
637
638 public:
639 void armMonitor(ThreadID tid, Addr address);
640 bool mwait(ThreadID tid, PacketPtr pkt);
641 void mwaitAtomic(ThreadID tid, ThreadContext *tc, BaseTLB *dtb);
642 AddressMonitor *getCpuAddrMonitor(ThreadID tid)
643 {
644 assert(tid < numThreads);
645 return &addressMonitor[tid];
646 }
647
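    // Illustrative sketch (not from the original source): the call pattern of
    // a MONITOR/MWAIT-style sequence using the helpers above. The meaning of
    // mwait()'s return value is model-specific; only the shape is shown here,
    // and `vaddr`/`pkt` are hypothetical.
    //
    //     cpu->armMonitor(tid, vaddr);           // monitor step
    //     ...
    //     if (cpu->mwait(tid, pkt)) { ... }      // mwait step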
648 bool waitForRemoteGDB() const;
649
650 Cycles syscallRetryLatency;
651
652 // Enables CPU to enter power gating on a configurable cycle count
653 protected:
654 void enterPwrGating();
655
656 const Cycles pwrGatingLatency;
657 const bool powerGatingOnIdle;
658 EventFunctionWrapper enterPwrGatingEvent;
659};
660
661#endif // THE_ISA == NULL_ISA
662
663#endif // __CPU_BASE_HH__