/*
 * Copyright (c) 2011-2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2011 Regents of the University of California
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Nathan Binkert
 *          Rick Strong
 */

#ifndef __CPU_BASE_HH__
#define __CPU_BASE_HH__

#include <vector>

#include "arch/interrupts.hh"
#include "arch/isa_traits.hh"
#include "arch/microcode_rom.hh"
#include "base/statistics.hh"
#include "config/the_isa.hh"
#include "mem/mem_object.hh"
#include "sim/eventq.hh"
#include "sim/full_system.hh"
#include "sim/insttracer.hh"
#include "sim/system.hh"

struct BaseCPUParams;
class BranchPred;
class CheckerCPU;
class ThreadContext;

class CPUProgressEvent : public Event
{
  protected:
    Tick _interval;
    Counter lastNumInst;
    BaseCPU *cpu;
    bool _repeatEvent;

  public:
    CPUProgressEvent(BaseCPU *_cpu, Tick ival = 0);

    void process();

    void interval(Tick ival) { _interval = ival; }
    Tick interval() { return _interval; }

    void repeatEvent(bool repeat) { _repeatEvent = repeat; }

    virtual const char *description() const;
};

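// Illustrative usage sketch (not part of the original header): a CPU model
// wanting periodic progress reports would typically create one of these
// events with an interval in ticks. The names below are hypothetical and the
// exact scheduling behaviour is defined in base.cc, not restated here.
//
//     CPUProgressEvent *ev = new CPUProgressEvent(this, progressInterval);
//     ev->repeatEvent(true);   // ask the event to re-arm itself each interval
//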
class BaseCPU : public MemObject
{
  protected:

    // @todo remove me after debugging with legion done
    Tick instCnt;
    // Every CPU has an ID; put it in the base CPU. It is set at
    // initialization. The only time a cpuId might change is during a
    // takeover (which should be done from within the BaseCPU anyway),
    // therefore no setCpuId() method is provided.
    int _cpuId;

    /** Instruction-side request id that must be placed in all requests */
    MasterID _instMasterId;

    /** Data-side request id that must be placed in all requests */
    MasterID _dataMasterId;

    /** An internal representation of a task identifier within gem5. This is
     * used so the CPU can add the taskId (an internal representation of the
     * OS process ID) to each request, allowing components in the memory
     * system to track which process IDs are ultimately interacting with them.
     */
    uint32_t _taskId;

    /** The current OS process ID that is executing on this processor. This is
     * used to generate the taskId. */
    uint32_t _pid;

    /** Is the CPU switched out or active? */
    bool _switchedOut;

    /** Cache the cache line size that we get from the system */
    const unsigned int _cacheLineSize;

  public:

    /**
     * Pure virtual method that returns a reference to the data
     * port. All subclasses must implement this method.
     *
     * @return a reference to the data port
     */
    virtual MasterPort &getDataPort() = 0;

    /**
     * Pure virtual method that returns a reference to the instruction
     * port. All subclasses must implement this method.
     *
     * @return a reference to the instruction port
     */
    virtual MasterPort &getInstPort() = 0;

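    // Illustrative only (not from the original header): a concrete CPU model
    // satisfies the two pure virtual port accessors above by returning its
    // own port members. Class and member names here are hypothetical.
    //
    //     class MyCPU : public BaseCPU
    //     {
    //         MyCPUPort icachePort;   // some concrete MasterPort subclass
    //         MyCPUPort dcachePort;
    //       public:
    //         MasterPort &getInstPort() { return icachePort; }
    //         MasterPort &getDataPort() { return dcachePort; }
    //     };
    //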
    /** Reads this CPU's ID. */
    int cpuId() { return _cpuId; }

    /** Reads this CPU's unique data requestor ID */
    MasterID dataMasterId() { return _dataMasterId; }
    /** Reads this CPU's unique instruction requestor ID */
    MasterID instMasterId() { return _instMasterId; }

    /**
     * Get a master port on this CPU. All CPUs have a data and
     * instruction port, and this method uses getDataPort and
     * getInstPort of the subclasses to resolve the two ports.
     *
     * @param if_name the port name
     * @param idx ignored index
     *
     * @return a reference to the port with the given name
     */
    BaseMasterPort &getMasterPort(const std::string &if_name,
                                  PortID idx = InvalidPortID);

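    // Rough sketch of the name-based resolution described above; the real
    // implementation lives in base.cc and the port names used here are an
    // assumption, not restated from this header.
    //
    //     if (if_name == "dcache_port")
    //         return getDataPort();
    //     else if (if_name == "icache_port")
    //         return getInstPort();
    //     else
    //         return MemObject::getMasterPort(if_name, idx);
    //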
    /** Get cpu task id */
    uint32_t taskId() const { return _taskId; }
    /** Set cpu task id */
    void taskId(uint32_t id) { _taskId = id; }

    uint32_t getPid() const { return _pid; }
    void setPid(uint32_t pid) { _pid = pid; }

    inline void workItemBegin() { numWorkItemsStarted++; }
    inline void workItemEnd() { numWorkItemsCompleted++; }
    // @todo remove me after debugging with legion done
    Tick instCount() { return instCnt; }

    TheISA::MicrocodeRom microcodeRom;

  protected:
    TheISA::Interrupts *interrupts;

  public:
    TheISA::Interrupts *
    getInterruptController()
    {
        return interrupts;
    }

    virtual void wakeup() = 0;

    void
    postInterrupt(int int_num, int index)
    {
        interrupts->post(int_num, index);
        if (FullSystem)
            wakeup();
    }

    void
    clearInterrupt(int int_num, int index)
    {
        interrupts->clear(int_num, index);
    }

    void
    clearInterrupts()
    {
        interrupts->clearAll();
    }

    bool
    checkInterrupts(ThreadContext *tc) const
    {
        return FullSystem && interrupts->checkInterrupts(tc);
    }

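    // Illustrative only: a device or platform model holding a BaseCPU pointer
    // would raise and later lower an interrupt line roughly as follows
    // (cpu, int_num and index are assumed to be valid in the caller's scope):
    //
    //     cpu->postInterrupt(int_num, index);   // post; also wakes the CPU
    //                                           // in full-system mode
    //     ...
    //     cpu->clearInterrupt(int_num, index);  // lower the line again
    //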
    class ProfileEvent : public Event
    {
      private:
        BaseCPU *cpu;
        Tick interval;

      public:
        ProfileEvent(BaseCPU *cpu, Tick interval);
        void process();
    };
    ProfileEvent *profileEvent;

  protected:
    std::vector<ThreadContext *> threadContexts;

    Trace::InstTracer * tracer;

  public:

    // Mask to align PCs to MachInst sized boundaries
    static const Addr PCMask = ~((Addr)sizeof(TheISA::MachInst) - 1);

    /// Provide access to the tracer pointer
    Trace::InstTracer * getTracer() { return tracer; }

    /// Notify the CPU that the indicated context is now active. The
    /// delay parameter indicates the number of ticks to wait before
    /// executing (typically 0 or 1).
    virtual void activateContext(ThreadID thread_num, Cycles delay) {}

    /// Notify the CPU that the indicated context is now suspended.
    virtual void suspendContext(ThreadID thread_num) {}

    /// Notify the CPU that the indicated context is now deallocated.
    virtual void deallocateContext(ThreadID thread_num) {}

    /// Notify the CPU that the indicated context is now halted.
    virtual void haltContext(ThreadID thread_num) {}

    /// Given a ThreadContext pointer, return the thread number.
    int findContext(ThreadContext *tc);

    /// Given a thread number, get the thread context for it.
    virtual ThreadContext *getContext(int tn) { return threadContexts[tn]; }

  public:
    typedef BaseCPUParams Params;
    const Params *params() const
    { return reinterpret_cast<const Params *>(_params); }
    BaseCPU(Params *params, bool is_checker = false);
    virtual ~BaseCPU();

    virtual void init();
    virtual void startup();
    virtual void regStats();

    virtual void activateWhenReady(ThreadID tid) {};

    void registerThreadContexts();

    /**
     * Prepare for another CPU to take over execution.
     *
     * When this method exits, all internal state should have been
     * flushed. After the method returns, the simulator calls
     * takeOverFrom() on the new CPU with this CPU as its parameter.
     */
    virtual void switchOut();

    /**
     * Load the state of a CPU from the previous CPU object, invoked
     * on all new CPUs that are about to be switched in.
     *
     * A CPU model implementing this method is expected to initialize
     * its state from the old CPU and connect its memory (unless they
     * are already connected) to the memories connected to the old
     * CPU.
     *
     * @param cpu CPU to read state from.
     */
    virtual void takeOverFrom(BaseCPU *cpu);

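    // Hedged sketch of the switch-over sequence implied by the two methods
    // above, typically driven from the Python configuration after draining
    // (oldCPU/newCPU are hypothetical names):
    //
    //     oldCPU->switchOut();            // flush the old CPU's internal state
    //     newCPU->takeOverFrom(oldCPU);   // copy state, reconnect memory
    //
    // A TLB flush (flushTLBs(), below) may additionally be requested from
    // Python to make traces easier to compare when debugging the handover.
    //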
    /**
     * Flush all TLBs in the CPU.
     *
     * This method is mainly used to flush stale translations when
     * switching CPUs. It is also exported to the Python world to
     * allow it to request a TLB flush after draining the CPU to make
     * it easier to compare traces when debugging
     * handover/checkpointing.
     */
    void flushTLBs();

    /**
     * Determine if the CPU is switched out.
     *
     * @return True if the CPU is switched out, false otherwise.
     */
    bool switchedOut() const { return _switchedOut; }

    /**
     * Verify that the system is in a memory mode supported by the
     * CPU.
     *
     * Implementations are expected to query the system for the
     * current memory mode and ensure that it is what the CPU model
     * expects. If the check fails, the implementation should
     * terminate the simulation using fatal().
     */
    virtual void verifyMemoryMode() const { };

    /**
     * Number of threads we're actually simulating (<= SMT_MAX_THREADS).
     * This is a constant for the duration of the simulation.
     */
    ThreadID numThreads;

    /**
     * Vector of per-thread instruction-based event queues. Used for
     * scheduling events based on number of instructions committed by
     * a particular thread.
     */
    EventQueue **comInstEventQueue;

    /**
     * Vector of per-thread load-based event queues. Used for
     * scheduling events based on number of loads committed by
     * a particular thread.
     */
    EventQueue **comLoadEventQueue;

    System *system;

    /**
     * Get the cache line size of the system.
     */
    inline unsigned int cacheLineSize() const { return _cacheLineSize; }

    /**
     * Serialize this object to the given output stream.
     *
     * @note CPU models should normally overload the serializeThread()
     * method instead of the serialize() method as this provides a
     * uniform data format for all CPU models and promotes better code
     * reuse.
     *
     * @param os The stream to serialize to.
     */
    virtual void serialize(std::ostream &os);

    /**
     * Reconstruct the state of this object from a checkpoint.
     *
     * @note CPU models should normally overload the
     * unserializeThread() method instead of the unserialize() method
     * as this provides a uniform data format for all CPU models and
     * promotes better code reuse.
     *
     * @param cp The checkpoint to use.
     * @param section The section name of this object.
     */
    virtual void unserialize(Checkpoint *cp, const std::string &section);

    /**
     * Serialize a single thread.
     *
     * @param os The stream to serialize to.
     * @param tid ID of the current thread.
     */
    virtual void serializeThread(std::ostream &os, ThreadID tid) {};

    /**
     * Unserialize one thread.
     *
     * @param cp The checkpoint to use.
     * @param section The section name of this thread.
     * @param tid ID of the current thread.
     */
    virtual void unserializeThread(Checkpoint *cp, const std::string &section,
                                   ThreadID tid) {};

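    // Illustrative only: rather than overriding serialize()/unserialize()
    // directly, a CPU model would normally implement the per-thread hooks
    // above. The thread[] member below is hypothetical.
    //
    //     void
    //     MyCPU::serializeThread(std::ostream &os, ThreadID tid)
    //     {
    //         thread[tid]->serialize(os);   // per-thread architectural state
    //     }
    //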
    /**
     * Return pointer to CPU's branch predictor (NULL if none).
     * @return Branch predictor pointer.
     */
    virtual BranchPred *getBranchPred() { return NULL; };

    virtual Counter totalInsts() const = 0;

    virtual Counter totalOps() const = 0;

    /**
     * Schedule an event that exits the simulation loops after a
     * predefined number of instructions.
     *
     * This method is usually called from the configuration script to
     * get an exit event some time in the future. It is typically used
     * when the script wants to simulate for a specific number of
     * instructions rather than ticks.
     *
     * @param tid Thread to monitor.
     * @param insts Number of instructions into the future.
     * @param cause Cause to signal in the exit event.
     */
    void scheduleInstStop(ThreadID tid, Counter insts, const char *cause);

    /**
     * Schedule an event that exits the simulation loops after a
     * predefined number of load operations.
     *
     * This method is usually called from the configuration script to
     * get an exit event some time in the future. It is typically used
     * when the script wants to simulate for a specific number of
     * loads rather than ticks.
     *
     * @param tid Thread to monitor.
     * @param loads Number of load instructions into the future.
     * @param cause Cause to signal in the exit event.
     */
    void scheduleLoadStop(ThreadID tid, Counter loads, const char *cause);

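    // Hedged usage sketch for the two helpers above: a caller driven by the
    // configuration script that wants to stop after a fixed number of
    // committed instructions or loads on thread 0 might do (the cause strings
    // are examples only):
    //
    //     cpu->scheduleInstStop(0, max_insts, "reached instruction limit");
    //     cpu->scheduleLoadStop(0, max_loads, "reached load limit");
    //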
    // Function tracing
  private:
    bool functionTracingEnabled;
    std::ostream *functionTraceStream;
    Addr currentFunctionStart;
    Addr currentFunctionEnd;
    Tick functionEntryTick;
    void enableFunctionTrace();
    void traceFunctionsInternal(Addr pc);

  private:
    static std::vector<BaseCPU *> cpuList;   //!< Static global cpu list

  public:
    void traceFunctions(Addr pc)
    {
        if (functionTracingEnabled)
            traceFunctionsInternal(pc);
    }

    static int numSimulatedCPUs() { return cpuList.size(); }
    static Counter numSimulatedInsts()
    {
        Counter total = 0;

        int size = cpuList.size();
        for (int i = 0; i < size; ++i)
            total += cpuList[i]->totalInsts();

        return total;
    }

    static Counter numSimulatedOps()
    {
        Counter total = 0;

        int size = cpuList.size();
        for (int i = 0; i < size; ++i)
            total += cpuList[i]->totalOps();

        return total;
    }

  public:
    // Number of CPU cycles simulated
    Stats::Scalar numCycles;
    Stats::Scalar numWorkItemsStarted;
    Stats::Scalar numWorkItemsCompleted;
};

#endif // __CPU_BASE_HH__