base.hh revision 9749:cffb82b745cf
/*
 * Copyright (c) 2011-2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2011 Regents of the University of California
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Nathan Binkert
 *          Rick Strong
 */

#ifndef __CPU_BASE_HH__
#define __CPU_BASE_HH__

#include <vector>

#include "arch/interrupts.hh"
#include "arch/isa_traits.hh"
#include "arch/microcode_rom.hh"
#include "base/statistics.hh"
#include "config/the_isa.hh"
#include "mem/mem_object.hh"
#include "sim/eventq.hh"
#include "sim/full_system.hh"
#include "sim/insttracer.hh"

struct BaseCPUParams;
class BranchPred;
class CheckerCPU;
class ThreadContext;
class System;

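/**
 * Periodic progress-reporting event for a CPU. It keeps the instruction
 * count observed at its previous firing (lastNumInst) so that each firing
 * can report progress over the last _interval ticks, and it reschedules
 * itself while _repeatEvent is set.
 */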
class CPUProgressEvent : public Event
{
  protected:
    Tick _interval;
    Counter lastNumInst;
    BaseCPU *cpu;
    bool _repeatEvent;

  public:
    CPUProgressEvent(BaseCPU *_cpu, Tick ival = 0);

    void process();

    void interval(Tick ival) { _interval = ival; }
    Tick interval() { return _interval; }

    void repeatEvent(bool repeat) { _repeatEvent = repeat; }

    virtual const char *description() const;
};

class BaseCPU : public MemObject
{
  protected:

    // @todo remove me after debugging with legion done
    Tick instCnt;
    // Every CPU has an ID; it lives here in the base CPU. It is set at
    // initialization, and the only time a cpuId might change is during a
    // takeover (which should be done from within the BaseCPU anyway),
    // so no setCpuId() method is provided.
    int _cpuId;

    /** instruction side request id that must be placed in all requests */
    MasterID _instMasterId;

    /** data side request id that must be placed in all requests */
    MasterID _dataMasterId;

    /** An internal representation of a task identifier within gem5. This is
     * used so the CPU can tag each request with a taskId (an internal
     * representation of the OS process ID), allowing components in the
     * memory system to track which process IDs are ultimately interacting
     * with them.
     */
    uint32_t _taskId;

    /** The current OS process ID that is executing on this processor. This is
     * used to generate a taskId. */
    uint32_t _pid;

    /** Is the CPU switched out or active? */
    bool _switchedOut;

  public:

    /**
     * Pure virtual method that returns a reference to the data
     * port. All subclasses must implement this method.
     *
     * @return a reference to the data port
     */
    virtual MasterPort &getDataPort() = 0;

    /**
     * Pure virtual method that returns a reference to the instruction
     * port. All subclasses must implement this method.
     *
     * @return a reference to the instruction port
     */
    virtual MasterPort &getInstPort() = 0;

    /** Reads this CPU's ID. */
    int cpuId() { return _cpuId; }

    /** Reads this CPU's unique data requestor ID */
    MasterID dataMasterId() { return _dataMasterId; }
    /** Reads this CPU's unique instruction requestor ID */
    MasterID instMasterId() { return _instMasterId; }

    /**
     * Get a master port on this CPU. All CPUs have a data and
     * instruction port, and this method uses getDataPort and
     * getInstPort of the subclasses to resolve the two ports.
     *
     * @param if_name the port name
     * @param idx ignored index
     *
     * @return a reference to the port with the given name
     */
    BaseMasterPort &getMasterPort(const std::string &if_name,
                                  PortID idx = InvalidPortID);
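
    // Illustrative sketch (not part of this interface): callers resolve
    // the two ports by name when wiring up the memory system, which is
    // normally driven from the Python configuration. The port names below
    // are the conventional CPU port names and are an assumption here, not
    // something this base class guarantees:
    //
    //     BaseMasterPort &inst_port = cpu.getMasterPort("icache_port");
    //     BaseMasterPort &data_port = cpu.getMasterPort("dcache_port");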

    /** Get cpu task id */
    uint32_t taskId() const { return _taskId; }
    /** Set cpu task id */
    void taskId(uint32_t id) { _taskId = id; }

    uint32_t getPid() const { return _pid; }
    void setPid(uint32_t pid) { _pid = pid; }

    inline void workItemBegin() { numWorkItemsStarted++; }
    inline void workItemEnd() { numWorkItemsCompleted++; }
    // @todo remove me after debugging with legion done
    Tick instCount() { return instCnt; }

    TheISA::MicrocodeRom microcodeRom;

  protected:
    TheISA::Interrupts *interrupts;

  public:
    TheISA::Interrupts *
    getInterruptController()
    {
        return interrupts;
    }

    virtual void wakeup() = 0;

    void
    postInterrupt(int int_num, int index)
    {
        interrupts->post(int_num, index);
        if (FullSystem)
            wakeup();
    }

    void
    clearInterrupt(int int_num, int index)
    {
        interrupts->clear(int_num, index);
    }

    void
    clearInterrupts()
    {
        interrupts->clearAll();
    }

    bool
    checkInterrupts(ThreadContext *tc) const
    {
        return FullSystem && interrupts->checkInterrupts(tc);
    }
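
    // Illustrative usage sketch of the interrupt hooks above (the device
    // and CPU-model code shown is an assumption, not code in this file):
    //
    //     cpu->postInterrupt(int_num, index);   // device raises a line
    //     ...
    //     if (cpu->checkInterrupts(tc)) {       // CPU model polls, e.g.
    //         // hand off to the ISA interrupt  // before fetching the
    //         // controller obtained through    // next instruction
    //         // getInterruptController()
    //     }
    //     cpu->clearInterrupt(int_num, index);  // device lowers the line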

    class ProfileEvent : public Event
    {
      private:
        BaseCPU *cpu;
        Tick interval;

      public:
        ProfileEvent(BaseCPU *cpu, Tick interval);
        void process();
    };
    ProfileEvent *profileEvent;

  protected:
    std::vector<ThreadContext *> threadContexts;

    Trace::InstTracer * tracer;

  public:

    // Mask to align PCs to MachInst sized boundaries
    static const Addr PCMask = ~((Addr)sizeof(TheISA::MachInst) - 1);

    /// Provide access to the tracer pointer
    Trace::InstTracer * getTracer() { return tracer; }

    /// Notify the CPU that the indicated context is now active.  The
    /// delay parameter indicates the number of cycles to wait before
    /// executing (typically 0 or 1).
    virtual void activateContext(ThreadID thread_num, Cycles delay) {}

    /// Notify the CPU that the indicated context is now suspended.
    virtual void suspendContext(ThreadID thread_num) {}

    /// Notify the CPU that the indicated context is now deallocated.
    virtual void deallocateContext(ThreadID thread_num) {}

    /// Notify the CPU that the indicated context is now halted.
    virtual void haltContext(ThreadID thread_num) {}

    /// Given a ThreadContext pointer, return the thread number.
    int findContext(ThreadContext *tc);

    /// Given a thread number, get the thread context for it.
    virtual ThreadContext *getContext(int tn) { return threadContexts[tn]; }

  public:
    typedef BaseCPUParams Params;
    const Params *params() const
    { return reinterpret_cast<const Params *>(_params); }
    BaseCPU(Params *params, bool is_checker = false);
    virtual ~BaseCPU();

    virtual void init();
    virtual void startup();
    virtual void regStats();

    virtual void activateWhenReady(ThreadID tid) {};

    void registerThreadContexts();

    /**
     * Prepare for another CPU to take over execution.
     *
     * When this method exits, all internal state should have been
     * flushed. After the method returns, the simulator calls
     * takeOverFrom() on the new CPU with this CPU as its parameter.
     */
    virtual void switchOut();

    /**
     * Load the state of a CPU from the previous CPU object, invoked
     * on all new CPUs that are about to be switched in.
     *
     * A CPU model implementing this method is expected to initialize
     * its state from the old CPU and to connect its memory ports
     * (unless they are already connected) to the memories the old CPU
     * was connected to.
     *
     * @param cpu The old CPU to read state from.
     */
    virtual void takeOverFrom(BaseCPU *cpu);
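
    // Sketch of the switch-over sequence implied by the comments above
    // (the drain step and the actual call sites live in the simulation
    // scripts/driver, not in this class):
    //
    //     old_cpu->switchOut();            // flush old CPU's state
    //     new_cpu->takeOverFrom(old_cpu);  // new CPU reads state and
    //                                      // connects to the old CPU's
    //                                      // memories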

    /**
     * Flush all TLBs in the CPU.
     *
     * This method is mainly used to flush stale translations when
     * switching CPUs. It is also exported to the Python world to
     * allow it to request a TLB flush after draining the CPU to make
     * it easier to compare traces when debugging
     * handover/checkpointing.
     */
    void flushTLBs();

    /**
     * Determine if the CPU is switched out.
     *
     * @return True if the CPU is switched out, false otherwise.
     */
    bool switchedOut() const { return _switchedOut; }

    /**
     * Verify that the system is in a memory mode supported by the
     * CPU.
     *
     * Implementations are expected to query the system for the
     * current memory mode and ensure that it is what the CPU model
     * expects. If the check fails, the implementation should
     * terminate the simulation using fatal().
     */
    virtual void verifyMemoryMode() const { };

    /**
     * Number of threads we're actually simulating (<= SMT_MAX_THREADS).
     * This is a constant for the duration of the simulation.
     */
    ThreadID numThreads;

    /**
     * Vector of per-thread instruction-based event queues.  Used for
     * scheduling events based on number of instructions committed by
     * a particular thread.
     */
    EventQueue **comInstEventQueue;

    /**
     * Vector of per-thread load-based event queues.  Used for
     * scheduling events based on number of loads committed by
     * a particular thread.
     */
    EventQueue **comLoadEventQueue;
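
    // Illustrative sketch (an assumption about how CPU models drive these
    // queues, not code in this file): after committing work for a thread,
    // a model advances the per-thread queues to the new committed counts
    // so any instruction/load-count exit events fire at the right point:
    //
    //     comInstEventQueue[tid]->serviceEvents(committedInstCount);
    //     comLoadEventQueue[tid]->serviceEvents(committedLoadCount);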

    System *system;

    /**
     * Serialize this object to the given output stream.
     *
     * @note CPU models should normally overload the serializeThread()
     * method instead of the serialize() method as this provides a
     * uniform data format for all CPU models and promotes better code
     * reuse.
     *
     * @param os The stream to serialize to.
     */
    virtual void serialize(std::ostream &os);

    /**
     * Reconstruct the state of this object from a checkpoint.
     *
     * @note CPU models should normally overload the
     * unserializeThread() method instead of the unserialize() method
     * as this provides a uniform data format for all CPU models and
     * promotes better code reuse.
     *
     * @param cp The checkpoint to use.
     * @param section The section name of this object.
     */
    virtual void unserialize(Checkpoint *cp, const std::string &section);

    /**
     * Serialize a single thread.
     *
     * @param os The stream to serialize to.
     * @param tid ID of the current thread.
     */
    virtual void serializeThread(std::ostream &os, ThreadID tid) {};

    /**
     * Unserialize one thread.
     *
     * @param cp The checkpoint to use.
     * @param section The section name of this thread.
     * @param tid ID of the current thread.
     */
    virtual void unserializeThread(Checkpoint *cp, const std::string &section,
                                   ThreadID tid) {};
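
    // Sketch of the intended override pattern (MyCPU and its per-thread
    // state are hypothetical; only the hook itself comes from this file):
    //
    //     void
    //     MyCPU::serializeThread(std::ostream &os, ThreadID tid)
    //     {
    //         // write this thread's architectural state to the stream
    //         thread[tid]->serialize(os);
    //     }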

    /**
     * Return pointer to CPU's branch predictor (NULL if none).
     * @return Branch predictor pointer.
     */
    virtual BranchPred *getBranchPred() { return NULL; };

    virtual Counter totalInsts() const = 0;

    virtual Counter totalOps() const = 0;

    /**
     * Schedule an event that exits the simulation loops after a
     * predefined number of instructions.
     *
     * This method is usually called from the configuration script to
     * get an exit event some time in the future. It is typically used
     * when the script wants to simulate for a specific number of
     * instructions rather than ticks.
     *
     * @param tid Thread to monitor.
     * @param insts Number of instructions into the future.
     * @param cause Cause to signal in the exit event.
     */
    void scheduleInstStop(ThreadID tid, Counter insts, const char *cause);

    /**
     * Schedule an event that exits the simulation loops after a
     * predefined number of load operations.
     *
     * This method is usually called from the configuration script to
     * get an exit event some time in the future. It is typically used
     * when the script wants to simulate for a specific number of
     * loads rather than ticks.
     *
     * @param tid Thread to monitor.
     * @param loads Number of load instructions into the future.
     * @param cause Cause to signal in the exit event.
     */
    void scheduleLoadStop(ThreadID tid, Counter loads, const char *cause);
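
    // Illustrative call (normally issued from the Python configuration;
    // the numbers and cause string are made up for the example): exit the
    // simulation loop after one million more instructions commit on
    // thread 0.
    //
    //     cpu->scheduleInstStop(0, 1000000, "max instruction count reached");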

    // Function tracing
  private:
    bool functionTracingEnabled;
    std::ostream *functionTraceStream;
    Addr currentFunctionStart;
    Addr currentFunctionEnd;
    Tick functionEntryTick;
    void enableFunctionTrace();
    void traceFunctionsInternal(Addr pc);

  private:
    static std::vector<BaseCPU *> cpuList;   //!< Static global cpu list

  public:
    void traceFunctions(Addr pc)
    {
        if (functionTracingEnabled)
            traceFunctionsInternal(pc);
    }

    static int numSimulatedCPUs() { return cpuList.size(); }
    static Counter numSimulatedInsts()
    {
        Counter total = 0;

        int size = cpuList.size();
        for (int i = 0; i < size; ++i)
            total += cpuList[i]->totalInsts();

        return total;
    }

    static Counter numSimulatedOps()
    {
        Counter total = 0;

        int size = cpuList.size();
        for (int i = 0; i < size; ++i)
            total += cpuList[i]->totalOps();

        return total;
    }

  public:
    // Number of CPU cycles simulated
    Stats::Scalar numCycles;
    Stats::Scalar numWorkItemsStarted;
    Stats::Scalar numWorkItemsCompleted;
};

#endif // __CPU_BASE_HH__