dispatcher.hh revision 12126:06c1fbaa5724
/*
 * Copyright (c) 2011-2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * For use for simulation and test purposes only
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Brad Beckmann, Marc Orr
 */

#ifndef __GPU_DISPATCHER_HH__
#define __GPU_DISPATCHER_HH__

#include <queue>
#include <unordered_map>
#include <vector>

#include "base/statistics.hh"
#include "dev/dma_device.hh"
#include "gpu-compute/compute_unit.hh"
#include "gpu-compute/ndrange.hh"
#include "gpu-compute/qstruct.hh"
#include "mem/port.hh"
#include "params/GpuDispatcher.hh"

class BaseCPU;
class ClDriver;
class Shader;

class GpuDispatcher : public DmaDevice
{
    public:
        typedef GpuDispatcherParams Params;

        MasterID masterId() { return _masterId; }

    protected:
        MasterID _masterId;

        // Base and length of PIO register space
        Addr pioAddr;
        Addr pioSize;
        Tick pioDelay;

        HsaQueueEntry curTask;

        std::unordered_map<int, NDRange> ndRangeMap;
        NDRange ndRange;

        // list of kernel_ids to launch
        std::queue<int> execIds;
        // list of kernel_ids that have finished
        std::queue<int> doneIds;

        uint64_t dispatchCount;
        // is there a kernel in execution?
        bool dispatchActive;

        BaseCPU *cpu;
        Shader *shader;
        ClDriver *driver;
        EventFunctionWrapper tickEvent;
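
        // tickEvent drives exec(); an EventFunctionWrapper is normally bound
        // to its callback in the constructor's initializer list. A sketch of
        // the usual gem5 pattern (assuming exec() is the tick callback, which
        // is an assumption about the .cc, not something this header states):
        //
        //     tickEvent([this]{ exec(); }, "GPU Dispatcher tick")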

        static GpuDispatcher *instance;

        // Syscall-emulation mode presumably has only one application running
        // at a time; otherwise we would need some PID-based tagging.
        // Currently unused.
        typedef std::unordered_map<uint64_t, uint64_t> TranslationBuffer;
        TranslationBuffer tlb;

    public:
        /* statistics */
        Stats::Scalar num_kernelLaunched;

        GpuDispatcher(const Params *p);

        ~GpuDispatcher() { }

        void exec();
        virtual void serialize(CheckpointOut &cp) const;
        virtual void unserialize(CheckpointIn &cp);
        void notifyWgCompl(Wavefront *w);
        void scheduleDispatch();
        void accessUserVar(BaseCPU *cpu, uint64_t addr, int val, int off);

        // Using a singleton so that glue code can pass pointer locations to
        // the dispatcher. Once there are multiple dispatchers, this could
        // become something like getInstance(index).
        static void
        setInstance(GpuDispatcher *_instance)
        {
            instance = _instance;
        }

        static GpuDispatcher* getInstance() { return instance; }
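
        // Usage sketch for the singleton hooks above (hypothetical call
        // sites; 'dispatcher' is assumed to be the GpuDispatcher created by
        // the configuration glue code):
        //
        //     GpuDispatcher::setInstance(dispatcher);
        //     GpuDispatcher::getInstance()->scheduleDispatch();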

        class TLBPort : public MasterPort
        {
          public:

            TLBPort(const std::string &_name, GpuDispatcher *_dispatcher)
                : MasterPort(_name, _dispatcher), dispatcher(_dispatcher) { }

          protected:
            GpuDispatcher *dispatcher;

            virtual bool recvTimingResp(PacketPtr pkt) { return true; }
            virtual Tick recvAtomic(PacketPtr pkt) { return 0; }
            virtual void recvFunctional(PacketPtr pkt) { }
            virtual void recvRangeChange() { }
            virtual void recvReqRetry() { }

        };

        TLBPort *tlbPort;

        virtual BaseMasterPort& getMasterPort(const std::string &if_name,
                                              PortID idx);

        AddrRangeList getAddrRanges() const;
        Tick read(PacketPtr pkt);
        Tick write(PacketPtr pkt);
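
        // pioAddr/pioSize (declared above) define the PIO register window
        // that getAddrRanges() is expected to expose. A sketch of the usual
        // DmaDevice pattern (an assumption about the .cc, not a verbatim
        // copy of it):
        //
        //     AddrRangeList
        //     GpuDispatcher::getAddrRanges() const
        //     {
        //         AddrRangeList ranges;
        //         ranges.push_back(RangeSize(pioAddr, pioSize));
        //         return ranges;
        //     }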

        // helper functions to retrieve/set GPU attributes
        int getNumCUs();
        int wfSize() const;
        void setFuncargsSize(int funcargs_size);

        /** Returns the size of the static hardware context of a wavefront */
        uint32_t getStaticContextSize() const;
};

#endif // __GPU_DISPATCHER_HH__