dispatcher.hh revision 11534:7106f550afad
/*
 * Copyright (c) 2011-2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * For use for simulation and test purposes only
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Brad Beckmann, Marc Orr
 */

#ifndef __GPU_DISPATCHER_HH__
#define __GPU_DISPATCHER_HH__

#include <queue>
#include <unordered_map>
#include <vector>

#include "base/statistics.hh"
#include "dev/dma_device.hh"
#include "gpu-compute/compute_unit.hh"
#include "gpu-compute/ndrange.hh"
#include "gpu-compute/qstruct.hh"
#include "mem/port.hh"
#include "params/GpuDispatcher.hh"

class BaseCPU;
class ClDriver;
class Shader;

class GpuDispatcher : public DmaDevice
{
    public:
        typedef GpuDispatcherParams Params;

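        // Self-scheduling event wrapper: when the event fires, its process()
        // hook is expected to drive the dispatcher's exec() loop (the
        // implementation lives in dispatcher.cc).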
        class TickEvent : public Event
        {
            private:
                GpuDispatcher *dispatcher;

            public:
                TickEvent(GpuDispatcher *);
                void process();
                const char *description() const;
        };

        MasterID masterId() { return _masterId; }

    protected:
        MasterID _masterId;

        // Base, length, and access latency of the PIO register space
        Addr pioAddr;
        Addr pioSize;
        Tick pioDelay;

        HsaQueueEntry curTask;

        std::unordered_map<int, NDRange> ndRangeMap;
        NDRange ndRange;

        // list of kernel_ids to launch
        std::queue<int> execIds;
        // list of kernel_ids that have finished
        std::queue<int> doneIds;

        uint64_t dispatchCount;
        // is there a kernel in execution?
        bool dispatchActive;

        BaseCPU *cpu;
        Shader *shader;
        ClDriver *driver;
        TickEvent tickEvent;

        static GpuDispatcher *instance;

        // Syscall emulation mode can presumably have only one application
        // running; otherwise we would need some PID-based tagging.
        // Currently unused.
        typedef std::unordered_map<uint64_t, uint64_t> TranslationBuffer;
        TranslationBuffer tlb;

    public:
        /* statistics */
        Stats::Scalar num_kernelLaunched;

        GpuDispatcher(const Params *p);

        ~GpuDispatcher() { }

        void exec();
        virtual void serialize(CheckpointOut &cp) const;
        virtual void unserialize(CheckpointIn &cp);
        void notifyWgCompl(Wavefront *w);
        void scheduleDispatch();
        void accessUserVar(BaseCPU *cpu, uint64_t addr, int val, int off);

        // Using a singleton so that glue code can pass pointer locations
        // to the dispatcher. When there are multiple dispatchers, we can
        // call something like getInstance(index).
        static void
        setInstance(GpuDispatcher *_instance)
        {
            instance = _instance;
        }

        static GpuDispatcher* getInstance() { return instance; }
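        // Illustrative usage only (a sketch, not part of this interface):
        // the component that constructs the dispatcher registers it once,
        //     GpuDispatcher::setInstance(dispatcher);
        // and other glue code can later retrieve it, e.g.
        //     GpuDispatcher::getInstance()->scheduleDispatch();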

        class TLBPort : public MasterPort
        {
          public:

            TLBPort(const std::string &_name, GpuDispatcher *_dispatcher)
                : MasterPort(_name, _dispatcher), dispatcher(_dispatcher) { }

          protected:
            GpuDispatcher *dispatcher;

            virtual bool recvTimingResp(PacketPtr pkt) { return true; }
            virtual Tick recvAtomic(PacketPtr pkt) { return 0; }
            virtual void recvFunctional(PacketPtr pkt) { }
            virtual void recvRangeChange() { }
            virtual void recvReqRetry() { }

        };

        TLBPort *tlbPort;

        virtual BaseMasterPort& getMasterPort(const std::string &if_name,
                                              PortID idx);

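        // Memory-mapped (PIO) register interface: getAddrRanges() is expected
        // to advertise the range based at pioAddr with size pioSize, while
        // read()/write() service accesses to it and return pioDelay as the
        // access latency (see dispatcher.cc for the actual behavior).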
        AddrRangeList getAddrRanges() const;
        Tick read(PacketPtr pkt);
        Tick write(PacketPtr pkt);

        // helper functions to retrieve/set GPU attributes
        int getNumCUs();
        int wfSize() const;
        void setFuncargsSize(int funcargs_size);
};

#endif // __GPU_DISPATCHER_HH__