/*
 * Copyright (c) 2011-2015,2018 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * For use for simulation and test purposes only
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Brad Beckmann,
 *          Marc Orr,
 *          Anthony Gutierrez
 */

#ifndef __GPU_DISPATCHER_HH__
#define __GPU_DISPATCHER_HH__

#include <queue>
#include <unordered_map>
#include <vector>

#include "base/statistics.hh"
#include "dev/dma_device.hh"
#include "gpu-compute/compute_unit.hh"
#include "gpu-compute/ndrange.hh"
#include "gpu-compute/qstruct.hh"
#include "mem/port.hh"
#include "params/GpuDispatcher.hh"

class BaseCPU;
class ClDriver;
class Shader;

class GpuDispatcher : public DmaDevice
{
    public:
        typedef GpuDispatcherParams Params;

        MasterID masterId() { return _masterId; }

    protected:
        MasterID _masterId;

        // Base and length of PIO register space
        Addr pioAddr;
        Addr pioSize;
        Tick pioDelay;

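        // queue entry describing the kernel most recently handed to the
        // dispatcher; it is turned into an NDRange when the kernel launches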
        HsaQueueEntry curTask;

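        // per-kernel ND-range state, keyed by kernel id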
        std::unordered_map<int, NDRange> ndRangeMap;
        NDRange ndRange;

        // list of kernel_ids to launch
        std::queue<int> execIds;
        // list of kernel_ids that have finished
        std::queue<int> doneIds;

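        // number of kernel dispatches performed so far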
        uint64_t dispatchCount;
        // is there a kernel in execution?
        bool dispatchActive;

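        // CPU and shader this dispatcher is attached to, plus the CL driver
        // that submits kernels; tickEvent schedules calls to exec()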
        BaseCPU *cpu;
        Shader *shader;
        ClDriver *driver;
        EventFunctionWrapper tickEvent;

        static GpuDispatcher *instance;

        // Syscall emulation mode can presumably have only one application
        // running at a time; otherwise we would need some PID-based tagging.
        // Currently unused.
        typedef std::unordered_map<uint64_t, uint64_t> TranslationBuffer;
        TranslationBuffer tlb;

    public:
        // statistics
        Stats::Scalar num_kernelLaunched;

        GpuDispatcher(const Params *p);

        ~GpuDispatcher() { }

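        // main dispatch routine: pulls pending kernel ids off execIds and
        // hands their work-groups to the shader for execution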
        void exec();
        virtual void serialize(CheckpointOut &cp) const override;
        virtual void unserialize(CheckpointIn &cp) override;
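
        // called by a compute unit when one of its work-groups completes;
        // used to track when an entire kernel has finished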
        void notifyWgCompl(Wavefront *w);
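
        // schedule tickEvent so that exec() runs, if it is not already
        // scheduled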
        void scheduleDispatch();
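
        // access a user-level variable at addr through the given CPU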
        void accessUserVar(BaseCPU *cpu, uint64_t addr, int val, int off);

        // A singleton is used so that glue code can pass pointer locations
        // to the dispatcher. When there are multiple dispatchers, this can
        // become something like getInstance(index).
        static void
        setInstance(GpuDispatcher *_instance)
        {
            instance = _instance;
        }

        static GpuDispatcher* getInstance() { return instance; }

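        // master port for sending translation requests to the GPU TLB; the
        // receive handlers below are stubs because the dispatcher does not
        // act on responses arriving through this port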
        class TLBPort : public MasterPort
        {
          public:

            TLBPort(const std::string &_name, GpuDispatcher *_dispatcher)
                : MasterPort(_name, _dispatcher), dispatcher(_dispatcher) { }

          protected:
            GpuDispatcher *dispatcher;

            virtual bool recvTimingResp(PacketPtr pkt) { return true; }
            virtual Tick recvAtomic(PacketPtr pkt) { return 0; }
            virtual void recvFunctional(PacketPtr pkt) { }
            virtual void recvRangeChange() { }
            virtual void recvReqRetry() { }
        };

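        // port used to connect this dispatcher to the GPU TLB hierarchy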
        TLBPort *tlbPort;

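        // memory-mapped (PIO) register interface: read() and write() handle
        // accesses to [pioAddr, pioAddr + pioSize), the range advertised by
        // getAddrRanges()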
        Port &getPort(const std::string &if_name,
                      PortID idx=InvalidPortID) override;

        AddrRangeList getAddrRanges() const override;
        Tick read(PacketPtr pkt) override;
        Tick write(PacketPtr pkt) override;

        // helper functions to retrieve/set GPU attributes
        int getNumCUs();
        int wfSize() const;
        void setFuncargsSize(int funcargs_size);

        /** Returns the size of the static hardware context of a wavefront */
        uint32_t getStaticContextSize() const;
};

#endif // __GPU_DISPATCHER_HH__