/*
 * Copyright (c) 2014-2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * For use for simulation and test purposes only
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: John Kalamatianos, Sooraj Puthoor
 */

#include "gpu-compute/global_memory_pipeline.hh"

#include "debug/GPUMem.hh"
#include "debug/GPUReg.hh"
#include "gpu-compute/compute_unit.hh"
#include "gpu-compute/gpu_dyn_inst.hh"
#include "gpu-compute/shader.hh"
#include "gpu-compute/vector_register_file.hh"
#include "gpu-compute/wavefront.hh"

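// The global memory pipeline issues vector global memory requests from the
// compute unit to the memory system and, when responses come back, writes
// load and atomic return data into the vector register file (VRF).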
GlobalMemPipeline::GlobalMemPipeline(const ComputeUnitParams* p) :
    computeUnit(nullptr), gmQueueSize(p->global_mem_queue_size),
    inflightStores(0), inflightLoads(0)
{
}

void
GlobalMemPipeline::init(ComputeUnit *cu)
{
    computeUnit = cu;
    globalMemSize = computeUnit->shader->globalMemSize;
    _name = computeUnit->name() + ".GlobalMemPipeline";
}

void
GlobalMemPipeline::exec()
{
    // apply any returned global memory operations
    GPUDynInstPtr m = !gmReturnedLoads.empty() ? gmReturnedLoads.front() :
        !gmReturnedStores.empty() ? gmReturnedStores.front() : nullptr;

    bool accessVrf = true;
    // check the VRF to see if the operands of a load (or load component
    // of an atomic) are accessible
    if ((m) && (m->m_op == Enums::MO_LD || MO_A(m->m_op))) {
        Wavefront *w = computeUnit->wfList[m->simdId][m->wfSlotId];

        accessVrf =
            w->computeUnit->vrf[m->simdId]->
            vrfOperandAccessReady(m->seqNum(), w, m,
                                  VrfAccessType::WRITE);
    }

    if ((!gmReturnedStores.empty() || !gmReturnedLoads.empty()) &&
        m->latency.rdy() && computeUnit->glbMemToVrfBus.rdy() &&
        accessVrf && m->statusBitVector == VectorMask(0) &&
        (computeUnit->shader->coissue_return ||
         computeUnit->wfWait.at(m->pipeId).rdy())) {

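        // Dispatch to the doGmReturn specialization for this operation:
        // the first template parameter is the register-side type (32- or
        // 64-bit per v_type, with signedness/float class taken from
        // m_type) and the second is the in-memory data type per m_type.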
        if (m->v_type == VT_32 && m->m_type == Enums::M_U8)
            doGmReturn<uint32_t, uint8_t>(m);
        else if (m->v_type == VT_32 && m->m_type == Enums::M_U16)
            doGmReturn<uint32_t, uint16_t>(m);
        else if (m->v_type == VT_32 && m->m_type == Enums::M_U32)
            doGmReturn<uint32_t, uint32_t>(m);
        else if (m->v_type == VT_32 && m->m_type == Enums::M_S8)
            doGmReturn<int32_t, int8_t>(m);
        else if (m->v_type == VT_32 && m->m_type == Enums::M_S16)
            doGmReturn<int32_t, int16_t>(m);
        else if (m->v_type == VT_32 && m->m_type == Enums::M_S32)
            doGmReturn<int32_t, int32_t>(m);
        else if (m->v_type == VT_32 && m->m_type == Enums::M_F16)
            doGmReturn<float, Float16>(m);
        else if (m->v_type == VT_32 && m->m_type == Enums::M_F32)
            doGmReturn<float, float>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_U8)
            doGmReturn<uint64_t, uint8_t>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_U16)
            doGmReturn<uint64_t, uint16_t>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_U32)
            doGmReturn<uint64_t, uint32_t>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_U64)
            doGmReturn<uint64_t, uint64_t>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_S8)
            doGmReturn<int64_t, int8_t>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_S16)
            doGmReturn<int64_t, int16_t>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_S32)
            doGmReturn<int64_t, int32_t>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_S64)
            doGmReturn<int64_t, int64_t>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_F16)
            doGmReturn<double, Float16>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_F32)
            doGmReturn<double, float>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_F64)
            doGmReturn<double, double>(m);
    }

    // If a global memory instruction is waiting in the issue queue,
    // initiate its memory access, i.e. issue its memory packets to the
    // DTLB, subject to the in-flight queue limits below.
    if (!gmIssuedRequests.empty()) {
        GPUDynInstPtr mp = gmIssuedRequests.front();
        // loads and atomics are limited by the number of in-flight loads;
        // other operations (e.g., stores) by the number of in-flight stores
        if (mp->m_op == Enums::MO_LD ||
            (mp->m_op >= Enums::MO_AAND && mp->m_op <= Enums::MO_AMIN) ||
            (mp->m_op >= Enums::MO_ANRAND && mp->m_op <= Enums::MO_ANRMIN)) {

            if (inflightLoads >= gmQueueSize) {
                return;
            } else {
                ++inflightLoads;
            }
        } else {
            if (inflightStores >= gmQueueSize) {
                return;
            } else if (mp->m_op == Enums::MO_ST) {
                ++inflightStores;
            }
        }

        mp->initiateAcc(mp);
        gmIssuedRequests.pop();

        DPRINTF(GPUMem, "CU%d: WF[%d][%d] Popping 0 mem_op = %s\n",
                computeUnit->cu_id, mp->simdId, mp->wfSlotId,
                Enums::MemOpTypeStrings[mp->m_op]);
    }
}

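// Complete a returned global memory operation. For loads and atomics, the
// returned data in m->d_data is written, lane by lane under the execution
// mask, into the physical VGPRs of the requesting wavefront, and a VRF
// write is scheduled to model the timing of that writeback. The wavefront's
// outstanding-request counters are then decremented and the global
// memory-to-VRF return bus is marked busy.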
template<typename c0, typename c1>
void
GlobalMemPipeline::doGmReturn(GPUDynInstPtr m)
{
    Wavefront *w = computeUnit->wfList[m->simdId][m->wfSlotId];

    // Return data to registers
    if (m->m_op == Enums::MO_LD || MO_A(m->m_op) || MO_ANR(m->m_op)) {
        gmReturnedLoads.pop();
        assert(inflightLoads > 0);
        --inflightLoads;

        if (m->m_op == Enums::MO_LD || MO_A(m->m_op)) {
            std::vector<uint32_t> regVec;
            // iterate over number of destination register operands since
            // this is a load or atomic operation
            for (int k = 0; k < m->n_reg; ++k) {
                assert((sizeof(c1) * m->n_reg) <= MAX_WIDTH_FOR_MEM_INST);
                int dst = m->dst_reg + k;

                if (m->n_reg > MAX_REGS_FOR_NON_VEC_MEM_INST)
                    dst = m->dst_reg_vec[k];
                // virtual->physical VGPR mapping
                int physVgpr = w->remap(dst, sizeof(c0), 1);
                // save the physical VGPR index
                regVec.push_back(physVgpr);
                c1 *p1 = &((c1 *)m->d_data)[k * w->computeUnit->wfSize()];

                for (int i = 0; i < w->computeUnit->wfSize(); ++i) {
                    if (m->exec_mask[i]) {
                        DPRINTF(GPUReg, "CU%d, WF[%d][%d], lane %d: "
                                "$%s%d <- %d global ld done (src = wavefront "
                                "ld inst)\n", w->computeUnit->cu_id, w->simdId,
                                w->wfSlotId, i, sizeof(c0) == 4 ? "s" : "d",
                                dst, *p1);
                        // write the value into the physical VGPR. This is a
                        // purely functional operation. No timing is modeled.
                        w->computeUnit->vrf[w->simdId]->write<c0>(physVgpr,
                                                                    *p1, i);
                    }
                    ++p1;
                }
            }

            // Schedule the write operation of the load data on the VRF.
            // This simply models the timing aspect of the VRF write operation.
            // It does not modify the physical VGPR.
            loadVrfBankConflictCycles +=
                w->computeUnit->vrf[w->simdId]->exec(m->seqNum(),
                                                     w, regVec, sizeof(c0),
                                                     m->time);
        }
    } else {
        gmReturnedStores.pop();
        assert(inflightStores > 0);
        --inflightStores;
    }

    // Decrement outstanding register count
    computeUnit->shader->ScheduleAdd(&w->outstanding_reqs, m->time, -1);

    if (m->m_op == Enums::MO_ST || MO_A(m->m_op) || MO_ANR(m->m_op) ||
        MO_H(m->m_op)) {
        computeUnit->shader->ScheduleAdd(&w->outstanding_reqs_wr_gm, m->time,
                                         -1);
    }

    if (m->m_op == Enums::MO_LD || MO_A(m->m_op) || MO_ANR(m->m_op)) {
        computeUnit->shader->ScheduleAdd(&w->outstanding_reqs_rd_gm, m->time,
                                         -1);
    }

    // Mark write bus busy for appropriate amount of time
    computeUnit->glbMemToVrfBus.set(m->time);
    if (!computeUnit->shader->coissue_return)
        w->computeUnit->wfWait.at(m->pipeId).set(m->time);
}

void
GlobalMemPipeline::regStats()
{
    loadVrfBankConflictCycles
        .name(name() + ".load_vrf_bank_conflict_cycles")
        .desc("total number of cycles GM data are delayed before updating "
              "the VRF")
        ;
}