/*
 * Copyright (c) 2014-2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * For use for simulation and test purposes only
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: John Kalamatianos, Sooraj Puthoor
 */

#include "gpu-compute/global_memory_pipeline.hh"

#include "debug/GPUMem.hh"
#include "debug/GPUReg.hh"
#include "gpu-compute/compute_unit.hh"
#include "gpu-compute/gpu_dyn_inst.hh"
#include "gpu-compute/shader.hh"
#include "gpu-compute/vector_register_file.hh"
#include "gpu-compute/wavefront.hh"

GlobalMemPipeline::GlobalMemPipeline(const ComputeUnitParams* p) :
    computeUnit(nullptr), gmQueueSize(p->global_mem_queue_size),
    inflightStores(0), inflightLoads(0)
{
}

void
GlobalMemPipeline::init(ComputeUnit *cu)
{
    computeUnit = cu;
    globalMemSize = computeUnit->shader->globalMemSize;
    _name = computeUnit->name() + ".GlobalMemPipeline";
}

void
GlobalMemPipeline::exec()
{
    // apply any returned global memory operations
    GPUDynInstPtr m = !gmReturnedLoads.empty() ? gmReturnedLoads.front() :
        !gmReturnedStores.empty() ? gmReturnedStores.front() : nullptr;

    bool accessVrf = true;

    // check the VRF to see if the operands of a load (or load component
    // of an atomic) are accessible
    if ((m) && (m->isLoad() || m->isAtomicRet())) {
        Wavefront *w = computeUnit->wfList[m->simdId][m->wfSlotId];

        accessVrf =
            w->computeUnit->vrf[m->simdId]->
                vrfOperandAccessReady(m->seqNum(), w, m,
                                      VrfAccessType::WRITE);
    }

    if ((!gmReturnedStores.empty() || !gmReturnedLoads.empty()) &&
        m->latency.rdy() && computeUnit->glbMemToVrfBus.rdy() &&
        accessVrf && m->statusBitVector == VectorMask(0) &&
        (computeUnit->shader->coissue_return ||
         computeUnit->wfWait.at(m->pipeId).rdy())) {

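        // At this point the instruction may write back: all lanes have
        // returned (statusBitVector is clear), its latency has elapsed, the
        // VRF can accept the destination operands, the memory-to-VRF bus is
        // free and, unless returns may be co-issued, the wavefront's
        // execution slot is ready. Select the doGmReturn instantiation that
        // matches the instruction's vector register width and memory data
        // type so the returned bytes are converted correctly on writeback.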
        if (m->v_type == VT_32 && m->m_type == Enums::M_U8)
            doGmReturn<uint32_t, uint8_t>(m);
        else if (m->v_type == VT_32 && m->m_type == Enums::M_U16)
            doGmReturn<uint32_t, uint16_t>(m);
        else if (m->v_type == VT_32 && m->m_type == Enums::M_U32)
            doGmReturn<uint32_t, uint32_t>(m);
        else if (m->v_type == VT_32 && m->m_type == Enums::M_S8)
            doGmReturn<int32_t, int8_t>(m);
        else if (m->v_type == VT_32 && m->m_type == Enums::M_S16)
            doGmReturn<int32_t, int16_t>(m);
        else if (m->v_type == VT_32 && m->m_type == Enums::M_S32)
            doGmReturn<int32_t, int32_t>(m);
        else if (m->v_type == VT_32 && m->m_type == Enums::M_F16)
            doGmReturn<float, Float16>(m);
        else if (m->v_type == VT_32 && m->m_type == Enums::M_F32)
            doGmReturn<float, float>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_U8)
            doGmReturn<uint64_t, uint8_t>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_U16)
            doGmReturn<uint64_t, uint16_t>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_U32)
            doGmReturn<uint64_t, uint32_t>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_U64)
            doGmReturn<uint64_t, uint64_t>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_S8)
            doGmReturn<int64_t, int8_t>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_S16)
            doGmReturn<int64_t, int16_t>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_S32)
            doGmReturn<int64_t, int32_t>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_S64)
            doGmReturn<int64_t, int64_t>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_F16)
            doGmReturn<double, Float16>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_F32)
            doGmReturn<double, float>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_F64)
            doGmReturn<double, double>(m);
    }

    // If pipeline has executed a global memory instruction
    // execute global memory packets and issue global
    // memory packets to DTLB
    if (!gmIssuedRequests.empty()) {
        GPUDynInstPtr mp = gmIssuedRequests.front();
        if (mp->isLoad() || mp->isAtomic()) {
            if (inflightLoads >= gmQueueSize) {
                return;
            } else {
                ++inflightLoads;
            }
        } else {
            if (inflightStores >= gmQueueSize) {
                return;
            } else if (mp->isStore()) {
                ++inflightStores;
            }
        }

        mp->initiateAcc(mp);
        gmIssuedRequests.pop();

        DPRINTF(GPUMem, "CU%d: WF[%d][%d] Popping 0 mem_op = \n",
                computeUnit->cu_id, mp->simdId, mp->wfSlotId);
    }
}

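// doGmReturn completes a memory operation whose data has returned from the
// global memory system. For loads and returning atomics it copies the values
// in the instruction's d_data buffer into the destination physical VGPRs (a
// purely functional write) and accounts for the timing of the VRF write; for
// all operations it pops the returned-op queue, releases the in-flight and
// outstanding-request counters, and reserves the memory-to-VRF bus.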
template<typename c0, typename c1>
void
GlobalMemPipeline::doGmReturn(GPUDynInstPtr m)
{
    Wavefront *w = computeUnit->wfList[m->simdId][m->wfSlotId];

    // Return data to registers
    if (m->isLoad() || m->isAtomic()) {
        gmReturnedLoads.pop();
        assert(inflightLoads > 0);
        --inflightLoads;

        if (m->isLoad() || m->isAtomicRet()) {
            std::vector<uint32_t> regVec;
            // iterate over number of destination register operands since
            // this is a load or atomic operation
            for (int k = 0; k < m->n_reg; ++k) {
                assert((sizeof(c1) * m->n_reg) <= MAX_WIDTH_FOR_MEM_INST);
                int dst = m->dst_reg + k;

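                // instructions with more destination registers than a
                // non-vector memory instruction may use carry an explicit
                // per-register index list (dst_reg_vec) rather than the
                // contiguous range starting at dst_reg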
                if (m->n_reg > MAX_REGS_FOR_NON_VEC_MEM_INST)
                    dst = m->dst_reg_vec[k];
                // virtual->physical VGPR mapping
                int physVgpr = w->remap(dst, sizeof(c0), 1);
                // save the physical VGPR index
                regVec.push_back(physVgpr);
                c1 *p1 = &((c1 *)m->d_data)[k * w->computeUnit->wfSize()];
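                // d_data is laid out per destination register: element k of
                // lane i lives at index k * wfSize() + i, so p1 starts at
                // lane 0 of register k and is advanced once per lane below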

                for (int i = 0; i < w->computeUnit->wfSize(); ++i) {
                    if (m->exec_mask[i]) {
                        DPRINTF(GPUReg, "CU%d, WF[%d][%d], lane %d: "
                                "$%s%d <- %d global ld done (src = wavefront "
                                "ld inst)\n", w->computeUnit->cu_id, w->simdId,
                                w->wfSlotId, i, sizeof(c0) == 4 ? "s" : "d",
                                dst, *p1);
                        // write the value into the physical VGPR. This is a
                        // purely functional operation. No timing is modeled.
                        w->computeUnit->vrf[w->simdId]->write<c0>(physVgpr,
                                                                  *p1, i);
                    }
                    ++p1;
                }
            }

            // Schedule the write operation of the load data on the VRF.
            // This simply models the timing aspect of the VRF write operation.
            // It does not modify the physical VGPR.
            loadVrfBankConflictCycles +=
                w->computeUnit->vrf[w->simdId]->exec(m->seqNum(),
                                                     w, regVec, sizeof(c0),
                                                     m->time);
        }
    } else {
        gmReturnedStores.pop();
        assert(inflightStores > 0);
        --inflightStores;
    }

    // Decrement outstanding register count
    computeUnit->shader->ScheduleAdd(&w->outstandingReqs, m->time, -1);

    if (m->isStore() || m->isAtomic()) {
        computeUnit->shader->ScheduleAdd(&w->outstandingReqsWrGm, m->time,
                                         -1);
    }

    if (m->isLoad() || m->isAtomic()) {
        computeUnit->shader->ScheduleAdd(&w->outstandingReqsRdGm, m->time,
                                         -1);
    }

    // Mark write bus busy for appropriate amount of time
    computeUnit->glbMemToVrfBus.set(m->time);
    if (!computeUnit->shader->coissue_return)
        w->computeUnit->wfWait.at(m->pipeId).set(m->time);
}

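// Register this pipeline's statistics. Given the _name set in init(), the
// counter below should appear in the stats output as
// "<compute unit name>.GlobalMemPipeline.load_vrf_bank_conflict_cycles"
// (the exact compute unit name depends on the configuration).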
void
GlobalMemPipeline::regStats()
{
    loadVrfBankConflictCycles
        .name(name() + ".load_vrf_bank_conflict_cycles")
        .desc("total number of cycles GM data are delayed before updating "
              "the VRF")
        ;
}