global_memory_pipeline.cc (11700:7d4d424c9f17) global_memory_pipeline.cc (12697:cd71b966be1e)
/*
 * Copyright (c) 2014-2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * For use for simulation and test purposes only
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: John Kalamatianos,
 *          Sooraj Puthoor
 */

#include "gpu-compute/global_memory_pipeline.hh"

#include "debug/GPUMem.hh"
#include "debug/GPUReg.hh"
#include "gpu-compute/compute_unit.hh"
#include "gpu-compute/gpu_dyn_inst.hh"
#include "gpu-compute/shader.hh"
#include "gpu-compute/vector_register_file.hh"
#include "gpu-compute/wavefront.hh"

GlobalMemPipeline::GlobalMemPipeline(const ComputeUnitParams* p) :
    computeUnit(nullptr), gmQueueSize(p->global_mem_queue_size),
    outOfOrderDataDelivery(p->out_of_order_data_delivery), inflightStores(0),
    inflightLoads(0)
{
}

void
GlobalMemPipeline::init(ComputeUnit *cu)
{
    computeUnit = cu;
    globalMemSize = computeUnit->shader->globalMemSize;
    _name = computeUnit->name() + ".GlobalMemPipeline";
}

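// Advance the pipeline by one cycle: write back the next ready memory
// response to the VRF, and issue the oldest queued request to the memory
// system if the in-flight load/store queues have room.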
void
GlobalMemPipeline::exec()
{
    // apply any returned global memory operations
    GPUDynInstPtr m = getNextReadyResp();

    bool accessVrf = true;
    Wavefront *w = nullptr;

    // check the VRF to see if the operands of a load (or load component
    // of an atomic) are accessible
    if ((m) && (m->isLoad() || m->isAtomicRet())) {
        w = m->wavefront();

        accessVrf =
            w->computeUnit->vrf[w->simdId]->
                vrfOperandAccessReady(m->seqNum(), w, m, VrfAccessType::WRITE);
    }

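    // the writeback can proceed only once the response's latency has
    // elapsed, the GM-to-VRF bus is free, the VRF can accept the register
    // writes, and every lane's memory request for this instruction has
    // completed (statusBitVector is clear)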
    if (m && m->latency.rdy() && computeUnit->glbMemToVrfBus.rdy() &&
        accessVrf && m->statusBitVector == VectorMask(0) &&
        (computeUnit->shader->coissue_return ||
         computeUnit->wfWait.at(m->pipeId).rdy())) {

        w = m->wavefront();

        m->completeAcc(m);

        completeRequest(m);

        // Decrement the wavefront's count of outstanding memory requests
        computeUnit->shader->ScheduleAdd(&w->outstandingReqs, m->time, -1);

        if (m->isStore() || m->isAtomic()) {
            computeUnit->shader->ScheduleAdd(&w->outstandingReqsWrGm,
                                             m->time, -1);
        }

        if (m->isLoad() || m->isAtomic()) {
            computeUnit->shader->ScheduleAdd(&w->outstandingReqsRdGm,
                                             m->time, -1);
        }

        // Mark write bus busy for appropriate amount of time
        computeUnit->glbMemToVrfBus.set(m->time);
        if (!computeUnit->shader->coissue_return)
            w->computeUnit->wfWait.at(m->pipeId).set(m->time);
    }

    // If a global memory instruction is waiting in this pipeline, take the
    // oldest one and, provided there is room in the in-flight load/store
    // queues, initiate its memory accesses (packets are first sent to the
    // DTLB for translation)
    if (!gmIssuedRequests.empty()) {
        GPUDynInstPtr mp = gmIssuedRequests.front();
        if (mp->isLoad() || mp->isAtomic()) {
            if (inflightLoads >= gmQueueSize) {
                return;
            } else {
                ++inflightLoads;
            }
        } else if (mp->isStore()) {
            if (inflightStores >= gmQueueSize) {
                return;
            } else {
                ++inflightStores;
            }
        }

        mp->initiateAcc(mp);

        if (!outOfOrderDataDelivery && !mp->isMemFence()) {
            /**
             * if we are not in out-of-order data delivery mode
             * then we keep the responses sorted in program order.
             * in order to do so we must reserve an entry in the
             * resp buffer before we issue the request to the mem
             * system. mem fence requests will not be stored here
             * because once they are issued from the GM pipeline,
             * they do not send any response back to it.
             */
            gmOrderedRespBuffer.insert(std::make_pair(mp->seqNum(),
                std::make_pair(mp, false)));
        }

        gmIssuedRequests.pop();

        DPRINTF(GPUMem, "CU%d: WF[%d][%d] Popping 0 mem_op = \n",
                computeUnit->cu_id, mp->simdId, mp->wfSlotId);
    }
}

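// Return the next response that is ready to be written back to the VRF,
// or nullptr if none is ready. In in-order mode this is the oldest entry
// in gmOrderedRespBuffer (which is ordered by sequence number), and only
// once its data has returned; in out-of-order mode it is simply the front
// of the returned-load (or returned-store) FIFO.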
GPUDynInstPtr
GlobalMemPipeline::getNextReadyResp()
{
    if (outOfOrderDataDelivery) {
        if (!gmReturnedLoads.empty()) {
            return gmReturnedLoads.front();
        } else if (!gmReturnedStores.empty()) {
            return gmReturnedStores.front();
        }
    } else {
        if (!gmOrderedRespBuffer.empty()) {
            auto mem_req = gmOrderedRespBuffer.begin();

            if (mem_req->second.second) {
                return mem_req->second.first;
            }
        }
    }

    return nullptr;
}

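// Retire a completed request: decrement the in-flight load/store counters
// and remove the instruction from the response queue (the returned-load or
// returned-store FIFO in out-of-order mode, the ordered response buffer
// otherwise).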
void
GlobalMemPipeline::completeRequest(GPUDynInstPtr gpuDynInst)
{
    if (gpuDynInst->isLoad() || gpuDynInst->isAtomic()) {
        assert(inflightLoads > 0);
        --inflightLoads;
    } else if (gpuDynInst->isStore()) {
        assert(inflightStores > 0);
        --inflightStores;
    }

    if (outOfOrderDataDelivery) {
        if (gpuDynInst->isLoad() || gpuDynInst->isAtomic()) {
            assert(!gmReturnedLoads.empty());
            gmReturnedLoads.pop();
        } else if (gpuDynInst->isStore()) {
            assert(!gmReturnedStores.empty());
            gmReturnedStores.pop();
        }
    } else {
        // we should only pop the oldest request, and it
        // should be marked as done if we are here
        assert(gmOrderedRespBuffer.begin()->first == gpuDynInst->seqNum());
        assert(gmOrderedRespBuffer.begin()->second.first == gpuDynInst);
        assert(gmOrderedRespBuffer.begin()->second.second);
        // remove this instruction from the buffer by its
        // unique seq ID
        gmOrderedRespBuffer.erase(gpuDynInst->seqNum());
    }
}

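// Enqueue an instruction coming from the execute stage; its memory
// accesses are initiated later in exec(), once queue space is available.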
void
GlobalMemPipeline::issueRequest(GPUDynInstPtr gpuDynInst)
{
    gmIssuedRequests.push(gpuDynInst);
}

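// Called when the memory system returns a response for this instruction:
// in out-of-order mode the instruction is pushed onto the appropriate
// returned-load/returned-store FIFO; in in-order mode its reserved entry
// in the ordered response buffer is marked as done.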
void
GlobalMemPipeline::handleResponse(GPUDynInstPtr gpuDynInst)
{
    if (outOfOrderDataDelivery) {
        if (gpuDynInst->isLoad() || gpuDynInst->isAtomic()) {
            assert(isGMLdRespFIFOWrRdy());
            gmReturnedLoads.push(gpuDynInst);
        } else {
            assert(isGMStRespFIFOWrRdy());
            gmReturnedStores.push(gpuDynInst);
        }
    } else {
        auto mem_req = gmOrderedRespBuffer.find(gpuDynInst->seqNum());
        // if we are getting a response for this mem request,
        // then it ought to already be in the ordered response
        // buffer
        assert(mem_req != gmOrderedRespBuffer.end());
        mem_req->second.second = true;
    }
}

void
GlobalMemPipeline::regStats()
{
    loadVrfBankConflictCycles
        .name(name() + ".load_vrf_bank_conflict_cycles")
        .desc("total number of cycles GM data are delayed before updating "
              "the VRF")
        ;
}