/*
 * Copyright (c) 2011-2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * For use for simulation and test purposes only
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: John Kalamatianos, Anthony Gutierrez
 */
#include "gpu-compute/compute_unit.hh"

#include <limits>

#include "base/output.hh"
#include "debug/GPUDisp.hh"
#include "debug/GPUExec.hh"
#include "debug/GPUFetch.hh"
#include "debug/GPUMem.hh"
#include "debug/GPUPort.hh"
#include "debug/GPUPrefetch.hh"
#include "debug/GPUSync.hh"
#include "debug/GPUTLB.hh"
#include "gpu-compute/dispatcher.hh"
#include "gpu-compute/gpu_dyn_inst.hh"
#include "gpu-compute/gpu_static_inst.hh"
#include "gpu-compute/ndrange.hh"
#include "gpu-compute/shader.hh"
#include "gpu-compute/simple_pool_manager.hh"
#include "gpu-compute/vector_register_file.hh"
#include "gpu-compute/wavefront.hh"
#include "mem/page_table.hh"
#include "sim/process.hh"

ComputeUnit::ComputeUnit(const Params *p) : MemObject(p), fetchStage(p),
    scoreboardCheckStage(p), scheduleStage(p), execStage(p),
    globalMemoryPipe(p), localMemoryPipe(p), rrNextMemID(0), rrNextALUWp(0),
    cu_id(p->cu_id), vrf(p->vector_register_file), numSIMDs(p->num_SIMDs),
    spBypassPipeLength(p->spbypass_pipe_length),
    dpBypassPipeLength(p->dpbypass_pipe_length),
    issuePeriod(p->issue_period),
    numGlbMemUnits(p->num_global_mem_pipes),
    numLocMemUnits(p->num_shared_mem_pipes),
    perLaneTLB(p->perLaneTLB), prefetchDepth(p->prefetch_depth),
    prefetchStride(p->prefetch_stride), prefetchType(p->prefetch_prev_type),
    xact_cas_mode(p->xactCasMode), debugSegFault(p->debugSegFault),
    functionalTLB(p->functionalTLB), localMemBarrier(p->localMemBarrier),
    countPages(p->countPages), barrier_id(0),
    vrfToCoalescerBusWidth(p->vrf_to_coalescer_bus_width),
    coalescerToVrfBusWidth(p->coalescer_to_vrf_bus_width),
    req_tick_latency(p->mem_req_latency * p->clk_domain->clockPeriod()),
    resp_tick_latency(p->mem_resp_latency * p->clk_domain->clockPeriod()),
    _masterId(p->system->getMasterId(name() + ".ComputeUnit")),
    lds(*p->localDataStore), globalSeqNum(0), wavefrontSize(p->wfSize),
    kernelLaunchInst(new KernelLaunchStaticInst())
{
    /**
     * This check is necessary because std::bitset only provides conversion
     * to unsigned long or unsigned long long via to_ulong() or to_ullong().
     * There are a few places in the code where to_ullong() is used, however
     * if VSZ is larger than a value the host can support then bitset will
     * throw a runtime exception. We should remove all use of to_ulong() or
     * to_ullong() so we can have VSZ greater than 64b; until that is done
     * this check is required.
     */
    fatal_if(p->wfSize > std::numeric_limits<unsigned long long>::digits ||
             p->wfSize <= 0,
             "WF size is larger than the host can support");
    fatal_if(!isPowerOf2(wavefrontSize),
             "Wavefront size should be a power of 2");
    // calculate how many cycles a vector load or store will need to transfer
    // its data over the corresponding buses
    numCyclesPerStoreTransfer =
        (uint32_t)ceil((double)(wfSize() * sizeof(uint32_t)) /
                       (double)vrfToCoalescerBusWidth);

    numCyclesPerLoadTransfer = (wfSize() * sizeof(uint32_t))
                               / coalescerToVrfBusWidth;
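
    // A worked example of the two formulas above (illustrative values, not
    // gem5 defaults): 64 lanes moving 4 bytes each is 256 bytes; over a
    // 32-byte VRF->coalescer bus a store takes ceil(256 / 32) = 8 cycles,
    // and over an equally wide coalescer->VRF bus a load takes
    // 256 / 32 = 8 cycles.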

    lastVaddrWF.resize(numSIMDs);
    wfList.resize(numSIMDs);

    for (int j = 0; j < numSIMDs; ++j) {
        lastVaddrWF[j].resize(p->n_wf);

        for (int i = 0; i < p->n_wf; ++i) {
            lastVaddrWF[j][i].resize(wfSize());

            wfList[j].push_back(p->wavefronts[j * p->n_wf + i]);
            wfList[j][i]->setParent(this);

            for (int k = 0; k < wfSize(); ++k) {
                lastVaddrWF[j][i][k] = 0;
            }
        }
    }

    lastVaddrSimd.resize(numSIMDs);

    for (int i = 0; i < numSIMDs; ++i) {
        lastVaddrSimd[i].resize(wfSize(), 0);
    }

    lastVaddrCU.resize(wfSize());

    lds.setParent(this);

    if (p->execPolicy == "OLDEST-FIRST") {
        exec_policy = EXEC_POLICY::OLDEST;
    } else if (p->execPolicy == "ROUND-ROBIN") {
        exec_policy = EXEC_POLICY::RR;
    } else {
        fatal("Invalid WF execution policy (CU)\n");
    }

    memPort.resize(wfSize());

    // resize the tlbPort vector
    int tlbPort_width = perLaneTLB ? wfSize() : 1;
    tlbPort.resize(tlbPort_width);
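    // With perLaneTLB each lane gets its own port into the TLB hierarchy;
    // otherwise all lanes share a single port (port 0; see the
    // tlbPort_index selection in sendRequest()).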

    cuExitCallback = new CUExitCallback(this);
    registerExitCallback(cuExitCallback);

    xactCasLoadMap.clear();
    lastExecCycle.resize(numSIMDs, 0);

    for (int i = 0; i < vrf.size(); ++i) {
        vrf[i]->setParent(this);
    }

    numVecRegsPerSimd = vrf[0]->numRegs();
}

ComputeUnit::~ComputeUnit()
{
    // Delete wavefront slots
    for (int j = 0; j < numSIMDs; ++j) {
        for (int i = 0; i < shader->n_wf; ++i) {
            delete wfList[j][i];
        }
        lastVaddrSimd[j].clear();
    }
    lastVaddrCU.clear();
    readyList.clear();
    waveStatusList.clear();
    dispatchList.clear();
    vectorAluInstAvail.clear();
    delete cuExitCallback;
    delete ldsPort;
}

void
ComputeUnit::fillKernelState(Wavefront *w, NDRange *ndr)
{
    w->resizeRegFiles(ndr->q.cRegCount, ndr->q.sRegCount, ndr->q.dRegCount);

    w->workGroupSz[0] = ndr->q.wgSize[0];
    w->workGroupSz[1] = ndr->q.wgSize[1];
    w->workGroupSz[2] = ndr->q.wgSize[2];
    w->wgSz = w->workGroupSz[0] * w->workGroupSz[1] * w->workGroupSz[2];
    w->gridSz[0] = ndr->q.gdSize[0];
    w->gridSz[1] = ndr->q.gdSize[1];
    w->gridSz[2] = ndr->q.gdSize[2];
    w->kernelArgs = ndr->q.args;
    w->privSizePerItem = ndr->q.privMemPerItem;
    w->spillSizePerItem = ndr->q.spillMemPerItem;
    w->roBase = ndr->q.roMemStart;
    w->roSize = ndr->q.roMemTotal;
    w->computeActualWgSz(ndr);
}

void
ComputeUnit::updateEvents()
{
    if (!timestampVec.empty()) {
        uint32_t vecSize = timestampVec.size();
        uint32_t i = 0;
        while (i < vecSize) {
            if (timestampVec[i] <= shader->tick_cnt) {
                std::pair<uint32_t, uint32_t> regInfo = regIdxVec[i];
                vrf[regInfo.first]->markReg(regInfo.second, sizeof(uint32_t),
                                            statusVec[i]);
                timestampVec.erase(timestampVec.begin() + i);
                regIdxVec.erase(regIdxVec.begin() + i);
                statusVec.erase(statusVec.begin() + i);
                --vecSize;
                --i;
            }
            ++i;
        }
    }

    for (int i = 0; i < numSIMDs; ++i) {
        vrf[i]->updateEvents();
    }
}


void
ComputeUnit::startWavefront(Wavefront *w, int waveId, LdsChunk *ldsChunk,
                            NDRange *ndr)
{
    static int _n_wave = 0;

    VectorMask init_mask;
    init_mask.reset();

    for (int k = 0; k < wfSize(); ++k) {
        if (k + waveId * wfSize() < w->actualWgSzTotal)
            init_mask[k] = 1;
    }

    w->kernId = ndr->dispatchId;
    w->wfId = waveId;
    w->initMask = init_mask.to_ullong();

    for (int k = 0; k < wfSize(); ++k) {
        w->workItemId[0][k] = (k + waveId * wfSize()) % w->actualWgSz[0];
        w->workItemId[1][k] = ((k + waveId * wfSize()) / w->actualWgSz[0]) %
                              w->actualWgSz[1];
        w->workItemId[2][k] = (k + waveId * wfSize()) /
                              (w->actualWgSz[0] * w->actualWgSz[1]);

        w->workItemFlatId[k] = w->workItemId[2][k] * w->actualWgSz[0] *
            w->actualWgSz[1] + w->workItemId[1][k] * w->actualWgSz[0] +
            w->workItemId[0][k];
    }
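
    // Example of the mapping above (hypothetical sizes): with wfSize() == 4
    // and actualWgSz == {4, 2, 1}, wave 0 covers work-items (0,0,0)..(3,0,0)
    // and wave 1 covers (0,1,0)..(3,1,0); workItemFlatId linearizes these as
    // z * (X * Y) + y * X + x.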

    w->barrierSlots = divCeil(w->actualWgSzTotal, wfSize());
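    // divCeil rounds up: a 100-item workgroup on 64-wide wavefronts
    // (hypothetical sizes) contributes divCeil(100, 64) == 2 barrier slots.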

    w->barCnt.resize(wfSize(), 0);

    w->maxBarCnt = 0;
    w->oldBarrierCnt = 0;
    w->barrierCnt = 0;

    w->privBase = ndr->q.privMemStart;
    ndr->q.privMemStart += ndr->q.privMemPerItem * wfSize();

    w->spillBase = ndr->q.spillMemStart;
    ndr->q.spillMemStart += ndr->q.spillMemPerItem * wfSize();

    w->pushToReconvergenceStack(0, UINT32_MAX, init_mask.to_ulong());

    // WG state
    w->wgId = ndr->globalWgId;
    w->dispatchId = ndr->dispatchId;
    w->workGroupId[0] = w->wgId % ndr->numWg[0];
    w->workGroupId[1] = (w->wgId / ndr->numWg[0]) % ndr->numWg[1];
    w->workGroupId[2] = w->wgId / (ndr->numWg[0] * ndr->numWg[1]);
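    // E.g. (hypothetical grid): with numWg == {4, 2, 2}, a flat wgId of 5
    // delinearizes to workGroupId (1, 1, 0), since 5 % 4 == 1,
    // (5 / 4) % 2 == 1 and 5 / (4 * 2) == 0.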

    w->barrierId = barrier_id;
    w->stalledAtBarrier = false;

    // set the wavefront context to have a pointer to this section of the LDS
    w->ldsChunk = ldsChunk;

    int32_t refCount M5_VAR_USED =
        lds.increaseRefCounter(w->dispatchId, w->wgId);
    DPRINTF(GPUDisp, "CU%d: increase ref ctr wg[%d] to [%d]\n",
            cu_id, w->wgId, refCount);

    w->instructionBuffer.clear();

    if (w->pendingFetch)
        w->dropFetch = true;

    // is this the last wavefront in the workgroup?
    // if so, set the spillWidth to the remaining work-items
    // so that the vector access is correct
    if ((waveId + 1) * wfSize() >= w->actualWgSzTotal) {
        w->spillWidth = w->actualWgSzTotal - (waveId * wfSize());
    } else {
        w->spillWidth = wfSize();
    }

    DPRINTF(GPUDisp, "Scheduling wfDynId/barrier_id %d/%d on CU%d: "
            "WF[%d][%d]\n", _n_wave, barrier_id, cu_id, w->simdId, w->wfSlotId);

    w->start(++_n_wave, ndr->q.code_ptr);
}

void
ComputeUnit::StartWorkgroup(NDRange *ndr)
{
    // reserve the LDS capacity allocated to the work group
    // disambiguated by the dispatch ID and workgroup ID, which should be
    // globally unique
    LdsChunk *ldsChunk = lds.reserveSpace(ndr->dispatchId, ndr->globalWgId,
                                          ndr->q.ldsSize);

    // Send L1 cache acquire
    // isKernel + isAcquire = Kernel Begin
    if (shader->impl_kern_boundary_sync) {
        GPUDynInstPtr gpuDynInst =
            std::make_shared<GPUDynInst>(this, nullptr, kernelLaunchInst,
                                         getAndIncSeqNum());

        gpuDynInst->useContinuation = false;
        injectGlobalMemFence(gpuDynInst, true);
    }

    // calculate the number of 32-bit vector registers required by the
    // wavefront; each 64-bit d-register consumes two of them
    int vregDemand = ndr->q.sRegCount + (2 * ndr->q.dRegCount);
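    // E.g. (hypothetical counts): a kernel declaring 10 32-bit s-registers
    // and 4 64-bit d-registers demands 10 + 2 * 4 = 18 VGPRs.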
    int wave_id = 0;

    // Assign WFs by spreading them across SIMDs, 1 WF per SIMD at a time
    for (int m = 0; m < shader->n_wf * numSIMDs; ++m) {
        Wavefront *w = wfList[m % numSIMDs][m / numSIMDs];
        // Check if this wavefront slot is available:
        // it must be stopped and not waiting
        // for a release to complete (S_RETURNING)
        if (w->status == Wavefront::S_STOPPED) {
            fillKernelState(w, ndr);
            // if we have scheduled all work items then stop
            // scheduling wavefronts
            if (wave_id * wfSize() >= w->actualWgSzTotal)
                break;

            // reserve vector registers for the scheduled wavefront
            assert(vectorRegsReserved[m % numSIMDs] <= numVecRegsPerSimd);
            uint32_t normSize = 0;

            w->startVgprIndex = vrf[m % numSIMDs]->manager->
                allocateRegion(vregDemand, &normSize);

            w->reservedVectorRegs = normSize;
            vectorRegsReserved[m % numSIMDs] += w->reservedVectorRegs;

            startWavefront(w, wave_id, ldsChunk, ndr);
            ++wave_id;
        }
    }
    ++barrier_id;
}

int
ComputeUnit::ReadyWorkgroup(NDRange *ndr)
{
    // Get true size of workgroup (after clamping to grid size)
    int trueWgSize[3];
    int trueWgSizeTotal = 1;

    for (int d = 0; d < 3; ++d) {
        trueWgSize[d] = std::min(ndr->q.wgSize[d], ndr->q.gdSize[d] -
                                 ndr->wgId[d] * ndr->q.wgSize[d]);

        trueWgSizeTotal *= trueWgSize[d];
        DPRINTF(GPUDisp, "trueWgSize[%d] = %d\n", d, trueWgSize[d]);
    }

    DPRINTF(GPUDisp, "trueWgSizeTotal = %d\n", trueWgSizeTotal);

    // calculate the number of 32-bit vector registers required by each
    // work item of the work group
    int vregDemandPerWI = ndr->q.sRegCount + (2 * ndr->q.dRegCount);
    bool vregAvail = true;
    int numWfs = (trueWgSizeTotal + wfSize() - 1) / wfSize();
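    // The expression above is a ceiling division: e.g. (hypothetical sizes)
    // a 257-item workgroup on 64-lane wavefronts needs
    // (257 + 64 - 1) / 64 == 5 WFs.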
    int freeWfSlots = 0;
    // check that the total number of VGPRs required by all WFs of the WG
    // fits in the VRFs of all SIMD units
    assert((numWfs * vregDemandPerWI) <= (numSIMDs * numVecRegsPerSimd));
    int numMappedWfs = 0;
    std::vector<int> numWfsPerSimd;
    numWfsPerSimd.resize(numSIMDs, 0);
    // find how many free WF slots we have across all SIMDs
    for (int j = 0; j < shader->n_wf; ++j) {
        for (int i = 0; i < numSIMDs; ++i) {
            if (wfList[i][j]->status == Wavefront::S_STOPPED) {
                // count the number of free WF slots
                ++freeWfSlots;
                if (numMappedWfs < numWfs) {
                    // count the WFs to be assigned per SIMD
                    numWfsPerSimd[i]++;
                }
                numMappedWfs++;
            }
        }
    }

    // if there are enough free WF slots then find if there are enough
    // free VGPRs per SIMD based on the WF->SIMD mapping
    if (freeWfSlots >= numWfs) {
        for (int j = 0; j < numSIMDs; ++j) {
            // find if there are enough free VGPR regions in the SIMD's VRF
            // to accommodate the WFs of the new WG that would be mapped to
            // this SIMD unit
            vregAvail = vrf[j]->manager->canAllocate(numWfsPerSimd[j],
                                                     vregDemandPerWI);

            // stop searching if there is at least one SIMD
            // whose VRF does not have enough free VGPR pools.
            // This is because a WG is scheduled only if ALL
            // of its WFs can be scheduled
            if (!vregAvail)
                break;
        }
    }

    DPRINTF(GPUDisp, "Free WF slots = %d, VGPR Availability = %d\n",
            freeWfSlots, vregAvail);

    if (!vregAvail) {
        ++numTimesWgBlockedDueVgprAlloc;
    }

    // track how often WGs are blocked because the LDS reservation would fail
    if (!lds.canReserve(ndr->q.ldsSize)) {
        wgBlockedDueLdsAllocation++;
    }

    // Return true if (a) there are enough free WF slots to submit the
    // workgroup, (b) there are enough VGPRs to schedule all WFs to their
    // SIMD units, and (c) there is enough space in LDS
    return freeWfSlots >= numWfs && vregAvail && lds.canReserve(ndr->q.ldsSize);
}

int
ComputeUnit::AllAtBarrier(uint32_t _barrier_id, uint32_t bcnt, uint32_t bslots)
{
    DPRINTF(GPUSync, "CU%d: Checking for All At Barrier\n", cu_id);
    int ccnt = 0;

    for (int i_simd = 0; i_simd < numSIMDs; ++i_simd) {
        for (int i_wf = 0; i_wf < shader->n_wf; ++i_wf) {
            Wavefront *w = wfList[i_simd][i_wf];

            if (w->status == Wavefront::S_RUNNING) {
                DPRINTF(GPUSync, "Checking WF[%d][%d]\n", i_simd, i_wf);

                DPRINTF(GPUSync, "wf->barrier_id = %d, _barrier_id = %d\n",
                        w->barrierId, _barrier_id);

                DPRINTF(GPUSync, "wf->barrier_cnt %d, bcnt = %d\n",
                        w->barrierCnt, bcnt);
            }

            if (w->status == Wavefront::S_RUNNING &&
                w->barrierId == _barrier_id && w->barrierCnt == bcnt &&
                !w->outstandingReqs) {
                ++ccnt;

                DPRINTF(GPUSync, "WF[%d][%d] at barrier, increment ccnt to "
                        "%d\n", i_simd, i_wf, ccnt);
            }
        }
    }

    DPRINTF(GPUSync, "CU%d: returning allAtBarrier ccnt = %d, bslots = %d\n",
            cu_id, ccnt, bslots);

    return ccnt == bslots;
}

// Check if the current wavefront is blocked on additional resources.
bool
ComputeUnit::cedeSIMD(int simdId, int wfSlotId)
{
    bool cede = false;

    // If --xact-cas-mode option is enabled in run.py, then xact_cas_ld
    // magic instructions will impact the scheduling of wavefronts
    if (xact_cas_mode) {
        /*
         * When a wavefront calls xact_cas_ld, it adds itself to a per address
         * queue. All per address queues are managed by the xactCasLoadMap.
         *
         * A wavefront is not blocked if: it is not in ANY per address queue
         * or if it is at the head of a per address queue.
         */
        for (auto itMap : xactCasLoadMap) {
            std::list<waveIdentifier> curWaveIDQueue = itMap.second.waveIDQueue;

            if (!curWaveIDQueue.empty()) {
                for (auto it : curWaveIDQueue) {
                    waveIdentifier cur_wave = it;

                    if (cur_wave.simdId == simdId &&
                        cur_wave.wfSlotId == wfSlotId) {
                        // 2 possibilities
                        // 1: this WF has a green light
                        // 2: another WF has a green light
                        waveIdentifier owner_wave = curWaveIDQueue.front();

                        if (owner_wave.simdId != cur_wave.simdId ||
                            owner_wave.wfSlotId != cur_wave.wfSlotId) {
                            // possibility 2
                            cede = true;
                            break;
                        } else {
                            // possibility 1
                            break;
                        }
                    }
                }
            }
        }
    }

    return cede;
}

// Execute one clock worth of work on the ComputeUnit.
void
ComputeUnit::exec()
{
    updateEvents();
    // Execute pipeline stages in reverse order to simulate the pipeline
    // latency: a downstream stage runs before its upstream stage updates
    // its output, so each stage consumes what was produced a cycle earlier.
    globalMemoryPipe.exec();
    localMemoryPipe.exec();
    execStage.exec();
    scheduleStage.exec();
    scoreboardCheckStage.exec();
    fetchStage.exec();

    totalCycles++;
}

void
ComputeUnit::init()
{
    // Initialize CU Bus models
    glbMemToVrfBus.init(&shader->tick_cnt, shader->ticks(1));
    locMemToVrfBus.init(&shader->tick_cnt, shader->ticks(1));
    nextGlbMemBus = 0;
    nextLocMemBus = 0;
    fatal_if(numGlbMemUnits > 1,
             "No support for multiple Global Memory Pipelines exists!!!");
    vrfToGlobalMemPipeBus.resize(numGlbMemUnits);
    for (int j = 0; j < numGlbMemUnits; ++j) {
        vrfToGlobalMemPipeBus[j] = WaitClass();
        vrfToGlobalMemPipeBus[j].init(&shader->tick_cnt, shader->ticks(1));
    }

    fatal_if(numLocMemUnits > 1,
             "No support for multiple Local Memory Pipelines exists!!!");
    vrfToLocalMemPipeBus.resize(numLocMemUnits);
    for (int j = 0; j < numLocMemUnits; ++j) {
        vrfToLocalMemPipeBus[j] = WaitClass();
        vrfToLocalMemPipeBus[j].init(&shader->tick_cnt, shader->ticks(1));
    }
    vectorRegsReserved.resize(numSIMDs, 0);
    aluPipe.resize(numSIMDs);
    wfWait.resize(numSIMDs + numLocMemUnits + numGlbMemUnits);

    for (int i = 0; i < numSIMDs + numLocMemUnits + numGlbMemUnits; ++i) {
        wfWait[i] = WaitClass();
        wfWait[i].init(&shader->tick_cnt, shader->ticks(1));
    }

    for (int i = 0; i < numSIMDs; ++i) {
        aluPipe[i] = WaitClass();
        aluPipe[i].init(&shader->tick_cnt, shader->ticks(1));
    }

    // Setup space for call args
    for (int j = 0; j < numSIMDs; ++j) {
        for (int i = 0; i < shader->n_wf; ++i) {
            wfList[j][i]->initCallArgMem(shader->funcargs_size, wavefrontSize);
        }
    }

    // Initializing pipeline resources
    readyList.resize(numSIMDs + numGlbMemUnits + numLocMemUnits);
    waveStatusList.resize(numSIMDs);

    for (int j = 0; j < numSIMDs; ++j) {
        for (int i = 0; i < shader->n_wf; ++i) {
            waveStatusList[j].push_back(
                std::make_pair(wfList[j][i], BLOCKED));
        }
    }

    for (int j = 0; j < (numSIMDs + numGlbMemUnits + numLocMemUnits); ++j) {
        dispatchList.push_back(std::make_pair((Wavefront*)nullptr, EMPTY));
    }
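
    // readyList and dispatchList hold one entry per execution resource;
    // the index layout assumed here is the SIMD units first, followed by
    // the global and then the local memory pipelines.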

    fetchStage.init(this);
    scoreboardCheckStage.init(this);
    scheduleStage.init(this);
    execStage.init(this);
    globalMemoryPipe.init(this);
    localMemoryPipe.init(this);
    // initialize state for statistics calculation
    vectorAluInstAvail.resize(numSIMDs, false);
    shrMemInstAvail = 0;
    glbMemInstAvail = 0;
}

bool
ComputeUnit::DataPort::recvTimingResp(PacketPtr pkt)
{
    // Ruby has completed the memory op. Schedule the mem_resp_event at the
    // appropriate cycle to process the timing memory response.
    // This delay represents the pipeline delay.
    SenderState *sender_state = safe_cast<SenderState*>(pkt->senderState);
    int index = sender_state->port_index;
    GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;

    // Is the returned packet a Kernel End or Barrier?
    if (pkt->req->isKernel() && pkt->req->isRelease()) {
        Wavefront *w =
            computeUnit->wfList[gpuDynInst->simdId][gpuDynInst->wfSlotId];

        // Check if we are waiting on Kernel End Release
        if (w->status == Wavefront::S_RETURNING) {
            DPRINTF(GPUDisp, "CU%d: WF[%d][%d][wv=%d]: WG id completed %d\n",
                    computeUnit->cu_id, w->simdId, w->wfSlotId,
                    w->wfDynId, w->kernId);

            computeUnit->shader->dispatcher->notifyWgCompl(w);
            w->status = Wavefront::S_STOPPED;
        } else {
            w->outstandingReqs--;
        }

        DPRINTF(GPUSync, "CU%d: WF[%d][%d]: barrier_cnt = %d\n",
                computeUnit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId, w->barrierCnt);

        if (gpuDynInst->useContinuation) {
            assert(!gpuDynInst->isNoScope());
            gpuDynInst->execContinuation(gpuDynInst->staticInstruction(),
                                         gpuDynInst);
        }

        delete pkt->senderState;
        delete pkt->req;
        delete pkt;
        return true;
    } else if (pkt->req->isKernel() && pkt->req->isAcquire()) {
        if (gpuDynInst->useContinuation) {
            assert(!gpuDynInst->isNoScope());
            gpuDynInst->execContinuation(gpuDynInst->staticInstruction(),
                                         gpuDynInst);
        }

        delete pkt->senderState;
        delete pkt->req;
        delete pkt;
        return true;
    }

    ComputeUnit::DataPort::MemRespEvent *mem_resp_event =
        new ComputeUnit::DataPort::MemRespEvent(computeUnit->memPort[index],
                                                pkt);

    DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x received!\n",
            computeUnit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
            index, pkt->req->getPaddr());

    computeUnit->schedule(mem_resp_event,
                          curTick() + computeUnit->resp_tick_latency);
    return true;
}

void
ComputeUnit::DataPort::recvReqRetry()
{
    int len = retries.size();

    assert(len > 0);

    for (int i = 0; i < len; ++i) {
        PacketPtr pkt = retries.front().first;
        GPUDynInstPtr gpuDynInst M5_VAR_USED = retries.front().second;
        DPRINTF(GPUMem, "CU%d: WF[%d][%d]: retry mem inst addr %#x\n",
                computeUnit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
                pkt->req->getPaddr());

        /** Currently Ruby can return false due to conflicts for the particular
         *  cache block or address. Thus other requests should be allowed to
         *  pass and the data port should expect multiple retries. */
        if (!sendTimingReq(pkt)) {
            DPRINTF(GPUMem, "failed again!\n");
            break;
        } else {
            DPRINTF(GPUMem, "successful!\n");
            retries.pop_front();
        }
    }
}

bool
ComputeUnit::SQCPort::recvTimingResp(PacketPtr pkt)
{
    computeUnit->fetchStage.processFetchReturn(pkt);

    return true;
}

void
ComputeUnit::SQCPort::recvReqRetry()
{
    int len = retries.size();

    assert(len > 0);

    for (int i = 0; i < len; ++i) {
        PacketPtr pkt = retries.front().first;
        Wavefront *wavefront M5_VAR_USED = retries.front().second;
        DPRINTF(GPUFetch, "CU%d: WF[%d][%d]: retrying FETCH addr %#x\n",
                computeUnit->cu_id, wavefront->simdId, wavefront->wfSlotId,
                pkt->req->getPaddr());
        if (!sendTimingReq(pkt)) {
            DPRINTF(GPUFetch, "failed again!\n");
            break;
        } else {
            DPRINTF(GPUFetch, "successful!\n");
            retries.pop_front();
        }
    }
}

void
ComputeUnit::sendRequest(GPUDynInstPtr gpuDynInst, int index, PacketPtr pkt)
{
    // There must be a way around this check to do the globalMemStart...
    Addr tmp_vaddr = pkt->req->getVaddr();

    updatePageDivergenceDist(tmp_vaddr);

    pkt->req->setVirt(pkt->req->getAsid(), tmp_vaddr, pkt->req->getSize(),
                      pkt->req->getFlags(), pkt->req->masterId(),
                      pkt->req->getPC());

    // figure out the type of the request to set read/write
    BaseTLB::Mode TLB_mode;
    assert(pkt->isRead() || pkt->isWrite());

    // Check write before read for atomic operations
    // since atomic operations should use BaseTLB::Write
    if (pkt->isWrite()) {
        TLB_mode = BaseTLB::Write;
    } else if (pkt->isRead()) {
        TLB_mode = BaseTLB::Read;
    } else {
        fatal("pkt is not a read nor a write\n");
    }

    tlbCycles -= curTick();
    ++tlbRequests;

    int tlbPort_index = perLaneTLB ? index : 0;

    if (shader->timingSim) {
        if (debugSegFault) {
            Process *p = shader->gpuTc->getProcessPtr();
            Addr vaddr = pkt->req->getVaddr();
            unsigned size = pkt->getSize();
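
            // The modulo comparison below flags an access that straddles a
            // 64-byte boundary: if the offset of the last byte within its
            // 64-byte block is smaller than that of the first byte, the
            // access wrapped into the next block.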
            if ((vaddr + size - 1) % 64 < vaddr % 64) {
                panic("CU%d: WF[%d][%d]: Access to addr %#x is unaligned!\n",
                      cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, vaddr);
            }

            Addr paddr;

            if (!p->pTable->translate(vaddr, paddr)) {
                if (!p->fixupStackFault(vaddr)) {
                    panic("CU%d: WF[%d][%d]: Fault on addr %#x!\n",
                          cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
                          vaddr);
                }
            }
        }

        // This is the SenderState needed upon return
        pkt->senderState = new DTLBPort::SenderState(gpuDynInst, index);

        // This is the senderState needed by the TLB hierarchy to function
        TheISA::GpuTLB::TranslationState *translation_state =
            new TheISA::GpuTLB::TranslationState(TLB_mode, shader->gpuTc, false,
                                                 pkt->senderState);

        pkt->senderState = translation_state;

        if (functionalTLB) {
            tlbPort[tlbPort_index]->sendFunctional(pkt);

            // update the hitLevel distribution
            int hit_level = translation_state->hitLevel;
            assert(hit_level != -1);
            hitsPerTLBLevel[hit_level]++;

            // New SenderState for the memory access
            X86ISA::GpuTLB::TranslationState *sender_state =
                safe_cast<X86ISA::GpuTLB::TranslationState*>(pkt->senderState);

            delete sender_state->tlbEntry;
            delete sender_state->saved;
            delete sender_state;

            assert(pkt->req->hasPaddr());
            assert(pkt->req->hasSize());

            uint8_t *tmpData = pkt->getPtr<uint8_t>();

            // this is necessary because the GPU TLB receives packets instead
            // of requests. when the translation is complete, all relevant
            // fields in the request will be populated, but not in the packet.
            // here we create the new packet so we can set the size, addr,
            // and proper flags.
            PacketPtr oldPkt = pkt;
            pkt = new Packet(oldPkt->req, oldPkt->cmd);
            delete oldPkt;
            pkt->dataStatic(tmpData);

            // New SenderState for the memory access
            pkt->senderState =
                new ComputeUnit::DataPort::SenderState(gpuDynInst, index,
                                                       nullptr);

            gpuDynInst->memStatusVector[pkt->getAddr()].push_back(index);
            gpuDynInst->tlbHitLevel[index] = hit_level;

            // translation is done. Schedule the mem_req_event at the
            // appropriate cycle to send the timing memory request to ruby
            ComputeUnit::DataPort::MemReqEvent *mem_req_event =
                new ComputeUnit::DataPort::MemReqEvent(memPort[index], pkt);

            DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x data "
                    "scheduled\n", cu_id, gpuDynInst->simdId,
                    gpuDynInst->wfSlotId, index, pkt->req->getPaddr());

            schedule(mem_req_event, curTick() + req_tick_latency);
        } else if (tlbPort[tlbPort_index]->isStalled()) {
            assert(tlbPort[tlbPort_index]->retries.size() > 0);

            DPRINTF(GPUTLB, "CU%d: WF[%d][%d]: Translation for addr %#x "
                    "failed!\n", cu_id, gpuDynInst->simdId,
                    gpuDynInst->wfSlotId, tmp_vaddr);

            tlbPort[tlbPort_index]->retries.push_back(pkt);
        } else if (!tlbPort[tlbPort_index]->sendTimingReq(pkt)) {
            // Stall the data port;
            // No more packets will be issued until
            // ruby indicates resources are freed by
            // a recvReqRetry() call back on this port.
            tlbPort[tlbPort_index]->stallPort();

            DPRINTF(GPUTLB, "CU%d: WF[%d][%d]: Translation for addr %#x "
                    "failed!\n", cu_id, gpuDynInst->simdId,
                    gpuDynInst->wfSlotId, tmp_vaddr);

            tlbPort[tlbPort_index]->retries.push_back(pkt);
        } else {
            DPRINTF(GPUTLB,
                    "CU%d: WF[%d][%d]: Translation for addr %#x sent!\n",
                    cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, tmp_vaddr);
        }
    } else {
        if (pkt->cmd == MemCmd::MemFenceReq) {
            gpuDynInst->statusBitVector = VectorMask(0);
        } else {
            gpuDynInst->statusBitVector &= (~(1ll << index));
        }

        // New SenderState for the memory access
        delete pkt->senderState;

        // Because it's an atomic operation, only the TLB translation state
        // is needed
        pkt->senderState = new TheISA::GpuTLB::TranslationState(TLB_mode,
                                                                shader->gpuTc);

        tlbPort[tlbPort_index]->sendFunctional(pkt);

        // the addr of the packet is not modified, so we need to create a new
        // packet, or otherwise the memory access will have the old virtual
        // address sent in the translation packet, instead of the physical
        // address returned by the translation.
        PacketPtr new_pkt = new Packet(pkt->req, pkt->cmd);
        new_pkt->dataStatic(pkt->getPtr<uint8_t>());

        // Translation is done. It is safe to send the packet to memory.
        memPort[0]->sendFunctional(new_pkt);

        DPRINTF(GPUMem, "CU%d: WF[%d][%d]: index %d: addr %#x\n", cu_id,
                gpuDynInst->simdId, gpuDynInst->wfSlotId, index,
                new_pkt->req->getPaddr());

        // safe_cast the senderState
        TheISA::GpuTLB::TranslationState *sender_state =
            safe_cast<TheISA::GpuTLB::TranslationState*>(pkt->senderState);

        delete sender_state->tlbEntry;
        delete new_pkt;
        delete pkt->senderState;
        delete pkt->req;
        delete pkt;
    }
}

void
ComputeUnit::sendSyncRequest(GPUDynInstPtr gpuDynInst, int index, PacketPtr pkt)
{
    ComputeUnit::DataPort::MemReqEvent *mem_req_event =
        new ComputeUnit::DataPort::MemReqEvent(memPort[index], pkt);

    // New SenderState for the memory access
    pkt->senderState = new ComputeUnit::DataPort::SenderState(gpuDynInst, index,
                                                              nullptr);

    DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x sync scheduled\n",
            cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, index,
            pkt->req->getPaddr());

    schedule(mem_req_event, curTick() + req_tick_latency);
}

void
ComputeUnit::injectGlobalMemFence(GPUDynInstPtr gpuDynInst, bool kernelLaunch,
                                  Request* req)
{
    assert(gpuDynInst->isGlobalSeg());

    if (!req) {
        req = new Request(0, 0, 0, 0, masterId(), 0, gpuDynInst->wfDynId);
    }
    req->setPaddr(0);
    if (kernelLaunch) {
        req->setFlags(Request::KERNEL);
    }

    // for non-kernel MemFence operations, memorder flags are set depending
    // on which type of request is currently being sent, so this
    // should be set by the caller (e.g. if an inst has acq-rel
    // semantics, it will send one acquire req and one release req)
    gpuDynInst->setRequestFlags(req, kernelLaunch);

    // a mem fence must correspond to an acquire/release request
    assert(req->isAcquire() || req->isRelease());

    // create packet
    PacketPtr pkt = new Packet(req, MemCmd::MemFenceReq);

    // set packet's sender state
    pkt->senderState =
        new ComputeUnit::DataPort::SenderState(gpuDynInst, 0, nullptr);

    // send the packet
    sendSyncRequest(gpuDynInst, 0, pkt);
}

const char*
ComputeUnit::DataPort::MemRespEvent::description() const
{
    return "ComputeUnit memory response event";
}

void
ComputeUnit::DataPort::MemRespEvent::process()
{
    DataPort::SenderState *sender_state =
        safe_cast<DataPort::SenderState*>(pkt->senderState);

    GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;
    ComputeUnit *compute_unit = dataPort->computeUnit;

    assert(gpuDynInst);

    DPRINTF(GPUPort, "CU%d: WF[%d][%d]: Response for addr %#x, index %d\n",
            compute_unit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
            pkt->req->getPaddr(), dataPort->index);

    Addr paddr = pkt->req->getPaddr();

    if (pkt->cmd != MemCmd::MemFenceResp) {
        int index = gpuDynInst->memStatusVector[paddr].back();

        DPRINTF(GPUMem, "Response for addr %#x, index %d\n",
                pkt->req->getPaddr(), index);

        gpuDynInst->memStatusVector[paddr].pop_back();
        gpuDynInst->pAddr = pkt->req->getPaddr();

        if (pkt->isRead() || pkt->isWrite()) {

            if (gpuDynInst->n_reg <= MAX_REGS_FOR_NON_VEC_MEM_INST) {
                gpuDynInst->statusBitVector &= (~(1ULL << index));
            } else {
                assert(gpuDynInst->statusVector[index] > 0);
                gpuDynInst->statusVector[index]--;

                if (!gpuDynInst->statusVector[index])
                    gpuDynInst->statusBitVector &= (~(1ULL << index));
            }
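
            // Bookkeeping note: a narrow access clears its port's bit in
            // statusBitVector directly, while a wide (vector) access keeps a
            // per-port count of outstanding responses in statusVector and
            // clears the bit only once that count drains to zero.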

            DPRINTF(GPUMem, "bitvector is now %#x\n",
                    gpuDynInst->statusBitVector);

            if (gpuDynInst->statusBitVector == VectorMask(0)) {
                auto iter = gpuDynInst->memStatusVector.begin();
                auto end = gpuDynInst->memStatusVector.end();

                while (iter != end) {
                    assert(iter->second.empty());
                    ++iter;
                }

                gpuDynInst->memStatusVector.clear();

                if (gpuDynInst->n_reg > MAX_REGS_FOR_NON_VEC_MEM_INST)
                    gpuDynInst->statusVector.clear();

                if (gpuDynInst->isLoad() || gpuDynInst->isAtomic()) {
                    assert(compute_unit->globalMemoryPipe.isGMLdRespFIFOWrRdy());

                    compute_unit->globalMemoryPipe.getGMLdRespFIFO()
                        .push(gpuDynInst);
                } else {
                    assert(compute_unit->globalMemoryPipe.isGMStRespFIFOWrRdy());

                    compute_unit->globalMemoryPipe.getGMStRespFIFO()
                        .push(gpuDynInst);
                }

                DPRINTF(GPUMem, "CU%d: WF[%d][%d]: packet totally complete\n",
                        compute_unit->cu_id, gpuDynInst->simdId,
                        gpuDynInst->wfSlotId);

                // after clearing the status vectors,
                // see if there is a continuation to perform
                // the continuation may generate more work for
                // this memory request
                if (gpuDynInst->useContinuation) {
                    assert(!gpuDynInst->isNoScope());
                    gpuDynInst->execContinuation(
                        gpuDynInst->staticInstruction(), gpuDynInst);
                }
            }
        }
    } else {
        gpuDynInst->statusBitVector = VectorMask(0);

        if (gpuDynInst->useContinuation) {
            assert(!gpuDynInst->isNoScope());
            gpuDynInst->execContinuation(gpuDynInst->staticInstruction(),
                                         gpuDynInst);
        }
    }

    delete pkt->senderState;
    delete pkt->req;
    delete pkt;
}

ComputeUnit*
ComputeUnitParams::create()
{
    return new ComputeUnit(this);
}

bool
ComputeUnit::DTLBPort::recvTimingResp(PacketPtr pkt)
{
    Addr line = pkt->req->getPaddr();

    DPRINTF(GPUTLB, "CU%d: DTLBPort received %#x->%#x\n", computeUnit->cu_id,
            pkt->req->getVaddr(), line);

    assert(pkt->senderState);
    computeUnit->tlbCycles += curTick();

    // pop off the TLB translation state
    TheISA::GpuTLB::TranslationState *translation_state =
        safe_cast<TheISA::GpuTLB::TranslationState*>(pkt->senderState);

    // no PageFaults are permitted for data accesses
    if (!translation_state->tlbEntry->valid) {
        DTLBPort::SenderState *sender_state =
            safe_cast<DTLBPort::SenderState*>(translation_state->saved);

        Wavefront *w M5_VAR_USED =
            computeUnit->wfList[sender_state->_gpuDynInst->simdId]
                               [sender_state->_gpuDynInst->wfSlotId];

        DPRINTFN("Wave %d couldn't translate vaddr %#x\n", w->wfDynId,
                 pkt->req->getVaddr());
    }

    assert(translation_state->tlbEntry->valid);

    // update the hitLevel distribution
    int hit_level = translation_state->hitLevel;
    computeUnit->hitsPerTLBLevel[hit_level]++;

    delete translation_state->tlbEntry;
    assert(!translation_state->ports.size());
    pkt->senderState = translation_state->saved;

    // for prefetch pkt
    BaseTLB::Mode TLB_mode = translation_state->tlbMode;

    delete translation_state;

    // use the original sender state to know how to close this transaction
    DTLBPort::SenderState *sender_state =
        safe_cast<DTLBPort::SenderState*>(pkt->senderState);

    GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;
    int mp_index = sender_state->portIndex;
    Addr vaddr = pkt->req->getVaddr();
    gpuDynInst->memStatusVector[line].push_back(mp_index);
    gpuDynInst->tlbHitLevel[mp_index] = hit_level;

    MemCmd requestCmd;

    if (pkt->cmd == MemCmd::ReadResp) {
        requestCmd = MemCmd::ReadReq;
    } else if (pkt->cmd == MemCmd::WriteResp) {
        requestCmd = MemCmd::WriteReq;
    } else if (pkt->cmd == MemCmd::SwapResp) {
        requestCmd = MemCmd::SwapReq;
    } else {
        panic("unsupported response to request conversion %s\n",
              pkt->cmd.toString());
    }

    if (computeUnit->prefetchDepth) {
        int simdId = gpuDynInst->simdId;
        int wfSlotId = gpuDynInst->wfSlotId;
        Addr last = 0;

        switch (computeUnit->prefetchType) {
        case Enums::PF_CU:
            last = computeUnit->lastVaddrCU[mp_index];
            break;
        case Enums::PF_PHASE:
            last = computeUnit->lastVaddrSimd[simdId][mp_index];
            break;
        case Enums::PF_WF:
            last = computeUnit->lastVaddrWF[simdId][wfSlotId][mp_index];
            break;
        default:
            break;
        }

        DPRINTF(GPUPrefetch, "CU[%d][%d][%d][%d]: %#x was last\n",
                computeUnit->cu_id, simdId, wfSlotId, mp_index, last);

        int stride = last ? (roundDown(vaddr, TheISA::PageBytes) -
                             roundDown(last, TheISA::PageBytes))
                                >> TheISA::PageShift
                          : 0;
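        // The stride is measured in whole pages: e.g. (hypothetical
        // addresses, 4 KB pages) last == 0x5000 and vaddr == 0x9000 give
        // (0x9000 - 0x5000) >> 12 == 4 pages.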

        DPRINTF(GPUPrefetch, "Stride is %d\n", stride);

        computeUnit->lastVaddrCU[mp_index] = vaddr;
        computeUnit->lastVaddrSimd[simdId][mp_index] = vaddr;
        computeUnit->lastVaddrWF[simdId][wfSlotId][mp_index] = vaddr;

        stride = (computeUnit->prefetchType == Enums::PF_STRIDE) ?
            computeUnit->prefetchStride : stride;

        DPRINTF(GPUPrefetch, "%#x to: CU[%d][%d][%d][%d]\n", vaddr,
                computeUnit->cu_id, simdId, wfSlotId, mp_index);

        DPRINTF(GPUPrefetch, "Prefetching from %#x:", vaddr);

        // Prefetch the next few pages atomically
        for (int pf = 1; pf <= computeUnit->prefetchDepth; ++pf) {
            DPRINTF(GPUPrefetch, "%d * %d: %#x\n", pf, stride,
                    vaddr + stride * pf * TheISA::PageBytes);

            if (!stride)
                break;

            Request *prefetch_req = new Request(0, vaddr + stride * pf *
                                                TheISA::PageBytes,
                                                sizeof(uint8_t), 0,
                                                computeUnit->masterId(),
                                                0, 0, 0);

            PacketPtr prefetch_pkt = new Packet(prefetch_req, requestCmd);
            uint8_t foo = 0;
            prefetch_pkt->dataStatic(&foo);

            // Because it's an atomic operation, only the TLB translation
            // state is needed
            prefetch_pkt->senderState =
                new TheISA::GpuTLB::TranslationState(TLB_mode,
                                                     computeUnit->shader->gpuTc,
                                                     true);

            // Currently prefetches are zero-latency, hence the sendFunctional
            sendFunctional(prefetch_pkt);

            /* safe_cast the senderState */
            TheISA::GpuTLB::TranslationState *tlb_state =
                safe_cast<TheISA::GpuTLB::TranslationState*>(
                    prefetch_pkt->senderState);

            delete tlb_state->tlbEntry;
            delete tlb_state;
            delete prefetch_pkt->req;
            delete prefetch_pkt;
        }
    }

    // First we must convert the response cmd back to a request cmd so that
    // the request can be sent through the cu's master port
    PacketPtr new_pkt = new Packet(pkt->req, requestCmd);
    new_pkt->dataStatic(pkt->getPtr<uint8_t>());
    delete pkt->senderState;
    delete pkt;

    // New SenderState for the memory access
    new_pkt->senderState =
        new ComputeUnit::DataPort::SenderState(gpuDynInst, mp_index,
                                               nullptr);

    // translation is done. Schedule the mem_req_event at the appropriate
    // cycle to send the timing memory request to ruby
    ComputeUnit::DataPort::MemReqEvent *mem_req_event =
        new ComputeUnit::DataPort::MemReqEvent(computeUnit->memPort[mp_index],
                                               new_pkt);

    DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x data scheduled\n",
            computeUnit->cu_id, gpuDynInst->simdId,
            gpuDynInst->wfSlotId, mp_index, new_pkt->req->getPaddr());

    computeUnit->schedule(mem_req_event, curTick() +
                          computeUnit->req_tick_latency);

    return true;
}

const char*
ComputeUnit::DataPort::MemReqEvent::description() const
{
    return "ComputeUnit memory request event";
}

void
ComputeUnit::DataPort::MemReqEvent::process()
{
    SenderState *sender_state = safe_cast<SenderState*>(pkt->senderState);
    GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;
    ComputeUnit *compute_unit M5_VAR_USED = dataPort->computeUnit;

    if (!(dataPort->sendTimingReq(pkt))) {
        dataPort->retries.push_back(std::make_pair(pkt, gpuDynInst));

        DPRINTF(GPUPort,
                "CU%d: WF[%d][%d]: index %d, addr %#x data req failed!\n",
                compute_unit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId, dataPort->index,
                pkt->req->getPaddr());
    } else {
        DPRINTF(GPUPort,
                "CU%d: WF[%d][%d]: index %d, addr %#x data req sent!\n",
                compute_unit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId, dataPort->index,
                pkt->req->getPaddr());
    }
}

/*
 * The initial translation request could have been rejected, if
 * <retries> queue is not empty. Retry sending the translation
 * request. sendRetry() is called from the peer port whenever
 * a translation completes.
 */
void
ComputeUnit::DTLBPort::recvReqRetry()
{
    int len = retries.size();

    DPRINTF(GPUTLB, "CU%d: DTLB recvReqRetry - %d pending requests\n",
            computeUnit->cu_id, len);

    assert(len > 0);
    assert(isStalled());
    // recvReqRetry is an indication that the resource on which this
    // port was stalling on is freed. So, remove the stall first
    unstallPort();

    for (int i = 0; i < len; ++i) {
        PacketPtr pkt = retries.front();
        Addr vaddr M5_VAR_USED = pkt->req->getVaddr();
        DPRINTF(GPUTLB, "CU%d: retrying D-translation for address %#x",
                computeUnit->cu_id, vaddr);

        if (!sendTimingReq(pkt)) {
            // Stall port
            stallPort();
            DPRINTF(GPUTLB, ": failed again\n");
            break;
        } else {
            DPRINTF(GPUTLB, ": successful\n");
            retries.pop_front();
        }
    }
}

bool
ComputeUnit::ITLBPort::recvTimingResp(PacketPtr pkt)
{
    Addr line M5_VAR_USED = pkt->req->getPaddr();
    DPRINTF(GPUTLB, "CU%d: ITLBPort received %#x->%#x\n",
            computeUnit->cu_id, pkt->req->getVaddr(), line);

    assert(pkt->senderState);

    // pop off the TLB translation state
    TheISA::GpuTLB::TranslationState *translation_state =
        safe_cast<TheISA::GpuTLB::TranslationState*>(pkt->senderState);

    bool success = translation_state->tlbEntry->valid;
    delete translation_state->tlbEntry;
    assert(!translation_state->ports.size());
    pkt->senderState = translation_state->saved;
    delete translation_state;

    // use the original sender state to know how to close this transaction
    ITLBPort::SenderState *sender_state =
        safe_cast<ITLBPort::SenderState*>(pkt->senderState);

    // get the wavefront associated with this translation request
    Wavefront *wavefront = sender_state->wavefront;
    delete pkt->senderState;

    if (success) {
        // pkt is reused in fetch(), don't delete it here. However, we must
        // reset the command to be a request so that it can be sent through
        // the cu's master port
        assert(pkt->cmd == MemCmd::ReadResp);
        pkt->cmd = MemCmd::ReadReq;

        computeUnit->fetchStage.fetch(pkt, wavefront);
    } else {
        if (wavefront->dropFetch) {
            assert(wavefront->instructionBuffer.empty());
            wavefront->dropFetch = false;
        }

        wavefront->pendingFetch = 0;
    }

    return true;
}

/*
 * The initial translation request could have been rejected, if
 * <retries> queue is not empty. Retry sending the translation
 * request. sendRetry() is called from the peer port whenever
 * a translation completes.
 */
void
ComputeUnit::ITLBPort::recvReqRetry()
{
    int len = retries.size();
    DPRINTF(GPUTLB, "CU%d: ITLB recvReqRetry - %d pending requests\n",
            computeUnit->cu_id, len);

    assert(len > 0);
    assert(isStalled());

    // recvReqRetry is an indication that the resource on which this
    // port was stalling on is freed. So, remove the stall first
    unstallPort();

    for (int i = 0; i < len; ++i) {
        PacketPtr pkt = retries.front();
        Addr vaddr M5_VAR_USED = pkt->req->getVaddr();
        DPRINTF(GPUTLB, "CU%d: retrying I-translation for address %#x",
                computeUnit->cu_id, vaddr);

        if (!sendTimingReq(pkt)) {
            stallPort(); // Stall port
            DPRINTF(GPUTLB, ": failed again\n");
            break;
        } else {
            DPRINTF(GPUTLB, ": successful\n");
            retries.pop_front();
        }
    }
}

void
ComputeUnit::regStats()
{
    MemObject::regStats();
1/*
2 * Copyright (c) 2011-2015 Advanced Micro Devices, Inc.
3 * All rights reserved.
4 *
5 * For use for simulation and test purposes only
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright notice,
14 * this list of conditions and the following disclaimer in the documentation
15 * and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the copyright holder nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 *
33 * Author: John Kalamatianos, Anthony Gutierrez
34 */
35#include "gpu-compute/compute_unit.hh"
36
37#include <limits>
38
39#include "base/output.hh"
40#include "debug/GPUDisp.hh"
41#include "debug/GPUExec.hh"
42#include "debug/GPUFetch.hh"
43#include "debug/GPUMem.hh"
44#include "debug/GPUPort.hh"
45#include "debug/GPUPrefetch.hh"
46#include "debug/GPUSync.hh"
47#include "debug/GPUTLB.hh"
48#include "gpu-compute/dispatcher.hh"
49#include "gpu-compute/gpu_dyn_inst.hh"
50#include "gpu-compute/gpu_static_inst.hh"
51#include "gpu-compute/ndrange.hh"
52#include "gpu-compute/shader.hh"
53#include "gpu-compute/simple_pool_manager.hh"
54#include "gpu-compute/vector_register_file.hh"
55#include "gpu-compute/wavefront.hh"
56#include "mem/page_table.hh"
57#include "sim/process.hh"
58
59ComputeUnit::ComputeUnit(const Params *p) : MemObject(p), fetchStage(p),
60 scoreboardCheckStage(p), scheduleStage(p), execStage(p),
61 globalMemoryPipe(p), localMemoryPipe(p), rrNextMemID(0), rrNextALUWp(0),
62 cu_id(p->cu_id), vrf(p->vector_register_file), numSIMDs(p->num_SIMDs),
63 spBypassPipeLength(p->spbypass_pipe_length),
64 dpBypassPipeLength(p->dpbypass_pipe_length),
65 issuePeriod(p->issue_period),
66 numGlbMemUnits(p->num_global_mem_pipes),
67 numLocMemUnits(p->num_shared_mem_pipes),
68 perLaneTLB(p->perLaneTLB), prefetchDepth(p->prefetch_depth),
69 prefetchStride(p->prefetch_stride), prefetchType(p->prefetch_prev_type),
70 xact_cas_mode(p->xactCasMode), debugSegFault(p->debugSegFault),
71 functionalTLB(p->functionalTLB), localMemBarrier(p->localMemBarrier),
72 countPages(p->countPages), barrier_id(0),
73 vrfToCoalescerBusWidth(p->vrf_to_coalescer_bus_width),
74 coalescerToVrfBusWidth(p->coalescer_to_vrf_bus_width),
75 req_tick_latency(p->mem_req_latency * p->clk_domain->clockPeriod()),
76 resp_tick_latency(p->mem_resp_latency * p->clk_domain->clockPeriod()),
77 _masterId(p->system->getMasterId(name() + ".ComputeUnit")),
78 lds(*p->localDataStore), globalSeqNum(0), wavefrontSize(p->wfSize),
79 kernelLaunchInst(new KernelLaunchStaticInst())
80{
81 /**
82 * This check is necessary because std::bitset only provides conversion
83 * to unsigned long or unsigned long long via to_ulong() or to_ullong().
84 * there are * a few places in the code where to_ullong() is used, however
85 * if VSZ is larger than a value the host can support then bitset will
86 * throw a runtime exception. we should remove all use of to_long() or
87 * to_ullong() so we can have VSZ greater than 64b, however until that is
88 * done this assert is required.
89 */
90 fatal_if(p->wfSize > std::numeric_limits<unsigned long long>::digits ||
91 p->wfSize <= 0,
92 "WF size is larger than the host can support");
93 fatal_if(!isPowerOf2(wavefrontSize),
94 "Wavefront size should be a power of 2");
95 // calculate how many cycles a vector load or store will need to transfer
96 // its data over the corresponding buses
97 numCyclesPerStoreTransfer =
98 (uint32_t)ceil((double)(wfSize() * sizeof(uint32_t)) /
99 (double)vrfToCoalescerBusWidth);
100
101 numCyclesPerLoadTransfer = (wfSize() * sizeof(uint32_t))
102 / coalescerToVrfBusWidth;
103
104 lastVaddrWF.resize(numSIMDs);
105 wfList.resize(numSIMDs);
106
107 for (int j = 0; j < numSIMDs; ++j) {
108 lastVaddrWF[j].resize(p->n_wf);
109
110 for (int i = 0; i < p->n_wf; ++i) {
111 lastVaddrWF[j][i].resize(wfSize());
112
113 wfList[j].push_back(p->wavefronts[j * p->n_wf + i]);
114 wfList[j][i]->setParent(this);
115
116 for (int k = 0; k < wfSize(); ++k) {
117 lastVaddrWF[j][i][k] = 0;
118 }
119 }
120 }
121
122 lastVaddrSimd.resize(numSIMDs);
123
124 for (int i = 0; i < numSIMDs; ++i) {
125 lastVaddrSimd[i].resize(wfSize(), 0);
126 }
127
128 lastVaddrCU.resize(wfSize());
129
130 lds.setParent(this);
131
132 if (p->execPolicy == "OLDEST-FIRST") {
133 exec_policy = EXEC_POLICY::OLDEST;
134 } else if (p->execPolicy == "ROUND-ROBIN") {
135 exec_policy = EXEC_POLICY::RR;
136 } else {
137 fatal("Invalid WF execution policy (CU)\n");
138 }
139
140 memPort.resize(wfSize());
141
142 // resize the tlbPort vectorArray
143 int tlbPort_width = perLaneTLB ? wfSize() : 1;
144 tlbPort.resize(tlbPort_width);
145
146 cuExitCallback = new CUExitCallback(this);
147 registerExitCallback(cuExitCallback);
148
149 xactCasLoadMap.clear();
150 lastExecCycle.resize(numSIMDs, 0);
151
152 for (int i = 0; i < vrf.size(); ++i) {
153 vrf[i]->setParent(this);
154 }
155
156 numVecRegsPerSimd = vrf[0]->numRegs();
157}
158
159ComputeUnit::~ComputeUnit()
160{
161 // Delete wavefront slots
162 for (int j = 0; j < numSIMDs; ++j) {
163 for (int i = 0; i < shader->n_wf; ++i) {
164 delete wfList[j][i];
165 }
166 lastVaddrSimd[j].clear();
167 }
168 lastVaddrCU.clear();
169 readyList.clear();
170 waveStatusList.clear();
171 dispatchList.clear();
172 vectorAluInstAvail.clear();
173 delete cuExitCallback;
174 delete ldsPort;
175}
176
177void
178ComputeUnit::fillKernelState(Wavefront *w, NDRange *ndr)
179{
180 w->resizeRegFiles(ndr->q.cRegCount, ndr->q.sRegCount, ndr->q.dRegCount);
181
182 w->workGroupSz[0] = ndr->q.wgSize[0];
183 w->workGroupSz[1] = ndr->q.wgSize[1];
184 w->workGroupSz[2] = ndr->q.wgSize[2];
185 w->wgSz = w->workGroupSz[0] * w->workGroupSz[1] * w->workGroupSz[2];
186 w->gridSz[0] = ndr->q.gdSize[0];
187 w->gridSz[1] = ndr->q.gdSize[1];
188 w->gridSz[2] = ndr->q.gdSize[2];
189 w->kernelArgs = ndr->q.args;
190 w->privSizePerItem = ndr->q.privMemPerItem;
191 w->spillSizePerItem = ndr->q.spillMemPerItem;
192 w->roBase = ndr->q.roMemStart;
193 w->roSize = ndr->q.roMemTotal;
194 w->computeActualWgSz(ndr);
195}
196
197void
198ComputeUnit::updateEvents() {
199
200 if (!timestampVec.empty()) {
201 uint32_t vecSize = timestampVec.size();
202 uint32_t i = 0;
203 while (i < vecSize) {
204 if (timestampVec[i] <= shader->tick_cnt) {
205 std::pair<uint32_t, uint32_t> regInfo = regIdxVec[i];
206 vrf[regInfo.first]->markReg(regInfo.second, sizeof(uint32_t),
207 statusVec[i]);
208 timestampVec.erase(timestampVec.begin() + i);
209 regIdxVec.erase(regIdxVec.begin() + i);
210 statusVec.erase(statusVec.begin() + i);
211 --vecSize;
212 --i;
213 }
214 ++i;
215 }
216 }
217
218 for (int i = 0; i< numSIMDs; ++i) {
219 vrf[i]->updateEvents();
220 }
221}
222
223
224void
225ComputeUnit::startWavefront(Wavefront *w, int waveId, LdsChunk *ldsChunk,
226 NDRange *ndr)
227{
228 static int _n_wave = 0;
229
230 VectorMask init_mask;
231 init_mask.reset();
232
233 for (int k = 0; k < wfSize(); ++k) {
234 if (k + waveId * wfSize() < w->actualWgSzTotal)
235 init_mask[k] = 1;
236 }
237
238 w->kernId = ndr->dispatchId;
239 w->wfId = waveId;
240 w->initMask = init_mask.to_ullong();
241
242 for (int k = 0; k < wfSize(); ++k) {
243 w->workItemId[0][k] = (k + waveId * wfSize()) % w->actualWgSz[0];
244 w->workItemId[1][k] = ((k + waveId * wfSize()) / w->actualWgSz[0]) %
245 w->actualWgSz[1];
246 w->workItemId[2][k] = (k + waveId * wfSize()) /
247 (w->actualWgSz[0] * w->actualWgSz[1]);
248
249 w->workItemFlatId[k] = w->workItemId[2][k] * w->actualWgSz[0] *
250 w->actualWgSz[1] + w->workItemId[1][k] * w->actualWgSz[0] +
251 w->workItemId[0][k];
252 }
253
254 w->barrierSlots = divCeil(w->actualWgSzTotal, wfSize());
255
256 w->barCnt.resize(wfSize(), 0);
257
258 w->maxBarCnt = 0;
259 w->oldBarrierCnt = 0;
260 w->barrierCnt = 0;
261
262 w->privBase = ndr->q.privMemStart;
263 ndr->q.privMemStart += ndr->q.privMemPerItem * wfSize();
264
265 w->spillBase = ndr->q.spillMemStart;
266 ndr->q.spillMemStart += ndr->q.spillMemPerItem * wfSize();
267
268 w->pushToReconvergenceStack(0, UINT32_MAX, init_mask.to_ulong());
269
270 // WG state
271 w->wgId = ndr->globalWgId;
272 w->dispatchId = ndr->dispatchId;
273 w->workGroupId[0] = w->wgId % ndr->numWg[0];
274 w->workGroupId[1] = (w->wgId / ndr->numWg[0]) % ndr->numWg[1];
275 w->workGroupId[2] = w->wgId / (ndr->numWg[0] * ndr->numWg[1]);
276
277 w->barrierId = barrier_id;
278 w->stalledAtBarrier = false;
279
280 // set the wavefront context to have a pointer to this section of the LDS
281 w->ldsChunk = ldsChunk;
282
283 int32_t refCount M5_VAR_USED =
284 lds.increaseRefCounter(w->dispatchId, w->wgId);
285 DPRINTF(GPUDisp, "CU%d: increase ref ctr wg[%d] to [%d]\n",
286 cu_id, w->wgId, refCount);
287
288 w->instructionBuffer.clear();
289
290 if (w->pendingFetch)
291 w->dropFetch = true;
292
293 // is this the last wavefront in the workgroup
294 // if set the spillWidth to be the remaining work-items
295 // so that the vector access is correct
296 if ((waveId + 1) * wfSize() >= w->actualWgSzTotal) {
297 w->spillWidth = w->actualWgSzTotal - (waveId * wfSize());
298 } else {
299 w->spillWidth = wfSize();
300 }
301
302 DPRINTF(GPUDisp, "Scheduling wfDynId/barrier_id %d/%d on CU%d: "
303 "WF[%d][%d]\n", _n_wave, barrier_id, cu_id, w->simdId, w->wfSlotId);
304
305 w->start(++_n_wave, ndr->q.code_ptr);
306}
307
308void
309ComputeUnit::StartWorkgroup(NDRange *ndr)
310{
311 // reserve the LDS capacity allocated to the work group
312 // disambiguated by the dispatch ID and workgroup ID, which should be
313 // globally unique
314 LdsChunk *ldsChunk = lds.reserveSpace(ndr->dispatchId, ndr->globalWgId,
315 ndr->q.ldsSize);
316
317 // Send L1 cache acquire
318 // isKernel + isAcquire = Kernel Begin
319 if (shader->impl_kern_boundary_sync) {
320 GPUDynInstPtr gpuDynInst =
321 std::make_shared<GPUDynInst>(this, nullptr, kernelLaunchInst,
322 getAndIncSeqNum());
323
324 gpuDynInst->useContinuation = false;
325 injectGlobalMemFence(gpuDynInst, true);
326 }
327
328 // calculate the number of 32-bit vector registers required by wavefront
329 int vregDemand = ndr->q.sRegCount + (2 * ndr->q.dRegCount);
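    // each double register presumably occupies two 32-bit VGPR slots,
    // hence the 2x factor on dRegCount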
330 int wave_id = 0;
331
332 // Assign WFs by spreading them across SIMDs, 1 WF per SIMD at a time
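    // e.g. with 4 SIMDs, m = 0..3 fill slot 0 of SIMDs 0..3, m = 4..7 fill
    // slot 1, and so on (wfList is indexed as [simdId][wfSlotId])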
333 for (int m = 0; m < shader->n_wf * numSIMDs; ++m) {
334 Wavefront *w = wfList[m % numSIMDs][m / numSIMDs];
335 // Check if this wavefront slot is available:
336        // It must be stopped and not waiting
337        // for a release to complete (i.e. not S_RETURNING)
338 if (w->status == Wavefront::S_STOPPED) {
339 fillKernelState(w, ndr);
340 // if we have scheduled all work items then stop
341 // scheduling wavefronts
342 if (wave_id * wfSize() >= w->actualWgSzTotal)
343 break;
344
345 // reserve vector registers for the scheduled wavefront
346 assert(vectorRegsReserved[m % numSIMDs] <= numVecRegsPerSimd);
347 uint32_t normSize = 0;
348
349 w->startVgprIndex = vrf[m % numSIMDs]->manager->
350 allocateRegion(vregDemand, &normSize);
351
352 w->reservedVectorRegs = normSize;
353 vectorRegsReserved[m % numSIMDs] += w->reservedVectorRegs;
354
355 startWavefront(w, wave_id, ldsChunk, ndr);
356 ++wave_id;
357 }
358 }
359 ++barrier_id;
360}
361
362int
363ComputeUnit::ReadyWorkgroup(NDRange *ndr)
364{
365 // Get true size of workgroup (after clamping to grid size)
366 int trueWgSize[3];
367 int trueWgSizeTotal = 1;
368
369 for (int d = 0; d < 3; ++d) {
370 trueWgSize[d] = std::min(ndr->q.wgSize[d], ndr->q.gdSize[d] -
371 ndr->wgId[d] * ndr->q.wgSize[d]);
372
373 trueWgSizeTotal *= trueWgSize[d];
374 DPRINTF(GPUDisp, "trueWgSize[%d] = %d\n", d, trueWgSize[d]);
375 }
376
377 DPRINTF(GPUDisp, "trueWgSizeTotal = %d\n", trueWgSizeTotal);
378
379 // calculate the number of 32-bit vector registers required by each
380 // work item of the work group
381 int vregDemandPerWI = ndr->q.sRegCount + (2 * ndr->q.dRegCount);
382 bool vregAvail = true;
383 int numWfs = (trueWgSizeTotal + wfSize() - 1) / wfSize();
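    // ceiling division: e.g. 100 work-items with wfSize() == 64 -> 2 WFs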
384 int freeWfSlots = 0;
385    // check if the total number of VGPRs required by all WFs of the WG
386    // fits in the VRFs of all SIMD units
387 assert((numWfs * vregDemandPerWI) <= (numSIMDs * numVecRegsPerSimd));
388 int numMappedWfs = 0;
389 std::vector<int> numWfsPerSimd;
390 numWfsPerSimd.resize(numSIMDs, 0);
391 // find how many free WF slots we have across all SIMDs
392 for (int j = 0; j < shader->n_wf; ++j) {
393 for (int i = 0; i < numSIMDs; ++i) {
394 if (wfList[i][j]->status == Wavefront::S_STOPPED) {
395 // count the number of free WF slots
396 ++freeWfSlots;
397 if (numMappedWfs < numWfs) {
398 // count the WFs to be assigned per SIMD
399 numWfsPerSimd[i]++;
400 }
401 numMappedWfs++;
402 }
403 }
404 }
405
406 // if there are enough free WF slots then find if there are enough
407 // free VGPRs per SIMD based on the WF->SIMD mapping
408 if (freeWfSlots >= numWfs) {
409 for (int j = 0; j < numSIMDs; ++j) {
410 // find if there are enough free VGPR regions in the SIMD's VRF
411 // to accommodate the WFs of the new WG that would be mapped to
412 // this SIMD unit
413 vregAvail = vrf[j]->manager->canAllocate(numWfsPerSimd[j],
414 vregDemandPerWI);
415
416 // stop searching if there is at least one SIMD
417 // whose VRF does not have enough free VGPR pools.
418 // This is because a WG is scheduled only if ALL
419 // of its WFs can be scheduled
420 if (!vregAvail)
421 break;
422 }
423 }
424
425 DPRINTF(GPUDisp, "Free WF slots = %d, VGPR Availability = %d\n",
426 freeWfSlots, vregAvail);
427
428 if (!vregAvail) {
429 ++numTimesWgBlockedDueVgprAlloc;
430 }
431
432    // count the number of times a WG is blocked because the LDS cannot
433    // accommodate its allocation request
434 if (!lds.canReserve(ndr->q.ldsSize)) {
435 wgBlockedDueLdsAllocation++;
436 }
437
438    // Return true if (a) there are enough free WF slots to submit the
439    // workgroup, (b) there are enough VGPRs to schedule all WFs to their
440    // SIMD units, and (c) there is enough space in the LDS
441 return freeWfSlots >= numWfs && vregAvail && lds.canReserve(ndr->q.ldsSize);
442}
443
444int
445ComputeUnit::AllAtBarrier(uint32_t _barrier_id, uint32_t bcnt, uint32_t bslots)
446{
447 DPRINTF(GPUSync, "CU%d: Checking for All At Barrier\n", cu_id);
448 int ccnt = 0;
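    // ccnt counts the WFs of this barrier group that have arrived; all are
    // at the barrier when ccnt matches bslots (one slot per WF, as set up
    // in startWavefront)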
449
450 for (int i_simd = 0; i_simd < numSIMDs; ++i_simd) {
451 for (int i_wf = 0; i_wf < shader->n_wf; ++i_wf) {
452 Wavefront *w = wfList[i_simd][i_wf];
453
454 if (w->status == Wavefront::S_RUNNING) {
455 DPRINTF(GPUSync, "Checking WF[%d][%d]\n", i_simd, i_wf);
456
457 DPRINTF(GPUSync, "wf->barrier_id = %d, _barrier_id = %d\n",
458 w->barrierId, _barrier_id);
459
460 DPRINTF(GPUSync, "wf->barrier_cnt %d, bcnt = %d\n",
461 w->barrierCnt, bcnt);
462 }
463
464 if (w->status == Wavefront::S_RUNNING &&
465 w->barrierId == _barrier_id && w->barrierCnt == bcnt &&
466 !w->outstandingReqs) {
467 ++ccnt;
468
469 DPRINTF(GPUSync, "WF[%d][%d] at barrier, increment ccnt to "
470 "%d\n", i_simd, i_wf, ccnt);
471 }
472 }
473 }
474
475 DPRINTF(GPUSync, "CU%d: returning allAtBarrier ccnt = %d, bslots = %d\n",
476 cu_id, ccnt, bslots);
477
478 return ccnt == bslots;
479}
480
481// Check if the current wavefront is blocked on additional resources.
482bool
483ComputeUnit::cedeSIMD(int simdId, int wfSlotId)
484{
485 bool cede = false;
486
487    // If the --xact-cas-mode option is enabled in run.py, then xact_cas_ld
488 // magic instructions will impact the scheduling of wavefronts
489 if (xact_cas_mode) {
490 /*
491 * When a wavefront calls xact_cas_ld, it adds itself to a per address
492 * queue. All per address queues are managed by the xactCasLoadMap.
493 *
494 * A wavefront is not blocked if: it is not in ANY per address queue or
495 * if it is at the head of a per address queue.
496 */
497 for (auto itMap : xactCasLoadMap) {
498 std::list<waveIdentifier> curWaveIDQueue = itMap.second.waveIDQueue;
499
500 if (!curWaveIDQueue.empty()) {
501 for (auto it : curWaveIDQueue) {
502 waveIdentifier cur_wave = it;
503
504 if (cur_wave.simdId == simdId &&
505 cur_wave.wfSlotId == wfSlotId) {
506 // 2 possibilities
507 // 1: this WF has a green light
508 // 2: another WF has a green light
509 waveIdentifier owner_wave = curWaveIDQueue.front();
510
511 if (owner_wave.simdId != cur_wave.simdId ||
512 owner_wave.wfSlotId != cur_wave.wfSlotId) {
513 // possibility 2
514 cede = true;
515 break;
516 } else {
517 // possibility 1
518 break;
519 }
520 }
521 }
522 }
523 }
524 }
525
526 return cede;
527}
528
529// Execute one clock worth of work on the ComputeUnit.
530void
531ComputeUnit::exec()
532{
533 updateEvents();
534 // Execute pipeline stages in reverse order to simulate
535 // the pipeline latency
536 globalMemoryPipe.exec();
537 localMemoryPipe.exec();
538 execStage.exec();
539 scheduleStage.exec();
540 scoreboardCheckStage.exec();
541 fetchStage.exec();
542
543 totalCycles++;
544}
545
546void
547ComputeUnit::init()
548{
549 // Initialize CU Bus models
550 glbMemToVrfBus.init(&shader->tick_cnt, shader->ticks(1));
551 locMemToVrfBus.init(&shader->tick_cnt, shader->ticks(1));
552 nextGlbMemBus = 0;
553 nextLocMemBus = 0;
554 fatal_if(numGlbMemUnits > 1,
555 "No support for multiple Global Memory Pipelines exists!!!");
556 vrfToGlobalMemPipeBus.resize(numGlbMemUnits);
557 for (int j = 0; j < numGlbMemUnits; ++j) {
558 vrfToGlobalMemPipeBus[j] = WaitClass();
559 vrfToGlobalMemPipeBus[j].init(&shader->tick_cnt, shader->ticks(1));
560 }
561
562 fatal_if(numLocMemUnits > 1,
563 "No support for multiple Local Memory Pipelines exists!!!");
564 vrfToLocalMemPipeBus.resize(numLocMemUnits);
565 for (int j = 0; j < numLocMemUnits; ++j) {
566 vrfToLocalMemPipeBus[j] = WaitClass();
567 vrfToLocalMemPipeBus[j].init(&shader->tick_cnt, shader->ticks(1));
568 }
569 vectorRegsReserved.resize(numSIMDs, 0);
570 aluPipe.resize(numSIMDs);
571 wfWait.resize(numSIMDs + numLocMemUnits + numGlbMemUnits);
572
573 for (int i = 0; i < numSIMDs + numLocMemUnits + numGlbMemUnits; ++i) {
574 wfWait[i] = WaitClass();
575 wfWait[i].init(&shader->tick_cnt, shader->ticks(1));
576 }
577
578 for (int i = 0; i < numSIMDs; ++i) {
579 aluPipe[i] = WaitClass();
580 aluPipe[i].init(&shader->tick_cnt, shader->ticks(1));
581 }
582
583 // Setup space for call args
584 for (int j = 0; j < numSIMDs; ++j) {
585 for (int i = 0; i < shader->n_wf; ++i) {
586 wfList[j][i]->initCallArgMem(shader->funcargs_size, wavefrontSize);
587 }
588 }
589
590 // Initializing pipeline resources
591 readyList.resize(numSIMDs + numGlbMemUnits + numLocMemUnits);
592 waveStatusList.resize(numSIMDs);
593
594 for (int j = 0; j < numSIMDs; ++j) {
595 for (int i = 0; i < shader->n_wf; ++i) {
596 waveStatusList[j].push_back(
597 std::make_pair(wfList[j][i], BLOCKED));
598 }
599 }
600
601 for (int j = 0; j < (numSIMDs + numGlbMemUnits + numLocMemUnits); ++j) {
602 dispatchList.push_back(std::make_pair((Wavefront*)nullptr, EMPTY));
603 }
604
605 fetchStage.init(this);
606 scoreboardCheckStage.init(this);
607 scheduleStage.init(this);
608 execStage.init(this);
609 globalMemoryPipe.init(this);
610 localMemoryPipe.init(this);
611 // initialize state for statistics calculation
612 vectorAluInstAvail.resize(numSIMDs, false);
613 shrMemInstAvail = 0;
614 glbMemInstAvail = 0;
615}
616
617bool
618ComputeUnit::DataPort::recvTimingResp(PacketPtr pkt)
619{
620 // Ruby has completed the memory op. Schedule the mem_resp_event at the
621 // appropriate cycle to process the timing memory response
622 // This delay represents the pipeline delay
623 SenderState *sender_state = safe_cast<SenderState*>(pkt->senderState);
624 int index = sender_state->port_index;
625 GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;
626
627    // Is the returned packet a Kernel End or Barrier?
628 if (pkt->req->isKernel() && pkt->req->isRelease()) {
629 Wavefront *w =
630 computeUnit->wfList[gpuDynInst->simdId][gpuDynInst->wfSlotId];
631
632 // Check if we are waiting on Kernel End Release
633 if (w->status == Wavefront::S_RETURNING) {
634 DPRINTF(GPUDisp, "CU%d: WF[%d][%d][wv=%d]: WG id completed %d\n",
635 computeUnit->cu_id, w->simdId, w->wfSlotId,
636 w->wfDynId, w->kernId);
637
638 computeUnit->shader->dispatcher->notifyWgCompl(w);
639 w->status = Wavefront::S_STOPPED;
640 } else {
641 w->outstandingReqs--;
642 }
643
644 DPRINTF(GPUSync, "CU%d: WF[%d][%d]: barrier_cnt = %d\n",
645 computeUnit->cu_id, gpuDynInst->simdId,
646 gpuDynInst->wfSlotId, w->barrierCnt);
647
648 if (gpuDynInst->useContinuation) {
649 assert(!gpuDynInst->isNoScope());
650 gpuDynInst->execContinuation(gpuDynInst->staticInstruction(),
651 gpuDynInst);
652 }
653
654 delete pkt->senderState;
655 delete pkt->req;
656 delete pkt;
657 return true;
658 } else if (pkt->req->isKernel() && pkt->req->isAcquire()) {
659 if (gpuDynInst->useContinuation) {
660 assert(!gpuDynInst->isNoScope());
661 gpuDynInst->execContinuation(gpuDynInst->staticInstruction(),
662 gpuDynInst);
663 }
664
665 delete pkt->senderState;
666 delete pkt->req;
667 delete pkt;
668 return true;
669 }
670
671 ComputeUnit::DataPort::MemRespEvent *mem_resp_event =
672 new ComputeUnit::DataPort::MemRespEvent(computeUnit->memPort[index],
673 pkt);
674
675 DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x received!\n",
676 computeUnit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
677 index, pkt->req->getPaddr());
678
679 computeUnit->schedule(mem_resp_event,
680 curTick() + computeUnit->resp_tick_latency);
681 return true;
682}
683
684void
685ComputeUnit::DataPort::recvReqRetry()
686{
687 int len = retries.size();
688
689 assert(len > 0);
690
691 for (int i = 0; i < len; ++i) {
692 PacketPtr pkt = retries.front().first;
693 GPUDynInstPtr gpuDynInst M5_VAR_USED = retries.front().second;
694 DPRINTF(GPUMem, "CU%d: WF[%d][%d]: retry mem inst addr %#x\n",
695 computeUnit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
696 pkt->req->getPaddr());
697
698 /** Currently Ruby can return false due to conflicts for the particular
699 * cache block or address. Thus other requests should be allowed to
700 * pass and the data port should expect multiple retries. */
701 if (!sendTimingReq(pkt)) {
702 DPRINTF(GPUMem, "failed again!\n");
703 break;
704 } else {
705 DPRINTF(GPUMem, "successful!\n");
706 retries.pop_front();
707 }
708 }
709}
710
711bool
712ComputeUnit::SQCPort::recvTimingResp(PacketPtr pkt)
713{
714 computeUnit->fetchStage.processFetchReturn(pkt);
715
716 return true;
717}
718
719void
720ComputeUnit::SQCPort::recvReqRetry()
721{
722 int len = retries.size();
723
724 assert(len > 0);
725
726 for (int i = 0; i < len; ++i) {
727 PacketPtr pkt = retries.front().first;
728 Wavefront *wavefront M5_VAR_USED = retries.front().second;
729 DPRINTF(GPUFetch, "CU%d: WF[%d][%d]: retrying FETCH addr %#x\n",
730 computeUnit->cu_id, wavefront->simdId, wavefront->wfSlotId,
731 pkt->req->getPaddr());
732 if (!sendTimingReq(pkt)) {
733 DPRINTF(GPUFetch, "failed again!\n");
734 break;
735 } else {
736 DPRINTF(GPUFetch, "successful!\n");
737 retries.pop_front();
738 }
739 }
740}
741
742void
743ComputeUnit::sendRequest(GPUDynInstPtr gpuDynInst, int index, PacketPtr pkt)
744{
745 // There must be a way around this check to do the globalMemStart...
746 Addr tmp_vaddr = pkt->req->getVaddr();
747
748 updatePageDivergenceDist(tmp_vaddr);
749
750 pkt->req->setVirt(pkt->req->getAsid(), tmp_vaddr, pkt->req->getSize(),
751 pkt->req->getFlags(), pkt->req->masterId(),
752 pkt->req->getPC());
753
754 // figure out the type of the request to set read/write
755 BaseTLB::Mode TLB_mode;
756 assert(pkt->isRead() || pkt->isWrite());
757
758 // Check write before read for atomic operations
759 // since atomic operations should use BaseTLB::Write
760 if (pkt->isWrite()){
761 TLB_mode = BaseTLB::Write;
762 } else if (pkt->isRead()) {
763 TLB_mode = BaseTLB::Read;
764 } else {
765        fatal("pkt is neither a read nor a write\n");
766 }
767
768 tlbCycles -= curTick();
769 ++tlbRequests;
770
771 int tlbPort_index = perLaneTLB ? index : 0;
772
773 if (shader->timingSim) {
774 if (debugSegFault) {
775 Process *p = shader->gpuTc->getProcessPtr();
776 Addr vaddr = pkt->req->getVaddr();
777 unsigned size = pkt->getSize();
778
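            // a 64-byte boundary crossing is detected when the last byte's
            // offset within the line wraps below the first byte's offset;
            // e.g. vaddr = 0x3e with size = 4 gives (0x41 % 64) = 1 < 62
            // (64 is presumably the cache-line size here)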
779 if ((vaddr + size - 1) % 64 < vaddr % 64) {
780 panic("CU%d: WF[%d][%d]: Access to addr %#x is unaligned!\n",
781 cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, vaddr);
782 }
783
784 Addr paddr;
785
786 if (!p->pTable->translate(vaddr, paddr)) {
787 if (!p->fixupStackFault(vaddr)) {
788 panic("CU%d: WF[%d][%d]: Fault on addr %#x!\n",
789 cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
790 vaddr);
791 }
792 }
793 }
794
795 // This is the SenderState needed upon return
796 pkt->senderState = new DTLBPort::SenderState(gpuDynInst, index);
797
798 // This is the senderState needed by the TLB hierarchy to function
799 TheISA::GpuTLB::TranslationState *translation_state =
800 new TheISA::GpuTLB::TranslationState(TLB_mode, shader->gpuTc, false,
801 pkt->senderState);
802
803 pkt->senderState = translation_state;
804
805 if (functionalTLB) {
806 tlbPort[tlbPort_index]->sendFunctional(pkt);
807
808 // update the hitLevel distribution
809 int hit_level = translation_state->hitLevel;
810 assert(hit_level != -1);
811 hitsPerTLBLevel[hit_level]++;
812
813            // retrieve the TLB translation state so it can be cleaned up
814 X86ISA::GpuTLB::TranslationState *sender_state =
815 safe_cast<X86ISA::GpuTLB::TranslationState*>(pkt->senderState);
816
817 delete sender_state->tlbEntry;
818 delete sender_state->saved;
819 delete sender_state;
820
821 assert(pkt->req->hasPaddr());
822 assert(pkt->req->hasSize());
823
824 uint8_t *tmpData = pkt->getPtr<uint8_t>();
825
826            // this is necessary because the GPU TLB receives packets
827            // instead of requests. When the translation is complete, all
828            // relevant fields in the request will be populated, but not in
829            // the packet. Here we create the new packet so we can set the
830            // size, addr, and proper flags.
831 PacketPtr oldPkt = pkt;
832 pkt = new Packet(oldPkt->req, oldPkt->cmd);
833 delete oldPkt;
834 pkt->dataStatic(tmpData);
835
836
837 // New SenderState for the memory access
838 pkt->senderState = new ComputeUnit::DataPort::SenderState(gpuDynInst,
839 index, nullptr);
840
841 gpuDynInst->memStatusVector[pkt->getAddr()].push_back(index);
842 gpuDynInst->tlbHitLevel[index] = hit_level;
843
844
845 // translation is done. Schedule the mem_req_event at the
846 // appropriate cycle to send the timing memory request to ruby
847 ComputeUnit::DataPort::MemReqEvent *mem_req_event =
848 new ComputeUnit::DataPort::MemReqEvent(memPort[index], pkt);
849
850 DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x data "
851 "scheduled\n", cu_id, gpuDynInst->simdId,
852 gpuDynInst->wfSlotId, index, pkt->req->getPaddr());
853
854 schedule(mem_req_event, curTick() + req_tick_latency);
855 } else if (tlbPort[tlbPort_index]->isStalled()) {
856 assert(tlbPort[tlbPort_index]->retries.size() > 0);
857
858 DPRINTF(GPUTLB, "CU%d: WF[%d][%d]: Translation for addr %#x "
859 "failed!\n", cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
860 tmp_vaddr);
861
862 tlbPort[tlbPort_index]->retries.push_back(pkt);
863 } else if (!tlbPort[tlbPort_index]->sendTimingReq(pkt)) {
864 // Stall the data port;
865 // No more packet will be issued till
866 // ruby indicates resources are freed by
867 // a recvReqRetry() call back on this port.
868 tlbPort[tlbPort_index]->stallPort();
869
870 DPRINTF(GPUTLB, "CU%d: WF[%d][%d]: Translation for addr %#x "
871 "failed!\n", cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
872 tmp_vaddr);
873
874 tlbPort[tlbPort_index]->retries.push_back(pkt);
875 } else {
876 DPRINTF(GPUTLB,
877 "CU%d: WF[%d][%d]: Translation for addr %#x sent!\n",
878 cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, tmp_vaddr);
879 }
880 } else {
881 if (pkt->cmd == MemCmd::MemFenceReq) {
882 gpuDynInst->statusBitVector = VectorMask(0);
883 } else {
884 gpuDynInst->statusBitVector &= (~(1ll << index));
885 }
886
887        // discard the old sender state before installing the TLB state
888 delete pkt->senderState;
889
890        // Because it's an atomic op, only TLB translation state is needed
891 pkt->senderState = new TheISA::GpuTLB::TranslationState(TLB_mode,
892 shader->gpuTc);
893
894 tlbPort[tlbPort_index]->sendFunctional(pkt);
895
896        // the packet's address is not updated by the translation, so we
897        // must create a new packet; otherwise the memory access would use
898        // the old virtual address sent in the translation packet instead
899        // of the physical address returned by the translation
900 PacketPtr new_pkt = new Packet(pkt->req, pkt->cmd);
901 new_pkt->dataStatic(pkt->getPtr<uint8_t>());
902
903 // Translation is done. It is safe to send the packet to memory.
904 memPort[0]->sendFunctional(new_pkt);
905
906 DPRINTF(GPUMem, "CU%d: WF[%d][%d]: index %d: addr %#x\n", cu_id,
907 gpuDynInst->simdId, gpuDynInst->wfSlotId, index,
908 new_pkt->req->getPaddr());
909
910 // safe_cast the senderState
911 TheISA::GpuTLB::TranslationState *sender_state =
912 safe_cast<TheISA::GpuTLB::TranslationState*>(pkt->senderState);
913
914 delete sender_state->tlbEntry;
915 delete new_pkt;
916 delete pkt->senderState;
917 delete pkt->req;
918 delete pkt;
919 }
920}
921
922void
923ComputeUnit::sendSyncRequest(GPUDynInstPtr gpuDynInst, int index, PacketPtr pkt)
924{
925 ComputeUnit::DataPort::MemReqEvent *mem_req_event =
926 new ComputeUnit::DataPort::MemReqEvent(memPort[index], pkt);
927
928
929 // New SenderState for the memory access
930 pkt->senderState = new ComputeUnit::DataPort::SenderState(gpuDynInst, index,
931 nullptr);
932
933 DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x sync scheduled\n",
934 cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, index,
935 pkt->req->getPaddr());
936
937 schedule(mem_req_event, curTick() + req_tick_latency);
938}
939
940void
941ComputeUnit::injectGlobalMemFence(GPUDynInstPtr gpuDynInst, bool kernelLaunch,
942 Request* req)
943{
944 assert(gpuDynInst->isGlobalSeg());
945
946 if (!req) {
947 req = new Request(0, 0, 0, 0, masterId(), 0, gpuDynInst->wfDynId);
948 }
949 req->setPaddr(0);
950 if (kernelLaunch) {
951 req->setFlags(Request::KERNEL);
952 }
953
954 // for non-kernel MemFence operations, memorder flags are set depending
955 // on which type of request is currently being sent, so this
956 // should be set by the caller (e.g. if an inst has acq-rel
957    // semantics, it will send one acquire req and one release req)
958 gpuDynInst->setRequestFlags(req, kernelLaunch);
959
960 // a mem fence must correspond to an acquire/release request
961 assert(req->isAcquire() || req->isRelease());
962
963 // create packet
964 PacketPtr pkt = new Packet(req, MemCmd::MemFenceReq);
965
966 // set packet's sender state
967 pkt->senderState =
968 new ComputeUnit::DataPort::SenderState(gpuDynInst, 0, nullptr);
969
970 // send the packet
971 sendSyncRequest(gpuDynInst, 0, pkt);
972}
973
974const char*
975ComputeUnit::DataPort::MemRespEvent::description() const
976{
977 return "ComputeUnit memory response event";
978}
979
980void
981ComputeUnit::DataPort::MemRespEvent::process()
982{
983 DataPort::SenderState *sender_state =
984 safe_cast<DataPort::SenderState*>(pkt->senderState);
985
986 GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;
987 ComputeUnit *compute_unit = dataPort->computeUnit;
988
989 assert(gpuDynInst);
990
991 DPRINTF(GPUPort, "CU%d: WF[%d][%d]: Response for addr %#x, index %d\n",
992 compute_unit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
993 pkt->req->getPaddr(), dataPort->index);
994
995 Addr paddr = pkt->req->getPaddr();
996
997 if (pkt->cmd != MemCmd::MemFenceResp) {
998 int index = gpuDynInst->memStatusVector[paddr].back();
999
1000 DPRINTF(GPUMem, "Response for addr %#x, index %d\n",
1001 pkt->req->getPaddr(), index);
1002
1003 gpuDynInst->memStatusVector[paddr].pop_back();
1004 gpuDynInst->pAddr = pkt->req->getPaddr();
1005
1006 if (pkt->isRead() || pkt->isWrite()) {
1007
1008 if (gpuDynInst->n_reg <= MAX_REGS_FOR_NON_VEC_MEM_INST) {
1009 gpuDynInst->statusBitVector &= (~(1ULL << index));
1010 } else {
1011 assert(gpuDynInst->statusVector[index] > 0);
1012 gpuDynInst->statusVector[index]--;
1013
1014 if (!gpuDynInst->statusVector[index])
1015 gpuDynInst->statusBitVector &= (~(1ULL << index));
1016 }
1017
1018 DPRINTF(GPUMem, "bitvector is now %#x\n",
1019 gpuDynInst->statusBitVector);
1020
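            // once every status bit has drained to zero, all lanes of this
            // memory instruction have received their responses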
1021 if (gpuDynInst->statusBitVector == VectorMask(0)) {
1022 auto iter = gpuDynInst->memStatusVector.begin();
1023 auto end = gpuDynInst->memStatusVector.end();
1024
1025 while (iter != end) {
1026 assert(iter->second.empty());
1027 ++iter;
1028 }
1029
1030 gpuDynInst->memStatusVector.clear();
1031
1032 if (gpuDynInst->n_reg > MAX_REGS_FOR_NON_VEC_MEM_INST)
1033 gpuDynInst->statusVector.clear();
1034
1035 if (gpuDynInst->isLoad() || gpuDynInst->isAtomic()) {
1036 assert(compute_unit->globalMemoryPipe.isGMLdRespFIFOWrRdy());
1037
1038 compute_unit->globalMemoryPipe.getGMLdRespFIFO()
1039 .push(gpuDynInst);
1040 } else {
1041 assert(compute_unit->globalMemoryPipe.isGMStRespFIFOWrRdy());
1042
1043 compute_unit->globalMemoryPipe.getGMStRespFIFO()
1044 .push(gpuDynInst);
1045 }
1046
1047 DPRINTF(GPUMem, "CU%d: WF[%d][%d]: packet totally complete\n",
1048 compute_unit->cu_id, gpuDynInst->simdId,
1049 gpuDynInst->wfSlotId);
1050
1051 // after clearing the status vectors,
1052 // see if there is a continuation to perform
1053 // the continuation may generate more work for
1054 // this memory request
1055 if (gpuDynInst->useContinuation) {
1056 assert(!gpuDynInst->isNoScope());
1057 gpuDynInst->execContinuation(gpuDynInst->staticInstruction(),
1058 gpuDynInst);
1059 }
1060 }
1061 }
1062 } else {
1063 gpuDynInst->statusBitVector = VectorMask(0);
1064
1065 if (gpuDynInst->useContinuation) {
1066 assert(!gpuDynInst->isNoScope());
1067 gpuDynInst->execContinuation(gpuDynInst->staticInstruction(),
1068 gpuDynInst);
1069 }
1070 }
1071
1072 delete pkt->senderState;
1073 delete pkt->req;
1074 delete pkt;
1075}
1076
1077ComputeUnit*
1078ComputeUnitParams::create()
1079{
1080 return new ComputeUnit(this);
1081}
1082
1083bool
1084ComputeUnit::DTLBPort::recvTimingResp(PacketPtr pkt)
1085{
1086 Addr line = pkt->req->getPaddr();
1087
1088 DPRINTF(GPUTLB, "CU%d: DTLBPort received %#x->%#x\n", computeUnit->cu_id,
1089 pkt->req->getVaddr(), line);
1090
1091 assert(pkt->senderState);
1092 computeUnit->tlbCycles += curTick();
1093
1094 // pop off the TLB translation state
1095 TheISA::GpuTLB::TranslationState *translation_state =
1096 safe_cast<TheISA::GpuTLB::TranslationState*>(pkt->senderState);
1097
1098 // no PageFaults are permitted for data accesses
1099 if (!translation_state->tlbEntry->valid) {
1100 DTLBPort::SenderState *sender_state =
1101 safe_cast<DTLBPort::SenderState*>(translation_state->saved);
1102
1103 Wavefront *w M5_VAR_USED =
1104 computeUnit->wfList[sender_state->_gpuDynInst->simdId]
1105 [sender_state->_gpuDynInst->wfSlotId];
1106
1107        DPRINTFN("Wave %d couldn't translate vaddr %#x\n", w->wfDynId,
1108 pkt->req->getVaddr());
1109 }
1110
1111 assert(translation_state->tlbEntry->valid);
1112
1113 // update the hitLevel distribution
1114 int hit_level = translation_state->hitLevel;
1115 computeUnit->hitsPerTLBLevel[hit_level]++;
1116
1117 delete translation_state->tlbEntry;
1118 assert(!translation_state->ports.size());
1119 pkt->senderState = translation_state->saved;
1120
1121 // for prefetch pkt
1122 BaseTLB::Mode TLB_mode = translation_state->tlbMode;
1123
1124 delete translation_state;
1125
1126 // use the original sender state to know how to close this transaction
1127 DTLBPort::SenderState *sender_state =
1128 safe_cast<DTLBPort::SenderState*>(pkt->senderState);
1129
1130 GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;
1131 int mp_index = sender_state->portIndex;
1132 Addr vaddr = pkt->req->getVaddr();
1133 gpuDynInst->memStatusVector[line].push_back(mp_index);
1134 gpuDynInst->tlbHitLevel[mp_index] = hit_level;
1135
1136 MemCmd requestCmd;
1137
1138 if (pkt->cmd == MemCmd::ReadResp) {
1139 requestCmd = MemCmd::ReadReq;
1140 } else if (pkt->cmd == MemCmd::WriteResp) {
1141 requestCmd = MemCmd::WriteReq;
1142 } else if (pkt->cmd == MemCmd::SwapResp) {
1143 requestCmd = MemCmd::SwapReq;
1144 } else {
1145 panic("unsupported response to request conversion %s\n",
1146 pkt->cmd.toString());
1147 }
1148
1149 if (computeUnit->prefetchDepth) {
1150 int simdId = gpuDynInst->simdId;
1151 int wfSlotId = gpuDynInst->wfSlotId;
1152 Addr last = 0;
1153
1154 switch(computeUnit->prefetchType) {
1155 case Enums::PF_CU:
1156 last = computeUnit->lastVaddrCU[mp_index];
1157 break;
1158 case Enums::PF_PHASE:
1159 last = computeUnit->lastVaddrSimd[simdId][mp_index];
1160 break;
1161 case Enums::PF_WF:
1162 last = computeUnit->lastVaddrWF[simdId][wfSlotId][mp_index];
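            // note: falls through to the default case, which only breaks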
1163 default:
1164 break;
1165 }
1166
1167 DPRINTF(GPUPrefetch, "CU[%d][%d][%d][%d]: %#x was last\n",
1168 computeUnit->cu_id, simdId, wfSlotId, mp_index, last);
1169
1170 int stride = last ? (roundDown(vaddr, TheISA::PageBytes) -
1171 roundDown(last, TheISA::PageBytes)) >> TheISA::PageShift
1172 : 0;
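        // stride is measured in whole pages between successive accesses;
        // e.g. assuming 4 KiB pages, last = 0x2000 and vaddr = 0x6000 give
        // a stride of 4 pages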
1173
1174 DPRINTF(GPUPrefetch, "Stride is %d\n", stride);
1175
1176 computeUnit->lastVaddrCU[mp_index] = vaddr;
1177 computeUnit->lastVaddrSimd[simdId][mp_index] = vaddr;
1178 computeUnit->lastVaddrWF[simdId][wfSlotId][mp_index] = vaddr;
1179
1180 stride = (computeUnit->prefetchType == Enums::PF_STRIDE) ?
1181 computeUnit->prefetchStride: stride;
1182
1183 DPRINTF(GPUPrefetch, "%#x to: CU[%d][%d][%d][%d]\n", vaddr,
1184 computeUnit->cu_id, simdId, wfSlotId, mp_index);
1185
1186 DPRINTF(GPUPrefetch, "Prefetching from %#x:", vaddr);
1187
1188 // Prefetch Next few pages atomically
1189 for (int pf = 1; pf <= computeUnit->prefetchDepth; ++pf) {
1190 DPRINTF(GPUPrefetch, "%d * %d: %#x\n", pf, stride,
1191 vaddr+stride*pf*TheISA::PageBytes);
1192
1193 if (!stride)
1194 break;
1195
1196 Request *prefetch_req = new Request(0, vaddr + stride * pf *
1197 TheISA::PageBytes,
1198 sizeof(uint8_t), 0,
1199 computeUnit->masterId(),
1200 0, 0, 0);
1201
1202 PacketPtr prefetch_pkt = new Packet(prefetch_req, requestCmd);
1203 uint8_t foo = 0;
1204 prefetch_pkt->dataStatic(&foo);
1205
1206            // Because it's an atomic op, only TLB translation state is needed
1207 prefetch_pkt->senderState =
1208 new TheISA::GpuTLB::TranslationState(TLB_mode,
1209 computeUnit->shader->gpuTc,
1210 true);
1211
1212 // Currently prefetches are zero-latency, hence the sendFunctional
1213 sendFunctional(prefetch_pkt);
1214
1215 /* safe_cast the senderState */
1216 TheISA::GpuTLB::TranslationState *tlb_state =
1217 safe_cast<TheISA::GpuTLB::TranslationState*>(
1218 prefetch_pkt->senderState);
1219
1220
1221 delete tlb_state->tlbEntry;
1222 delete tlb_state;
1223 delete prefetch_pkt->req;
1224 delete prefetch_pkt;
1225 }
1226 }
1227
1228 // First we must convert the response cmd back to a request cmd so that
1229 // the request can be sent through the cu's master port
1230 PacketPtr new_pkt = new Packet(pkt->req, requestCmd);
1231 new_pkt->dataStatic(pkt->getPtr<uint8_t>());
1232 delete pkt->senderState;
1233 delete pkt;
1234
1235 // New SenderState for the memory access
1236 new_pkt->senderState =
1237 new ComputeUnit::DataPort::SenderState(gpuDynInst, mp_index,
1238 nullptr);
1239
1240 // translation is done. Schedule the mem_req_event at the appropriate
1241 // cycle to send the timing memory request to ruby
1242 ComputeUnit::DataPort::MemReqEvent *mem_req_event =
1243 new ComputeUnit::DataPort::MemReqEvent(computeUnit->memPort[mp_index],
1244 new_pkt);
1245
1246 DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x data scheduled\n",
1247 computeUnit->cu_id, gpuDynInst->simdId,
1248 gpuDynInst->wfSlotId, mp_index, new_pkt->req->getPaddr());
1249
1250 computeUnit->schedule(mem_req_event, curTick() +
1251 computeUnit->req_tick_latency);
1252
1253 return true;
1254}
1255
1256const char*
1257ComputeUnit::DataPort::MemReqEvent::description() const
1258{
1259 return "ComputeUnit memory request event";
1260}
1261
1262void
1263ComputeUnit::DataPort::MemReqEvent::process()
1264{
1265 SenderState *sender_state = safe_cast<SenderState*>(pkt->senderState);
1266 GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;
1267 ComputeUnit *compute_unit M5_VAR_USED = dataPort->computeUnit;
1268
1269 if (!(dataPort->sendTimingReq(pkt))) {
1270 dataPort->retries.push_back(std::make_pair(pkt, gpuDynInst));
1271
1272 DPRINTF(GPUPort,
1273 "CU%d: WF[%d][%d]: index %d, addr %#x data req failed!\n",
1274 compute_unit->cu_id, gpuDynInst->simdId,
1275 gpuDynInst->wfSlotId, dataPort->index,
1276 pkt->req->getPaddr());
1277 } else {
1278 DPRINTF(GPUPort,
1279 "CU%d: WF[%d][%d]: index %d, addr %#x data req sent!\n",
1280 compute_unit->cu_id, gpuDynInst->simdId,
1281 gpuDynInst->wfSlotId, dataPort->index,
1282 pkt->req->getPaddr());
1283 }
1284}
1285
1286/*
1287 * The initial translation request could have been rejected, if
1288 * <retries> queue is not empty. Retry sending the translation
1289 * request. sendRetry() is called from the peer port whenever
1290 * a translation completes.
1291 */
1292void
1293ComputeUnit::DTLBPort::recvReqRetry()
1294{
1295 int len = retries.size();
1296
1297 DPRINTF(GPUTLB, "CU%d: DTLB recvReqRetry - %d pending requests\n",
1298 computeUnit->cu_id, len);
1299
1300 assert(len > 0);
1301 assert(isStalled());
1302 // recvReqRetry is an indication that the resource on which this
1303 // port was stalling on is freed. So, remove the stall first
1304 unstallPort();
1305
1306 for (int i = 0; i < len; ++i) {
1307 PacketPtr pkt = retries.front();
1308 Addr vaddr M5_VAR_USED = pkt->req->getVaddr();
1309        DPRINTF(GPUTLB, "CU%d: retrying D-translation for address %#x", computeUnit->cu_id, vaddr);
1310
1311 if (!sendTimingReq(pkt)) {
1312 // Stall port
1313 stallPort();
1314 DPRINTF(GPUTLB, ": failed again\n");
1315 break;
1316 } else {
1317 DPRINTF(GPUTLB, ": successful\n");
1318 retries.pop_front();
1319 }
1320 }
1321}
1322
1323bool
1324ComputeUnit::ITLBPort::recvTimingResp(PacketPtr pkt)
1325{
1326 Addr line M5_VAR_USED = pkt->req->getPaddr();
1327 DPRINTF(GPUTLB, "CU%d: ITLBPort received %#x->%#x\n",
1328 computeUnit->cu_id, pkt->req->getVaddr(), line);
1329
1330 assert(pkt->senderState);
1331
1332 // pop off the TLB translation state
1333 TheISA::GpuTLB::TranslationState *translation_state =
1334 safe_cast<TheISA::GpuTLB::TranslationState*>(pkt->senderState);
1335
1336 bool success = translation_state->tlbEntry->valid;
1337 delete translation_state->tlbEntry;
1338 assert(!translation_state->ports.size());
1339 pkt->senderState = translation_state->saved;
1340 delete translation_state;
1341
1342 // use the original sender state to know how to close this transaction
1343 ITLBPort::SenderState *sender_state =
1344 safe_cast<ITLBPort::SenderState*>(pkt->senderState);
1345
1346 // get the wavefront associated with this translation request
1347 Wavefront *wavefront = sender_state->wavefront;
1348 delete pkt->senderState;
1349
1350 if (success) {
1351 // pkt is reused in fetch(), don't delete it here. However, we must
1352 // reset the command to be a request so that it can be sent through
1353 // the cu's master port
1354 assert(pkt->cmd == MemCmd::ReadResp);
1355 pkt->cmd = MemCmd::ReadReq;
1356
1357 computeUnit->fetchStage.fetch(pkt, wavefront);
1358 } else {
1359 if (wavefront->dropFetch) {
1360 assert(wavefront->instructionBuffer.empty());
1361 wavefront->dropFetch = false;
1362 }
1363
1364 wavefront->pendingFetch = 0;
1365 }
1366
1367 return true;
1368}
1369
1370/*
1371 * The initial translation request could have been rejected, if
1372 * <retries> queue is not empty. Retry sending the translation
1373 * request. sendRetry() is called from the peer port whenever
1374 * a translation completes.
1375 */
1376void
1377ComputeUnit::ITLBPort::recvReqRetry()
1378{
1379
1380 int len = retries.size();
1381    DPRINTF(GPUTLB, "CU%d: ITLB recvReqRetry - %d pending requests\n", computeUnit->cu_id, len);
1382
1383 assert(len > 0);
1384 assert(isStalled());
1385
1386 // recvReqRetry is an indication that the resource on which this
1387 // port was stalling on is freed. So, remove the stall first
1388 unstallPort();
1389
1390 for (int i = 0; i < len; ++i) {
1391 PacketPtr pkt = retries.front();
1392 Addr vaddr M5_VAR_USED = pkt->req->getVaddr();
1393        DPRINTF(GPUTLB, "CU%d: retrying I-translation for address %#x", computeUnit->cu_id, vaddr);
1394
1395 if (!sendTimingReq(pkt)) {
1396 stallPort(); // Stall port
1397 DPRINTF(GPUTLB, ": failed again\n");
1398 break;
1399 } else {
1400 DPRINTF(GPUTLB, ": successful\n");
1401 retries.pop_front();
1402 }
1403 }
1404}
1405
1406void
1407ComputeUnit::regStats()
1408{
1409 MemObject::regStats();
1410
1411 vALUInsts
1412 .name(name() + ".valu_insts")
1413 .desc("Number of vector ALU insts issued.")
1414 ;
1415 vALUInstsPerWF
1416 .name(name() + ".valu_insts_per_wf")
1417 .desc("The avg. number of vector ALU insts issued per-wavefront.")
1418 ;
1419 sALUInsts
1420 .name(name() + ".salu_insts")
1421 .desc("Number of scalar ALU insts issued.")
1422 ;
1423 sALUInstsPerWF
1424 .name(name() + ".salu_insts_per_wf")
1425 .desc("The avg. number of scalar ALU insts issued per-wavefront.")
1426 ;
1427 instCyclesVALU
1428 .name(name() + ".inst_cycles_valu")
1429 .desc("Number of cycles needed to execute VALU insts.")
1430 ;
1431 instCyclesSALU
1432 .name(name() + ".inst_cycles_salu")
1433 .desc("Number of cycles needed to execute SALU insts.")
1434 ;
1435 threadCyclesVALU
1436 .name(name() + ".thread_cycles_valu")
1437 .desc("Number of thread cycles used to execute vector ALU ops. "
1438 "Similar to instCyclesVALU but multiplied by the number of "
1439 "active threads.")
1440 ;
1441 vALUUtilization
1442 .name(name() + ".valu_utilization")
1443 .desc("Percentage of active vector ALU threads in a wave.")
1444 ;
1445 ldsNoFlatInsts
1446 .name(name() + ".lds_no_flat_insts")
1447 .desc("Number of LDS insts issued, not including FLAT "
1448 "accesses that resolve to LDS.")
1449 ;
1450 ldsNoFlatInstsPerWF
1451 .name(name() + ".lds_no_flat_insts_per_wf")
1452 .desc("The avg. number of LDS insts (not including FLAT "
1453 "accesses that resolve to LDS) per-wavefront.")
1454 ;
1455 flatVMemInsts
1456 .name(name() + ".flat_vmem_insts")
1457 .desc("The number of FLAT insts that resolve to vmem issued.")
1458 ;
1459 flatVMemInstsPerWF
1460 .name(name() + ".flat_vmem_insts_per_wf")
1461 .desc("The average number of FLAT insts that resolve to vmem "
1462 "issued per-wavefront.")
1463 ;
1464 flatLDSInsts
1465 .name(name() + ".flat_lds_insts")
1466 .desc("The number of FLAT insts that resolve to LDS issued.")
1467 ;
1468 flatLDSInstsPerWF
1469 .name(name() + ".flat_lds_insts_per_wf")
1470 .desc("The average number of FLAT insts that resolve to LDS "
1471 "issued per-wavefront.")
1472 ;
1473 vectorMemWrites
1474 .name(name() + ".vector_mem_writes")
1475 .desc("Number of vector mem write insts (excluding FLAT insts).")
1476 ;
1477 vectorMemWritesPerWF
1478 .name(name() + ".vector_mem_writes_per_wf")
1479 .desc("The average number of vector mem write insts "
1480 "(excluding FLAT insts) per-wavefront.")
1481 ;
1482 vectorMemReads
1483 .name(name() + ".vector_mem_reads")
1484 .desc("Number of vector mem read insts (excluding FLAT insts).")
1485 ;
1486 vectorMemReadsPerWF
1487 .name(name() + ".vector_mem_reads_per_wf")
1488 .desc("The avg. number of vector mem read insts (excluding "
1489 "FLAT insts) per-wavefront.")
1490 ;
1491 scalarMemWrites
1492 .name(name() + ".scalar_mem_writes")
1493 .desc("Number of scalar mem write insts.")
1494 ;
1495 scalarMemWritesPerWF
1496 .name(name() + ".scalar_mem_writes_per_wf")
1497 .desc("The average number of scalar mem write insts per-wavefront.")
1498 ;
1499 scalarMemReads
1500 .name(name() + ".scalar_mem_reads")
1501 .desc("Number of scalar mem read insts.")
1502 ;
1503 scalarMemReadsPerWF
1504 .name(name() + ".scalar_mem_reads_per_wf")
1505 .desc("The average number of scalar mem read insts per-wavefront.")
1506 ;
1507
1508 vALUInstsPerWF = vALUInsts / completedWfs;
1509 sALUInstsPerWF = sALUInsts / completedWfs;
1510 vALUUtilization = (threadCyclesVALU / (64 * instCyclesVALU)) * 100;
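    // note: the 64 above hardcodes the wavefront width; it presumably
    // should match wfSize()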
1511 ldsNoFlatInstsPerWF = ldsNoFlatInsts / completedWfs;
1512 flatVMemInstsPerWF = flatVMemInsts / completedWfs;
1513 flatLDSInstsPerWF = flatLDSInsts / completedWfs;
1514 vectorMemWritesPerWF = vectorMemWrites / completedWfs;
1515 vectorMemReadsPerWF = vectorMemReads / completedWfs;
1516 scalarMemWritesPerWF = scalarMemWrites / completedWfs;
1517 scalarMemReadsPerWF = scalarMemReads / completedWfs;
1518
1519 tlbCycles
1520 .name(name() + ".tlb_cycles")
1521 .desc("total number of cycles for all uncoalesced requests")
1522 ;
1523
1524 tlbRequests
1525 .name(name() + ".tlb_requests")
1526 .desc("number of uncoalesced requests")
1527 ;
1528
1529 tlbLatency
1530 .name(name() + ".avg_translation_latency")
1531 .desc("Avg. translation latency for data translations")
1532 ;
1533
1534 tlbLatency = tlbCycles / tlbRequests;
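    // tlbCycles is decremented by curTick() when a translation is sent and
    // incremented by curTick() when it returns, so the running sum is the
    // total ticks spent translating; dividing by tlbRequests yields the
    // average latency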
1535
1536 hitsPerTLBLevel
1537 .init(4)
1538 .name(name() + ".TLB_hits_distribution")
1539        .desc("TLB hits distribution (0 for page table, x for Lx-TLB)")
1540 ;
1541
1542 // fixed number of TLB levels
1543 for (int i = 0; i < 4; ++i) {
1544 if (!i)
1545 hitsPerTLBLevel.subname(i,"page_table");
1546 else
1547 hitsPerTLBLevel.subname(i, csprintf("L%d_TLB",i));
1548 }
1549
1550 execRateDist
1551 .init(0, 10, 2)
1552 .name(name() + ".inst_exec_rate")
1553 .desc("Instruction Execution Rate: Number of executed vector "
1554 "instructions per cycle")
1555 ;
1556
1557 ldsBankConflictDist
1558 .init(0, wfSize(), 2)
1559 .name(name() + ".lds_bank_conflicts")
1560 .desc("Number of bank conflicts per LDS memory packet")
1561 ;
1562
1563 ldsBankAccesses
1564 .name(name() + ".lds_bank_access_cnt")
1565 .desc("Total number of LDS bank accesses")
1566 ;
1567
1568 pageDivergenceDist
1569 // A wavefront can touch up to N pages per memory instruction where
1570 // N is equal to the wavefront size
1571 // The number of pages per bin can be configured (here it's 4).
1572 .init(1, wfSize(), 4)
1573 .name(name() + ".page_divergence_dist")
1574 .desc("pages touched per wf (over all mem. instr.)")
1575 ;
1576
1577 controlFlowDivergenceDist
1578 .init(1, wfSize(), 4)
1579 .name(name() + ".warp_execution_dist")
1580        .desc("number of lanes active per instruction (over all instructions)")
1581 ;
1582
1583 activeLanesPerGMemInstrDist
1584 .init(1, wfSize(), 4)
1585 .name(name() + ".gmem_lanes_execution_dist")
1586 .desc("number of active lanes per global memory instruction")
1587 ;
1588
1589 activeLanesPerLMemInstrDist
1590 .init(1, wfSize(), 4)
1591 .name(name() + ".lmem_lanes_execution_dist")
1592 .desc("number of active lanes per local memory instruction")
1593 ;
1594
1595 numInstrExecuted
1596 .name(name() + ".num_instr_executed")
1597 .desc("number of instructions executed")
1598 ;
1599
1600 numVecOpsExecuted
1601 .name(name() + ".num_vec_ops_executed")
1602 .desc("number of vec ops executed (e.g. WF size/inst)")
1603 ;
1604
1605 totalCycles
1606 .name(name() + ".num_total_cycles")
1607 .desc("number of cycles the CU ran for")
1608 ;
1609
1610 ipc
1611 .name(name() + ".ipc")
1612 .desc("Instructions per cycle (this CU only)")
1613 ;
1614
1615 vpc
1616 .name(name() + ".vpc")
1617 .desc("Vector Operations per cycle (this CU only)")
1618 ;
1619
1620 numALUInstsExecuted
1621 .name(name() + ".num_alu_insts_executed")
1622 .desc("Number of dynamic non-GM memory insts executed")
1623 ;
1624
1625 wgBlockedDueLdsAllocation
1626 .name(name() + ".wg_blocked_due_lds_alloc")
1627 .desc("Workgroup blocked due to LDS capacity")
1628 ;
1629
1630 ipc = numInstrExecuted / totalCycles;
1631 vpc = numVecOpsExecuted / totalCycles;
1632
1633 numTimesWgBlockedDueVgprAlloc
1634 .name(name() + ".times_wg_blocked_due_vgpr_alloc")
1635 .desc("Number of times WGs are blocked due to VGPR allocation per SIMD")
1636 ;
1637
1638 dynamicGMemInstrCnt
1639 .name(name() + ".global_mem_instr_cnt")
1640 .desc("dynamic global memory instructions count")
1641 ;
1642
1643 dynamicLMemInstrCnt
1644 .name(name() + ".local_mem_instr_cnt")
1645        .desc("dynamic local memory instruction count")
1646 ;
1647
1648 numALUInstsExecuted = numInstrExecuted - dynamicGMemInstrCnt -
1649 dynamicLMemInstrCnt;
1650
1651 completedWfs
1652 .name(name() + ".num_completed_wfs")
1653 .desc("number of completed wavefronts")
1654 ;
1655
1656 numCASOps
1657 .name(name() + ".num_CAS_ops")
1658 .desc("number of compare and swap operations")
1659 ;
1660
1661 numFailedCASOps
1662 .name(name() + ".num_failed_CAS_ops")
1663 .desc("number of compare and swap operations that failed")
1664 ;
1665
1666 // register stats of pipeline stages
1667 fetchStage.regStats();
1668 scoreboardCheckStage.regStats();
1669 scheduleStage.regStats();
1670 execStage.regStats();
1671
1672 // register stats of memory pipeline
1673 globalMemoryPipe.regStats();
1674 localMemoryPipe.regStats();
1675}
1676
1677void
1678ComputeUnit::updateInstStats(GPUDynInstPtr gpuDynInst)
1679{
1680 if (gpuDynInst->isScalar()) {
1681 if (gpuDynInst->isALU() && !gpuDynInst->isWaitcnt()) {
1682 sALUInsts++;
1683 instCyclesSALU++;
1684 } else if (gpuDynInst->isLoad()) {
1685 scalarMemReads++;
1686 } else if (gpuDynInst->isStore()) {
1687 scalarMemWrites++;
1688 }
1689 } else {
1690 if (gpuDynInst->isALU()) {
1691 vALUInsts++;
1692 instCyclesVALU++;
1693 threadCyclesVALU += gpuDynInst->wavefront()->execMask().count();
1694 } else if (gpuDynInst->isFlat()) {
1695 if (gpuDynInst->isLocalMem()) {
1696 flatLDSInsts++;
1697 } else {
1698 flatVMemInsts++;
1699 }
1700 } else if (gpuDynInst->isLocalMem()) {
1701 ldsNoFlatInsts++;
1702 } else if (gpuDynInst->isLoad()) {
1703 vectorMemReads++;
1704 } else if (gpuDynInst->isStore()) {
1705 vectorMemWrites++;
1706 }
1707 }
1708}
1709
1710void
1711ComputeUnit::updatePageDivergenceDist(Addr addr)
1712{
1713 Addr virt_page_addr = roundDown(addr, TheISA::PageBytes);
1714
1715 if (!pagesTouched.count(virt_page_addr))
1716 pagesTouched[virt_page_addr] = 1;
1717 else
1718 pagesTouched[virt_page_addr]++;
1719}
1720
1721void
1722ComputeUnit::CUExitCallback::process()
1723{
1724 if (computeUnit->countPages) {
1725 std::ostream *page_stat_file =
1726 simout.create(computeUnit->name().c_str())->stream();
1727
1728 *page_stat_file << "page, wavefront accesses, workitem accesses" <<
1729 std::endl;
1730
1731 for (auto iter : computeUnit->pageAccesses) {
1732 *page_stat_file << std::hex << iter.first << ",";
1733 *page_stat_file << std::dec << iter.second.first << ",";
1734 *page_stat_file << std::dec << iter.second.second << std::endl;
1735 }
1736 }
1737}
1738
1739bool
1740ComputeUnit::isDone() const
1741{
1742 for (int i = 0; i < numSIMDs; ++i) {
1743 if (!isSimdDone(i)) {
1744 return false;
1745 }
1746 }
1747
1748 bool glbMemBusRdy = true;
1749 for (int j = 0; j < numGlbMemUnits; ++j) {
1750 glbMemBusRdy &= vrfToGlobalMemPipeBus[j].rdy();
1751 }
1752 bool locMemBusRdy = true;
1753 for (int j = 0; j < numLocMemUnits; ++j) {
1754 locMemBusRdy &= vrfToLocalMemPipeBus[j].rdy();
1755 }
1756
1757 if (!globalMemoryPipe.isGMLdRespFIFOWrRdy() ||
1758 !globalMemoryPipe.isGMStRespFIFOWrRdy() ||
1759 !globalMemoryPipe.isGMReqFIFOWrRdy() || !localMemoryPipe.isLMReqFIFOWrRdy()
1760 || !localMemoryPipe.isLMRespFIFOWrRdy() || !locMemToVrfBus.rdy() ||
1761 !glbMemToVrfBus.rdy() || !locMemBusRdy || !glbMemBusRdy) {
1762 return false;
1763 }
1764
1765 return true;
1766}
1767
1768int32_t
1769ComputeUnit::getRefCounter(const uint32_t dispatchId, const uint32_t wgId) const
1770{
1771 return lds.getRefCounter(dispatchId, wgId);
1772}
1773
1774bool
1775ComputeUnit::isSimdDone(uint32_t simdId) const
1776{
1777 assert(simdId < numSIMDs);
1778
1779    for (int i = 0; i < numGlbMemUnits; ++i) {
1780 if (!vrfToGlobalMemPipeBus[i].rdy())
1781 return false;
1782 }
1783    for (int i = 0; i < numLocMemUnits; ++i) {
1784 if (!vrfToLocalMemPipeBus[i].rdy())
1785 return false;
1786 }
1787 if (!aluPipe[simdId].rdy()) {
1788 return false;
1789 }
1790
1791    for (int i_wf = 0; i_wf < shader->n_wf; ++i_wf) {
1792 if (wfList[simdId][i_wf]->status != Wavefront::S_STOPPED) {
1793 return false;
1794 }
1795 }
1796
1797 return true;
1798}
1799
1800/**
1801 * send a general request to the LDS
1802 * make sure to check the return value here as the request might be
1803 * NACK'd; returning false means the caller needs a backup plan
1804 */
1805bool
1806ComputeUnit::sendToLds(GPUDynInstPtr gpuDynInst)
1807{
1808 // this is just a request to carry the GPUDynInstPtr
1809 // back and forth
1810 Request *newRequest = new Request();
1811 newRequest->setPaddr(0x0);
1812
1813 // ReadReq is not evaluted by the LDS but the Packet ctor requires this
1814 PacketPtr newPacket = new Packet(newRequest, MemCmd::ReadReq);
1815
1816 // This is the SenderState needed upon return
1817 newPacket->senderState = new LDSPort::SenderState(gpuDynInst);
1818
1819 return ldsPort->sendTimingReq(newPacket);
1820}
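
/**
 * Illustrative note (not in the original source): on a false return the
 * packet has not been dropped -- LDSPort::sendTimingReq() below parks it on
 * its internal retries queue -- so the "backup plan" mostly amounts to not
 * treating the access as complete yet. A hypothetical caller might do:
 *
 *     if (!sendToLds(gpuDynInst)) {
 *         // delivery is deferred until the LDS signals space via
 *         // recvReqRetry(); do not retire the instruction yet
 *     }
 */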

/**
 * Get the result of packets sent to the LDS when they return.
 */
bool
ComputeUnit::LDSPort::recvTimingResp(PacketPtr packet)
{
    const ComputeUnit::LDSPort::SenderState *senderState =
        dynamic_cast<ComputeUnit::LDSPort::SenderState *>(packet->senderState);

    fatal_if(!senderState, "did not get the right sort of sender state");

    GPUDynInstPtr gpuDynInst = senderState->getMemInst();

    delete packet->senderState;
    delete packet->req;
    delete packet;

    computeUnit->localMemoryPipe.getLMRespFIFO().push(gpuDynInst);
    return true;
}
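
/**
 * Illustrative summary (not in the original source) of the SenderState
 * round trip: sendToLds() attaches the GPUDynInstPtr to the packet, the LDS
 * echoes the packet back, and recvTimingResp() unpacks the instruction and
 * hands it to the local-memory pipeline.
 *
 *     sendToLds(inst)              // senderState = new SenderState(inst)
 *       -> LDS                     // request consumed by the LDS
 *       -> recvTimingResp(pkt)     // inst = senderState->getMemInst()
 *       -> LM response FIFO        // inst resumes in localMemoryPipe
 */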

/**
 * Attempt to send this packet. One of three things happens: the port is
 * already stalled, the request is NACK'd and the port must stall, or the
 * request goes through. Whenever a request cannot be sent, it is added to
 * the retries queue.
 */
bool
ComputeUnit::LDSPort::sendTimingReq(PacketPtr pkt)
{
    ComputeUnit::LDSPort::SenderState *sender_state =
        dynamic_cast<ComputeUnit::LDSPort::SenderState*>(pkt->senderState);
    fatal_if(!sender_state, "packet without a valid sender state");

    GPUDynInstPtr gpuDynInst M5_VAR_USED = sender_state->getMemInst();

    if (isStalled()) {
        fatal_if(retries.empty(), "a stalled port must have retries waiting");

        retries.push(pkt);

        DPRINTF(GPUPort, "CU%d: WF[%d][%d]: LDS send failed!\n",
                computeUnit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId);
        return false;
    } else if (!MasterPort::sendTimingReq(pkt)) {
        // need to stall the LDS port until a recvReqRetry() is received,
        // which indicates that there is more space
        stallPort();
        retries.push(pkt);

        DPRINTF(GPUPort, "CU%d: WF[%d][%d]: addr %#x lds req failed!\n",
                computeUnit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId, pkt->req->getPaddr());
        return false;
    } else {
        DPRINTF(GPUPort, "CU%d: WF[%d][%d]: addr %#x lds req sent!\n",
                computeUnit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId, pkt->req->getPaddr());
        return true;
    }
}
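
/**
 * Illustrative summary (not in the original source) of the three outcomes
 * above:
 *
 *     port state    MasterPort::sendTimingReq    action             return
 *     stalled       not attempted                queue on retries   false
 *     not stalled   rejected (NACK)              stall + queue      false
 *     not stalled   accepted                     nothing to do      true
 */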

/**
 * The bus is telling the port that there is now space, so retrying stalled
 * requests should work now. This lets the port have a request NACK'd and
 * then have the receiver say when there is space, rather than simply
 * retrying the send every cycle.
 */
void
ComputeUnit::LDSPort::recvReqRetry()
{
    auto queueSize = retries.size();

    DPRINTF(GPUPort, "CU%d: LDSPort recvReqRetry - %d pending requests\n",
            computeUnit->cu_id, queueSize);

    fatal_if(queueSize < 1,
             "recvReqRetry() called with no pending requests");
    fatal_if(!isStalled(),
             "recvReqRetry() happened when the port was not stalled");

    unstallPort();

    while (!retries.empty()) {
        PacketPtr packet = retries.front();

        DPRINTF(GPUPort, "CU%d: retrying LDS send\n", computeUnit->cu_id);

        if (!MasterPort::sendTimingReq(packet)) {
            // the send failed again; stall the port and wait for another
            // recvReqRetry() before draining the rest of the queue
            stallPort();
            DPRINTF(GPUPort, "CU%d: LDS send failed again\n",
                    computeUnit->cu_id);
            break;
        } else {
            DPRINTF(GPUPort, "CU%d: LDS send successful\n",
                    computeUnit->cu_id);
            retries.pop();
        }
    }
}
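
/**
 * Illustrative walkthrough (not in the original source) of the drain loop
 * above, assuming three queued packets A, B, C and a receiver with room
 * for only two:
 *
 *     recvReqRetry()         // unstallPort(), begin draining
 *       send A -> accepted   // retries.pop(), queue: B, C
 *       send B -> accepted   // retries.pop(), queue: C
 *       send C -> rejected   // stallPort(), break; C stays queued
 *     ... later ...
 *     recvReqRetry()         // a second retry drains C
 */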