compute_unit.cc revision 12749
1/* 2 * Copyright (c) 2011-2015 Advanced Micro Devices, Inc. 3 * All rights reserved. 4 * 5 * For use for simulation and test purposes only 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions are met: 9 * 10 * 1. Redistributions of source code must retain the above copyright notice, 11 * this list of conditions and the following disclaimer. 12 * 13 * 2. Redistributions in binary form must reproduce the above copyright notice, 14 * this list of conditions and the following disclaimer in the documentation 15 * and/or other materials provided with the distribution. 16 * 17 * 3. Neither the name of the copyright holder nor the names of its 18 * contributors may be used to endorse or promote products derived from this 19 * software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 * 33 * Authors: John Kalamatianos, 34 * Anthony Gutierrez 35 */ 36 37#include "gpu-compute/compute_unit.hh" 38 39#include <limits> 40 41#include "base/output.hh" 42#include "debug/GPUDisp.hh" 43#include "debug/GPUExec.hh" 44#include "debug/GPUFetch.hh" 45#include "debug/GPUMem.hh" 46#include "debug/GPUPort.hh" 47#include "debug/GPUPrefetch.hh" 48#include "debug/GPUSync.hh" 49#include "debug/GPUTLB.hh" 50#include "gpu-compute/dispatcher.hh" 51#include "gpu-compute/gpu_dyn_inst.hh" 52#include "gpu-compute/gpu_static_inst.hh" 53#include "gpu-compute/ndrange.hh" 54#include "gpu-compute/shader.hh" 55#include "gpu-compute/simple_pool_manager.hh" 56#include "gpu-compute/vector_register_file.hh" 57#include "gpu-compute/wavefront.hh" 58#include "mem/page_table.hh" 59#include "sim/process.hh" 60 61ComputeUnit::ComputeUnit(const Params *p) : MemObject(p), fetchStage(p), 62 scoreboardCheckStage(p), scheduleStage(p), execStage(p), 63 globalMemoryPipe(p), localMemoryPipe(p), rrNextMemID(0), rrNextALUWp(0), 64 cu_id(p->cu_id), vrf(p->vector_register_file), numSIMDs(p->num_SIMDs), 65 spBypassPipeLength(p->spbypass_pipe_length), 66 dpBypassPipeLength(p->dpbypass_pipe_length), 67 issuePeriod(p->issue_period), 68 numGlbMemUnits(p->num_global_mem_pipes), 69 numLocMemUnits(p->num_shared_mem_pipes), 70 perLaneTLB(p->perLaneTLB), prefetchDepth(p->prefetch_depth), 71 prefetchStride(p->prefetch_stride), prefetchType(p->prefetch_prev_type), 72 xact_cas_mode(p->xactCasMode), debugSegFault(p->debugSegFault), 73 functionalTLB(p->functionalTLB), localMemBarrier(p->localMemBarrier), 74 countPages(p->countPages), barrier_id(0), 75 vrfToCoalescerBusWidth(p->vrf_to_coalescer_bus_width), 76 coalescerToVrfBusWidth(p->coalescer_to_vrf_bus_width), 77 req_tick_latency(p->mem_req_latency * p->clk_domain->clockPeriod()), 78 resp_tick_latency(p->mem_resp_latency * 
p->clk_domain->clockPeriod()), 79 _masterId(p->system->getMasterId(this, "ComputeUnit")), 80 lds(*p->localDataStore), _cacheLineSize(p->system->cacheLineSize()), 81 globalSeqNum(0), wavefrontSize(p->wfSize), 82 kernelLaunchInst(new KernelLaunchStaticInst()) 83{ 84 /** 85 * This check is necessary because std::bitset only provides conversion 86 * to unsigned long or unsigned long long via to_ulong() or to_ullong(). 87 * There are a few places in the code where to_ullong() is used; however, 88 * if VSZ is larger than a value the host can support then bitset will 89 * throw a runtime exception. We should remove all use of to_ulong() or 90 * to_ullong() so we can have VSZ greater than 64b; however, until that is 91 * done this check is required. 92 */ 93 fatal_if(p->wfSize > std::numeric_limits<unsigned long long>::digits || 94 p->wfSize <= 0, 95 "WF size is larger than the host can support"); 96 fatal_if(!isPowerOf2(wavefrontSize), 97 "Wavefront size should be a power of 2"); 98 // calculate how many cycles a vector load or store will need to transfer 99 // its data over the corresponding buses 100 numCyclesPerStoreTransfer = 101 (uint32_t)ceil((double)(wfSize() * sizeof(uint32_t)) / 102 (double)vrfToCoalescerBusWidth); 103 104 numCyclesPerLoadTransfer = (wfSize() * sizeof(uint32_t)) 105 / coalescerToVrfBusWidth; 106 107 lastVaddrWF.resize(numSIMDs); 108 wfList.resize(numSIMDs); 109 110 for (int j = 0; j < numSIMDs; ++j) { 111 lastVaddrWF[j].resize(p->n_wf); 112 113 for (int i = 0; i < p->n_wf; ++i) { 114 lastVaddrWF[j][i].resize(wfSize()); 115 116 wfList[j].push_back(p->wavefronts[j * p->n_wf + i]); 117 wfList[j][i]->setParent(this); 118 119 for (int k = 0; k < wfSize(); ++k) { 120 lastVaddrWF[j][i][k] = 0; 121 } 122 } 123 } 124 125 lastVaddrSimd.resize(numSIMDs); 126 127 for (int i = 0; i < numSIMDs; ++i) { 128 lastVaddrSimd[i].resize(wfSize(), 0); 129 } 130 131 lastVaddrCU.resize(wfSize()); 132 133 lds.setParent(this); 134 135 if (p->execPolicy == "OLDEST-FIRST") { 136 exec_policy = EXEC_POLICY::OLDEST; 137 } else if (p->execPolicy == "ROUND-ROBIN") { 138 exec_policy = EXEC_POLICY::RR; 139 } else { 140 fatal("Invalid WF execution policy (CU)\n"); 141 } 142 143 memPort.resize(wfSize()); 144 145 // resize the tlbPort vector array 146 int tlbPort_width = perLaneTLB ? 
wfSize() : 1; 147 tlbPort.resize(tlbPort_width); 148 149 cuExitCallback = new CUExitCallback(this); 150 registerExitCallback(cuExitCallback); 151 152 xactCasLoadMap.clear(); 153 lastExecCycle.resize(numSIMDs, 0); 154 155 for (int i = 0; i < vrf.size(); ++i) { 156 vrf[i]->setParent(this); 157 } 158 159 numVecRegsPerSimd = vrf[0]->numRegs(); 160} 161 162ComputeUnit::~ComputeUnit() 163{ 164 // Delete wavefront slots 165 for (int j = 0; j < numSIMDs; ++j) { 166 for (int i = 0; i < shader->n_wf; ++i) { 167 delete wfList[j][i]; 168 } 169 lastVaddrSimd[j].clear(); 170 } 171 lastVaddrCU.clear(); 172 readyList.clear(); 173 waveStatusList.clear(); 174 dispatchList.clear(); 175 vectorAluInstAvail.clear(); 176 delete cuExitCallback; 177 delete ldsPort; 178} 179 180void 181ComputeUnit::fillKernelState(Wavefront *w, NDRange *ndr) 182{ 183 w->resizeRegFiles(ndr->q.cRegCount, ndr->q.sRegCount, ndr->q.dRegCount); 184 185 w->workGroupSz[0] = ndr->q.wgSize[0]; 186 w->workGroupSz[1] = ndr->q.wgSize[1]; 187 w->workGroupSz[2] = ndr->q.wgSize[2]; 188 w->wgSz = w->workGroupSz[0] * w->workGroupSz[1] * w->workGroupSz[2]; 189 w->gridSz[0] = ndr->q.gdSize[0]; 190 w->gridSz[1] = ndr->q.gdSize[1]; 191 w->gridSz[2] = ndr->q.gdSize[2]; 192 w->kernelArgs = ndr->q.args; 193 w->privSizePerItem = ndr->q.privMemPerItem; 194 w->spillSizePerItem = ndr->q.spillMemPerItem; 195 w->roBase = ndr->q.roMemStart; 196 w->roSize = ndr->q.roMemTotal; 197 w->computeActualWgSz(ndr); 198} 199 200void 201ComputeUnit::updateEvents() { 202 203 if (!timestampVec.empty()) { 204 uint32_t vecSize = timestampVec.size(); 205 uint32_t i = 0; 206 while (i < vecSize) { 207 if (timestampVec[i] <= shader->tick_cnt) { 208 std::pair<uint32_t, uint32_t> regInfo = regIdxVec[i]; 209 vrf[regInfo.first]->markReg(regInfo.second, sizeof(uint32_t), 210 statusVec[i]); 211 timestampVec.erase(timestampVec.begin() + i); 212 regIdxVec.erase(regIdxVec.begin() + i); 213 statusVec.erase(statusVec.begin() + i); 214 --vecSize; 215 --i; 216 } 217 ++i; 218 } 219 } 220 221 for (int i = 0; i< numSIMDs; ++i) { 222 vrf[i]->updateEvents(); 223 } 224} 225 226 227void 228ComputeUnit::startWavefront(Wavefront *w, int waveId, LdsChunk *ldsChunk, 229 NDRange *ndr) 230{ 231 static int _n_wave = 0; 232 233 VectorMask init_mask; 234 init_mask.reset(); 235 236 for (int k = 0; k < wfSize(); ++k) { 237 if (k + waveId * wfSize() < w->actualWgSzTotal) 238 init_mask[k] = 1; 239 } 240 241 w->kernId = ndr->dispatchId; 242 w->wfId = waveId; 243 w->initMask = init_mask.to_ullong(); 244 245 for (int k = 0; k < wfSize(); ++k) { 246 w->workItemId[0][k] = (k + waveId * wfSize()) % w->actualWgSz[0]; 247 w->workItemId[1][k] = ((k + waveId * wfSize()) / w->actualWgSz[0]) % 248 w->actualWgSz[1]; 249 w->workItemId[2][k] = (k + waveId * wfSize()) / 250 (w->actualWgSz[0] * w->actualWgSz[1]); 251 252 w->workItemFlatId[k] = w->workItemId[2][k] * w->actualWgSz[0] * 253 w->actualWgSz[1] + w->workItemId[1][k] * w->actualWgSz[0] + 254 w->workItemId[0][k]; 255 } 256 257 w->barrierSlots = divCeil(w->actualWgSzTotal, wfSize()); 258 259 w->barCnt.resize(wfSize(), 0); 260 261 w->maxBarCnt = 0; 262 w->oldBarrierCnt = 0; 263 w->barrierCnt = 0; 264 265 w->privBase = ndr->q.privMemStart; 266 ndr->q.privMemStart += ndr->q.privMemPerItem * wfSize(); 267 268 w->spillBase = ndr->q.spillMemStart; 269 ndr->q.spillMemStart += ndr->q.spillMemPerItem * wfSize(); 270 271 w->pushToReconvergenceStack(0, UINT32_MAX, init_mask.to_ulong()); 272 273 // WG state 274 w->wgId = ndr->globalWgId; 275 w->dispatchId = ndr->dispatchId; 276 
w->workGroupId[0] = w->wgId % ndr->numWg[0]; 277 w->workGroupId[1] = (w->wgId / ndr->numWg[0]) % ndr->numWg[1]; 278 w->workGroupId[2] = w->wgId / (ndr->numWg[0] * ndr->numWg[1]); 279 280 w->barrierId = barrier_id; 281 w->stalledAtBarrier = false; 282 283 // set the wavefront context to have a pointer to this section of the LDS 284 w->ldsChunk = ldsChunk; 285 286 int32_t refCount M5_VAR_USED = 287 lds.increaseRefCounter(w->dispatchId, w->wgId); 288 DPRINTF(GPUDisp, "CU%d: increase ref ctr wg[%d] to [%d]\n", 289 cu_id, w->wgId, refCount); 290 291 w->instructionBuffer.clear(); 292 293 if (w->pendingFetch) 294 w->dropFetch = true; 295 296 // is this the last wavefront in the workgroup 297 // if set the spillWidth to be the remaining work-items 298 // so that the vector access is correct 299 if ((waveId + 1) * wfSize() >= w->actualWgSzTotal) { 300 w->spillWidth = w->actualWgSzTotal - (waveId * wfSize()); 301 } else { 302 w->spillWidth = wfSize(); 303 } 304 305 DPRINTF(GPUDisp, "Scheduling wfDynId/barrier_id %d/%d on CU%d: " 306 "WF[%d][%d]\n", _n_wave, barrier_id, cu_id, w->simdId, w->wfSlotId); 307 308 w->start(++_n_wave, ndr->q.code_ptr); 309} 310 311void 312ComputeUnit::StartWorkgroup(NDRange *ndr) 313{ 314 // reserve the LDS capacity allocated to the work group 315 // disambiguated by the dispatch ID and workgroup ID, which should be 316 // globally unique 317 LdsChunk *ldsChunk = lds.reserveSpace(ndr->dispatchId, ndr->globalWgId, 318 ndr->q.ldsSize); 319 320 // Send L1 cache acquire 321 // isKernel + isAcquire = Kernel Begin 322 if (shader->impl_kern_boundary_sync) { 323 GPUDynInstPtr gpuDynInst = 324 std::make_shared<GPUDynInst>(this, nullptr, kernelLaunchInst, 325 getAndIncSeqNum()); 326 327 gpuDynInst->useContinuation = false; 328 injectGlobalMemFence(gpuDynInst, true); 329 } 330 331 // calculate the number of 32-bit vector registers required by wavefront 332 int vregDemand = ndr->q.sRegCount + (2 * ndr->q.dRegCount); 333 int wave_id = 0; 334 335 // Assign WFs by spreading them across SIMDs, 1 WF per SIMD at a time 336 for (int m = 0; m < shader->n_wf * numSIMDs; ++m) { 337 Wavefront *w = wfList[m % numSIMDs][m / numSIMDs]; 338 // Check if this wavefront slot is available: 339 // It must be stopped and not waiting 340 // for a release to complete S_RETURNING 341 if (w->status == Wavefront::S_STOPPED) { 342 fillKernelState(w, ndr); 343 // if we have scheduled all work items then stop 344 // scheduling wavefronts 345 if (wave_id * wfSize() >= w->actualWgSzTotal) 346 break; 347 348 // reserve vector registers for the scheduled wavefront 349 assert(vectorRegsReserved[m % numSIMDs] <= numVecRegsPerSimd); 350 uint32_t normSize = 0; 351 352 w->startVgprIndex = vrf[m % numSIMDs]->manager-> 353 allocateRegion(vregDemand, &normSize); 354 355 w->reservedVectorRegs = normSize; 356 vectorRegsReserved[m % numSIMDs] += w->reservedVectorRegs; 357 358 startWavefront(w, wave_id, ldsChunk, ndr); 359 ++wave_id; 360 } 361 } 362 ++barrier_id; 363} 364 365int 366ComputeUnit::ReadyWorkgroup(NDRange *ndr) 367{ 368 // Get true size of workgroup (after clamping to grid size) 369 int trueWgSize[3]; 370 int trueWgSizeTotal = 1; 371 372 for (int d = 0; d < 3; ++d) { 373 trueWgSize[d] = std::min(ndr->q.wgSize[d], ndr->q.gdSize[d] - 374 ndr->wgId[d] * ndr->q.wgSize[d]); 375 376 trueWgSizeTotal *= trueWgSize[d]; 377 DPRINTF(GPUDisp, "trueWgSize[%d] = %d\n", d, trueWgSize[d]); 378 } 379 380 DPRINTF(GPUDisp, "trueWgSizeTotal = %d\n", trueWgSizeTotal); 381 382 // calculate the number of 32-bit vector registers required by 
each 383 // work item of the work group 384 int vregDemandPerWI = ndr->q.sRegCount + (2 * ndr->q.dRegCount); 385 bool vregAvail = true; 386 int numWfs = (trueWgSizeTotal + wfSize() - 1) / wfSize(); 387 int freeWfSlots = 0; 388 // check if the total number of VGPRs required by all WFs of the WG 389 // fits in the VRFs of all SIMD units 390 assert((numWfs * vregDemandPerWI) <= (numSIMDs * numVecRegsPerSimd)); 391 int numMappedWfs = 0; 392 std::vector<int> numWfsPerSimd; 393 numWfsPerSimd.resize(numSIMDs, 0); 394 // find how many free WF slots we have across all SIMDs 395 for (int j = 0; j < shader->n_wf; ++j) { 396 for (int i = 0; i < numSIMDs; ++i) { 397 if (wfList[i][j]->status == Wavefront::S_STOPPED) { 398 // count the number of free WF slots 399 ++freeWfSlots; 400 if (numMappedWfs < numWfs) { 401 // count the WFs to be assigned per SIMD 402 numWfsPerSimd[i]++; 403 } 404 numMappedWfs++; 405 } 406 } 407 } 408 409 // if there are enough free WF slots then find if there are enough 410 // free VGPRs per SIMD based on the WF->SIMD mapping 411 if (freeWfSlots >= numWfs) { 412 for (int j = 0; j < numSIMDs; ++j) { 413 // find if there are enough free VGPR regions in the SIMD's VRF 414 // to accommodate the WFs of the new WG that would be mapped to 415 // this SIMD unit 416 vregAvail = vrf[j]->manager->canAllocate(numWfsPerSimd[j], 417 vregDemandPerWI); 418 419 // stop searching if there is at least one SIMD 420 // whose VRF does not have enough free VGPR pools. 421 // This is because a WG is scheduled only if ALL 422 // of its WFs can be scheduled 423 if (!vregAvail) 424 break; 425 } 426 } 427 428 DPRINTF(GPUDisp, "Free WF slots = %d, VGPR Availability = %d\n", 429 freeWfSlots, vregAvail); 430 431 if (!vregAvail) { 432 ++numTimesWgBlockedDueVgprAlloc; 433 } 434 435 // if the WG's LDS requirement cannot be reserved, count the WG as 436 // blocked due to LDS allocation 437 if (!lds.canReserve(ndr->q.ldsSize)) { 438 wgBlockedDueLdsAllocation++; 439 } 440 441 // Return true if (a) there are enough free WF slots to submit the 442 // workgroup, (b) there are enough VGPRs to schedule all WFs to their 443 // SIMD units, and (c) there is enough space in LDS 444 return freeWfSlots >= numWfs && vregAvail && lds.canReserve(ndr->q.ldsSize); 445} 446 447int 448ComputeUnit::AllAtBarrier(uint32_t _barrier_id, uint32_t bcnt, uint32_t bslots) 449{ 450 DPRINTF(GPUSync, "CU%d: Checking for All At Barrier\n", cu_id); 451 int ccnt = 0; 452 453 for (int i_simd = 0; i_simd < numSIMDs; ++i_simd) { 454 for (int i_wf = 0; i_wf < shader->n_wf; ++i_wf) { 455 Wavefront *w = wfList[i_simd][i_wf]; 456 457 if (w->status == Wavefront::S_RUNNING) { 458 DPRINTF(GPUSync, "Checking WF[%d][%d]\n", i_simd, i_wf); 459 460 DPRINTF(GPUSync, "wf->barrier_id = %d, _barrier_id = %d\n", 461 w->barrierId, _barrier_id); 462 463 DPRINTF(GPUSync, "wf->barrier_cnt %d, bcnt = %d\n", 464 w->barrierCnt, bcnt); 465 } 466 467 if (w->status == Wavefront::S_RUNNING && 468 w->barrierId == _barrier_id && w->barrierCnt == bcnt && 469 !w->outstandingReqs) { 470 ++ccnt; 471 472 DPRINTF(GPUSync, "WF[%d][%d] at barrier, increment ccnt to " 473 "%d\n", i_simd, i_wf, ccnt); 474 } 475 } 476 } 477 478 DPRINTF(GPUSync, "CU%d: returning allAtBarrier ccnt = %d, bslots = %d\n", 479 cu_id, ccnt, bslots); 480 481 return ccnt == bslots; 482} 483 484// Check if the current wavefront is blocked on additional resources. 
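// cedeSIMD() (below) returns true when the wavefront must yield: it is waiting
// in a per-address xact_cas_ld queue behind another wavefront and should not
// be considered ready this cycle.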
485bool 486ComputeUnit::cedeSIMD(int simdId, int wfSlotId) 487{ 488 bool cede = false; 489 490 // If --xact-cas-mode option is enabled in run.py, then xact_cas_ld 491 // magic instructions will impact the scheduling of wavefronts 492 if (xact_cas_mode) { 493 /* 494 * When a wavefront calls xact_cas_ld, it adds itself to a per address 495 * queue. All per address queues are managed by the xactCasLoadMap. 496 * 497 * A wavefront is not blocked if: it is not in ANY per address queue or 498 * if it is at the head of a per address queue. 499 */ 500 for (auto itMap : xactCasLoadMap) { 501 std::list<waveIdentifier> curWaveIDQueue = itMap.second.waveIDQueue; 502 503 if (!curWaveIDQueue.empty()) { 504 for (auto it : curWaveIDQueue) { 505 waveIdentifier cur_wave = it; 506 507 if (cur_wave.simdId == simdId && 508 cur_wave.wfSlotId == wfSlotId) { 509 // 2 possibilities 510 // 1: this WF has a green light 511 // 2: another WF has a green light 512 waveIdentifier owner_wave = curWaveIDQueue.front(); 513 514 if (owner_wave.simdId != cur_wave.simdId || 515 owner_wave.wfSlotId != cur_wave.wfSlotId) { 516 // possibility 2 517 cede = true; 518 break; 519 } else { 520 // possibility 1 521 break; 522 } 523 } 524 } 525 } 526 } 527 } 528 529 return cede; 530} 531 532// Execute one clock worth of work on the ComputeUnit. 533void 534ComputeUnit::exec() 535{ 536 updateEvents(); 537 // Execute pipeline stages in reverse order to simulate 538 // the pipeline latency 539 globalMemoryPipe.exec(); 540 localMemoryPipe.exec(); 541 execStage.exec(); 542 scheduleStage.exec(); 543 scoreboardCheckStage.exec(); 544 fetchStage.exec(); 545 546 totalCycles++; 547} 548 549void 550ComputeUnit::init() 551{ 552 // Initialize CU Bus models 553 glbMemToVrfBus.init(&shader->tick_cnt, shader->ticks(1)); 554 locMemToVrfBus.init(&shader->tick_cnt, shader->ticks(1)); 555 nextGlbMemBus = 0; 556 nextLocMemBus = 0; 557 fatal_if(numGlbMemUnits > 1, 558 "No support for multiple Global Memory Pipelines exists!!!"); 559 vrfToGlobalMemPipeBus.resize(numGlbMemUnits); 560 for (int j = 0; j < numGlbMemUnits; ++j) { 561 vrfToGlobalMemPipeBus[j] = WaitClass(); 562 vrfToGlobalMemPipeBus[j].init(&shader->tick_cnt, shader->ticks(1)); 563 } 564 565 fatal_if(numLocMemUnits > 1, 566 "No support for multiple Local Memory Pipelines exists!!!"); 567 vrfToLocalMemPipeBus.resize(numLocMemUnits); 568 for (int j = 0; j < numLocMemUnits; ++j) { 569 vrfToLocalMemPipeBus[j] = WaitClass(); 570 vrfToLocalMemPipeBus[j].init(&shader->tick_cnt, shader->ticks(1)); 571 } 572 vectorRegsReserved.resize(numSIMDs, 0); 573 aluPipe.resize(numSIMDs); 574 wfWait.resize(numSIMDs + numLocMemUnits + numGlbMemUnits); 575 576 for (int i = 0; i < numSIMDs + numLocMemUnits + numGlbMemUnits; ++i) { 577 wfWait[i] = WaitClass(); 578 wfWait[i].init(&shader->tick_cnt, shader->ticks(1)); 579 } 580 581 for (int i = 0; i < numSIMDs; ++i) { 582 aluPipe[i] = WaitClass(); 583 aluPipe[i].init(&shader->tick_cnt, shader->ticks(1)); 584 } 585 586 // Setup space for call args 587 for (int j = 0; j < numSIMDs; ++j) { 588 for (int i = 0; i < shader->n_wf; ++i) { 589 wfList[j][i]->initCallArgMem(shader->funcargs_size, wavefrontSize); 590 } 591 } 592 593 // Initializing pipeline resources 594 readyList.resize(numSIMDs + numGlbMemUnits + numLocMemUnits); 595 waveStatusList.resize(numSIMDs); 596 597 for (int j = 0; j < numSIMDs; ++j) { 598 for (int i = 0; i < shader->n_wf; ++i) { 599 waveStatusList[j].push_back( 600 std::make_pair(wfList[j][i], BLOCKED)); 601 } 602 } 603 604 for (int j = 0; j < (numSIMDs + 
numGlbMemUnits + numLocMemUnits); ++j) { 605 dispatchList.push_back(std::make_pair((Wavefront*)nullptr, EMPTY)); 606 } 607 608 fetchStage.init(this); 609 scoreboardCheckStage.init(this); 610 scheduleStage.init(this); 611 execStage.init(this); 612 globalMemoryPipe.init(this); 613 localMemoryPipe.init(this); 614 // initialize state for statistics calculation 615 vectorAluInstAvail.resize(numSIMDs, false); 616 shrMemInstAvail = 0; 617 glbMemInstAvail = 0; 618} 619 620bool 621ComputeUnit::DataPort::recvTimingResp(PacketPtr pkt) 622{ 623 // Ruby has completed the memory op. Schedule the mem_resp_event at the 624 // appropriate cycle to process the timing memory response 625 // This delay represents the pipeline delay 626 SenderState *sender_state = safe_cast<SenderState*>(pkt->senderState); 627 int index = sender_state->port_index; 628 GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst; 629 630 // Is the packet returned a Kernel End or Barrier 631 if (pkt->req->isKernel() && pkt->req->isRelease()) { 632 Wavefront *w = 633 computeUnit->wfList[gpuDynInst->simdId][gpuDynInst->wfSlotId]; 634 635 // Check if we are waiting on Kernel End Release 636 if (w->status == Wavefront::S_RETURNING) { 637 DPRINTF(GPUDisp, "CU%d: WF[%d][%d][wv=%d]: WG id completed %d\n", 638 computeUnit->cu_id, w->simdId, w->wfSlotId, 639 w->wfDynId, w->kernId); 640 641 computeUnit->shader->dispatcher->notifyWgCompl(w); 642 w->status = Wavefront::S_STOPPED; 643 } else { 644 w->outstandingReqs--; 645 } 646 647 DPRINTF(GPUSync, "CU%d: WF[%d][%d]: barrier_cnt = %d\n", 648 computeUnit->cu_id, gpuDynInst->simdId, 649 gpuDynInst->wfSlotId, w->barrierCnt); 650 651 if (gpuDynInst->useContinuation) { 652 assert(!gpuDynInst->isNoScope()); 653 gpuDynInst->execContinuation(gpuDynInst->staticInstruction(), 654 gpuDynInst); 655 } 656 657 delete pkt->senderState; 658 delete pkt; 659 return true; 660 } else if (pkt->req->isKernel() && pkt->req->isAcquire()) { 661 if (gpuDynInst->useContinuation) { 662 assert(!gpuDynInst->isNoScope()); 663 gpuDynInst->execContinuation(gpuDynInst->staticInstruction(), 664 gpuDynInst); 665 } 666 667 delete pkt->senderState; 668 delete pkt; 669 return true; 670 } 671 672 EventFunctionWrapper *mem_resp_event = 673 computeUnit->memPort[index]->createMemRespEvent(pkt); 674 675 DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x received!\n", 676 computeUnit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, 677 index, pkt->req->getPaddr()); 678 679 computeUnit->schedule(mem_resp_event, 680 curTick() + computeUnit->resp_tick_latency); 681 return true; 682} 683 684void 685ComputeUnit::DataPort::recvReqRetry() 686{ 687 int len = retries.size(); 688 689 assert(len > 0); 690 691 for (int i = 0; i < len; ++i) { 692 PacketPtr pkt = retries.front().first; 693 GPUDynInstPtr gpuDynInst M5_VAR_USED = retries.front().second; 694 DPRINTF(GPUMem, "CU%d: WF[%d][%d]: retry mem inst addr %#x\n", 695 computeUnit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, 696 pkt->req->getPaddr()); 697 698 /** Currently Ruby can return false due to conflicts for the particular 699 * cache block or address. Thus other requests should be allowed to 700 * pass and the data port should expect multiple retries. 
*/ 701 if (!sendTimingReq(pkt)) { 702 DPRINTF(GPUMem, "failed again!\n"); 703 break; 704 } else { 705 DPRINTF(GPUMem, "successful!\n"); 706 retries.pop_front(); 707 } 708 } 709} 710 711bool 712ComputeUnit::SQCPort::recvTimingResp(PacketPtr pkt) 713{ 714 computeUnit->fetchStage.processFetchReturn(pkt); 715 716 return true; 717} 718 719void 720ComputeUnit::SQCPort::recvReqRetry() 721{ 722 int len = retries.size(); 723 724 assert(len > 0); 725 726 for (int i = 0; i < len; ++i) { 727 PacketPtr pkt = retries.front().first; 728 Wavefront *wavefront M5_VAR_USED = retries.front().second; 729 DPRINTF(GPUFetch, "CU%d: WF[%d][%d]: retrying FETCH addr %#x\n", 730 computeUnit->cu_id, wavefront->simdId, wavefront->wfSlotId, 731 pkt->req->getPaddr()); 732 if (!sendTimingReq(pkt)) { 733 DPRINTF(GPUFetch, "failed again!\n"); 734 break; 735 } else { 736 DPRINTF(GPUFetch, "successful!\n"); 737 retries.pop_front(); 738 } 739 } 740} 741 742void 743ComputeUnit::sendRequest(GPUDynInstPtr gpuDynInst, int index, PacketPtr pkt) 744{ 745 // There must be a way around this check to do the globalMemStart... 746 Addr tmp_vaddr = pkt->req->getVaddr(); 747 748 updatePageDivergenceDist(tmp_vaddr); 749 750 pkt->req->setVirt(pkt->req->getAsid(), tmp_vaddr, pkt->req->getSize(), 751 pkt->req->getFlags(), pkt->req->masterId(), 752 pkt->req->getPC()); 753 754 // figure out the type of the request to set read/write 755 BaseTLB::Mode TLB_mode; 756 assert(pkt->isRead() || pkt->isWrite()); 757 758 // Check write before read for atomic operations 759 // since atomic operations should use BaseTLB::Write 760 if (pkt->isWrite()){ 761 TLB_mode = BaseTLB::Write; 762 } else if (pkt->isRead()) { 763 TLB_mode = BaseTLB::Read; 764 } else { 765 fatal("pkt is not a read nor a write\n"); 766 } 767 768 tlbCycles -= curTick(); 769 ++tlbRequests; 770 771 int tlbPort_index = perLaneTLB ? 
index : 0; 772 773 if (shader->timingSim) { 774 if (debugSegFault) { 775 Process *p = shader->gpuTc->getProcessPtr(); 776 Addr vaddr = pkt->req->getVaddr(); 777 unsigned size = pkt->getSize(); 778 779 if ((vaddr + size - 1) % 64 < vaddr % 64) { 780 panic("CU%d: WF[%d][%d]: Access to addr %#x is unaligned!\n", 781 cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, vaddr); 782 } 783 784 Addr paddr; 785 786 if (!p->pTable->translate(vaddr, paddr)) { 787 if (!p->fixupStackFault(vaddr)) { 788 panic("CU%d: WF[%d][%d]: Fault on addr %#x!\n", 789 cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, 790 vaddr); 791 } 792 } 793 } 794 795 // This is the SenderState needed upon return 796 pkt->senderState = new DTLBPort::SenderState(gpuDynInst, index); 797 798 // This is the senderState needed by the TLB hierarchy to function 799 TheISA::GpuTLB::TranslationState *translation_state = 800 new TheISA::GpuTLB::TranslationState(TLB_mode, shader->gpuTc, false, 801 pkt->senderState); 802 803 pkt->senderState = translation_state; 804 805 if (functionalTLB) { 806 tlbPort[tlbPort_index]->sendFunctional(pkt); 807 808 // update the hitLevel distribution 809 int hit_level = translation_state->hitLevel; 810 assert(hit_level != -1); 811 hitsPerTLBLevel[hit_level]++; 812 813 // New SenderState for the memory access 814 X86ISA::GpuTLB::TranslationState *sender_state = 815 safe_cast<X86ISA::GpuTLB::TranslationState*>(pkt->senderState); 816 817 delete sender_state->tlbEntry; 818 delete sender_state->saved; 819 delete sender_state; 820 821 assert(pkt->req->hasPaddr()); 822 assert(pkt->req->hasSize()); 823 824 uint8_t *tmpData = pkt->getPtr<uint8_t>(); 825 826 // this is necessary because the GPU TLB receives packets instead 827 // of requests. when the translation is complete, all relevent 828 // fields in the request will be populated, but not in the packet. 829 // here we create the new packet so we can set the size, addr, 830 // and proper flags. 831 PacketPtr oldPkt = pkt; 832 pkt = new Packet(oldPkt->req, oldPkt->cmd); 833 delete oldPkt; 834 pkt->dataStatic(tmpData); 835 836 837 // New SenderState for the memory access 838 pkt->senderState = new ComputeUnit::DataPort::SenderState(gpuDynInst, 839 index, nullptr); 840 841 gpuDynInst->memStatusVector[pkt->getAddr()].push_back(index); 842 gpuDynInst->tlbHitLevel[index] = hit_level; 843 844 845 // translation is done. Schedule the mem_req_event at the 846 // appropriate cycle to send the timing memory request to ruby 847 EventFunctionWrapper *mem_req_event = 848 memPort[index]->createMemReqEvent(pkt); 849 850 DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x data " 851 "scheduled\n", cu_id, gpuDynInst->simdId, 852 gpuDynInst->wfSlotId, index, pkt->req->getPaddr()); 853 854 schedule(mem_req_event, curTick() + req_tick_latency); 855 } else if (tlbPort[tlbPort_index]->isStalled()) { 856 assert(tlbPort[tlbPort_index]->retries.size() > 0); 857 858 DPRINTF(GPUTLB, "CU%d: WF[%d][%d]: Translation for addr %#x " 859 "failed!\n", cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, 860 tmp_vaddr); 861 862 tlbPort[tlbPort_index]->retries.push_back(pkt); 863 } else if (!tlbPort[tlbPort_index]->sendTimingReq(pkt)) { 864 // Stall the data port; 865 // No more packet will be issued till 866 // ruby indicates resources are freed by 867 // a recvReqRetry() call back on this port. 
868 tlbPort[tlbPort_index]->stallPort(); 869 870 DPRINTF(GPUTLB, "CU%d: WF[%d][%d]: Translation for addr %#x " 871 "failed!\n", cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, 872 tmp_vaddr); 873 874 tlbPort[tlbPort_index]->retries.push_back(pkt); 875 } else { 876 DPRINTF(GPUTLB, 877 "CU%d: WF[%d][%d]: Translation for addr %#x sent!\n", 878 cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, tmp_vaddr); 879 } 880 } else { 881 if (pkt->cmd == MemCmd::MemFenceReq) { 882 gpuDynInst->statusBitVector = VectorMask(0); 883 } else { 884 gpuDynInst->statusBitVector &= (~(1ll << index)); 885 } 886 887 // New SenderState for the memory access 888 delete pkt->senderState; 889 890 // Because it's atomic operation, only need TLB translation state 891 pkt->senderState = new TheISA::GpuTLB::TranslationState(TLB_mode, 892 shader->gpuTc); 893 894 tlbPort[tlbPort_index]->sendFunctional(pkt); 895 896 // the addr of the packet is not modified, so we need to create a new 897 // packet, or otherwise the memory access will have the old virtual 898 // address sent in the translation packet, instead of the physical 899 // address returned by the translation. 900 PacketPtr new_pkt = new Packet(pkt->req, pkt->cmd); 901 new_pkt->dataStatic(pkt->getPtr<uint8_t>()); 902 903 // Translation is done. It is safe to send the packet to memory. 904 memPort[0]->sendFunctional(new_pkt); 905 906 DPRINTF(GPUMem, "CU%d: WF[%d][%d]: index %d: addr %#x\n", cu_id, 907 gpuDynInst->simdId, gpuDynInst->wfSlotId, index, 908 new_pkt->req->getPaddr()); 909 910 // safe_cast the senderState 911 TheISA::GpuTLB::TranslationState *sender_state = 912 safe_cast<TheISA::GpuTLB::TranslationState*>(pkt->senderState); 913 914 delete sender_state->tlbEntry; 915 delete new_pkt; 916 delete pkt->senderState; 917 delete pkt; 918 } 919} 920 921void 922ComputeUnit::sendSyncRequest(GPUDynInstPtr gpuDynInst, int index, PacketPtr pkt) 923{ 924 EventFunctionWrapper *mem_req_event = 925 memPort[index]->createMemReqEvent(pkt); 926 927 928 // New SenderState for the memory access 929 pkt->senderState = new ComputeUnit::DataPort::SenderState(gpuDynInst, index, 930 nullptr); 931 932 DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x sync scheduled\n", 933 cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, index, 934 pkt->req->getPaddr()); 935 936 schedule(mem_req_event, curTick() + req_tick_latency); 937} 938 939void 940ComputeUnit::injectGlobalMemFence(GPUDynInstPtr gpuDynInst, bool kernelLaunch, 941 RequestPtr req) 942{ 943 assert(gpuDynInst->isGlobalSeg()); 944 945 if (!req) { 946 req = std::make_shared<Request>( 947 0, 0, 0, 0, masterId(), 0, gpuDynInst->wfDynId); 948 } 949 req->setPaddr(0); 950 if (kernelLaunch) { 951 req->setFlags(Request::KERNEL); 952 } 953 954 // for non-kernel MemFence operations, memorder flags are set depending 955 // on which type of request is currently being sent, so this 956 // should be set by the caller (e.g. 
if an inst has acq-rel 957 // semantics, it will send one acquire req an one release req) 958 gpuDynInst->setRequestFlags(req, kernelLaunch); 959 960 // a mem fence must correspond to an acquire/release request 961 assert(req->isAcquire() || req->isRelease()); 962 963 // create packet 964 PacketPtr pkt = new Packet(req, MemCmd::MemFenceReq); 965 966 // set packet's sender state 967 pkt->senderState = 968 new ComputeUnit::DataPort::SenderState(gpuDynInst, 0, nullptr); 969 970 // send the packet 971 sendSyncRequest(gpuDynInst, 0, pkt); 972} 973 974void 975ComputeUnit::DataPort::processMemRespEvent(PacketPtr pkt) 976{ 977 DataPort::SenderState *sender_state = 978 safe_cast<DataPort::SenderState*>(pkt->senderState); 979 980 GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst; 981 ComputeUnit *compute_unit = computeUnit; 982 983 assert(gpuDynInst); 984 985 DPRINTF(GPUPort, "CU%d: WF[%d][%d]: Response for addr %#x, index %d\n", 986 compute_unit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, 987 pkt->req->getPaddr(), index); 988 989 Addr paddr = pkt->req->getPaddr(); 990 991 if (pkt->cmd != MemCmd::MemFenceResp) { 992 int index = gpuDynInst->memStatusVector[paddr].back(); 993 994 DPRINTF(GPUMem, "Response for addr %#x, index %d\n", 995 pkt->req->getPaddr(), index); 996 997 gpuDynInst->memStatusVector[paddr].pop_back(); 998 gpuDynInst->pAddr = pkt->req->getPaddr(); 999 1000 if (pkt->isRead() || pkt->isWrite()) { 1001 1002 if (gpuDynInst->n_reg <= MAX_REGS_FOR_NON_VEC_MEM_INST) { 1003 gpuDynInst->statusBitVector &= (~(1ULL << index)); 1004 } else { 1005 assert(gpuDynInst->statusVector[index] > 0); 1006 gpuDynInst->statusVector[index]--; 1007 1008 if (!gpuDynInst->statusVector[index]) 1009 gpuDynInst->statusBitVector &= (~(1ULL << index)); 1010 } 1011 1012 DPRINTF(GPUMem, "bitvector is now %#x\n", 1013 gpuDynInst->statusBitVector); 1014 1015 if (gpuDynInst->statusBitVector == VectorMask(0)) { 1016 auto iter = gpuDynInst->memStatusVector.begin(); 1017 auto end = gpuDynInst->memStatusVector.end(); 1018 1019 while (iter != end) { 1020 assert(iter->second.empty()); 1021 ++iter; 1022 } 1023 1024 gpuDynInst->memStatusVector.clear(); 1025 1026 if (gpuDynInst->n_reg > MAX_REGS_FOR_NON_VEC_MEM_INST) 1027 gpuDynInst->statusVector.clear(); 1028 1029 compute_unit->globalMemoryPipe.handleResponse(gpuDynInst); 1030 1031 DPRINTF(GPUMem, "CU%d: WF[%d][%d]: packet totally complete\n", 1032 compute_unit->cu_id, gpuDynInst->simdId, 1033 gpuDynInst->wfSlotId); 1034 1035 // after clearing the status vectors, 1036 // see if there is a continuation to perform 1037 // the continuation may generate more work for 1038 // this memory request 1039 if (gpuDynInst->useContinuation) { 1040 assert(!gpuDynInst->isNoScope()); 1041 gpuDynInst->execContinuation( 1042 gpuDynInst->staticInstruction(), 1043 gpuDynInst); 1044 } 1045 } 1046 } 1047 } else { 1048 gpuDynInst->statusBitVector = VectorMask(0); 1049 1050 if (gpuDynInst->useContinuation) { 1051 assert(!gpuDynInst->isNoScope()); 1052 gpuDynInst->execContinuation(gpuDynInst->staticInstruction(), 1053 gpuDynInst); 1054 } 1055 } 1056 1057 delete pkt->senderState; 1058 delete pkt; 1059} 1060 1061ComputeUnit* 1062ComputeUnitParams::create() 1063{ 1064 return new ComputeUnit(this); 1065} 1066 1067bool 1068ComputeUnit::DTLBPort::recvTimingResp(PacketPtr pkt) 1069{ 1070 Addr line = pkt->req->getPaddr(); 1071 1072 DPRINTF(GPUTLB, "CU%d: DTLBPort received %#x->%#x\n", computeUnit->cu_id, 1073 pkt->req->getVaddr(), line); 1074 1075 assert(pkt->senderState); 1076 computeUnit->tlbCycles += 
curTick(); 1077 1078 // pop off the TLB translation state 1079 TheISA::GpuTLB::TranslationState *translation_state = 1080 safe_cast<TheISA::GpuTLB::TranslationState*>(pkt->senderState); 1081 1082 // no PageFaults are permitted for data accesses 1083 if (!translation_state->tlbEntry) { 1084 DTLBPort::SenderState *sender_state = 1085 safe_cast<DTLBPort::SenderState*>(translation_state->saved); 1086 1087 Wavefront *w M5_VAR_USED = 1088 computeUnit->wfList[sender_state->_gpuDynInst->simdId] 1089 [sender_state->_gpuDynInst->wfSlotId]; 1090 1091 DPRINTFN("Wave %d couldn't tranlate vaddr %#x\n", w->wfDynId, 1092 pkt->req->getVaddr()); 1093 } 1094 1095 // update the hitLevel distribution 1096 int hit_level = translation_state->hitLevel; 1097 computeUnit->hitsPerTLBLevel[hit_level]++; 1098 1099 delete translation_state->tlbEntry; 1100 assert(!translation_state->ports.size()); 1101 pkt->senderState = translation_state->saved; 1102 1103 // for prefetch pkt 1104 BaseTLB::Mode TLB_mode = translation_state->tlbMode; 1105 1106 delete translation_state; 1107 1108 // use the original sender state to know how to close this transaction 1109 DTLBPort::SenderState *sender_state = 1110 safe_cast<DTLBPort::SenderState*>(pkt->senderState); 1111 1112 GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst; 1113 int mp_index = sender_state->portIndex; 1114 Addr vaddr = pkt->req->getVaddr(); 1115 gpuDynInst->memStatusVector[line].push_back(mp_index); 1116 gpuDynInst->tlbHitLevel[mp_index] = hit_level; 1117 1118 MemCmd requestCmd; 1119 1120 if (pkt->cmd == MemCmd::ReadResp) { 1121 requestCmd = MemCmd::ReadReq; 1122 } else if (pkt->cmd == MemCmd::WriteResp) { 1123 requestCmd = MemCmd::WriteReq; 1124 } else if (pkt->cmd == MemCmd::SwapResp) { 1125 requestCmd = MemCmd::SwapReq; 1126 } else { 1127 panic("unsupported response to request conversion %s\n", 1128 pkt->cmd.toString()); 1129 } 1130 1131 if (computeUnit->prefetchDepth) { 1132 int simdId = gpuDynInst->simdId; 1133 int wfSlotId = gpuDynInst->wfSlotId; 1134 Addr last = 0; 1135 1136 switch(computeUnit->prefetchType) { 1137 case Enums::PF_CU: 1138 last = computeUnit->lastVaddrCU[mp_index]; 1139 break; 1140 case Enums::PF_PHASE: 1141 last = computeUnit->lastVaddrSimd[simdId][mp_index]; 1142 break; 1143 case Enums::PF_WF: 1144 last = computeUnit->lastVaddrWF[simdId][wfSlotId][mp_index]; 1145 default: 1146 break; 1147 } 1148 1149 DPRINTF(GPUPrefetch, "CU[%d][%d][%d][%d]: %#x was last\n", 1150 computeUnit->cu_id, simdId, wfSlotId, mp_index, last); 1151 1152 int stride = last ? (roundDown(vaddr, TheISA::PageBytes) - 1153 roundDown(last, TheISA::PageBytes)) >> TheISA::PageShift 1154 : 0; 1155 1156 DPRINTF(GPUPrefetch, "Stride is %d\n", stride); 1157 1158 computeUnit->lastVaddrCU[mp_index] = vaddr; 1159 computeUnit->lastVaddrSimd[simdId][mp_index] = vaddr; 1160 computeUnit->lastVaddrWF[simdId][wfSlotId][mp_index] = vaddr; 1161 1162 stride = (computeUnit->prefetchType == Enums::PF_STRIDE) ? 
1163 computeUnit->prefetchStride: stride; 1164 1165 DPRINTF(GPUPrefetch, "%#x to: CU[%d][%d][%d][%d]\n", vaddr, 1166 computeUnit->cu_id, simdId, wfSlotId, mp_index); 1167 1168 DPRINTF(GPUPrefetch, "Prefetching from %#x:", vaddr); 1169 1170 // Prefetch Next few pages atomically 1171 for (int pf = 1; pf <= computeUnit->prefetchDepth; ++pf) { 1172 DPRINTF(GPUPrefetch, "%d * %d: %#x\n", pf, stride, 1173 vaddr+stride*pf*TheISA::PageBytes); 1174 1175 if (!stride) 1176 break; 1177 1178 RequestPtr prefetch_req = std::make_shared<Request>( 1179 0, vaddr + stride * pf * TheISA::PageBytes, 1180 sizeof(uint8_t), 0, 1181 computeUnit->masterId(), 1182 0, 0, nullptr); 1183 1184 PacketPtr prefetch_pkt = new Packet(prefetch_req, requestCmd); 1185 uint8_t foo = 0; 1186 prefetch_pkt->dataStatic(&foo); 1187 1188 // Because it's atomic operation, only need TLB translation state 1189 prefetch_pkt->senderState = 1190 new TheISA::GpuTLB::TranslationState(TLB_mode, 1191 computeUnit->shader->gpuTc, 1192 true); 1193 1194 // Currently prefetches are zero-latency, hence the sendFunctional 1195 sendFunctional(prefetch_pkt); 1196 1197 /* safe_cast the senderState */ 1198 TheISA::GpuTLB::TranslationState *tlb_state = 1199 safe_cast<TheISA::GpuTLB::TranslationState*>( 1200 prefetch_pkt->senderState); 1201 1202 1203 delete tlb_state->tlbEntry; 1204 delete tlb_state; 1205 delete prefetch_pkt; 1206 } 1207 } 1208 1209 // First we must convert the response cmd back to a request cmd so that 1210 // the request can be sent through the cu's master port 1211 PacketPtr new_pkt = new Packet(pkt->req, requestCmd); 1212 new_pkt->dataStatic(pkt->getPtr<uint8_t>()); 1213 delete pkt->senderState; 1214 delete pkt; 1215 1216 // New SenderState for the memory access 1217 new_pkt->senderState = 1218 new ComputeUnit::DataPort::SenderState(gpuDynInst, mp_index, 1219 nullptr); 1220 1221 // translation is done. 
Schedule the mem_req_event at the appropriate 1222 // cycle to send the timing memory request to ruby 1223 EventFunctionWrapper *mem_req_event = 1224 computeUnit->memPort[mp_index]->createMemReqEvent(new_pkt); 1225 1226 DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x data scheduled\n", 1227 computeUnit->cu_id, gpuDynInst->simdId, 1228 gpuDynInst->wfSlotId, mp_index, new_pkt->req->getPaddr()); 1229 1230 computeUnit->schedule(mem_req_event, curTick() + 1231 computeUnit->req_tick_latency); 1232 1233 return true; 1234} 1235 1236EventFunctionWrapper* 1237ComputeUnit::DataPort::createMemReqEvent(PacketPtr pkt) 1238{ 1239 return new EventFunctionWrapper( 1240 [this, pkt]{ processMemReqEvent(pkt); }, 1241 "ComputeUnit memory request event", true); 1242} 1243 1244EventFunctionWrapper* 1245ComputeUnit::DataPort::createMemRespEvent(PacketPtr pkt) 1246{ 1247 return new EventFunctionWrapper( 1248 [this, pkt]{ processMemRespEvent(pkt); }, 1249 "ComputeUnit memory response event", true); 1250} 1251 1252void 1253ComputeUnit::DataPort::processMemReqEvent(PacketPtr pkt) 1254{ 1255 SenderState *sender_state = safe_cast<SenderState*>(pkt->senderState); 1256 GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst; 1257 ComputeUnit *compute_unit M5_VAR_USED = computeUnit; 1258 1259 if (!(sendTimingReq(pkt))) { 1260 retries.push_back(std::make_pair(pkt, gpuDynInst)); 1261 1262 DPRINTF(GPUPort, 1263 "CU%d: WF[%d][%d]: index %d, addr %#x data req failed!\n", 1264 compute_unit->cu_id, gpuDynInst->simdId, 1265 gpuDynInst->wfSlotId, index, 1266 pkt->req->getPaddr()); 1267 } else { 1268 DPRINTF(GPUPort, 1269 "CU%d: WF[%d][%d]: index %d, addr %#x data req sent!\n", 1270 compute_unit->cu_id, gpuDynInst->simdId, 1271 gpuDynInst->wfSlotId, index, 1272 pkt->req->getPaddr()); 1273 } 1274} 1275 1276/* 1277 * The initial translation request could have been rejected, 1278 * if <retries> queue is not empty. Retry sending the translation 1279 * request. sendRetry() is called from the peer port whenever 1280 * a translation completes. 1281 */ 1282void 1283ComputeUnit::DTLBPort::recvReqRetry() 1284{ 1285 int len = retries.size(); 1286 1287 DPRINTF(GPUTLB, "CU%d: DTLB recvReqRetry - %d pending requests\n", 1288 computeUnit->cu_id, len); 1289 1290 assert(len > 0); 1291 assert(isStalled()); 1292 // recvReqRetry is an indication that the resource on which this 1293 // port was stalling is now free. 
So, remove the stall first 1294 unstallPort(); 1295 1296 for (int i = 0; i < len; ++i) { 1297 PacketPtr pkt = retries.front(); 1298 Addr vaddr M5_VAR_USED = pkt->req->getVaddr(); 1299 DPRINTF(GPUTLB, "CU%d: retrying D-translation for address %#x", 1300 computeUnit->cu_id, vaddr); 1301 if (!sendTimingReq(pkt)) { 1302 // Stall port 1303 stallPort(); 1304 DPRINTF(GPUTLB, ": failed again\n"); 1305 break; 1306 } else { 1307 DPRINTF(GPUTLB, ": successful\n"); 1308 retries.pop_front(); 1309 } 1310 } 1311} 1312 1313bool 1314ComputeUnit::ITLBPort::recvTimingResp(PacketPtr pkt) 1315{ 1316 Addr line M5_VAR_USED = pkt->req->getPaddr(); 1317 DPRINTF(GPUTLB, "CU%d: ITLBPort received %#x->%#x\n", 1318 computeUnit->cu_id, pkt->req->getVaddr(), line); 1319 1320 assert(pkt->senderState); 1321 1322 // pop off the TLB translation state 1323 TheISA::GpuTLB::TranslationState *translation_state = 1324 safe_cast<TheISA::GpuTLB::TranslationState*>(pkt->senderState); 1325 1326 bool success = translation_state->tlbEntry != nullptr; 1327 delete translation_state->tlbEntry; 1328 assert(!translation_state->ports.size()); 1329 pkt->senderState = translation_state->saved; 1330 delete translation_state; 1331 1332 // use the original sender state to know how to close this transaction 1333 ITLBPort::SenderState *sender_state = 1334 safe_cast<ITLBPort::SenderState*>(pkt->senderState); 1335 1336 // get the wavefront associated with this translation request 1337 Wavefront *wavefront = sender_state->wavefront; 1338 delete pkt->senderState; 1339 1340 if (success) { 1341 // pkt is reused in fetch(), don't delete it here. However, we must 1342 // reset the command to be a request so that it can be sent through 1343 // the cu's master port 1344 assert(pkt->cmd == MemCmd::ReadResp); 1345 pkt->cmd = MemCmd::ReadReq; 1346 1347 computeUnit->fetchStage.fetch(pkt, wavefront); 1348 } else { 1349 if (wavefront->dropFetch) { 1350 assert(wavefront->instructionBuffer.empty()); 1351 wavefront->dropFetch = false; 1352 } 1353 1354 wavefront->pendingFetch = 0; 1355 } 1356 1357 return true; 1358} 1359 1360/* 1361 * The initial translation request could have been rejected, if 1362 * <retries> queue is not empty. Retry sending the translation 1363 * request. sendRetry() is called from the peer port whenever 1364 * a translation completes. 1365 */ 1366void 1367ComputeUnit::ITLBPort::recvReqRetry() 1368{ 1369 1370 int len = retries.size(); 1371 DPRINTF(GPUTLB, "CU%d: ITLB recvReqRetry - %d pending requests\n", 1372 computeUnit->cu_id, len); 1373 assert(len > 0); 1374 assert(isStalled()); 1375 1376 // recvReqRetry is an indication that the resource on which this 1377 // port was stalling is now free. So, remove the stall first 1378 unstallPort(); 1379 1380 for (int i = 0; i < len; ++i) { 1381 PacketPtr pkt = retries.front(); 1382 Addr vaddr M5_VAR_USED = pkt->req->getVaddr(); 1383 DPRINTF(GPUTLB, "CU%d: retrying I-translation for address %#x", 1384 computeUnit->cu_id, vaddr); 1385 if (!sendTimingReq(pkt)) { 1386 stallPort(); // Stall port 1387 DPRINTF(GPUTLB, ": failed again\n"); 1388 break; 1389 } else { 1390 DPRINTF(GPUTLB, ": successful\n"); 1391 retries.pop_front(); 1392 } 1393 } 1394} 1395 1396void 1397ComputeUnit::regStats() 1398{ 1399 MemObject::regStats(); 1400 1401 vALUInsts 1402 .name(name() + ".valu_insts") 1403 .desc("Number of vector ALU insts issued.") 1404 ; 1405 vALUInstsPerWF 1406 .name(name() + ".valu_insts_per_wf") 1407 .desc("The avg. 
number of vector ALU insts issued per-wavefront.") 1408 ; 1409 sALUInsts 1410 .name(name() + ".salu_insts") 1411 .desc("Number of scalar ALU insts issued.") 1412 ; 1413 sALUInstsPerWF 1414 .name(name() + ".salu_insts_per_wf") 1415 .desc("The avg. number of scalar ALU insts issued per-wavefront.") 1416 ; 1417 instCyclesVALU 1418 .name(name() + ".inst_cycles_valu") 1419 .desc("Number of cycles needed to execute VALU insts.") 1420 ; 1421 instCyclesSALU 1422 .name(name() + ".inst_cycles_salu") 1423 .desc("Number of cycles needed to execute SALU insts.") 1424 ; 1425 threadCyclesVALU 1426 .name(name() + ".thread_cycles_valu") 1427 .desc("Number of thread cycles used to execute vector ALU ops. " 1428 "Similar to instCyclesVALU but multiplied by the number of " 1429 "active threads.") 1430 ; 1431 vALUUtilization 1432 .name(name() + ".valu_utilization") 1433 .desc("Percentage of active vector ALU threads in a wave.") 1434 ; 1435 ldsNoFlatInsts 1436 .name(name() + ".lds_no_flat_insts") 1437 .desc("Number of LDS insts issued, not including FLAT " 1438 "accesses that resolve to LDS.") 1439 ; 1440 ldsNoFlatInstsPerWF 1441 .name(name() + ".lds_no_flat_insts_per_wf") 1442 .desc("The avg. number of LDS insts (not including FLAT " 1443 "accesses that resolve to LDS) per-wavefront.") 1444 ; 1445 flatVMemInsts 1446 .name(name() + ".flat_vmem_insts") 1447 .desc("The number of FLAT insts that resolve to vmem issued.") 1448 ; 1449 flatVMemInstsPerWF 1450 .name(name() + ".flat_vmem_insts_per_wf") 1451 .desc("The average number of FLAT insts that resolve to vmem " 1452 "issued per-wavefront.") 1453 ; 1454 flatLDSInsts 1455 .name(name() + ".flat_lds_insts") 1456 .desc("The number of FLAT insts that resolve to LDS issued.") 1457 ; 1458 flatLDSInstsPerWF 1459 .name(name() + ".flat_lds_insts_per_wf") 1460 .desc("The average number of FLAT insts that resolve to LDS " 1461 "issued per-wavefront.") 1462 ; 1463 vectorMemWrites 1464 .name(name() + ".vector_mem_writes") 1465 .desc("Number of vector mem write insts (excluding FLAT insts).") 1466 ; 1467 vectorMemWritesPerWF 1468 .name(name() + ".vector_mem_writes_per_wf") 1469 .desc("The average number of vector mem write insts " 1470 "(excluding FLAT insts) per-wavefront.") 1471 ; 1472 vectorMemReads 1473 .name(name() + ".vector_mem_reads") 1474 .desc("Number of vector mem read insts (excluding FLAT insts).") 1475 ; 1476 vectorMemReadsPerWF 1477 .name(name() + ".vector_mem_reads_per_wf") 1478 .desc("The avg. 
number of vector mem read insts (excluding " 1479 "FLAT insts) per-wavefront.") 1480 ; 1481 scalarMemWrites 1482 .name(name() + ".scalar_mem_writes") 1483 .desc("Number of scalar mem write insts.") 1484 ; 1485 scalarMemWritesPerWF 1486 .name(name() + ".scalar_mem_writes_per_wf") 1487 .desc("The average number of scalar mem write insts per-wavefront.") 1488 ; 1489 scalarMemReads 1490 .name(name() + ".scalar_mem_reads") 1491 .desc("Number of scalar mem read insts.") 1492 ; 1493 scalarMemReadsPerWF 1494 .name(name() + ".scalar_mem_reads_per_wf") 1495 .desc("The average number of scalar mem read insts per-wavefront.") 1496 ; 1497 1498 vALUInstsPerWF = vALUInsts / completedWfs; 1499 sALUInstsPerWF = sALUInsts / completedWfs; 1500 vALUUtilization = (threadCyclesVALU / (64 * instCyclesVALU)) * 100; 1501 ldsNoFlatInstsPerWF = ldsNoFlatInsts / completedWfs; 1502 flatVMemInstsPerWF = flatVMemInsts / completedWfs; 1503 flatLDSInstsPerWF = flatLDSInsts / completedWfs; 1504 vectorMemWritesPerWF = vectorMemWrites / completedWfs; 1505 vectorMemReadsPerWF = vectorMemReads / completedWfs; 1506 scalarMemWritesPerWF = scalarMemWrites / completedWfs; 1507 scalarMemReadsPerWF = scalarMemReads / completedWfs; 1508 1509 tlbCycles 1510 .name(name() + ".tlb_cycles") 1511 .desc("total number of cycles for all uncoalesced requests") 1512 ; 1513 1514 tlbRequests 1515 .name(name() + ".tlb_requests") 1516 .desc("number of uncoalesced requests") 1517 ; 1518 1519 tlbLatency 1520 .name(name() + ".avg_translation_latency") 1521 .desc("Avg. translation latency for data translations") 1522 ; 1523 1524 tlbLatency = tlbCycles / tlbRequests; 1525 1526 hitsPerTLBLevel 1527 .init(4) 1528 .name(name() + ".TLB_hits_distribution") 1529 .desc("TLB hits distribution (0 for page table, x for Lx-TLB") 1530 ; 1531 1532 // fixed number of TLB levels 1533 for (int i = 0; i < 4; ++i) { 1534 if (!i) 1535 hitsPerTLBLevel.subname(i,"page_table"); 1536 else 1537 hitsPerTLBLevel.subname(i, csprintf("L%d_TLB",i)); 1538 } 1539 1540 execRateDist 1541 .init(0, 10, 2) 1542 .name(name() + ".inst_exec_rate") 1543 .desc("Instruction Execution Rate: Number of executed vector " 1544 "instructions per cycle") 1545 ; 1546 1547 ldsBankConflictDist 1548 .init(0, wfSize(), 2) 1549 .name(name() + ".lds_bank_conflicts") 1550 .desc("Number of bank conflicts per LDS memory packet") 1551 ; 1552 1553 ldsBankAccesses 1554 .name(name() + ".lds_bank_access_cnt") 1555 .desc("Total number of LDS bank accesses") 1556 ; 1557 1558 pageDivergenceDist 1559 // A wavefront can touch up to N pages per memory instruction where 1560 // N is equal to the wavefront size 1561 // The number of pages per bin can be configured (here it's 4). 1562 .init(1, wfSize(), 4) 1563 .name(name() + ".page_divergence_dist") 1564 .desc("pages touched per wf (over all mem. 
instr.)") 1565 ; 1566 1567 controlFlowDivergenceDist 1568 .init(1, wfSize(), 4) 1569 .name(name() + ".warp_execution_dist") 1570 .desc("number of lanes active per instruction (oval all instructions)") 1571 ; 1572 1573 activeLanesPerGMemInstrDist 1574 .init(1, wfSize(), 4) 1575 .name(name() + ".gmem_lanes_execution_dist") 1576 .desc("number of active lanes per global memory instruction") 1577 ; 1578 1579 activeLanesPerLMemInstrDist 1580 .init(1, wfSize(), 4) 1581 .name(name() + ".lmem_lanes_execution_dist") 1582 .desc("number of active lanes per local memory instruction") 1583 ; 1584 1585 numInstrExecuted 1586 .name(name() + ".num_instr_executed") 1587 .desc("number of instructions executed") 1588 ; 1589 1590 numVecOpsExecuted 1591 .name(name() + ".num_vec_ops_executed") 1592 .desc("number of vec ops executed (e.g. WF size/inst)") 1593 ; 1594 1595 totalCycles 1596 .name(name() + ".num_total_cycles") 1597 .desc("number of cycles the CU ran for") 1598 ; 1599 1600 ipc 1601 .name(name() + ".ipc") 1602 .desc("Instructions per cycle (this CU only)") 1603 ; 1604 1605 vpc 1606 .name(name() + ".vpc") 1607 .desc("Vector Operations per cycle (this CU only)") 1608 ; 1609 1610 numALUInstsExecuted 1611 .name(name() + ".num_alu_insts_executed") 1612 .desc("Number of dynamic non-GM memory insts executed") 1613 ; 1614 1615 wgBlockedDueLdsAllocation 1616 .name(name() + ".wg_blocked_due_lds_alloc") 1617 .desc("Workgroup blocked due to LDS capacity") 1618 ; 1619 1620 ipc = numInstrExecuted / totalCycles; 1621 vpc = numVecOpsExecuted / totalCycles; 1622 1623 numTimesWgBlockedDueVgprAlloc 1624 .name(name() + ".times_wg_blocked_due_vgpr_alloc") 1625 .desc("Number of times WGs are blocked due to VGPR allocation per SIMD") 1626 ; 1627 1628 dynamicGMemInstrCnt 1629 .name(name() + ".global_mem_instr_cnt") 1630 .desc("dynamic global memory instructions count") 1631 ; 1632 1633 dynamicLMemInstrCnt 1634 .name(name() + ".local_mem_instr_cnt") 1635 .desc("dynamic local memory intruction count") 1636 ; 1637 1638 numALUInstsExecuted = numInstrExecuted - dynamicGMemInstrCnt - 1639 dynamicLMemInstrCnt; 1640 1641 completedWfs 1642 .name(name() + ".num_completed_wfs") 1643 .desc("number of completed wavefronts") 1644 ; 1645 1646 numCASOps 1647 .name(name() + ".num_CAS_ops") 1648 .desc("number of compare and swap operations") 1649 ; 1650 1651 numFailedCASOps 1652 .name(name() + ".num_failed_CAS_ops") 1653 .desc("number of compare and swap operations that failed") 1654 ; 1655 1656 // register stats of pipeline stages 1657 fetchStage.regStats(); 1658 scoreboardCheckStage.regStats(); 1659 scheduleStage.regStats(); 1660 execStage.regStats(); 1661 1662 // register stats of memory pipeline 1663 globalMemoryPipe.regStats(); 1664 localMemoryPipe.regStats(); 1665} 1666 1667void 1668ComputeUnit::updateInstStats(GPUDynInstPtr gpuDynInst) 1669{ 1670 if (gpuDynInst->isScalar()) { 1671 if (gpuDynInst->isALU() && !gpuDynInst->isWaitcnt()) { 1672 sALUInsts++; 1673 instCyclesSALU++; 1674 } else if (gpuDynInst->isLoad()) { 1675 scalarMemReads++; 1676 } else if (gpuDynInst->isStore()) { 1677 scalarMemWrites++; 1678 } 1679 } else { 1680 if (gpuDynInst->isALU()) { 1681 vALUInsts++; 1682 instCyclesVALU++; 1683 threadCyclesVALU += gpuDynInst->wavefront()->execMask().count(); 1684 } else if (gpuDynInst->isFlat()) { 1685 if (gpuDynInst->isLocalMem()) { 1686 flatLDSInsts++; 1687 } else { 1688 flatVMemInsts++; 1689 } 1690 } else if (gpuDynInst->isLocalMem()) { 1691 ldsNoFlatInsts++; 1692 } else if (gpuDynInst->isLoad()) { 1693 vectorMemReads++; 1694 } 
else if (gpuDynInst->isStore()) { 1695 vectorMemWrites++; 1696 } 1697 } 1698} 1699 1700void 1701ComputeUnit::updatePageDivergenceDist(Addr addr) 1702{ 1703 Addr virt_page_addr = roundDown(addr, TheISA::PageBytes); 1704 1705 if (!pagesTouched.count(virt_page_addr)) 1706 pagesTouched[virt_page_addr] = 1; 1707 else 1708 pagesTouched[virt_page_addr]++; 1709} 1710 1711void 1712ComputeUnit::CUExitCallback::process() 1713{ 1714 if (computeUnit->countPages) { 1715 std::ostream *page_stat_file = 1716 simout.create(computeUnit->name().c_str())->stream(); 1717 1718 *page_stat_file << "page, wavefront accesses, workitem accesses" << 1719 std::endl; 1720 1721 for (auto iter : computeUnit->pageAccesses) { 1722 *page_stat_file << std::hex << iter.first << ","; 1723 *page_stat_file << std::dec << iter.second.first << ","; 1724 *page_stat_file << std::dec << iter.second.second << std::endl; 1725 } 1726 } 1727 } 1728 1729bool 1730ComputeUnit::isDone() const 1731{ 1732 for (int i = 0; i < numSIMDs; ++i) { 1733 if (!isSimdDone(i)) { 1734 return false; 1735 } 1736 } 1737 1738 bool glbMemBusRdy = true; 1739 for (int j = 0; j < numGlbMemUnits; ++j) { 1740 glbMemBusRdy &= vrfToGlobalMemPipeBus[j].rdy(); 1741 } 1742 bool locMemBusRdy = true; 1743 for (int j = 0; j < numLocMemUnits; ++j) { 1744 locMemBusRdy &= vrfToLocalMemPipeBus[j].rdy(); 1745 } 1746 1747 if (!globalMemoryPipe.isGMLdRespFIFOWrRdy() || 1748 !globalMemoryPipe.isGMStRespFIFOWrRdy() || 1749 !globalMemoryPipe.isGMReqFIFOWrRdy() || !localMemoryPipe.isLMReqFIFOWrRdy() 1750 || !localMemoryPipe.isLMRespFIFOWrRdy() || !locMemToVrfBus.rdy() || 1751 !glbMemToVrfBus.rdy() || !locMemBusRdy || !glbMemBusRdy) { 1752 return false; 1753 } 1754 1755 return true; 1756} 1757 1758int32_t 1759ComputeUnit::getRefCounter(const uint32_t dispatchId, const uint32_t wgId) const 1760{ 1761 return lds.getRefCounter(dispatchId, wgId); 1762} 1763 1764bool 1765ComputeUnit::isSimdDone(uint32_t simdId) const 1766{ 1767 assert(simdId < numSIMDs); 1768 1769 for (int i=0; i < numGlbMemUnits; ++i) { 1770 if (!vrfToGlobalMemPipeBus[i].rdy()) 1771 return false; 1772 } 1773 for (int i=0; i < numLocMemUnits; ++i) { 1774 if (!vrfToLocalMemPipeBus[i].rdy()) 1775 return false; 1776 } 1777 if (!aluPipe[simdId].rdy()) { 1778 return false; 1779 } 1780 1781 for (int i_wf = 0; i_wf < shader->n_wf; ++i_wf){ 1782 if (wfList[simdId][i_wf]->status != Wavefront::S_STOPPED) { 1783 return false; 1784 } 1785 } 1786 1787 return true; 1788} 1789 1790/** 1791 * send a general request to the LDS 1792 * make sure to look at the return value here as your request might be 1793 * NACK'd and returning false means that you have to have some backup plan 1794 */ 1795bool 1796ComputeUnit::sendToLds(GPUDynInstPtr gpuDynInst) 1797{ 1798 // this is just a request to carry the GPUDynInstPtr 1799 // back and forth 1800 RequestPtr newRequest = std::make_shared<Request>(); 1801 newRequest->setPaddr(0x0); 1802 1803 // ReadReq is not evaluted by the LDS but the Packet ctor requires this 1804 PacketPtr newPacket = new Packet(newRequest, MemCmd::ReadReq); 1805 1806 // This is the SenderState needed upon return 1807 newPacket->senderState = new LDSPort::SenderState(gpuDynInst); 1808 1809 return ldsPort->sendTimingReq(newPacket); 1810} 1811 1812/** 1813 * get the result of packets sent to the LDS when they return 1814 */ 1815bool 1816ComputeUnit::LDSPort::recvTimingResp(PacketPtr packet) 1817{ 1818 const ComputeUnit::LDSPort::SenderState *senderState = 1819 dynamic_cast<ComputeUnit::LDSPort::SenderState *>(packet->senderState); 
1820 1821 fatal_if(!senderState, "did not get the right sort of sender state"); 1822 1823 GPUDynInstPtr gpuDynInst = senderState->getMemInst(); 1824 1825 delete packet->senderState; 1826 delete packet; 1827 1828 computeUnit->localMemoryPipe.getLMRespFIFO().push(gpuDynInst); 1829 return true; 1830} 1831 1832/** 1833 * attempt to send this packet, either the port is already stalled, the request 1834 * is nack'd and must stall or the request goes through 1835 * when a request cannot be sent, add it to the retries queue 1836 */ 1837bool 1838ComputeUnit::LDSPort::sendTimingReq(PacketPtr pkt) 1839{ 1840 ComputeUnit::LDSPort::SenderState *sender_state = 1841 dynamic_cast<ComputeUnit::LDSPort::SenderState*>(pkt->senderState); 1842 fatal_if(!sender_state, "packet without a valid sender state"); 1843 1844 GPUDynInstPtr gpuDynInst M5_VAR_USED = sender_state->getMemInst(); 1845 1846 if (isStalled()) { 1847 fatal_if(retries.empty(), "must have retries waiting to be stalled"); 1848 1849 retries.push(pkt); 1850 1851 DPRINTF(GPUPort, "CU%d: WF[%d][%d]: LDS send failed!\n", 1852 computeUnit->cu_id, gpuDynInst->simdId, 1853 gpuDynInst->wfSlotId); 1854 return false; 1855 } else if (!MasterPort::sendTimingReq(pkt)) { 1856 // need to stall the LDS port until a recvReqRetry() is received 1857 // this indicates that there is more space 1858 stallPort(); 1859 retries.push(pkt); 1860 1861 DPRINTF(GPUPort, "CU%d: WF[%d][%d]: addr %#x lds req failed!\n", 1862 computeUnit->cu_id, gpuDynInst->simdId, 1863 gpuDynInst->wfSlotId, pkt->req->getPaddr()); 1864 return false; 1865 } else { 1866 DPRINTF(GPUPort, "CU%d: WF[%d][%d]: addr %#x lds req sent!\n", 1867 computeUnit->cu_id, gpuDynInst->simdId, 1868 gpuDynInst->wfSlotId, pkt->req->getPaddr()); 1869 return true; 1870 } 1871} 1872 1873/** 1874 * the bus is telling the port that there is now space so retrying stalled 1875 * requests should work now 1876 * this allows the port to have a request be nack'd and then have the receiver 1877 * say when there is space, rather than simply retrying the send every cycle 1878 */ 1879void 1880ComputeUnit::LDSPort::recvReqRetry() 1881{ 1882 auto queueSize = retries.size(); 1883 1884 DPRINTF(GPUPort, "CU%d: LDSPort recvReqRetry - %d pending requests\n", 1885 computeUnit->cu_id, queueSize); 1886 1887 fatal_if(queueSize < 1, 1888 "why was there a recvReqRetry() with no pending reqs?"); 1889 fatal_if(!isStalled(), 1890 "recvReqRetry() happened when the port was not stalled"); 1891 1892 unstallPort(); 1893 1894 while (!retries.empty()) { 1895 PacketPtr packet = retries.front(); 1896 1897 DPRINTF(GPUPort, "CU%d: retrying LDS send\n", computeUnit->cu_id); 1898 1899 if (!MasterPort::sendTimingReq(packet)) { 1900 // Stall port 1901 stallPort(); 1902 DPRINTF(GPUPort, ": LDS send failed again\n"); 1903 break; 1904 } else { 1905 DPRINTF(GPUTLB, ": LDS send successful\n"); 1906 retries.pop(); 1907 } 1908 } 1909} 1910