compute_unit.cc revision 11534
1/* 2 * Copyright (c) 2011-2015 Advanced Micro Devices, Inc. 3 * All rights reserved. 4 * 5 * For use for simulation and test purposes only 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions are met: 9 * 10 * 1. Redistributions of source code must retain the above copyright notice, 11 * this list of conditions and the following disclaimer. 12 * 13 * 2. Redistributions in binary form must reproduce the above copyright notice, 14 * this list of conditions and the following disclaimer in the documentation 15 * and/or other materials provided with the distribution. 16 * 17 * 3. Neither the name of the copyright holder nor the names of its contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 * 33 * Author: John Kalamatianos, Anthony Gutierrez 34 */ 35#include "gpu-compute/compute_unit.hh" 36 37#include <limits> 38 39#include "base/output.hh" 40#include "debug/GPUDisp.hh" 41#include "debug/GPUExec.hh" 42#include "debug/GPUFetch.hh" 43#include "debug/GPUMem.hh" 44#include "debug/GPUPort.hh" 45#include "debug/GPUPrefetch.hh" 46#include "debug/GPUSync.hh" 47#include "debug/GPUTLB.hh" 48#include "gpu-compute/dispatcher.hh" 49#include "gpu-compute/gpu_dyn_inst.hh" 50#include "gpu-compute/gpu_static_inst.hh" 51#include "gpu-compute/ndrange.hh" 52#include "gpu-compute/shader.hh" 53#include "gpu-compute/simple_pool_manager.hh" 54#include "gpu-compute/vector_register_file.hh" 55#include "gpu-compute/wavefront.hh" 56#include "mem/page_table.hh" 57#include "sim/process.hh" 58 59ComputeUnit::ComputeUnit(const Params *p) : MemObject(p), fetchStage(p), 60 scoreboardCheckStage(p), scheduleStage(p), execStage(p), 61 globalMemoryPipe(p), localMemoryPipe(p), rrNextMemID(0), rrNextALUWp(0), 62 cu_id(p->cu_id), vrf(p->vector_register_file), numSIMDs(p->num_SIMDs), 63 spBypassPipeLength(p->spbypass_pipe_length), 64 dpBypassPipeLength(p->dpbypass_pipe_length), 65 issuePeriod(p->issue_period), 66 numGlbMemUnits(p->num_global_mem_pipes), 67 numLocMemUnits(p->num_shared_mem_pipes), 68 perLaneTLB(p->perLaneTLB), prefetchDepth(p->prefetch_depth), 69 prefetchStride(p->prefetch_stride), prefetchType(p->prefetch_prev_type), 70 xact_cas_mode(p->xactCasMode), debugSegFault(p->debugSegFault), 71 functionalTLB(p->functionalTLB), localMemBarrier(p->localMemBarrier), 72 countPages(p->countPages), barrier_id(0), 73 vrfToCoalescerBusWidth(p->vrf_to_coalescer_bus_width), 74 coalescerToVrfBusWidth(p->coalescer_to_vrf_bus_width), 75 req_tick_latency(p->mem_req_latency * p->clk_domain->clockPeriod()), 76 resp_tick_latency(p->mem_resp_latency * p->clk_domain->clockPeriod()), 77 _masterId(p->system->getMasterId(name() + ".ComputeUnit")), 78 lds(*p->localDataStore), globalSeqNum(0), wavefrontSize(p->wfSize) 79{ 80 /** 81 * This check is necessary because std::bitset only provides conversion 82 * to unsigned long or unsigned long long via to_ulong() or to_ullong(). 83 * there are * a few places in the code where to_ullong() is used, however 84 * if VSZ is larger than a value the host can support then bitset will 85 * throw a runtime exception. we should remove all use of to_long() or 86 * to_ullong() so we can have VSZ greater than 64b, however until that is 87 * done this assert is required. 88 */ 89 fatal_if(p->wfSize > std::numeric_limits<unsigned long long>::digits || 90 p->wfSize <= 0, 91 "WF size is larger than the host can support"); 92 fatal_if(!isPowerOf2(wavefrontSize), 93 "Wavefront size should be a power of 2"); 94 // calculate how many cycles a vector load or store will need to transfer 95 // its data over the corresponding buses 96 numCyclesPerStoreTransfer = 97 (uint32_t)ceil((double)(wfSize() * sizeof(uint32_t)) / 98 (double)vrfToCoalescerBusWidth); 99 100 numCyclesPerLoadTransfer = (wfSize() * sizeof(uint32_t)) 101 / coalescerToVrfBusWidth; 102 103 lastVaddrWF.resize(numSIMDs); 104 wfList.resize(numSIMDs); 105 106 for (int j = 0; j < numSIMDs; ++j) { 107 lastVaddrWF[j].resize(p->n_wf); 108 109 for (int i = 0; i < p->n_wf; ++i) { 110 lastVaddrWF[j][i].resize(wfSize()); 111 112 wfList[j].push_back(p->wavefronts[j * p->n_wf + i]); 113 wfList[j][i]->setParent(this); 114 115 for (int k = 0; k < wfSize(); ++k) { 116 lastVaddrWF[j][i][k] = 0; 117 } 118 } 119 } 120 121 lastVaddrSimd.resize(numSIMDs); 122 123 for (int i = 0; i < numSIMDs; ++i) { 124 lastVaddrSimd[i].resize(wfSize(), 0); 125 } 126 127 lastVaddrCU.resize(wfSize()); 128 129 lds.setParent(this); 130 131 if (p->execPolicy == "OLDEST-FIRST") { 132 exec_policy = EXEC_POLICY::OLDEST; 133 } else if (p->execPolicy == "ROUND-ROBIN") { 134 exec_policy = EXEC_POLICY::RR; 135 } else { 136 fatal("Invalid WF execution policy (CU)\n"); 137 } 138 139 memPort.resize(wfSize()); 140 141 // resize the tlbPort vectorArray 142 int tlbPort_width = perLaneTLB ? wfSize() : 1; 143 tlbPort.resize(tlbPort_width); 144 145 cuExitCallback = new CUExitCallback(this); 146 registerExitCallback(cuExitCallback); 147 148 xactCasLoadMap.clear(); 149 lastExecCycle.resize(numSIMDs, 0); 150 151 for (int i = 0; i < vrf.size(); ++i) { 152 vrf[i]->setParent(this); 153 } 154 155 numVecRegsPerSimd = vrf[0]->numRegs(); 156} 157 158ComputeUnit::~ComputeUnit() 159{ 160 // Delete wavefront slots 161 for (int j = 0; j < numSIMDs; ++j) { 162 for (int i = 0; i < shader->n_wf; ++i) { 163 delete wfList[j][i]; 164 } 165 lastVaddrSimd[j].clear(); 166 } 167 lastVaddrCU.clear(); 168 readyList.clear(); 169 waveStatusList.clear(); 170 dispatchList.clear(); 171 vectorAluInstAvail.clear(); 172 delete cuExitCallback; 173 delete ldsPort; 174} 175 176void 177ComputeUnit::FillKernelState(Wavefront *w, NDRange *ndr) 178{ 179 w->resizeRegFiles(ndr->q.cRegCount, ndr->q.sRegCount, ndr->q.dRegCount); 180 181 w->workgroupsz[0] = ndr->q.wgSize[0]; 182 w->workgroupsz[1] = ndr->q.wgSize[1]; 183 w->workgroupsz[2] = ndr->q.wgSize[2]; 184 w->wg_sz = w->workgroupsz[0] * w->workgroupsz[1] * w->workgroupsz[2]; 185 w->gridsz[0] = ndr->q.gdSize[0]; 186 w->gridsz[1] = ndr->q.gdSize[1]; 187 w->gridsz[2] = ndr->q.gdSize[2]; 188 w->kernelArgs = ndr->q.args; 189 w->privSizePerItem = ndr->q.privMemPerItem; 190 w->spillSizePerItem = ndr->q.spillMemPerItem; 191 w->roBase = ndr->q.roMemStart; 192 w->roSize = ndr->q.roMemTotal; 193} 194 195void 196ComputeUnit::InitializeWFContext(WFContext *wfCtx, NDRange *ndr, int cnt, 197 int trueWgSize[], int trueWgSizeTotal, 198 LdsChunk *ldsChunk, uint64_t origSpillMemStart) 199{ 200 wfCtx->cnt = cnt; 201 202 VectorMask init_mask; 203 init_mask.reset(); 204 205 for (int k = 0; k < wfSize(); ++k) { 206 if (k + cnt * wfSize() < trueWgSizeTotal) 207 init_mask[k] = 1; 208 } 209 210 wfCtx->init_mask = init_mask.to_ullong(); 211 wfCtx->exec_mask = init_mask.to_ullong(); 212 213 wfCtx->bar_cnt.resize(wfSize(), 0); 214 215 wfCtx->max_bar_cnt = 0; 216 wfCtx->old_barrier_cnt = 0; 217 wfCtx->barrier_cnt = 0; 218 219 wfCtx->privBase = ndr->q.privMemStart; 220 ndr->q.privMemStart += ndr->q.privMemPerItem * wfSize(); 221 222 wfCtx->spillBase = ndr->q.spillMemStart; 223 ndr->q.spillMemStart += ndr->q.spillMemPerItem * wfSize(); 224 225 wfCtx->pc = 0; 226 wfCtx->rpc = UINT32_MAX; 227 228 // set the wavefront context to have a pointer to this section of the LDS 229 wfCtx->ldsChunk = ldsChunk; 230 231 // WG state 232 wfCtx->wg_id = ndr->globalWgId; 233 wfCtx->barrier_id = barrier_id; 234 235 // Kernel wide state 236 wfCtx->ndr = ndr; 237} 238 239void 240ComputeUnit::updateEvents() { 241 242 if (!timestampVec.empty()) { 243 uint32_t vecSize = timestampVec.size(); 244 uint32_t i = 0; 245 while (i < vecSize) { 246 if (timestampVec[i] <= shader->tick_cnt) { 247 std::pair<uint32_t, uint32_t> regInfo = regIdxVec[i]; 248 vrf[regInfo.first]->markReg(regInfo.second, sizeof(uint32_t), 249 statusVec[i]); 250 timestampVec.erase(timestampVec.begin() + i); 251 regIdxVec.erase(regIdxVec.begin() + i); 252 statusVec.erase(statusVec.begin() + i); 253 --vecSize; 254 --i; 255 } 256 ++i; 257 } 258 } 259 260 for (int i = 0; i< numSIMDs; ++i) { 261 vrf[i]->updateEvents(); 262 } 263} 264 265 266void 267ComputeUnit::StartWF(Wavefront *w, WFContext *wfCtx, int trueWgSize[], 268 int trueWgSizeTotal) 269{ 270 static int _n_wave = 0; 271 int cnt = wfCtx->cnt; 272 NDRange *ndr = wfCtx->ndr; 273 274 // Fill in Kernel state 275 FillKernelState(w, ndr); 276 277 w->kern_id = ndr->dispatchId; 278 w->dynwaveid = cnt; 279 w->init_mask = wfCtx->init_mask; 280 281 for (int k = 0; k < wfSize(); ++k) { 282 w->workitemid[0][k] = (k+cnt*wfSize()) % trueWgSize[0]; 283 w->workitemid[1][k] = 284 ((k + cnt * wfSize()) / trueWgSize[0]) % trueWgSize[1]; 285 w->workitemid[2][k] = 286 (k + cnt * wfSize()) / (trueWgSize[0] * trueWgSize[1]); 287 288 w->workitemFlatId[k] = w->workitemid[2][k] * trueWgSize[0] * 289 trueWgSize[1] + w->workitemid[1][k] * trueWgSize[0] + 290 w->workitemid[0][k]; 291 } 292 293 w->old_barrier_cnt = wfCtx->old_barrier_cnt; 294 w->barrier_cnt = wfCtx->barrier_cnt; 295 w->barrier_slots = divCeil(trueWgSizeTotal, wfSize()); 296 297 for (int i = 0; i < wfSize(); ++i) { 298 w->bar_cnt[i] = wfCtx->bar_cnt[i]; 299 } 300 301 w->max_bar_cnt = wfCtx->max_bar_cnt; 302 w->privBase = wfCtx->privBase; 303 w->spillBase = wfCtx->spillBase; 304 305 w->pushToReconvergenceStack(wfCtx->pc, wfCtx->rpc, wfCtx->exec_mask); 306 307 // WG state 308 w->wg_id = wfCtx->wg_id; 309 w->dispatchid = wfCtx->ndr->dispatchId; 310 w->workgroupid[0] = w->wg_id % ndr->numWg[0]; 311 w->workgroupid[1] = (w->wg_id / ndr->numWg[0]) % ndr->numWg[1]; 312 w->workgroupid[2] = w->wg_id / (ndr->numWg[0] * ndr->numWg[1]); 313 314 w->barrier_id = wfCtx->barrier_id; 315 w->stalledAtBarrier = false; 316 317 // move this from the context into the actual wavefront 318 w->ldsChunk = wfCtx->ldsChunk; 319 320 int32_t refCount M5_VAR_USED = 321 lds.increaseRefCounter(w->dispatchid, w->wg_id); 322 DPRINTF(GPUDisp, "CU%d: increase ref ctr wg[%d] to [%d]\n", 323 cu_id, w->wg_id, refCount); 324 325 w->instructionBuffer.clear(); 326 327 if (w->pendingFetch) 328 w->dropFetch = true; 329 330 // is this the last wavefront in the workgroup 331 // if set the spillWidth to be the remaining work-items 332 // so that the vector access is correct 333 if ((cnt + 1) * wfSize() >= trueWgSizeTotal) { 334 w->spillWidth = trueWgSizeTotal - (cnt * wfSize()); 335 } else { 336 w->spillWidth = wfSize(); 337 } 338 339 DPRINTF(GPUDisp, "Scheduling wfDynId/barrier_id %d/%d on CU%d: " 340 "WF[%d][%d]\n", _n_wave, barrier_id, cu_id, w->simdId, w->wfSlotId); 341 342 w->start(++_n_wave, ndr->q.code_ptr); 343 wfCtx->bar_cnt.clear(); 344} 345 346void 347ComputeUnit::StartWorkgroup(NDRange *ndr) 348{ 349 // reserve the LDS capacity allocated to the work group 350 // disambiguated by the dispatch ID and workgroup ID, which should be 351 // globally unique 352 LdsChunk *ldsChunk = lds.reserveSpace(ndr->dispatchId, ndr->globalWgId, 353 ndr->q.ldsSize); 354 355 // Send L1 cache acquire 356 // isKernel + isAcquire = Kernel Begin 357 if (shader->impl_kern_boundary_sync) { 358 GPUDynInstPtr gpuDynInst = std::make_shared<GPUDynInst>(this, 359 nullptr, 360 nullptr, 0); 361 362 gpuDynInst->useContinuation = false; 363 gpuDynInst->memoryOrder = Enums::MEMORY_ORDER_SC_ACQUIRE; 364 gpuDynInst->scope = Enums::MEMORY_SCOPE_SYSTEM; 365 injectGlobalMemFence(gpuDynInst, true); 366 } 367 368 // Get true size of workgroup (after clamping to grid size) 369 int trueWgSize[3]; 370 int trueWgSizeTotal = 1; 371 372 for (int d = 0; d < 3; ++d) { 373 trueWgSize[d] = std::min(ndr->q.wgSize[d], ndr->q.gdSize[d] - 374 ndr->wgId[d] * ndr->q.wgSize[d]); 375 376 trueWgSizeTotal *= trueWgSize[d]; 377 } 378 379 uint64_t origSpillMemStart = ndr->q.spillMemStart; 380 // calculate the number of 32-bit vector registers required by wavefront 381 int vregDemand = ndr->q.sRegCount + (2 * ndr->q.dRegCount); 382 int cnt = 0; 383 384 // Assign WFs by spreading them across SIMDs, 1 WF per SIMD at a time 385 for (int m = 0; m < shader->n_wf * numSIMDs; ++m) { 386 Wavefront *w = wfList[m % numSIMDs][m / numSIMDs]; 387 // Check if this wavefront slot is available: 388 // It must be stopped and not waiting 389 // for a release to complete S_RETURNING 390 if (w->status == Wavefront::S_STOPPED) { 391 // if we have scheduled all work items then stop 392 // scheduling wavefronts 393 if (cnt * wfSize() >= trueWgSizeTotal) 394 break; 395 396 // reserve vector registers for the scheduled wavefront 397 assert(vectorRegsReserved[m % numSIMDs] <= numVecRegsPerSimd); 398 uint32_t normSize = 0; 399 400 w->startVgprIndex = vrf[m % numSIMDs]->manager-> 401 allocateRegion(vregDemand, &normSize); 402 403 w->reservedVectorRegs = normSize; 404 vectorRegsReserved[m % numSIMDs] += w->reservedVectorRegs; 405 406 WFContext wfCtx; 407 408 InitializeWFContext(&wfCtx, ndr, cnt, trueWgSize, trueWgSizeTotal, 409 ldsChunk, origSpillMemStart); 410 411 StartWF(w, &wfCtx, trueWgSize, trueWgSizeTotal); 412 ++cnt; 413 } 414 } 415 ++barrier_id; 416} 417 418int 419ComputeUnit::ReadyWorkgroup(NDRange *ndr) 420{ 421 // Get true size of workgroup (after clamping to grid size) 422 int trueWgSize[3]; 423 int trueWgSizeTotal = 1; 424 425 for (int d = 0; d < 3; ++d) { 426 trueWgSize[d] = std::min(ndr->q.wgSize[d], ndr->q.gdSize[d] - 427 ndr->wgId[d] * ndr->q.wgSize[d]); 428 429 trueWgSizeTotal *= trueWgSize[d]; 430 DPRINTF(GPUDisp, "trueWgSize[%d] = %d\n", d, trueWgSize[d]); 431 } 432 433 DPRINTF(GPUDisp, "trueWgSizeTotal = %d\n", trueWgSizeTotal); 434 435 // calculate the number of 32-bit vector registers required by each 436 // work item of the work group 437 int vregDemandPerWI = ndr->q.sRegCount + (2 * ndr->q.dRegCount); 438 bool vregAvail = true; 439 int numWfs = (trueWgSizeTotal + wfSize() - 1) / wfSize(); 440 int freeWfSlots = 0; 441 // check if the total number of VGPRs required by all WFs of the WG 442 // fit in the VRFs of all SIMD units 443 assert((numWfs * vregDemandPerWI) <= (numSIMDs * numVecRegsPerSimd)); 444 int numMappedWfs = 0; 445 std::vector<int> numWfsPerSimd; 446 numWfsPerSimd.resize(numSIMDs, 0); 447 // find how many free WF slots we have across all SIMDs 448 for (int j = 0; j < shader->n_wf; ++j) { 449 for (int i = 0; i < numSIMDs; ++i) { 450 if (wfList[i][j]->status == Wavefront::S_STOPPED) { 451 // count the number of free WF slots 452 ++freeWfSlots; 453 if (numMappedWfs < numWfs) { 454 // count the WFs to be assigned per SIMD 455 numWfsPerSimd[i]++; 456 } 457 numMappedWfs++; 458 } 459 } 460 } 461 462 // if there are enough free WF slots then find if there are enough 463 // free VGPRs per SIMD based on the WF->SIMD mapping 464 if (freeWfSlots >= numWfs) { 465 for (int j = 0; j < numSIMDs; ++j) { 466 // find if there are enough free VGPR regions in the SIMD's VRF 467 // to accommodate the WFs of the new WG that would be mapped to 468 // this SIMD unit 469 vregAvail = vrf[j]->manager->canAllocate(numWfsPerSimd[j], 470 vregDemandPerWI); 471 472 // stop searching if there is at least one SIMD 473 // whose VRF does not have enough free VGPR pools. 474 // This is because a WG is scheduled only if ALL 475 // of its WFs can be scheduled 476 if (!vregAvail) 477 break; 478 } 479 } 480 481 DPRINTF(GPUDisp, "Free WF slots = %d, VGPR Availability = %d\n", 482 freeWfSlots, vregAvail); 483 484 if (!vregAvail) { 485 ++numTimesWgBlockedDueVgprAlloc; 486 } 487 488 // Return true if enough WF slots to submit workgroup and if there are 489 // enough VGPRs to schedule all WFs to their SIMD units 490 if (!lds.canReserve(ndr->q.ldsSize)) { 491 wgBlockedDueLdsAllocation++; 492 } 493 494 // Return true if (a) there are enough free WF slots to submit 495 // workgrounp and (b) if there are enough VGPRs to schedule all WFs to their 496 // SIMD units and (c) if there is enough space in LDS 497 return freeWfSlots >= numWfs && vregAvail && lds.canReserve(ndr->q.ldsSize); 498} 499 500int 501ComputeUnit::AllAtBarrier(uint32_t _barrier_id, uint32_t bcnt, uint32_t bslots) 502{ 503 DPRINTF(GPUSync, "CU%d: Checking for All At Barrier\n", cu_id); 504 int ccnt = 0; 505 506 for (int i_simd = 0; i_simd < numSIMDs; ++i_simd) { 507 for (int i_wf = 0; i_wf < shader->n_wf; ++i_wf) { 508 Wavefront *w = wfList[i_simd][i_wf]; 509 510 if (w->status == Wavefront::S_RUNNING) { 511 DPRINTF(GPUSync, "Checking WF[%d][%d]\n", i_simd, i_wf); 512 513 DPRINTF(GPUSync, "wf->barrier_id = %d, _barrier_id = %d\n", 514 w->barrier_id, _barrier_id); 515 516 DPRINTF(GPUSync, "wf->barrier_cnt %d, bcnt = %d\n", 517 w->barrier_cnt, bcnt); 518 } 519 520 if (w->status == Wavefront::S_RUNNING && 521 w->barrier_id == _barrier_id && w->barrier_cnt == bcnt && 522 !w->outstanding_reqs) { 523 ++ccnt; 524 525 DPRINTF(GPUSync, "WF[%d][%d] at barrier, increment ccnt to " 526 "%d\n", i_simd, i_wf, ccnt); 527 } 528 } 529 } 530 531 DPRINTF(GPUSync, "CU%d: returning allAtBarrier ccnt = %d, bslots = %d\n", 532 cu_id, ccnt, bslots); 533 534 return ccnt == bslots; 535} 536 537// Check if the current wavefront is blocked on additional resources. 538bool 539ComputeUnit::cedeSIMD(int simdId, int wfSlotId) 540{ 541 bool cede = false; 542 543 // If --xact-cas-mode option is enabled in run.py, then xact_cas_ld 544 // magic instructions will impact the scheduling of wavefronts 545 if (xact_cas_mode) { 546 /* 547 * When a wavefront calls xact_cas_ld, it adds itself to a per address 548 * queue. All per address queues are managed by the xactCasLoadMap. 549 * 550 * A wavefront is not blocked if: it is not in ANY per address queue or 551 * if it is at the head of a per address queue. 552 */ 553 for (auto itMap : xactCasLoadMap) { 554 std::list<waveIdentifier> curWaveIDQueue = itMap.second.waveIDQueue; 555 556 if (!curWaveIDQueue.empty()) { 557 for (auto it : curWaveIDQueue) { 558 waveIdentifier cur_wave = it; 559 560 if (cur_wave.simdId == simdId && 561 cur_wave.wfSlotId == wfSlotId) { 562 // 2 possibilities 563 // 1: this WF has a green light 564 // 2: another WF has a green light 565 waveIdentifier owner_wave = curWaveIDQueue.front(); 566 567 if (owner_wave.simdId != cur_wave.simdId || 568 owner_wave.wfSlotId != cur_wave.wfSlotId) { 569 // possibility 2 570 cede = true; 571 break; 572 } else { 573 // possibility 1 574 break; 575 } 576 } 577 } 578 } 579 } 580 } 581 582 return cede; 583} 584 585// Execute one clock worth of work on the ComputeUnit. 586void 587ComputeUnit::exec() 588{ 589 updateEvents(); 590 // Execute pipeline stages in reverse order to simulate 591 // the pipeline latency 592 globalMemoryPipe.exec(); 593 localMemoryPipe.exec(); 594 execStage.exec(); 595 scheduleStage.exec(); 596 scoreboardCheckStage.exec(); 597 fetchStage.exec(); 598 599 totalCycles++; 600} 601 602void 603ComputeUnit::init() 604{ 605 // Initialize CU Bus models 606 glbMemToVrfBus.init(&shader->tick_cnt, shader->ticks(1)); 607 locMemToVrfBus.init(&shader->tick_cnt, shader->ticks(1)); 608 nextGlbMemBus = 0; 609 nextLocMemBus = 0; 610 fatal_if(numGlbMemUnits > 1, 611 "No support for multiple Global Memory Pipelines exists!!!"); 612 vrfToGlobalMemPipeBus.resize(numGlbMemUnits); 613 for (int j = 0; j < numGlbMemUnits; ++j) { 614 vrfToGlobalMemPipeBus[j] = WaitClass(); 615 vrfToGlobalMemPipeBus[j].init(&shader->tick_cnt, shader->ticks(1)); 616 } 617 618 fatal_if(numLocMemUnits > 1, 619 "No support for multiple Local Memory Pipelines exists!!!"); 620 vrfToLocalMemPipeBus.resize(numLocMemUnits); 621 for (int j = 0; j < numLocMemUnits; ++j) { 622 vrfToLocalMemPipeBus[j] = WaitClass(); 623 vrfToLocalMemPipeBus[j].init(&shader->tick_cnt, shader->ticks(1)); 624 } 625 vectorRegsReserved.resize(numSIMDs, 0); 626 aluPipe.resize(numSIMDs); 627 wfWait.resize(numSIMDs + numLocMemUnits + numGlbMemUnits); 628 629 for (int i = 0; i < numSIMDs + numLocMemUnits + numGlbMemUnits; ++i) { 630 wfWait[i] = WaitClass(); 631 wfWait[i].init(&shader->tick_cnt, shader->ticks(1)); 632 } 633 634 for (int i = 0; i < numSIMDs; ++i) { 635 aluPipe[i] = WaitClass(); 636 aluPipe[i].init(&shader->tick_cnt, shader->ticks(1)); 637 } 638 639 // Setup space for call args 640 for (int j = 0; j < numSIMDs; ++j) { 641 for (int i = 0; i < shader->n_wf; ++i) { 642 wfList[j][i]->initCallArgMem(shader->funcargs_size, wavefrontSize); 643 } 644 } 645 646 // Initializing pipeline resources 647 readyList.resize(numSIMDs + numGlbMemUnits + numLocMemUnits); 648 waveStatusList.resize(numSIMDs); 649 650 for (int j = 0; j < numSIMDs; ++j) { 651 for (int i = 0; i < shader->n_wf; ++i) { 652 waveStatusList[j].push_back( 653 std::make_pair(wfList[j][i], BLOCKED)); 654 } 655 } 656 657 for (int j = 0; j < (numSIMDs + numGlbMemUnits + numLocMemUnits); ++j) { 658 dispatchList.push_back(std::make_pair((Wavefront*)nullptr, EMPTY)); 659 } 660 661 fetchStage.init(this); 662 scoreboardCheckStage.init(this); 663 scheduleStage.init(this); 664 execStage.init(this); 665 globalMemoryPipe.init(this); 666 localMemoryPipe.init(this); 667 // initialize state for statistics calculation 668 vectorAluInstAvail.resize(numSIMDs, false); 669 shrMemInstAvail = 0; 670 glbMemInstAvail = 0; 671} 672 673bool 674ComputeUnit::DataPort::recvTimingResp(PacketPtr pkt) 675{ 676 // Ruby has completed the memory op. Schedule the mem_resp_event at the 677 // appropriate cycle to process the timing memory response 678 // This delay represents the pipeline delay 679 SenderState *sender_state = safe_cast<SenderState*>(pkt->senderState); 680 int index = sender_state->port_index; 681 GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst; 682 683 // Is the packet returned a Kernel End or Barrier 684 if (pkt->req->isKernel() && pkt->req->isRelease()) { 685 Wavefront *w = 686 computeUnit->wfList[gpuDynInst->simdId][gpuDynInst->wfSlotId]; 687 688 // Check if we are waiting on Kernel End Release 689 if (w->status == Wavefront::S_RETURNING) { 690 DPRINTF(GPUDisp, "CU%d: WF[%d][%d][wv=%d]: WG id completed %d\n", 691 computeUnit->cu_id, w->simdId, w->wfSlotId, 692 w->wfDynId, w->kern_id); 693 694 computeUnit->shader->dispatcher->notifyWgCompl(w); 695 w->status = Wavefront::S_STOPPED; 696 } else { 697 w->outstanding_reqs--; 698 } 699 700 DPRINTF(GPUSync, "CU%d: WF[%d][%d]: barrier_cnt = %d\n", 701 computeUnit->cu_id, gpuDynInst->simdId, 702 gpuDynInst->wfSlotId, w->barrier_cnt); 703 704 if (gpuDynInst->useContinuation) { 705 assert(gpuDynInst->scope != Enums::MEMORY_SCOPE_NONE); 706 gpuDynInst->execContinuation(gpuDynInst->staticInstruction(), 707 gpuDynInst); 708 } 709 710 delete pkt->senderState; 711 delete pkt->req; 712 delete pkt; 713 return true; 714 } else if (pkt->req->isKernel() && pkt->req->isAcquire()) { 715 if (gpuDynInst->useContinuation) { 716 assert(gpuDynInst->scope != Enums::MEMORY_SCOPE_NONE); 717 gpuDynInst->execContinuation(gpuDynInst->staticInstruction(), 718 gpuDynInst); 719 } 720 721 delete pkt->senderState; 722 delete pkt->req; 723 delete pkt; 724 return true; 725 } 726 727 ComputeUnit::DataPort::MemRespEvent *mem_resp_event = 728 new ComputeUnit::DataPort::MemRespEvent(computeUnit->memPort[index], 729 pkt); 730 731 DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x received!\n", 732 computeUnit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, 733 index, pkt->req->getPaddr()); 734 735 computeUnit->schedule(mem_resp_event, 736 curTick() + computeUnit->resp_tick_latency); 737 return true; 738} 739 740void 741ComputeUnit::DataPort::recvReqRetry() 742{ 743 int len = retries.size(); 744 745 assert(len > 0); 746 747 for (int i = 0; i < len; ++i) { 748 PacketPtr pkt = retries.front().first; 749 GPUDynInstPtr gpuDynInst M5_VAR_USED = retries.front().second; 750 DPRINTF(GPUMem, "CU%d: WF[%d][%d]: retry mem inst addr %#x\n", 751 computeUnit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, 752 pkt->req->getPaddr()); 753 754 /** Currently Ruby can return false due to conflicts for the particular 755 * cache block or address. Thus other requests should be allowed to 756 * pass and the data port should expect multiple retries. */ 757 if (!sendTimingReq(pkt)) { 758 DPRINTF(GPUMem, "failed again!\n"); 759 break; 760 } else { 761 DPRINTF(GPUMem, "successful!\n"); 762 retries.pop_front(); 763 } 764 } 765} 766 767bool 768ComputeUnit::SQCPort::recvTimingResp(PacketPtr pkt) 769{ 770 computeUnit->fetchStage.processFetchReturn(pkt); 771 772 return true; 773} 774 775void 776ComputeUnit::SQCPort::recvReqRetry() 777{ 778 int len = retries.size(); 779 780 assert(len > 0); 781 782 for (int i = 0; i < len; ++i) { 783 PacketPtr pkt = retries.front().first; 784 Wavefront *wavefront M5_VAR_USED = retries.front().second; 785 DPRINTF(GPUFetch, "CU%d: WF[%d][%d]: retrying FETCH addr %#x\n", 786 computeUnit->cu_id, wavefront->simdId, wavefront->wfSlotId, 787 pkt->req->getPaddr()); 788 if (!sendTimingReq(pkt)) { 789 DPRINTF(GPUFetch, "failed again!\n"); 790 break; 791 } else { 792 DPRINTF(GPUFetch, "successful!\n"); 793 retries.pop_front(); 794 } 795 } 796} 797 798void 799ComputeUnit::sendRequest(GPUDynInstPtr gpuDynInst, int index, PacketPtr pkt) 800{ 801 // There must be a way around this check to do the globalMemStart... 802 Addr tmp_vaddr = pkt->req->getVaddr(); 803 804 updatePageDivergenceDist(tmp_vaddr); 805 806 pkt->req->setVirt(pkt->req->getAsid(), tmp_vaddr, pkt->req->getSize(), 807 pkt->req->getFlags(), pkt->req->masterId(), 808 pkt->req->getPC()); 809 810 // figure out the type of the request to set read/write 811 BaseTLB::Mode TLB_mode; 812 assert(pkt->isRead() || pkt->isWrite()); 813 814 // Check write before read for atomic operations 815 // since atomic operations should use BaseTLB::Write 816 if (pkt->isWrite()){ 817 TLB_mode = BaseTLB::Write; 818 } else if (pkt->isRead()) { 819 TLB_mode = BaseTLB::Read; 820 } else { 821 fatal("pkt is not a read nor a write\n"); 822 } 823 824 tlbCycles -= curTick(); 825 ++tlbRequests; 826 827 int tlbPort_index = perLaneTLB ? index : 0; 828 829 if (shader->timingSim) { 830 if (debugSegFault) { 831 Process *p = shader->gpuTc->getProcessPtr(); 832 Addr vaddr = pkt->req->getVaddr(); 833 unsigned size = pkt->getSize(); 834 835 if ((vaddr + size - 1) % 64 < vaddr % 64) { 836 panic("CU%d: WF[%d][%d]: Access to addr %#x is unaligned!\n", 837 cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, vaddr); 838 } 839 840 Addr paddr; 841 842 if (!p->pTable->translate(vaddr, paddr)) { 843 if (!p->fixupStackFault(vaddr)) { 844 panic("CU%d: WF[%d][%d]: Fault on addr %#x!\n", 845 cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, 846 vaddr); 847 } 848 } 849 } 850 851 // This is the SenderState needed upon return 852 pkt->senderState = new DTLBPort::SenderState(gpuDynInst, index); 853 854 // This is the senderState needed by the TLB hierarchy to function 855 TheISA::GpuTLB::TranslationState *translation_state = 856 new TheISA::GpuTLB::TranslationState(TLB_mode, shader->gpuTc, false, 857 pkt->senderState); 858 859 pkt->senderState = translation_state; 860 861 if (functionalTLB) { 862 tlbPort[tlbPort_index]->sendFunctional(pkt); 863 864 // update the hitLevel distribution 865 int hit_level = translation_state->hitLevel; 866 assert(hit_level != -1); 867 hitsPerTLBLevel[hit_level]++; 868 869 // New SenderState for the memory access 870 X86ISA::GpuTLB::TranslationState *sender_state = 871 safe_cast<X86ISA::GpuTLB::TranslationState*>(pkt->senderState); 872 873 delete sender_state->tlbEntry; 874 delete sender_state->saved; 875 delete sender_state; 876 877 assert(pkt->req->hasPaddr()); 878 assert(pkt->req->hasSize()); 879 880 uint8_t *tmpData = pkt->getPtr<uint8_t>(); 881 882 // this is necessary because the GPU TLB receives packets instead 883 // of requests. when the translation is complete, all relevent 884 // fields in the request will be populated, but not in the packet. 885 // here we create the new packet so we can set the size, addr, 886 // and proper flags. 887 PacketPtr oldPkt = pkt; 888 pkt = new Packet(oldPkt->req, oldPkt->cmd); 889 delete oldPkt; 890 pkt->dataStatic(tmpData); 891 892 893 // New SenderState for the memory access 894 pkt->senderState = new ComputeUnit::DataPort::SenderState(gpuDynInst, 895 index, nullptr); 896 897 gpuDynInst->memStatusVector[pkt->getAddr()].push_back(index); 898 gpuDynInst->tlbHitLevel[index] = hit_level; 899 900 901 // translation is done. Schedule the mem_req_event at the 902 // appropriate cycle to send the timing memory request to ruby 903 ComputeUnit::DataPort::MemReqEvent *mem_req_event = 904 new ComputeUnit::DataPort::MemReqEvent(memPort[index], pkt); 905 906 DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x data " 907 "scheduled\n", cu_id, gpuDynInst->simdId, 908 gpuDynInst->wfSlotId, index, pkt->req->getPaddr()); 909 910 schedule(mem_req_event, curTick() + req_tick_latency); 911 } else if (tlbPort[tlbPort_index]->isStalled()) { 912 assert(tlbPort[tlbPort_index]->retries.size() > 0); 913 914 DPRINTF(GPUTLB, "CU%d: WF[%d][%d]: Translation for addr %#x " 915 "failed!\n", cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, 916 tmp_vaddr); 917 918 tlbPort[tlbPort_index]->retries.push_back(pkt); 919 } else if (!tlbPort[tlbPort_index]->sendTimingReq(pkt)) { 920 // Stall the data port; 921 // No more packet will be issued till 922 // ruby indicates resources are freed by 923 // a recvReqRetry() call back on this port. 924 tlbPort[tlbPort_index]->stallPort(); 925 926 DPRINTF(GPUTLB, "CU%d: WF[%d][%d]: Translation for addr %#x " 927 "failed!\n", cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, 928 tmp_vaddr); 929 930 tlbPort[tlbPort_index]->retries.push_back(pkt); 931 } else { 932 DPRINTF(GPUTLB, 933 "CU%d: WF[%d][%d]: Translation for addr %#x sent!\n", 934 cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, tmp_vaddr); 935 } 936 } else { 937 if (pkt->cmd == MemCmd::MemFenceReq) { 938 gpuDynInst->statusBitVector = VectorMask(0); 939 } else { 940 gpuDynInst->statusBitVector &= (~(1ll << index)); 941 } 942 943 // New SenderState for the memory access 944 delete pkt->senderState; 945 946 // Because it's atomic operation, only need TLB translation state 947 pkt->senderState = new TheISA::GpuTLB::TranslationState(TLB_mode, 948 shader->gpuTc); 949 950 tlbPort[tlbPort_index]->sendFunctional(pkt); 951 952 // the addr of the packet is not modified, so we need to create a new 953 // packet, or otherwise the memory access will have the old virtual 954 // address sent in the translation packet, instead of the physical 955 // address returned by the translation. 956 PacketPtr new_pkt = new Packet(pkt->req, pkt->cmd); 957 new_pkt->dataStatic(pkt->getPtr<uint8_t>()); 958 959 // Translation is done. It is safe to send the packet to memory. 960 memPort[0]->sendFunctional(new_pkt); 961 962 DPRINTF(GPUMem, "CU%d: WF[%d][%d]: index %d: addr %#x\n", cu_id, 963 gpuDynInst->simdId, gpuDynInst->wfSlotId, index, 964 new_pkt->req->getPaddr()); 965 966 // safe_cast the senderState 967 TheISA::GpuTLB::TranslationState *sender_state = 968 safe_cast<TheISA::GpuTLB::TranslationState*>(pkt->senderState); 969 970 delete sender_state->tlbEntry; 971 delete new_pkt; 972 delete pkt->senderState; 973 delete pkt->req; 974 delete pkt; 975 } 976} 977 978void 979ComputeUnit::sendSyncRequest(GPUDynInstPtr gpuDynInst, int index, PacketPtr pkt) 980{ 981 ComputeUnit::DataPort::MemReqEvent *mem_req_event = 982 new ComputeUnit::DataPort::MemReqEvent(memPort[index], pkt); 983 984 985 // New SenderState for the memory access 986 pkt->senderState = new ComputeUnit::DataPort::SenderState(gpuDynInst, index, 987 nullptr); 988 989 DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x sync scheduled\n", 990 cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, index, 991 pkt->req->getPaddr()); 992 993 schedule(mem_req_event, curTick() + req_tick_latency); 994} 995 996void 997ComputeUnit::injectGlobalMemFence(GPUDynInstPtr gpuDynInst, bool kernelLaunch, 998 Request* req) 999{ 1000 if (!req) { 1001 req = new Request(0, 0, 0, 0, masterId(), 0, gpuDynInst->wfDynId); 1002 } 1003 req->setPaddr(0); 1004 if (kernelLaunch) { 1005 req->setFlags(Request::KERNEL); 1006 } 1007 1008 gpuDynInst->s_type = SEG_GLOBAL; 1009 1010 // for non-kernel MemFence operations, memorder flags are set depending 1011 // on which type of request is currently being sent, so this 1012 // should be set by the caller (e.g. if an inst has acq-rel 1013 // semantics, it will send one acquire req an one release req) 1014 gpuDynInst->setRequestFlags(req, kernelLaunch); 1015 1016 // a mem fence must correspond to an acquire/release request 1017 assert(req->isAcquire() || req->isRelease()); 1018 1019 // create packet 1020 PacketPtr pkt = new Packet(req, MemCmd::MemFenceReq); 1021 1022 // set packet's sender state 1023 pkt->senderState = 1024 new ComputeUnit::DataPort::SenderState(gpuDynInst, 0, nullptr); 1025 1026 // send the packet 1027 sendSyncRequest(gpuDynInst, 0, pkt); 1028} 1029 1030const char* 1031ComputeUnit::DataPort::MemRespEvent::description() const 1032{ 1033 return "ComputeUnit memory response event"; 1034} 1035 1036void 1037ComputeUnit::DataPort::MemRespEvent::process() 1038{ 1039 DataPort::SenderState *sender_state = 1040 safe_cast<DataPort::SenderState*>(pkt->senderState); 1041 1042 GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst; 1043 ComputeUnit *compute_unit = dataPort->computeUnit; 1044 1045 assert(gpuDynInst); 1046 1047 DPRINTF(GPUPort, "CU%d: WF[%d][%d]: Response for addr %#x, index %d\n", 1048 compute_unit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, 1049 pkt->req->getPaddr(), dataPort->index); 1050 1051 Addr paddr = pkt->req->getPaddr(); 1052 1053 if (pkt->cmd != MemCmd::MemFenceResp) { 1054 int index = gpuDynInst->memStatusVector[paddr].back(); 1055 1056 DPRINTF(GPUMem, "Response for addr %#x, index %d\n", 1057 pkt->req->getPaddr(), index); 1058 1059 gpuDynInst->memStatusVector[paddr].pop_back(); 1060 gpuDynInst->pAddr = pkt->req->getPaddr(); 1061 1062 if (pkt->isRead() || pkt->isWrite()) { 1063 1064 if (gpuDynInst->n_reg <= MAX_REGS_FOR_NON_VEC_MEM_INST) { 1065 gpuDynInst->statusBitVector &= (~(1ULL << index)); 1066 } else { 1067 assert(gpuDynInst->statusVector[index] > 0); 1068 gpuDynInst->statusVector[index]--; 1069 1070 if (!gpuDynInst->statusVector[index]) 1071 gpuDynInst->statusBitVector &= (~(1ULL << index)); 1072 } 1073 1074 DPRINTF(GPUMem, "bitvector is now %#x\n", 1075 gpuDynInst->statusBitVector); 1076 1077 if (gpuDynInst->statusBitVector == VectorMask(0)) { 1078 auto iter = gpuDynInst->memStatusVector.begin(); 1079 auto end = gpuDynInst->memStatusVector.end(); 1080 1081 while (iter != end) { 1082 assert(iter->second.empty()); 1083 ++iter; 1084 } 1085 1086 gpuDynInst->memStatusVector.clear(); 1087 1088 if (gpuDynInst->n_reg > MAX_REGS_FOR_NON_VEC_MEM_INST) 1089 gpuDynInst->statusVector.clear(); 1090 1091 if (gpuDynInst->m_op == Enums::MO_LD || MO_A(gpuDynInst->m_op) 1092 || MO_ANR(gpuDynInst->m_op)) { 1093 assert(compute_unit->globalMemoryPipe.isGMLdRespFIFOWrRdy()); 1094 1095 compute_unit->globalMemoryPipe.getGMLdRespFIFO() 1096 .push(gpuDynInst); 1097 } else { 1098 assert(compute_unit->globalMemoryPipe.isGMStRespFIFOWrRdy()); 1099 1100 compute_unit->globalMemoryPipe.getGMStRespFIFO() 1101 .push(gpuDynInst); 1102 } 1103 1104 DPRINTF(GPUMem, "CU%d: WF[%d][%d]: packet totally complete\n", 1105 compute_unit->cu_id, gpuDynInst->simdId, 1106 gpuDynInst->wfSlotId); 1107 1108 // after clearing the status vectors, 1109 // see if there is a continuation to perform 1110 // the continuation may generate more work for 1111 // this memory request 1112 if (gpuDynInst->useContinuation) { 1113 assert(gpuDynInst->scope != Enums::MEMORY_SCOPE_NONE); 1114 gpuDynInst->execContinuation(gpuDynInst->staticInstruction(), 1115 gpuDynInst); 1116 } 1117 } 1118 } 1119 } else { 1120 gpuDynInst->statusBitVector = VectorMask(0); 1121 1122 if (gpuDynInst->useContinuation) { 1123 assert(gpuDynInst->scope != Enums::MEMORY_SCOPE_NONE); 1124 gpuDynInst->execContinuation(gpuDynInst->staticInstruction(), 1125 gpuDynInst); 1126 } 1127 } 1128 1129 delete pkt->senderState; 1130 delete pkt->req; 1131 delete pkt; 1132} 1133 1134ComputeUnit* 1135ComputeUnitParams::create() 1136{ 1137 return new ComputeUnit(this); 1138} 1139 1140bool 1141ComputeUnit::DTLBPort::recvTimingResp(PacketPtr pkt) 1142{ 1143 Addr line = pkt->req->getPaddr(); 1144 1145 DPRINTF(GPUTLB, "CU%d: DTLBPort received %#x->%#x\n", computeUnit->cu_id, 1146 pkt->req->getVaddr(), line); 1147 1148 assert(pkt->senderState); 1149 computeUnit->tlbCycles += curTick(); 1150 1151 // pop off the TLB translation state 1152 TheISA::GpuTLB::TranslationState *translation_state = 1153 safe_cast<TheISA::GpuTLB::TranslationState*>(pkt->senderState); 1154 1155 // no PageFaults are permitted for data accesses 1156 if (!translation_state->tlbEntry->valid) { 1157 DTLBPort::SenderState *sender_state = 1158 safe_cast<DTLBPort::SenderState*>(translation_state->saved); 1159 1160 Wavefront *w M5_VAR_USED = 1161 computeUnit->wfList[sender_state->_gpuDynInst->simdId] 1162 [sender_state->_gpuDynInst->wfSlotId]; 1163 1164 DPRINTFN("Wave %d couldn't tranlate vaddr %#x\n", w->wfDynId, 1165 pkt->req->getVaddr()); 1166 } 1167 1168 assert(translation_state->tlbEntry->valid); 1169 1170 // update the hitLevel distribution 1171 int hit_level = translation_state->hitLevel; 1172 computeUnit->hitsPerTLBLevel[hit_level]++; 1173 1174 delete translation_state->tlbEntry; 1175 assert(!translation_state->ports.size()); 1176 pkt->senderState = translation_state->saved; 1177 1178 // for prefetch pkt 1179 BaseTLB::Mode TLB_mode = translation_state->tlbMode; 1180 1181 delete translation_state; 1182 1183 // use the original sender state to know how to close this transaction 1184 DTLBPort::SenderState *sender_state = 1185 safe_cast<DTLBPort::SenderState*>(pkt->senderState); 1186 1187 GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst; 1188 int mp_index = sender_state->portIndex; 1189 Addr vaddr = pkt->req->getVaddr(); 1190 gpuDynInst->memStatusVector[line].push_back(mp_index); 1191 gpuDynInst->tlbHitLevel[mp_index] = hit_level; 1192 1193 MemCmd requestCmd; 1194 1195 if (pkt->cmd == MemCmd::ReadResp) { 1196 requestCmd = MemCmd::ReadReq; 1197 } else if (pkt->cmd == MemCmd::WriteResp) { 1198 requestCmd = MemCmd::WriteReq; 1199 } else if (pkt->cmd == MemCmd::SwapResp) { 1200 requestCmd = MemCmd::SwapReq; 1201 } else { 1202 panic("unsupported response to request conversion %s\n", 1203 pkt->cmd.toString()); 1204 } 1205 1206 if (computeUnit->prefetchDepth) { 1207 int simdId = gpuDynInst->simdId; 1208 int wfSlotId = gpuDynInst->wfSlotId; 1209 Addr last = 0; 1210 1211 switch(computeUnit->prefetchType) { 1212 case Enums::PF_CU: 1213 last = computeUnit->lastVaddrCU[mp_index]; 1214 break; 1215 case Enums::PF_PHASE: 1216 last = computeUnit->lastVaddrSimd[simdId][mp_index]; 1217 break; 1218 case Enums::PF_WF: 1219 last = computeUnit->lastVaddrWF[simdId][wfSlotId][mp_index]; 1220 default: 1221 break; 1222 } 1223 1224 DPRINTF(GPUPrefetch, "CU[%d][%d][%d][%d]: %#x was last\n", 1225 computeUnit->cu_id, simdId, wfSlotId, mp_index, last); 1226 1227 int stride = last ? (roundDown(vaddr, TheISA::PageBytes) - 1228 roundDown(last, TheISA::PageBytes)) >> TheISA::PageShift 1229 : 0; 1230 1231 DPRINTF(GPUPrefetch, "Stride is %d\n", stride); 1232 1233 computeUnit->lastVaddrCU[mp_index] = vaddr; 1234 computeUnit->lastVaddrSimd[simdId][mp_index] = vaddr; 1235 computeUnit->lastVaddrWF[simdId][wfSlotId][mp_index] = vaddr; 1236 1237 stride = (computeUnit->prefetchType == Enums::PF_STRIDE) ? 1238 computeUnit->prefetchStride: stride; 1239 1240 DPRINTF(GPUPrefetch, "%#x to: CU[%d][%d][%d][%d]\n", vaddr, 1241 computeUnit->cu_id, simdId, wfSlotId, mp_index); 1242 1243 DPRINTF(GPUPrefetch, "Prefetching from %#x:", vaddr); 1244 1245 // Prefetch Next few pages atomically 1246 for (int pf = 1; pf <= computeUnit->prefetchDepth; ++pf) { 1247 DPRINTF(GPUPrefetch, "%d * %d: %#x\n", pf, stride, 1248 vaddr+stride*pf*TheISA::PageBytes); 1249 1250 if (!stride) 1251 break; 1252 1253 Request *prefetch_req = new Request(0, vaddr + stride * pf * 1254 TheISA::PageBytes, 1255 sizeof(uint8_t), 0, 1256 computeUnit->masterId(), 1257 0, 0, 0); 1258 1259 PacketPtr prefetch_pkt = new Packet(prefetch_req, requestCmd); 1260 uint8_t foo = 0; 1261 prefetch_pkt->dataStatic(&foo); 1262 1263 // Because it's atomic operation, only need TLB translation state 1264 prefetch_pkt->senderState = 1265 new TheISA::GpuTLB::TranslationState(TLB_mode, 1266 computeUnit->shader->gpuTc, 1267 true); 1268 1269 // Currently prefetches are zero-latency, hence the sendFunctional 1270 sendFunctional(prefetch_pkt); 1271 1272 /* safe_cast the senderState */ 1273 TheISA::GpuTLB::TranslationState *tlb_state = 1274 safe_cast<TheISA::GpuTLB::TranslationState*>( 1275 prefetch_pkt->senderState); 1276 1277 1278 delete tlb_state->tlbEntry; 1279 delete tlb_state; 1280 delete prefetch_pkt->req; 1281 delete prefetch_pkt; 1282 } 1283 } 1284 1285 // First we must convert the response cmd back to a request cmd so that 1286 // the request can be sent through the cu's master port 1287 PacketPtr new_pkt = new Packet(pkt->req, requestCmd); 1288 new_pkt->dataStatic(pkt->getPtr<uint8_t>()); 1289 delete pkt->senderState; 1290 delete pkt; 1291 1292 // New SenderState for the memory access 1293 new_pkt->senderState = 1294 new ComputeUnit::DataPort::SenderState(gpuDynInst, mp_index, 1295 nullptr); 1296 1297 // translation is done. Schedule the mem_req_event at the appropriate 1298 // cycle to send the timing memory request to ruby 1299 ComputeUnit::DataPort::MemReqEvent *mem_req_event = 1300 new ComputeUnit::DataPort::MemReqEvent(computeUnit->memPort[mp_index], 1301 new_pkt); 1302 1303 DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x data scheduled\n", 1304 computeUnit->cu_id, gpuDynInst->simdId, 1305 gpuDynInst->wfSlotId, mp_index, new_pkt->req->getPaddr()); 1306 1307 computeUnit->schedule(mem_req_event, curTick() + 1308 computeUnit->req_tick_latency); 1309 1310 return true; 1311} 1312 1313const char* 1314ComputeUnit::DataPort::MemReqEvent::description() const 1315{ 1316 return "ComputeUnit memory request event"; 1317} 1318 1319void 1320ComputeUnit::DataPort::MemReqEvent::process() 1321{ 1322 SenderState *sender_state = safe_cast<SenderState*>(pkt->senderState); 1323 GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst; 1324 ComputeUnit *compute_unit M5_VAR_USED = dataPort->computeUnit; 1325 1326 if (!(dataPort->sendTimingReq(pkt))) { 1327 dataPort->retries.push_back(std::make_pair(pkt, gpuDynInst)); 1328 1329 DPRINTF(GPUPort, 1330 "CU%d: WF[%d][%d]: index %d, addr %#x data req failed!\n", 1331 compute_unit->cu_id, gpuDynInst->simdId, 1332 gpuDynInst->wfSlotId, dataPort->index, 1333 pkt->req->getPaddr()); 1334 } else { 1335 DPRINTF(GPUPort, 1336 "CU%d: WF[%d][%d]: index %d, addr %#x data req sent!\n", 1337 compute_unit->cu_id, gpuDynInst->simdId, 1338 gpuDynInst->wfSlotId, dataPort->index, 1339 pkt->req->getPaddr()); 1340 } 1341} 1342 1343/* 1344 * The initial translation request could have been rejected, 1345 * if <retries> queue is not Retry sending the translation 1346 * request. sendRetry() is called from the peer port whenever 1347 * a translation completes. 1348 */ 1349void 1350ComputeUnit::DTLBPort::recvReqRetry() 1351{ 1352 int len = retries.size(); 1353 1354 DPRINTF(GPUTLB, "CU%d: DTLB recvReqRetry - %d pending requests\n", 1355 computeUnit->cu_id, len); 1356 1357 assert(len > 0); 1358 assert(isStalled()); 1359 // recvReqRetry is an indication that the resource on which this 1360 // port was stalling on is freed. So, remove the stall first 1361 unstallPort(); 1362 1363 for (int i = 0; i < len; ++i) { 1364 PacketPtr pkt = retries.front(); 1365 Addr vaddr M5_VAR_USED = pkt->req->getVaddr(); 1366 DPRINTF(GPUTLB, "CU%d: retrying D-translaton for address%#x", vaddr); 1367 1368 if (!sendTimingReq(pkt)) { 1369 // Stall port 1370 stallPort(); 1371 DPRINTF(GPUTLB, ": failed again\n"); 1372 break; 1373 } else { 1374 DPRINTF(GPUTLB, ": successful\n"); 1375 retries.pop_front(); 1376 } 1377 } 1378} 1379 1380bool 1381ComputeUnit::ITLBPort::recvTimingResp(PacketPtr pkt) 1382{ 1383 Addr line M5_VAR_USED = pkt->req->getPaddr(); 1384 DPRINTF(GPUTLB, "CU%d: ITLBPort received %#x->%#x\n", 1385 computeUnit->cu_id, pkt->req->getVaddr(), line); 1386 1387 assert(pkt->senderState); 1388 1389 // pop off the TLB translation state 1390 TheISA::GpuTLB::TranslationState *translation_state = 1391 safe_cast<TheISA::GpuTLB::TranslationState*>(pkt->senderState); 1392 1393 bool success = translation_state->tlbEntry->valid; 1394 delete translation_state->tlbEntry; 1395 assert(!translation_state->ports.size()); 1396 pkt->senderState = translation_state->saved; 1397 delete translation_state; 1398 1399 // use the original sender state to know how to close this transaction 1400 ITLBPort::SenderState *sender_state = 1401 safe_cast<ITLBPort::SenderState*>(pkt->senderState); 1402 1403 // get the wavefront associated with this translation request 1404 Wavefront *wavefront = sender_state->wavefront; 1405 delete pkt->senderState; 1406 1407 if (success) { 1408 // pkt is reused in fetch(), don't delete it here. However, we must 1409 // reset the command to be a request so that it can be sent through 1410 // the cu's master port 1411 assert(pkt->cmd == MemCmd::ReadResp); 1412 pkt->cmd = MemCmd::ReadReq; 1413 1414 computeUnit->fetchStage.fetch(pkt, wavefront); 1415 } else { 1416 if (wavefront->dropFetch) { 1417 assert(wavefront->instructionBuffer.empty()); 1418 wavefront->dropFetch = false; 1419 } 1420 1421 wavefront->pendingFetch = 0; 1422 } 1423 1424 return true; 1425} 1426 1427/* 1428 * The initial translation request could have been rejected, if 1429 * <retries> queue is not empty. Retry sending the translation 1430 * request. sendRetry() is called from the peer port whenever 1431 * a translation completes. 1432 */ 1433void 1434ComputeUnit::ITLBPort::recvReqRetry() 1435{ 1436 1437 int len = retries.size(); 1438 DPRINTF(GPUTLB, "CU%d: ITLB recvReqRetry - %d pending requests\n", len); 1439 1440 assert(len > 0); 1441 assert(isStalled()); 1442 1443 // recvReqRetry is an indication that the resource on which this 1444 // port was stalling on is freed. So, remove the stall first 1445 unstallPort(); 1446 1447 for (int i = 0; i < len; ++i) { 1448 PacketPtr pkt = retries.front(); 1449 Addr vaddr M5_VAR_USED = pkt->req->getVaddr(); 1450 DPRINTF(GPUTLB, "CU%d: retrying I-translaton for address%#x", vaddr); 1451 1452 if (!sendTimingReq(pkt)) { 1453 stallPort(); // Stall port 1454 DPRINTF(GPUTLB, ": failed again\n"); 1455 break; 1456 } else { 1457 DPRINTF(GPUTLB, ": successful\n"); 1458 retries.pop_front(); 1459 } 1460 } 1461} 1462 1463void 1464ComputeUnit::regStats() 1465{ 1466 MemObject::regStats(); 1467 1468 tlbCycles 1469 .name(name() + ".tlb_cycles") 1470 .desc("total number of cycles for all uncoalesced requests") 1471 ; 1472 1473 tlbRequests 1474 .name(name() + ".tlb_requests") 1475 .desc("number of uncoalesced requests") 1476 ; 1477 1478 tlbLatency 1479 .name(name() + ".avg_translation_latency") 1480 .desc("Avg. translation latency for data translations") 1481 ; 1482 1483 tlbLatency = tlbCycles / tlbRequests; 1484 1485 hitsPerTLBLevel 1486 .init(4) 1487 .name(name() + ".TLB_hits_distribution") 1488 .desc("TLB hits distribution (0 for page table, x for Lx-TLB") 1489 ; 1490 1491 // fixed number of TLB levels 1492 for (int i = 0; i < 4; ++i) { 1493 if (!i) 1494 hitsPerTLBLevel.subname(i,"page_table"); 1495 else 1496 hitsPerTLBLevel.subname(i, csprintf("L%d_TLB",i)); 1497 } 1498 1499 execRateDist 1500 .init(0, 10, 2) 1501 .name(name() + ".inst_exec_rate") 1502 .desc("Instruction Execution Rate: Number of executed vector " 1503 "instructions per cycle") 1504 ; 1505 1506 ldsBankConflictDist 1507 .init(0, wfSize(), 2) 1508 .name(name() + ".lds_bank_conflicts") 1509 .desc("Number of bank conflicts per LDS memory packet") 1510 ; 1511 1512 ldsBankAccesses 1513 .name(name() + ".lds_bank_access_cnt") 1514 .desc("Total number of LDS bank accesses") 1515 ; 1516 1517 pageDivergenceDist 1518 // A wavefront can touch up to N pages per memory instruction where 1519 // N is equal to the wavefront size 1520 // The number of pages per bin can be configured (here it's 4). 1521 .init(1, wfSize(), 4) 1522 .name(name() + ".page_divergence_dist") 1523 .desc("pages touched per wf (over all mem. instr.)") 1524 ; 1525 1526 controlFlowDivergenceDist 1527 .init(1, wfSize(), 4) 1528 .name(name() + ".warp_execution_dist") 1529 .desc("number of lanes active per instruction (oval all instructions)") 1530 ; 1531 1532 activeLanesPerGMemInstrDist 1533 .init(1, wfSize(), 4) 1534 .name(name() + ".gmem_lanes_execution_dist") 1535 .desc("number of active lanes per global memory instruction") 1536 ; 1537 1538 activeLanesPerLMemInstrDist 1539 .init(1, wfSize(), 4) 1540 .name(name() + ".lmem_lanes_execution_dist") 1541 .desc("number of active lanes per local memory instruction") 1542 ; 1543 1544 numInstrExecuted 1545 .name(name() + ".num_instr_executed") 1546 .desc("number of instructions executed") 1547 ; 1548 1549 numVecOpsExecuted 1550 .name(name() + ".num_vec_ops_executed") 1551 .desc("number of vec ops executed (e.g. WF size/inst)") 1552 ; 1553 1554 totalCycles 1555 .name(name() + ".num_total_cycles") 1556 .desc("number of cycles the CU ran for") 1557 ; 1558 1559 ipc 1560 .name(name() + ".ipc") 1561 .desc("Instructions per cycle (this CU only)") 1562 ; 1563 1564 vpc 1565 .name(name() + ".vpc") 1566 .desc("Vector Operations per cycle (this CU only)") 1567 ; 1568 1569 numALUInstsExecuted 1570 .name(name() + ".num_alu_insts_executed") 1571 .desc("Number of dynamic non-GM memory insts executed") 1572 ; 1573 1574 wgBlockedDueLdsAllocation 1575 .name(name() + ".wg_blocked_due_lds_alloc") 1576 .desc("Workgroup blocked due to LDS capacity") 1577 ; 1578 1579 ipc = numInstrExecuted / totalCycles; 1580 vpc = numVecOpsExecuted / totalCycles; 1581 1582 numTimesWgBlockedDueVgprAlloc 1583 .name(name() + ".times_wg_blocked_due_vgpr_alloc") 1584 .desc("Number of times WGs are blocked due to VGPR allocation per SIMD") 1585 ; 1586 1587 dynamicGMemInstrCnt 1588 .name(name() + ".global_mem_instr_cnt") 1589 .desc("dynamic global memory instructions count") 1590 ; 1591 1592 dynamicLMemInstrCnt 1593 .name(name() + ".local_mem_instr_cnt") 1594 .desc("dynamic local memory intruction count") 1595 ; 1596 1597 numALUInstsExecuted = numInstrExecuted - dynamicGMemInstrCnt - 1598 dynamicLMemInstrCnt; 1599 1600 completedWfs 1601 .name(name() + ".num_completed_wfs") 1602 .desc("number of completed wavefronts") 1603 ; 1604 1605 numCASOps 1606 .name(name() + ".num_CAS_ops") 1607 .desc("number of compare and swap operations") 1608 ; 1609 1610 numFailedCASOps 1611 .name(name() + ".num_failed_CAS_ops") 1612 .desc("number of compare and swap operations that failed") 1613 ; 1614 1615 // register stats of pipeline stages 1616 fetchStage.regStats(); 1617 scoreboardCheckStage.regStats(); 1618 scheduleStage.regStats(); 1619 execStage.regStats(); 1620 1621 // register stats of memory pipeline 1622 globalMemoryPipe.regStats(); 1623 localMemoryPipe.regStats(); 1624} 1625 1626void 1627ComputeUnit::updatePageDivergenceDist(Addr addr) 1628{ 1629 Addr virt_page_addr = roundDown(addr, TheISA::PageBytes); 1630 1631 if (!pagesTouched.count(virt_page_addr)) 1632 pagesTouched[virt_page_addr] = 1; 1633 else 1634 pagesTouched[virt_page_addr]++; 1635} 1636 1637void 1638ComputeUnit::CUExitCallback::process() 1639{ 1640 if (computeUnit->countPages) { 1641 std::ostream *page_stat_file = 1642 simout.create(computeUnit->name().c_str())->stream(); 1643 1644 *page_stat_file << "page, wavefront accesses, workitem accesses" << 1645 std::endl; 1646 1647 for (auto iter : computeUnit->pageAccesses) { 1648 *page_stat_file << std::hex << iter.first << ","; 1649 *page_stat_file << std::dec << iter.second.first << ","; 1650 *page_stat_file << std::dec << iter.second.second << std::endl; 1651 } 1652 } 1653 } 1654 1655bool 1656ComputeUnit::isDone() const 1657{ 1658 for (int i = 0; i < numSIMDs; ++i) { 1659 if (!isSimdDone(i)) { 1660 return false; 1661 } 1662 } 1663 1664 bool glbMemBusRdy = true; 1665 for (int j = 0; j < numGlbMemUnits; ++j) { 1666 glbMemBusRdy &= vrfToGlobalMemPipeBus[j].rdy(); 1667 } 1668 bool locMemBusRdy = true; 1669 for (int j = 0; j < numLocMemUnits; ++j) { 1670 locMemBusRdy &= vrfToLocalMemPipeBus[j].rdy(); 1671 } 1672 1673 if (!globalMemoryPipe.isGMLdRespFIFOWrRdy() || 1674 !globalMemoryPipe.isGMStRespFIFOWrRdy() || 1675 !globalMemoryPipe.isGMReqFIFOWrRdy() || !localMemoryPipe.isLMReqFIFOWrRdy() 1676 || !localMemoryPipe.isLMRespFIFOWrRdy() || !locMemToVrfBus.rdy() || 1677 !glbMemToVrfBus.rdy() || !locMemBusRdy || !glbMemBusRdy) { 1678 return false; 1679 } 1680 1681 return true; 1682} 1683 1684int32_t 1685ComputeUnit::getRefCounter(const uint32_t dispatchId, const uint32_t wgId) const 1686{ 1687 return lds.getRefCounter(dispatchId, wgId); 1688} 1689 1690bool 1691ComputeUnit::isSimdDone(uint32_t simdId) const 1692{ 1693 assert(simdId < numSIMDs); 1694 1695 for (int i=0; i < numGlbMemUnits; ++i) { 1696 if (!vrfToGlobalMemPipeBus[i].rdy()) 1697 return false; 1698 } 1699 for (int i=0; i < numLocMemUnits; ++i) { 1700 if (!vrfToLocalMemPipeBus[i].rdy()) 1701 return false; 1702 } 1703 if (!aluPipe[simdId].rdy()) { 1704 return false; 1705 } 1706 1707 for (int i_wf = 0; i_wf < shader->n_wf; ++i_wf){ 1708 if (wfList[simdId][i_wf]->status != Wavefront::S_STOPPED) { 1709 return false; 1710 } 1711 } 1712 1713 return true; 1714} 1715 1716/** 1717 * send a general request to the LDS 1718 * make sure to look at the return value here as your request might be 1719 * NACK'd and returning false means that you have to have some backup plan 1720 */ 1721bool 1722ComputeUnit::sendToLds(GPUDynInstPtr gpuDynInst) 1723{ 1724 // this is just a request to carry the GPUDynInstPtr 1725 // back and forth 1726 Request *newRequest = new Request(); 1727 newRequest->setPaddr(0x0); 1728 1729 // ReadReq is not evaluted by the LDS but the Packet ctor requires this 1730 PacketPtr newPacket = new Packet(newRequest, MemCmd::ReadReq); 1731 1732 // This is the SenderState needed upon return 1733 newPacket->senderState = new LDSPort::SenderState(gpuDynInst); 1734 1735 return ldsPort->sendTimingReq(newPacket); 1736} 1737 1738/** 1739 * get the result of packets sent to the LDS when they return 1740 */ 1741bool 1742ComputeUnit::LDSPort::recvTimingResp(PacketPtr packet) 1743{ 1744 const ComputeUnit::LDSPort::SenderState *senderState = 1745 dynamic_cast<ComputeUnit::LDSPort::SenderState *>(packet->senderState); 1746 1747 fatal_if(!senderState, "did not get the right sort of sender state"); 1748 1749 GPUDynInstPtr gpuDynInst = senderState->getMemInst(); 1750 1751 delete packet->senderState; 1752 delete packet->req; 1753 delete packet; 1754 1755 computeUnit->localMemoryPipe.getLMRespFIFO().push(gpuDynInst); 1756 return true; 1757} 1758 1759/** 1760 * attempt to send this packet, either the port is already stalled, the request 1761 * is nack'd and must stall or the request goes through 1762 * when a request cannot be sent, add it to the retries queue 1763 */ 1764bool 1765ComputeUnit::LDSPort::sendTimingReq(PacketPtr pkt) 1766{ 1767 ComputeUnit::LDSPort::SenderState *sender_state = 1768 dynamic_cast<ComputeUnit::LDSPort::SenderState*>(pkt->senderState); 1769 fatal_if(!sender_state, "packet without a valid sender state"); 1770 1771 GPUDynInstPtr gpuDynInst M5_VAR_USED = sender_state->getMemInst(); 1772 1773 if (isStalled()) { 1774 fatal_if(retries.empty(), "must have retries waiting to be stalled"); 1775 1776 retries.push(pkt); 1777 1778 DPRINTF(GPUPort, "CU%d: WF[%d][%d]: LDS send failed!\n", 1779 computeUnit->cu_id, gpuDynInst->simdId, 1780 gpuDynInst->wfSlotId); 1781 return false; 1782 } else if (!MasterPort::sendTimingReq(pkt)) { 1783 // need to stall the LDS port until a recvReqRetry() is received 1784 // this indicates that there is more space 1785 stallPort(); 1786 retries.push(pkt); 1787 1788 DPRINTF(GPUPort, "CU%d: WF[%d][%d]: addr %#x lds req failed!\n", 1789 computeUnit->cu_id, gpuDynInst->simdId, 1790 gpuDynInst->wfSlotId, pkt->req->getPaddr()); 1791 return false; 1792 } else { 1793 DPRINTF(GPUPort, "CU%d: WF[%d][%d]: addr %#x lds req sent!\n", 1794 computeUnit->cu_id, gpuDynInst->simdId, 1795 gpuDynInst->wfSlotId, pkt->req->getPaddr()); 1796 return true; 1797 } 1798} 1799 1800/** 1801 * the bus is telling the port that there is now space so retrying stalled 1802 * requests should work now 1803 * this allows the port to have a request be nack'd and then have the receiver 1804 * say when there is space, rather than simply retrying the send every cycle 1805 */ 1806void 1807ComputeUnit::LDSPort::recvReqRetry() 1808{ 1809 auto queueSize = retries.size(); 1810 1811 DPRINTF(GPUPort, "CU%d: LDSPort recvReqRetry - %d pending requests\n", 1812 computeUnit->cu_id, queueSize); 1813 1814 fatal_if(queueSize < 1, 1815 "why was there a recvReqRetry() with no pending reqs?"); 1816 fatal_if(!isStalled(), 1817 "recvReqRetry() happened when the port was not stalled"); 1818 1819 unstallPort(); 1820 1821 while (!retries.empty()) { 1822 PacketPtr packet = retries.front(); 1823 1824 DPRINTF(GPUPort, "CU%d: retrying LDS send\n", computeUnit->cu_id); 1825 1826 if (!MasterPort::sendTimingReq(packet)) { 1827 // Stall port 1828 stallPort(); 1829 DPRINTF(GPUPort, ": LDS send failed again\n"); 1830 break; 1831 } else { 1832 DPRINTF(GPUTLB, ": LDS send successful\n"); 1833 retries.pop(); 1834 } 1835 } 1836} 1837