compute_unit.cc revision 11638:b511733958d0
/*
 * Copyright (c) 2011-2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * For use for simulation and test purposes only
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: John Kalamatianos, Anthony Gutierrez
 */

#include "gpu-compute/compute_unit.hh"

#include <limits>

#include "base/output.hh"
#include "debug/GPUDisp.hh"
#include "debug/GPUExec.hh"
#include "debug/GPUFetch.hh"
#include "debug/GPUMem.hh"
#include "debug/GPUPort.hh"
#include "debug/GPUPrefetch.hh"
#include "debug/GPUSync.hh"
#include "debug/GPUTLB.hh"
#include "gpu-compute/dispatcher.hh"
#include "gpu-compute/gpu_dyn_inst.hh"
#include "gpu-compute/gpu_static_inst.hh"
#include "gpu-compute/ndrange.hh"
#include "gpu-compute/shader.hh"
#include "gpu-compute/simple_pool_manager.hh"
#include "gpu-compute/vector_register_file.hh"
#include "gpu-compute/wavefront.hh"
#include "mem/page_table.hh"
#include "sim/process.hh"

ComputeUnit::ComputeUnit(const Params *p) : MemObject(p), fetchStage(p),
    scoreboardCheckStage(p), scheduleStage(p), execStage(p),
    globalMemoryPipe(p), localMemoryPipe(p), rrNextMemID(0), rrNextALUWp(0),
    cu_id(p->cu_id), vrf(p->vector_register_file), numSIMDs(p->num_SIMDs),
    spBypassPipeLength(p->spbypass_pipe_length),
    dpBypassPipeLength(p->dpbypass_pipe_length),
    issuePeriod(p->issue_period),
    numGlbMemUnits(p->num_global_mem_pipes),
    numLocMemUnits(p->num_shared_mem_pipes),
    perLaneTLB(p->perLaneTLB), prefetchDepth(p->prefetch_depth),
    prefetchStride(p->prefetch_stride), prefetchType(p->prefetch_prev_type),
    xact_cas_mode(p->xactCasMode), debugSegFault(p->debugSegFault),
    functionalTLB(p->functionalTLB), localMemBarrier(p->localMemBarrier),
    countPages(p->countPages), barrier_id(0),
    vrfToCoalescerBusWidth(p->vrf_to_coalescer_bus_width),
    coalescerToVrfBusWidth(p->coalescer_to_vrf_bus_width),
    req_tick_latency(p->mem_req_latency * p->clk_domain->clockPeriod()),
    resp_tick_latency(p->mem_resp_latency * p->clk_domain->clockPeriod()),
    _masterId(p->system->getMasterId(name() + ".ComputeUnit")),
    lds(*p->localDataStore), globalSeqNum(0), wavefrontSize(p->wfSize)
{
    /**
     * This check is necessary because std::bitset only provides conversion
     * to unsigned long or unsigned long long via to_ulong() or to_ullong().
     * There are a few places in the code where to_ullong() is used; however,
     * if the wavefront size is larger than the host can support, bitset will
     * throw a runtime exception. We should remove all use of to_ulong() and
     * to_ullong() so we can have wavefront sizes greater than 64b, but until
     * that is done this check is required.
     */
    fatal_if(p->wfSize > std::numeric_limits<unsigned long long>::digits ||
             p->wfSize <= 0,
             "WF size is larger than the host can support");
    fatal_if(!isPowerOf2(wavefrontSize),
             "Wavefront size should be a power of 2");
    // calculate how many cycles a vector load or store will need to transfer
    // its data over the corresponding buses
    numCyclesPerStoreTransfer =
        (uint32_t)ceil((double)(wfSize() * sizeof(uint32_t)) /
                       (double)vrfToCoalescerBusWidth);

    numCyclesPerLoadTransfer = (wfSize() * sizeof(uint32_t))
                               / coalescerToVrfBusWidth;

    lastVaddrWF.resize(numSIMDs);
    wfList.resize(numSIMDs);

    for (int j = 0; j < numSIMDs; ++j) {
        lastVaddrWF[j].resize(p->n_wf);

        for (int i = 0; i < p->n_wf; ++i) {
            lastVaddrWF[j][i].resize(wfSize());

            wfList[j].push_back(p->wavefronts[j * p->n_wf + i]);
            wfList[j][i]->setParent(this);

            for (int k = 0; k < wfSize(); ++k) {
                lastVaddrWF[j][i][k] = 0;
            }
        }
    }

    lastVaddrSimd.resize(numSIMDs);

    for (int i = 0; i < numSIMDs; ++i) {
        lastVaddrSimd[i].resize(wfSize(), 0);
    }

    lastVaddrCU.resize(wfSize());

    lds.setParent(this);

    if (p->execPolicy == "OLDEST-FIRST") {
        exec_policy = EXEC_POLICY::OLDEST;
    } else if (p->execPolicy == "ROUND-ROBIN") {
        exec_policy = EXEC_POLICY::RR;
    } else {
        fatal("Invalid WF execution policy (CU)\n");
    }

    memPort.resize(wfSize());

    // resize the tlbPort vectorArray
    int tlbPort_width = perLaneTLB ? wfSize() : 1;
    tlbPort.resize(tlbPort_width);

    cuExitCallback = new CUExitCallback(this);
    registerExitCallback(cuExitCallback);

    xactCasLoadMap.clear();
    lastExecCycle.resize(numSIMDs, 0);

    for (int i = 0; i < vrf.size(); ++i) {
        vrf[i]->setParent(this);
    }

    numVecRegsPerSimd = vrf[0]->numRegs();
}

ComputeUnit::~ComputeUnit()
{
    // Delete wavefront slots
    for (int j = 0; j < numSIMDs; ++j) {
        for (int i = 0; i < shader->n_wf; ++i) {
            delete wfList[j][i];
        }
        lastVaddrSimd[j].clear();
    }
    lastVaddrCU.clear();
    readyList.clear();
    waveStatusList.clear();
    dispatchList.clear();
    vectorAluInstAvail.clear();
    delete cuExitCallback;
    delete ldsPort;
}

void
ComputeUnit::FillKernelState(Wavefront *w, NDRange *ndr)
{
    w->resizeRegFiles(ndr->q.cRegCount, ndr->q.sRegCount, ndr->q.dRegCount);

    w->workgroupsz[0] = ndr->q.wgSize[0];
    w->workgroupsz[1] = ndr->q.wgSize[1];
    w->workgroupsz[2] = ndr->q.wgSize[2];
    w->wg_sz = w->workgroupsz[0] * w->workgroupsz[1] * w->workgroupsz[2];
    w->gridsz[0] = ndr->q.gdSize[0];
    w->gridsz[1] = ndr->q.gdSize[1];
    w->gridsz[2] = ndr->q.gdSize[2];
    w->kernelArgs = ndr->q.args;
    w->privSizePerItem = ndr->q.privMemPerItem;
    w->spillSizePerItem = ndr->q.spillMemPerItem;
    w->roBase = ndr->q.roMemStart;
    w->roSize = ndr->q.roMemTotal;
}

void
ComputeUnit::updateEvents() {

    if (!timestampVec.empty()) {
        uint32_t vecSize = timestampVec.size();
        uint32_t i = 0;
        while (i < vecSize) {
            if (timestampVec[i] <= shader->tick_cnt) {
                std::pair<uint32_t, uint32_t> regInfo = regIdxVec[i];
                vrf[regInfo.first]->markReg(regInfo.second, sizeof(uint32_t),
                                            statusVec[i]);
                timestampVec.erase(timestampVec.begin() + i);
                regIdxVec.erase(regIdxVec.begin() + i);
                statusVec.erase(statusVec.begin() + i);
                --vecSize;
                --i;
            }
            ++i;
        }
    }

    for (int i = 0; i < numSIMDs; ++i) {
        vrf[i]->updateEvents();
    }
}


void
ComputeUnit::StartWF(Wavefront *w, int trueWgSize[], int trueWgSizeTotal,
                     int cnt, LdsChunk *ldsChunk, NDRange *ndr)
{
    static int _n_wave = 0;

    // Fill in Kernel state
    FillKernelState(w, ndr);

    VectorMask init_mask;
    init_mask.reset();

    for (int k = 0; k < wfSize(); ++k) {
        if (k + cnt * wfSize() < trueWgSizeTotal)
            init_mask[k] = 1;
    }

    w->kern_id = ndr->dispatchId;
    w->dynwaveid = cnt;
    w->init_mask = init_mask.to_ullong();

    for (int k = 0; k < wfSize(); ++k) {
        w->workitemid[0][k] = (k + cnt * wfSize()) % trueWgSize[0];
        w->workitemid[1][k] =
            ((k + cnt * wfSize()) / trueWgSize[0]) % trueWgSize[1];
        w->workitemid[2][k] =
            (k + cnt * wfSize()) / (trueWgSize[0] * trueWgSize[1]);

        w->workitemFlatId[k] = w->workitemid[2][k] * trueWgSize[0] *
            trueWgSize[1] + w->workitemid[1][k] * trueWgSize[0] +
            w->workitemid[0][k];
    }

    w->barrier_slots = divCeil(trueWgSizeTotal, wfSize());

    w->bar_cnt.resize(wfSize(), 0);

    w->max_bar_cnt = 0;
    w->old_barrier_cnt = 0;
    w->barrier_cnt = 0;

    w->privBase = ndr->q.privMemStart;
    ndr->q.privMemStart += ndr->q.privMemPerItem * wfSize();

    w->spillBase = ndr->q.spillMemStart;
    ndr->q.spillMemStart += ndr->q.spillMemPerItem * wfSize();

    w->pushToReconvergenceStack(0, UINT32_MAX, init_mask.to_ulong());

    // WG state
    w->wg_id = ndr->globalWgId;
    w->dispatchid = ndr->dispatchId;
    w->workgroupid[0] = w->wg_id % ndr->numWg[0];
    w->workgroupid[1] = (w->wg_id / ndr->numWg[0]) % ndr->numWg[1];
    w->workgroupid[2] = w->wg_id / (ndr->numWg[0] * ndr->numWg[1]);

    w->barrier_id = barrier_id;
    w->stalledAtBarrier = false;

    // set the wavefront context to have a pointer to this section of the LDS
    w->ldsChunk = ldsChunk;

    int32_t refCount M5_VAR_USED =
        lds.increaseRefCounter(w->dispatchid, w->wg_id);
    DPRINTF(GPUDisp, "CU%d: increase ref ctr wg[%d] to [%d]\n",
            cu_id, w->wg_id, refCount);

    w->instructionBuffer.clear();

    if (w->pendingFetch)
        w->dropFetch = true;

    // is this the last wavefront in the workgroup?
    // if so, set the spillWidth to the remaining work-items
    // so that the vector access is correct
    if ((cnt + 1) * wfSize() >= trueWgSizeTotal) {
        w->spillWidth = trueWgSizeTotal - (cnt * wfSize());
    } else {
        w->spillWidth = wfSize();
    }

    DPRINTF(GPUDisp, "Scheduling wfDynId/barrier_id %d/%d on CU%d: "
            "WF[%d][%d]\n", _n_wave, barrier_id, cu_id, w->simdId, w->wfSlotId);

    w->start(++_n_wave, ndr->q.code_ptr);
}

void
ComputeUnit::StartWorkgroup(NDRange *ndr)
{
    // reserve the LDS capacity allocated to the work group
    // disambiguated by the dispatch ID and workgroup ID, which should be
    // globally unique
    LdsChunk *ldsChunk = lds.reserveSpace(ndr->dispatchId, ndr->globalWgId,
                                          ndr->q.ldsSize);

    // Send L1 cache acquire
    // isKernel + isAcquire = Kernel Begin
    if (shader->impl_kern_boundary_sync) {
        GPUDynInstPtr gpuDynInst = std::make_shared<GPUDynInst>(this,
                                                                nullptr,
                                                                nullptr, 0);

        gpuDynInst->useContinuation = false;
        gpuDynInst->memoryOrder = Enums::MEMORY_ORDER_SC_ACQUIRE;
        gpuDynInst->scope = Enums::MEMORY_SCOPE_SYSTEM;
        injectGlobalMemFence(gpuDynInst, true);
    }

    // Get true size of workgroup (after clamping to grid size)
    int trueWgSize[3];
    int trueWgSizeTotal = 1;

    for (int d = 0; d < 3; ++d) {
        trueWgSize[d] = std::min(ndr->q.wgSize[d], ndr->q.gdSize[d] -
                                 ndr->wgId[d] * ndr->q.wgSize[d]);

        trueWgSizeTotal *= trueWgSize[d];
    }

    // calculate the number of 32-bit vector registers required by wavefront
    int vregDemand = ndr->q.sRegCount + (2 * ndr->q.dRegCount);
    int cnt = 0;

    // Assign WFs by spreading them across SIMDs, 1 WF per SIMD at a time
    for (int m = 0; m < shader->n_wf * numSIMDs; ++m) {
        Wavefront *w = wfList[m % numSIMDs][m / numSIMDs];
        // Check if this wavefront slot is available:
        // it must be stopped and not waiting
        // for a release to complete (S_RETURNING)
        if (w->status == Wavefront::S_STOPPED) {
            // if we have scheduled all work items then stop
            // scheduling wavefronts
            if (cnt * wfSize() >= trueWgSizeTotal)
                break;

            // reserve vector registers for the scheduled wavefront
            assert(vectorRegsReserved[m % numSIMDs] <= numVecRegsPerSimd);
            uint32_t normSize = 0;

            w->startVgprIndex = vrf[m % numSIMDs]->manager->
                allocateRegion(vregDemand, &normSize);

            w->reservedVectorRegs = normSize;
            vectorRegsReserved[m % numSIMDs] += w->reservedVectorRegs;

            StartWF(w, trueWgSize, trueWgSizeTotal, cnt, ldsChunk, ndr);
            ++cnt;
        }
    }
    ++barrier_id;
}

int
ComputeUnit::ReadyWorkgroup(NDRange *ndr)
{
    // Get true size of workgroup (after clamping to grid size)
    int trueWgSize[3];
    int trueWgSizeTotal = 1;

    for (int d = 0; d < 3; ++d) {
        trueWgSize[d] = std::min(ndr->q.wgSize[d], ndr->q.gdSize[d] -
                                 ndr->wgId[d] * ndr->q.wgSize[d]);

        trueWgSizeTotal *= trueWgSize[d];
        DPRINTF(GPUDisp, "trueWgSize[%d] = %d\n", d, trueWgSize[d]);
    }

    DPRINTF(GPUDisp, "trueWgSizeTotal = %d\n", trueWgSizeTotal);

    // calculate the number of 32-bit vector registers required by each
    // work item of the work group
    int vregDemandPerWI = ndr->q.sRegCount + (2 * ndr->q.dRegCount);
    bool vregAvail = true;
    int numWfs = (trueWgSizeTotal + wfSize() - 1) / wfSize();
    int freeWfSlots = 0;
    // check that the total number of VGPRs required by all WFs of the WG
    // fits in the VRFs of all SIMD units
    assert((numWfs * vregDemandPerWI) <= (numSIMDs * numVecRegsPerSimd));
    int numMappedWfs = 0;
    std::vector<int> numWfsPerSimd;
    numWfsPerSimd.resize(numSIMDs, 0);
    // find how many free WF slots we have across all SIMDs
    for (int j = 0; j < shader->n_wf; ++j) {
        for (int i = 0; i < numSIMDs; ++i) {
            if (wfList[i][j]->status == Wavefront::S_STOPPED) {
                // count the number of free WF slots
                ++freeWfSlots;
                if (numMappedWfs < numWfs) {
                    // count the WFs to be assigned per SIMD
                    numWfsPerSimd[i]++;
                }
                numMappedWfs++;
            }
        }
    }

    // if there are enough free WF slots then find if there are enough
    // free VGPRs per SIMD based on the WF->SIMD mapping
    if (freeWfSlots >= numWfs) {
        for (int j = 0; j < numSIMDs; ++j) {
            // find if there are enough free VGPR regions in the SIMD's VRF
            // to accommodate the WFs of the new WG that would be mapped to
            // this SIMD unit
            vregAvail = vrf[j]->manager->canAllocate(numWfsPerSimd[j],
                                                     vregDemandPerWI);

            // stop searching if there is at least one SIMD
            // whose VRF does not have enough free VGPR pools.
            // This is because a WG is scheduled only if ALL
            // of its WFs can be scheduled
            if (!vregAvail)
                break;
        }
    }

    DPRINTF(GPUDisp, "Free WF slots = %d, VGPR Availability = %d\n",
            freeWfSlots, vregAvail);

    if (!vregAvail) {
        ++numTimesWgBlockedDueVgprAlloc;
    }

    // Return true if enough WF slots to submit workgroup and if there are
    // enough VGPRs to schedule all WFs to their SIMD units
    if (!lds.canReserve(ndr->q.ldsSize)) {
        wgBlockedDueLdsAllocation++;
    }

    // Return true if (a) there are enough free WF slots to submit the
    // workgroup, (b) there are enough VGPRs to schedule all WFs to their
    // SIMD units, and (c) there is enough space in the LDS
    return freeWfSlots >= numWfs && vregAvail && lds.canReserve(ndr->q.ldsSize);
}

int
ComputeUnit::AllAtBarrier(uint32_t _barrier_id, uint32_t bcnt, uint32_t bslots)
{
    DPRINTF(GPUSync, "CU%d: Checking for All At Barrier\n", cu_id);
    int ccnt = 0;

    for (int i_simd = 0; i_simd < numSIMDs; ++i_simd) {
        for (int i_wf = 0; i_wf < shader->n_wf; ++i_wf) {
            Wavefront *w = wfList[i_simd][i_wf];

            if (w->status == Wavefront::S_RUNNING) {
                DPRINTF(GPUSync, "Checking WF[%d][%d]\n", i_simd, i_wf);

                DPRINTF(GPUSync, "wf->barrier_id = %d, _barrier_id = %d\n",
                        w->barrier_id, _barrier_id);

                DPRINTF(GPUSync, "wf->barrier_cnt %d, bcnt = %d\n",
                        w->barrier_cnt, bcnt);
            }

            if (w->status == Wavefront::S_RUNNING &&
                w->barrier_id == _barrier_id && w->barrier_cnt == bcnt &&
                !w->outstanding_reqs) {
                ++ccnt;

                DPRINTF(GPUSync, "WF[%d][%d] at barrier, increment ccnt to "
                        "%d\n", i_simd, i_wf, ccnt);
            }
        }
    }

    DPRINTF(GPUSync, "CU%d: returning allAtBarrier ccnt = %d, bslots = %d\n",
            cu_id, ccnt, bslots);

    return ccnt == bslots;
}

// Check if the current wavefront is blocked on additional resources.
bool
ComputeUnit::cedeSIMD(int simdId, int wfSlotId)
{
    bool cede = false;

    // If --xact-cas-mode option is enabled in run.py, then xact_cas_ld
    // magic instructions will impact the scheduling of wavefronts
    if (xact_cas_mode) {
        /*
         * When a wavefront calls xact_cas_ld, it adds itself to a per address
         * queue. All per address queues are managed by the xactCasLoadMap.
         *
         * A wavefront is not blocked if: it is not in ANY per address queue or
         * if it is at the head of a per address queue.
         */
        for (auto itMap : xactCasLoadMap) {
            std::list<waveIdentifier> curWaveIDQueue = itMap.second.waveIDQueue;

            if (!curWaveIDQueue.empty()) {
                for (auto it : curWaveIDQueue) {
                    waveIdentifier cur_wave = it;

                    if (cur_wave.simdId == simdId &&
                        cur_wave.wfSlotId == wfSlotId) {
                        // 2 possibilities
                        // 1: this WF has a green light
                        // 2: another WF has a green light
                        waveIdentifier owner_wave = curWaveIDQueue.front();

                        if (owner_wave.simdId != cur_wave.simdId ||
                            owner_wave.wfSlotId != cur_wave.wfSlotId) {
                            // possibility 2
                            cede = true;
                            break;
                        } else {
                            // possibility 1
                            break;
                        }
                    }
                }
            }
        }
    }

    return cede;
}

// Execute one clock worth of work on the ComputeUnit.
void
ComputeUnit::exec()
{
    updateEvents();
    // Execute pipeline stages in reverse order to simulate
    // the pipeline latency
    globalMemoryPipe.exec();
    localMemoryPipe.exec();
    execStage.exec();
    scheduleStage.exec();
    scoreboardCheckStage.exec();
    fetchStage.exec();

    totalCycles++;
}

void
ComputeUnit::init()
{
    // Initialize CU Bus models
    glbMemToVrfBus.init(&shader->tick_cnt, shader->ticks(1));
    locMemToVrfBus.init(&shader->tick_cnt, shader->ticks(1));
    nextGlbMemBus = 0;
    nextLocMemBus = 0;
    fatal_if(numGlbMemUnits > 1,
             "No support for multiple Global Memory Pipelines exists!!!");
    vrfToGlobalMemPipeBus.resize(numGlbMemUnits);
    for (int j = 0; j < numGlbMemUnits; ++j) {
        vrfToGlobalMemPipeBus[j] = WaitClass();
        vrfToGlobalMemPipeBus[j].init(&shader->tick_cnt, shader->ticks(1));
    }

    fatal_if(numLocMemUnits > 1,
             "No support for multiple Local Memory Pipelines exists!!!");
    vrfToLocalMemPipeBus.resize(numLocMemUnits);
    for (int j = 0; j < numLocMemUnits; ++j) {
        vrfToLocalMemPipeBus[j] = WaitClass();
        vrfToLocalMemPipeBus[j].init(&shader->tick_cnt, shader->ticks(1));
    }
    vectorRegsReserved.resize(numSIMDs, 0);
    aluPipe.resize(numSIMDs);
    wfWait.resize(numSIMDs + numLocMemUnits + numGlbMemUnits);

    for (int i = 0; i < numSIMDs + numLocMemUnits + numGlbMemUnits; ++i) {
        wfWait[i] = WaitClass();
        wfWait[i].init(&shader->tick_cnt, shader->ticks(1));
    }

    for (int i = 0; i < numSIMDs; ++i) {
        aluPipe[i] = WaitClass();
        aluPipe[i].init(&shader->tick_cnt, shader->ticks(1));
    }

    // Setup space for call args
    for (int j = 0; j < numSIMDs; ++j) {
        for (int i = 0; i < shader->n_wf; ++i) {
            wfList[j][i]->initCallArgMem(shader->funcargs_size, wavefrontSize);
        }
    }

    // Initializing pipeline resources
    readyList.resize(numSIMDs + numGlbMemUnits + numLocMemUnits);
    waveStatusList.resize(numSIMDs);

    for (int j = 0; j < numSIMDs; ++j) {
        for (int i = 0; i < shader->n_wf; ++i) {
            waveStatusList[j].push_back(
                std::make_pair(wfList[j][i], BLOCKED));
        }
    }

    for (int j = 0; j < (numSIMDs + numGlbMemUnits + numLocMemUnits); ++j) {
        dispatchList.push_back(std::make_pair((Wavefront*)nullptr, EMPTY));
    }

    fetchStage.init(this);
    scoreboardCheckStage.init(this);
    scheduleStage.init(this);
    execStage.init(this);
    globalMemoryPipe.init(this);
    localMemoryPipe.init(this);
    // initialize state for statistics calculation
    vectorAluInstAvail.resize(numSIMDs, false);
    shrMemInstAvail = 0;
    glbMemInstAvail = 0;
}

bool
ComputeUnit::DataPort::recvTimingResp(PacketPtr pkt)
{
    // Ruby has completed the memory op. Schedule the mem_resp_event at the
    // appropriate cycle to process the timing memory response
    // This delay represents the pipeline delay
    SenderState *sender_state = safe_cast<SenderState*>(pkt->senderState);
    int index = sender_state->port_index;
    GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;

    // Is the packet returned a Kernel End or Barrier
    if (pkt->req->isKernel() && pkt->req->isRelease()) {
        Wavefront *w =
            computeUnit->wfList[gpuDynInst->simdId][gpuDynInst->wfSlotId];

        // Check if we are waiting on Kernel End Release
        if (w->status == Wavefront::S_RETURNING) {
            DPRINTF(GPUDisp, "CU%d: WF[%d][%d][wv=%d]: WG id completed %d\n",
                    computeUnit->cu_id, w->simdId, w->wfSlotId,
                    w->wfDynId, w->kern_id);

            computeUnit->shader->dispatcher->notifyWgCompl(w);
            w->status = Wavefront::S_STOPPED;
        } else {
            w->outstanding_reqs--;
        }

        DPRINTF(GPUSync, "CU%d: WF[%d][%d]: barrier_cnt = %d\n",
                computeUnit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId, w->barrier_cnt);

        if (gpuDynInst->useContinuation) {
            assert(gpuDynInst->scope != Enums::MEMORY_SCOPE_NONE);
            gpuDynInst->execContinuation(gpuDynInst->staticInstruction(),
                                         gpuDynInst);
        }

        delete pkt->senderState;
        delete pkt->req;
        delete pkt;
        return true;
    } else if (pkt->req->isKernel() && pkt->req->isAcquire()) {
        if (gpuDynInst->useContinuation) {
            assert(gpuDynInst->scope != Enums::MEMORY_SCOPE_NONE);
            gpuDynInst->execContinuation(gpuDynInst->staticInstruction(),
                                         gpuDynInst);
        }

        delete pkt->senderState;
        delete pkt->req;
        delete pkt;
        return true;
    }

    ComputeUnit::DataPort::MemRespEvent *mem_resp_event =
        new ComputeUnit::DataPort::MemRespEvent(computeUnit->memPort[index],
                                                pkt);

    DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x received!\n",
            computeUnit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
            index, pkt->req->getPaddr());

    computeUnit->schedule(mem_resp_event,
                          curTick() + computeUnit->resp_tick_latency);
    return true;
}

void
ComputeUnit::DataPort::recvReqRetry()
{
    int len = retries.size();

    assert(len > 0);

    for (int i = 0; i < len; ++i) {
        PacketPtr pkt = retries.front().first;
        GPUDynInstPtr gpuDynInst M5_VAR_USED = retries.front().second;
        DPRINTF(GPUMem, "CU%d: WF[%d][%d]: retry mem inst addr %#x\n",
                computeUnit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
                pkt->req->getPaddr());

        /** Currently Ruby can return false due to conflicts for the particular
         *  cache block or address. Thus other requests should be allowed to
         *  pass and the data port should expect multiple retries. */
        if (!sendTimingReq(pkt)) {
            DPRINTF(GPUMem, "failed again!\n");
            break;
        } else {
            DPRINTF(GPUMem, "successful!\n");
            retries.pop_front();
        }
    }
}

bool
ComputeUnit::SQCPort::recvTimingResp(PacketPtr pkt)
{
    computeUnit->fetchStage.processFetchReturn(pkt);

    return true;
}

void
ComputeUnit::SQCPort::recvReqRetry()
{
    int len = retries.size();

    assert(len > 0);

    for (int i = 0; i < len; ++i) {
        PacketPtr pkt = retries.front().first;
        Wavefront *wavefront M5_VAR_USED = retries.front().second;
        DPRINTF(GPUFetch, "CU%d: WF[%d][%d]: retrying FETCH addr %#x\n",
                computeUnit->cu_id, wavefront->simdId, wavefront->wfSlotId,
                pkt->req->getPaddr());
        if (!sendTimingReq(pkt)) {
            DPRINTF(GPUFetch, "failed again!\n");
            break;
        } else {
            DPRINTF(GPUFetch, "successful!\n");
            retries.pop_front();
        }
    }
}

void
ComputeUnit::sendRequest(GPUDynInstPtr gpuDynInst, int index, PacketPtr pkt)
{
    // There must be a way around this check to do the globalMemStart...
    Addr tmp_vaddr = pkt->req->getVaddr();

    updatePageDivergenceDist(tmp_vaddr);

    pkt->req->setVirt(pkt->req->getAsid(), tmp_vaddr, pkt->req->getSize(),
                      pkt->req->getFlags(), pkt->req->masterId(),
                      pkt->req->getPC());

    // figure out the type of the request to set read/write
    BaseTLB::Mode TLB_mode;
    assert(pkt->isRead() || pkt->isWrite());

    // Check write before read for atomic operations
    // since atomic operations should use BaseTLB::Write
    if (pkt->isWrite()) {
        TLB_mode = BaseTLB::Write;
    } else if (pkt->isRead()) {
        TLB_mode = BaseTLB::Read;
    } else {
        fatal("pkt is neither a read nor a write\n");
    }

    tlbCycles -= curTick();
    ++tlbRequests;

    int tlbPort_index = perLaneTLB ? index : 0;
    if (shader->timingSim) {
        if (debugSegFault) {
            Process *p = shader->gpuTc->getProcessPtr();
            Addr vaddr = pkt->req->getVaddr();
            unsigned size = pkt->getSize();

            if ((vaddr + size - 1) % 64 < vaddr % 64) {
                panic("CU%d: WF[%d][%d]: Access to addr %#x is unaligned!\n",
                      cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, vaddr);
            }

            Addr paddr;

            if (!p->pTable->translate(vaddr, paddr)) {
                if (!p->fixupStackFault(vaddr)) {
                    panic("CU%d: WF[%d][%d]: Fault on addr %#x!\n",
                          cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
                          vaddr);
                }
            }
        }

        // This is the SenderState needed upon return
        pkt->senderState = new DTLBPort::SenderState(gpuDynInst, index);

        // This is the senderState needed by the TLB hierarchy to function
        TheISA::GpuTLB::TranslationState *translation_state =
            new TheISA::GpuTLB::TranslationState(TLB_mode, shader->gpuTc, false,
                                                 pkt->senderState);

        pkt->senderState = translation_state;

        if (functionalTLB) {
            tlbPort[tlbPort_index]->sendFunctional(pkt);

            // update the hitLevel distribution
            int hit_level = translation_state->hitLevel;
            assert(hit_level != -1);
            hitsPerTLBLevel[hit_level]++;

            // New SenderState for the memory access
            X86ISA::GpuTLB::TranslationState *sender_state =
                safe_cast<X86ISA::GpuTLB::TranslationState*>(pkt->senderState);

            delete sender_state->tlbEntry;
            delete sender_state->saved;
            delete sender_state;

            assert(pkt->req->hasPaddr());
            assert(pkt->req->hasSize());

            uint8_t *tmpData = pkt->getPtr<uint8_t>();

            // this is necessary because the GPU TLB receives packets instead
            // of requests. when the translation is complete, all relevant
            // fields in the request will be populated, but not in the packet.
            // here we create the new packet so we can set the size, addr,
            // and proper flags.
            PacketPtr oldPkt = pkt;
            pkt = new Packet(oldPkt->req, oldPkt->cmd);
            delete oldPkt;
            pkt->dataStatic(tmpData);


            // New SenderState for the memory access
            pkt->senderState =
                new ComputeUnit::DataPort::SenderState(gpuDynInst, index,
                                                       nullptr);

            gpuDynInst->memStatusVector[pkt->getAddr()].push_back(index);
            gpuDynInst->tlbHitLevel[index] = hit_level;


            // translation is done. Schedule the mem_req_event at the
            // appropriate cycle to send the timing memory request to ruby
            ComputeUnit::DataPort::MemReqEvent *mem_req_event =
                new ComputeUnit::DataPort::MemReqEvent(memPort[index], pkt);

            DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x data "
                    "scheduled\n", cu_id, gpuDynInst->simdId,
                    gpuDynInst->wfSlotId, index, pkt->req->getPaddr());

            schedule(mem_req_event, curTick() + req_tick_latency);
        } else if (tlbPort[tlbPort_index]->isStalled()) {
            assert(tlbPort[tlbPort_index]->retries.size() > 0);

            DPRINTF(GPUTLB, "CU%d: WF[%d][%d]: Translation for addr %#x "
                    "failed!\n", cu_id, gpuDynInst->simdId,
                    gpuDynInst->wfSlotId, tmp_vaddr);

            tlbPort[tlbPort_index]->retries.push_back(pkt);
        } else if (!tlbPort[tlbPort_index]->sendTimingReq(pkt)) {
            // Stall the data port;
            // no more packets will be issued until
            // ruby indicates resources are freed by
            // a recvReqRetry() call back on this port.
            tlbPort[tlbPort_index]->stallPort();

            DPRINTF(GPUTLB, "CU%d: WF[%d][%d]: Translation for addr %#x "
                    "failed!\n", cu_id, gpuDynInst->simdId,
                    gpuDynInst->wfSlotId, tmp_vaddr);

            tlbPort[tlbPort_index]->retries.push_back(pkt);
        } else {
            DPRINTF(GPUTLB,
                    "CU%d: WF[%d][%d]: Translation for addr %#x sent!\n",
                    cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, tmp_vaddr);
        }
    } else {
        if (pkt->cmd == MemCmd::MemFenceReq) {
            gpuDynInst->statusBitVector = VectorMask(0);
        } else {
            gpuDynInst->statusBitVector &= (~(1ll << index));
        }

        // New SenderState for the memory access
        delete pkt->senderState;

        // Because it's an atomic operation, only the TLB translation state
        // is needed
        pkt->senderState = new TheISA::GpuTLB::TranslationState(TLB_mode,
                                                                shader->gpuTc);

        tlbPort[tlbPort_index]->sendFunctional(pkt);

        // the addr of the packet is not modified, so we need to create a new
        // packet, or otherwise the memory access will have the old virtual
        // address sent in the translation packet, instead of the physical
        // address returned by the translation.
        PacketPtr new_pkt = new Packet(pkt->req, pkt->cmd);
        new_pkt->dataStatic(pkt->getPtr<uint8_t>());

        // Translation is done. It is safe to send the packet to memory.
        memPort[0]->sendFunctional(new_pkt);

        DPRINTF(GPUMem, "CU%d: WF[%d][%d]: index %d: addr %#x\n", cu_id,
                gpuDynInst->simdId, gpuDynInst->wfSlotId, index,
                new_pkt->req->getPaddr());

        // safe_cast the senderState
        TheISA::GpuTLB::TranslationState *sender_state =
            safe_cast<TheISA::GpuTLB::TranslationState*>(pkt->senderState);

        delete sender_state->tlbEntry;
        delete new_pkt;
        delete pkt->senderState;
        delete pkt->req;
        delete pkt;
    }
}

void
ComputeUnit::sendSyncRequest(GPUDynInstPtr gpuDynInst, int index, PacketPtr pkt)
{
    ComputeUnit::DataPort::MemReqEvent *mem_req_event =
        new ComputeUnit::DataPort::MemReqEvent(memPort[index], pkt);


    // New SenderState for the memory access
    pkt->senderState =
        new ComputeUnit::DataPort::SenderState(gpuDynInst, index, nullptr);

    DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x sync scheduled\n",
            cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, index,
            pkt->req->getPaddr());

    schedule(mem_req_event, curTick() + req_tick_latency);
}

void
ComputeUnit::injectGlobalMemFence(GPUDynInstPtr gpuDynInst, bool kernelLaunch,
                                  Request* req)
{
    if (!req) {
        req = new Request(0, 0, 0, 0, masterId(), 0, gpuDynInst->wfDynId);
    }
    req->setPaddr(0);
    if (kernelLaunch) {
        req->setFlags(Request::KERNEL);
    }

    gpuDynInst->s_type = SEG_GLOBAL;

    // for non-kernel MemFence operations, memorder flags are set depending
    // on which type of request is currently being sent, so this
    // should be set by the caller (e.g. if an inst has acq-rel
    // semantics, it will send one acquire req and one release req)
    gpuDynInst->setRequestFlags(req, kernelLaunch);

    // a mem fence must correspond to an acquire/release request
    assert(req->isAcquire() || req->isRelease());

    // create packet
    PacketPtr pkt = new Packet(req, MemCmd::MemFenceReq);

    // set packet's sender state
    pkt->senderState =
        new ComputeUnit::DataPort::SenderState(gpuDynInst, 0, nullptr);

    // send the packet
    sendSyncRequest(gpuDynInst, 0, pkt);
}

const char*
ComputeUnit::DataPort::MemRespEvent::description() const
{
    return "ComputeUnit memory response event";
}

void
ComputeUnit::DataPort::MemRespEvent::process()
{
    DataPort::SenderState *sender_state =
        safe_cast<DataPort::SenderState*>(pkt->senderState);

    GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;
    ComputeUnit *compute_unit = dataPort->computeUnit;

    assert(gpuDynInst);

    DPRINTF(GPUPort, "CU%d: WF[%d][%d]: Response for addr %#x, index %d\n",
            compute_unit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
            pkt->req->getPaddr(), dataPort->index);

    Addr paddr = pkt->req->getPaddr();

    if (pkt->cmd != MemCmd::MemFenceResp) {
        int index = gpuDynInst->memStatusVector[paddr].back();

        DPRINTF(GPUMem, "Response for addr %#x, index %d\n",
                pkt->req->getPaddr(), index);

        gpuDynInst->memStatusVector[paddr].pop_back();
        gpuDynInst->pAddr = pkt->req->getPaddr();

        if (pkt->isRead() || pkt->isWrite()) {

            if (gpuDynInst->n_reg <= MAX_REGS_FOR_NON_VEC_MEM_INST) {
                gpuDynInst->statusBitVector &= (~(1ULL << index));
            } else {
                assert(gpuDynInst->statusVector[index] > 0);
                gpuDynInst->statusVector[index]--;

                if (!gpuDynInst->statusVector[index])
                    gpuDynInst->statusBitVector &= (~(1ULL << index));
            }

            DPRINTF(GPUMem, "bitvector is now %#x\n",
                    gpuDynInst->statusBitVector);

            if (gpuDynInst->statusBitVector == VectorMask(0)) {
                auto iter = gpuDynInst->memStatusVector.begin();
                auto end = gpuDynInst->memStatusVector.end();

                while (iter != end) {
                    assert(iter->second.empty());
                    ++iter;
                }

                gpuDynInst->memStatusVector.clear();

                if (gpuDynInst->n_reg > MAX_REGS_FOR_NON_VEC_MEM_INST)
                    gpuDynInst->statusVector.clear();

                if (gpuDynInst->m_op == Enums::MO_LD || MO_A(gpuDynInst->m_op)
                    || MO_ANR(gpuDynInst->m_op)) {
                    assert(compute_unit->globalMemoryPipe.isGMLdRespFIFOWrRdy());

                    compute_unit->globalMemoryPipe.getGMLdRespFIFO()
                        .push(gpuDynInst);
                } else {
                    assert(compute_unit->globalMemoryPipe.isGMStRespFIFOWrRdy());

                    compute_unit->globalMemoryPipe.getGMStRespFIFO()
                        .push(gpuDynInst);
                }

                DPRINTF(GPUMem, "CU%d: WF[%d][%d]: packet totally complete\n",
                        compute_unit->cu_id, gpuDynInst->simdId,
                        gpuDynInst->wfSlotId);

                // after clearing the status vectors,
                // see if there is a continuation to perform
                // the continuation may generate more work for
                // this memory request
                if (gpuDynInst->useContinuation) {
                    assert(gpuDynInst->scope != Enums::MEMORY_SCOPE_NONE);
                    gpuDynInst->execContinuation(
                        gpuDynInst->staticInstruction(), gpuDynInst);
                }
            }
        }
    } else {
        gpuDynInst->statusBitVector = VectorMask(0);

        if (gpuDynInst->useContinuation) {
            assert(gpuDynInst->scope != Enums::MEMORY_SCOPE_NONE);
            gpuDynInst->execContinuation(gpuDynInst->staticInstruction(),
                                         gpuDynInst);
        }
    }

    delete pkt->senderState;
    delete pkt->req;
    delete pkt;
}

ComputeUnit*
ComputeUnitParams::create()
{
    return new ComputeUnit(this);
}

bool
ComputeUnit::DTLBPort::recvTimingResp(PacketPtr pkt)
{
    Addr line = pkt->req->getPaddr();

    DPRINTF(GPUTLB, "CU%d: DTLBPort received %#x->%#x\n", computeUnit->cu_id,
            pkt->req->getVaddr(), line);

    assert(pkt->senderState);
    computeUnit->tlbCycles += curTick();

    // pop off the TLB translation state
    TheISA::GpuTLB::TranslationState *translation_state =
        safe_cast<TheISA::GpuTLB::TranslationState*>(pkt->senderState);

    // no PageFaults are permitted for data accesses
    if (!translation_state->tlbEntry->valid) {
        DTLBPort::SenderState *sender_state =
            safe_cast<DTLBPort::SenderState*>(translation_state->saved);

        Wavefront *w M5_VAR_USED =
            computeUnit->wfList[sender_state->_gpuDynInst->simdId]
                               [sender_state->_gpuDynInst->wfSlotId];

        DPRINTFN("Wave %d couldn't translate vaddr %#x\n", w->wfDynId,
                 pkt->req->getVaddr());
    }

    assert(translation_state->tlbEntry->valid);

    // update the hitLevel distribution
    int hit_level = translation_state->hitLevel;
    computeUnit->hitsPerTLBLevel[hit_level]++;

    delete translation_state->tlbEntry;
    assert(!translation_state->ports.size());
    pkt->senderState = translation_state->saved;

    // for prefetch pkt
    BaseTLB::Mode TLB_mode = translation_state->tlbMode;

    delete translation_state;

    // use the original sender state to know how to close this transaction
    DTLBPort::SenderState *sender_state =
        safe_cast<DTLBPort::SenderState*>(pkt->senderState);

    GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;
    int mp_index = sender_state->portIndex;
    Addr vaddr = pkt->req->getVaddr();
    gpuDynInst->memStatusVector[line].push_back(mp_index);
    gpuDynInst->tlbHitLevel[mp_index] = hit_level;

    MemCmd requestCmd;

    if (pkt->cmd == MemCmd::ReadResp) {
        requestCmd = MemCmd::ReadReq;
    } else if (pkt->cmd == MemCmd::WriteResp) {
        requestCmd = MemCmd::WriteReq;
    } else if (pkt->cmd == MemCmd::SwapResp) {
        requestCmd = MemCmd::SwapReq;
    } else {
        panic("unsupported response to request conversion %s\n",
              pkt->cmd.toString());
    }

    if (computeUnit->prefetchDepth) {
        int simdId = gpuDynInst->simdId;
        int wfSlotId = gpuDynInst->wfSlotId;
        Addr last = 0;

        switch(computeUnit->prefetchType) {
        case Enums::PF_CU:
            last = computeUnit->lastVaddrCU[mp_index];
            break;
        case Enums::PF_PHASE:
            last = computeUnit->lastVaddrSimd[simdId][mp_index];
            break;
        case Enums::PF_WF:
            last = computeUnit->lastVaddrWF[simdId][wfSlotId][mp_index];
        default:
            break;
        }

        DPRINTF(GPUPrefetch, "CU[%d][%d][%d][%d]: %#x was last\n",
                computeUnit->cu_id, simdId, wfSlotId, mp_index, last);

        int stride = last ? (roundDown(vaddr, TheISA::PageBytes) -
                     roundDown(last, TheISA::PageBytes)) >> TheISA::PageShift
                     : 0;
        DPRINTF(GPUPrefetch, "Stride is %d\n", stride);

        computeUnit->lastVaddrCU[mp_index] = vaddr;
        computeUnit->lastVaddrSimd[simdId][mp_index] = vaddr;
        computeUnit->lastVaddrWF[simdId][wfSlotId][mp_index] = vaddr;

        stride = (computeUnit->prefetchType == Enums::PF_STRIDE) ?
            computeUnit->prefetchStride : stride;

        DPRINTF(GPUPrefetch, "%#x to: CU[%d][%d][%d][%d]\n", vaddr,
                computeUnit->cu_id, simdId, wfSlotId, mp_index);

        DPRINTF(GPUPrefetch, "Prefetching from %#x:", vaddr);

        // Prefetch Next few pages atomically
        for (int pf = 1; pf <= computeUnit->prefetchDepth; ++pf) {
            DPRINTF(GPUPrefetch, "%d * %d: %#x\n", pf, stride,
                    vaddr + stride * pf * TheISA::PageBytes);

            if (!stride)
                break;

            Request *prefetch_req = new Request(0, vaddr + stride * pf *
                                                TheISA::PageBytes,
                                                sizeof(uint8_t), 0,
                                                computeUnit->masterId(),
                                                0, 0, 0);

            PacketPtr prefetch_pkt = new Packet(prefetch_req, requestCmd);
            uint8_t foo = 0;
            prefetch_pkt->dataStatic(&foo);

            // Because it's an atomic operation, only the TLB translation
            // state is needed
            prefetch_pkt->senderState =
                new TheISA::GpuTLB::TranslationState(TLB_mode,
                                                     computeUnit->shader->gpuTc,
                                                     true);

            // Currently prefetches are zero-latency, hence the sendFunctional
            sendFunctional(prefetch_pkt);

            /* safe_cast the senderState */
            TheISA::GpuTLB::TranslationState *tlb_state =
                safe_cast<TheISA::GpuTLB::TranslationState*>(
                        prefetch_pkt->senderState);


            delete tlb_state->tlbEntry;
            delete tlb_state;
            delete prefetch_pkt->req;
            delete prefetch_pkt;
        }
    }

    // First we must convert the response cmd back to a request cmd so that
    // the request can be sent through the cu's master port
    PacketPtr new_pkt = new Packet(pkt->req, requestCmd);
    new_pkt->dataStatic(pkt->getPtr<uint8_t>());
    delete pkt->senderState;
    delete pkt;

    // New SenderState for the memory access
    new_pkt->senderState =
        new ComputeUnit::DataPort::SenderState(gpuDynInst, mp_index,
                                               nullptr);

    // translation is done. Schedule the mem_req_event at the appropriate
    // cycle to send the timing memory request to ruby
    ComputeUnit::DataPort::MemReqEvent *mem_req_event =
        new ComputeUnit::DataPort::MemReqEvent(computeUnit->memPort[mp_index],
                                               new_pkt);

    DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x data scheduled\n",
            computeUnit->cu_id, gpuDynInst->simdId,
            gpuDynInst->wfSlotId, mp_index, new_pkt->req->getPaddr());

    computeUnit->schedule(mem_req_event, curTick() +
                          computeUnit->req_tick_latency);

    return true;
}

const char*
ComputeUnit::DataPort::MemReqEvent::description() const
{
    return "ComputeUnit memory request event";
}

void
ComputeUnit::DataPort::MemReqEvent::process()
{
    SenderState *sender_state = safe_cast<SenderState*>(pkt->senderState);
    GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;
    ComputeUnit *compute_unit M5_VAR_USED = dataPort->computeUnit;

    if (!(dataPort->sendTimingReq(pkt))) {
        dataPort->retries.push_back(std::make_pair(pkt, gpuDynInst));

        DPRINTF(GPUPort,
                "CU%d: WF[%d][%d]: index %d, addr %#x data req failed!\n",
                compute_unit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId, dataPort->index,
                pkt->req->getPaddr());
    } else {
        DPRINTF(GPUPort,
                "CU%d: WF[%d][%d]: index %d, addr %#x data req sent!\n",
                compute_unit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId, dataPort->index,
                pkt->req->getPaddr());
    }
}

/*
 * The initial translation request could have been rejected, if
 * <retries> queue is not empty. Retry sending the translation
 * request. sendRetry() is called from the peer port whenever
 * a translation completes.
 */
void
ComputeUnit::DTLBPort::recvReqRetry()
{
    int len = retries.size();

    DPRINTF(GPUTLB, "CU%d: DTLB recvReqRetry - %d pending requests\n",
            computeUnit->cu_id, len);

    assert(len > 0);
    assert(isStalled());
    // recvReqRetry is an indication that the resource this port was
    // stalling on has been freed. So, remove the stall first
    unstallPort();

    for (int i = 0; i < len; ++i) {
        PacketPtr pkt = retries.front();
        Addr vaddr M5_VAR_USED = pkt->req->getVaddr();
        DPRINTF(GPUTLB, "CU%d: retrying D-translation for address %#x",
                computeUnit->cu_id, vaddr);

        if (!sendTimingReq(pkt)) {
            // Stall port
            stallPort();
            DPRINTF(GPUTLB, ": failed again\n");
            break;
        } else {
            DPRINTF(GPUTLB, ": successful\n");
            retries.pop_front();
        }
    }
}

bool
ComputeUnit::ITLBPort::recvTimingResp(PacketPtr pkt)
{
    Addr line M5_VAR_USED = pkt->req->getPaddr();
    DPRINTF(GPUTLB, "CU%d: ITLBPort received %#x->%#x\n",
            computeUnit->cu_id, pkt->req->getVaddr(), line);

    assert(pkt->senderState);

    // pop off the TLB translation state
    TheISA::GpuTLB::TranslationState *translation_state =
        safe_cast<TheISA::GpuTLB::TranslationState*>(pkt->senderState);

    bool success = translation_state->tlbEntry->valid;
    delete translation_state->tlbEntry;
    assert(!translation_state->ports.size());
    pkt->senderState = translation_state->saved;
    delete translation_state;

    // use the original sender state to know how to close this transaction
    ITLBPort::SenderState *sender_state =
        safe_cast<ITLBPort::SenderState*>(pkt->senderState);

    // get the wavefront associated with this translation request
    Wavefront *wavefront = sender_state->wavefront;
    delete pkt->senderState;

    if (success) {
        // pkt is reused in fetch(), don't delete it here. However, we must
        // reset the command to be a request so that it can be sent through
        // the cu's master port
        assert(pkt->cmd == MemCmd::ReadResp);
        pkt->cmd = MemCmd::ReadReq;

        computeUnit->fetchStage.fetch(pkt, wavefront);
    } else {
        if (wavefront->dropFetch) {
            assert(wavefront->instructionBuffer.empty());
            wavefront->dropFetch = false;
        }

        wavefront->pendingFetch = 0;
    }

    return true;
}

/*
 * The initial translation request could have been rejected, if
 * <retries> queue is not empty. Retry sending the translation
 * request. sendRetry() is called from the peer port whenever
 * a translation completes.
 */
void
ComputeUnit::ITLBPort::recvReqRetry()
{

    int len = retries.size();
    DPRINTF(GPUTLB, "CU%d: ITLB recvReqRetry - %d pending requests\n",
            computeUnit->cu_id, len);

    assert(len > 0);
    assert(isStalled());

    // recvReqRetry is an indication that the resource this port was
    // stalling on has been freed. So, remove the stall first
    unstallPort();

    for (int i = 0; i < len; ++i) {
        PacketPtr pkt = retries.front();
        Addr vaddr M5_VAR_USED = pkt->req->getVaddr();
        DPRINTF(GPUTLB, "CU%d: retrying I-translation for address %#x",
                computeUnit->cu_id, vaddr);

        if (!sendTimingReq(pkt)) {
            stallPort(); // Stall port
            DPRINTF(GPUTLB, ": failed again\n");
            break;
        } else {
            DPRINTF(GPUTLB, ": successful\n");
            retries.pop_front();
        }
    }
}

void
ComputeUnit::regStats()
{
    MemObject::regStats();

    tlbCycles
        .name(name() + ".tlb_cycles")
        .desc("total number of cycles for all uncoalesced requests")
        ;

    tlbRequests
        .name(name() + ".tlb_requests")
        .desc("number of uncoalesced requests")
        ;

    tlbLatency
        .name(name() + ".avg_translation_latency")
        .desc("Avg. translation latency for data translations")
        ;

    tlbLatency = tlbCycles / tlbRequests;

    hitsPerTLBLevel
        .init(4)
        .name(name() + ".TLB_hits_distribution")
        .desc("TLB hits distribution (0 for page table, x for Lx-TLB)")
        ;

    // fixed number of TLB levels
    for (int i = 0; i < 4; ++i) {
        if (!i)
            hitsPerTLBLevel.subname(i,"page_table");
        else
            hitsPerTLBLevel.subname(i, csprintf("L%d_TLB",i));
    }

    execRateDist
        .init(0, 10, 2)
        .name(name() + ".inst_exec_rate")
        .desc("Instruction Execution Rate: Number of executed vector "
              "instructions per cycle")
        ;

    ldsBankConflictDist
        .init(0, wfSize(), 2)
        .name(name() + ".lds_bank_conflicts")
        .desc("Number of bank conflicts per LDS memory packet")
        ;

    ldsBankAccesses
        .name(name() + ".lds_bank_access_cnt")
        .desc("Total number of LDS bank accesses")
        ;

    pageDivergenceDist
        // A wavefront can touch up to N pages per memory instruction where
        // N is equal to the wavefront size
        // The number of pages per bin can be configured (here it's 4).
        .init(1, wfSize(), 4)
        .name(name() + ".page_divergence_dist")
        .desc("pages touched per wf (over all mem. instr.)")
        ;

    controlFlowDivergenceDist
        .init(1, wfSize(), 4)
        .name(name() + ".warp_execution_dist")
        .desc("number of lanes active per instruction (over all instructions)")
        ;

    activeLanesPerGMemInstrDist
        .init(1, wfSize(), 4)
        .name(name() + ".gmem_lanes_execution_dist")
        .desc("number of active lanes per global memory instruction")
        ;

    activeLanesPerLMemInstrDist
        .init(1, wfSize(), 4)
        .name(name() + ".lmem_lanes_execution_dist")
        .desc("number of active lanes per local memory instruction")
        ;

    numInstrExecuted
        .name(name() + ".num_instr_executed")
        .desc("number of instructions executed")
        ;

    numVecOpsExecuted
        .name(name() + ".num_vec_ops_executed")
        .desc("number of vec ops executed (e.g. WF size/inst)")
        ;

    totalCycles
        .name(name() + ".num_total_cycles")
        .desc("number of cycles the CU ran for")
        ;

    ipc
        .name(name() + ".ipc")
        .desc("Instructions per cycle (this CU only)")
        ;

    vpc
        .name(name() + ".vpc")
        .desc("Vector Operations per cycle (this CU only)")
        ;

    numALUInstsExecuted
        .name(name() + ".num_alu_insts_executed")
        .desc("Number of dynamic non-GM memory insts executed")
        ;

    wgBlockedDueLdsAllocation
        .name(name() + ".wg_blocked_due_lds_alloc")
        .desc("Workgroup blocked due to LDS capacity")
        ;

    ipc = numInstrExecuted / totalCycles;
    vpc = numVecOpsExecuted / totalCycles;

    numTimesWgBlockedDueVgprAlloc
        .name(name() + ".times_wg_blocked_due_vgpr_alloc")
        .desc("Number of times WGs are blocked due to VGPR allocation per SIMD")
        ;

    dynamicGMemInstrCnt
        .name(name() + ".global_mem_instr_cnt")
        .desc("dynamic global memory instruction count")
        ;

    dynamicLMemInstrCnt
        .name(name() + ".local_mem_instr_cnt")
        .desc("dynamic local memory instruction count")
        ;

    numALUInstsExecuted = numInstrExecuted - dynamicGMemInstrCnt -
        dynamicLMemInstrCnt;

    completedWfs
        .name(name() + ".num_completed_wfs")
        .desc("number of completed wavefronts")
        ;

    numCASOps
        .name(name() + ".num_CAS_ops")
        .desc("number of compare and swap operations")
        ;

    numFailedCASOps
        .name(name() + ".num_failed_CAS_ops")
        .desc("number of compare and swap operations that failed")
        ;

    // register stats of pipeline stages
    fetchStage.regStats();
    scoreboardCheckStage.regStats();
    scheduleStage.regStats();
    execStage.regStats();

    // register stats of memory pipeline
    globalMemoryPipe.regStats();
    localMemoryPipe.regStats();
}

void
ComputeUnit::updatePageDivergenceDist(Addr addr)
{
    Addr virt_page_addr = roundDown(addr, TheISA::PageBytes);

    if (!pagesTouched.count(virt_page_addr))
        pagesTouched[virt_page_addr] = 1;
    else
        pagesTouched[virt_page_addr]++;
}

void
ComputeUnit::CUExitCallback::process()
{
    if (computeUnit->countPages) {
        std::ostream *page_stat_file =
            simout.create(computeUnit->name().c_str())->stream();

        *page_stat_file << "page, wavefront accesses, workitem accesses" <<
            std::endl;

        for (auto iter : computeUnit->pageAccesses) {
            *page_stat_file << std::hex << iter.first << ",";
            *page_stat_file << std::dec << iter.second.first << ",";
            *page_stat_file << std::dec << iter.second.second << std::endl;
        }
    }
}

bool
ComputeUnit::isDone() const
{
    for (int i = 0; i < numSIMDs; ++i) {
        if (!isSimdDone(i)) {
            return false;
        }
    }

    bool glbMemBusRdy = true;
    for (int j = 0; j < numGlbMemUnits; ++j) {
        glbMemBusRdy &= vrfToGlobalMemPipeBus[j].rdy();
    }
    bool locMemBusRdy = true;
    for (int j = 0; j < numLocMemUnits; ++j) {
        locMemBusRdy &= vrfToLocalMemPipeBus[j].rdy();
    }

    if (!globalMemoryPipe.isGMLdRespFIFOWrRdy() ||
        !globalMemoryPipe.isGMStRespFIFOWrRdy() ||
        !globalMemoryPipe.isGMReqFIFOWrRdy() ||
        !localMemoryPipe.isLMReqFIFOWrRdy() ||
        !localMemoryPipe.isLMRespFIFOWrRdy() || !locMemToVrfBus.rdy() ||
        !glbMemToVrfBus.rdy() || !locMemBusRdy || !glbMemBusRdy) {
        return false;
    }
    return true;
}

int32_t
ComputeUnit::getRefCounter(const uint32_t dispatchId, const uint32_t wgId) const
{
    return lds.getRefCounter(dispatchId, wgId);
}

bool
ComputeUnit::isSimdDone(uint32_t simdId) const
{
    assert(simdId < numSIMDs);

    for (int i = 0; i < numGlbMemUnits; ++i) {
        if (!vrfToGlobalMemPipeBus[i].rdy())
            return false;
    }
    for (int i = 0; i < numLocMemUnits; ++i) {
        if (!vrfToLocalMemPipeBus[i].rdy())
            return false;
    }
    if (!aluPipe[simdId].rdy()) {
        return false;
    }

    for (int i_wf = 0; i_wf < shader->n_wf; ++i_wf) {
        if (wfList[simdId][i_wf]->status != Wavefront::S_STOPPED) {
            return false;
        }
    }

    return true;
}

/**
 * Send a general request to the LDS.
 * Make sure to look at the return value here, as your request might be
 * NACK'd; returning false means that you need some backup plan.
 */
bool
ComputeUnit::sendToLds(GPUDynInstPtr gpuDynInst)
{
    // this is just a request to carry the GPUDynInstPtr
    // back and forth
    Request *newRequest = new Request();
    newRequest->setPaddr(0x0);

    // ReadReq is not evaluated by the LDS but the Packet ctor requires this
    PacketPtr newPacket = new Packet(newRequest, MemCmd::ReadReq);

    // This is the SenderState needed upon return
    newPacket->senderState = new LDSPort::SenderState(gpuDynInst);

    return ldsPort->sendTimingReq(newPacket);
}

/**
 * Get the result of packets sent to the LDS when they return.
 */
bool
ComputeUnit::LDSPort::recvTimingResp(PacketPtr packet)
{
    const ComputeUnit::LDSPort::SenderState *senderState =
        dynamic_cast<ComputeUnit::LDSPort::SenderState *>(packet->senderState);

    fatal_if(!senderState, "did not get the right sort of sender state");

    GPUDynInstPtr gpuDynInst = senderState->getMemInst();

    delete packet->senderState;
    delete packet->req;
    delete packet;

    computeUnit->localMemoryPipe.getLMRespFIFO().push(gpuDynInst);
    return true;
}

/**
 * Attempt to send this packet: either the port is already stalled, the
 * request is NACK'd and must stall, or the request goes through.
 * When a request cannot be sent, add it to the retries queue.
 */
bool
ComputeUnit::LDSPort::sendTimingReq(PacketPtr pkt)
{
    ComputeUnit::LDSPort::SenderState *sender_state =
        dynamic_cast<ComputeUnit::LDSPort::SenderState*>(pkt->senderState);
    fatal_if(!sender_state, "packet without a valid sender state");

    GPUDynInstPtr gpuDynInst M5_VAR_USED = sender_state->getMemInst();

    if (isStalled()) {
        fatal_if(retries.empty(), "must have retries waiting to be stalled");

        retries.push(pkt);

        DPRINTF(GPUPort, "CU%d: WF[%d][%d]: LDS send failed!\n",
                computeUnit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId);
        return false;
    } else if (!MasterPort::sendTimingReq(pkt)) {
        // need to stall the LDS port until a recvReqRetry() is received
        // this indicates that there is more space
        stallPort();
        retries.push(pkt);

        DPRINTF(GPUPort, "CU%d: WF[%d][%d]: addr %#x lds req failed!\n",
                computeUnit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId, pkt->req->getPaddr());
        return false;
    } else {
        DPRINTF(GPUPort, "CU%d: WF[%d][%d]: addr %#x lds req sent!\n",
                computeUnit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId, pkt->req->getPaddr());
        return true;
    }
}

/**
 * The bus is telling the port that there is now space, so retrying stalled
 * requests should work now. This allows the port to have a request be NACK'd
 * and then have the receiver say when there is space, rather than simply
 * retrying the send every cycle.
 */
void
ComputeUnit::LDSPort::recvReqRetry()
{
    auto queueSize = retries.size();

    DPRINTF(GPUPort, "CU%d: LDSPort recvReqRetry - %d pending requests\n",
            computeUnit->cu_id, queueSize);

    fatal_if(queueSize < 1,
             "why was there a recvReqRetry() with no pending reqs?");
    fatal_if(!isStalled(),
             "recvReqRetry() happened when the port was not stalled");

    unstallPort();

    while (!retries.empty()) {
        PacketPtr packet = retries.front();

        DPRINTF(GPUPort, "CU%d: retrying LDS send\n", computeUnit->cu_id);

        if (!MasterPort::sendTimingReq(packet)) {
            // Stall port
            stallPort();
            DPRINTF(GPUPort, ": LDS send failed again\n");
            break;
        } else {
            DPRINTF(GPUPort, ": LDS send successful\n");
            retries.pop();
        }
    }
}