            ++wave_id;
        }
    }
    ++barrier_id;
}

int
ComputeUnit::ReadyWorkgroup(NDRange *ndr)
{
    // Get true size of workgroup (after clamping to grid size)
    int trueWgSize[3];
    int trueWgSizeTotal = 1;

    for (int d = 0; d < 3; ++d) {
        trueWgSize[d] = std::min(ndr->q.wgSize[d], ndr->q.gdSize[d] -
                                 ndr->wgId[d] * ndr->q.wgSize[d]);

        trueWgSizeTotal *= trueWgSize[d];
        DPRINTF(GPUDisp, "trueWgSize[%d] = %d\n", d, trueWgSize[d]);
    }

    DPRINTF(GPUDisp, "trueWgSizeTotal = %d\n", trueWgSizeTotal);

    // calculate the number of 32-bit vector registers required by each
    // work item of the work group
    int vregDemandPerWI = ndr->q.sRegCount + (2 * ndr->q.dRegCount);
    bool vregAvail = true;
    int numWfs = (trueWgSizeTotal + wfSize() - 1) / wfSize();
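    // each 64-bit (dReg) register occupies two 32-bit VGPR slots, hence
    // the 2x factor above; the rounded-up division yields the
    // ceil(trueWgSizeTotal / wfSize()) WFs needed to cover the WG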
    int freeWfSlots = 0;
    // check if the total number of VGPRs required by all WFs of the WG
    // fit in the VRFs of all SIMD units
    assert((numWfs * vregDemandPerWI) <= (numSIMDs * numVecRegsPerSimd));
    int numMappedWfs = 0;
    std::vector<int> numWfsPerSimd;
    numWfsPerSimd.resize(numSIMDs, 0);
    // find how many free WF slots we have across all SIMDs
    for (int j = 0; j < shader->n_wf; ++j) {
        for (int i = 0; i < numSIMDs; ++i) {
            if (wfList[i][j]->status == Wavefront::S_STOPPED) {
                // count the number of free WF slots
                ++freeWfSlots;
                if (numMappedWfs < numWfs) {
                    // count the WFs to be assigned per SIMD
                    numWfsPerSimd[i]++;
                }
                numMappedWfs++;
            }
        }
    }

    // if there are enough free WF slots then find if there are enough
    // free VGPRs per SIMD based on the WF->SIMD mapping
    if (freeWfSlots >= numWfs) {
        for (int j = 0; j < numSIMDs; ++j) {
            // find if there are enough free VGPR regions in the SIMD's VRF
            // to accommodate the WFs of the new WG that would be mapped to
            // this SIMD unit
            vregAvail = vrf[j]->manager->canAllocate(numWfsPerSimd[j],
                                                     vregDemandPerWI);

            // stop searching if there is at least one SIMD
            // whose VRF does not have enough free VGPR pools.
            // This is because a WG is scheduled only if ALL
            // of its WFs can be scheduled
            if (!vregAvail)
                break;
        }
    }

    DPRINTF(GPUDisp, "Free WF slots = %d, VGPR Availability = %d\n",
            freeWfSlots, vregAvail);

    if (!vregAvail) {
        ++numTimesWgBlockedDueVgprAlloc;
    }

    // count the workgroups that cannot be dispatched because the LDS
    // reservation cannot be made
    if (!lds.canReserve(ndr->q.ldsSize)) {
        wgBlockedDueLdsAllocation++;
    }

    // Return true if (a) there are enough free WF slots to submit the
    // workgroup, (b) there are enough VGPRs to schedule all WFs to their
    // SIMD units, and (c) there is enough space in LDS
    return freeWfSlots >= numWfs && vregAvail &&
           lds.canReserve(ndr->q.ldsSize);
}

int
ComputeUnit::AllAtBarrier(uint32_t _barrier_id, uint32_t bcnt, uint32_t bslots)
{
    DPRINTF(GPUSync, "CU%d: Checking for All At Barrier\n", cu_id);
    int ccnt = 0;

    for (int i_simd = 0; i_simd < numSIMDs; ++i_simd) {
        for (int i_wf = 0; i_wf < shader->n_wf; ++i_wf) {
            Wavefront *w = wfList[i_simd][i_wf];

            if (w->status == Wavefront::S_RUNNING) {
                DPRINTF(GPUSync, "Checking WF[%d][%d]\n", i_simd, i_wf);

                DPRINTF(GPUSync, "wf->barrier_id = %d, _barrier_id = %d\n",
                        w->barrierId, _barrier_id);

                DPRINTF(GPUSync, "wf->barrier_cnt %d, bcnt = %d\n",
                        w->barrierCnt, bcnt);
            }

            if (w->status == Wavefront::S_RUNNING &&
                w->barrierId == _barrier_id && w->barrierCnt == bcnt &&
                !w->outstandingReqs) {
                ++ccnt;

                DPRINTF(GPUSync, "WF[%d][%d] at barrier, increment ccnt to "
                        "%d\n", i_simd, i_wf, ccnt);
            }
        }
    }

    DPRINTF(GPUSync, "CU%d: returning allAtBarrier ccnt = %d, bslots = %d\n",
            cu_id, ccnt, bslots);

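    // the barrier is complete only when all bslots WFs of the WG are
    // running, match this barrier instance, and have no outstanding
    // memory requests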
    return ccnt == bslots;
}

// Check if the current wavefront is blocked on additional resources.
bool
ComputeUnit::cedeSIMD(int simdId, int wfSlotId)
{
    bool cede = false;

    // If --xact-cas-mode option is enabled in run.py, then xact_cas_ld
    // magic instructions will impact the scheduling of wavefronts
    if (xact_cas_mode) {
        /*
         * When a wavefront calls xact_cas_ld, it adds itself to a per address
         * queue. All per address queues are managed by the xactCasLoadMap.
         *
         * A wavefront is not blocked if: it is not in ANY per address queue or
         * if it is at the head of a per address queue.
         */
        for (auto itMap : xactCasLoadMap) {
            std::list<waveIdentifier> curWaveIDQueue = itMap.second.waveIDQueue;

            if (!curWaveIDQueue.empty()) {
                for (auto it : curWaveIDQueue) {
                    waveIdentifier cur_wave = it;

                    if (cur_wave.simdId == simdId &&
                        cur_wave.wfSlotId == wfSlotId) {
                        // 2 possibilities
                        // 1: this WF has a green light
                        // 2: another WF has a green light
                        waveIdentifier owner_wave = curWaveIDQueue.front();

                        if (owner_wave.simdId != cur_wave.simdId ||
                            owner_wave.wfSlotId != cur_wave.wfSlotId) {
                            // possibility 2
                            cede = true;
                            break;
                        } else {
                            // possibility 1
                            break;
                        }
                    }
                }
            }
        }
    }

    return cede;
}

// Execute one clock worth of work on the ComputeUnit.
void
ComputeUnit::exec()
{
    updateEvents();
    // Execute pipeline stages in reverse order to simulate
    // the pipeline latency
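    // (visiting the stages back-to-front within one tick lets each stage
    // consume results its downstream neighbor produced in an earlier
    // cycle, so no extra inter-stage buffering is required)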
    globalMemoryPipe.exec();
    localMemoryPipe.exec();
    execStage.exec();
    scheduleStage.exec();
    scoreboardCheckStage.exec();
    fetchStage.exec();

    totalCycles++;
}

void
ComputeUnit::init()
{
    // Initialize CU Bus models
    glbMemToVrfBus.init(&shader->tick_cnt, shader->ticks(1));
    locMemToVrfBus.init(&shader->tick_cnt, shader->ticks(1));
    nextGlbMemBus = 0;
    nextLocMemBus = 0;
    fatal_if(numGlbMemUnits > 1,
             "No support for multiple Global Memory Pipelines exists!!!");
    vrfToGlobalMemPipeBus.resize(numGlbMemUnits);
    for (int j = 0; j < numGlbMemUnits; ++j) {
        vrfToGlobalMemPipeBus[j] = WaitClass();
        vrfToGlobalMemPipeBus[j].init(&shader->tick_cnt, shader->ticks(1));
    }

    fatal_if(numLocMemUnits > 1,
             "No support for multiple Local Memory Pipelines exists!!!");
    vrfToLocalMemPipeBus.resize(numLocMemUnits);
    for (int j = 0; j < numLocMemUnits; ++j) {
        vrfToLocalMemPipeBus[j] = WaitClass();
        vrfToLocalMemPipeBus[j].init(&shader->tick_cnt, shader->ticks(1));
    }
    vectorRegsReserved.resize(numSIMDs, 0);
    aluPipe.resize(numSIMDs);
    wfWait.resize(numSIMDs + numLocMemUnits + numGlbMemUnits);

    for (int i = 0; i < numSIMDs + numLocMemUnits + numGlbMemUnits; ++i) {
        wfWait[i] = WaitClass();
        wfWait[i].init(&shader->tick_cnt, shader->ticks(1));
    }

    for (int i = 0; i < numSIMDs; ++i) {
        aluPipe[i] = WaitClass();
        aluPipe[i].init(&shader->tick_cnt, shader->ticks(1));
    }

    // Setup space for call args
    for (int j = 0; j < numSIMDs; ++j) {
        for (int i = 0; i < shader->n_wf; ++i) {
            wfList[j][i]->initCallArgMem(shader->funcargs_size, wavefrontSize);
        }
    }

    // Initializing pipeline resources
    readyList.resize(numSIMDs + numGlbMemUnits + numLocMemUnits);
    waveStatusList.resize(numSIMDs);

    for (int j = 0; j < numSIMDs; ++j) {
        for (int i = 0; i < shader->n_wf; ++i) {
            waveStatusList[j].push_back(
                std::make_pair(wfList[j][i], BLOCKED));
        }
    }

    for (int j = 0; j < (numSIMDs + numGlbMemUnits + numLocMemUnits); ++j) {
        dispatchList.push_back(std::make_pair((Wavefront*)nullptr, EMPTY));
    }

    fetchStage.init(this);
    scoreboardCheckStage.init(this);
    scheduleStage.init(this);
    execStage.init(this);
    globalMemoryPipe.init(this);
    localMemoryPipe.init(this);
    // initialize state for statistics calculation
    vectorAluInstAvail.resize(numSIMDs, false);
    shrMemInstAvail = 0;
    glbMemInstAvail = 0;
}

bool
ComputeUnit::DataPort::recvTimingResp(PacketPtr pkt)
{
    // Ruby has completed the memory op. Schedule the mem_resp_event at the
    // appropriate cycle to process the timing memory response
    // This delay represents the pipeline delay
    SenderState *sender_state = safe_cast<SenderState*>(pkt->senderState);
    int index = sender_state->port_index;
    GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;

    // Is the packet returned a Kernel End or Barrier
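    // kernel-end (release) and kernel-begin (acquire) packets carry no
    // data; they are consumed here immediately instead of being scheduled
    // into the memory response pipeline below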
    if (pkt->req->isKernel() && pkt->req->isRelease()) {
        Wavefront *w =
            computeUnit->wfList[gpuDynInst->simdId][gpuDynInst->wfSlotId];

        // Check if we are waiting on Kernel End Release
        if (w->status == Wavefront::S_RETURNING) {
            DPRINTF(GPUDisp, "CU%d: WF[%d][%d][wv=%d]: WG id completed %d\n",
                    computeUnit->cu_id, w->simdId, w->wfSlotId,
                    w->wfDynId, w->kernId);

            computeUnit->shader->dispatcher->notifyWgCompl(w);
            w->status = Wavefront::S_STOPPED;
        } else {
            w->outstandingReqs--;
        }

        DPRINTF(GPUSync, "CU%d: WF[%d][%d]: barrier_cnt = %d\n",
                computeUnit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId, w->barrierCnt);

        if (gpuDynInst->useContinuation) {
            assert(gpuDynInst->scope != Enums::MEMORY_SCOPE_NONE);
            gpuDynInst->execContinuation(gpuDynInst->staticInstruction(),
                                         gpuDynInst);
        }

        delete pkt->senderState;
        delete pkt->req;
        delete pkt;
        return true;
    } else if (pkt->req->isKernel() && pkt->req->isAcquire()) {
        if (gpuDynInst->useContinuation) {
            assert(gpuDynInst->scope != Enums::MEMORY_SCOPE_NONE);
            gpuDynInst->execContinuation(gpuDynInst->staticInstruction(),
                                         gpuDynInst);
        }

        delete pkt->senderState;
        delete pkt->req;
        delete pkt;
        return true;
    }

    ComputeUnit::DataPort::MemRespEvent *mem_resp_event =
        new ComputeUnit::DataPort::MemRespEvent(computeUnit->memPort[index],
                                                pkt);

    DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x received!\n",
            computeUnit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
            index, pkt->req->getPaddr());

    computeUnit->schedule(mem_resp_event,
                          curTick() + computeUnit->resp_tick_latency);
    return true;
}

void
ComputeUnit::DataPort::recvReqRetry()
{
    int len = retries.size();

    assert(len > 0);

    for (int i = 0; i < len; ++i) {
        PacketPtr pkt = retries.front().first;
        GPUDynInstPtr gpuDynInst M5_VAR_USED = retries.front().second;
        DPRINTF(GPUMem, "CU%d: WF[%d][%d]: retry mem inst addr %#x\n",
                computeUnit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
                pkt->req->getPaddr());

        /** Currently Ruby can return false due to conflicts for the
         *  particular cache block or address. Thus other requests should be
         *  allowed to pass and the data port should expect multiple retries.
         */
        if (!sendTimingReq(pkt)) {
            DPRINTF(GPUMem, "failed again!\n");
            break;
        } else {
            DPRINTF(GPUMem, "successful!\n");
            retries.pop_front();
        }
    }
}

bool
ComputeUnit::SQCPort::recvTimingResp(PacketPtr pkt)
{
    computeUnit->fetchStage.processFetchReturn(pkt);

    return true;
}

void
ComputeUnit::SQCPort::recvReqRetry()
{
    int len = retries.size();

    assert(len > 0);

    for (int i = 0; i < len; ++i) {
        PacketPtr pkt = retries.front().first;
        Wavefront *wavefront M5_VAR_USED = retries.front().second;
        DPRINTF(GPUFetch, "CU%d: WF[%d][%d]: retrying FETCH addr %#x\n",
                computeUnit->cu_id, wavefront->simdId, wavefront->wfSlotId,
                pkt->req->getPaddr());
        if (!sendTimingReq(pkt)) {
            DPRINTF(GPUFetch, "failed again!\n");
            break;
        } else {
            DPRINTF(GPUFetch, "successful!\n");
            retries.pop_front();
        }
    }
}

void
ComputeUnit::sendRequest(GPUDynInstPtr gpuDynInst, int index, PacketPtr pkt)
{
    // There must be a way around this check to do the globalMemStart...
    Addr tmp_vaddr = pkt->req->getVaddr();

    updatePageDivergenceDist(tmp_vaddr);

    pkt->req->setVirt(pkt->req->getAsid(), tmp_vaddr, pkt->req->getSize(),
                      pkt->req->getFlags(), pkt->req->masterId(),
                      pkt->req->getPC());

    // figure out the type of the request to set read/write
    BaseTLB::Mode TLB_mode;
    assert(pkt->isRead() || pkt->isWrite());

    // Check write before read for atomic operations
    // since atomic operations should use BaseTLB::Write
    if (pkt->isWrite()) {
        TLB_mode = BaseTLB::Write;
    } else if (pkt->isRead()) {
        TLB_mode = BaseTLB::Read;
    } else {
        fatal("pkt is neither a read nor a write\n");
    }

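    // tlbCycles accumulates translation latency: the send tick is
    // subtracted now and the receive tick is added back in
    // DTLBPort::recvTimingResp(), so the running sum equals the total
    // (receive - send) time over all translation requests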
    tlbCycles -= curTick();
    ++tlbRequests;

    int tlbPort_index = perLaneTLB ? index : 0;

    if (shader->timingSim) {
        if (debugSegFault) {
            Process *p = shader->gpuTc->getProcessPtr();
            Addr vaddr = pkt->req->getVaddr();
            unsigned size = pkt->getSize();

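            // flag accesses that straddle a 64-byte boundary: if the
            // offset of the last byte within its 64B block is smaller
            // than the offset of the first byte, the access wrapped into
            // the next block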
            if ((vaddr + size - 1) % 64 < vaddr % 64) {
                panic("CU%d: WF[%d][%d]: Access to addr %#x is unaligned!\n",
                      cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, vaddr);
            }

            Addr paddr;

            if (!p->pTable->translate(vaddr, paddr)) {
                if (!p->fixupStackFault(vaddr)) {
                    panic("CU%d: WF[%d][%d]: Fault on addr %#x!\n",
                          cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
                          vaddr);
                }
            }
        }

        // This is the SenderState needed upon return
        pkt->senderState = new DTLBPort::SenderState(gpuDynInst, index);

        // This is the senderState needed by the TLB hierarchy to function
        TheISA::GpuTLB::TranslationState *translation_state =
            new TheISA::GpuTLB::TranslationState(TLB_mode, shader->gpuTc,
                                                 false, pkt->senderState);

        pkt->senderState = translation_state;

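        // three cases follow: (1) a functional TLB resolves the
        // translation immediately, (2) the TLB port is already stalled,
        // so the packet joins its retry queue, or (3) a timing request
        // is attempted and the port is stalled if the TLB rejects it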
        if (functionalTLB) {
            tlbPort[tlbPort_index]->sendFunctional(pkt);

            // update the hitLevel distribution
            int hit_level = translation_state->hitLevel;
            assert(hit_level != -1);
            hitsPerTLBLevel[hit_level]++;

            // New SenderState for the memory access
            X86ISA::GpuTLB::TranslationState *sender_state =
                safe_cast<X86ISA::GpuTLB::TranslationState*>(pkt->senderState);

            delete sender_state->tlbEntry;
            delete sender_state->saved;
            delete sender_state;

            assert(pkt->req->hasPaddr());
            assert(pkt->req->hasSize());

            uint8_t *tmpData = pkt->getPtr<uint8_t>();

            // this is necessary because the GPU TLB receives packets instead
            // of requests. when the translation is complete, all relevant
            // fields in the request will be populated, but not in the packet.
            // here we create the new packet so we can set the size, addr,
            // and proper flags.
            PacketPtr oldPkt = pkt;
            pkt = new Packet(oldPkt->req, oldPkt->cmd);
            delete oldPkt;
            pkt->dataStatic(tmpData);

            // New SenderState for the memory access
            pkt->senderState =
                new ComputeUnit::DataPort::SenderState(gpuDynInst, index,
                                                       nullptr);

            gpuDynInst->memStatusVector[pkt->getAddr()].push_back(index);
            gpuDynInst->tlbHitLevel[index] = hit_level;

            // translation is done. Schedule the mem_req_event at the
            // appropriate cycle to send the timing memory request to ruby
            ComputeUnit::DataPort::MemReqEvent *mem_req_event =
                new ComputeUnit::DataPort::MemReqEvent(memPort[index], pkt);

            DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x data "
                    "scheduled\n", cu_id, gpuDynInst->simdId,
                    gpuDynInst->wfSlotId, index, pkt->req->getPaddr());

            schedule(mem_req_event, curTick() + req_tick_latency);
        } else if (tlbPort[tlbPort_index]->isStalled()) {
            assert(tlbPort[tlbPort_index]->retries.size() > 0);

            DPRINTF(GPUTLB, "CU%d: WF[%d][%d]: Translation for addr %#x "
                    "failed!\n", cu_id, gpuDynInst->simdId,
                    gpuDynInst->wfSlotId, tmp_vaddr);

            tlbPort[tlbPort_index]->retries.push_back(pkt);
        } else if (!tlbPort[tlbPort_index]->sendTimingReq(pkt)) {
            // Stall the data port;
            // No more packets will be issued until
            // ruby indicates resources are freed by
            // a recvReqRetry() callback on this port.
            tlbPort[tlbPort_index]->stallPort();

            DPRINTF(GPUTLB, "CU%d: WF[%d][%d]: Translation for addr %#x "
                    "failed!\n", cu_id, gpuDynInst->simdId,
                    gpuDynInst->wfSlotId, tmp_vaddr);

            tlbPort[tlbPort_index]->retries.push_back(pkt);
        } else {
            DPRINTF(GPUTLB,
                    "CU%d: WF[%d][%d]: Translation for addr %#x sent!\n",
                    cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
                    tmp_vaddr);
        }
    } else {
        if (pkt->cmd == MemCmd::MemFenceReq) {
            gpuDynInst->statusBitVector = VectorMask(0);
        } else {
            gpuDynInst->statusBitVector &= (~(1ll << index));
        }

        // New SenderState for the memory access
        delete pkt->senderState;

        // Because it's an atomic operation, only the TLB translation state
        // is needed
        pkt->senderState = new TheISA::GpuTLB::TranslationState(TLB_mode,
                                                                shader->gpuTc);

        tlbPort[tlbPort_index]->sendFunctional(pkt);

        // the addr of the packet is not modified, so we need to create a new
        // packet, or otherwise the memory access will have the old virtual
        // address sent in the translation packet, instead of the physical
        // address returned by the translation.
        PacketPtr new_pkt = new Packet(pkt->req, pkt->cmd);
        new_pkt->dataStatic(pkt->getPtr<uint8_t>());

        // Translation is done. It is safe to send the packet to memory.
        memPort[0]->sendFunctional(new_pkt);

        DPRINTF(GPUMem, "CU%d: WF[%d][%d]: index %d: addr %#x\n", cu_id,
                gpuDynInst->simdId, gpuDynInst->wfSlotId, index,
                new_pkt->req->getPaddr());

        // safe_cast the senderState
        TheISA::GpuTLB::TranslationState *sender_state =
            safe_cast<TheISA::GpuTLB::TranslationState*>(pkt->senderState);

        delete sender_state->tlbEntry;
        delete new_pkt;
        delete pkt->senderState;
        delete pkt->req;
        delete pkt;
    }
}

void
ComputeUnit::sendSyncRequest(GPUDynInstPtr gpuDynInst, int index, PacketPtr pkt)
{
    ComputeUnit::DataPort::MemReqEvent *mem_req_event =
        new ComputeUnit::DataPort::MemReqEvent(memPort[index], pkt);

    // New SenderState for the memory access
    pkt->senderState =
        new ComputeUnit::DataPort::SenderState(gpuDynInst, index, nullptr);

    DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x sync scheduled\n",
            cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, index,
            pkt->req->getPaddr());

    schedule(mem_req_event, curTick() + req_tick_latency);
}

void
ComputeUnit::injectGlobalMemFence(GPUDynInstPtr gpuDynInst, bool kernelLaunch,
                                  Request* req)
{
    if (!req) {
        req = new Request(0, 0, 0, 0, masterId(), 0, gpuDynInst->wfDynId);
    }
    req->setPaddr(0);
    if (kernelLaunch) {
        req->setFlags(Request::KERNEL);
    }

    gpuDynInst->s_type = SEG_GLOBAL;

    // for non-kernel MemFence operations, memorder flags are set depending
    // on which type of request is currently being sent, so this
    // should be set by the caller (e.g. if an inst has acq-rel
    // semantics, it will send one acquire req and one release req)
    gpuDynInst->setRequestFlags(req, kernelLaunch);

    // a mem fence must correspond to an acquire/release request
    assert(req->isAcquire() || req->isRelease());

    // create packet
    PacketPtr pkt = new Packet(req, MemCmd::MemFenceReq);

    // set packet's sender state
    pkt->senderState =
        new ComputeUnit::DataPort::SenderState(gpuDynInst, 0, nullptr);

    // send the packet
    sendSyncRequest(gpuDynInst, 0, pkt);
}

const char*
ComputeUnit::DataPort::MemRespEvent::description() const
{
    return "ComputeUnit memory response event";
}

void
ComputeUnit::DataPort::MemRespEvent::process()
{
    DataPort::SenderState *sender_state =
        safe_cast<DataPort::SenderState*>(pkt->senderState);

    GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;
    ComputeUnit *compute_unit = dataPort->computeUnit;

    assert(gpuDynInst);

    DPRINTF(GPUPort, "CU%d: WF[%d][%d]: Response for addr %#x, index %d\n",
            compute_unit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
            pkt->req->getPaddr(), dataPort->index);

    Addr paddr = pkt->req->getPaddr();

    if (pkt->cmd != MemCmd::MemFenceResp) {
        int index = gpuDynInst->memStatusVector[paddr].back();

        DPRINTF(GPUMem, "Response for addr %#x, index %d\n",
                pkt->req->getPaddr(), index);

        gpuDynInst->memStatusVector[paddr].pop_back();
        gpuDynInst->pAddr = pkt->req->getPaddr();

        if (pkt->isRead() || pkt->isWrite()) {

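            // narrow accesses track at most one outstanding packet per
            // lane via a bit in statusBitVector; wider accesses can have
            // several packets in flight per lane, so statusVector keeps a
            // per-lane count and the lane's bit is cleared only when the
            // count reaches zero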
            if (gpuDynInst->n_reg <= MAX_REGS_FOR_NON_VEC_MEM_INST) {
                gpuDynInst->statusBitVector &= (~(1ULL << index));
            } else {
                assert(gpuDynInst->statusVector[index] > 0);
                gpuDynInst->statusVector[index]--;

                if (!gpuDynInst->statusVector[index])
                    gpuDynInst->statusBitVector &= (~(1ULL << index));
            }

            DPRINTF(GPUMem, "bitvector is now %#x\n",
                    gpuDynInst->statusBitVector);

            if (gpuDynInst->statusBitVector == VectorMask(0)) {
                auto iter = gpuDynInst->memStatusVector.begin();
                auto end = gpuDynInst->memStatusVector.end();

                while (iter != end) {
                    assert(iter->second.empty());
                    ++iter;
                }

                gpuDynInst->memStatusVector.clear();

                if (gpuDynInst->n_reg > MAX_REGS_FOR_NON_VEC_MEM_INST)
                    gpuDynInst->statusVector.clear();

                if (gpuDynInst->m_op == Enums::MO_LD || MO_A(gpuDynInst->m_op)
                    || MO_ANR(gpuDynInst->m_op)) {
                    assert(compute_unit->globalMemoryPipe.isGMLdRespFIFOWrRdy());

                    compute_unit->globalMemoryPipe.getGMLdRespFIFO()
                        .push(gpuDynInst);
                } else {
                    assert(compute_unit->globalMemoryPipe.isGMStRespFIFOWrRdy());

                    compute_unit->globalMemoryPipe.getGMStRespFIFO()
                        .push(gpuDynInst);
                }

                DPRINTF(GPUMem, "CU%d: WF[%d][%d]: packet totally complete\n",
                        compute_unit->cu_id, gpuDynInst->simdId,
                        gpuDynInst->wfSlotId);

                // after clearing the status vectors,
                // see if there is a continuation to perform
                // the continuation may generate more work for
                // this memory request
                if (gpuDynInst->useContinuation) {
                    assert(gpuDynInst->scope != Enums::MEMORY_SCOPE_NONE);
                    gpuDynInst->execContinuation(
                        gpuDynInst->staticInstruction(), gpuDynInst);
                }
            }
        }
    } else {
        gpuDynInst->statusBitVector = VectorMask(0);

        if (gpuDynInst->useContinuation) {
            assert(gpuDynInst->scope != Enums::MEMORY_SCOPE_NONE);
            gpuDynInst->execContinuation(gpuDynInst->staticInstruction(),
                                         gpuDynInst);
        }
    }

    delete pkt->senderState;
    delete pkt->req;
    delete pkt;
}

ComputeUnit*
ComputeUnitParams::create()
{
    return new ComputeUnit(this);
}

bool
ComputeUnit::DTLBPort::recvTimingResp(PacketPtr pkt)
{
    Addr line = pkt->req->getPaddr();

    DPRINTF(GPUTLB, "CU%d: DTLBPort received %#x->%#x\n", computeUnit->cu_id,
            pkt->req->getVaddr(), line);

    assert(pkt->senderState);
    computeUnit->tlbCycles += curTick();

    // pop off the TLB translation state
    TheISA::GpuTLB::TranslationState *translation_state =
        safe_cast<TheISA::GpuTLB::TranslationState*>(pkt->senderState);

    // no PageFaults are permitted for data accesses
    if (!translation_state->tlbEntry->valid) {
        DTLBPort::SenderState *sender_state =
            safe_cast<DTLBPort::SenderState*>(translation_state->saved);

        Wavefront *w M5_VAR_USED =
            computeUnit->wfList[sender_state->_gpuDynInst->simdId]
                               [sender_state->_gpuDynInst->wfSlotId];

        DPRINTFN("Wave %d couldn't translate vaddr %#x\n", w->wfDynId,
                 pkt->req->getVaddr());
    }

    assert(translation_state->tlbEntry->valid);

    // update the hitLevel distribution
    int hit_level = translation_state->hitLevel;
    computeUnit->hitsPerTLBLevel[hit_level]++;

    delete translation_state->tlbEntry;
    assert(!translation_state->ports.size());
    pkt->senderState = translation_state->saved;

    // for prefetch pkt
    BaseTLB::Mode TLB_mode = translation_state->tlbMode;

    delete translation_state;

    // use the original sender state to know how to close this transaction
    DTLBPort::SenderState *sender_state =
        safe_cast<DTLBPort::SenderState*>(pkt->senderState);

    GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;
    int mp_index = sender_state->portIndex;
    Addr vaddr = pkt->req->getVaddr();
    gpuDynInst->memStatusVector[line].push_back(mp_index);
    gpuDynInst->tlbHitLevel[mp_index] = hit_level;

    MemCmd requestCmd;

    if (pkt->cmd == MemCmd::ReadResp) {
        requestCmd = MemCmd::ReadReq;
    } else if (pkt->cmd == MemCmd::WriteResp) {
        requestCmd = MemCmd::WriteReq;
    } else if (pkt->cmd == MemCmd::SwapResp) {
        requestCmd = MemCmd::SwapReq;
    } else {
        panic("unsupported response to request conversion %s\n",
              pkt->cmd.toString());
    }

    if (computeUnit->prefetchDepth) {
        int simdId = gpuDynInst->simdId;
        int wfSlotId = gpuDynInst->wfSlotId;
        Addr last = 0;

        switch (computeUnit->prefetchType) {
        case Enums::PF_CU:
            last = computeUnit->lastVaddrCU[mp_index];
            break;
        case Enums::PF_PHASE:
            last = computeUnit->lastVaddrSimd[simdId][mp_index];
            break;
        case Enums::PF_WF:
            last = computeUnit->lastVaddrWF[simdId][wfSlotId][mp_index];
            break;
        default:
            break;
        }

        DPRINTF(GPUPrefetch, "CU[%d][%d][%d][%d]: %#x was last\n",
                computeUnit->cu_id, simdId, wfSlotId, mp_index, last);

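        // the stride is measured in whole pages between the current and
        // previously recorded vaddr; a zero stride (same page, or no
        // history yet) disables prefetching for this access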
        int stride = last ? (roundDown(vaddr, TheISA::PageBytes) -
                             roundDown(last, TheISA::PageBytes))
                            >> TheISA::PageShift
                          : 0;

        DPRINTF(GPUPrefetch, "Stride is %d\n", stride);

        computeUnit->lastVaddrCU[mp_index] = vaddr;
        computeUnit->lastVaddrSimd[simdId][mp_index] = vaddr;
        computeUnit->lastVaddrWF[simdId][wfSlotId][mp_index] = vaddr;

        stride = (computeUnit->prefetchType == Enums::PF_STRIDE) ?
            computeUnit->prefetchStride : stride;

        DPRINTF(GPUPrefetch, "%#x to: CU[%d][%d][%d][%d]\n", vaddr,
                computeUnit->cu_id, simdId, wfSlotId, mp_index);

        DPRINTF(GPUPrefetch, "Prefetching from %#x:", vaddr);

        // Prefetch next few pages atomically
        for (int pf = 1; pf <= computeUnit->prefetchDepth; ++pf) {
            DPRINTF(GPUPrefetch, "%d * %d: %#x\n", pf, stride,
                    vaddr + stride * pf * TheISA::PageBytes);

            if (!stride)
                break;

            Request *prefetch_req = new Request(0, vaddr + stride * pf *
                                                TheISA::PageBytes,
                                                sizeof(uint8_t), 0,
                                                computeUnit->masterId(),
                                                0, 0, 0);

            PacketPtr prefetch_pkt = new Packet(prefetch_req, requestCmd);
            uint8_t foo = 0;
            prefetch_pkt->dataStatic(&foo);

            // Because it's an atomic operation, only the TLB translation
            // state is needed
            prefetch_pkt->senderState =
                new TheISA::GpuTLB::TranslationState(TLB_mode,
                                                     computeUnit->shader->gpuTc,
                                                     true);

            // Currently prefetches are zero-latency, hence the sendFunctional
            sendFunctional(prefetch_pkt);

            /* safe_cast the senderState */
            TheISA::GpuTLB::TranslationState *tlb_state =
                safe_cast<TheISA::GpuTLB::TranslationState*>(
                    prefetch_pkt->senderState);

            delete tlb_state->tlbEntry;
            delete tlb_state;
            delete prefetch_pkt->req;
            delete prefetch_pkt;
        }
    }

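    // the prefetch packets and their translation results are discarded
    // above; the functional sends exist only for their side effect of
    // warming the GPU TLB hierarchy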
    // First we must convert the response cmd back to a request cmd so that
    // the request can be sent through the cu's master port
    PacketPtr new_pkt = new Packet(pkt->req, requestCmd);
    new_pkt->dataStatic(pkt->getPtr<uint8_t>());
    delete pkt->senderState;
    delete pkt;

    // New SenderState for the memory access
    new_pkt->senderState =
        new ComputeUnit::DataPort::SenderState(gpuDynInst, mp_index,
                                               nullptr);

    // translation is done. Schedule the mem_req_event at the appropriate
    // cycle to send the timing memory request to ruby
    ComputeUnit::DataPort::MemReqEvent *mem_req_event =
        new ComputeUnit::DataPort::MemReqEvent(computeUnit->memPort[mp_index],
                                               new_pkt);

    DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x data scheduled\n",
            computeUnit->cu_id, gpuDynInst->simdId,
            gpuDynInst->wfSlotId, mp_index, new_pkt->req->getPaddr());

    computeUnit->schedule(mem_req_event, curTick() +
                          computeUnit->req_tick_latency);

    return true;
}

const char*
ComputeUnit::DataPort::MemReqEvent::description() const
{
    return "ComputeUnit memory request event";
}

void
ComputeUnit::DataPort::MemReqEvent::process()
{
    SenderState *sender_state = safe_cast<SenderState*>(pkt->senderState);
    GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;
    ComputeUnit *compute_unit M5_VAR_USED = dataPort->computeUnit;

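    // if Ruby cannot accept the request, park it on the port's retry
    // list; DataPort::recvReqRetry() resends the queued packets in order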
    if (!(dataPort->sendTimingReq(pkt))) {
        dataPort->retries.push_back(std::make_pair(pkt, gpuDynInst));

        DPRINTF(GPUPort,
                "CU%d: WF[%d][%d]: index %d, addr %#x data req failed!\n",
                compute_unit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId, dataPort->index,
                pkt->req->getPaddr());
    } else {
        DPRINTF(GPUPort,
                "CU%d: WF[%d][%d]: index %d, addr %#x data req sent!\n",
                compute_unit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId, dataPort->index,
                pkt->req->getPaddr());
    }
}

/*
 * The initial translation request could have been rejected, if
 * <retries> queue is not empty. Retry sending the translation
 * request. sendRetry() is called from the peer port whenever
 * a translation completes.
 */
void
ComputeUnit::DTLBPort::recvReqRetry()
{
    int len = retries.size();

    DPRINTF(GPUTLB, "CU%d: DTLB recvReqRetry - %d pending requests\n",
            computeUnit->cu_id, len);

    assert(len > 0);
    assert(isStalled());
    // recvReqRetry is an indication that the resource on which this
    // port was stalling on is freed. So, remove the stall first
    unstallPort();

    for (int i = 0; i < len; ++i) {
        PacketPtr pkt = retries.front();
        Addr vaddr M5_VAR_USED = pkt->req->getVaddr();
        DPRINTF(GPUTLB, "CU%d: retrying D-translation for address %#x",
                computeUnit->cu_id, vaddr);

        if (!sendTimingReq(pkt)) {
            // Stall port
            stallPort();
            DPRINTF(GPUTLB, ": failed again\n");
            break;
        } else {
            DPRINTF(GPUTLB, ": successful\n");
            retries.pop_front();
        }
    }
}

bool
ComputeUnit::ITLBPort::recvTimingResp(PacketPtr pkt)
{
    Addr line M5_VAR_USED = pkt->req->getPaddr();
    DPRINTF(GPUTLB, "CU%d: ITLBPort received %#x->%#x\n",
            computeUnit->cu_id, pkt->req->getVaddr(), line);

    assert(pkt->senderState);

    // pop off the TLB translation state
    TheISA::GpuTLB::TranslationState *translation_state =
        safe_cast<TheISA::GpuTLB::TranslationState*>(pkt->senderState);

    bool success = translation_state->tlbEntry->valid;
    delete translation_state->tlbEntry;
    assert(!translation_state->ports.size());
    pkt->senderState = translation_state->saved;
    delete translation_state;

    // use the original sender state to know how to close this transaction
    ITLBPort::SenderState *sender_state =
        safe_cast<ITLBPort::SenderState*>(pkt->senderState);

    // get the wavefront associated with this translation request
    Wavefront *wavefront = sender_state->wavefront;
    delete pkt->senderState;

    if (success) {
        // pkt is reused in fetch(), don't delete it here. However, we must
        // reset the command to be a request so that it can be sent through
        // the cu's master port
        assert(pkt->cmd == MemCmd::ReadResp);
        pkt->cmd = MemCmd::ReadReq;

        computeUnit->fetchStage.fetch(pkt, wavefront);
    } else {
        if (wavefront->dropFetch) {
            assert(wavefront->instructionBuffer.empty());
            wavefront->dropFetch = false;
        }

        wavefront->pendingFetch = 0;
    }

    return true;
}

/*
 * The initial translation request could have been rejected, if
 * <retries> queue is not empty. Retry sending the translation
 * request. sendRetry() is called from the peer port whenever
 * a translation completes.
 */
void
ComputeUnit::ITLBPort::recvReqRetry()
{
    int len = retries.size();
    DPRINTF(GPUTLB, "CU%d: ITLB recvReqRetry - %d pending requests\n",
            computeUnit->cu_id, len);

    assert(len > 0);
    assert(isStalled());

    // recvReqRetry is an indication that the resource on which this
    // port was stalling on is freed. So, remove the stall first
    unstallPort();

    for (int i = 0; i < len; ++i) {
        PacketPtr pkt = retries.front();
        Addr vaddr M5_VAR_USED = pkt->req->getVaddr();
        DPRINTF(GPUTLB, "CU%d: retrying I-translation for address %#x",
                computeUnit->cu_id, vaddr);

        if (!sendTimingReq(pkt)) {
            stallPort(); // Stall port
            DPRINTF(GPUTLB, ": failed again\n");
            break;
        } else {
            DPRINTF(GPUTLB, ": successful\n");
            retries.pop_front();
        }
    }
}

void
ComputeUnit::regStats()
{
    MemObject::regStats();

    tlbCycles
        .name(name() + ".tlb_cycles")
        .desc("total number of cycles for all uncoalesced requests")
        ;

    tlbRequests
        .name(name() + ".tlb_requests")
        .desc("number of uncoalesced requests")
        ;

    tlbLatency
        .name(name() + ".avg_translation_latency")
        .desc("Avg. translation latency for data translations")
        ;

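    // tlbLatency is a derived stat: the accumulated (receive - send)
    // tick sum divided by the number of uncoalesced requests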
    tlbLatency = tlbCycles / tlbRequests;

    hitsPerTLBLevel
        .init(4)
        .name(name() + ".TLB_hits_distribution")
        .desc("TLB hits distribution (0 for page table, x for Lx-TLB)")
        ;

    // fixed number of TLB levels
    for (int i = 0; i < 4; ++i) {
        if (!i)
            hitsPerTLBLevel.subname(i, "page_table");
        else
            hitsPerTLBLevel.subname(i, csprintf("L%d_TLB", i));
    }

    execRateDist
        .init(0, 10, 2)
        .name(name() + ".inst_exec_rate")
        .desc("Instruction Execution Rate: Number of executed vector "
              "instructions per cycle")
        ;

    ldsBankConflictDist
        .init(0, wfSize(), 2)
        .name(name() + ".lds_bank_conflicts")
        .desc("Number of bank conflicts per LDS memory packet")
        ;

    ldsBankAccesses
        .name(name() + ".lds_bank_access_cnt")
        .desc("Total number of LDS bank accesses")
        ;

    pageDivergenceDist
        // A wavefront can touch up to N pages per memory instruction where
        // N is equal to the wavefront size
        // The number of pages per bin can be configured (here it's 4).
        .init(1, wfSize(), 4)
        .name(name() + ".page_divergence_dist")
        .desc("pages touched per wf (over all mem. instr.)")
        ;

    controlFlowDivergenceDist
        .init(1, wfSize(), 4)
        .name(name() + ".warp_execution_dist")
        .desc("number of lanes active per instruction (over all instructions)")
        ;

    activeLanesPerGMemInstrDist
        .init(1, wfSize(), 4)
        .name(name() + ".gmem_lanes_execution_dist")
        .desc("number of active lanes per global memory instruction")
        ;

    activeLanesPerLMemInstrDist
        .init(1, wfSize(), 4)
        .name(name() + ".lmem_lanes_execution_dist")
        .desc("number of active lanes per local memory instruction")
        ;

    numInstrExecuted
        .name(name() + ".num_instr_executed")
        .desc("number of instructions executed")
        ;

    numVecOpsExecuted
        .name(name() + ".num_vec_ops_executed")
        .desc("number of vec ops executed (e.g. WF size/inst)")
        ;

    totalCycles
        .name(name() + ".num_total_cycles")
        .desc("number of cycles the CU ran for")
        ;

    ipc
        .name(name() + ".ipc")
        .desc("Instructions per cycle (this CU only)")
        ;

    vpc
        .name(name() + ".vpc")
        .desc("Vector Operations per cycle (this CU only)")
        ;

    numALUInstsExecuted
        .name(name() + ".num_alu_insts_executed")
        .desc("Number of dynamic non-GM memory insts executed")
        ;

    wgBlockedDueLdsAllocation
        .name(name() + ".wg_blocked_due_lds_alloc")
        .desc("Workgroup blocked due to LDS capacity")
        ;

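    // ipc and vpc are formula stats; the ratios below are evaluated when
    // stats are dumped rather than updated incrementally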
WF size/inst)") 1509 ; 1510 1511 totalCycles 1512 .name(name() + ".num_total_cycles") 1513 .desc("number of cycles the CU ran for") 1514 ; 1515 1516 ipc 1517 .name(name() + ".ipc") 1518 .desc("Instructions per cycle (this CU only)") 1519 ; 1520 1521 vpc 1522 .name(name() + ".vpc") 1523 .desc("Vector Operations per cycle (this CU only)") 1524 ; 1525 1526 numALUInstsExecuted 1527 .name(name() + ".num_alu_insts_executed") 1528 .desc("Number of dynamic non-GM memory insts executed") 1529 ; 1530 1531 wgBlockedDueLdsAllocation 1532 .name(name() + ".wg_blocked_due_lds_alloc") 1533 .desc("Workgroup blocked due to LDS capacity") 1534 ; 1535 1536 ipc = numInstrExecuted / totalCycles; 1537 vpc = numVecOpsExecuted / totalCycles; 1538 1539 numTimesWgBlockedDueVgprAlloc 1540 .name(name() + ".times_wg_blocked_due_vgpr_alloc") 1541 .desc("Number of times WGs are blocked due to VGPR allocation per SIMD") 1542 ; 1543 1544 dynamicGMemInstrCnt 1545 .name(name() + ".global_mem_instr_cnt") 1546 .desc("dynamic global memory instructions count") 1547 ; 1548 1549 dynamicLMemInstrCnt 1550 .name(name() + ".local_mem_instr_cnt") 1551 .desc("dynamic local memory intruction count") 1552 ; 1553 1554 numALUInstsExecuted = numInstrExecuted - dynamicGMemInstrCnt - 1555 dynamicLMemInstrCnt; 1556 1557 completedWfs 1558 .name(name() + ".num_completed_wfs") 1559 .desc("number of completed wavefronts") 1560 ; 1561 1562 numCASOps 1563 .name(name() + ".num_CAS_ops") 1564 .desc("number of compare and swap operations") 1565 ; 1566 1567 numFailedCASOps 1568 .name(name() + ".num_failed_CAS_ops") 1569 .desc("number of compare and swap operations that failed") 1570 ; 1571 1572 // register stats of pipeline stages 1573 fetchStage.regStats(); 1574 scoreboardCheckStage.regStats(); 1575 scheduleStage.regStats(); 1576 execStage.regStats(); 1577 1578 // register stats of memory pipeline 1579 globalMemoryPipe.regStats(); 1580 localMemoryPipe.regStats(); 1581} 1582 1583void 1584ComputeUnit::updatePageDivergenceDist(Addr addr) 1585{ 1586 Addr virt_page_addr = roundDown(addr, TheISA::PageBytes); 1587 1588 if (!pagesTouched.count(virt_page_addr)) 1589 pagesTouched[virt_page_addr] = 1; 1590 else 1591 pagesTouched[virt_page_addr]++; 1592} 1593 1594void 1595ComputeUnit::CUExitCallback::process() 1596{ 1597 if (computeUnit->countPages) { 1598 std::ostream *page_stat_file = 1599 simout.create(computeUnit->name().c_str())->stream(); 1600 1601 *page_stat_file << "page, wavefront accesses, workitem accesses" << 1602 std::endl; 1603 1604 for (auto iter : computeUnit->pageAccesses) { 1605 *page_stat_file << std::hex << iter.first << ","; 1606 *page_stat_file << std::dec << iter.second.first << ","; 1607 *page_stat_file << std::dec << iter.second.second << std::endl; 1608 } 1609 } 1610 } 1611 1612bool 1613ComputeUnit::isDone() const 1614{ 1615 for (int i = 0; i < numSIMDs; ++i) { 1616 if (!isSimdDone(i)) { 1617 return false; 1618 } 1619 } 1620 1621 bool glbMemBusRdy = true; 1622 for (int j = 0; j < numGlbMemUnits; ++j) { 1623 glbMemBusRdy &= vrfToGlobalMemPipeBus[j].rdy(); 1624 } 1625 bool locMemBusRdy = true; 1626 for (int j = 0; j < numLocMemUnits; ++j) { 1627 locMemBusRdy &= vrfToLocalMemPipeBus[j].rdy(); 1628 } 1629 1630 if (!globalMemoryPipe.isGMLdRespFIFOWrRdy() || 1631 !globalMemoryPipe.isGMStRespFIFOWrRdy() || 1632 !globalMemoryPipe.isGMReqFIFOWrRdy() || !localMemoryPipe.isLMReqFIFOWrRdy() 1633 || !localMemoryPipe.isLMRespFIFOWrRdy() || !locMemToVrfBus.rdy() || 1634 !glbMemToVrfBus.rdy() || !locMemBusRdy || !glbMemBusRdy) { 1635 return false; 1636 } 
    bool glbMemBusRdy = true;
    for (int j = 0; j < numGlbMemUnits; ++j) {
        glbMemBusRdy &= vrfToGlobalMemPipeBus[j].rdy();
    }
    bool locMemBusRdy = true;
    for (int j = 0; j < numLocMemUnits; ++j) {
        locMemBusRdy &= vrfToLocalMemPipeBus[j].rdy();
    }

    if (!globalMemoryPipe.isGMLdRespFIFOWrRdy() ||
        !globalMemoryPipe.isGMStRespFIFOWrRdy() ||
        !globalMemoryPipe.isGMReqFIFOWrRdy() ||
        !localMemoryPipe.isLMReqFIFOWrRdy() ||
        !localMemoryPipe.isLMRespFIFOWrRdy() || !locMemToVrfBus.rdy() ||
        !glbMemToVrfBus.rdy() || !locMemBusRdy || !glbMemBusRdy) {
        return false;
    }

    return true;
}

int32_t
ComputeUnit::getRefCounter(const uint32_t dispatchId, const uint32_t wgId) const
{
    return lds.getRefCounter(dispatchId, wgId);
}

bool
ComputeUnit::isSimdDone(uint32_t simdId) const
{
    assert(simdId < numSIMDs);

    for (int i = 0; i < numGlbMemUnits; ++i) {
        if (!vrfToGlobalMemPipeBus[i].rdy())
            return false;
    }
    for (int i = 0; i < numLocMemUnits; ++i) {
        if (!vrfToLocalMemPipeBus[i].rdy())
            return false;
    }
    if (!aluPipe[simdId].rdy()) {
        return false;
    }

    for (int i_wf = 0; i_wf < shader->n_wf; ++i_wf) {
        if (wfList[simdId][i_wf]->status != Wavefront::S_STOPPED) {
            return false;
        }
    }

    return true;
}

/**
 * send a general request to the LDS
 * make sure to look at the return value here as your request might be
 * NACK'd and returning false means that you have to have some backup plan
 */
bool
ComputeUnit::sendToLds(GPUDynInstPtr gpuDynInst)
{
    // this is just a request to carry the GPUDynInstPtr
    // back and forth
    Request *newRequest = new Request();
    newRequest->setPaddr(0x0);

    // ReadReq is not evaluated by the LDS but the Packet ctor requires this
    PacketPtr newPacket = new Packet(newRequest, MemCmd::ReadReq);

    // This is the SenderState needed upon return
    newPacket->senderState = new LDSPort::SenderState(gpuDynInst);

    return ldsPort->sendTimingReq(newPacket);
}

/**
 * get the result of packets sent to the LDS when they return
 */
bool
ComputeUnit::LDSPort::recvTimingResp(PacketPtr packet)
{
    const ComputeUnit::LDSPort::SenderState *senderState =
        dynamic_cast<ComputeUnit::LDSPort::SenderState *>(packet->senderState);

    fatal_if(!senderState, "did not get the right sort of sender state");

    GPUDynInstPtr gpuDynInst = senderState->getMemInst();

    delete packet->senderState;
    delete packet->req;
    delete packet;

    computeUnit->localMemoryPipe.getLMRespFIFO().push(gpuDynInst);
    return true;
}

/**
 * attempt to send this packet: either the port is already stalled, or the
 * request is NACK'd and must stall, or the request goes through;
 * when a request cannot be sent, it is added to the retries queue
 */
bool
ComputeUnit::LDSPort::sendTimingReq(PacketPtr pkt)
{
    ComputeUnit::LDSPort::SenderState *sender_state =
        dynamic_cast<ComputeUnit::LDSPort::SenderState*>(pkt->senderState);
    fatal_if(!sender_state, "packet without a valid sender state");

    GPUDynInstPtr gpuDynInst M5_VAR_USED = sender_state->getMemInst();

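    // a stalled port must already hold at least one queued packet; later
    // packets queue behind it so that requests are retried in order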
    if (isStalled()) {
        fatal_if(retries.empty(), "must have retries waiting to be stalled");

        retries.push(pkt);

        DPRINTF(GPUPort, "CU%d: WF[%d][%d]: LDS send failed!\n",
                computeUnit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId);
        return false;
    } else if (!MasterPort::sendTimingReq(pkt)) {
        // need to stall the LDS port until a recvReqRetry() is received
        // this indicates that there is more space
        stallPort();
        retries.push(pkt);

        DPRINTF(GPUPort, "CU%d: WF[%d][%d]: addr %#x lds req failed!\n",
                computeUnit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId, pkt->req->getPaddr());
        return false;
    } else {
        DPRINTF(GPUPort, "CU%d: WF[%d][%d]: addr %#x lds req sent!\n",
                computeUnit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId, pkt->req->getPaddr());
        return true;
    }
}

/**
 * the bus is telling the port that there is now space, so retrying stalled
 * requests should work now
 * this allows the port to have a request be NACK'd and then have the
 * receiver say when there is space, rather than simply retrying the send
 * every cycle
 */
void
ComputeUnit::LDSPort::recvReqRetry()
{
    auto queueSize = retries.size();

    DPRINTF(GPUPort, "CU%d: LDSPort recvReqRetry - %d pending requests\n",
            computeUnit->cu_id, queueSize);

    fatal_if(queueSize < 1,
             "why was there a recvReqRetry() with no pending reqs?");
    fatal_if(!isStalled(),
             "recvReqRetry() happened when the port was not stalled");

    unstallPort();

    while (!retries.empty()) {
        PacketPtr packet = retries.front();

        DPRINTF(GPUPort, "CU%d: retrying LDS send\n", computeUnit->cu_id);

        if (!MasterPort::sendTimingReq(packet)) {
            // Stall port
            stallPort();
            DPRINTF(GPUPort, ": LDS send failed again\n");
            break;
        } else {
            DPRINTF(GPUPort, ": LDS send successful\n");
            retries.pop();
        }
    }
}
| 357 ++wave_id; 358 } 359 } 360 ++barrier_id; 361} 362 363int 364ComputeUnit::ReadyWorkgroup(NDRange *ndr) 365{ 366 // Get true size of workgroup (after clamping to grid size) 367 int trueWgSize[3]; 368 int trueWgSizeTotal = 1; 369 370 for (int d = 0; d < 3; ++d) { 371 trueWgSize[d] = std::min(ndr->q.wgSize[d], ndr->q.gdSize[d] - 372 ndr->wgId[d] * ndr->q.wgSize[d]); 373 374 trueWgSizeTotal *= trueWgSize[d]; 375 DPRINTF(GPUDisp, "trueWgSize[%d] = %d\n", d, trueWgSize[d]); 376 } 377 378 DPRINTF(GPUDisp, "trueWgSizeTotal = %d\n", trueWgSizeTotal); 379 380 // calculate the number of 32-bit vector registers required by each 381 // work item of the work group 382 int vregDemandPerWI = ndr->q.sRegCount + (2 * ndr->q.dRegCount); 383 bool vregAvail = true; 384 int numWfs = (trueWgSizeTotal + wfSize() - 1) / wfSize(); 385 int freeWfSlots = 0; 386 // check if the total number of VGPRs required by all WFs of the WG 387 // fit in the VRFs of all SIMD units 388 assert((numWfs * vregDemandPerWI) <= (numSIMDs * numVecRegsPerSimd)); 389 int numMappedWfs = 0; 390 std::vector<int> numWfsPerSimd; 391 numWfsPerSimd.resize(numSIMDs, 0); 392 // find how many free WF slots we have across all SIMDs 393 for (int j = 0; j < shader->n_wf; ++j) { 394 for (int i = 0; i < numSIMDs; ++i) { 395 if (wfList[i][j]->status == Wavefront::S_STOPPED) { 396 // count the number of free WF slots 397 ++freeWfSlots; 398 if (numMappedWfs < numWfs) { 399 // count the WFs to be assigned per SIMD 400 numWfsPerSimd[i]++; 401 } 402 numMappedWfs++; 403 } 404 } 405 } 406 407 // if there are enough free WF slots then find if there are enough 408 // free VGPRs per SIMD based on the WF->SIMD mapping 409 if (freeWfSlots >= numWfs) { 410 for (int j = 0; j < numSIMDs; ++j) { 411 // find if there are enough free VGPR regions in the SIMD's VRF 412 // to accommodate the WFs of the new WG that would be mapped to 413 // this SIMD unit 414 vregAvail = vrf[j]->manager->canAllocate(numWfsPerSimd[j], 415 vregDemandPerWI); 416 417 // stop searching if there is at least one SIMD 418 // whose VRF does not have enough free VGPR pools. 
419 // This is because a WG is scheduled only if ALL 420 // of its WFs can be scheduled 421 if (!vregAvail) 422 break; 423 } 424 } 425 426 DPRINTF(GPUDisp, "Free WF slots = %d, VGPR Availability = %d\n", 427 freeWfSlots, vregAvail); 428 429 if (!vregAvail) { 430 ++numTimesWgBlockedDueVgprAlloc; 431 } 432 433 // Return true if enough WF slots to submit workgroup and if there are 434 // enough VGPRs to schedule all WFs to their SIMD units 435 if (!lds.canReserve(ndr->q.ldsSize)) { 436 wgBlockedDueLdsAllocation++; 437 } 438 439 // Return true if (a) there are enough free WF slots to submit 440 // workgrounp and (b) if there are enough VGPRs to schedule all WFs to their 441 // SIMD units and (c) if there is enough space in LDS 442 return freeWfSlots >= numWfs && vregAvail && lds.canReserve(ndr->q.ldsSize); 443} 444 445int 446ComputeUnit::AllAtBarrier(uint32_t _barrier_id, uint32_t bcnt, uint32_t bslots) 447{ 448 DPRINTF(GPUSync, "CU%d: Checking for All At Barrier\n", cu_id); 449 int ccnt = 0; 450 451 for (int i_simd = 0; i_simd < numSIMDs; ++i_simd) { 452 for (int i_wf = 0; i_wf < shader->n_wf; ++i_wf) { 453 Wavefront *w = wfList[i_simd][i_wf]; 454 455 if (w->status == Wavefront::S_RUNNING) { 456 DPRINTF(GPUSync, "Checking WF[%d][%d]\n", i_simd, i_wf); 457 458 DPRINTF(GPUSync, "wf->barrier_id = %d, _barrier_id = %d\n", 459 w->barrierId, _barrier_id); 460 461 DPRINTF(GPUSync, "wf->barrier_cnt %d, bcnt = %d\n", 462 w->barrierCnt, bcnt); 463 } 464 465 if (w->status == Wavefront::S_RUNNING && 466 w->barrierId == _barrier_id && w->barrierCnt == bcnt && 467 !w->outstandingReqs) { 468 ++ccnt; 469 470 DPRINTF(GPUSync, "WF[%d][%d] at barrier, increment ccnt to " 471 "%d\n", i_simd, i_wf, ccnt); 472 } 473 } 474 } 475 476 DPRINTF(GPUSync, "CU%d: returning allAtBarrier ccnt = %d, bslots = %d\n", 477 cu_id, ccnt, bslots); 478 479 return ccnt == bslots; 480} 481 482// Check if the current wavefront is blocked on additional resources. 483bool 484ComputeUnit::cedeSIMD(int simdId, int wfSlotId) 485{ 486 bool cede = false; 487 488 // If --xact-cas-mode option is enabled in run.py, then xact_cas_ld 489 // magic instructions will impact the scheduling of wavefronts 490 if (xact_cas_mode) { 491 /* 492 * When a wavefront calls xact_cas_ld, it adds itself to a per address 493 * queue. All per address queues are managed by the xactCasLoadMap. 494 * 495 * A wavefront is not blocked if: it is not in ANY per address queue or 496 * if it is at the head of a per address queue. 497 */ 498 for (auto itMap : xactCasLoadMap) { 499 std::list<waveIdentifier> curWaveIDQueue = itMap.second.waveIDQueue; 500 501 if (!curWaveIDQueue.empty()) { 502 for (auto it : curWaveIDQueue) { 503 waveIdentifier cur_wave = it; 504 505 if (cur_wave.simdId == simdId && 506 cur_wave.wfSlotId == wfSlotId) { 507 // 2 possibilities 508 // 1: this WF has a green light 509 // 2: another WF has a green light 510 waveIdentifier owner_wave = curWaveIDQueue.front(); 511 512 if (owner_wave.simdId != cur_wave.simdId || 513 owner_wave.wfSlotId != cur_wave.wfSlotId) { 514 // possibility 2 515 cede = true; 516 break; 517 } else { 518 // possibility 1 519 break; 520 } 521 } 522 } 523 } 524 } 525 } 526 527 return cede; 528} 529 530// Execute one clock worth of work on the ComputeUnit. 
531void 532ComputeUnit::exec() 533{ 534 updateEvents(); 535 // Execute pipeline stages in reverse order to simulate 536 // the pipeline latency 537 globalMemoryPipe.exec(); 538 localMemoryPipe.exec(); 539 execStage.exec(); 540 scheduleStage.exec(); 541 scoreboardCheckStage.exec(); 542 fetchStage.exec(); 543 544 totalCycles++; 545} 546 547void 548ComputeUnit::init() 549{ 550 // Initialize CU Bus models 551 glbMemToVrfBus.init(&shader->tick_cnt, shader->ticks(1)); 552 locMemToVrfBus.init(&shader->tick_cnt, shader->ticks(1)); 553 nextGlbMemBus = 0; 554 nextLocMemBus = 0; 555 fatal_if(numGlbMemUnits > 1, 556 "No support for multiple Global Memory Pipelines exists!!!"); 557 vrfToGlobalMemPipeBus.resize(numGlbMemUnits); 558 for (int j = 0; j < numGlbMemUnits; ++j) { 559 vrfToGlobalMemPipeBus[j] = WaitClass(); 560 vrfToGlobalMemPipeBus[j].init(&shader->tick_cnt, shader->ticks(1)); 561 } 562 563 fatal_if(numLocMemUnits > 1, 564 "No support for multiple Local Memory Pipelines exists!!!"); 565 vrfToLocalMemPipeBus.resize(numLocMemUnits); 566 for (int j = 0; j < numLocMemUnits; ++j) { 567 vrfToLocalMemPipeBus[j] = WaitClass(); 568 vrfToLocalMemPipeBus[j].init(&shader->tick_cnt, shader->ticks(1)); 569 } 570 vectorRegsReserved.resize(numSIMDs, 0); 571 aluPipe.resize(numSIMDs); 572 wfWait.resize(numSIMDs + numLocMemUnits + numGlbMemUnits); 573 574 for (int i = 0; i < numSIMDs + numLocMemUnits + numGlbMemUnits; ++i) { 575 wfWait[i] = WaitClass(); 576 wfWait[i].init(&shader->tick_cnt, shader->ticks(1)); 577 } 578 579 for (int i = 0; i < numSIMDs; ++i) { 580 aluPipe[i] = WaitClass(); 581 aluPipe[i].init(&shader->tick_cnt, shader->ticks(1)); 582 } 583 584 // Setup space for call args 585 for (int j = 0; j < numSIMDs; ++j) { 586 for (int i = 0; i < shader->n_wf; ++i) { 587 wfList[j][i]->initCallArgMem(shader->funcargs_size, wavefrontSize); 588 } 589 } 590 591 // Initializing pipeline resources 592 readyList.resize(numSIMDs + numGlbMemUnits + numLocMemUnits); 593 waveStatusList.resize(numSIMDs); 594 595 for (int j = 0; j < numSIMDs; ++j) { 596 for (int i = 0; i < shader->n_wf; ++i) { 597 waveStatusList[j].push_back( 598 std::make_pair(wfList[j][i], BLOCKED)); 599 } 600 } 601 602 for (int j = 0; j < (numSIMDs + numGlbMemUnits + numLocMemUnits); ++j) { 603 dispatchList.push_back(std::make_pair((Wavefront*)nullptr, EMPTY)); 604 } 605 606 fetchStage.init(this); 607 scoreboardCheckStage.init(this); 608 scheduleStage.init(this); 609 execStage.init(this); 610 globalMemoryPipe.init(this); 611 localMemoryPipe.init(this); 612 // initialize state for statistics calculation 613 vectorAluInstAvail.resize(numSIMDs, false); 614 shrMemInstAvail = 0; 615 glbMemInstAvail = 0; 616} 617 618bool 619ComputeUnit::DataPort::recvTimingResp(PacketPtr pkt) 620{ 621 // Ruby has completed the memory op. 
Schedule the mem_resp_event at the 622 // appropriate cycle to process the timing memory response 623 // This delay represents the pipeline delay 624 SenderState *sender_state = safe_cast<SenderState*>(pkt->senderState); 625 int index = sender_state->port_index; 626 GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst; 627 628 // Is the packet returned a Kernel End or Barrier 629 if (pkt->req->isKernel() && pkt->req->isRelease()) { 630 Wavefront *w = 631 computeUnit->wfList[gpuDynInst->simdId][gpuDynInst->wfSlotId]; 632 633 // Check if we are waiting on Kernel End Release 634 if (w->status == Wavefront::S_RETURNING) { 635 DPRINTF(GPUDisp, "CU%d: WF[%d][%d][wv=%d]: WG id completed %d\n", 636 computeUnit->cu_id, w->simdId, w->wfSlotId, 637 w->wfDynId, w->kernId); 638 639 computeUnit->shader->dispatcher->notifyWgCompl(w); 640 w->status = Wavefront::S_STOPPED; 641 } else { 642 w->outstandingReqs--; 643 } 644 645 DPRINTF(GPUSync, "CU%d: WF[%d][%d]: barrier_cnt = %d\n", 646 computeUnit->cu_id, gpuDynInst->simdId, 647 gpuDynInst->wfSlotId, w->barrierCnt); 648 649 if (gpuDynInst->useContinuation) { 650 assert(gpuDynInst->scope != Enums::MEMORY_SCOPE_NONE); 651 gpuDynInst->execContinuation(gpuDynInst->staticInstruction(), 652 gpuDynInst); 653 } 654 655 delete pkt->senderState; 656 delete pkt->req; 657 delete pkt; 658 return true; 659 } else if (pkt->req->isKernel() && pkt->req->isAcquire()) { 660 if (gpuDynInst->useContinuation) { 661 assert(gpuDynInst->scope != Enums::MEMORY_SCOPE_NONE); 662 gpuDynInst->execContinuation(gpuDynInst->staticInstruction(), 663 gpuDynInst); 664 } 665 666 delete pkt->senderState; 667 delete pkt->req; 668 delete pkt; 669 return true; 670 } 671 672 ComputeUnit::DataPort::MemRespEvent *mem_resp_event = 673 new ComputeUnit::DataPort::MemRespEvent(computeUnit->memPort[index], 674 pkt); 675 676 DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x received!\n", 677 computeUnit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, 678 index, pkt->req->getPaddr()); 679 680 computeUnit->schedule(mem_resp_event, 681 curTick() + computeUnit->resp_tick_latency); 682 return true; 683} 684 685void 686ComputeUnit::DataPort::recvReqRetry() 687{ 688 int len = retries.size(); 689 690 assert(len > 0); 691 692 for (int i = 0; i < len; ++i) { 693 PacketPtr pkt = retries.front().first; 694 GPUDynInstPtr gpuDynInst M5_VAR_USED = retries.front().second; 695 DPRINTF(GPUMem, "CU%d: WF[%d][%d]: retry mem inst addr %#x\n", 696 computeUnit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, 697 pkt->req->getPaddr()); 698 699 /** Currently Ruby can return false due to conflicts for the particular 700 * cache block or address. Thus other requests should be allowed to 701 * pass and the data port should expect multiple retries. 
*/ 702 if (!sendTimingReq(pkt)) { 703 DPRINTF(GPUMem, "failed again!\n"); 704 break; 705 } else { 706 DPRINTF(GPUMem, "successful!\n"); 707 retries.pop_front(); 708 } 709 } 710} 711 712bool 713ComputeUnit::SQCPort::recvTimingResp(PacketPtr pkt) 714{ 715 computeUnit->fetchStage.processFetchReturn(pkt); 716 717 return true; 718} 719 720void 721ComputeUnit::SQCPort::recvReqRetry() 722{ 723 int len = retries.size(); 724 725 assert(len > 0); 726 727 for (int i = 0; i < len; ++i) { 728 PacketPtr pkt = retries.front().first; 729 Wavefront *wavefront M5_VAR_USED = retries.front().second; 730 DPRINTF(GPUFetch, "CU%d: WF[%d][%d]: retrying FETCH addr %#x\n", 731 computeUnit->cu_id, wavefront->simdId, wavefront->wfSlotId, 732 pkt->req->getPaddr()); 733 if (!sendTimingReq(pkt)) { 734 DPRINTF(GPUFetch, "failed again!\n"); 735 break; 736 } else { 737 DPRINTF(GPUFetch, "successful!\n"); 738 retries.pop_front(); 739 } 740 } 741} 742 743void 744ComputeUnit::sendRequest(GPUDynInstPtr gpuDynInst, int index, PacketPtr pkt) 745{ 746 // There must be a way around this check to do the globalMemStart... 747 Addr tmp_vaddr = pkt->req->getVaddr(); 748 749 updatePageDivergenceDist(tmp_vaddr); 750 751 pkt->req->setVirt(pkt->req->getAsid(), tmp_vaddr, pkt->req->getSize(), 752 pkt->req->getFlags(), pkt->req->masterId(), 753 pkt->req->getPC()); 754 755 // figure out the type of the request to set read/write 756 BaseTLB::Mode TLB_mode; 757 assert(pkt->isRead() || pkt->isWrite()); 758 759 // Check write before read for atomic operations 760 // since atomic operations should use BaseTLB::Write 761 if (pkt->isWrite()){ 762 TLB_mode = BaseTLB::Write; 763 } else if (pkt->isRead()) { 764 TLB_mode = BaseTLB::Read; 765 } else { 766 fatal("pkt is not a read nor a write\n"); 767 } 768 769 tlbCycles -= curTick(); 770 ++tlbRequests; 771 772 int tlbPort_index = perLaneTLB ? 
        index : 0;

    if (shader->timingSim) {
        if (debugSegFault) {
            Process *p = shader->gpuTc->getProcessPtr();
            Addr vaddr = pkt->req->getVaddr();
            unsigned size = pkt->getSize();

            if ((vaddr + size - 1) % 64 < vaddr % 64) {
                panic("CU%d: WF[%d][%d]: Access to addr %#x is unaligned!\n",
                      cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, vaddr);
            }

            Addr paddr;

            if (!p->pTable->translate(vaddr, paddr)) {
                if (!p->fixupStackFault(vaddr)) {
                    panic("CU%d: WF[%d][%d]: Fault on addr %#x!\n",
                          cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
                          vaddr);
                }
            }
        }

        // This is the SenderState needed upon return
        pkt->senderState = new DTLBPort::SenderState(gpuDynInst, index);

        // This is the senderState needed by the TLB hierarchy to function
        TheISA::GpuTLB::TranslationState *translation_state =
            new TheISA::GpuTLB::TranslationState(TLB_mode, shader->gpuTc,
                                                 false, pkt->senderState);

        pkt->senderState = translation_state;

        if (functionalTLB) {
            tlbPort[tlbPort_index]->sendFunctional(pkt);

            // update the hitLevel distribution
            int hit_level = translation_state->hitLevel;
            assert(hit_level != -1);
            hitsPerTLBLevel[hit_level]++;

            // New SenderState for the memory access
            X86ISA::GpuTLB::TranslationState *sender_state =
                safe_cast<X86ISA::GpuTLB::TranslationState*>(pkt->senderState);

            delete sender_state->tlbEntry;
            delete sender_state->saved;
            delete sender_state;

            assert(pkt->req->hasPaddr());
            assert(pkt->req->hasSize());

            uint8_t *tmpData = pkt->getPtr<uint8_t>();

            // This is necessary because the GPU TLB receives packets instead
            // of requests. When the translation is complete, all relevant
            // fields in the request will be populated, but not in the packet.
            // Here we create the new packet so we can set the size, addr,
            // and proper flags.
            PacketPtr oldPkt = pkt;
            pkt = new Packet(oldPkt->req, oldPkt->cmd);
            delete oldPkt;
            pkt->dataStatic(tmpData);

            // New SenderState for the memory access
            pkt->senderState =
                new ComputeUnit::DataPort::SenderState(gpuDynInst, index,
                                                       nullptr);

            gpuDynInst->memStatusVector[pkt->getAddr()].push_back(index);
            gpuDynInst->tlbHitLevel[index] = hit_level;

            // translation is done. Schedule the mem_req_event at the
            // appropriate cycle to send the timing memory request to ruby
            ComputeUnit::DataPort::MemReqEvent *mem_req_event =
                new ComputeUnit::DataPort::MemReqEvent(memPort[index], pkt);

            DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x data "
                    "scheduled\n", cu_id, gpuDynInst->simdId,
                    gpuDynInst->wfSlotId, index, pkt->req->getPaddr());

            schedule(mem_req_event, curTick() + req_tick_latency);
        } else if (tlbPort[tlbPort_index]->isStalled()) {
            assert(tlbPort[tlbPort_index]->retries.size() > 0);

            DPRINTF(GPUTLB, "CU%d: WF[%d][%d]: Translation for addr %#x "
                    "failed!\n", cu_id, gpuDynInst->simdId,
                    gpuDynInst->wfSlotId, tmp_vaddr);

            tlbPort[tlbPort_index]->retries.push_back(pkt);
        } else if (!tlbPort[tlbPort_index]->sendTimingReq(pkt)) {
            // Stall the data port;
            // no more packets will be issued until Ruby indicates resources
            // are freed by a recvReqRetry() callback on this port.
            tlbPort[tlbPort_index]->stallPort();

            DPRINTF(GPUTLB, "CU%d: WF[%d][%d]: Translation for addr %#x "
                    "failed!\n", cu_id, gpuDynInst->simdId,
                    gpuDynInst->wfSlotId, tmp_vaddr);

            tlbPort[tlbPort_index]->retries.push_back(pkt);
        } else {
            DPRINTF(GPUTLB,
                    "CU%d: WF[%d][%d]: Translation for addr %#x sent!\n",
                    cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
                    tmp_vaddr);
        }
    } else {
        if (pkt->cmd == MemCmd::MemFenceReq) {
            gpuDynInst->statusBitVector = VectorMask(0);
        } else {
            gpuDynInst->statusBitVector &= (~(1ll << index));
        }

        // New SenderState for the memory access
        delete pkt->senderState;

        // Because it's an atomic operation, only the TLB translation state
        // is needed
        pkt->senderState =
            new TheISA::GpuTLB::TranslationState(TLB_mode, shader->gpuTc);

        tlbPort[tlbPort_index]->sendFunctional(pkt);

        // The functional translation does not modify the packet's address,
        // so we need to create a new packet; otherwise the memory access
        // would be sent with the old virtual address from the translation
        // packet instead of the physical address the translation returned.
        PacketPtr new_pkt = new Packet(pkt->req, pkt->cmd);
        new_pkt->dataStatic(pkt->getPtr<uint8_t>());

        // Translation is done. It is safe to send the packet to memory.
        memPort[0]->sendFunctional(new_pkt);

        DPRINTF(GPUMem, "CU%d: WF[%d][%d]: index %d: addr %#x\n", cu_id,
                gpuDynInst->simdId, gpuDynInst->wfSlotId, index,
                new_pkt->req->getPaddr());

        // safe_cast the senderState
        TheISA::GpuTLB::TranslationState *sender_state =
            safe_cast<TheISA::GpuTLB::TranslationState*>(pkt->senderState);

        delete sender_state->tlbEntry;
        delete new_pkt;
        delete pkt->senderState;
        delete pkt->req;
        delete pkt;
    }
}
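
/*
 * A minimal sketch of the repackage-after-translation pattern that
 * sendRequest() uses above: the GPU TLB works on packets, but only the
 * underlying request is updated with the physical address, so the packet
 * must be rebuilt around the same request before the access is sent to
 * memory. "repackageForMemory" is a hypothetical helper shown for
 * illustration only; it relies solely on the Packet API already used in
 * this file.
 *
 *     static PacketPtr
 *     repackageForMemory(PacketPtr old_pkt)
 *     {
 *         // the request now carries the translated paddr, but the packet
 *         // still carries the old vaddr, so rebuild the packet
 *         uint8_t *data = old_pkt->getPtr<uint8_t>();
 *         PacketPtr new_pkt = new Packet(old_pkt->req, old_pkt->cmd);
 *         new_pkt->dataStatic(data);
 *         delete old_pkt;
 *         return new_pkt;
 *     }
 */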

void
ComputeUnit::sendSyncRequest(GPUDynInstPtr gpuDynInst, int index,
                             PacketPtr pkt)
{
    ComputeUnit::DataPort::MemReqEvent *mem_req_event =
        new ComputeUnit::DataPort::MemReqEvent(memPort[index], pkt);

    // New SenderState for the memory access
    pkt->senderState =
        new ComputeUnit::DataPort::SenderState(gpuDynInst, index, nullptr);

    DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x sync scheduled\n",
            cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, index,
            pkt->req->getPaddr());

    schedule(mem_req_event, curTick() + req_tick_latency);
}

void
ComputeUnit::injectGlobalMemFence(GPUDynInstPtr gpuDynInst, bool kernelLaunch,
                                  Request* req)
{
    if (!req) {
        req = new Request(0, 0, 0, 0, masterId(), 0, gpuDynInst->wfDynId);
    }
    req->setPaddr(0);
    if (kernelLaunch) {
        req->setFlags(Request::KERNEL);
    }

    gpuDynInst->s_type = SEG_GLOBAL;

    // For non-kernel MemFence operations, memory-order flags are set
    // depending on which type of request is currently being sent, so they
    // should be set by the caller (e.g. if an inst has acq-rel semantics,
    // it will send one acquire req and one release req)
    gpuDynInst->setRequestFlags(req, kernelLaunch);

    // a mem fence must correspond to an acquire/release request
    assert(req->isAcquire() || req->isRelease());

    // create packet
    PacketPtr pkt = new Packet(req, MemCmd::MemFenceReq);

    // set packet's sender state
    pkt->senderState =
        new ComputeUnit::DataPort::SenderState(gpuDynInst, 0, nullptr);

    // send the packet
    sendSyncRequest(gpuDynInst, 0, pkt);
}
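
/*
 * Illustration of the caller-side convention described in
 * injectGlobalMemFence() above: an instruction with acquire-release
 * semantics sends two fences, one per memory-order flag, with
 * setRequestFlags() marking each request as acquire or release from the
 * instruction's current state. A hypothetical caller sketch; the
 * two-phase bookkeeping is illustrative, not code from this model:
 *
 *     // acquire half of an acq-rel fence
 *     injectGlobalMemFence(gpuDynInst, false);
 *     // ...later, once the acquire half completes, the release half
 *     injectGlobalMemFence(gpuDynInst, false);
 */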

const char*
ComputeUnit::DataPort::MemRespEvent::description() const
{
    return "ComputeUnit memory response event";
}

void
ComputeUnit::DataPort::MemRespEvent::process()
{
    DataPort::SenderState *sender_state =
        safe_cast<DataPort::SenderState*>(pkt->senderState);

    GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;
    ComputeUnit *compute_unit = dataPort->computeUnit;

    assert(gpuDynInst);

    DPRINTF(GPUPort, "CU%d: WF[%d][%d]: Response for addr %#x, index %d\n",
            compute_unit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
            pkt->req->getPaddr(), dataPort->index);

    Addr paddr = pkt->req->getPaddr();

    if (pkt->cmd != MemCmd::MemFenceResp) {
        int index = gpuDynInst->memStatusVector[paddr].back();

        DPRINTF(GPUMem, "Response for addr %#x, index %d\n",
                pkt->req->getPaddr(), index);

        gpuDynInst->memStatusVector[paddr].pop_back();
        gpuDynInst->pAddr = pkt->req->getPaddr();

        if (pkt->isRead() || pkt->isWrite()) {

            if (gpuDynInst->n_reg <= MAX_REGS_FOR_NON_VEC_MEM_INST) {
                gpuDynInst->statusBitVector &= (~(1ULL << index));
            } else {
                assert(gpuDynInst->statusVector[index] > 0);
                gpuDynInst->statusVector[index]--;

                if (!gpuDynInst->statusVector[index])
                    gpuDynInst->statusBitVector &= (~(1ULL << index));
            }

            DPRINTF(GPUMem, "bitvector is now %#x\n",
                    gpuDynInst->statusBitVector);

            if (gpuDynInst->statusBitVector == VectorMask(0)) {
                auto iter = gpuDynInst->memStatusVector.begin();
                auto end = gpuDynInst->memStatusVector.end();

                while (iter != end) {
                    assert(iter->second.empty());
                    ++iter;
                }

                gpuDynInst->memStatusVector.clear();

                if (gpuDynInst->n_reg > MAX_REGS_FOR_NON_VEC_MEM_INST)
                    gpuDynInst->statusVector.clear();

                if (gpuDynInst->m_op == Enums::MO_LD ||
                    MO_A(gpuDynInst->m_op) || MO_ANR(gpuDynInst->m_op)) {
                    assert(compute_unit->globalMemoryPipe.
                           isGMLdRespFIFOWrRdy());

                    compute_unit->globalMemoryPipe.getGMLdRespFIFO()
                        .push(gpuDynInst);
                } else {
                    assert(compute_unit->globalMemoryPipe.
                           isGMStRespFIFOWrRdy());

                    compute_unit->globalMemoryPipe.getGMStRespFIFO()
                        .push(gpuDynInst);
                }

                DPRINTF(GPUMem, "CU%d: WF[%d][%d]: packet totally complete\n",
                        compute_unit->cu_id, gpuDynInst->simdId,
                        gpuDynInst->wfSlotId);

                // After clearing the status vectors, see if there is a
                // continuation to perform; the continuation may generate
                // more work for this memory request
                if (gpuDynInst->useContinuation) {
                    assert(gpuDynInst->scope != Enums::MEMORY_SCOPE_NONE);
                    gpuDynInst->execContinuation(
                        gpuDynInst->staticInstruction(), gpuDynInst);
                }
            }
        }
    } else {
        gpuDynInst->statusBitVector = VectorMask(0);

        if (gpuDynInst->useContinuation) {
            assert(gpuDynInst->scope != Enums::MEMORY_SCOPE_NONE);
            gpuDynInst->execContinuation(gpuDynInst->staticInstruction(),
                                         gpuDynInst);
        }
    }

    delete pkt->senderState;
    delete pkt->req;
    delete pkt;
}
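
/*
 * A worked example of the per-lane bookkeeping in MemRespEvent::process()
 * above, assuming a 64-lane wavefront: suppose lane 5 of a wide vector
 * load (n_reg > MAX_REGS_FOR_NON_VEC_MEM_INST) has two outstanding
 * requests to the same physical address. Index 5 appears twice in
 * memStatusVector[paddr] and bit 5 of statusBitVector is set. Each
 * response pops one entry and decrements statusVector[5]; only when that
 * per-lane count reaches zero is bit 5 cleared, and only when the whole
 * bitvector reaches zero is the instruction pushed into the global memory
 * pipe's response FIFO (and any continuation executed).
 */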

ComputeUnit*
ComputeUnitParams::create()
{
    return new ComputeUnit(this);
}

bool
ComputeUnit::DTLBPort::recvTimingResp(PacketPtr pkt)
{
    Addr line = pkt->req->getPaddr();

    DPRINTF(GPUTLB, "CU%d: DTLBPort received %#x->%#x\n", computeUnit->cu_id,
            pkt->req->getVaddr(), line);

    assert(pkt->senderState);
    computeUnit->tlbCycles += curTick();

    // pop off the TLB translation state
    TheISA::GpuTLB::TranslationState *translation_state =
        safe_cast<TheISA::GpuTLB::TranslationState*>(pkt->senderState);

    // no PageFaults are permitted for data accesses
    if (!translation_state->tlbEntry->valid) {
        DTLBPort::SenderState *sender_state =
            safe_cast<DTLBPort::SenderState*>(translation_state->saved);

        Wavefront *w M5_VAR_USED =
            computeUnit->wfList[sender_state->_gpuDynInst->simdId]
                               [sender_state->_gpuDynInst->wfSlotId];

        DPRINTFN("Wave %d couldn't translate vaddr %#x\n", w->wfDynId,
                 pkt->req->getVaddr());
    }

    assert(translation_state->tlbEntry->valid);

    // update the hitLevel distribution
    int hit_level = translation_state->hitLevel;
    computeUnit->hitsPerTLBLevel[hit_level]++;

    delete translation_state->tlbEntry;
    assert(!translation_state->ports.size());
    pkt->senderState = translation_state->saved;

    // for prefetch pkt
    BaseTLB::Mode TLB_mode = translation_state->tlbMode;

    delete translation_state;

    // use the original sender state to know how to close this transaction
    DTLBPort::SenderState *sender_state =
        safe_cast<DTLBPort::SenderState*>(pkt->senderState);

    GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;
    int mp_index = sender_state->portIndex;
    Addr vaddr = pkt->req->getVaddr();
    gpuDynInst->memStatusVector[line].push_back(mp_index);
    gpuDynInst->tlbHitLevel[mp_index] = hit_level;

    MemCmd requestCmd;

    if (pkt->cmd == MemCmd::ReadResp) {
        requestCmd = MemCmd::ReadReq;
    } else if (pkt->cmd == MemCmd::WriteResp) {
        requestCmd = MemCmd::WriteReq;
    } else if (pkt->cmd == MemCmd::SwapResp) {
        requestCmd = MemCmd::SwapReq;
    } else {
        panic("unsupported response to request conversion %s\n",
              pkt->cmd.toString());
    }

    if (computeUnit->prefetchDepth) {
        int simdId = gpuDynInst->simdId;
        int wfSlotId = gpuDynInst->wfSlotId;
        Addr last = 0;

        switch(computeUnit->prefetchType) {
        case Enums::PF_CU:
            last = computeUnit->lastVaddrCU[mp_index];
            break;
        case Enums::PF_PHASE:
            last = computeUnit->lastVaddrSimd[simdId][mp_index];
            break;
        case Enums::PF_WF:
            last = computeUnit->lastVaddrWF[simdId][wfSlotId][mp_index];
            break;
        default:
            break;
        }

        DPRINTF(GPUPrefetch, "CU[%d][%d][%d][%d]: %#x was last\n",
                computeUnit->cu_id, simdId, wfSlotId, mp_index, last);

        int stride = last ? (roundDown(vaddr, TheISA::PageBytes) -
                             roundDown(last, TheISA::PageBytes))
                            >> TheISA::PageShift
                          : 0;

        DPRINTF(GPUPrefetch, "Stride is %d\n", stride);

        computeUnit->lastVaddrCU[mp_index] = vaddr;
        computeUnit->lastVaddrSimd[simdId][mp_index] = vaddr;
        computeUnit->lastVaddrWF[simdId][wfSlotId][mp_index] = vaddr;

        stride = (computeUnit->prefetchType == Enums::PF_STRIDE) ?
            computeUnit->prefetchStride : stride;

        DPRINTF(GPUPrefetch, "%#x to: CU[%d][%d][%d][%d]\n", vaddr,
                computeUnit->cu_id, simdId, wfSlotId, mp_index);

        DPRINTF(GPUPrefetch, "Prefetching from %#x:", vaddr);

        // Prefetch the next few pages atomically
        for (int pf = 1; pf <= computeUnit->prefetchDepth; ++pf) {
            DPRINTF(GPUPrefetch, "%d * %d: %#x\n", pf, stride,
                    vaddr + stride * pf * TheISA::PageBytes);

            if (!stride)
                break;

            Request *prefetch_req = new Request(0, vaddr + stride * pf *
                                                TheISA::PageBytes,
                                                sizeof(uint8_t), 0,
                                                computeUnit->masterId(),
                                                0, 0, 0);

            PacketPtr prefetch_pkt = new Packet(prefetch_req, requestCmd);
            uint8_t foo = 0;
            prefetch_pkt->dataStatic(&foo);

            // Because it's an atomic operation, only the TLB translation
            // state is needed
            prefetch_pkt->senderState =
                new TheISA::GpuTLB::TranslationState(
                    TLB_mode, computeUnit->shader->gpuTc, true);

            // Currently prefetches are zero-latency, hence the sendFunctional
            sendFunctional(prefetch_pkt);

            /* safe_cast the senderState */
            TheISA::GpuTLB::TranslationState *tlb_state =
                safe_cast<TheISA::GpuTLB::TranslationState*>(
                    prefetch_pkt->senderState);

            delete tlb_state->tlbEntry;
            delete tlb_state;
            delete prefetch_pkt->req;
            delete prefetch_pkt;
        }
    }

    // First we must convert the response cmd back to a request cmd so that
    // the request can be sent through the cu's master port
    PacketPtr new_pkt = new Packet(pkt->req, requestCmd);
    new_pkt->dataStatic(pkt->getPtr<uint8_t>());
    delete pkt->senderState;
    delete pkt;

    // New SenderState for the memory access
    new_pkt->senderState =
        new ComputeUnit::DataPort::SenderState(gpuDynInst, mp_index,
                                               nullptr);

    // Translation is done. Schedule the mem_req_event at the appropriate
    // cycle to send the timing memory request to ruby
    ComputeUnit::DataPort::MemReqEvent *mem_req_event =
        new ComputeUnit::DataPort::MemReqEvent(computeUnit->memPort[mp_index],
                                               new_pkt);

    DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x data scheduled\n",
            computeUnit->cu_id, gpuDynInst->simdId,
            gpuDynInst->wfSlotId, mp_index, new_pkt->req->getPaddr());

    computeUnit->schedule(mem_req_event, curTick() +
                          computeUnit->req_tick_latency);

    return true;
}
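
/*
 * A worked example of the stride computation in DTLBPort::recvTimingResp()
 * above, assuming 4 KiB pages (TheISA::PageBytes == 4096, PageShift == 12):
 * if the previous access (last) fell on page 0x2000 and the current vaddr
 * falls on page 0x5000, then
 *
 *     stride = (0x5000 - 0x2000) >> 12 = 3 pages,
 *
 * so with prefetchDepth == 2 the loop issues functional translations for
 * vaddr + 3 * 4 KiB and vaddr + 6 * 4 KiB. A stride of zero (first touch,
 * or two accesses to the same page) suppresses prefetching entirely.
 */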

const char*
ComputeUnit::DataPort::MemReqEvent::description() const
{
    return "ComputeUnit memory request event";
}

void
ComputeUnit::DataPort::MemReqEvent::process()
{
    SenderState *sender_state = safe_cast<SenderState*>(pkt->senderState);
    GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;
    ComputeUnit *compute_unit M5_VAR_USED = dataPort->computeUnit;

    if (!(dataPort->sendTimingReq(pkt))) {
        dataPort->retries.push_back(std::make_pair(pkt, gpuDynInst));

        DPRINTF(GPUPort,
                "CU%d: WF[%d][%d]: index %d, addr %#x data req failed!\n",
                compute_unit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId, dataPort->index,
                pkt->req->getPaddr());
    } else {
        DPRINTF(GPUPort,
                "CU%d: WF[%d][%d]: index %d, addr %#x data req sent!\n",
                compute_unit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId, dataPort->index,
                pkt->req->getPaddr());
    }
}

/*
 * The initial translation request could have been rejected, if
 * <retries> queue is not empty. Retry sending the translation
 * request. sendRetry() is called from the peer port whenever
 * a translation completes.
 */
void
ComputeUnit::DTLBPort::recvReqRetry()
{
    int len = retries.size();

    DPRINTF(GPUTLB, "CU%d: DTLB recvReqRetry - %d pending requests\n",
            computeUnit->cu_id, len);

    assert(len > 0);
    assert(isStalled());
    // recvReqRetry is an indication that the resource on which this
    // port was stalling is freed. So, remove the stall first
    unstallPort();

    for (int i = 0; i < len; ++i) {
        PacketPtr pkt = retries.front();
        Addr vaddr M5_VAR_USED = pkt->req->getVaddr();
        DPRINTF(GPUTLB, "CU%d: retrying D-translation for address %#x",
                computeUnit->cu_id, vaddr);

        if (!sendTimingReq(pkt)) {
            // Stall port
            stallPort();
            DPRINTF(GPUTLB, ": failed again\n");
            break;
        } else {
            DPRINTF(GPUTLB, ": successful\n");
            retries.pop_front();
        }
    }
}

bool
ComputeUnit::ITLBPort::recvTimingResp(PacketPtr pkt)
{
    Addr line M5_VAR_USED = pkt->req->getPaddr();
    DPRINTF(GPUTLB, "CU%d: ITLBPort received %#x->%#x\n",
            computeUnit->cu_id, pkt->req->getVaddr(), line);

    assert(pkt->senderState);

    // pop off the TLB translation state
    TheISA::GpuTLB::TranslationState *translation_state =
        safe_cast<TheISA::GpuTLB::TranslationState*>(pkt->senderState);

    bool success = translation_state->tlbEntry->valid;
    delete translation_state->tlbEntry;
    assert(!translation_state->ports.size());
    pkt->senderState = translation_state->saved;
    delete translation_state;

    // use the original sender state to know how to close this transaction
    ITLBPort::SenderState *sender_state =
        safe_cast<ITLBPort::SenderState*>(pkt->senderState);

    // get the wavefront associated with this translation request
    Wavefront *wavefront = sender_state->wavefront;
    delete pkt->senderState;

    if (success) {
        // pkt is reused in fetch(), don't delete it here. However, we must
        // reset the command to be a request so that it can be sent through
        // the cu's master port
        assert(pkt->cmd == MemCmd::ReadResp);
        pkt->cmd = MemCmd::ReadReq;

        computeUnit->fetchStage.fetch(pkt, wavefront);
    } else {
        if (wavefront->dropFetch) {
            assert(wavefront->instructionBuffer.empty());
            wavefront->dropFetch = false;
        }

        wavefront->pendingFetch = 0;
    }

    return true;
}

/*
 * The initial translation request could have been rejected, if
 * <retries> queue is not empty. Retry sending the translation
 * request. sendRetry() is called from the peer port whenever
 * a translation completes.
 */
void
ComputeUnit::ITLBPort::recvReqRetry()
{
    int len = retries.size();
    DPRINTF(GPUTLB, "CU%d: ITLB recvReqRetry - %d pending requests\n",
            computeUnit->cu_id, len);

    assert(len > 0);
    assert(isStalled());

    // recvReqRetry is an indication that the resource on which this
    // port was stalling is freed. So, remove the stall first
    unstallPort();

    for (int i = 0; i < len; ++i) {
        PacketPtr pkt = retries.front();
        Addr vaddr M5_VAR_USED = pkt->req->getVaddr();
        DPRINTF(GPUTLB, "CU%d: retrying I-translation for address %#x",
                computeUnit->cu_id, vaddr);

        if (!sendTimingReq(pkt)) {
            stallPort(); // Stall port
            DPRINTF(GPUTLB, ": failed again\n");
            break;
        } else {
            DPRINTF(GPUTLB, ": successful\n");
            retries.pop_front();
        }
    }
}
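
/*
 * The DTLB and ITLB retry loops above, and the LDS port later in this
 * file, share one stall/retry protocol: a rejected packet is queued, the
 * port is marked stalled, and recvReqRetry() drains the queue in order
 * until a rejection stalls the port again. A minimal sketch of the shared
 * pattern (the surrounding port class is assumed; this is not a literal
 * excerpt):
 *
 *     unstallPort();
 *     while (!retries.empty()) {
 *         PacketPtr pkt = retries.front();
 *         if (!sendTimingReq(pkt)) {
 *             stallPort();    // peer will send another retry later
 *             break;
 *         }
 *         retries.pop_front();
 *     }
 */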

void
ComputeUnit::regStats()
{
    MemObject::regStats();

    tlbCycles
        .name(name() + ".tlb_cycles")
        .desc("total number of cycles for all uncoalesced requests")
        ;

    tlbRequests
        .name(name() + ".tlb_requests")
        .desc("number of uncoalesced requests")
        ;

    tlbLatency
        .name(name() + ".avg_translation_latency")
        .desc("Avg. translation latency for data translations")
        ;

    tlbLatency = tlbCycles / tlbRequests;

    hitsPerTLBLevel
        .init(4)
        .name(name() + ".TLB_hits_distribution")
        .desc("TLB hits distribution (0 for page table, x for Lx-TLB)")
        ;

    // fixed number of TLB levels
    for (int i = 0; i < 4; ++i) {
        if (!i)
            hitsPerTLBLevel.subname(i, "page_table");
        else
            hitsPerTLBLevel.subname(i, csprintf("L%d_TLB", i));
    }

    execRateDist
        .init(0, 10, 2)
        .name(name() + ".inst_exec_rate")
        .desc("Instruction Execution Rate: Number of executed vector "
              "instructions per cycle")
        ;

    ldsBankConflictDist
        .init(0, wfSize(), 2)
        .name(name() + ".lds_bank_conflicts")
        .desc("Number of bank conflicts per LDS memory packet")
        ;

    ldsBankAccesses
        .name(name() + ".lds_bank_access_cnt")
        .desc("Total number of LDS bank accesses")
        ;

    pageDivergenceDist
        // A wavefront can touch up to N pages per memory instruction,
        // where N is the wavefront size.
        // The number of pages per bin can be configured (here it's 4).
        .init(1, wfSize(), 4)
        .name(name() + ".page_divergence_dist")
        .desc("pages touched per wf (over all mem. instr.)")
        ;

    controlFlowDivergenceDist
        .init(1, wfSize(), 4)
        .name(name() + ".warp_execution_dist")
        .desc("number of lanes active per instruction (over all "
              "instructions)")
        ;

    activeLanesPerGMemInstrDist
        .init(1, wfSize(), 4)
        .name(name() + ".gmem_lanes_execution_dist")
        .desc("number of active lanes per global memory instruction")
        ;

    activeLanesPerLMemInstrDist
        .init(1, wfSize(), 4)
        .name(name() + ".lmem_lanes_execution_dist")
        .desc("number of active lanes per local memory instruction")
        ;

    numInstrExecuted
        .name(name() + ".num_instr_executed")
        .desc("number of instructions executed")
        ;

    numVecOpsExecuted
        .name(name() + ".num_vec_ops_executed")
        .desc("number of vec ops executed (e.g. WF size/inst)")
        ;
WF size/inst)") 1497 ; 1498 1499 totalCycles 1500 .name(name() + ".num_total_cycles") 1501 .desc("number of cycles the CU ran for") 1502 ; 1503 1504 ipc 1505 .name(name() + ".ipc") 1506 .desc("Instructions per cycle (this CU only)") 1507 ; 1508 1509 vpc 1510 .name(name() + ".vpc") 1511 .desc("Vector Operations per cycle (this CU only)") 1512 ; 1513 1514 numALUInstsExecuted 1515 .name(name() + ".num_alu_insts_executed") 1516 .desc("Number of dynamic non-GM memory insts executed") 1517 ; 1518 1519 wgBlockedDueLdsAllocation 1520 .name(name() + ".wg_blocked_due_lds_alloc") 1521 .desc("Workgroup blocked due to LDS capacity") 1522 ; 1523 1524 ipc = numInstrExecuted / totalCycles; 1525 vpc = numVecOpsExecuted / totalCycles; 1526 1527 numTimesWgBlockedDueVgprAlloc 1528 .name(name() + ".times_wg_blocked_due_vgpr_alloc") 1529 .desc("Number of times WGs are blocked due to VGPR allocation per SIMD") 1530 ; 1531 1532 dynamicGMemInstrCnt 1533 .name(name() + ".global_mem_instr_cnt") 1534 .desc("dynamic global memory instructions count") 1535 ; 1536 1537 dynamicLMemInstrCnt 1538 .name(name() + ".local_mem_instr_cnt") 1539 .desc("dynamic local memory intruction count") 1540 ; 1541 1542 numALUInstsExecuted = numInstrExecuted - dynamicGMemInstrCnt - 1543 dynamicLMemInstrCnt; 1544 1545 completedWfs 1546 .name(name() + ".num_completed_wfs") 1547 .desc("number of completed wavefronts") 1548 ; 1549 1550 numCASOps 1551 .name(name() + ".num_CAS_ops") 1552 .desc("number of compare and swap operations") 1553 ; 1554 1555 numFailedCASOps 1556 .name(name() + ".num_failed_CAS_ops") 1557 .desc("number of compare and swap operations that failed") 1558 ; 1559 1560 // register stats of pipeline stages 1561 fetchStage.regStats(); 1562 scoreboardCheckStage.regStats(); 1563 scheduleStage.regStats(); 1564 execStage.regStats(); 1565 1566 // register stats of memory pipeline 1567 globalMemoryPipe.regStats(); 1568 localMemoryPipe.regStats(); 1569} 1570 1571void 1572ComputeUnit::updatePageDivergenceDist(Addr addr) 1573{ 1574 Addr virt_page_addr = roundDown(addr, TheISA::PageBytes); 1575 1576 if (!pagesTouched.count(virt_page_addr)) 1577 pagesTouched[virt_page_addr] = 1; 1578 else 1579 pagesTouched[virt_page_addr]++; 1580} 1581 1582void 1583ComputeUnit::CUExitCallback::process() 1584{ 1585 if (computeUnit->countPages) { 1586 std::ostream *page_stat_file = 1587 simout.create(computeUnit->name().c_str())->stream(); 1588 1589 *page_stat_file << "page, wavefront accesses, workitem accesses" << 1590 std::endl; 1591 1592 for (auto iter : computeUnit->pageAccesses) { 1593 *page_stat_file << std::hex << iter.first << ","; 1594 *page_stat_file << std::dec << iter.second.first << ","; 1595 *page_stat_file << std::dec << iter.second.second << std::endl; 1596 } 1597 } 1598 } 1599 1600bool 1601ComputeUnit::isDone() const 1602{ 1603 for (int i = 0; i < numSIMDs; ++i) { 1604 if (!isSimdDone(i)) { 1605 return false; 1606 } 1607 } 1608 1609 bool glbMemBusRdy = true; 1610 for (int j = 0; j < numGlbMemUnits; ++j) { 1611 glbMemBusRdy &= vrfToGlobalMemPipeBus[j].rdy(); 1612 } 1613 bool locMemBusRdy = true; 1614 for (int j = 0; j < numLocMemUnits; ++j) { 1615 locMemBusRdy &= vrfToLocalMemPipeBus[j].rdy(); 1616 } 1617 1618 if (!globalMemoryPipe.isGMLdRespFIFOWrRdy() || 1619 !globalMemoryPipe.isGMStRespFIFOWrRdy() || 1620 !globalMemoryPipe.isGMReqFIFOWrRdy() || !localMemoryPipe.isLMReqFIFOWrRdy() 1621 || !localMemoryPipe.isLMRespFIFOWrRdy() || !locMemToVrfBus.rdy() || 1622 !glbMemToVrfBus.rdy() || !locMemBusRdy || !glbMemBusRdy) { 1623 return false; 1624 } 

bool
ComputeUnit::isDone() const
{
    for (int i = 0; i < numSIMDs; ++i) {
        if (!isSimdDone(i)) {
            return false;
        }
    }

    bool glbMemBusRdy = true;
    for (int j = 0; j < numGlbMemUnits; ++j) {
        glbMemBusRdy &= vrfToGlobalMemPipeBus[j].rdy();
    }
    bool locMemBusRdy = true;
    for (int j = 0; j < numLocMemUnits; ++j) {
        locMemBusRdy &= vrfToLocalMemPipeBus[j].rdy();
    }

    if (!globalMemoryPipe.isGMLdRespFIFOWrRdy() ||
        !globalMemoryPipe.isGMStRespFIFOWrRdy() ||
        !globalMemoryPipe.isGMReqFIFOWrRdy() ||
        !localMemoryPipe.isLMReqFIFOWrRdy() ||
        !localMemoryPipe.isLMRespFIFOWrRdy() || !locMemToVrfBus.rdy() ||
        !glbMemToVrfBus.rdy() || !locMemBusRdy || !glbMemBusRdy) {
        return false;
    }

    return true;
}

int32_t
ComputeUnit::getRefCounter(const uint32_t dispatchId,
                           const uint32_t wgId) const
{
    return lds.getRefCounter(dispatchId, wgId);
}

bool
ComputeUnit::isSimdDone(uint32_t simdId) const
{
    assert(simdId < numSIMDs);

    for (int i = 0; i < numGlbMemUnits; ++i) {
        if (!vrfToGlobalMemPipeBus[i].rdy())
            return false;
    }
    for (int i = 0; i < numLocMemUnits; ++i) {
        if (!vrfToLocalMemPipeBus[i].rdy())
            return false;
    }
    if (!aluPipe[simdId].rdy()) {
        return false;
    }

    for (int i_wf = 0; i_wf < shader->n_wf; ++i_wf) {
        if (wfList[simdId][i_wf]->status != Wavefront::S_STOPPED) {
            return false;
        }
    }

    return true;
}

/**
 * Send a general request to the LDS. Make sure to look at the return
 * value here, as your request might be NACK'd; returning false means
 * that you have to have some backup plan.
 */
bool
ComputeUnit::sendToLds(GPUDynInstPtr gpuDynInst)
{
    // this is just a request to carry the GPUDynInstPtr
    // back and forth
    Request *newRequest = new Request();
    newRequest->setPaddr(0x0);

    // ReadReq is not evaluated by the LDS but the Packet ctor requires this
    PacketPtr newPacket = new Packet(newRequest, MemCmd::ReadReq);

    // This is the SenderState needed upon return
    newPacket->senderState = new LDSPort::SenderState(gpuDynInst);

    return ldsPort->sendTimingReq(newPacket);
}
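
/*
 * As the comment above stresses, callers must check sendToLds()'s return
 * value. On false, the LDS port has either queued the packet for a later
 * retry or was already stalled (see LDSPort::sendTimingReq below), so the
 * caller should throttle further LDS issue rather than assume the
 * operation completed. A hypothetical caller sketch:
 *
 *     if (!sendToLds(gpuDynInst)) {
 *         // port is stalled; the packet is queued for retry inside the
 *         // port, so hold off issuing more LDS work this cycle
 *     }
 */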

/**
 * Get the result of packets sent to the LDS when they return.
 */
bool
ComputeUnit::LDSPort::recvTimingResp(PacketPtr packet)
{
    const ComputeUnit::LDSPort::SenderState *senderState =
        dynamic_cast<ComputeUnit::LDSPort::SenderState *>(packet->senderState);

    fatal_if(!senderState, "did not get the right sort of sender state");

    GPUDynInstPtr gpuDynInst = senderState->getMemInst();

    delete packet->senderState;
    delete packet->req;
    delete packet;

    computeUnit->localMemoryPipe.getLMRespFIFO().push(gpuDynInst);
    return true;
}

/**
 * Attempt to send this packet: either the port is already stalled, or the
 * request is NACK'd and must stall, or the request goes through. When a
 * request cannot be sent, it is added to the retries queue.
 */
bool
ComputeUnit::LDSPort::sendTimingReq(PacketPtr pkt)
{
    ComputeUnit::LDSPort::SenderState *sender_state =
        dynamic_cast<ComputeUnit::LDSPort::SenderState*>(pkt->senderState);
    fatal_if(!sender_state, "packet without a valid sender state");

    GPUDynInstPtr gpuDynInst M5_VAR_USED = sender_state->getMemInst();

    if (isStalled()) {
        fatal_if(retries.empty(), "must have retries waiting to be stalled");

        retries.push(pkt);

        DPRINTF(GPUPort, "CU%d: WF[%d][%d]: LDS send failed!\n",
                computeUnit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId);
        return false;
    } else if (!MasterPort::sendTimingReq(pkt)) {
        // need to stall the LDS port until a recvReqRetry() is received
        // this indicates that there is more space
        stallPort();
        retries.push(pkt);

        DPRINTF(GPUPort, "CU%d: WF[%d][%d]: addr %#x lds req failed!\n",
                computeUnit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId, pkt->req->getPaddr());
        return false;
    } else {
        DPRINTF(GPUPort, "CU%d: WF[%d][%d]: addr %#x lds req sent!\n",
                computeUnit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId, pkt->req->getPaddr());
        return true;
    }
}

/**
 * The bus is telling the port that there is now space, so retrying stalled
 * requests should work now. This allows the port to have a request be
 * NACK'd and then have the receiver say when there is space, rather than
 * simply retrying the send every cycle.
 */
void
ComputeUnit::LDSPort::recvReqRetry()
{
    auto queueSize = retries.size();

    DPRINTF(GPUPort, "CU%d: LDSPort recvReqRetry - %d pending requests\n",
            computeUnit->cu_id, queueSize);

    fatal_if(queueSize < 1,
             "why was there a recvReqRetry() with no pending reqs?");
    fatal_if(!isStalled(),
             "recvReqRetry() happened when the port was not stalled");

    unstallPort();

    while (!retries.empty()) {
        PacketPtr packet = retries.front();

        DPRINTF(GPUPort, "CU%d: retrying LDS send\n", computeUnit->cu_id);

        if (!MasterPort::sendTimingReq(packet)) {
            // Stall port
            stallPort();
            DPRINTF(GPUPort, ": LDS send failed again\n");
            break;
        } else {
            DPRINTF(GPUPort, ": LDS send successful\n");
            retries.pop();
        }
    }
}