// schedule_stage.cc revision 11308:7d8836fd043d
1/* 2 * Copyright (c) 2014-2015 Advanced Micro Devices, Inc. 3 * All rights reserved. 4 * 5 * For use for simulation and test purposes only 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions are met: 9 * 10 * 1. Redistributions of source code must retain the above copyright notice, 11 * this list of conditions and the following disclaimer. 12 * 13 * 2. Redistributions in binary form must reproduce the above copyright notice, 14 * this list of conditions and the following disclaimer in the documentation 15 * and/or other materials provided with the distribution. 16 * 17 * 3. Neither the name of the copyright holder nor the names of its contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 
32 * 33 * Author: Sooraj Puthoor 34 */ 35 36#include "gpu-compute/schedule_stage.hh" 37 38#include "gpu-compute/compute_unit.hh" 39#include "gpu-compute/gpu_static_inst.hh" 40#include "gpu-compute/vector_register_file.hh" 41#include "gpu-compute/wavefront.hh" 42 43ScheduleStage::ScheduleStage(const ComputeUnitParams *p) 44 : numSIMDs(p->num_SIMDs), 45 numMemUnits(p->num_global_mem_pipes + p->num_shared_mem_pipes) 46{ 47 for (int j = 0; j < numSIMDs + numMemUnits; ++j) { 48 Scheduler newScheduler(p); 49 scheduler.push_back(newScheduler); 50 } 51} 52 53ScheduleStage::~ScheduleStage() 54{ 55 scheduler.clear(); 56 waveStatusList.clear(); 57} 58 59void 60ScheduleStage::init(ComputeUnit *cu) 61{ 62 computeUnit = cu; 63 _name = computeUnit->name() + ".ScheduleStage"; 64 65 for (int j = 0; j < numSIMDs + numMemUnits; ++j) { 66 scheduler[j].bindList(&computeUnit->readyList[j]); 67 } 68 69 for (int j = 0; j < numSIMDs; ++j) { 70 waveStatusList.push_back(&computeUnit->waveStatusList[j]); 71 } 72 73 dispatchList = &computeUnit->dispatchList; 74} 75 76void 77ScheduleStage::arbitrate() 78{ 79 // iterate over all Memory pipelines 80 for (int j = numSIMDs; j < numSIMDs + numMemUnits; ++j) { 81 if (dispatchList->at(j).first) { 82 Wavefront *waveToMemPipe = dispatchList->at(j).first; 83 // iterate over all execution pipelines 84 for (int i = 0; i < numSIMDs + numMemUnits; ++i) { 85 if ((i != j) && (dispatchList->at(i).first)) { 86 Wavefront *waveToExePipe = dispatchList->at(i).first; 87 // if the two selected wavefronts are mapped to the same 88 // SIMD unit then they share the VRF 89 if (waveToMemPipe->simdId == waveToExePipe->simdId) { 90 int simdId = waveToMemPipe->simdId; 91 // Read VRF port arbitration: 92 // If there are read VRF port conflicts between the 93 // a memory and another instruction we drop the other 94 // instruction. 
We don't need to check for write VRF 95 // port conflicts because the memory instruction either 96 // does not need to write to the VRF (store) or will 97 // write to the VRF when the data comes back (load) in 98 // which case the arbiter of the memory pipes will 99 // resolve any conflicts 100 if (computeUnit->vrf[simdId]-> 101 isReadConflict(waveToMemPipe->wfSlotId, 102 waveToExePipe->wfSlotId)) { 103 // FIXME: The "second" member variable is never 104 // used in the model. I am setting it to READY 105 // simply to follow the protocol of setting it 106 // when the WF has an instruction ready to issue 107 waveStatusList[simdId]->at(waveToExePipe->wfSlotId) 108 .second = READY; 109 110 dispatchList->at(i).first = nullptr; 111 dispatchList->at(i).second = EMPTY; 112 break; 113 } 114 } 115 } 116 } 117 } 118 } 119} 120 121void 122ScheduleStage::exec() 123{ 124 for (int j = 0; j < numSIMDs + numMemUnits; ++j) { 125 uint32_t readyListSize = computeUnit->readyList[j].size(); 126 127 // If no wave is ready to be scheduled on the execution resource 128 // then skip scheduling for this execution resource 129 if (!readyListSize) { 130 continue; 131 } 132 133 Wavefront *waveToBeDispatched = scheduler[j].chooseWave(); 134 dispatchList->at(j).first = waveToBeDispatched; 135 waveToBeDispatched->updateResources(); 136 dispatchList->at(j).second = FILLED; 137 138 waveStatusList[waveToBeDispatched->simdId]->at( 139 waveToBeDispatched->wfSlotId).second = BLOCKED; 140 141 assert(computeUnit->readyList[j].size() == readyListSize - 1); 142 } 143 // arbitrate over all shared resources among instructions being issued 144 // simultaneously 145 arbitrate(); 146} 147 148void 149ScheduleStage::regStats() 150{ 151} 152