// global_memory_pipeline.cc revision 11693:bc1f702c25b9
/*
 * Copyright (c) 2014-2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * For use for simulation and test purposes only
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: John Kalamatianos, Sooraj Puthoor
 */
35
36#include "gpu-compute/global_memory_pipeline.hh"
37
38#include "debug/GPUMem.hh"
39#include "debug/GPUReg.hh"
40#include "gpu-compute/compute_unit.hh"
41#include "gpu-compute/gpu_dyn_inst.hh"
42#include "gpu-compute/shader.hh"
43#include "gpu-compute/vector_register_file.hh"
44#include "gpu-compute/wavefront.hh"
45
// Construct the global memory pipeline from its SimObject parameters.
// The owning ComputeUnit is not known yet at this point; it is bound
// later via init(). gmQueueSize caps both in-flight loads and in-flight
// stores independently (see exec()).
GlobalMemPipeline::GlobalMemPipeline(const ComputeUnitParams* p) :
    computeUnit(nullptr), gmQueueSize(p->global_mem_queue_size),
    inflightStores(0), inflightLoads(0)
{
}
51
52void
53GlobalMemPipeline::init(ComputeUnit *cu)
54{
55    computeUnit = cu;
56    globalMemSize = computeUnit->shader->globalMemSize;
57    _name = computeUnit->name() + ".GlobalMemPipeline";
58}
59
// Advance the global memory pipeline by one cycle: first try to retire
// one returned memory operation (write its data back to the VRF and
// release bookkeeping), then try to issue one new request from the
// issue queue toward the memory system.
void
GlobalMemPipeline::exec()
{
    // apply any returned global memory operations
    // Returned loads are drained in preference to returned stores; m is
    // nullptr only when both return queues are empty.
    GPUDynInstPtr m = !gmReturnedLoads.empty() ? gmReturnedLoads.front() :
        !gmReturnedStores.empty() ? gmReturnedStores.front() : nullptr;

    bool accessVrf = true;
    Wavefront *w = nullptr;

    // check the VRF to see if the operands of a load (or load component
    // of an atomic) are accessible
    if ((m) && (m->isLoad() || m->isAtomicRet())) {
        w = m->wavefront();

        accessVrf =
            w->computeUnit->vrf[w->simdId]->
            vrfOperandAccessReady(m->seqNum(), w, m,
                                  VrfAccessType::WRITE);
    }

    // Retire the head returned op only when every resource is ready:
    // its latency pipe has drained, the GM->VRF bus is free, the VRF
    // write ports are available, all lanes have returned
    // (statusBitVector empty), and either returns may co-issue or this
    // op's wfWait slot is ready.
    // NOTE(review): the emptiness test guarantees m != nullptr here
    // because m was selected from these same queues above.
    if ((!gmReturnedStores.empty() || !gmReturnedLoads.empty()) &&
        m->latency.rdy() && computeUnit->glbMemToVrfBus.rdy() &&
        accessVrf && m->statusBitVector == VectorMask(0) &&
        (computeUnit->shader->coissue_return ||
         computeUnit->wfWait.at(m->pipeId).rdy())) {

        w = m->wavefront();

        m->completeAcc(m);

        // Atomics occupy a load slot (they return data), so they are
        // popped from the returned-loads queue and counted as loads.
        if (m->isLoad() || m->isAtomic()) {
            gmReturnedLoads.pop();
            assert(inflightLoads > 0);
            --inflightLoads;
        } else {
            assert(m->isStore());
            gmReturnedStores.pop();
            assert(inflightStores > 0);
            --inflightStores;
        }

        // Decrement outstanding register count
        computeUnit->shader->ScheduleAdd(&w->outstandingReqs, m->time, -1);

        // An atomic is both a read and a write of global memory, so it
        // decrements both the write and read outstanding counters below.
        if (m->isStore() || m->isAtomic()) {
            computeUnit->shader->ScheduleAdd(&w->outstandingReqsWrGm,
                                             m->time, -1);
        }

        if (m->isLoad() || m->isAtomic()) {
            computeUnit->shader->ScheduleAdd(&w->outstandingReqsRdGm,
                                             m->time, -1);
        }

        // Mark write bus busy for appropriate amount of time
        computeUnit->glbMemToVrfBus.set(m->time);
        if (!computeUnit->shader->coissue_return)
            w->computeUnit->wfWait.at(m->pipeId).set(m->time);
    }

    // If pipeline has executed a global memory instruction
    // execute global memory packets and issue global
    // memory packets to DTLB
    if (!gmIssuedRequests.empty()) {
        GPUDynInstPtr mp = gmIssuedRequests.front();
        // Loads and atomics share the load budget; stall (without
        // popping) if the per-direction in-flight cap is reached.
        if (mp->isLoad() || mp->isAtomic()) {
            if (inflightLoads >= gmQueueSize) {
                return;
            } else {
                ++inflightLoads;
            }
        } else {
            if (inflightStores >= gmQueueSize) {
                return;
            } else if (mp->isStore()) {
                // NOTE(review): non-store ops reaching this branch
                // (e.g. memory fences, presumably) are issued without
                // incrementing inflightStores — confirm intended.
                ++inflightStores;
            }
        }

        mp->initiateAcc(mp);
        gmIssuedRequests.pop();

        DPRINTF(GPUMem, "CU%d: WF[%d][%d] Popping 0 mem_op = \n",
                computeUnit->cu_id, mp->simdId, mp->wfSlotId);
    }
}
147
148void
149GlobalMemPipeline::regStats()
150{
151    loadVrfBankConflictCycles
152        .name(name() + ".load_vrf_bank_conflict_cycles")
153        .desc("total number of cycles GM data are delayed before updating "
154              "the VRF")
155        ;
156}
157