/*
 * Copyright (c) 2013-2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * For use for simulation and test purposes only
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Sooraj Puthoor
 */

#include "base/logging.hh"
#include "base/str.hh"
#include "config/the_isa.hh"

#if THE_ISA == X86_ISA
#include "arch/x86/insts/microldstop.hh"

#endif // X86_ISA
#include "mem/ruby/system/VIPERCoalescer.hh"

#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/GPUCoalescer.hh"
#include "debug/MemoryAccess.hh"
#include "mem/packet.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/ruby/network/MessageBuffer.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/structures/CacheMemory.hh"
#include "mem/ruby/system/GPUCoalescer.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "params/VIPERCoalescer.hh"

using namespace std;

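// Standard gem5 params factory hook: builds the coalescer from its Python
// configuration parameters.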
VIPERCoalescer *
VIPERCoalescerParams::create()
{
    return new VIPERCoalescer(this);
}

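// Record the per-cycle writeback and invalidate issue limits from the params
// and start with no outstanding writebacks or invalidations.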
VIPERCoalescer::VIPERCoalescer(const Params *p)
    : GPUCoalescer(p)
{
    m_max_wb_per_cycle = p->max_wb_per_cycle;
    m_max_inv_per_cycle = p->max_inv_per_cycle;
    m_outstanding_inv = 0;
    m_outstanding_wb = 0;
}

VIPERCoalescer::~VIPERCoalescer()
{
}

// Analyzes the packet to see if this request can be coalesced.
// If the request can be coalesced, it is added to the reqCoalescer table
// and makeRequest returns RequestStatus_Issued.
// If this is the first request to a cacheline, the request is added to both
// the newRequests queue and the reqCoalescer table, and makeRequest
// returns RequestStatus_Issued.
// If there is a pending request to this cacheline and this request
// can't be coalesced, RequestStatus_Aliased is returned and
// the packet needs to be reissued.
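// Kernel begin/end and acquire/release requests additionally trigger the
// L1 invalidate/writeback cache walks (invL1, wbL1, invwbL1) defined below.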
RequestStatus
VIPERCoalescer::makeRequest(PacketPtr pkt)
{
    if (m_outstanding_wb || m_outstanding_inv) {
        DPRINTF(GPUCoalescer,
                "There are %d Writebacks and %d Invalidations\n",
                m_outstanding_wb, m_outstanding_inv);
    }
    // Are we in the middle of a release?
    if (m_outstanding_wb > 0) {
        if (pkt->req->isKernel()) {
            // Everything is fine:
            // barriers and Kernel Ends can coalesce.
            // If it is a Kernel Begin, invalidate the L1 cache.
            if (pkt->req->isAcquire() && (m_outstanding_inv == 0)) {
                invL1();
            }

            if (pkt->req->isRelease()) {
                insertKernel(pkt->req->contextId(), pkt);
            }

            return RequestStatus_Issued;
        }
//        return RequestStatus_Aliased;
    } else if (pkt->req->isKernel() && pkt->req->isRelease()) {
        // Flush Dirty Data on Kernel End
        // isKernel + isRelease
        insertKernel(pkt->req->contextId(), pkt);
        wbL1();
        if (m_outstanding_wb == 0) {
            for (auto it = kernelEndList.begin();
                 it != kernelEndList.end(); it++) {
                newKernelEnds.push_back(it->first);
            }
            completeIssue();
        }
        return RequestStatus_Issued;
    }
    RequestStatus requestStatus = GPUCoalescer::makeRequest(pkt);
    if (requestStatus != RequestStatus_Issued) {
        // Request not issued;
        // enqueue a retry
        DPRINTF(GPUCoalescer, "Request not issued by GPUCoalescer\n");
        return requestStatus;
    } else if (pkt->req->isKernel() && pkt->req->isAcquire()) {
        // Invalidate clean Data on Kernel Begin
        // isKernel + isAcquire
        invL1();
    } else if (pkt->req->isAcquire() && pkt->req->isRelease()) {
        // Deschedule the AtomicAcqRel and
        // Flush and Invalidate the L1 cache
        invwbL1();
        if (m_outstanding_wb > 0 && issueEvent.scheduled()) {
            DPRINTF(GPUCoalescer, "issueEvent Descheduled\n");
            deschedule(issueEvent);
        }
    } else if (pkt->req->isRelease()) {
        // Deschedule the StoreRel and
        // Flush the L1 cache
        wbL1();
        if (m_outstanding_wb > 0 && issueEvent.scheduled()) {
            DPRINTF(GPUCoalescer, "issueEvent Descheduled\n");
            deschedule(issueEvent);
        }
    } else if (pkt->req->isAcquire()) {
        // LoadAcq or AtomicAcq
        // Invalidate the L1 cache
        invL1();
    }
    // Request was successful
    if (m_outstanding_wb == 0) {
        if (!issueEvent.scheduled()) {
            DPRINTF(GPUCoalescer, "issueEvent Rescheduled\n");
            schedule(issueEvent, curTick());
        }
    }
    return RequestStatus_Issued;
}

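// Called as each outstanding writeback completes. Once the last writeback
// drains, queued kernel ends are released via completeIssue() and any
// stalled requests are retried.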
void
VIPERCoalescer::wbCallback(Addr addr)
{
    m_outstanding_wb--;
    // If the L1 flush is complete,
    // attempt to schedule the issueEvent
    assert(((int) m_outstanding_wb) >= 0);
    if (m_outstanding_wb == 0) {
        for (auto it = kernelEndList.begin();
             it != kernelEndList.end(); it++) {
            newKernelEnds.push_back(it->first);
        }
        completeIssue();
    }
    trySendRetries();
}

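// Called as each outstanding invalidation completes; any stalled requests
// are retried, but kernel ends only wait on writebacks (see below).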
void
VIPERCoalescer::invCallback(Addr addr)
{
    m_outstanding_inv--;
    // If the L1 flush is complete,
    // attempt to schedule the issueEvent.
    // This probably won't happen, since
    // we don't wait on cache invalidations
    if (m_outstanding_wb == 0) {
        for (auto it = kernelEndList.begin();
             it != kernelEndList.end(); it++) {
            newKernelEnds.push_back(it->first);
        }
        completeIssue();
    }
    trySendRetries();
}

/**
  * Invalidate L1 cache (Acquire)
  */
void
VIPERCoalescer::invL1()
{
    int size = m_dataCache_ptr->getNumBlocks();
    DPRINTF(GPUCoalescer,
            "There are %d Invalidations outstanding before Cache Walk\n",
            m_outstanding_inv);
    // Walk the cache
    for (int i = 0; i < size; i++) {
        Addr addr = m_dataCache_ptr->getAddressAtIdx(i);
        // Evict Read-only data
        RubyRequestType request_type = RubyRequestType_REPLACEMENT;
        std::shared_ptr<RubyRequest> msg = std::make_shared<RubyRequest>(
            clockEdge(), addr, (uint8_t*) 0, 0, 0,
            request_type, RubyAccessMode_Supervisor,
            nullptr);
        assert(m_mandatory_q_ptr != NULL);
        Tick latency = cyclesToTicks(
                            m_controller->mandatoryQueueLatency(request_type));
        assert(latency > 0);
        m_mandatory_q_ptr->enqueue(msg, clockEdge(), latency);
        m_outstanding_inv++;
    }
    DPRINTF(GPUCoalescer,
            "There are %d Invalidations outstanding after Cache Walk\n",
            m_outstanding_inv);
}

/**
  * Writeback L1 cache (Release)
  */
void
VIPERCoalescer::wbL1()
{
    int size = m_dataCache_ptr->getNumBlocks();
    DPRINTF(GPUCoalescer,
            "There are %d Writebacks outstanding before Cache Walk\n",
            m_outstanding_wb);
    // Walk the cache
    for (int i = 0; i < size; i++) {
        Addr addr = m_dataCache_ptr->getAddressAtIdx(i);
        // Write dirty data back
        RubyRequestType request_type = RubyRequestType_FLUSH;
        std::shared_ptr<RubyRequest> msg = std::make_shared<RubyRequest>(
            clockEdge(), addr, (uint8_t*) 0, 0, 0,
            request_type, RubyAccessMode_Supervisor,
            nullptr);
        assert(m_mandatory_q_ptr != NULL);
        Tick latency = cyclesToTicks(
                            m_controller->mandatoryQueueLatency(request_type));
        assert(latency > 0);
        m_mandatory_q_ptr->enqueue(msg, clockEdge(), latency);
        m_outstanding_wb++;
    }
    DPRINTF(GPUCoalescer,
            "There are %d Writebacks outstanding after Cache Walk\n",
            m_outstanding_wb);
}

/**
  * Invalidate and Writeback L1 cache (Acquire&Release)
  */
void
VIPERCoalescer::invwbL1()
{
    int size = m_dataCache_ptr->getNumBlocks();
    // Walk the cache
    for (int i = 0; i < size; i++) {
        Addr addr = m_dataCache_ptr->getAddressAtIdx(i);
        // Evict Read-only data
        RubyRequestType request_type = RubyRequestType_REPLACEMENT;
        std::shared_ptr<RubyRequest> msg = std::make_shared<RubyRequest>(
            clockEdge(), addr, (uint8_t*) 0, 0, 0,
            request_type, RubyAccessMode_Supervisor,
            nullptr);
        assert(m_mandatory_q_ptr != NULL);
        Tick latency = cyclesToTicks(
                            m_controller->mandatoryQueueLatency(request_type));
        assert(latency > 0);
        m_mandatory_q_ptr->enqueue(msg, clockEdge(), latency);
        m_outstanding_inv++;
    }
    // Walk the cache
    for (int i = 0; i < size; i++) {
        Addr addr = m_dataCache_ptr->getAddressAtIdx(i);
        // Write dirty data back
        RubyRequestType request_type = RubyRequestType_FLUSH;
        std::shared_ptr<RubyRequest> msg = std::make_shared<RubyRequest>(
            clockEdge(), addr, (uint8_t*) 0, 0, 0,
            request_type, RubyAccessMode_Supervisor,
            nullptr);
        assert(m_mandatory_q_ptr != NULL);
        Tick latency = cyclesToTicks(
                            m_controller->mandatoryQueueLatency(request_type));
        assert(latency > 0);
        m_mandatory_q_ptr->enqueue(msg, clockEdge(), latency);
        m_outstanding_wb++;
    }
}