/*
 * Copyright (c) 2013-2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * For use for simulation and test purposes only
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Sooraj Puthoor
 */

#include "base/logging.hh"
#include "base/str.hh"
#include "config/the_isa.hh"

#if THE_ISA == X86_ISA
#include "arch/x86/insts/microldstop.hh"

#endif // X86_ISA
#include "mem/ruby/system/GPUCoalescer.hh"

#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/GPUCoalescer.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubyPort.hh"
#include "debug/RubyStats.hh"
#include "gpu-compute/shader.hh"
#include "mem/packet.hh"
#include "mem/ruby/common/DataBlock.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/ruby/network/MessageBuffer.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/structures/CacheMemory.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "params/RubyGPUCoalescer.hh"

using namespace std;

GPUCoalescer *
RubyGPUCoalescerParams::create()
{
    return new GPUCoalescer(this);
}

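// Map the memory-scope flags carried by a request onto the HSAScope enum.
// Unscoped requests fall through and return HSAScope_UNSPECIFIED.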
HSAScope
reqScopeToHSAScope(const RequestPtr &req)
{
    HSAScope accessScope = HSAScope_UNSPECIFIED;
    if (req->isScoped()) {
        if (req->isWavefrontScope()) {
            accessScope = HSAScope_WAVEFRONT;
        } else if (req->isWorkgroupScope()) {
            accessScope = HSAScope_WORKGROUP;
        } else if (req->isDeviceScope()) {
            accessScope = HSAScope_DEVICE;
        } else if (req->isSystemScope()) {
            accessScope = HSAScope_SYSTEM;
        } else {
            fatal("Bad scope type");
        }
    }
    return accessScope;
}

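// Map the HSA memory-segment flags carried by a request onto the HSASegment
// enum; a request that matches no known segment flag is a fatal error.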
HSASegment
reqSegmentToHSASegment(const RequestPtr &req)
{
    HSASegment accessSegment = HSASegment_GLOBAL;

    if (req->isGlobalSegment()) {
        accessSegment = HSASegment_GLOBAL;
    } else if (req->isGroupSegment()) {
        accessSegment = HSASegment_GROUP;
    } else if (req->isPrivateSegment()) {
        accessSegment = HSASegment_PRIVATE;
    } else if (req->isKernargSegment()) {
        accessSegment = HSASegment_KERNARG;
    } else if (req->isReadonlySegment()) {
        accessSegment = HSASegment_READONLY;
    } else if (req->isSpillSegment()) {
        accessSegment = HSASegment_SPILL;
    } else if (req->isArgSegment()) {
        accessSegment = HSASegment_ARG;
    } else {
        fatal("Bad segment type");
    }

    return accessSegment;
}

GPUCoalescer::GPUCoalescer(const Params *p)
    : RubyPort(p),
      issueEvent([this]{ completeIssue(); }, "Issue coalesced request",
                 false, Event::Progress_Event_Pri),
      deadlockCheckEvent([this]{ wakeup(); }, "GPUCoalescer deadlock check")
{
    m_store_waiting_on_load_cycles = 0;
    m_store_waiting_on_store_cycles = 0;
    m_load_waiting_on_store_cycles = 0;
    m_load_waiting_on_load_cycles = 0;

    m_outstanding_count = 0;

    m_max_outstanding_requests = 0;
    m_deadlock_threshold = 0;
    m_instCache_ptr = nullptr;
    m_dataCache_ptr = nullptr;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr);
    assert(m_dataCache_ptr);

    m_runningGarnetStandalone = p->garnet_standalone;
    assumingRfOCoherence = p->assume_rfo;
}

GPUCoalescer::~GPUCoalescer()
{
}

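// Deadlock-check handler, invoked by deadlockCheckEvent. Panics if any
// outstanding read or write request has been waiting longer than
// m_deadlock_threshold cycles, and reschedules itself while requests
// remain outstanding.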
void
GPUCoalescer::wakeup()
{
    // Check for deadlock of any of the requests
    Cycles current_time = curCycle();

    // Check across all outstanding requests
    int total_outstanding = 0;

    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        GPUCoalescerRequest* request = read->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
             "version: %d request.paddr: 0x%x m_readRequestTable: %d "
             "current time: %u issue_time: %d difference: %d\n", m_version,
              request->pkt->getAddr(), m_readRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time - request->issue_time) * clockPeriod());
    }

    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        GPUCoalescerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
             "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
             "current time: %u issue_time: %d difference: %d\n", m_version,
              request->pkt->getAddr(), m_writeRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time - request->issue_time) * clockPeriod());
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent,
                 m_deadlock_threshold * clockPeriod() +
                 curTick());
    }
}

void
GPUCoalescer::resetStats()
{
    m_latencyHist.reset();
    m_missLatencyHist.reset();
    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist[i]->reset();
        m_missTypeLatencyHist[i]->reset();
        for (int j = 0; j < MachineType_NUM; j++) {
            m_missTypeMachLatencyHist[i][j]->reset();
        }
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_missMachLatencyHist[i]->reset();

        m_IssueToInitialDelayHist[i]->reset();
        m_InitialToForwardDelayHist[i]->reset();
        m_ForwardToFirstResponseDelayHist[i]->reset();
        m_FirstResponseToCompletionDelayHist[i]->reset();
    }
}

void
GPUCoalescer::printProgress(ostream& out) const
{
}

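// Determine whether a new request for this packet can be accepted right now.
// Returns BufferFull if the mandatory queue has no free slot, and Aliased if
// the controller has the line blocked or an outstanding read/write to the
// same cache line would conflict; otherwise the request is Ready.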
RequestStatus
GPUCoalescer::getRequestStatus(PacketPtr pkt, RubyRequestType request_type)
{
    Addr line_addr = makeLineAddress(pkt->getAddr());

    if (!m_mandatory_q_ptr->areNSlotsAvailable(1, clockEdge())) {
        return RequestStatus_BufferFull;
    }

    if (m_controller->isBlocked(line_addr) &&
       request_type != RubyRequestType_Locked_RMW_Write) {
        return RequestStatus_Aliased;
    }

    if ((request_type == RubyRequestType_ST) ||
        (request_type == RubyRequestType_ATOMIC) ||
        (request_type == RubyRequestType_ATOMIC_RETURN) ||
        (request_type == RubyRequestType_ATOMIC_NO_RETURN) ||
        (request_type == RubyRequestType_RMW_Read) ||
        (request_type == RubyRequestType_RMW_Write) ||
        (request_type == RubyRequestType_Load_Linked) ||
        (request_type == RubyRequestType_Store_Conditional) ||
        (request_type == RubyRequestType_Locked_RMW_Read) ||
        (request_type == RubyRequestType_Locked_RMW_Write) ||
        (request_type == RubyRequestType_FLUSH)) {

        // Check if there is any outstanding read request for the same
        // cache line.
        if (m_readRequestTable.count(line_addr) > 0) {
            m_store_waiting_on_load_cycles++;
            return RequestStatus_Aliased;
        }

        if (m_writeRequestTable.count(line_addr) > 0) {
            // There is an outstanding write request for the cache line
            m_store_waiting_on_store_cycles++;
            return RequestStatus_Aliased;
        }
    } else {
        // Check if there is any outstanding write request for the same
        // cache line.
        if (m_writeRequestTable.count(line_addr) > 0) {
            m_load_waiting_on_store_cycles++;
            return RequestStatus_Aliased;
        }

        if (m_readRequestTable.count(line_addr) > 0) {
            // There is an outstanding read request for the cache line
            m_load_waiting_on_load_cycles++;
            return RequestStatus_Aliased;
        }
    }

    return RequestStatus_Ready;
}

// sets the kernelEndList
void
GPUCoalescer::insertKernel(int wavefront_id, PacketPtr pkt)
{
    // It is not clear whether this can actually happen, but be careful
    // here so that it does not turn into a simulator hang in the future.
    DPRINTF(GPUCoalescer, "inserting wf: %d to kernelEndlist\n", wavefront_id);
    assert(kernelEndList.count(wavefront_id) == 0);

    kernelEndList[wavefront_id] = pkt;
    DPRINTF(GPUCoalescer, "kernelEndList->size() = %d\n",
            kernelEndList.size());
}

// Insert the request on the correct request table.  Return true if
// the entry was already present.
bool
GPUCoalescer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
{
    assert(getRequestStatus(pkt, request_type) == RequestStatus_Ready ||
           pkt->req->isLockedRMW() ||
           !m_mandatory_q_ptr->areNSlotsAvailable(1, clockEdge()));

    int total_outstanding M5_VAR_USED =
        m_writeRequestTable.size() + m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    // See if we should schedule a deadlock check
    if (!deadlockCheckEvent.scheduled()) {
        schedule(deadlockCheckEvent,
                 m_deadlock_threshold * clockPeriod() + curTick());
    }

    Addr line_addr = makeLineAddress(pkt->getAddr());
    if ((request_type == RubyRequestType_ST) ||
        (request_type == RubyRequestType_ATOMIC) ||
        (request_type == RubyRequestType_ATOMIC_RETURN) ||
        (request_type == RubyRequestType_ATOMIC_NO_RETURN) ||
        (request_type == RubyRequestType_RMW_Read) ||
        (request_type == RubyRequestType_RMW_Write) ||
        (request_type == RubyRequestType_Load_Linked) ||
        (request_type == RubyRequestType_Store_Conditional) ||
        (request_type == RubyRequestType_Locked_RMW_Read) ||
        (request_type == RubyRequestType_Locked_RMW_Write) ||
        (request_type == RubyRequestType_FLUSH)) {

        pair<RequestTable::iterator, bool> r =
          m_writeRequestTable.insert(RequestTable::value_type(line_addr,
                                       (GPUCoalescerRequest*) NULL));
        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new GPUCoalescerRequest(pkt, request_type,
                                                curCycle());
            DPRINTF(GPUCoalescer,
                    "Inserting write request for paddr %#x for type %d\n",
                    pkt->req->getPaddr(), i->second->m_type);
            m_outstanding_count++;
        } else {
            return true;
        }
    } else {
        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(RequestTable::value_type(line_addr,
                                        (GPUCoalescerRequest*) NULL));

        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new GPUCoalescerRequest(pkt, request_type,
                                                curCycle());
            DPRINTF(GPUCoalescer,
                    "Inserting read request for paddr %#x for type %d\n",
                    pkt->req->getPaddr(), i->second->m_type);
            m_outstanding_count++;
        } else {
            return true;
        }
    }

    m_outstandReqHist.sample(m_outstanding_count);

    total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
    assert(m_outstanding_count == total_outstanding);

    return false;
}

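// Bookkeeping helper: decrement the outstanding-request count after an
// entry has been erased from one of the request tables.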
void
GPUCoalescer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}

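// Erase a completed request from the write or read request table, chosen by
// its request type, and update the outstanding count.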
void
GPUCoalescer::removeRequest(GPUCoalescerRequest* srequest)
{
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());

    Addr line_addr = makeLineAddress(srequest->pkt->getAddr());
    if ((srequest->m_type == RubyRequestType_ST) ||
        (srequest->m_type == RubyRequestType_RMW_Read) ||
        (srequest->m_type == RubyRequestType_RMW_Write) ||
        (srequest->m_type == RubyRequestType_Load_Linked) ||
        (srequest->m_type == RubyRequestType_Store_Conditional) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Read) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Write)) {
        m_writeRequestTable.erase(line_addr);
    } else {
        m_readRequestTable.erase(line_addr);
    }

    markRemoved();
}

bool
GPUCoalescer::handleLlsc(Addr address, GPUCoalescerRequest* request)
{
    //
    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    //
    bool success = true;
    if (request->m_type == RubyRequestType_Store_Conditional) {
        if (!m_dataCache_ptr->isLocked(address, m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        m_dataCache_ptr->clearLocked(address);
    } else if (request->m_type == RubyRequestType_Load_Linked) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        m_dataCache_ptr->setLocked(address, m_version);
    } else if ((m_dataCache_ptr->isTagPresent(address)) &&
               (m_dataCache_ptr->isLocked(address, m_version))) {
        //
        // Normal writes should clear the locked address
        //
        m_dataCache_ptr->clearLocked(address);
    }
    return success;
}

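// The shorter writeCallback overloads below simply forward to the full
// version, filling in MachineType_NULL, zero timestamps, and
// isRegion = false as defaults.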
void
GPUCoalescer::writeCallback(Addr address, DataBlock& data)
{
    writeCallback(address, MachineType_NULL, data);
}

void
GPUCoalescer::writeCallback(Addr address,
                         MachineType mach,
                         DataBlock& data)
{
    writeCallback(address, mach, data, Cycles(0), Cycles(0), Cycles(0));
}

void
GPUCoalescer::writeCallback(Addr address,
                         MachineType mach,
                         DataBlock& data,
                         Cycles initialRequestTime,
                         Cycles forwardRequestTime,
                         Cycles firstResponseTime)
{
    writeCallback(address, mach, data,
                  initialRequestTime, forwardRequestTime, firstResponseTime,
                  false);
}

void
GPUCoalescer::writeCallback(Addr address,
                         MachineType mach,
                         DataBlock& data,
                         Cycles initialRequestTime,
                         Cycles forwardRequestTime,
                         Cycles firstResponseTime,
                         bool isRegion)
{
    assert(address == makeLineAddress(address));

    DPRINTF(GPUCoalescer, "write callback for address %#x\n", address);
    assert(m_writeRequestTable.count(makeLineAddress(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    GPUCoalescerRequest* request = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_ST) ||
           (request->m_type == RubyRequestType_ATOMIC) ||
           (request->m_type == RubyRequestType_ATOMIC_RETURN) ||
           (request->m_type == RubyRequestType_ATOMIC_NO_RETURN) ||
           (request->m_type == RubyRequestType_RMW_Read) ||
           (request->m_type == RubyRequestType_RMW_Write) ||
           (request->m_type == RubyRequestType_Load_Linked) ||
           (request->m_type == RubyRequestType_Store_Conditional) ||
           (request->m_type == RubyRequestType_Locked_RMW_Read) ||
           (request->m_type == RubyRequestType_Locked_RMW_Write) ||
           (request->m_type == RubyRequestType_FLUSH));

    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    // Not valid for the Garnet_standalone protocol
    //
    bool success = true;
    if (!m_runningGarnetStandalone)
        success = handleLlsc(address, request);

    if (request->m_type == RubyRequestType_Locked_RMW_Read) {
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->m_type == RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, mach, data, success,
                request->issue_time, forwardRequestTime, firstResponseTime,
                isRegion);
}

void
GPUCoalescer::readCallback(Addr address, DataBlock& data)
{
    readCallback(address, MachineType_NULL, data);
}

void
GPUCoalescer::readCallback(Addr address,
                        MachineType mach,
                        DataBlock& data)
{
    readCallback(address, mach, data, Cycles(0), Cycles(0), Cycles(0));
}

void
GPUCoalescer::readCallback(Addr address,
                        MachineType mach,
                        DataBlock& data,
                        Cycles initialRequestTime,
                        Cycles forwardRequestTime,
                        Cycles firstResponseTime)
{
    readCallback(address, mach, data,
                 initialRequestTime, forwardRequestTime, firstResponseTime,
                 false);
}

void
GPUCoalescer::readCallback(Addr address,
                        MachineType mach,
                        DataBlock& data,
                        Cycles initialRequestTime,
                        Cycles forwardRequestTime,
                        Cycles firstResponseTime,
                        bool isRegion)
{
    assert(address == makeLineAddress(address));
    assert(m_readRequestTable.count(makeLineAddress(address)));

    DPRINTF(GPUCoalescer, "read callback for address %#x\n", address);
    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    GPUCoalescerRequest* request = i->second;

    m_readRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_LD) ||
           (request->m_type == RubyRequestType_IFETCH));

    hitCallback(request, mach, data, true,
                request->issue_time, forwardRequestTime, firstResponseTime,
                isRegion);
}

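// Common completion path for reads and writes: update cache replacement
// state, record latency statistics, copy data between the DataBlock and
// every coalesced packet for this cache line, and hand the packet list to
// completeHitCallback.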
void
GPUCoalescer::hitCallback(GPUCoalescerRequest* srequest,
                       MachineType mach,
                       DataBlock& data,
                       bool success,
                       Cycles initialRequestTime,
                       Cycles forwardRequestTime,
                       Cycles firstResponseTime,
                       bool isRegion)
{
    PacketPtr pkt = srequest->pkt;
    Addr request_address = pkt->getAddr();
    Addr request_line_address = makeLineAddress(request_address);

    RubyRequestType type = srequest->m_type;

    // Set this cache entry to the most recently used
    if (type == RubyRequestType_IFETCH) {
        if (m_instCache_ptr->isTagPresent(request_line_address))
            m_instCache_ptr->setMRU(request_line_address);
    } else {
        if (m_dataCache_ptr->isTagPresent(request_line_address))
            m_dataCache_ptr->setMRU(request_line_address);
    }

    recordMissLatency(srequest, mach,
                      initialRequestTime,
                      forwardRequestTime,
                      firstResponseTime,
                      success, isRegion);
    // update the data
    //
    // Must do this for each request coalesced for this cache line
    int len = reqCoalescer[request_line_address].size();
    std::vector<PacketPtr> mylist;
    for (int i = 0; i < len; ++i) {
        PacketPtr pkt = reqCoalescer[request_line_address][i].pkt;
        assert(type == reqCoalescer[request_line_address][i].primaryType);
        request_address = pkt->getAddr();
        request_line_address = makeLineAddress(pkt->getAddr());
        if (pkt->getPtr<uint8_t>()) {
            if ((type == RubyRequestType_LD) ||
                (type == RubyRequestType_ATOMIC) ||
                (type == RubyRequestType_ATOMIC_RETURN) ||
                (type == RubyRequestType_IFETCH) ||
                (type == RubyRequestType_RMW_Read) ||
                (type == RubyRequestType_Locked_RMW_Read) ||
                (type == RubyRequestType_Load_Linked)) {
                pkt->setData(
                    data.getData(getOffset(request_address), pkt->getSize()));
            } else {
                data.setData(pkt->getPtr<uint8_t>(),
                             getOffset(request_address), pkt->getSize());
            }
        } else {
            DPRINTF(MemoryAccess,
                    "WARNING.  Data not transferred from Ruby to M5 for type " \
                    "%s\n",
                    RubyRequestType_to_string(type));
        }

        // If using the RubyTester, update the RubyTester sender state's
        // subBlock with the received data.  The tester will later access
        // this state.
        // Note: RubyPort will access its sender state before the
        // RubyTester.
        if (m_usingRubyTester) {
            RubyPort::SenderState *requestSenderState =
                safe_cast<RubyPort::SenderState*>(pkt->senderState);
            RubyTester::SenderState* testerSenderState =
                safe_cast<RubyTester::SenderState*>(requestSenderState->predecessor);
            testerSenderState->subBlock.mergeFrom(data);
        }

        mylist.push_back(pkt);
    }
    delete srequest;
    reqCoalescer.erase(request_line_address);
    assert(!reqCoalescer.count(request_line_address));

    completeHitCallback(mylist, len);
}

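// True when no read or write requests are outstanding in either table.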
bool
GPUCoalescer::empty() const
{
    return m_writeRequestTable.empty() && m_readRequestTable.empty();
}

// Analyzes the packet to see if this request can be coalesced.
// If the request can be coalesced, it is added to the reqCoalescer table
// and makeRequest returns RequestStatus_Issued.
// If this is the first request to a cacheline, the request is added to both
// the newRequests queue and the reqCoalescer table; makeRequest
// returns RequestStatus_Issued.
// If there is a pending request to this cacheline and this request
// can't be coalesced, RequestStatus_Aliased is returned and
// the packet needs to be reissued.
RequestStatus
GPUCoalescer::makeRequest(PacketPtr pkt)
{
    // Check for GPU Barrier Kernel End or Kernel Begin
    // Leave these to be handled by the child class
    // Kernel End/Barrier = isFlush + isRelease
    // Kernel Begin = isFlush + isAcquire
    if (pkt->req->isKernel()) {
        if (pkt->req->isAcquire()) {
            // This is a Kernel Begin; leave handling to
            // virtual xCoalescer::makeRequest
            return RequestStatus_Issued;
        } else if (pkt->req->isRelease()) {
            // This is a Kernel End; leave handling to
            // virtual xCoalescer::makeRequest.
            // If we are here then we didn't call
            // a virtual version of this function,
            // so we will also schedule the callback
            int wf_id = 0;
            if (pkt->req->hasContextId()) {
                wf_id = pkt->req->contextId();
            }
            insertKernel(wf_id, pkt);
            newKernelEnds.push_back(wf_id);
            if (!issueEvent.scheduled()) {
                schedule(issueEvent, curTick());
            }
            return RequestStatus_Issued;
        }
    }

    // If the number of outstanding requests is greater than the max allowed,
    // return RequestStatus_BufferFull. This logic can be extended to
    // support proper backpressure.
    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

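    // Classify the packet into a primary and a secondary Ruby request type.
    // The primary type is what gets recorded in the coalescer's request
    // tables; the secondary type appears to be the type forwarded to the
    // cache controller when the request is actually issued (see
    // issueRequest, not shown here).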
    RubyRequestType primary_type = RubyRequestType_NULL;
    RubyRequestType secondary_type = RubyRequestType_NULL;

    if (pkt->isLLSC()) {
        //
        // Alpha LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics. In
        // particular, by identifying the operations as atomic, the protocol
        // should understand that migratory sharing optimizations should not
        // be performed (i.e. a load between the LL and SC should not steal
        // away exclusive permission).
        //
        if (pkt->isWrite()) {
            primary_type = RubyRequestType_Store_Conditional;
        } else {
            assert(pkt->isRead());
            primary_type = RubyRequestType_Load_Linked;
        }
        secondary_type = RubyRequestType_ATOMIC;
    } else if (pkt->req->isLockedRMW()) {
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
        //
        if (pkt->isWrite()) {
            primary_type = RubyRequestType_Locked_RMW_Write;
        } else {
            assert(pkt->isRead());
            primary_type = RubyRequestType_Locked_RMW_Read;
        }
        secondary_type = RubyRequestType_ST;
    } else if (pkt->isAtomicOp()) {
        //
        // GPU Atomic Operation
        //
        primary_type = RubyRequestType_ATOMIC;
        secondary_type = RubyRequestType_ATOMIC;
    } else {
        if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                primary_type = secondary_type = RubyRequestType_IFETCH;
            } else {
#if THE_ISA == X86_ISA
                uint32_t flags = pkt->req->getFlags();
                bool storeCheck = flags &
                        (TheISA::StoreCheck << TheISA::FlagShift);
#else
                bool storeCheck = false;
#endif // X86_ISA
                if (storeCheck) {
                    primary_type = RubyRequestType_RMW_Read;
                    secondary_type = RubyRequestType_ST;
                } else {
                    primary_type = secondary_type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isWrite()) {
            //
            // Note: M5 packets do not differentiate ST from RMW_Write
            //
            primary_type = secondary_type = RubyRequestType_ST;
        } else if (pkt->isFlush()) {
            primary_type = secondary_type = RubyRequestType_FLUSH;
        } else if (pkt->req->isRelease() || pkt->req->isAcquire()) {
            if (assumingRfOCoherence) {
                // If we reached here, this request must be a memFence
                // and the protocol implements RfO; the coalescer can
                // assume sequential consistency and schedule the callback
                // immediately.
                // Currently the code implements fence callbacks
                // by reusing the mechanism for kernel completions.
                // This should be fixed.
                int wf_id = 0;
                if (pkt->req->hasContextId()) {
                    wf_id = pkt->req->contextId();
                }
                insertKernel(wf_id, pkt);
                newKernelEnds.push_back(wf_id);
                if (!issueEvent.scheduled()) {
                    schedule(issueEvent, curTick());
                }
                return RequestStatus_Issued;
            } else {
                // If not RfO, return issued here and let the child coalescer
                // take care of it.
                return RequestStatus_Issued;
            }
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    // Check if there is any pending request to this cache line from
    // previous cycles. If there is, return Aliased: coalescing across
    // time is not permitted, so if a request for this address has
    // already been issued, we must block.
    RequestStatus status = getRequestStatus(pkt, primary_type);
    if (status != RequestStatus_Ready)
        return status;

    Addr line_addr = makeLineAddress(pkt->getAddr());

    // Check if this request can be coalesced with previous
    // requests from this cycle.
    if (!reqCoalescer.count(line_addr)) {
        // This is the first access to this cache line.
        // A new request to the memory subsystem has to be
        // made in the next cycle for this cache line, so
        // add this line addr to the "newRequests" queue
        newRequests.push_back(line_addr);

    // There was a request to this cache line in this cycle,
    // let us see if we can coalesce this request with the previous
    // requests from this cycle
    } else if (primary_type !=
               reqCoalescer[line_addr][0].primaryType) {
        // can't coalesce loads, stores and atomics!
        return RequestStatus_Aliased;
    } else if (pkt->req->isLockedRMW() ||
               reqCoalescer[line_addr][0].pkt->req->isLockedRMW()) {
        // can't coalesce locked accesses, but can coalesce atomics!
        return RequestStatus_Aliased;
    } else if (pkt->req->hasContextId() && pkt->req->isRelease() &&
               pkt->req->contextId() !=
               reqCoalescer[line_addr][0].pkt->req->contextId()) {
        // can't coalesce releases from different wavefronts
        return RequestStatus_Aliased;
    }

    // in addition to the packet, we need to save both request types
    reqCoalescer[line_addr].emplace_back(pkt, primary_type, secondary_type);
    if (!issueEvent.scheduled())
        schedule(issueEvent, curTick());
    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}

void
GPUCoalescer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
{

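    // Use the requesting context id as the processor id seen by Ruby;
    // default to -1 when the packet does not carry one.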
    int proc_id = -1;
    if (pkt != NULL && pkt->req->hasContextId()) {
        proc_id = pkt->req->contextId();
    }

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    // At the moment, setting scopes only matters for GPU spill space
    // accesses (pkt->req->isStack()). That scope is REPLACE since spill
    // data does not need to be flushed at the end of a kernel, whereas
    // private and local data may need to be visible at the end of the
    // kernel.
    HSASegment accessSegment = reqSegmentToHSASegment(pkt->req);
    HSAScope accessScope = reqScopeToHSAScope(pkt->req);

    Addr line_addr = makeLineAddress(pkt->getAddr());

    // Create a write mask that records written bytes and atomic
    // operations. This enables partial writes and partial reads of
    // those writes.
    DataBlock dataBlock;
    dataBlock.clear();
    uint32_t blockSize = RubySystem::getBlockSizeBytes();
    std::vector<bool> accessMask(blockSize,false);
    std::vector< std::pair<int,AtomicOpFunctor*> > atomicOps;
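    // Walk every request coalesced to this line: collect atomic ops,
    // merge write data into the data block, and mark the bytes each
    // request touches in the access mask.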
    uint32_t tableSize = reqCoalescer[line_addr].size();
    for (int i = 0; i < tableSize; i++) {
        PacketPtr tmpPkt = reqCoalescer[line_addr][i].pkt;
        uint32_t tmpOffset = (tmpPkt->getAddr()) - line_addr;
        uint32_t tmpSize = tmpPkt->getSize();
        if (tmpPkt->isAtomicOp()) {
            std::pair<int,AtomicOpFunctor *> tmpAtomicOp(tmpOffset,
                                                        tmpPkt->getAtomicOp());
            atomicOps.push_back(tmpAtomicOp);
        } else if (tmpPkt->isWrite()) {
            dataBlock.setData(tmpPkt->getPtr<uint8_t>(),
                              tmpOffset, tmpSize);
        }
        for (int j = 0; j < tmpSize; j++) {
            accessMask[tmpOffset + j] = true;
        }
    }
    std::shared_ptr<RubyRequest> msg;
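    // Build the Ruby request; the atomic-op list is only attached for
    // atomic packets.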
    if (pkt->isAtomicOp()) {
        msg = std::make_shared<RubyRequest>(clockEdge(), pkt->getAddr(),
                              pkt->getPtr<uint8_t>(),
                              pkt->getSize(), pc, secondary_type,
                              RubyAccessMode_Supervisor, pkt,
                              PrefetchBit_No, proc_id, 100,
                              blockSize, accessMask,
                              dataBlock, atomicOps,
                              accessScope, accessSegment);
    } else {
        msg = std::make_shared<RubyRequest>(clockEdge(), pkt->getAddr(),
                              pkt->getPtr<uint8_t>(),
                              pkt->getSize(), pc, secondary_type,
                              RubyAccessMode_Supervisor, pkt,
                              PrefetchBit_No, proc_id, 100,
                              blockSize, accessMask,
                              dataBlock,
                              accessScope, accessSegment);
    }
    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %s\n",
             curTick(), m_version, "Coal", "Begin", "", "",
             printAddress(msg->getPhysicalAddress()),
             RubyRequestType_to_string(secondary_type));

    fatal_if(secondary_type == RubyRequestType_IFETCH,
             "there should not be any I-Fetch requests in the GPU Coalescer");

    Tick latency = cyclesToTicks(
                        m_controller->mandatoryQueueLatency(secondary_type));
    assert(latency > 0);

    assert(m_mandatory_q_ptr);
    m_mandatory_q_ptr->enqueue(msg, clockEdge(), latency);
}

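// Helper for GPUCoalescer::print(): stream an unordered_map as
// "[ key=value ... ]".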
template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const std::unordered_map<KEY, VALUE> &map)
{
    out << "[";
    for (auto i = map.begin(); i != map.end(); ++i)
        out << " " << i->first << "=" << i->second;
    out << " ]";

    return out;
}

void
GPUCoalescer::print(ostream& out) const
{
    out << "[GPUCoalescer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", read request table: " << m_readRequestTable
        << ", write request table: " << m_writeRequestTable
        << "]";
}

// This can be called from setState whenever coherence permissions are
// upgraded; when invoked, coherence violations will be checked for the
// given block.
void
GPUCoalescer::checkCoherence(Addr addr)
{
}

void
GPUCoalescer::recordRequestType(SequencerRequestType requestType) {
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            SequencerRequestType_to_string(requestType));
}


void
GPUCoalescer::completeIssue()
{
    // newRequests has the cacheline addresses of all the
    // requests which need to be issued to the memory subsystem
    // in this cycle
    int len = newRequests.size();
    DPRINTF(GPUCoalescer, "Completing issue for %d new requests.\n", len);
    for (int i = 0; i < len; ++i) {
        // Get the requests from the reqCoalescer table. Only the first
        // request for each cache line is fetched; the remaining requests
        // are coalesced with it, so only one request is issued per
        // cache line.
        RequestDesc info = reqCoalescer[newRequests[i]][0];
        PacketPtr pkt = info.pkt;
        DPRINTF(GPUCoalescer, "Completing for newReq %d: paddr %#x\n",
                i, pkt->req->getPaddr());
        // Insert this request into the read/write request tables. These
        // tables are used to track aliased requests in makeRequest.
        bool found = insertRequest(pkt, info.primaryType);

        if (found) {
            panic("GPUCoalescer::makeRequest should never be called if the "
                  "request is already outstanding\n");
        }

        // Issue request to the Ruby subsystem
        issueRequest(pkt, info.secondaryType);
    }
    newRequests.clear();

    // Issue any kernel-end releases recorded this cycle.
    len = newKernelEnds.size();
    for (int i = 0; i < len; i++) {
        kernelCallback(newKernelEnds[i]);
    }
    newKernelEnds.clear();
}

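// Forward evictions from the protocol to the RubyPort eviction handling,
// which notifies the requestors above.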
void
GPUCoalescer::evictionCallback(Addr address)
{
    ruby_eviction_callback(address);
}

void
GPUCoalescer::kernelCallback(int wavefront_id)
{
    assert(kernelEndList.count(wavefront_id));

    ruby_hit_callback(kernelEndList[wavefront_id]);

    kernelEndList.erase(wavefront_id);
}

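// Completion path for atomics: retire the entry from the write request
// table, copy the pre-atomic memory data into each coalesced packet
// (unless the atomic returns no data), and run the hit callbacks.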
void
GPUCoalescer::atomicCallback(Addr address,
                             MachineType mach,
                             const DataBlock& data)
{
    assert(address == makeLineAddress(address));

    DPRINTF(GPUCoalescer, "atomic callback for address %#x\n", address);
    assert(m_writeRequestTable.count(makeLineAddress(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    GPUCoalescerRequest* srequest = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((srequest->m_type == RubyRequestType_ATOMIC) ||
           (srequest->m_type == RubyRequestType_ATOMIC_RETURN) ||
           (srequest->m_type == RubyRequestType_ATOMIC_NO_RETURN));


    // Atomics don't write to cache, so there is no MRU update...

    recordMissLatency(srequest, mach,
                      srequest->issue_time, Cycles(0), Cycles(0), true, false);

    PacketPtr pkt = srequest->pkt;
    Addr request_address = pkt->getAddr();
    Addr request_line_address = makeLineAddress(pkt->getAddr());

    int len = reqCoalescer[request_line_address].size();
    std::vector<PacketPtr> mylist;
    for (int i = 0; i < len; ++i) {
        PacketPtr pkt = reqCoalescer[request_line_address][i].pkt;
        assert(srequest->m_type ==
               reqCoalescer[request_line_address][i].primaryType);
        request_address = (pkt->getAddr());
        request_line_address = makeLineAddress(request_address);
        if (pkt->getPtr<uint8_t>() &&
            srequest->m_type != RubyRequestType_ATOMIC_NO_RETURN) {
            // Atomics are done in memory and return the data *before*
            // the atomic op...
            pkt->setData(
                data.getData(getOffset(request_address), pkt->getSize()));
        } else {
            DPRINTF(MemoryAccess,
                    "WARNING.  Data not transferred from Ruby to M5 for type " \
                    "%s\n",
                    RubyRequestType_to_string(srequest->m_type));
        }

        // If using the RubyTester, update the RubyTester sender state's
        // subBlock with the received data.  The tester will later access
        // this state.
        // Note: RubyPort will access its sender state before the
        // RubyTester.
        if (m_usingRubyTester) {
            RubyPort::SenderState *requestSenderState =
                safe_cast<RubyPort::SenderState*>(pkt->senderState);
            RubyTester::SenderState* testerSenderState =
                safe_cast<RubyTester::SenderState*>(requestSenderState->predecessor);
            testerSenderState->subBlock.mergeFrom(data);
        }

        mylist.push_back(pkt);
    }
    delete srequest;
    reqCoalescer.erase(request_line_address);
    assert(!reqCoalescer.count(request_line_address));

    completeHitCallback(mylist, len);
}

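// Classify a CP load response by where it was serviced: a hit in this
// unit's own TCP, a TCP-to-TCP transfer, a TCC hit, or a miss.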
void
GPUCoalescer::recordCPReadCallBack(MachineID myMachID, MachineID senderMachID)
{
    if (myMachID == senderMachID) {
        CP_TCPLdHits++;
    } else if (machineIDToMachineType(senderMachID) == MachineType_TCP) {
        CP_TCPLdTransfers++;
    } else if (machineIDToMachineType(senderMachID) == MachineType_TCC) {
        CP_TCCLdHits++;
    } else {
        CP_LdMiss++;
    }
}

void
GPUCoalescer::recordCPWriteCallBack(MachineID myMachID, MachineID senderMachID)
{
    if (myMachID == senderMachID) {
        CP_TCPStHits++;
    } else if (machineIDToMachineType(senderMachID) == MachineType_TCP) {
        CP_TCPStTransfers++;
    } else if (machineIDToMachineType(senderMachID) == MachineType_TCC) {
        CP_TCCStHits++;
    } else {
        CP_StMiss++;
    }
}

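// Respond to every coalesced packet: restore the sender state saved at
// request time and hand the packet back to the port it arrived on.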
void
GPUCoalescer::completeHitCallback(std::vector<PacketPtr> & mylist, int len)
{
    for (int i = 0; i < len; ++i) {
        RubyPort::SenderState *ss =
            safe_cast<RubyPort::SenderState *>(mylist[i]->senderState);
        MemSlavePort *port = ss->port;
        assert(port != NULL);

        mylist[i]->senderState = ss->predecessor;
        delete ss;
        port->hitCallback(mylist[i]);
        trySendRetries();
    }

    testDrainComplete();
}

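// Look up the packet behind an outstanding read to the given address.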
PacketPtr
GPUCoalescer::mapAddrToPkt(Addr address)
{
    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    GPUCoalescerRequest* request = i->second;
    return request->pkt;
}

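// Classify where the request was serviced and record the end-to-end
// latency in the per-type and per-machine histograms.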
void
GPUCoalescer::recordMissLatency(GPUCoalescerRequest* srequest,
                                MachineType mach,
                                Cycles initialRequestTime,
                                Cycles forwardRequestTime,
                                Cycles firstResponseTime,
                                bool success, bool isRegion)
{
    RubyRequestType type = srequest->m_type;
    Cycles issued_time = srequest->issue_time;
    Cycles completion_time = curCycle();
    assert(completion_time >= issued_time);
    Cycles total_lat = completion_time - issued_time;

    // cache stats (valid for RfO protocol only)
    if (mach == MachineType_TCP) {
        if (type == RubyRequestType_LD) {
            GPU_TCPLdHits++;
        } else {
            GPU_TCPStHits++;
        }
    } else if (mach == MachineType_L1Cache_wCC) {
        if (type == RubyRequestType_LD) {
            GPU_TCPLdTransfers++;
        } else {
            GPU_TCPStTransfers++;
        }
    } else if (mach == MachineType_TCC) {
        if (type == RubyRequestType_LD) {
            GPU_TCCLdHits++;
        } else {
            GPU_TCCStHits++;
        }
    } else {
        if (type == RubyRequestType_LD) {
            GPU_LdMiss++;
        } else {
            GPU_StMiss++;
        }
    }

    // Profile all access latency, even zero latency accesses
    m_latencyHist.sample(total_lat);
    m_typeLatencyHist[type]->sample(total_lat);

    // Profile the miss latency for all non-zero demand misses
    if (total_lat != Cycles(0)) {
        m_missLatencyHist.sample(total_lat);
        m_missTypeLatencyHist[type]->sample(total_lat);

        if (mach != MachineType_NUM) {
            m_missMachLatencyHist[mach]->sample(total_lat);
            m_missTypeMachLatencyHist[type][mach]->sample(total_lat);

            if ((issued_time <= initialRequestTime) &&
                (initialRequestTime <= forwardRequestTime) &&
                (forwardRequestTime <= firstResponseTime) &&
                (firstResponseTime <= completion_time)) {

                m_IssueToInitialDelayHist[mach]->sample(
                    initialRequestTime - issued_time);
                m_InitialToForwardDelayHist[mach]->sample(
                    forwardRequestTime - initialRequestTime);
                m_ForwardToFirstResponseDelayHist[mach]->sample(
                    firstResponseTime - forwardRequestTime);
                m_FirstResponseToCompletionDelayHist[mach]->sample(
                    completion_time - firstResponseTime);
            }
        }

    }

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
             curTick(), m_version, "Coal",
             success ? "Done" : "SC_Failed", "", "",
             printAddress(srequest->pkt->getAddr()), total_lat);
}

void
GPUCoalescer::regStats()
{
    RubyPort::regStats();

    // These statistical variables are not for display.
    // The profiler will collate these across different
    // coalescers and display those collated statistics.
    m_outstandReqHist.init(10);
    m_latencyHist.init(10);
    m_missLatencyHist.init(10);

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist.push_back(new Stats::Histogram());
        m_typeLatencyHist[i]->init(10);

        m_missTypeLatencyHist.push_back(new Stats::Histogram());
        m_missTypeLatencyHist[i]->init(10);
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_missMachLatencyHist.push_back(new Stats::Histogram());
        m_missMachLatencyHist[i]->init(10);

        m_IssueToInitialDelayHist.push_back(new Stats::Histogram());
        m_IssueToInitialDelayHist[i]->init(10);

        m_InitialToForwardDelayHist.push_back(new Stats::Histogram());
        m_InitialToForwardDelayHist[i]->init(10);

        m_ForwardToFirstResponseDelayHist.push_back(new Stats::Histogram());
        m_ForwardToFirstResponseDelayHist[i]->init(10);

        m_FirstResponseToCompletionDelayHist.push_back(new Stats::Histogram());
        m_FirstResponseToCompletionDelayHist[i]->init(10);
    }

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_missTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());

        for (int j = 0; j < MachineType_NUM; j++) {
            m_missTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_missTypeMachLatencyHist[i][j]->init(10);
        }
    }

    // GPU cache stats
    GPU_TCPLdHits
        .name(name() + ".gpu_tcp_ld_hits")
        .desc("loads that hit in the TCP")
        ;
    GPU_TCPLdTransfers
        .name(name() + ".gpu_tcp_ld_transfers")
        .desc("TCP to TCP load transfers")
        ;
    GPU_TCCLdHits
        .name(name() + ".gpu_tcc_ld_hits")
        .desc("loads that hit in the TCC")
        ;
    GPU_LdMiss
        .name(name() + ".gpu_ld_misses")
        .desc("loads that miss in the GPU")
        ;

    GPU_TCPStHits
        .name(name() + ".gpu_tcp_st_hits")
        .desc("stores that hit in the TCP")
        ;
    GPU_TCPStTransfers
        .name(name() + ".gpu_tcp_st_transfers")
        .desc("TCP to TCP store transfers")
        ;
    GPU_TCCStHits
        .name(name() + ".gpu_tcc_st_hits")
        .desc("stores that hit in the TCC")
        ;
    GPU_StMiss
        .name(name() + ".gpu_st_misses")
        .desc("stores that miss in the GPU")
        ;

    // CP cache stats
    CP_TCPLdHits
        .name(name() + ".cp_tcp_ld_hits")
        .desc("loads that hit in the TCP")
        ;
    CP_TCPLdTransfers
        .name(name() + ".cp_tcp_ld_transfers")
        .desc("TCP to TCP load transfers")
        ;
    CP_TCCLdHits
        .name(name() + ".cp_tcc_ld_hits")
        .desc("loads that hit in the TCC")
        ;
    CP_LdMiss
        .name(name() + ".cp_ld_misses")
        .desc("loads that miss in the GPU")
        ;

    CP_TCPStHits
        .name(name() + ".cp_tcp_st_hits")
        .desc("stores that hit in the TCP")
        ;
    CP_TCPStTransfers
        .name(name() + ".cp_tcp_st_transfers")
        .desc("TCP to TCP store transfers")
        ;
    CP_TCCStHits
        .name(name() + ".cp_tcc_st_hits")
        .desc("stores that hit in the TCC")
        ;
    CP_StMiss
        .name(name() + ".cp_st_misses")
        .desc("stores that miss in the GPU")
        ;
}