// Throttle.cc (gem5 Ruby simple network) -- revision 10226
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT 206145Snate@binkert.org * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 216145Snate@binkert.org * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 226145Snate@binkert.org * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 236145Snate@binkert.org * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 246145Snate@binkert.org * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 256145Snate@binkert.org * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 266145Snate@binkert.org * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 276145Snate@binkert.org */ 286145Snate@binkert.org 297832Snate@binkert.org#include <cassert> 307832Snate@binkert.org 318645Snilay@cs.wisc.edu#include "base/cast.hh" 327054Snate@binkert.org#include "base/cprintf.hh" 338232Snate@binkert.org#include "debug/RubyNetwork.hh" 346154Snate@binkert.org#include "mem/ruby/buffers/MessageBuffer.hh" 358229Snate@binkert.org#include "mem/ruby/network/simple/Throttle.hh" 366154Snate@binkert.org#include "mem/ruby/network/Network.hh" 377054Snate@binkert.org#include "mem/ruby/slicc_interface/NetworkMessage.hh" 386154Snate@binkert.org#include "mem/ruby/system/System.hh" 396145Snate@binkert.org 407055Snate@binkert.orgusing namespace std; 417055Snate@binkert.org 426145Snate@binkert.orgconst int MESSAGE_SIZE_MULTIPLIER = 1000; 436145Snate@binkert.org//const int BROADCAST_SCALING = 4; // Have a 16p system act like a 64p systems 446145Snate@binkert.orgconst int BROADCAST_SCALING = 1; 456145Snate@binkert.orgconst int PRIORITY_SWITCH_LIMIT = 128; 466145Snate@binkert.org 476145Snate@binkert.orgstatic int network_message_to_size(NetworkMessage* net_msg_ptr); 486145Snate@binkert.org 499499Snilay@cs.wisc.eduThrottle::Throttle(int sID, NodeID node, Cycles link_latency, 509230Snilay@cs.wisc.edu int link_bandwidth_multiplier, int endpoint_bandwidth, 
519465Snilay@cs.wisc.edu ClockedObject *em) 529230Snilay@cs.wisc.edu : Consumer(em) 536145Snate@binkert.org{ 548259SBrad.Beckmann@amd.com init(node, link_latency, link_bandwidth_multiplier, endpoint_bandwidth); 557054Snate@binkert.org m_sID = sID; 566145Snate@binkert.org} 576145Snate@binkert.org 589499Snilay@cs.wisc.eduThrottle::Throttle(NodeID node, Cycles link_latency, 599230Snilay@cs.wisc.edu int link_bandwidth_multiplier, int endpoint_bandwidth, 609465Snilay@cs.wisc.edu ClockedObject *em) 619230Snilay@cs.wisc.edu : Consumer(em) 626145Snate@binkert.org{ 638259SBrad.Beckmann@amd.com init(node, link_latency, link_bandwidth_multiplier, endpoint_bandwidth); 647054Snate@binkert.org m_sID = 0; 656145Snate@binkert.org} 666145Snate@binkert.org 677054Snate@binkert.orgvoid 689499Snilay@cs.wisc.eduThrottle::init(NodeID node, Cycles link_latency, 699499Snilay@cs.wisc.edu int link_bandwidth_multiplier, int endpoint_bandwidth) 706145Snate@binkert.org{ 717054Snate@binkert.org m_node = node; 727054Snate@binkert.org m_vnets = 0; 736145Snate@binkert.org 747832Snate@binkert.org assert(link_bandwidth_multiplier > 0); 757054Snate@binkert.org m_link_bandwidth_multiplier = link_bandwidth_multiplier; 767054Snate@binkert.org m_link_latency = link_latency; 778259SBrad.Beckmann@amd.com m_endpoint_bandwidth = endpoint_bandwidth; 786145Snate@binkert.org 797054Snate@binkert.org m_wakeups_wo_switch = 0; 806145Snate@binkert.org 819863Snilay@cs.wisc.edu m_link_utilization_proxy = 0; 826145Snate@binkert.org} 836145Snate@binkert.org 847054Snate@binkert.orgvoid 857454Snate@binkert.orgThrottle::addLinks(const std::vector<MessageBuffer*>& in_vec, 869508Snilay@cs.wisc.edu const std::vector<MessageBuffer*>& out_vec) 876145Snate@binkert.org{ 887054Snate@binkert.org assert(in_vec.size() == out_vec.size()); 897054Snate@binkert.org for (int i=0; i<in_vec.size(); i++) { 909508Snilay@cs.wisc.edu addVirtualNetwork(in_vec[i], out_vec[i]); 917054Snate@binkert.org } 926145Snate@binkert.org} 
936145Snate@binkert.org 947054Snate@binkert.orgvoid 959508Snilay@cs.wisc.eduThrottle::addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr) 966145Snate@binkert.org{ 977454Snate@binkert.org m_units_remaining.push_back(0); 987454Snate@binkert.org m_in.push_back(in_ptr); 997454Snate@binkert.org m_out.push_back(out_ptr); 1006145Snate@binkert.org 1017054Snate@binkert.org // Set consumer and description 1027054Snate@binkert.org m_in[m_vnets]->setConsumer(this); 1039465Snilay@cs.wisc.edu 1048608Snilay@cs.wisc.edu string desc = "[Queue to Throttle " + to_string(m_sID) + " " + 1058608Snilay@cs.wisc.edu to_string(m_node) + "]"; 1067054Snate@binkert.org m_in[m_vnets]->setDescription(desc); 1077054Snate@binkert.org m_vnets++; 1086145Snate@binkert.org} 1096145Snate@binkert.org 1107054Snate@binkert.orgvoid 1117054Snate@binkert.orgThrottle::wakeup() 1126145Snate@binkert.org{ 1137054Snate@binkert.org // Limits the number of message sent to a limited number of bytes/cycle. 1147054Snate@binkert.org assert(getLinkBandwidth() > 0); 1157054Snate@binkert.org int bw_remaining = getLinkBandwidth(); 1166145Snate@binkert.org 1177054Snate@binkert.org // Give the highest numbered link priority most of the time 1187054Snate@binkert.org m_wakeups_wo_switch++; 1197054Snate@binkert.org int highest_prio_vnet = m_vnets-1; 1207054Snate@binkert.org int lowest_prio_vnet = 0; 1217054Snate@binkert.org int counter = 1; 1227054Snate@binkert.org bool schedule_wakeup = false; 1236145Snate@binkert.org 1247054Snate@binkert.org // invert priorities to avoid starvation seen in the component network 1257054Snate@binkert.org if (m_wakeups_wo_switch > PRIORITY_SWITCH_LIMIT) { 1267054Snate@binkert.org m_wakeups_wo_switch = 0; 1277054Snate@binkert.org highest_prio_vnet = 0; 1287054Snate@binkert.org lowest_prio_vnet = m_vnets-1; 1297054Snate@binkert.org counter = -1; 1306145Snate@binkert.org } 1316145Snate@binkert.org 1327054Snate@binkert.org for (int vnet = highest_prio_vnet; 1337054Snate@binkert.org 
(vnet * counter) >= (counter * lowest_prio_vnet); 1347054Snate@binkert.org vnet -= counter) { 1357054Snate@binkert.org 1367054Snate@binkert.org assert(m_out[vnet] != NULL); 1377054Snate@binkert.org assert(m_in[vnet] != NULL); 1387054Snate@binkert.org assert(m_units_remaining[vnet] >= 0); 1397054Snate@binkert.org 1407054Snate@binkert.org while (bw_remaining > 0 && 1417054Snate@binkert.org (m_in[vnet]->isReady() || m_units_remaining[vnet] > 0) && 1427054Snate@binkert.org m_out[vnet]->areNSlotsAvailable(1)) { 1437054Snate@binkert.org 1447054Snate@binkert.org // See if we are done transferring the previous message on 1457054Snate@binkert.org // this virtual network 1467054Snate@binkert.org if (m_units_remaining[vnet] == 0 && m_in[vnet]->isReady()) { 1477054Snate@binkert.org // Find the size of the message we are moving 1487054Snate@binkert.org MsgPtr msg_ptr = m_in[vnet]->peekMsgPtr(); 1497054Snate@binkert.org NetworkMessage* net_msg_ptr = 1507453Snate@binkert.org safe_cast<NetworkMessage*>(msg_ptr.get()); 1517054Snate@binkert.org m_units_remaining[vnet] += 1527054Snate@binkert.org network_message_to_size(net_msg_ptr); 1537054Snate@binkert.org 1547780Snilay@cs.wisc.edu DPRINTF(RubyNetwork, "throttle: %d my bw %d bw spent " 1557780Snilay@cs.wisc.edu "enqueueing net msg %d time: %lld.\n", 1567054Snate@binkert.org m_node, getLinkBandwidth(), m_units_remaining[vnet], 1579508Snilay@cs.wisc.edu g_system_ptr->curCycle()); 1587054Snate@binkert.org 1597054Snate@binkert.org // Move the message 16010074Snilay@cs.wisc.edu m_in[vnet]->dequeue(); 16110226Snilay@cs.wisc.edu m_out[vnet]->enqueue(msg_ptr, m_link_latency); 1627054Snate@binkert.org 1637054Snate@binkert.org // Count the message 1649863Snilay@cs.wisc.edu m_msg_counts[net_msg_ptr->getMessageSize()][vnet]++; 1657054Snate@binkert.org 1667780Snilay@cs.wisc.edu DPRINTF(RubyNetwork, "%s\n", *m_out[vnet]); 1677054Snate@binkert.org } 1687054Snate@binkert.org 1697054Snate@binkert.org // Calculate the amount of bandwidth we spent on 
this message 1707054Snate@binkert.org int diff = m_units_remaining[vnet] - bw_remaining; 1717054Snate@binkert.org m_units_remaining[vnet] = max(0, diff); 1727054Snate@binkert.org bw_remaining = max(0, -diff); 1737054Snate@binkert.org } 1747054Snate@binkert.org 1757054Snate@binkert.org if (bw_remaining > 0 && 1767054Snate@binkert.org (m_in[vnet]->isReady() || m_units_remaining[vnet] > 0) && 1777054Snate@binkert.org !m_out[vnet]->areNSlotsAvailable(1)) { 1787780Snilay@cs.wisc.edu DPRINTF(RubyNetwork, "vnet: %d", vnet); 1797054Snate@binkert.org // schedule me to wakeup again because I'm waiting for my 1807054Snate@binkert.org // output queue to become available 1817054Snate@binkert.org schedule_wakeup = true; 1827054Snate@binkert.org } 1836145Snate@binkert.org } 1846145Snate@binkert.org 1857054Snate@binkert.org // We should only wake up when we use the bandwidth 1867054Snate@binkert.org // This is only mostly true 1877054Snate@binkert.org // assert(bw_remaining != getLinkBandwidth()); 1886145Snate@binkert.org 1897054Snate@binkert.org // Record that we used some or all of the link bandwidth this cycle 1907054Snate@binkert.org double ratio = 1.0 - (double(bw_remaining) / double(getLinkBandwidth())); 1916145Snate@binkert.org 1927054Snate@binkert.org // If ratio = 0, we used no bandwidth, if ratio = 1, we used all 1939863Snilay@cs.wisc.edu m_link_utilization_proxy += ratio; 1947054Snate@binkert.org 1957054Snate@binkert.org if (bw_remaining > 0 && !schedule_wakeup) { 1967054Snate@binkert.org // We have extra bandwidth and our output buffer was 1977054Snate@binkert.org // available, so we must not have anything else to do until 1987054Snate@binkert.org // another message arrives. 
1997780Snilay@cs.wisc.edu DPRINTF(RubyNetwork, "%s not scheduled again\n", *this); 2007054Snate@binkert.org } else { 2017780Snilay@cs.wisc.edu DPRINTF(RubyNetwork, "%s scheduled again\n", *this); 2027054Snate@binkert.org 2037054Snate@binkert.org // We are out of bandwidth for this cycle, so wakeup next 2047054Snate@binkert.org // cycle and continue 2059499Snilay@cs.wisc.edu scheduleEvent(Cycles(1)); 2067054Snate@binkert.org } 2076145Snate@binkert.org} 2086145Snate@binkert.org 2097054Snate@binkert.orgvoid 2109863Snilay@cs.wisc.eduThrottle::regStats(string parent) 2116145Snate@binkert.org{ 2129863Snilay@cs.wisc.edu m_link_utilization 2139863Snilay@cs.wisc.edu .name(parent + csprintf(".throttle%i", m_node) + ".link_utilization"); 2149863Snilay@cs.wisc.edu 2159863Snilay@cs.wisc.edu for (MessageSizeType type = MessageSizeType_FIRST; 2169863Snilay@cs.wisc.edu type < MessageSizeType_NUM; ++type) { 2179863Snilay@cs.wisc.edu m_msg_counts[(unsigned int)type] 2189863Snilay@cs.wisc.edu .init(m_vnets) 2199863Snilay@cs.wisc.edu .name(parent + csprintf(".throttle%i", m_node) + ".msg_count." + 2209863Snilay@cs.wisc.edu MessageSizeType_to_string(type)) 2219863Snilay@cs.wisc.edu .flags(Stats::nozero) 2229863Snilay@cs.wisc.edu ; 2239863Snilay@cs.wisc.edu m_msg_bytes[(unsigned int) type] 2249863Snilay@cs.wisc.edu .name(parent + csprintf(".throttle%i", m_node) + ".msg_bytes." 
+ 2259863Snilay@cs.wisc.edu MessageSizeType_to_string(type)) 2269863Snilay@cs.wisc.edu .flags(Stats::nozero) 2279863Snilay@cs.wisc.edu ; 2289863Snilay@cs.wisc.edu 2299863Snilay@cs.wisc.edu m_msg_bytes[(unsigned int) type] = m_msg_counts[type] * Stats::constant( 2309863Snilay@cs.wisc.edu Network::MessageSizeType_to_int(type)); 2319863Snilay@cs.wisc.edu } 2326145Snate@binkert.org} 2336145Snate@binkert.org 2347054Snate@binkert.orgvoid 2357054Snate@binkert.orgThrottle::clearStats() 2366145Snate@binkert.org{ 2379863Snilay@cs.wisc.edu m_link_utilization_proxy = 0; 2386145Snate@binkert.org} 2396145Snate@binkert.org 2409863Snilay@cs.wisc.eduvoid 2419863Snilay@cs.wisc.eduThrottle::collateStats() 2426145Snate@binkert.org{ 2439863Snilay@cs.wisc.edu m_link_utilization = 100.0 * m_link_utilization_proxy 2449863Snilay@cs.wisc.edu / (double(g_system_ptr->curCycle() - g_ruby_start)); 2456145Snate@binkert.org} 2466145Snate@binkert.org 2477054Snate@binkert.orgvoid 2487054Snate@binkert.orgThrottle::print(ostream& out) const 2496145Snate@binkert.org{ 2508054Sksewell@umich.edu ccprintf(out, "[%i bw: %i]", m_node, getLinkBandwidth()); 2516145Snate@binkert.org} 2526145Snate@binkert.org 2537054Snate@binkert.orgint 2547054Snate@binkert.orgnetwork_message_to_size(NetworkMessage* net_msg_ptr) 2557054Snate@binkert.org{ 2567054Snate@binkert.org assert(net_msg_ptr != NULL); 2576145Snate@binkert.org 2589275Snilay@cs.wisc.edu int size = Network::MessageSizeType_to_int(net_msg_ptr->getMessageSize()); 2597054Snate@binkert.org size *= MESSAGE_SIZE_MULTIPLIER; 2606145Snate@binkert.org 2617054Snate@binkert.org // Artificially increase the size of broadcast messages 2627054Snate@binkert.org if (BROADCAST_SCALING > 1 && net_msg_ptr->getDestination().isBroadcast()) 2637054Snate@binkert.org size *= BROADCAST_SCALING; 2647054Snate@binkert.org 2657054Snate@binkert.org return size; 2666145Snate@binkert.org} 267