Throttle.cc revision 7054
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "base/cprintf.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/network/Network.hh"
#include "mem/ruby/network/simple/Throttle.hh"
#include "mem/ruby/slicc_interface/NetworkMessage.hh"
#include "mem/ruby/system/System.hh"

using namespace std;

const int HIGH_RANGE = 256;
const int ADJUST_INTERVAL = 50000;
const int MESSAGE_SIZE_MULTIPLIER = 1000;
//const int BROADCAST_SCALING = 4; // Have a 16p system act like a 64p system
const int BROADCAST_SCALING = 1;
const int PRIORITY_SWITCH_LIMIT = 128;

static int network_message_to_size(NetworkMessage* net_msg_ptr);

extern std::ostream *debug_cout_ptr;

Throttle::Throttle(int sID, NodeID node, int link_latency,
                   int link_bandwidth_multiplier)
{
    init(node, link_latency, link_bandwidth_multiplier);
    m_sID = sID;
}

Throttle::Throttle(NodeID node, int link_latency,
                   int link_bandwidth_multiplier)
{
    init(node, link_latency, link_bandwidth_multiplier);
    m_sID = 0;
}

void
Throttle::init(NodeID node, int link_latency, int link_bandwidth_multiplier)
{
    m_node = node;
    m_vnets = 0;

    ASSERT(link_bandwidth_multiplier > 0);
    m_link_bandwidth_multiplier = link_bandwidth_multiplier;
    m_link_latency = link_latency;

    m_wakeups_wo_switch = 0;
    clearStats();
}

void
Throttle::clear()
{
    for (int counter = 0; counter < m_vnets; counter++) {
        m_in[counter]->clear();
        m_out[counter]->clear();
    }
}

void
Throttle::addLinks(const Vector<MessageBuffer*>& in_vec,
                   const Vector<MessageBuffer*>& out_vec)
{
    assert(in_vec.size() == out_vec.size());
    for (int i = 0; i < in_vec.size(); i++) {
        addVirtualNetwork(in_vec[i], out_vec[i]);
    }

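    // Allocate the [message size type][vnet] counter matrix used for the
    // per-link message statistics and start every entry at zero.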
    m_message_counters.setSize(MessageSizeType_NUM);
    for (int i = 0; i < MessageSizeType_NUM; i++) {
        m_message_counters[i].setSize(in_vec.size());
        for (int j = 0; j < m_message_counters[i].size(); j++) {
            m_message_counters[i][j] = 0;
        }
    }
}

void
Throttle::addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr)
{
    m_units_remaining.insertAtBottom(0);
    m_in.insertAtBottom(in_ptr);
    m_out.insertAtBottom(out_ptr);

    // Set consumer and description
    m_in[m_vnets]->setConsumer(this);
    string desc = "[Queue to Throttle " + NodeIDToString(m_sID) + " " +
        NodeIDToString(m_node) + "]";
    m_in[m_vnets]->setDescription(desc);
    m_vnets++;
}

void
Throttle::wakeup()
{
    // Limit the number of messages sent each cycle to the available
    // link bandwidth (bytes/cycle).
    assert(getLinkBandwidth() > 0);
    int bw_remaining = getLinkBandwidth();

    // Give the highest numbered link priority most of the time
    m_wakeups_wo_switch++;
    int highest_prio_vnet = m_vnets - 1;
    int lowest_prio_vnet = 0;
    int counter = 1;
    bool schedule_wakeup = false;

    // Periodically invert the priorities to avoid the starvation seen in
    // the component network
    if (m_wakeups_wo_switch > PRIORITY_SWITCH_LIMIT) {
        m_wakeups_wo_switch = 0;
        highest_prio_vnet = 0;
        lowest_prio_vnet = m_vnets - 1;
        counter = -1;
    }

    for (int vnet = highest_prio_vnet;
         (vnet * counter) >= (counter * lowest_prio_vnet);
         vnet -= counter) {

        assert(m_out[vnet] != NULL);
        assert(m_in[vnet] != NULL);
        assert(m_units_remaining[vnet] >= 0);

        while (bw_remaining > 0 &&
               (m_in[vnet]->isReady() || m_units_remaining[vnet] > 0) &&
               m_out[vnet]->areNSlotsAvailable(1)) {

            // See if we are done transferring the previous message on
            // this virtual network
            if (m_units_remaining[vnet] == 0 && m_in[vnet]->isReady()) {
                // Find the size of the message we are moving
                MsgPtr msg_ptr = m_in[vnet]->peekMsgPtr();
                NetworkMessage* net_msg_ptr =
                    safe_cast<NetworkMessage*>(msg_ptr.ref());
                m_units_remaining[vnet] +=
                    network_message_to_size(net_msg_ptr);

                DEBUG_NEWLINE(NETWORK_COMP, HighPrio);
                DEBUG_MSG(NETWORK_COMP, HighPrio,
                    csprintf("throttle: %d my bw %d bw spent enqueueing "
                             "net msg %d time: %d.",
                             m_node, getLinkBandwidth(),
                             m_units_remaining[vnet],
                             g_eventQueue_ptr->getTime()));

                // Move the message
                m_out[vnet]->enqueue(m_in[vnet]->peekMsgPtr(), m_link_latency);
                m_in[vnet]->pop();

                // Count the message
                m_message_counters[net_msg_ptr->getMessageSize()][vnet]++;

                DEBUG_MSG(NETWORK_COMP, LowPrio, *m_out[vnet]);
                DEBUG_NEWLINE(NETWORK_COMP, HighPrio);
            }

            // Calculate the amount of bandwidth we spent on this message
            int diff = m_units_remaining[vnet] - bw_remaining;
            m_units_remaining[vnet] = max(0, diff);
            bw_remaining = max(0, -diff);
        }

        if (bw_remaining > 0 &&
            (m_in[vnet]->isReady() || m_units_remaining[vnet] > 0) &&
            !m_out[vnet]->areNSlotsAvailable(1)) {
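            // This vnet still has traffic to move (a ready message or a
            // partially transferred one) and we still have bandwidth, but
            // the output queue has no free slot, so no further progress is
            // possible this cycle.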
            DEBUG_MSG(NETWORK_COMP, LowPrio, vnet);
            // schedule me to wakeup again because I'm waiting for my
            // output queue to become available
            schedule_wakeup = true;
        }
    }

    // We should only wake up when we use the bandwidth
    // This is only mostly true
    // assert(bw_remaining != getLinkBandwidth());

    // Record that we used some or all of the link bandwidth this cycle
    double ratio = 1.0 - (double(bw_remaining) / double(getLinkBandwidth()));

    // If ratio = 0, we used no bandwidth, if ratio = 1, we used all
    linkUtilized(ratio);

    if (bw_remaining > 0 && !schedule_wakeup) {
        // We have extra bandwidth and our output buffer was
        // available, so we must not have anything else to do until
        // another message arrives.
        DEBUG_MSG(NETWORK_COMP, LowPrio, *this);
        DEBUG_MSG(NETWORK_COMP, LowPrio, "not scheduled again");
    } else {
        DEBUG_MSG(NETWORK_COMP, LowPrio, *this);
        DEBUG_MSG(NETWORK_COMP, LowPrio, "scheduled again");

        // We are out of bandwidth for this cycle, so wakeup next
        // cycle and continue
        g_eventQueue_ptr->scheduleEvent(this, 1);
    }
}

void
Throttle::printStats(ostream& out) const
{
    out << "utilized_percent: " << getUtilization() << endl;
}

void
Throttle::clearStats()
{
    m_ruby_start = g_eventQueue_ptr->getTime();
    m_links_utilized = 0.0;

    for (int i = 0; i < m_message_counters.size(); i++) {
        for (int j = 0; j < m_message_counters[i].size(); j++) {
            m_message_counters[i][j] = 0;
        }
    }
}

void
Throttle::printConfig(ostream& out) const
{
}

double
Throttle::getUtilization() const
{
    return 100.0 * double(m_links_utilized) /
        double(g_eventQueue_ptr->getTime() - m_ruby_start);
}

void
Throttle::print(ostream& out) const
{
    out << "[Throttle: " << m_sID << " " << m_node
        << " bw: " << getLinkBandwidth() << "]";
}

int
network_message_to_size(NetworkMessage* net_msg_ptr)
{
    assert(net_msg_ptr != NULL);

    int size = RubySystem::getNetwork()->
        MessageSizeType_to_int(net_msg_ptr->getMessageSize());
    size *= MESSAGE_SIZE_MULTIPLIER;

    // Artificially increase the size of broadcast messages
    if (BROADCAST_SCALING > 1 && net_msg_ptr->getDestination().isBroadcast())
        size *= BROADCAST_SCALING;

    return size;
}