old (7024:30883414ad10) → new (7054:7d6862b80049)
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;

--- 12 unchanged lines hidden ---

 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id$
 *
 * Description: see Throttle.hh
 *
 */

#include "mem/ruby/network/simple/Throttle.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/network/Network.hh"
#include "mem/ruby/system/System.hh"
#include "mem/ruby/slicc_interface/NetworkMessage.hh"
#include "mem/protocol/Protocol.hh"

const int HIGH_RANGE = 256;
const int ADJUST_INTERVAL = 50000;
const int MESSAGE_SIZE_MULTIPLIER = 1000;
//const int BROADCAST_SCALING = 4; // Have a 16p system act like a 64p system
const int BROADCAST_SCALING = 1;
const int PRIORITY_SWITCH_LIMIT = 128;

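// How the constants above are used in this file: MESSAGE_SIZE_MULTIPLIER scales
// the per-type sizes returned by MessageSizeType_to_int() into the "units" that
// wakeup() charges against the per-cycle link bandwidth; BROADCAST_SCALING (when
// greater than 1) further inflates broadcast messages in network_message_to_size();
// and PRIORITY_SWITCH_LIMIT sets how many wakeups pass before wakeup() inverts the
// virtual-network service order. HIGH_RANGE and ADJUST_INTERVAL are not referenced
// in the code shown here.
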
static int network_message_to_size(NetworkMessage* net_msg_ptr);

extern std::ostream * debug_cout_ptr;

Throttle::Throttle(int sID, NodeID node, int link_latency, int link_bandwidth_multiplier)
{
  init(node, link_latency, link_bandwidth_multiplier);
  m_sID = sID;
}

Throttle::Throttle(NodeID node, int link_latency, int link_bandwidth_multiplier)
{
  init(node, link_latency, link_bandwidth_multiplier);
  m_sID = 0;
}

void Throttle::init(NodeID node, int link_latency, int link_bandwidth_multiplier)
{
  m_node = node;
  m_vnets = 0;

  ASSERT(link_bandwidth_multiplier > 0);
  m_link_bandwidth_multiplier = link_bandwidth_multiplier;
  m_link_latency = link_latency;

  m_wakeups_wo_switch = 0;
  clearStats();
}

void Throttle::clear()
{
  for (int counter = 0; counter < m_vnets; counter++) {
    m_in[counter]->clear();
    m_out[counter]->clear();
  }
}

void Throttle::addLinks(const Vector<MessageBuffer*>& in_vec, const Vector<MessageBuffer*>& out_vec)
{
  assert(in_vec.size() == out_vec.size());
  for (int i=0; i<in_vec.size(); i++) {
    addVirtualNetwork(in_vec[i], out_vec[i]);
  }

  m_message_counters.setSize(MessageSizeType_NUM);
  for (int i=0; i<MessageSizeType_NUM; i++) {
    m_message_counters[i].setSize(in_vec.size());
    for (int j=0; j<m_message_counters[i].size(); j++) {
      m_message_counters[i][j] = 0;
    }
  }
}

void Throttle::addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr)
{
  m_units_remaining.insertAtBottom(0);
  m_in.insertAtBottom(in_ptr);
  m_out.insertAtBottom(out_ptr);

  // Set consumer and description
  m_in[m_vnets]->setConsumer(this);
  string desc = "[Queue to Throttle " + NodeIDToString(m_sID) + " " + NodeIDToString(m_node) + "]";
  m_in[m_vnets]->setDescription(desc);
  m_vnets++;
}

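// wakeup() is the per-cycle throttling step: it drains messages from the input
// buffers into the output buffers in virtual-network priority order, charging
// each message's size (in units, see network_message_to_size()) against this
// cycle's link bandwidth, and reschedules itself whenever bandwidth runs out or
// an output buffer is full.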
void Throttle::wakeup()
{
  // Limit the number of bytes sent this cycle to the available link bandwidth (bytes/cycle).
  assert(getLinkBandwidth() > 0);
  int bw_remaining = getLinkBandwidth();

  // Give the highest-numbered vnet priority most of the time
  m_wakeups_wo_switch++;
  int highest_prio_vnet = m_vnets-1;
  int lowest_prio_vnet = 0;
  int counter = 1;
  bool schedule_wakeup = false;

  // invert priorities to avoid starvation seen in the component network
  if (m_wakeups_wo_switch > PRIORITY_SWITCH_LIMIT) {
    m_wakeups_wo_switch = 0;
    highest_prio_vnet = 0;
    lowest_prio_vnet = m_vnets-1;
    counter = -1;
  }

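  // Iteration-direction trick: with counter = +1 the loop below walks vnets from
  // highest_prio_vnet down to lowest_prio_vnet; after PRIORITY_SWITCH_LIMIT wakeups
  // counter flips to -1 and the same loop walks from vnet 0 upward, so low-numbered
  // vnets periodically get first claim on the bandwidth.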
  for (int vnet = highest_prio_vnet; (vnet*counter) >= (counter*lowest_prio_vnet); vnet -= counter) {

    assert(m_out[vnet] != NULL);
    assert(m_in[vnet] != NULL);
    assert(m_units_remaining[vnet] >= 0);

    while ((bw_remaining > 0) && ((m_in[vnet]->isReady()) || (m_units_remaining[vnet] > 0)) && m_out[vnet]->areNSlotsAvailable(1)) {

      // See if we are done transferring the previous message on this virtual network
      if (m_units_remaining[vnet] == 0 && m_in[vnet]->isReady()) {

        // Find the size of the message we are moving
        MsgPtr msg_ptr = m_in[vnet]->peekMsgPtr();
        NetworkMessage* net_msg_ptr = dynamic_cast<NetworkMessage*>(msg_ptr.ref());
        m_units_remaining[vnet] += network_message_to_size(net_msg_ptr);

        DEBUG_NEWLINE(NETWORK_COMP,HighPrio);
        DEBUG_MSG(NETWORK_COMP,HighPrio,"throttle: " + int_to_string(m_node)
                  + " my bw " + int_to_string(getLinkBandwidth())
                  + " bw spent enqueueing net msg " + int_to_string(m_units_remaining[vnet])
                  + " time: " + int_to_string(g_eventQueue_ptr->getTime()) + ".");

        // Move the message
        m_out[vnet]->enqueue(m_in[vnet]->peekMsgPtr(), m_link_latency);
        m_in[vnet]->pop();

        // Count the message
        m_message_counters[net_msg_ptr->getMessageSize()][vnet]++;

        DEBUG_MSG(NETWORK_COMP,LowPrio,*m_out[vnet]);
        DEBUG_NEWLINE(NETWORK_COMP,HighPrio);
      }

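      // Bandwidth accounting: if this message needs more units than remain this
      // cycle, the leftover stays in m_units_remaining[vnet] and bw_remaining drops
      // to 0; otherwise the message is fully paid for and the unused bandwidth
      // carries over to the next message.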
      // Calculate the amount of bandwidth we spent on this message
      int diff = m_units_remaining[vnet] - bw_remaining;
      m_units_remaining[vnet] = max(0, diff);
      bw_remaining = max(0, -diff);
    }

    if ((bw_remaining > 0) && ((m_in[vnet]->isReady()) || (m_units_remaining[vnet] > 0)) && !m_out[vnet]->areNSlotsAvailable(1)) {
      DEBUG_MSG(NETWORK_COMP,LowPrio,vnet);
      schedule_wakeup = true; // schedule me to wakeup again because I'm waiting for my output queue to become available
    }
  }

  // We should only wake up when we use the bandwidth
  // assert(bw_remaining != getLinkBandwidth()); // This is only mostly true

  // Record that we used some or all of the link bandwidth this cycle
  double ratio = 1.0-(double(bw_remaining)/double(getLinkBandwidth()));
  // If ratio = 0, we used no bandwidth, if ratio = 1, we used all
  linkUtilized(ratio);

  if ((bw_remaining > 0) && !schedule_wakeup) {
    // We have extra bandwidth and our output buffer was available, so we must not have anything else to do until another message arrives.
    DEBUG_MSG(NETWORK_COMP,LowPrio,*this);
    DEBUG_MSG(NETWORK_COMP,LowPrio,"not scheduled again");
  } else {
    DEBUG_MSG(NETWORK_COMP,LowPrio,*this);
    DEBUG_MSG(NETWORK_COMP,LowPrio,"scheduled again");
    // We are out of bandwidth for this cycle, so wakeup next cycle and continue
    g_eventQueue_ptr->scheduleEvent(this, 1);
  }
}

void Throttle::printStats(ostream& out) const
{
  out << "utilized_percent: " << getUtilization() << endl;
}

void Throttle::clearStats()
{
  m_ruby_start = g_eventQueue_ptr->getTime();
  m_links_utilized = 0.0;

  for (int i=0; i<m_message_counters.size(); i++) {
    for (int j=0; j<m_message_counters[i].size(); j++) {
      m_message_counters[i][j] = 0;
    }
  }
}

void Throttle::printConfig(ostream& out) const
{
}

double Throttle::getUtilization() const
{
  return (100.0 * double(m_links_utilized)) / (double(g_eventQueue_ptr->getTime()-m_ruby_start));
}

void Throttle::print(ostream& out) const
{
  out << "[Throttle: " << m_sID << " " << m_node << " bw: " << getLinkBandwidth() << "]";
}

// Helper function

static
int network_message_to_size(NetworkMessage* net_msg_ptr)
{
  assert(net_msg_ptr != NULL);

  // Artificially increase the size of broadcast messages
  if (BROADCAST_SCALING > 1) {
    if (net_msg_ptr->getDestination().isBroadcast()) {
      return (RubySystem::getNetwork()->MessageSizeType_to_int(net_msg_ptr->getMessageSize()) * MESSAGE_SIZE_MULTIPLIER * BROADCAST_SCALING);
    }
  }
  return (RubySystem::getNetwork()->MessageSizeType_to_int(net_msg_ptr->getMessageSize()) * MESSAGE_SIZE_MULTIPLIER);
}