Throttle.cc (7054:7d6862b80049 → 7055:4e24742201d7)
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "base/cprintf.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/network/Network.hh"
#include "mem/ruby/network/simple/Throttle.hh"
#include "mem/ruby/slicc_interface/NetworkMessage.hh"
#include "mem/ruby/system/System.hh"

using namespace std;

const int HIGH_RANGE = 256;
const int ADJUST_INTERVAL = 50000;
const int MESSAGE_SIZE_MULTIPLIER = 1000;
//const int BROADCAST_SCALING = 4; // Have a 16p system act like a 64p system
const int BROADCAST_SCALING = 1;
const int PRIORITY_SWITCH_LIMIT = 128;

static int network_message_to_size(NetworkMessage* net_msg_ptr);

extern ostream *debug_cout_ptr;

Throttle::Throttle(int sID, NodeID node, int link_latency,
                   int link_bandwidth_multiplier)
{
    init(node, link_latency, link_bandwidth_multiplier);
    m_sID = sID;
}

Throttle::Throttle(NodeID node, int link_latency,
                   int link_bandwidth_multiplier)
{
    init(node, link_latency, link_bandwidth_multiplier);
    m_sID = 0;
}

void
Throttle::init(NodeID node, int link_latency, int link_bandwidth_multiplier)
{
    m_node = node;
    m_vnets = 0;

    ASSERT(link_bandwidth_multiplier > 0);
    m_link_bandwidth_multiplier = link_bandwidth_multiplier;
    m_link_latency = link_latency;

    m_wakeups_wo_switch = 0;
    clearStats();
}

void
Throttle::clear()
{
    for (int counter = 0; counter < m_vnets; counter++) {
        m_in[counter]->clear();
        m_out[counter]->clear();
    }
}

void
Throttle::addLinks(const Vector<MessageBuffer*>& in_vec,
                   const Vector<MessageBuffer*>& out_vec)
{
    assert(in_vec.size() == out_vec.size());
    for (int i=0; i<in_vec.size(); i++) {
        addVirtualNetwork(in_vec[i], out_vec[i]);
    }

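    // Per-vnet message counts, indexed first by MessageSizeType and then by
    // virtual network; zeroed here and again in clearStats().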
    m_message_counters.setSize(MessageSizeType_NUM);
    for (int i = 0; i < MessageSizeType_NUM; i++) {
        m_message_counters[i].setSize(in_vec.size());
        for (int j = 0; j<m_message_counters[i].size(); j++) {
            m_message_counters[i][j] = 0;
        }
    }
}

void
Throttle::addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr)
{
    m_units_remaining.insertAtBottom(0);
    m_in.insertAtBottom(in_ptr);
    m_out.insertAtBottom(out_ptr);

    // Set consumer and description
    m_in[m_vnets]->setConsumer(this);
    string desc = "[Queue to Throttle " + NodeIDToString(m_sID) + " " +
        NodeIDToString(m_node) + "]";
    m_in[m_vnets]->setDescription(desc);
    m_vnets++;
}

void
Throttle::wakeup()
{
    // Limit the number of messages sent each cycle to the available
    // link bandwidth (bytes/cycle).
    assert(getLinkBandwidth() > 0);
    int bw_remaining = getLinkBandwidth();

    // Give the highest numbered vnet priority most of the time
    m_wakeups_wo_switch++;
    int highest_prio_vnet = m_vnets-1;
    int lowest_prio_vnet = 0;
    int counter = 1;
    bool schedule_wakeup = false;

    // invert priorities to avoid starvation seen in the component network
    if (m_wakeups_wo_switch > PRIORITY_SWITCH_LIMIT) {
        m_wakeups_wo_switch = 0;
        highest_prio_vnet = 0;
        lowest_prio_vnet = m_vnets-1;
        counter = -1;
    }

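    // Sweep the vnets from highest_prio_vnet toward lowest_prio_vnet;
    // 'counter' (+1 or -1) encodes the direction so the same loop body
    // serves either priority order.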
    for (int vnet = highest_prio_vnet;
         (vnet * counter) >= (counter * lowest_prio_vnet);
         vnet -= counter) {

        assert(m_out[vnet] != NULL);
        assert(m_in[vnet] != NULL);
        assert(m_units_remaining[vnet] >= 0);

        while (bw_remaining > 0 &&
               (m_in[vnet]->isReady() || m_units_remaining[vnet] > 0) &&
               m_out[vnet]->areNSlotsAvailable(1)) {

            // See if we are done transferring the previous message on
            // this virtual network
            if (m_units_remaining[vnet] == 0 && m_in[vnet]->isReady()) {
                // Find the size of the message we are moving
                MsgPtr msg_ptr = m_in[vnet]->peekMsgPtr();
                NetworkMessage* net_msg_ptr =
                    safe_cast<NetworkMessage*>(msg_ptr.ref());
                m_units_remaining[vnet] +=
                    network_message_to_size(net_msg_ptr);

                DEBUG_NEWLINE(NETWORK_COMP,HighPrio);
                DEBUG_MSG(NETWORK_COMP, HighPrio,
                    csprintf("throttle: %d my bw %d bw spent enqueueing "
                        "net msg %d time: %d.",
                        m_node, getLinkBandwidth(), m_units_remaining[vnet],
                        g_eventQueue_ptr->getTime()));

                // Move the message
                m_out[vnet]->enqueue(m_in[vnet]->peekMsgPtr(), m_link_latency);
                m_in[vnet]->pop();

                // Count the message
                m_message_counters[net_msg_ptr->getMessageSize()][vnet]++;

                DEBUG_MSG(NETWORK_COMP,LowPrio,*m_out[vnet]);
                DEBUG_NEWLINE(NETWORK_COMP,HighPrio);
            }

            // Calculate the amount of bandwidth we spent on this message
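            // If the message needs more units than remain this cycle, the
            // leftover is carried in m_units_remaining for a later cycle;
            // otherwise the whole message is charged to this cycle's budget.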
            int diff = m_units_remaining[vnet] - bw_remaining;
            m_units_remaining[vnet] = max(0, diff);
            bw_remaining = max(0, -diff);
        }

        if (bw_remaining > 0 &&
            (m_in[vnet]->isReady() || m_units_remaining[vnet] > 0) &&
            !m_out[vnet]->areNSlotsAvailable(1)) {
            DEBUG_MSG(NETWORK_COMP,LowPrio,vnet);
            // schedule me to wakeup again because I'm waiting for my
            // output queue to become available
            schedule_wakeup = true;
        }
    }

    // We should only wake up when we use the bandwidth
    // This is only mostly true
    // assert(bw_remaining != getLinkBandwidth());

    // Record that we used some or all of the link bandwidth this cycle
    double ratio = 1.0 - (double(bw_remaining) / double(getLinkBandwidth()));

    // If ratio = 0, we used no bandwidth; if ratio = 1, we used it all
    linkUtilized(ratio);

    if (bw_remaining > 0 && !schedule_wakeup) {
        // We have extra bandwidth and our output buffer was
        // available, so we must not have anything else to do until
        // another message arrives.
        DEBUG_MSG(NETWORK_COMP, LowPrio, *this);
        DEBUG_MSG(NETWORK_COMP, LowPrio, "not scheduled again");
    } else {
        DEBUG_MSG(NETWORK_COMP, LowPrio, *this);
        DEBUG_MSG(NETWORK_COMP, LowPrio, "scheduled again");

        // We are out of bandwidth for this cycle, so wakeup next
        // cycle and continue
        g_eventQueue_ptr->scheduleEvent(this, 1);
    }
}

void
Throttle::printStats(ostream& out) const
{
    out << "utilized_percent: " << getUtilization() << endl;
}

void
Throttle::clearStats()
{
    m_ruby_start = g_eventQueue_ptr->getTime();
    m_links_utilized = 0.0;

    for (int i = 0; i < m_message_counters.size(); i++) {
        for (int j = 0; j < m_message_counters[i].size(); j++) {
            m_message_counters[i][j] = 0;
        }
    }
}

void
Throttle::printConfig(ostream& out) const
{
}

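// Average link utilization since the last clearStats(), as a percent:
// m_links_utilized accumulates the per-cycle usage ratio recorded via
// linkUtilized(), divided here by the cycles elapsed since m_ruby_start.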
double
Throttle::getUtilization() const
{
    return 100.0 * double(m_links_utilized) /
        double(g_eventQueue_ptr->getTime()-m_ruby_start);
}

void
Throttle::print(ostream& out) const
{
    out << "[Throttle: " << m_sID << " " << m_node
        << " bw: " << getLinkBandwidth() << "]";
}

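// Convert a message's abstract MessageSizeType into bandwidth units: the
// per-type size reported by the network, scaled by MESSAGE_SIZE_MULTIPLIER,
// and by BROADCAST_SCALING for broadcast destinations when that constant > 1.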
int
network_message_to_size(NetworkMessage* net_msg_ptr)
{
    assert(net_msg_ptr != NULL);

    int size = RubySystem::getNetwork()->
        MessageSizeType_to_int(net_msg_ptr->getMessageSize());
    size *= MESSAGE_SIZE_MULTIPLIER;

    // Artificially increase the size of broadcast messages
    if (BROADCAST_SCALING > 1 && net_msg_ptr->getDestination().isBroadcast())
        size *= BROADCAST_SCALING;

    return size;
}