Throttle.cc (8259:36987780169e)
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <cassert>

#include "base/cprintf.hh"
#include "debug/RubyNetwork.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/network/simple/Throttle.hh"
#include "mem/ruby/network/Network.hh"
#include "mem/ruby/slicc_interface/NetworkMessage.hh"
#include "mem/ruby/system/System.hh"

using namespace std;

const int HIGH_RANGE = 256;
const int ADJUST_INTERVAL = 50000;
const int MESSAGE_SIZE_MULTIPLIER = 1000;
//const int BROADCAST_SCALING = 4; // Have a 16p system act like a 64p system
const int BROADCAST_SCALING = 1;
const int PRIORITY_SWITCH_LIMIT = 128;

static int network_message_to_size(NetworkMessage* net_msg_ptr);

Throttle::Throttle(int sID, NodeID node, int link_latency,
                   int link_bandwidth_multiplier, int endpoint_bandwidth)
{
    init(node, link_latency, link_bandwidth_multiplier, endpoint_bandwidth);
    m_sID = sID;
}

Throttle::Throttle(NodeID node, int link_latency,
                   int link_bandwidth_multiplier, int endpoint_bandwidth)
{
    init(node, link_latency, link_bandwidth_multiplier, endpoint_bandwidth);
    m_sID = 0;
}

void
Throttle::init(NodeID node, int link_latency, int link_bandwidth_multiplier,
               int endpoint_bandwidth)
{
    m_node = node;
    m_vnets = 0;

    assert(link_bandwidth_multiplier > 0);
    m_link_bandwidth_multiplier = link_bandwidth_multiplier;
    m_link_latency = link_latency;
    m_endpoint_bandwidth = endpoint_bandwidth;

    m_wakeups_wo_switch = 0;
    clearStats();
}

void
Throttle::clear()
{
    for (int counter = 0; counter < m_vnets; counter++) {
        m_in[counter]->clear();
        m_out[counter]->clear();
    }
}

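// Pair each input buffer with the corresponding output buffer as one
// virtual network, and zero the per-message-size counters.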
void
Throttle::addLinks(const std::vector<MessageBuffer*>& in_vec,
                   const std::vector<MessageBuffer*>& out_vec)
{
    assert(in_vec.size() == out_vec.size());
    for (int i = 0; i < in_vec.size(); i++) {
        addVirtualNetwork(in_vec[i], out_vec[i]);
    }

    m_message_counters.resize(MessageSizeType_NUM);
    for (int i = 0; i < MessageSizeType_NUM; i++) {
        m_message_counters[i].resize(in_vec.size());
        for (int j = 0; j < m_message_counters[i].size(); j++) {
            m_message_counters[i][j] = 0;
        }
    }
}

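// Register one virtual network: this Throttle consumes messages from
// in_ptr and forwards them into out_ptr.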
void
Throttle::addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr)
{
    m_units_remaining.push_back(0);
    m_in.push_back(in_ptr);
    m_out.push_back(out_ptr);

    // Set consumer and description
    m_in[m_vnets]->setConsumer(this);
    string desc = "[Queue to Throttle " + NodeIDToString(m_sID) + " " +
        NodeIDToString(m_node) + "]";
    m_in[m_vnets]->setDescription(desc);
    m_vnets++;
}

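// Forward as many messages per cycle as the configured link bandwidth
// allows, giving the highest-numbered vnet priority most of the time and
// periodically inverting the order to avoid starvation.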
void
Throttle::wakeup()
{
    // Limit the messages sent this cycle to the available link
    // bandwidth (bytes/cycle).
    assert(getLinkBandwidth() > 0);
    int bw_remaining = getLinkBandwidth();

    // Give the highest numbered link priority most of the time
    m_wakeups_wo_switch++;
    int highest_prio_vnet = m_vnets - 1;
    int lowest_prio_vnet = 0;
    int counter = 1;
    bool schedule_wakeup = false;

    // Invert priorities to avoid the starvation seen in the component network
    if (m_wakeups_wo_switch > PRIORITY_SWITCH_LIMIT) {
        m_wakeups_wo_switch = 0;
        highest_prio_vnet = 0;
        lowest_prio_vnet = m_vnets - 1;
        counter = -1;
    }

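    // Walk the virtual networks from highest_prio_vnet toward
    // lowest_prio_vnet; multiplying both sides of the loop condition by
    // counter (+1 or -1) lets the same test handle either direction.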
    for (int vnet = highest_prio_vnet;
         (vnet * counter) >= (counter * lowest_prio_vnet);
         vnet -= counter) {

        assert(m_out[vnet] != NULL);
        assert(m_in[vnet] != NULL);
        assert(m_units_remaining[vnet] >= 0);

        while (bw_remaining > 0 &&
               (m_in[vnet]->isReady() || m_units_remaining[vnet] > 0) &&
               m_out[vnet]->areNSlotsAvailable(1)) {

            // See if we are done transferring the previous message on
            // this virtual network
            if (m_units_remaining[vnet] == 0 && m_in[vnet]->isReady()) {
                // Find the size of the message we are moving
                MsgPtr msg_ptr = m_in[vnet]->peekMsgPtr();
                NetworkMessage* net_msg_ptr =
                    safe_cast<NetworkMessage*>(msg_ptr.get());
                m_units_remaining[vnet] +=
                    network_message_to_size(net_msg_ptr);

                DPRINTF(RubyNetwork, "throttle: %d my bw %d bw spent "
                        "enqueueing net msg %d time: %lld.\n",
                        m_node, getLinkBandwidth(), m_units_remaining[vnet],
                        g_eventQueue_ptr->getTime());

                // Move the message
                m_out[vnet]->enqueue(m_in[vnet]->peekMsgPtr(), m_link_latency);
                m_in[vnet]->pop();

                // Count the message
                m_message_counters[net_msg_ptr->getMessageSize()][vnet]++;

                DPRINTF(RubyNetwork, "%s\n", *m_out[vnet]);
            }

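            // Units of the current message that exceed this cycle's
            // remaining bandwidth are carried over in m_units_remaining
            // and finish on a later wakeup; otherwise the leftover
            // bandwidth is available for the next message on this vnet.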
            // Calculate the amount of bandwidth we spent on this message
            int diff = m_units_remaining[vnet] - bw_remaining;
            m_units_remaining[vnet] = max(0, diff);
            bw_remaining = max(0, -diff);
        }

        if (bw_remaining > 0 &&
            (m_in[vnet]->isReady() || m_units_remaining[vnet] > 0) &&
            !m_out[vnet]->areNSlotsAvailable(1)) {
            DPRINTF(RubyNetwork, "vnet: %d", vnet);
            // schedule me to wakeup again because I'm waiting for my
            // output queue to become available
            schedule_wakeup = true;
        }
    }

    // We should only wake up when we use the bandwidth
    // This is only mostly true
    // assert(bw_remaining != getLinkBandwidth());

    // Record that we used some or all of the link bandwidth this cycle
    double ratio = 1.0 - (double(bw_remaining) / double(getLinkBandwidth()));

    // If ratio = 0, we used no bandwidth, if ratio = 1, we used all
    linkUtilized(ratio);

    if (bw_remaining > 0 && !schedule_wakeup) {
        // We have extra bandwidth and our output buffer was
        // available, so we must not have anything else to do until
        // another message arrives.
        DPRINTF(RubyNetwork, "%s not scheduled again\n", *this);
    } else {
        DPRINTF(RubyNetwork, "%s scheduled again\n", *this);

        // We are out of bandwidth for this cycle, so wakeup next
        // cycle and continue
        g_eventQueue_ptr->scheduleEvent(this, 1);
    }
}

void
Throttle::printStats(ostream& out) const
{
    out << "utilized_percent: " << getUtilization() << endl;
}

void
Throttle::clearStats()
{
    m_ruby_start = g_eventQueue_ptr->getTime();
    m_links_utilized = 0.0;

    for (int i = 0; i < m_message_counters.size(); i++) {
        for (int j = 0; j < m_message_counters[i].size(); j++) {
            m_message_counters[i][j] = 0;
        }
    }
}

void
Throttle::printConfig(ostream& out) const
{
}

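// Utilization since the last clearStats(), as a percentage of the link
// bandwidth available over the elapsed cycles.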
double
Throttle::getUtilization() const
{
    return 100.0 * double(m_links_utilized) /
        double(g_eventQueue_ptr->getTime() - m_ruby_start);
}

void
Throttle::print(ostream& out) const
{
    ccprintf(out, "[%i bw: %i]", m_node, getLinkBandwidth());
}

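// Convert a network message's MessageSizeType into the number of bandwidth
// units it consumes on the link (charged against bw_remaining in wakeup()).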
int
network_message_to_size(NetworkMessage* net_msg_ptr)
{
    assert(net_msg_ptr != NULL);

    int size = RubySystem::getNetwork()->
        MessageSizeType_to_int(net_msg_ptr->getMessageSize());
    size *= MESSAGE_SIZE_MULTIPLIER;

    // Artificially increase the size of broadcast messages
    if (BROADCAST_SCALING > 1 && net_msg_ptr->getDestination().isBroadcast())
        size *= BROADCAST_SCALING;

    return size;
}