PerfectSwitch.cc revision 7780:42da07116e12
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <algorithm>

#include "mem/protocol/Protocol.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/network/simple/PerfectSwitch.hh"
#include "mem/ruby/network/simple/SimpleNetwork.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/slicc_interface/NetworkMessage.hh"
#include "mem/ruby/system/System.hh"

using namespace std;

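// Number of consecutive wakeups after which the virtual-network scan order
// is inverted (see wakeup()), so that low-priority vnets are not starved.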
const int PRIORITY_SWITCH_LIMIT = 128;

// Comparison operator for the LinkOrder helper class: orders output links
// by m_value, the congestion metric computed in wakeup().
bool
operator<(const LinkOrder& l1, const LinkOrder& l2)
{
    return (l1.m_value < l2.m_value);
}

PerfectSwitch::PerfectSwitch(SwitchID sid, SimpleNetwork* network_ptr)
{
    m_virtual_networks = network_ptr->getNumberOfVirtualNetworks();
    m_switch_id = sid;
    m_round_robin_start = 0;
    m_network_ptr = network_ptr;
    m_wakeups_wo_switch = 0;
}

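// Register an input port: one MessageBuffer per virtual network. The switch
// makes itself the consumer of each buffer so that wakeup() is scheduled
// whenever a message arrives.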
void
PerfectSwitch::addInPort(const vector<MessageBuffer*>& in)
{
    assert(in.size() == m_virtual_networks);
    NodeID port = m_in.size();
    m_in.push_back(in);
    for (int j = 0; j < m_virtual_networks; j++) {
        m_in[port][j]->setConsumer(this);
        string desc = csprintf("[Queue from port %s %s %s to PerfectSwitch]",
            NodeIDToString(m_switch_id), NodeIDToString(port),
            NodeIDToString(j));
        m_in[port][j]->setDescription(desc);
    }
}

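// Register an output port and the set of destinations reachable through it.
// The link is also added to m_link_order, which adaptive routing re-sorts
// by congestion in wakeup().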
void
PerfectSwitch::addOutPort(const vector<MessageBuffer*>& out,
    const NetDest& routing_table_entry)
{
    assert(out.size() == m_virtual_networks);

    // Setup link order
    LinkOrder l;
    l.m_value = 0;
    l.m_link = m_out.size();
    m_link_order.push_back(l);

    // Add to routing table
    m_out.push_back(out);
    m_routing_table.push_back(routing_table_entry);
}

void
PerfectSwitch::clearRoutingTables()
{
    m_routing_table.clear();
}

void
PerfectSwitch::clearBuffers()
{
    for (int i = 0; i < m_in.size(); i++) {
        for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
            m_in[i][vnet]->clear();
        }
    }

    for (int i = 0; i < m_out.size(); i++) {
        for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
            m_out[i][vnet]->clear();
        }
    }
}

void
PerfectSwitch::reconfigureOutPort(const NetDest& routing_table_entry)
{
    m_routing_table.push_back(routing_table_entry);
}

PerfectSwitch::~PerfectSwitch()
{
}

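// Main scheduling loop. Virtual networks are scanned in priority order and
// input ports are serviced round-robin within each vnet: every ready message
// is matched against the routing table, output buffer space is checked, and
// the message is enqueued on each matching output link (cloned when it fans
// out to more than one link).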
void
PerfectSwitch::wakeup()
{
    DPRINTF(RubyNetwork, "m_switch_id: %d\n", m_switch_id);

    MsgPtr msg_ptr;

    // Give the highest numbered link priority most of the time
    m_wakeups_wo_switch++;
    int highest_prio_vnet = m_virtual_networks-1;
    int lowest_prio_vnet = 0;
    int decrementer = 1;
    NetworkMessage* net_msg_ptr = NULL;

    // invert priorities to avoid starvation seen in the component network
    if (m_wakeups_wo_switch > PRIORITY_SWITCH_LIMIT) {
        m_wakeups_wo_switch = 0;
        highest_prio_vnet = 0;
        lowest_prio_vnet = m_virtual_networks-1;
        decrementer = -1;
    }

    // Walk the incoming queues over all virtual networks, from highest to
    // lowest priority; the decrementer lets the same loop run in either
    // scan direction.
    for (int vnet = highest_prio_vnet;
         (vnet * decrementer) >= (decrementer * lowest_prio_vnet);
         vnet -= decrementer) {

        // This is for round-robin scheduling
        int incoming = m_round_robin_start;
        m_round_robin_start++;
        if (m_round_robin_start >= m_in.size()) {
            m_round_robin_start = 0;
        }

        // for all input ports, use round robin scheduling
        for (int counter = 0; counter < m_in.size(); counter++) {
            // Round robin scheduling
            incoming++;
            if (incoming >= m_in.size()) {
                incoming = 0;
            }

            // temporary vectors to store the routing results
            vector<LinkID> output_links;
            vector<NetDest> output_link_destinations;

            // Is there a message waiting?
            while (m_in[incoming][vnet]->isReady()) {
                DPRINTF(RubyNetwork, "incoming: %d\n", incoming);

                // Peek at message
                msg_ptr = m_in[incoming][vnet]->peekMsgPtr();
                net_msg_ptr = safe_cast<NetworkMessage*>(msg_ptr.get());
                DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));

                output_links.clear();
                output_link_destinations.clear();
                NetDest msg_dsts =
                    net_msg_ptr->getInternalDestination();

                // Unfortunately, the token-protocol sends some
                // zero-destination messages, so this assert isn't valid
                // assert(msg_dsts.count() > 0);

                assert(m_link_order.size() == m_routing_table.size());
                assert(m_link_order.size() == m_out.size());

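                // With adaptive routing, prefer the least congested output
                // links (unless this vnet must preserve ordering).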
                if (m_network_ptr->getAdaptiveRouting()) {
                    if (m_network_ptr->isVNetOrdered(vnet)) {
                        // Don't adaptively route
                        for (int out = 0; out < m_out.size(); out++) {
                            m_link_order[out].m_link = out;
                            m_link_order[out].m_value = 0;
                        }
                    } else {
                        // Find how clogged each link is
                        for (int out = 0; out < m_out.size(); out++) {
                            int out_queue_length = 0;
                            for (int v = 0; v < m_virtual_networks; v++) {
                                out_queue_length += m_out[out][v]->getSize();
                            }
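                            // Queue occupancy is the primary sort key; the
                            // low-order random byte breaks ties between
                            // equally loaded links.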
                            int value =
                                (out_queue_length << 8) | (random() & 0xff);
                            m_link_order[out].m_link = out;
                            m_link_order[out].m_value = value;
                        }

                        // Look at the most empty link first
                        sort(m_link_order.begin(), m_link_order.end());
                    }
                }

                for (int i = 0; i < m_routing_table.size(); i++) {
                    // pick the next link to look at
                    int link = m_link_order[i].m_link;
                    NetDest dst = m_routing_table[link];
                    DPRINTF(RubyNetwork, "dst: %s\n", dst);

                    if (!msg_dsts.intersectionIsNotEmpty(dst))
                        continue;

                    // Remember what link we're using
                    output_links.push_back(link);

                    // Need to remember which destinations need this
                    // message in another vector.  This Set is the
                    // intersection of the routing_table entry and the
                    // current destination set.  The intersection cannot
                    // be empty, since we did not 'continue' above.
                    output_link_destinations.push_back(msg_dsts.AND(dst));

                    // Next, we update the msg_destination not to
                    // include those nodes that were already handled
                    // by this link
                    msg_dsts.removeNetDest(dst);
                }

                assert(msg_dsts.count() == 0);
                //assert(output_links.size() > 0);

                // Check for resources - for all outgoing queues
                bool enough = true;
                for (int i = 0; i < output_links.size(); i++) {
                    int outgoing = output_links[i];
                    if (!m_out[outgoing][vnet]->areNSlotsAvailable(1))
                        enough = false;
                    DPRINTF(RubyNetwork, "Checking if node is blocked\n"
                            "outgoing: %d, vnet: %d, enough: %d\n",
                            outgoing, vnet, enough);
                }

                // There were not enough resources
                if (!enough) {
                    g_eventQueue_ptr->scheduleEvent(this, 1);
                    DPRINTF(RubyNetwork, "Can't deliver message since a node "
                            "is blocked\n"
                            "Message: %s\n", (*net_msg_ptr));
                    break; // go to next incoming port
                }

                MsgPtr unmodified_msg_ptr;

                if (output_links.size() > 1) {
                    // If we are sending this message down more than one
                    // link (size > 1), we need a copy of the message so
                    // each branch can carry a different internal
                    // destination.  Keep an unmodified MsgPtr around,
                    // because the MessageBuffer enqueue function will
                    // modify the message.

                    // This magic line creates a private copy of the
                    // message
                    unmodified_msg_ptr = msg_ptr->clone();
                }

                // Enqueue it - for all outgoing queues
                for (int i = 0; i < output_links.size(); i++) {
                    int outgoing = output_links[i];

                    if (i > 0) {
                        // create a private copy of the unmodified
                        // message
                        msg_ptr = unmodified_msg_ptr->clone();
                    }

                    // Change the internal destination set of the
                    // message so it knows which destinations this
                    // link is responsible for.
                    net_msg_ptr = safe_cast<NetworkMessage*>(msg_ptr.get());
                    net_msg_ptr->getInternalDestination() =
                        output_link_destinations[i];

                    // Enqueue msg
                    DPRINTF(RubyNetwork, "Switch: %d enqueuing net msg from "
                            "inport[%d][%d] to outport [%d][%d] time: %lld.\n",
                            m_switch_id, incoming, vnet, outgoing, vnet,
                            g_eventQueue_ptr->getTime());

                    m_out[outgoing][vnet]->enqueue(msg_ptr);
                }

                // Dequeue msg
                m_in[incoming][vnet]->pop();
            }
        }
    }
}

void
PerfectSwitch::printStats(std::ostream& out) const
{
    out << "PerfectSwitch printStats" << endl;
}

void
PerfectSwitch::clearStats()
{
}

void
PerfectSwitch::printConfig(std::ostream& out) const
{
}

void
PerfectSwitch::print(std::ostream& out) const
{
    out << "[PerfectSwitch " << m_switch_id << "]";
}
