PerfectSwitch.cc revision 7055:4e24742201d7
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/gems_common/util.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/network/simple/PerfectSwitch.hh"
#include "mem/ruby/network/simple/SimpleNetwork.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/slicc_interface/NetworkMessage.hh"
#include "mem/ruby/system/System.hh"

using namespace std;

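// Number of consecutive wakeups after which the virtual network priority
// order is inverted in wakeup(), so that low-numbered vnets are not starved.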
const int PRIORITY_SWITCH_LIMIT = 128;

// Comparison operator for the LinkOrder helper class; used by sortVector()
// to order links by m_value when adaptive routing is enabled.
bool
operator<(const LinkOrder& l1, const LinkOrder& l2)
{
    return (l1.m_value < l2.m_value);
}

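// A PerfectSwitch routes messages from its input buffers to its output
// buffers in a single wakeup, limited only by the space available in the
// output buffers: each ready message is forwarded on every output link
// whose routing table entry intersects the message's destination set.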
PerfectSwitch::PerfectSwitch(SwitchID sid, SimpleNetwork* network_ptr)
{
    m_virtual_networks = network_ptr->getNumberOfVirtualNetworks();
    m_switch_id = sid;
    m_round_robin_start = 0;
    m_network_ptr = network_ptr;
    m_wakeups_wo_switch = 0;
}

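// Register an input port: one MessageBuffer per virtual network. The switch
// installs itself as the consumer of each buffer, so its wakeup() is
// scheduled whenever a message arrives on any of its inputs.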
void
PerfectSwitch::addInPort(const Vector<MessageBuffer*>& in)
{
    assert(in.size() == m_virtual_networks);
    NodeID port = m_in.size();
    m_in.insertAtBottom(in);
    for (int j = 0; j < m_virtual_networks; j++) {
        m_in[port][j]->setConsumer(this);
        string desc = csprintf("[Queue from port %s %s %s to PerfectSwitch]",
            NodeIDToString(m_switch_id), NodeIDToString(port),
            NodeIDToString(j));
        m_in[port][j]->setDescription(desc);
    }
}

void
PerfectSwitch::addOutPort(const Vector<MessageBuffer*>& out,
    const NetDest& routing_table_entry)
{
    assert(out.size() == m_virtual_networks);

    // Setup link order
    LinkOrder l;
    l.m_value = 0;
    l.m_link = m_out.size();
    m_link_order.insertAtBottom(l);

    // Add to routing table
    m_out.insertAtBottom(out);
    m_routing_table.insertAtBottom(routing_table_entry);
}

void
PerfectSwitch::clearRoutingTables()
{
    m_routing_table.clear();
}

void
PerfectSwitch::clearBuffers()
{
    for (int i = 0; i < m_in.size(); i++) {
        for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
            m_in[i][vnet]->clear();
        }
    }

    for (int i = 0; i < m_out.size(); i++) {
        for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
            m_out[i][vnet]->clear();
        }
    }
}

void
PerfectSwitch::reconfigureOutPort(const NetDest& routing_table_entry)
{
    m_routing_table.insertAtBottom(routing_table_entry);
}

PerfectSwitch::~PerfectSwitch()
{
}

void
PerfectSwitch::wakeup()
{
    DEBUG_EXPR(NETWORK_COMP, MedPrio, m_switch_id);

    MsgPtr msg_ptr;

    // Give the highest numbered vnet priority most of the time
    m_wakeups_wo_switch++;
    int highest_prio_vnet = m_virtual_networks-1;
    int lowest_prio_vnet = 0;
    int decrementer = 1;
    NetworkMessage* net_msg_ptr = NULL;

    // invert priorities to avoid starvation seen in the component network
    if (m_wakeups_wo_switch > PRIORITY_SWITCH_LIMIT) {
        m_wakeups_wo_switch = 0;
        highest_prio_vnet = 0;
        lowest_prio_vnet = m_virtual_networks-1;
        decrementer = -1;
    }

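    // The loop below walks the vnets from highest_prio_vnet toward
    // lowest_prio_vnet; with decrementer == -1 the bounds are swapped and
    // the walk (and thus the service order) is reversed.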
    // For all components' incoming queues
    for (int vnet = highest_prio_vnet;
         (vnet * decrementer) >= (decrementer * lowest_prio_vnet);
         vnet -= decrementer) {

        // This is for round-robin scheduling
        int incoming = m_round_robin_start;
        m_round_robin_start++;
        if (m_round_robin_start >= m_in.size()) {
            m_round_robin_start = 0;
        }

        // for all input ports, use round robin scheduling
        for (int counter = 0; counter < m_in.size(); counter++) {
            // Round robin scheduling
            incoming++;
            if (incoming >= m_in.size()) {
                incoming = 0;
            }

            // temporary vectors to store the routing results
            Vector<LinkID> output_links;
            Vector<NetDest> output_link_destinations;

            // Is there a message waiting?
            while (m_in[incoming][vnet]->isReady()) {
                DEBUG_EXPR(NETWORK_COMP, MedPrio, incoming);

                // Peek at message
                msg_ptr = m_in[incoming][vnet]->peekMsgPtr();
                net_msg_ptr = dynamic_cast<NetworkMessage*>(msg_ptr.ref());
                DEBUG_EXPR(NETWORK_COMP, MedPrio, *net_msg_ptr);

                output_links.clear();
                output_link_destinations.clear();
                NetDest msg_dsts =
                    net_msg_ptr->getInternalDestination();

                // Unfortunately, the token-protocol sends some
                // zero-destination messages, so this assert isn't valid
                // assert(msg_dsts.count() > 0);

                assert(m_link_order.size() == m_routing_table.size());
                assert(m_link_order.size() == m_out.size());

                if (m_network_ptr->getAdaptiveRouting()) {
                    if (m_network_ptr->isVNetOrdered(vnet)) {
                        // Don't adaptively route
                        for (int out = 0; out < m_out.size(); out++) {
                            m_link_order[out].m_link = out;
                            m_link_order[out].m_value = 0;
                        }
                    } else {
                        // Find how clogged each link is
                        for (int out = 0; out < m_out.size(); out++) {
                            int out_queue_length = 0;
                            for (int v = 0; v < m_virtual_networks; v++) {
                                out_queue_length += m_out[out][v]->getSize();
                            }
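                            // Rank this link by total occupancy; the random
                            // low-order byte breaks ties between equally
                            // full links so traffic spreads across them.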
                            int value =
                                (out_queue_length << 8) | (random() & 0xff);
                            m_link_order[out].m_link = out;
                            m_link_order[out].m_value = value;
                        }

                        // Look at the least-full link first
                        m_link_order.sortVector();
                    }
                }

                for (int i = 0; i < m_routing_table.size(); i++) {
                    // pick the next link to look at
                    int link = m_link_order[i].m_link;
                    NetDest dst = m_routing_table[link];
                    DEBUG_EXPR(NETWORK_COMP, MedPrio, dst);

                    if (!msg_dsts.intersectionIsNotEmpty(dst))
                        continue;

                    // Remember what link we're using
                    output_links.insertAtBottom(link);

                    // Need to remember which destinations need this
                    // message in another vector.  This Set is the
                    // intersection of the routing_table entry and the
                    // current destination set.  The intersection must
                    // not be empty, since we did not "continue" above.
                    output_link_destinations.insertAtBottom(msg_dsts.AND(dst));

                    // Next, we update the msg_destination not to
                    // include those nodes that were already handled
                    // by this link
                    msg_dsts.removeNetDest(dst);
                }

                assert(msg_dsts.count() == 0);
                //assert(output_links.size() > 0);

                // Check for resources - for all outgoing queues
                bool enough = true;
                for (int i = 0; i < output_links.size(); i++) {
                    int outgoing = output_links[i];
                    if (!m_out[outgoing][vnet]->areNSlotsAvailable(1))
                        enough = false;
                    DEBUG_MSG(NETWORK_COMP, HighPrio,
                        "checking if node is blocked");
                    DEBUG_EXPR(NETWORK_COMP, HighPrio, outgoing);
                    DEBUG_EXPR(NETWORK_COMP, HighPrio, vnet);
                    DEBUG_EXPR(NETWORK_COMP, HighPrio, enough);
                }

                // There were not enough resources
                if (!enough) {
                    g_eventQueue_ptr->scheduleEvent(this, 1);
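                    // The blocked message stays at the head of its input
                    // queue (it is only popped after a successful enqueue
                    // below), so it is retried when this wakeup fires.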
                    DEBUG_MSG(NETWORK_COMP, HighPrio,
                        "Can't deliver message since a node is blocked");
                    DEBUG_EXPR(NETWORK_COMP, HighPrio, *net_msg_ptr);
                    break; // go to next incoming port
                }

                MsgPtr unmodified_msg_ptr;

                if (output_links.size() > 1) {
                    // If we are sending this message down more than
                    // one link (size>1), we need to make a copy of
                    // the message so each branch can have a different
                    // internal destination.  We need to keep an
                    // unmodified MsgPtr because the MessageBuffer
                    // enqueue func will modify the message.

                    // This magic line creates a private copy of the
                    // message
                    unmodified_msg_ptr = *(msg_ptr.ref());
                }

                // Enqueue it - for all outgoing queues
                for (int i = 0; i < output_links.size(); i++) {
                    int outgoing = output_links[i];

                    if (i > 0) {
                        // create a private copy of the unmodified
                        // message
                        msg_ptr = *(unmodified_msg_ptr.ref());
                    }

                    // Change the internal destination set of the
                    // message so it knows which destinations this
                    // link is responsible for.
                    net_msg_ptr = safe_cast<NetworkMessage*>(msg_ptr.ref());
                    net_msg_ptr->getInternalDestination() =
                        output_link_destinations[i];

                    // Enqueue msg
                    DEBUG_NEWLINE(NETWORK_COMP, HighPrio);
                    DEBUG_MSG(NETWORK_COMP, HighPrio,
                        csprintf("switch: %d enqueuing net msg from "
                            "inport[%d][%d] to outport [%d][%d] time: %d.",
                            m_switch_id, incoming, vnet, outgoing, vnet,
                            g_eventQueue_ptr->getTime()));
                    DEBUG_NEWLINE(NETWORK_COMP, HighPrio);

                    m_out[outgoing][vnet]->enqueue(msg_ptr);
                }

                // Dequeue msg
                m_in[incoming][vnet]->pop();
            }
        }
    }
}

void
PerfectSwitch::printStats(std::ostream& out) const
{
    out << "PerfectSwitch printStats" << endl;
}

void
PerfectSwitch::clearStats()
{
}

void
PerfectSwitch::printConfig(std::ostream& out) const
{
}

void
PerfectSwitch::print(std::ostream& out) const
{
    out << "[PerfectSwitch " << m_switch_id << "]";
}