// MessageBuffer.cc, revision 11793:ef606668d247
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/network/MessageBuffer.hh"

#include <cassert>

#include "base/cprintf.hh"
#include "base/misc.hh"
#include "base/random.hh"
#include "base/stl_helpers.hh"
#include "debug/RubyQueue.hh"
#include "mem/ruby/system/RubySystem.hh"

using namespace std;
using m5::stl_helpers::operator<<;

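// How a controller typically drives this buffer (an illustrative sketch;
// 'buffer', 'msg', 'now', and 'latency' stand for caller-side state and
// are not defined in this file):
//
//     if (buffer->areNSlotsAvailable(1, now))
//         buffer->enqueue(msg, now, latency);   // producer side
//     ...
//     if (buffer->isReady(now))
//         Tick delay = buffer->dequeue(now);    // consumer side
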
MessageBuffer::MessageBuffer(const Params *p)
    : SimObject(p), m_stall_map_size(0),
    m_max_size(p->buffer_size), m_time_last_time_size_checked(0),
    m_time_last_time_enqueue(0), m_time_last_time_pop(0),
    m_last_arrival_time(0), m_strict_fifo(p->ordered),
    m_randomization(p->randomization)
{
    m_msg_counter = 0;
    m_consumer = NULL;
    m_size_last_time_size_checked = 0;
    m_size_at_cycle_start = 0;
    m_msgs_this_cycle = 0;
    m_priority_rank = 0;

    m_stall_msg_map.clear();
    m_input_link_id = 0;
    m_vnet_id = 0;
}

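// Return the number of messages in the buffer, caching the count so that
// repeated calls within the same tick do not re-read the heap.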
unsigned int
MessageBuffer::getSize(Tick curTime)
{
    if (m_time_last_time_size_checked != curTime) {
        m_time_last_time_size_checked = curTime;
        m_size_last_time_size_checked = m_prio_heap.size();
    }

    return m_size_last_time_size_checked;
}

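// Report whether n more messages fit this cycle. Stalled messages still
// occupy buffer slots, so m_stall_map_size counts against m_max_size too.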
bool
MessageBuffer::areNSlotsAvailable(unsigned int n, Tick current_time)
{
    // fast path when message buffers have infinite size
    if (m_max_size == 0) {
        return true;
    }

    // determine the correct size for the current cycle
    // pop operations shouldn't affect the network's visible size
    // until the scheduled cycle, but enqueue operations affect the
    // visible size immediately
    unsigned int current_size = 0;

    if (m_time_last_time_pop < current_time) {
        // no pops this cycle - heap size is correct
        current_size = m_prio_heap.size();
    } else {
        if (m_time_last_time_enqueue < current_time) {
            // no enqueues this cycle - m_size_at_cycle_start is correct
            current_size = m_size_at_cycle_start;
        } else {
            // both pops and enqueues occurred this cycle - add newly
            // enqueued msgs to m_size_at_cycle_start
            current_size = m_size_at_cycle_start + m_msgs_this_cycle;
        }
    }

    // now compare the new size with our max size
    if (current_size + m_stall_map_size + n <= m_max_size) {
        return true;
    } else {
        DPRINTF(RubyQueue, "n: %d, current_size: %d, heap size: %d, "
                "m_max_size: %d\n",
                n, current_size, m_prio_heap.size(), m_max_size);
        m_not_avail_count++;
        return false;
    }
}

const Message*
MessageBuffer::peek() const
{
    DPRINTF(RubyQueue, "Peeking at head of queue.\n");
    const Message* msg_ptr = m_prio_heap.front().get();
    assert(msg_ptr);

    DPRINTF(RubyQueue, "Message: %s\n", (*msg_ptr));
    return msg_ptr;
}

// FIXME - move me somewhere else
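// Generate a randomized enqueue latency, used when message randomization
// is enabled: 1 + [0,3] ticks, plus a 1-in-8 chance of an additional
// 100 + [1,15] ticks.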
Tick
random_time()
{
    Tick time = 1;
    time += random_mt.random(0, 3);  // [0...3]
    if (random_mt.random(0, 7) == 0) {  // 1 in 8 chance
        time += 100 + random_mt.random(1, 15); // 100 + [1...15]
    }
    return time;
}

void
MessageBuffer::enqueue(MsgPtr message, Tick current_time, Tick delta)
{
    // record current time in case we have a pop that also adjusts my size
    if (m_time_last_time_enqueue < current_time) {
        m_msgs_this_cycle = 0;  // first msg this cycle
        m_time_last_time_enqueue = current_time;
    }

    m_msg_counter++;
    m_msgs_this_cycle++;

    // Calculate the arrival time of the message, that is, the first
    // cycle the message can be dequeued.
    assert(delta > 0);
    Tick arrival_time = 0;

    if (!RubySystem::getRandomization() || !m_randomization) {
        // No randomization
        arrival_time = current_time + delta;
    } else {
        // Randomization - ignore delta
        if (m_strict_fifo) {
            if (m_last_arrival_time < current_time) {
                m_last_arrival_time = current_time;
            }
            arrival_time = m_last_arrival_time + random_time();
        } else {
            arrival_time = current_time + random_time();
        }
    }

    // Check the arrival time
    assert(arrival_time > current_time);
    if (m_strict_fifo) {
        if (arrival_time < m_last_arrival_time) {
            panic("FIFO ordering violated: %s name: %s current time: %d "
                  "delta: %d arrival_time: %d last arrival_time: %d\n",
                  *this, name(), current_time, delta, arrival_time,
                  m_last_arrival_time);
        }
    }

    // If running a cache trace, don't worry about the last arrival checks
    if (!RubySystem::getWarmupEnabled()) {
        m_last_arrival_time = arrival_time;
    }

    // compute the delay cycles and set enqueue time
    Message* msg_ptr = message.get();
    assert(msg_ptr != NULL);

    assert(current_time >= msg_ptr->getLastEnqueueTime() &&
           "ensure we aren't dequeued early");

    msg_ptr->updateDelayedTicks(current_time);
    msg_ptr->setLastEnqueueTime(arrival_time);
    msg_ptr->setMsgCounter(m_msg_counter);

    // Insert the message into the priority heap
    m_prio_heap.push_back(message);
    push_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());

    DPRINTF(RubyQueue, "Enqueue arrival_time: %lld, Message: %s\n",
            arrival_time, *(message.get()));

    // Schedule the wakeup
    assert(m_consumer != NULL);
    m_consumer->scheduleEventAbsolute(arrival_time);
    m_consumer->storeEventInfo(m_vnet_id);
}

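// Remove the head message and return its accumulated queueing delay. The
// network-visible size is not reduced until the next cycle (see
// m_size_at_cycle_start and areNSlotsAvailable above).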
Tick
MessageBuffer::dequeue(Tick current_time)
{
    DPRINTF(RubyQueue, "Popping\n");
    assert(isReady(current_time));

    // get MsgPtr of the message about to be dequeued
    MsgPtr message = m_prio_heap.front();

    // get the delay cycles
    message->updateDelayedTicks(current_time);
    Tick delay = message->getDelayedTicks();

    // record previous size and time so the current buffer size isn't
    // adjusted until the scheduled cycle
    if (m_time_last_time_pop < current_time) {
        m_size_at_cycle_start = m_prio_heap.size();
        m_time_last_time_pop = current_time;
    }

    pop_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
    m_prio_heap.pop_back();

    return delay;
}

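// Empty the buffer and reset the per-cycle bookkeeping.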
void
MessageBuffer::clear()
{
    m_prio_heap.clear();

    m_msg_counter = 0;
    m_time_last_time_enqueue = 0;
    m_time_last_time_pop = 0;
    m_size_at_cycle_start = 0;
    m_msgs_this_cycle = 0;
}

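// Requeue the head message with a new arrival time of current_time +
// recycle_latency, for controllers that cannot process it yet. pop_heap
// moves the head element to the back of the vector, so assigning back()
// and re-pushing reinserts the same message with its updated time.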
void
MessageBuffer::recycle(Tick current_time, Tick recycle_latency)
{
    DPRINTF(RubyQueue, "Recycling.\n");
    assert(isReady(current_time));
    MsgPtr node = m_prio_heap.front();
    pop_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());

    Tick future_time = current_time + recycle_latency;
    node->setLastEnqueueTime(future_time);

    m_prio_heap.back() = node;
    push_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
    m_consumer->scheduleEventAbsolute(future_time);
}

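// Move every message in lt back onto the priority heap, stamped with
// schdTick, and schedule the consumer so the requeued messages are seen
// before any younger messages that may arrive this cycle.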
void
MessageBuffer::reanalyzeList(list<MsgPtr> &lt, Tick schdTick)
{
    while (!lt.empty()) {
        m_msg_counter++;
        MsgPtr m = lt.front();
        m->setLastEnqueueTime(schdTick);
        m->setMsgCounter(m_msg_counter);

        m_prio_heap.push_back(m);
        push_heap(m_prio_heap.begin(), m_prio_heap.end(),
                  greater<MsgPtr>());

        m_consumer->scheduleEventAbsolute(schdTick);
        lt.pop_front();
    }
}

void
MessageBuffer::reanalyzeMessages(Addr addr, Tick current_time)
{
    DPRINTF(RubyQueue, "ReanalyzeMessages %#x\n", addr);
    assert(m_stall_msg_map.count(addr) > 0);

    //
    // Put all stalled messages associated with this address back on the
    // prio heap.  The reanalyzeList call will make sure the consumer is
    // scheduled for the current cycle so that the previously stalled messages
    // will be observed before any younger messages that may arrive this cycle.
    //
    m_stall_map_size -= m_stall_msg_map[addr].size();
    assert(m_stall_map_size >= 0);
    reanalyzeList(m_stall_msg_map[addr], current_time);
    m_stall_msg_map.erase(addr);
}

void
MessageBuffer::reanalyzeAllMessages(Tick current_time)
{
    DPRINTF(RubyQueue, "ReanalyzeAllMessages\n");

    //
    // Put all stalled messages, for every address, back on the prio heap.
    // The reanalyzeList call will make sure the consumer is scheduled for
    // the current cycle so that the previously stalled messages will be
    // observed before any younger messages that may arrive this cycle.
    //
    for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
         map_iter != m_stall_msg_map.end(); ++map_iter) {
        m_stall_map_size -= map_iter->second.size();
        assert(m_stall_map_size >= 0);
        reanalyzeList(map_iter->second, current_time);
    }
    m_stall_msg_map.clear();
}

void
MessageBuffer::stallMessage(Addr addr, Tick current_time)
{
    DPRINTF(RubyQueue, "Stalling due to %#x\n", addr);
    assert(isReady(current_time));
    assert(getOffset(addr) == 0);
    MsgPtr message = m_prio_heap.front();

    dequeue(current_time);

    //
    // Note: no event is scheduled to analyze the map at a later time.
    // Instead, the controller is responsible for calling reanalyzeMessages
    // when these addresses change state.
    //
    (m_stall_msg_map[addr]).push_back(message);
    m_stall_map_size++;
}

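// Print the buffer's contents in arrival order; sort_heap runs on a copy
// so the live heap is left untouched.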
void
MessageBuffer::print(ostream& out) const
{
    ccprintf(out, "[MessageBuffer: ");
    if (m_consumer != NULL) {
        ccprintf(out, " consumer-yes ");
    }

    vector<MsgPtr> copy(m_prio_heap);
    sort_heap(copy.begin(), copy.end(), greater<MsgPtr>());
    ccprintf(out, "%s] %s", copy, name());
}

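// A message is ready once its stamped arrival time has been reached.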
bool
MessageBuffer::isReady(Tick current_time) const
{
    return ((m_prio_heap.size() > 0) &&
        (m_prio_heap.front()->getLastEnqueueTime() <= current_time));
}

void
MessageBuffer::regStats()
{
    m_not_avail_count
        .name(name() + ".not_avail_count")
        .desc("Number of times this buffer did not have N slots available")
        .flags(Stats::nozero);
}

uint32_t
MessageBuffer::functionalWrite(Packet *pkt)
{
    uint32_t num_functional_writes = 0;

    // Check the priority heap and write any messages that may
    // correspond to the address in the packet.
    for (unsigned int i = 0; i < m_prio_heap.size(); ++i) {
        Message *msg = m_prio_heap[i].get();
        if (msg->functionalWrite(pkt)) {
            num_functional_writes++;
        }
    }

    // Check the stall queue and write any messages that may
    // correspond to the address in the packet.
    for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
         map_iter != m_stall_msg_map.end();
         ++map_iter) {

        for (std::list<MsgPtr>::iterator it = (map_iter->second).begin();
            it != (map_iter->second).end(); ++it) {

            Message *msg = (*it).get();
            if (msg->functionalWrite(pkt)) {
                num_functional_writes++;
            }
        }
    }

    return num_functional_writes;
}

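// Factory hook used by gem5's Python configuration layer to build a
// MessageBuffer from its generated Params object.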
MessageBuffer *
MessageBufferParams::create()
{
    return new MessageBuffer(this);
}