// MessageBuffer.cc revision 11049:dfb0aa3f0649
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <cassert>

#include "base/cprintf.hh"
#include "base/misc.hh"
#include "base/random.hh"
#include "base/stl_helpers.hh"
#include "debug/RubyQueue.hh"
#include "mem/ruby/network/MessageBuffer.hh"
#include "mem/ruby/system/System.hh"

using namespace std;
using m5::stl_helpers::operator<<;

MessageBuffer::MessageBuffer(const Params *p)
    : SimObject(p), m_recycle_latency(p->recycle_latency),
    m_max_size(p->buffer_size), m_time_last_time_size_checked(0),
    m_time_last_time_enqueue(0), m_time_last_time_pop(0),
    m_last_arrival_time(0), m_strict_fifo(p->ordered),
    m_randomization(p->randomization)
{
    m_msg_counter = 0;
    m_consumer = NULL;
    m_sender = NULL;
    m_receiver = NULL;

    m_size_last_time_size_checked = 0;
    m_size_at_cycle_start = 0;
    m_msgs_this_cycle = 0;
    m_not_avail_count = 0;
    m_priority_rank = 0;

    m_stall_msg_map.clear();
    m_input_link_id = 0;
    m_vnet_id = 0;
}

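// Return the number of messages in the buffer as seen in the
// receiver's current cycle. The size is sampled at most once per
// cycle and cached, so repeated calls within a cycle observe a
// consistent value.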
unsigned int
MessageBuffer::getSize()
{
    if (m_time_last_time_size_checked != m_receiver->curCycle()) {
        m_time_last_time_size_checked = m_receiver->curCycle();
        m_size_last_time_size_checked = m_prio_heap.size();
    }

    return m_size_last_time_size_checked;
}

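// Check whether n more messages can be enqueued in the current cycle.
// A m_max_size of zero denotes an unbounded buffer. Slots freed by
// pops earlier in the same cycle are deliberately not counted, so they
// cannot be reused until the next cycle.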
bool
MessageBuffer::areNSlotsAvailable(unsigned int n)
{
    // fast path when message buffers have infinite size
    if (m_max_size == 0) {
        return true;
    }

    // determine the correct size for the current cycle
    // pop operations shouldn't affect the network's visible size
    // until the scheduled cycle, but enqueue operations affect the
    // visible size immediately
    unsigned int current_size = 0;

    if (m_time_last_time_pop < m_sender->clockEdge()) {
        // no pops this cycle - heap size is correct
        current_size = m_prio_heap.size();
    } else {
        if (m_time_last_time_enqueue < m_sender->curCycle()) {
            // no enqueues this cycle - m_size_at_cycle_start is correct
            current_size = m_size_at_cycle_start;
        } else {
            // both pops and enqueues occurred this cycle - add newly
            // enqueued msgs to m_size_at_cycle_start
            current_size = m_size_at_cycle_start + m_msgs_this_cycle;
        }
    }

    // now compare the new size with our max size
    if (current_size + n <= m_max_size) {
        return true;
    } else {
        DPRINTF(RubyQueue, "n: %d, current_size: %d, heap size: %d, "
                "m_max_size: %d\n",
                n, current_size, m_prio_heap.size(), m_max_size);
        m_not_avail_count++;
        return false;
    }
}

const Message*
MessageBuffer::peek() const
{
    DPRINTF(RubyQueue, "Peeking at head of queue.\n");
    assert(isReady());

    const Message* msg_ptr = m_prio_heap.front().get();
    assert(msg_ptr);

    DPRINTF(RubyQueue, "Message: %s\n", (*msg_ptr));
    return msg_ptr;
}

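// Draw a random delay of 1 to 4 cycles; with a 1-in-8 chance, add a
// further 101 to 115 cycles to model an occasional long disruption.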
// FIXME - move me somewhere else
Cycles
random_time()
{
    Cycles time(1);
    time += Cycles(random_mt.random(0, 3));  // [0...3]
    if (random_mt.random(0, 7) == 0) {  // 1 in 8 chance
        time += Cycles(100 + random_mt.random(1, 15)); // 100 + [1...15]
    }
    return time;
}

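// Enqueue a message that becomes visible to the consumer delta cycles
// from now, or after a random delay when randomization is enabled.
// The message is stamped with its arrival time, pushed onto the
// priority heap (earliest arrival at the front), and the consumer is
// scheduled to wake up at that time.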
void
MessageBuffer::enqueue(MsgPtr message, Cycles delta)
{
    // record the current time in case we have a pop that also adjusts
    // the size this cycle
    if (m_time_last_time_enqueue < m_sender->curCycle()) {
        m_msgs_this_cycle = 0;  // first msg this cycle
        m_time_last_time_enqueue = m_sender->curCycle();
    }

    m_msg_counter++;
    m_msgs_this_cycle++;

    // Calculate the arrival time of the message, that is, the first
    // cycle the message can be dequeued.
    assert(delta > 0);
    Tick current_time = m_sender->clockEdge();
    Tick arrival_time = 0;

    if (!RubySystem::getRandomization() || !m_randomization) {
        // No randomization
        arrival_time = current_time + delta * m_sender->clockPeriod();
    } else {
        // Randomization - ignore delta
        if (m_strict_fifo) {
            if (m_last_arrival_time < current_time) {
                m_last_arrival_time = current_time;
            }
            arrival_time = m_last_arrival_time +
                           random_time() * m_sender->clockPeriod();
        } else {
            arrival_time = current_time +
                           random_time() * m_sender->clockPeriod();
        }
    }

    // Check the arrival time
    assert(arrival_time > current_time);
    if (m_strict_fifo) {
        if (arrival_time < m_last_arrival_time) {
            panic("FIFO ordering violated: %s name: %s current time: %d "
                  "delta: %d arrival_time: %d last arrival_time: %d\n",
                  *this, name(), current_time,
                  delta * m_sender->clockPeriod(),
                  arrival_time, m_last_arrival_time);
        }
    }

    // If running a cache trace, don't worry about the last arrival checks
    if (!RubySystem::getWarmupEnabled()) {
        m_last_arrival_time = arrival_time;
    }

    // compute the delay cycles and set enqueue time
    Message* msg_ptr = message.get();
    assert(msg_ptr != NULL);

    assert(m_sender->clockEdge() >= msg_ptr->getLastEnqueueTime() &&
           "ensure we aren't dequeued early");

    msg_ptr->updateDelayedTicks(m_sender->clockEdge());
    msg_ptr->setLastEnqueueTime(arrival_time);
    msg_ptr->setMsgCounter(m_msg_counter);

    // Insert the message into the priority heap
    m_prio_heap.push_back(message);
    push_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());

    DPRINTF(RubyQueue, "Enqueue arrival_time: %lld, Message: %s\n",
            arrival_time, *(message.get()));

    // Schedule the wakeup
    assert(m_consumer != NULL);
    m_consumer->scheduleEventAbsolute(arrival_time);
    m_consumer->storeEventInfo(m_vnet_id);
}

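// Pop the message at the head of the priority heap and return the
// number of cycles it has spent delayed in buffers. The size visible
// to senders (see areNSlotsAvailable) is not reduced until the next
// cycle.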
Cycles
MessageBuffer::dequeue()
{
    DPRINTF(RubyQueue, "Popping\n");
    assert(isReady());

    // get MsgPtr of the message about to be dequeued
    MsgPtr message = m_prio_heap.front();

    // get the delay cycles
    message->updateDelayedTicks(m_receiver->clockEdge());
    Cycles delayCycles =
        m_receiver->ticksToCycles(message->getDelayedTicks());

    // record the previous size and time so the current buffer size
    // isn't adjusted until the scheduled cycle
    if (m_time_last_time_pop < m_receiver->clockEdge()) {
        m_size_at_cycle_start = m_prio_heap.size();
        m_time_last_time_pop = m_receiver->clockEdge();
    }

    pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
        greater<MsgPtr>());
    m_prio_heap.pop_back();

    return delayCycles;
}

void
MessageBuffer::clear()
{
    m_prio_heap.clear();

    m_msg_counter = 0;
    m_time_last_time_enqueue = Cycles(0);
    m_time_last_time_pop = 0;
    m_size_at_cycle_start = 0;
    m_msgs_this_cycle = 0;
}

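// Requeue the message at the head of the heap with a new arrival time
// m_recycle_latency cycles from now, and schedule the consumer to wake
// up at that time.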
void
MessageBuffer::recycle()
{
    DPRINTF(RubyQueue, "Recycling.\n");
    assert(isReady());
    MsgPtr node = m_prio_heap.front();
    pop_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());

    node->setLastEnqueueTime(m_receiver->clockEdge(m_recycle_latency));
    m_prio_heap.back() = node;
    push_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
    m_consumer->
        scheduleEventAbsolute(m_receiver->clockEdge(m_recycle_latency));
}

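// Move every message from a stalled list back into the priority heap,
// stamping each with the given tick so that previously stalled
// messages are observed before any younger messages that arrive in the
// same cycle.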
void
MessageBuffer::reanalyzeList(list<MsgPtr> &lt, Tick schdTick)
{
    while (!lt.empty()) {
        m_msg_counter++;
        MsgPtr m = lt.front();
        m->setLastEnqueueTime(schdTick);
        m->setMsgCounter(m_msg_counter);

        m_prio_heap.push_back(m);
        push_heap(m_prio_heap.begin(), m_prio_heap.end(),
                  greater<MsgPtr>());

        m_consumer->scheduleEventAbsolute(schdTick);
        lt.pop_front();
    }
}

void
MessageBuffer::reanalyzeMessages(Addr addr)
{
    DPRINTF(RubyQueue, "ReanalyzeMessages %s\n", addr);
    assert(m_stall_msg_map.count(addr) > 0);
    Tick curTick = m_receiver->clockEdge();

    //
    // Put all stalled messages associated with this address back on the
    // prio heap.  The reanalyzeList call will make sure the consumer is
    // scheduled for the current cycle so that the previously stalled messages
    // will be observed before any younger messages that may arrive this cycle.
    //
    reanalyzeList(m_stall_msg_map[addr], curTick);
    m_stall_msg_map.erase(addr);
}

void
MessageBuffer::reanalyzeAllMessages()
{
    DPRINTF(RubyQueue, "ReanalyzeAllMessages\n");
    Tick curTick = m_receiver->clockEdge();

    //
    // Put all stalled messages, for every address, back on the prio
    // heap.  The reanalyzeList call will make sure the consumer is
    // scheduled for the current cycle so that the previously stalled
    // messages will be observed before any younger messages that may
    // arrive this cycle.
    //
    for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
         map_iter != m_stall_msg_map.end(); ++map_iter) {
        reanalyzeList(map_iter->second, curTick);
    }
    m_stall_msg_map.clear();
}

void
MessageBuffer::stallMessage(Addr addr)
{
    DPRINTF(RubyQueue, "Stalling due to %s\n", addr);
    assert(isReady());
    assert(getOffset(addr) == 0);
    MsgPtr message = m_prio_heap.front();

    dequeue();

    //
    // Note: no event is scheduled to analyze the map at a later time.
    // Instead the controller is responsible for calling reanalyzeMessages
    // when these addresses change state.
    //
    (m_stall_msg_map[addr]).push_back(message);
}

void
MessageBuffer::print(ostream& out) const
{
    ccprintf(out, "[MessageBuffer: ");
    if (m_consumer != NULL) {
        ccprintf(out, " consumer-yes ");
    }

    vector<MsgPtr> copy(m_prio_heap);
    sort_heap(copy.begin(), copy.end(), greater<MsgPtr>());
    ccprintf(out, "%s] %s", copy, name());
}

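// The buffer is ready when it holds at least one message whose arrival
// time has been reached in the receiver's current cycle.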
bool
MessageBuffer::isReady() const
{
    return ((m_prio_heap.size() > 0) &&
        (m_prio_heap.front()->getLastEnqueueTime() <= m_receiver->clockEdge()));
}

bool
MessageBuffer::functionalRead(Packet *pkt)
{
    // Check the priority heap and read any messages that may
    // correspond to the address in the packet.
    for (unsigned int i = 0; i < m_prio_heap.size(); ++i) {
        Message *msg = m_prio_heap[i].get();
        if (msg->functionalRead(pkt)) return true;
    }

    // Read the messages in the stall queue that correspond
    // to the address in the packet.
    for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
         map_iter != m_stall_msg_map.end();
         ++map_iter) {

        for (std::list<MsgPtr>::iterator it = (map_iter->second).begin();
            it != (map_iter->second).end(); ++it) {

            Message *msg = (*it).get();
            if (msg->functionalRead(pkt)) return true;
        }
    }
    return false;
}

uint32_t
MessageBuffer::functionalWrite(Packet *pkt)
{
    uint32_t num_functional_writes = 0;

    // Check the priority heap and write any messages that may
    // correspond to the address in the packet.
    for (unsigned int i = 0; i < m_prio_heap.size(); ++i) {
        Message *msg = m_prio_heap[i].get();
        if (msg->functionalWrite(pkt)) {
            num_functional_writes++;
        }
    }

    // Check the stall queue and write any messages that may
    // correspond to the address in the packet.
    for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
         map_iter != m_stall_msg_map.end();
         ++map_iter) {

        for (std::list<MsgPtr>::iterator it = (map_iter->second).begin();
            it != (map_iter->second).end(); ++it) {

            Message *msg = (*it).get();
            if (msg->functionalWrite(pkt)) {
                num_functional_writes++;
            }
        }
    }

    return num_functional_writes;
}

MessageBuffer *
MessageBufferParams::create()
{
    return new MessageBuffer(this);
}

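// A minimal usage sketch (hypothetical caller, not part of this file):
// a controller that has wired a sender, a receiver, and a consumer to
// the buffer might interact with it as follows.
//
//     if (buffer->areNSlotsAvailable(1)) {
//         buffer->enqueue(msg, Cycles(1));      // visible next cycle
//     }
//     ...
//     if (buffer->isReady()) {
//         const Message *head = buffer->peek(); // inspect the head
//         buffer->dequeue();                    // then pop it
//     }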