MessageBuffer.cc (12334:e0ab29a34764 → 13062:6f9defe1c11e)
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/network/MessageBuffer.hh"

#include <cassert>

#include "base/cprintf.hh"
#include "base/logging.hh"
#include "base/random.hh"
#include "base/stl_helpers.hh"
#include "debug/RubyQueue.hh"
#include "mem/ruby/system/RubySystem.hh"

using namespace std;
using m5::stl_helpers::operator<<;
MessageBuffer::MessageBuffer(const Params *p)
    : SimObject(p), m_stall_map_size(0),
    m_max_size(p->buffer_size), m_time_last_time_size_checked(0),
    m_time_last_time_enqueue(0), m_time_last_time_pop(0),
    m_last_arrival_time(0), m_strict_fifo(p->ordered),
    m_randomization(p->randomization)
{
    m_msg_counter = 0;
    m_consumer = NULL;
    m_size_last_time_size_checked = 0;
    m_size_at_cycle_start = 0;
    m_msgs_this_cycle = 0;
    m_priority_rank = 0;

    m_stall_msg_map.clear();
    m_input_link_id = 0;
    m_vnet_id = 0;

    m_buf_msgs = 0;
    m_stall_time = 0;

    m_dequeue_callback = nullptr;
}

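// The buffer size is sampled at most once per tick and cached, so repeated
// getSize() calls within the same tick return a consistent value.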
unsigned int
MessageBuffer::getSize(Tick curTime)
{
    if (m_time_last_time_size_checked != curTime) {
        m_time_last_time_size_checked = curTime;
        m_size_last_time_size_checked = m_prio_heap.size();
    }

    return m_size_last_time_size_checked;
}

bool
MessageBuffer::areNSlotsAvailable(unsigned int n, Tick current_time)
{
    // fast path when message buffers have infinite size
    if (m_max_size == 0) {
        return true;
    }

    // Determine the correct size for the current cycle. Pop operations
    // shouldn't affect the network's visible size until the scheduled
    // cycle, but enqueue operations affect the visible size immediately.
    unsigned int current_size = 0;

    if (m_time_last_time_pop < current_time) {
        // no pops this cycle - heap size is correct
        current_size = m_prio_heap.size();
    } else {
        if (m_time_last_time_enqueue < current_time) {
            // no enqueues this cycle - m_size_at_cycle_start is correct
            current_size = m_size_at_cycle_start;
        } else {
            // both pops and enqueues occurred this cycle - add new
            // enqueued msgs to m_size_at_cycle_start
            current_size = m_size_at_cycle_start + m_msgs_this_cycle;
        }
    }

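    // Note that stalled messages still occupy slots, so m_stall_map_size is
    // charged against the capacity along with the n slots being requested.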
    // now compare the new size with our max size
    if (current_size + m_stall_map_size + n <= m_max_size) {
        return true;
    } else {
        DPRINTF(RubyQueue, "n: %d, current_size: %d, heap size: %d, "
                "m_max_size: %d\n",
                n, current_size, m_prio_heap.size(), m_max_size);
        m_not_avail_count++;
        return false;
    }
}

const Message*
MessageBuffer::peek() const
{
    DPRINTF(RubyQueue, "Peeking at head of queue.\n");
    const Message* msg_ptr = m_prio_heap.front().get();
    assert(msg_ptr);

    DPRINTF(RubyQueue, "Message: %s\n", (*msg_ptr));
    return msg_ptr;
}

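// random_time() draws a small delay: 1 plus a uniform value in [0,3], with
// a 1-in-8 chance of an extra spike of 100 plus [1,15] ticks. It is used
// below to perturb arrival times when randomization is enabled.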
// FIXME - move me somewhere else
Tick
random_time()
{
    Tick time = 1;
    time += random_mt.random(0, 3);  // [0...3]
    if (random_mt.random(0, 7) == 0) {  // 1 in 8 chance
        time += 100 + random_mt.random(1, 15); // 100 + [1...15]
    }
    return time;
}

void
MessageBuffer::enqueue(MsgPtr message, Tick current_time, Tick delta)
{
    // record current time in case we have a pop that also adjusts my size
    if (m_time_last_time_enqueue < current_time) {
        m_msgs_this_cycle = 0;  // first msg this cycle
        m_time_last_time_enqueue = current_time;
    }

    m_msg_counter++;
    m_msgs_this_cycle++;

    // Calculate the arrival time of the message, that is, the first
    // cycle the message can be dequeued.
    assert(delta > 0);
    Tick arrival_time = 0;

    // Random delays are inserted if either the RubySystem-level
    // randomization flag or this buffer's randomization flag is set.
    if (!RubySystem::getRandomization() && !m_randomization) {
        // No randomization
        arrival_time = current_time + delta;
    } else {
        // Randomization - ignore delta
        if (m_strict_fifo) {
            if (m_last_arrival_time < current_time) {
                m_last_arrival_time = current_time;
            }
            arrival_time = m_last_arrival_time + random_time();
        } else {
            arrival_time = current_time + random_time();
        }
    }

    // Check the arrival time
    assert(arrival_time > current_time);
    if (m_strict_fifo) {
        if (arrival_time < m_last_arrival_time) {
            panic("FIFO ordering violated: %s name: %s current time: %d "
                  "delta: %d arrival_time: %d last arrival_time: %d\n",
                  *this, name(), current_time, delta, arrival_time,
                  m_last_arrival_time);
        }
    }

    // If running a cache trace, don't worry about the last arrival checks
    if (!RubySystem::getWarmupEnabled()) {
        m_last_arrival_time = arrival_time;
    }

    // compute the delay cycles and set enqueue time
    Message* msg_ptr = message.get();
    assert(msg_ptr != NULL);

    assert(current_time >= msg_ptr->getLastEnqueueTime() &&
           "ensure we aren't dequeued early");

    msg_ptr->updateDelayedTicks(current_time);
    msg_ptr->setLastEnqueueTime(arrival_time);
    msg_ptr->setMsgCounter(m_msg_counter);

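    // m_prio_heap is kept as a min-heap on arrival (last enqueue) time via
    // the MsgPtr comparator, so the earliest deliverable message sits at the
    // front where peek() and isReady() look for it.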
    // Insert the message into the priority heap
    m_prio_heap.push_back(message);
    push_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
    // Increment the number of messages statistic
    m_buf_msgs++;

    DPRINTF(RubyQueue, "Enqueue arrival_time: %lld, Message: %s\n",
            arrival_time, *(message.get()));

    // Schedule the wakeup
    assert(m_consumer != NULL);
    m_consumer->scheduleEventAbsolute(arrival_time);
    m_consumer->storeEventInfo(m_vnet_id);
}

Tick
MessageBuffer::dequeue(Tick current_time, bool decrement_messages)
{
    DPRINTF(RubyQueue, "Popping\n");
    assert(isReady(current_time));

    // get MsgPtr of the message about to be dequeued
    MsgPtr message = m_prio_heap.front();

    // get the delay cycles
    message->updateDelayedTicks(current_time);
    Tick delay = message->getDelayedTicks();

    m_stall_time = curTick() - message->getTime();

    // Record the previous size and time so the current buffer size isn't
    // adjusted until the scheduled cycle.
    if (m_time_last_time_pop < current_time) {
        m_size_at_cycle_start = m_prio_heap.size();
        m_time_last_time_pop = current_time;
    }

    pop_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
    m_prio_heap.pop_back();
    if (decrement_messages) {
        // If the message will be removed from the queue, decrement the
        // number of messages in the queue.
        m_buf_msgs--;
    }

    // if a dequeue callback was requested, call it now
    if (m_dequeue_callback) {
        m_dequeue_callback();
    }

    return delay;
}

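// A sketch of typical use: a consumer registers a lambda so it can react
// whenever a message leaves this buffer ('wakeUpBuffers' is illustrative):
//   buffer->registerDequeueCallback([this]() { wakeUpBuffers(); });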
void
MessageBuffer::registerDequeueCallback(std::function<void()> callback)
{
    m_dequeue_callback = callback;
}

void
MessageBuffer::unregisterDequeueCallback()
{
    m_dequeue_callback = nullptr;
}

void
MessageBuffer::clear()
{
    m_prio_heap.clear();

    m_msg_counter = 0;
    m_time_last_time_enqueue = 0;
    m_time_last_time_pop = 0;
    m_size_at_cycle_start = 0;
    m_msgs_this_cycle = 0;
}

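// recycle() reschedules the message at the head of the queue for
// recycle_latency ticks in the future instead of dropping it: pop_heap
// moves the head to the back of the vector, its enqueue time is bumped,
// and push_heap restores the heap ordering.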
void
MessageBuffer::recycle(Tick current_time, Tick recycle_latency)
{
    DPRINTF(RubyQueue, "Recycling.\n");
    assert(isReady(current_time));
    MsgPtr node = m_prio_heap.front();
    pop_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());

    Tick future_time = current_time + recycle_latency;
    node->setLastEnqueueTime(future_time);

    m_prio_heap.back() = node;
    push_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
    m_consumer->scheduleEventAbsolute(future_time);
}

void
MessageBuffer::reanalyzeList(list<MsgPtr> &lt, Tick schdTick)
{
    while (!lt.empty()) {
        m_msg_counter++;
        MsgPtr m = lt.front();
        m->setLastEnqueueTime(schdTick);
        m->setMsgCounter(m_msg_counter);

        m_prio_heap.push_back(m);
        push_heap(m_prio_heap.begin(), m_prio_heap.end(),
                  greater<MsgPtr>());

        m_consumer->scheduleEventAbsolute(schdTick);
        lt.pop_front();
    }
}

void
MessageBuffer::reanalyzeMessages(Addr addr, Tick current_time)
{
    DPRINTF(RubyQueue, "ReanalyzeMessages %#x\n", addr);
    assert(m_stall_msg_map.count(addr) > 0);

    //
    // Put all stalled messages associated with this address back on the
    // prio heap. The reanalyzeList call will make sure the consumer is
    // scheduled for the current cycle so that the previously stalled messages
    // will be observed before any younger messages that may arrive this
    // cycle.
    //
    m_stall_map_size -= m_stall_msg_map[addr].size();
    assert(m_stall_map_size >= 0);
    reanalyzeList(m_stall_msg_map[addr], current_time);
    m_stall_msg_map.erase(addr);
}

void
MessageBuffer::reanalyzeAllMessages(Tick current_time)
{
    DPRINTF(RubyQueue, "ReanalyzeAllMessages\n");

    //
    // Put all stalled messages, for every address, back on the prio heap.
    // The reanalyzeList call will make sure the consumer is scheduled for
    // the current cycle so that the previously stalled messages will be
    // observed before any younger messages that may arrive this cycle.
    //
    for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
         map_iter != m_stall_msg_map.end(); ++map_iter) {
        m_stall_map_size -= map_iter->second.size();
        assert(m_stall_map_size >= 0);
        reanalyzeList(map_iter->second, current_time);
    }
    m_stall_msg_map.clear();
}

void
MessageBuffer::stallMessage(Addr addr, Tick current_time)
{
    DPRINTF(RubyQueue, "Stalling due to %#x\n", addr);
    assert(isReady(current_time));
    assert(getOffset(addr) == 0);
    MsgPtr message = m_prio_heap.front();

    // Since the message will just be moved to the stall map, indicate that
    // the buffer should not decrement the m_buf_msgs statistic.
    dequeue(current_time, false);

    //
    // Note: no event is scheduled to analyze the map at a later time.
    // Instead the controller is responsible for calling reanalyzeMessages
    // when these addresses change state.
    //
    (m_stall_msg_map[addr]).push_back(message);
    m_stall_map_size++;
    m_stall_count++;
}

void
MessageBuffer::print(ostream& out) const
{
    ccprintf(out, "[MessageBuffer: ");
    if (m_consumer != NULL) {
        ccprintf(out, " consumer-yes ");
    }

    vector<MsgPtr> copy(m_prio_heap);
    sort_heap(copy.begin(), copy.end(), greater<MsgPtr>());
    ccprintf(out, "%s] %s", copy, name());
}

bool
MessageBuffer::isReady(Tick current_time) const
{
    return ((m_prio_heap.size() > 0) &&
            (m_prio_heap.front()->getLastEnqueueTime() <= current_time));
}

void
MessageBuffer::regStats()
{
    m_not_avail_count
        .name(name() + ".not_avail_count")
        .desc("Number of times this buffer did not have N slots available")
        .flags(Stats::nozero);

    m_buf_msgs
        .name(name() + ".avg_buf_msgs")
        .desc("Average number of messages in buffer")
        .flags(Stats::nozero);

    m_stall_count
        .name(name() + ".num_msg_stalls")
        .desc("Number of times messages were stalled")
        .flags(Stats::nozero);

    m_occupancy
        .name(name() + ".avg_buf_occ")
        .desc("Average occupancy of buffer capacity")
        .flags(Stats::nozero);

    m_stall_time
        .name(name() + ".avg_stall_time")
        .desc("Average number of cycles messages are stalled in this MB")
        .flags(Stats::nozero);

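    // Occupancy is a derived stat: the average number of buffered messages
    // divided by the buffer capacity. Unbounded buffers (m_max_size == 0)
    // report zero rather than dividing by zero.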
    if (m_max_size > 0) {
        m_occupancy = m_buf_msgs / m_max_size;
    } else {
        m_occupancy = 0;
    }
}

uint32_t
MessageBuffer::functionalWrite(Packet *pkt)
{
    uint32_t num_functional_writes = 0;

    // Check the priority heap and write any messages that may
    // correspond to the address in the packet.
    for (unsigned int i = 0; i < m_prio_heap.size(); ++i) {
        Message *msg = m_prio_heap[i].get();
        if (msg->functionalWrite(pkt)) {
            num_functional_writes++;
        }
    }

    // Check the stall queue and write any messages that may
    // correspond to the address in the packet.
    for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
         map_iter != m_stall_msg_map.end();
         ++map_iter) {

        for (std::list<MsgPtr>::iterator it = (map_iter->second).begin();
             it != (map_iter->second).end(); ++it) {

            Message *msg = (*it).get();
            if (msg->functionalWrite(pkt)) {
                num_functional_writes++;
            }
        }
    }

    return num_functional_writes;
}

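// Factory hook: gem5 constructs the MessageBuffer from its generated Params
// object when the Python configuration is elaborated.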
MessageBuffer *
MessageBufferParams::create()
{
    return new MessageBuffer(this);
}