/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <cassert>

#include "base/cprintf.hh"
#include "base/misc.hh"
#include "base/random.hh"
#include "base/stl_helpers.hh"
#include "debug/RubyQueue.hh"
#include "mem/ruby/network/MessageBuffer.hh"
#include "mem/ruby/system/System.hh"

using namespace std;
using m5::stl_helpers::operator<<;

MessageBuffer::MessageBuffer(const string &name)
    : m_time_last_time_size_checked(0), m_time_last_time_enqueue(0),
      m_time_last_time_pop(0), m_last_arrival_time(0)
{
    m_msg_counter = 0;
    m_consumer = NULL;
    m_sender = NULL;
    m_receiver = NULL;

    m_ordering_set = false;
    m_strict_fifo = true;
    m_max_size = 0;
    m_randomization = true;
    m_size_last_time_size_checked = 0;
    m_size_at_cycle_start = 0;
    m_msgs_this_cycle = 0;
    m_not_avail_count = 0;
    m_priority_rank = 0;
    m_name = name;

    m_stall_msg_map.clear();
    m_input_link_id = 0;
    m_vnet_id = 0;
}

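// Return the number of queued messages as seen this receiver cycle; the
// value is sampled on the first call of a cycle and reused for any later
// calls in the same cycle.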
unsigned int
MessageBuffer::getSize()
{
    if (m_time_last_time_size_checked != m_receiver->curCycle()) {
        m_time_last_time_size_checked = m_receiver->curCycle();
        m_size_last_time_size_checked = m_prio_heap.size();
    }

    return m_size_last_time_size_checked;
}

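// Check whether n additional messages could be enqueued this cycle without
// exceeding m_max_size (0 means unbounded). A worked example of the
// accounting, assuming a hypothetical 4-entry buffer: if the cycle starts
// with 4 queued messages and one is popped and another enqueued in the same
// cycle, the size charged against the limit is 4 + 1 = 5, because pops only
// become visible next cycle, so areNSlotsAvailable(1) returns false even
// though the heap currently holds just 4 entries.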
bool
MessageBuffer::areNSlotsAvailable(unsigned int n)
{

    // fast path when message buffers have infinite size
    if (m_max_size == 0) {
        return true;
    }

    // determine the correct size for the current cycle
    // pop operations shouldn't affect the network's visible size
    // until next cycle, but enqueue operations affect the visible
    // size immediately
    unsigned int current_size = 0;

    if (m_time_last_time_pop < m_sender->clockEdge()) {
        // no pops this cycle - heap size is correct
        current_size = m_prio_heap.size();
    } else {
        if (m_time_last_time_enqueue < m_sender->curCycle()) {
            // no enqueues this cycle - m_size_at_cycle_start is correct
            current_size = m_size_at_cycle_start;
        } else {
            // both pops and enqueues occurred this cycle - add new
            // enqueued msgs to m_size_at_cycle_start
            current_size = m_size_at_cycle_start + m_msgs_this_cycle;
        }
    }

    // now compare the new size with our max size
    if (current_size + n <= m_max_size) {
        return true;
    } else {
        DPRINTF(RubyQueue, "n: %d, current_size: %d, heap size: %d, "
                "m_max_size: %d\n",
                n, current_size, m_prio_heap.size(), m_max_size);
        m_not_avail_count++;
        return false;
    }
}

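// Return a pointer to the message at the head of the priority heap without
// removing it; the buffer must be ready (isReady()).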
const Message*
MessageBuffer::peek() const
{
    DPRINTF(RubyQueue, "Peeking at head of queue.\n");
    assert(isReady());

    const Message* msg_ptr = m_prio_heap.front().m_msgptr.get();
    assert(msg_ptr);

    DPRINTF(RubyQueue, "Message: %s\n", (*msg_ptr));
    return msg_ptr;
}

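// Produce a small random delay: one cycle plus [0..3] cycles, with a 1-in-8
// chance of an additional 100 + [1..15] cycles. Used below to perturb
// message arrival times when randomization is enabled.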
// FIXME - move me somewhere else
Cycles
random_time()
{
    Cycles time(1);
    time += Cycles(random_mt.random(0, 3));  // [0...3]
    if (random_mt.random(0, 7) == 0) {  // 1 in 8 chance
        time += Cycles(100 + random_mt.random(1, 15)); // 100 + [1...15]
    }
    return time;
}

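// Insert a message into the buffer: compute the first cycle at which it may
// be dequeued (delta sender cycles from now, or a randomized delay), stamp
// its enqueue and delay times, push it onto the priority heap, and schedule
// the consumer to wake up at that arrival time.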
void
MessageBuffer::enqueue(MsgPtr message, Cycles delta)
{
    assert(m_ordering_set);

    // record current time in case we have a pop that also adjusts my size
    if (m_time_last_time_enqueue < m_sender->curCycle()) {
        m_msgs_this_cycle = 0;  // first msg this cycle
        m_time_last_time_enqueue = m_sender->curCycle();
    }

    m_msg_counter++;
    m_msgs_this_cycle++;

    // Calculate the arrival time of the message, that is, the first
    // cycle the message can be dequeued.
    assert(delta > 0);
    Tick current_time = m_sender->clockEdge();
    Tick arrival_time = 0;

    if (!RubySystem::getRandomization() || !m_randomization) {
        // No randomization
        arrival_time = current_time + delta * m_sender->clockPeriod();
    } else {
        // Randomization - ignore delta
        if (m_strict_fifo) {
            if (m_last_arrival_time < current_time) {
                m_last_arrival_time = current_time;
            }
            arrival_time = m_last_arrival_time +
                           random_time() * m_sender->clockPeriod();
        } else {
            arrival_time = current_time +
                           random_time() * m_sender->clockPeriod();
        }
    }

    // Check the arrival time
    assert(arrival_time > current_time);
    if (m_strict_fifo) {
        if (arrival_time < m_last_arrival_time) {
            panic("FIFO ordering violated: %s name: %s current time: %d "
                  "delta: %d arrival_time: %d last arrival_time: %d\n",
                  *this, m_name, current_time,
                  delta * m_sender->clockPeriod(),
                  arrival_time, m_last_arrival_time);
        }
    }

    // If running a cache trace, don't worry about the last arrival checks
    if (!g_system_ptr->m_warmup_enabled) {
        m_last_arrival_time = arrival_time;
    }

    // compute the delay cycles and set enqueue time
    Message* msg_ptr = message.get();
    assert(msg_ptr != NULL);

    assert(m_sender->clockEdge() >= msg_ptr->getLastEnqueueTime() &&
           "ensure we aren't dequeued early");

    msg_ptr->updateDelayedTicks(m_sender->clockEdge());
    msg_ptr->setLastEnqueueTime(arrival_time);

    // Insert the message into the priority heap
    MessageBufferNode thisNode(arrival_time, m_msg_counter, message);
    m_prio_heap.push_back(thisNode);
    push_heap(m_prio_heap.begin(), m_prio_heap.end(),
              greater<MessageBufferNode>());

    DPRINTF(RubyQueue, "Enqueue arrival_time: %lld, Message: %s\n",
            arrival_time, *(message.get()));

    // Schedule the wakeup
    assert(m_consumer != NULL);
    m_consumer->scheduleEventAbsolute(arrival_time);
    m_consumer->storeEventInfo(m_vnet_id);
}

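// Pop the head message from the priority heap and return the number of
// receiver cycles of queueing delay it has accumulated. The size visible
// through areNSlotsAvailable() is not reduced until the next cycle.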
Cycles
MessageBuffer::dequeue()
{
    DPRINTF(RubyQueue, "Popping\n");
    assert(isReady());

    // get MsgPtr of the message about to be dequeued
    MsgPtr message = m_prio_heap.front().m_msgptr;

    // get the delay cycles
    message->updateDelayedTicks(m_receiver->clockEdge());
    Cycles delayCycles =
        m_receiver->ticksToCycles(message->getDelayedTicks());

    // record previous size and time so the current buffer size isn't
    // adjusted until next cycle
    if (m_time_last_time_pop < m_receiver->clockEdge()) {
        m_size_at_cycle_start = m_prio_heap.size();
        m_time_last_time_pop = m_receiver->clockEdge();
    }

    pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
             greater<MessageBufferNode>());
    m_prio_heap.pop_back();

    return delayCycles;
}

void
MessageBuffer::clear()
{
    m_prio_heap.clear();

    m_msg_counter = 0;
    m_time_last_time_enqueue = Cycles(0);
    m_time_last_time_pop = 0;
    m_size_at_cycle_start = 0;
    m_msgs_this_cycle = 0;
}

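// Reinsert the head message with a new wakeup time of m_recycle_latency
// receiver cycles from now and schedule the consumer for that time,
// typically used when the consumer cannot service the message this cycle.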
void
MessageBuffer::recycle()
{
    DPRINTF(RubyQueue, "Recycling.\n");
    assert(isReady());
    MessageBufferNode node = m_prio_heap.front();
    pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
             greater<MessageBufferNode>());

    node.m_time = m_receiver->clockEdge(m_recycle_latency);
    m_prio_heap.back() = node;
    push_heap(m_prio_heap.begin(), m_prio_heap.end(),
              greater<MessageBufferNode>());
    m_consumer->
        scheduleEventAbsolute(m_receiver->clockEdge(m_recycle_latency));
}

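// Move every message on the given stalled list back onto the priority heap
// with the supplied wakeup tick, scheduling the consumer once per message.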
void
MessageBuffer::reanalyzeList(list<MsgPtr> &lt, Tick nextTick)
{
    while (!lt.empty()) {
        m_msg_counter++;
        MessageBufferNode msgNode(nextTick, m_msg_counter, lt.front());

        m_prio_heap.push_back(msgNode);
        push_heap(m_prio_heap.begin(), m_prio_heap.end(),
                  greater<MessageBufferNode>());

        m_consumer->scheduleEventAbsolute(nextTick);
        lt.pop_front();
    }
}

void
MessageBuffer::reanalyzeMessages(const Address& addr)
{
    DPRINTF(RubyQueue, "ReanalyzeMessages\n");
    assert(m_stall_msg_map.count(addr) > 0);
    Tick nextTick = m_receiver->clockEdge(Cycles(1));

    //
    // Put all stalled messages associated with this address back on the
    // prio heap
    //
    reanalyzeList(m_stall_msg_map[addr], nextTick);
    m_stall_msg_map.erase(addr);
}

void
MessageBuffer::reanalyzeAllMessages()
{
    DPRINTF(RubyQueue, "ReanalyzeAllMessages\n");
    Tick nextTick = m_receiver->clockEdge(Cycles(1));

    //
    // Put all stalled messages, for every stalled address, back on the
    // prio heap
    //
    for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
         map_iter != m_stall_msg_map.end(); ++map_iter) {
        reanalyzeList(map_iter->second, nextTick);
    }
    m_stall_msg_map.clear();
}

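// Dequeue the ready head message and park it on the stall list for its
// (line-aligned) address; it is re-examined only when reanalyzeMessages()
// or reanalyzeAllMessages() moves it back onto the heap.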
void
MessageBuffer::stallMessage(const Address& addr)
{
    DPRINTF(RubyQueue, "Stalling due to %s\n", addr);
    assert(isReady());
    assert(addr.getOffset() == 0);
    MsgPtr message = m_prio_heap.front().m_msgptr;

    dequeue();

    //
    // Note: no event is scheduled to analyze the map at a later time.
    // Instead the controller is responsible for calling reanalyzeMessages
    // when these addresses change state.
    //
    (m_stall_msg_map[addr]).push_back(message);
}

void
MessageBuffer::print(ostream& out) const
{
    ccprintf(out, "[MessageBuffer: ");
    if (m_consumer != NULL) {
        ccprintf(out, " consumer-yes ");
    }

    vector<MessageBufferNode> copy(m_prio_heap);
    sort_heap(copy.begin(), copy.end(), greater<MessageBufferNode>());
    ccprintf(out, "%s] %s", copy, m_name);
}

bool
MessageBuffer::isReady() const
{
    return ((m_prio_heap.size() > 0) &&
            (m_prio_heap.front().m_time <= m_receiver->clockEdge()));
}

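// Functional accesses search both the priority heap and the stalled
// messages: functionalRead() stops at the first message that satisfies the
// packet, while functionalWrite() updates every matching message and
// returns how many were written.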
bool
MessageBuffer::functionalRead(Packet *pkt)
{
    // Check the priority heap and read any messages that may
    // correspond to the address in the packet.
    for (unsigned int i = 0; i < m_prio_heap.size(); ++i) {
        Message *msg = m_prio_heap[i].m_msgptr.get();
        if (msg->functionalRead(pkt)) return true;
    }

    // Read the messages in the stall queue that correspond
    // to the address in the packet.
    for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
         map_iter != m_stall_msg_map.end();
         ++map_iter) {

        for (std::list<MsgPtr>::iterator it = (map_iter->second).begin();
             it != (map_iter->second).end(); ++it) {

            Message *msg = (*it).get();
            if (msg->functionalRead(pkt)) return true;
        }
    }
    return false;
}

uint32_t
MessageBuffer::functionalWrite(Packet *pkt)
{
    uint32_t num_functional_writes = 0;

    // Check the priority heap and write any messages that may
    // correspond to the address in the packet.
    for (unsigned int i = 0; i < m_prio_heap.size(); ++i) {
        Message *msg = m_prio_heap[i].m_msgptr.get();
        if (msg->functionalWrite(pkt)) {
            num_functional_writes++;
        }
    }

    // Check the stall queue and write any messages that may
    // correspond to the address in the packet.
    for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
         map_iter != m_stall_msg_map.end();
         ++map_iter) {

        for (std::list<MsgPtr>::iterator it = (map_iter->second).begin();
             it != (map_iter->second).end(); ++it) {

            Message *msg = (*it).get();
            if (msg->functionalWrite(pkt)) {
                num_functional_writes++;
            }
        }
    }

    return num_functional_writes;
}