MessageBuffer.cc: diff between revisions 11108:6342ddf6d733 (old) and 11111:6da33e720481 (new)
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;

--- 26 unchanged lines hidden ---

#include "debug/RubyQueue.hh"
#include "mem/ruby/network/MessageBuffer.hh"
#include "mem/ruby/system/RubySystem.hh"

using namespace std;
using m5::stl_helpers::operator<<;

MessageBuffer::MessageBuffer(const Params *p)
-    : SimObject(p), m_recycle_latency(p->recycle_latency),
+    : SimObject(p),
    m_max_size(p->buffer_size), m_time_last_time_size_checked(0),
    m_time_last_time_enqueue(0), m_time_last_time_pop(0),
    m_last_arrival_time(0), m_strict_fifo(p->ordered),
    m_randomization(p->randomization)
{
    m_msg_counter = 0;
    m_consumer = NULL;
-    m_sender = NULL;
-    m_receiver = NULL;
-
    m_size_last_time_size_checked = 0;
    m_size_at_cycle_start = 0;
    m_msgs_this_cycle = 0;
    m_not_avail_count = 0;
    m_priority_rank = 0;

    m_stall_msg_map.clear();
    m_input_link_id = 0;
    m_vnet_id = 0;
}

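With m_recycle_latency, m_sender and m_receiver removed, the buffer no longer knows
anything about clocks; every public method below takes a Tick supplied by the
caller. For orientation, a minimal sketch of the conversions a caller already has
available (the helpers are gem5 ClockedObject members; the wrapper function is
hypothetical and only illustrates where the Ticks come from):

    // Sketch: the time conversions a caller now performs itself before
    // talking to the reworked MessageBuffer interface.
    void
    timeConversions(ClockedObject *owner)
    {
        Tick now    = owner->clockEdge();               // current cycle, as an absolute tick
        Tick delta  = owner->cyclesToTicks(Cycles(1));  // one local cycle, in ticks
        Cycles back = owner->ticksToCycles(delta);      // and the reverse conversion
        // (these values feed the calls shown in the sketches further down)
    }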
unsigned int
-MessageBuffer::getSize()
+MessageBuffer::getSize(Tick curTime)
{
-    if (m_time_last_time_size_checked != m_receiver->curCycle()) {
-        m_time_last_time_size_checked = m_receiver->curCycle();
+    if (m_time_last_time_size_checked != curTime) {
+        m_time_last_time_size_checked = curTime;
        m_size_last_time_size_checked = m_prio_heap.size();
    }

    return m_size_last_time_size_checked;
}

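The occupancy query now takes the caller's notion of "now" instead of reading
m_receiver. A hedged caller-side sketch (any ClockedObject holding a pointer to
this buffer; the variable names are illustrative):

    // Sketch: the buffer caches the size once per distinct query time.
    unsigned int occupancy = buffer->getSize(owner->clockEdge());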
bool
-MessageBuffer::areNSlotsAvailable(unsigned int n)
+MessageBuffer::areNSlotsAvailable(unsigned int n, Tick current_time)
{

    // fast path when message buffers have infinite size
    if (m_max_size == 0) {
        return true;
    }

    // determine the correct size for the current cycle
    // pop operations shouldn't affect the network's visible size
    // until schd cycle, but enqueue operations affect the visible
    // size immediately
    unsigned int current_size = 0;

-    if (m_time_last_time_pop < m_sender->clockEdge()) {
+    if (m_time_last_time_pop < current_time) {
        // no pops this cycle - heap size is correct
        current_size = m_prio_heap.size();
    } else {
-        if (m_time_last_time_enqueue < m_sender->curCycle()) {
+        if (m_time_last_time_enqueue < current_time) {
            // no enqueues this cycle - m_size_at_cycle_start is correct
            current_size = m_size_at_cycle_start;
        } else {
            // both pops and enqueues occurred this cycle - add new
            // enqueued msgs to m_size_at_cycle_start
            current_size = m_size_at_cycle_start + m_msgs_this_cycle;
        }
    }

--- 9 unchanged lines hidden ---

        return false;
    }
}
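
Flow control looks the same from the caller's side; only the time argument is
new. A hedged sketch of an output-port check (the function and parameter names
are illustrative, not part of this changeset):

    // Sketch: refuse to send this cycle if the downstream buffer is full.
    bool
    canSend(ClockedObject *owner, MessageBuffer *out_port, unsigned int msgs)
    {
        return out_port->areNSlotsAvailable(msgs, owner->clockEdge());
    }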

const Message*
MessageBuffer::peek() const
{
    DPRINTF(RubyQueue, "Peeking at head of queue.\n");
-    assert(isReady());
-
    const Message* msg_ptr = m_prio_heap.front().get();
    assert(msg_ptr);

    DPRINTF(RubyQueue, "Message: %s\n", (*msg_ptr));
    return msg_ptr;
}

// FIXME - move me somewhere else
-Cycles
+Tick
random_time()
{
-    Cycles time(1);
-    time += Cycles(random_mt.random(0, 3)); // [0...3]
+    Tick time = 1;
+    time += random_mt.random(0, 3); // [0...3]
    if (random_mt.random(0, 7) == 0) {  // 1 in 8 chance
-        time += Cycles(100 + random_mt.random(1, 15)); // 100 + [1...15]
+        time += 100 + random_mt.random(1, 15); // 100 + [1...15]
    }
    return time;
}

void
-MessageBuffer::enqueue(MsgPtr message, Cycles delta)
+MessageBuffer::enqueue(MsgPtr message, Tick current_time, Tick delta)
{
    // record current time in case we have a pop that also adjusts my size
-    if (m_time_last_time_enqueue < m_sender->curCycle()) {
+    if (m_time_last_time_enqueue < current_time) {
        m_msgs_this_cycle = 0;  // first msg this cycle
-        m_time_last_time_enqueue = m_sender->curCycle();
+        m_time_last_time_enqueue = current_time;
    }

    m_msg_counter++;
    m_msgs_this_cycle++;

    // Calculate the arrival time of the message, that is, the first
    // cycle the message can be dequeued.
    assert(delta > 0);
-    Tick current_time = m_sender->clockEdge();
    Tick arrival_time = 0;

    if (!RubySystem::getRandomization() || !m_randomization) {
        // No randomization
-        arrival_time = current_time + delta * m_sender->clockPeriod();
+        arrival_time = current_time + delta;
    } else {
        // Randomization - ignore delta
        if (m_strict_fifo) {
            if (m_last_arrival_time < current_time) {
                m_last_arrival_time = current_time;
            }
-            arrival_time = m_last_arrival_time +
-                random_time() * m_sender->clockPeriod();
+            arrival_time = m_last_arrival_time + random_time();
        } else {
-            arrival_time = current_time +
-                random_time() * m_sender->clockPeriod();
+            arrival_time = current_time + random_time();
        }
    }

    // Check the arrival time
    assert(arrival_time > current_time);
    if (m_strict_fifo) {
        if (arrival_time < m_last_arrival_time) {
            panic("FIFO ordering violated: %s name: %s current time: %d "
                  "delta: %d arrival_time: %d last arrival_time: %d\n",
-                  *this, name(), current_time,
-                  delta * m_sender->clockPeriod(),
-                  arrival_time, m_last_arrival_time);
+                  *this, name(), current_time, delta, arrival_time,
+                  m_last_arrival_time);
        }
    }

    // If running a cache trace, don't worry about the last arrival checks
    if (!RubySystem::getWarmupEnabled()) {
        m_last_arrival_time = arrival_time;
    }

    // compute the delay cycles and set enqueue time
    Message* msg_ptr = message.get();
    assert(msg_ptr != NULL);

-    assert(m_sender->clockEdge() >= msg_ptr->getLastEnqueueTime() &&
+    assert(current_time >= msg_ptr->getLastEnqueueTime() &&
           "ensure we aren't dequeued early");

-    msg_ptr->updateDelayedTicks(m_sender->clockEdge());
+    msg_ptr->updateDelayedTicks(current_time);
    msg_ptr->setLastEnqueueTime(arrival_time);
    msg_ptr->setMsgCounter(m_msg_counter);

    // Insert the message into the priority heap
    m_prio_heap.push_back(message);
    push_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());

    DPRINTF(RubyQueue, "Enqueue arrival_time: %lld, Message: %s\n",
            arrival_time, *(message.get()));

    // Schedule the wakeup
    assert(m_consumer != NULL);
    m_consumer->scheduleEventAbsolute(arrival_time);
    m_consumer->storeEventInfo(m_vnet_id);
}

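Because delta is now a Tick rather than a Cycles count, the multiplication by the
sender's clock period moves out of the buffer and into the caller. A hedged
caller-side sketch (the one-cycle latency and the function name are illustrative):

    // Sketch: enqueue 'msg' so it becomes visible one of the caller's cycles
    // from now. cyclesToTicks() is the ClockedObject helper that stands in for
    // the old "delta * m_sender->clockPeriod()" done inside the buffer.
    void
    sendMessage(ClockedObject *owner, MessageBuffer *out_port, MsgPtr msg)
    {
        out_port->enqueue(msg, owner->clockEdge(),
                          owner->cyclesToTicks(Cycles(1)));
    }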
-Cycles
-MessageBuffer::dequeue()
+Tick
+MessageBuffer::dequeue(Tick current_time)
{
    DPRINTF(RubyQueue, "Popping\n");
-    assert(isReady());
+    assert(isReady(current_time));

    // get MsgPtr of the message about to be dequeued
    MsgPtr message = m_prio_heap.front();

    // get the delay cycles
-    message->updateDelayedTicks(m_receiver->clockEdge());
-    Cycles delayCycles =
-        m_receiver->ticksToCycles(message->getDelayedTicks());
+    message->updateDelayedTicks(current_time);
+    Tick delay = message->getDelayedTicks();

    // record previous size and time so the current buffer size isn't
    // adjusted until schd cycle
-    if (m_time_last_time_pop < m_receiver->clockEdge()) {
+    if (m_time_last_time_pop < current_time) {
        m_size_at_cycle_start = m_prio_heap.size();
-        m_time_last_time_pop = m_receiver->clockEdge();
+        m_time_last_time_pop = current_time;
    }

-    pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
-             greater<MsgPtr>());
+    pop_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
    m_prio_heap.pop_back();

-    return delayCycles;
+    return delay;
}

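dequeue() now reports the accumulated queueing delay in Ticks, so a caller that
still wants cycles converts the value itself. A hedged sketch (the profiling
hook is hypothetical):

    // Sketch: pop the head message and convert its delay with the caller's
    // own clock, which is what the buffer used to do through m_receiver.
    void
    popHead(ClockedObject *owner, MessageBuffer *in_port)
    {
        Tick delay_ticks = in_port->dequeue(owner->clockEdge());
        Cycles delay_cycles = owner->ticksToCycles(delay_ticks);
        // ... feed delay_cycles to a latency profiler ...
    }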
void
MessageBuffer::clear()
{
    m_prio_heap.clear();

    m_msg_counter = 0;
-    m_time_last_time_enqueue = Cycles(0);
+    m_time_last_time_enqueue = 0;
    m_time_last_time_pop = 0;
    m_size_at_cycle_start = 0;
    m_msgs_this_cycle = 0;
}

void
-MessageBuffer::recycle()
+MessageBuffer::recycle(Tick current_time, Tick recycle_latency)
{
    DPRINTF(RubyQueue, "Recycling.\n");
-    assert(isReady());
+    assert(isReady(current_time));
    MsgPtr node = m_prio_heap.front();
    pop_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());

-    node->setLastEnqueueTime(m_receiver->clockEdge(m_recycle_latency));
+    Tick future_time = current_time + recycle_latency;
+    node->setLastEnqueueTime(future_time);
+
    m_prio_heap.back() = node;
    push_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
-    m_consumer->
-        scheduleEventAbsolute(m_receiver->clockEdge(m_recycle_latency));
+    m_consumer->scheduleEventAbsolute(future_time);
}

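With m_recycle_latency dropped from the constructor, the recycle latency is now a
caller-side parameter as well. A hedged sketch of how a controller that keeps its
own recycle latency might retry the head message (the function and parameter
names are illustrative):

    // Sketch: push the head message's visible time out by the controller's
    // recycle latency, expressed in ticks.
    void
    retryHead(ClockedObject *owner, MessageBuffer *in_port,
              Cycles recycle_latency)
    {
        in_port->recycle(owner->clockEdge(),
                         owner->cyclesToTicks(recycle_latency));
    }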
void
MessageBuffer::reanalyzeList(list<MsgPtr> &lt, Tick schdTick)
{
    while(!lt.empty()) {
        m_msg_counter++;
        MsgPtr m = lt.front();

--- 5 unchanged lines hidden ---

                  greater<MsgPtr>());

        m_consumer->scheduleEventAbsolute(schdTick);
        lt.pop_front();
    }
}

void
-MessageBuffer::reanalyzeMessages(Addr addr)
+MessageBuffer::reanalyzeMessages(Addr addr, Tick current_time)
{
    DPRINTF(RubyQueue, "ReanalyzeMessages %s\n", addr);
    assert(m_stall_msg_map.count(addr) > 0);
-    Tick curTick = m_receiver->clockEdge();

    //
    // Put all stalled messages associated with this address back on the
    // prio heap. The reanalyzeList call will make sure the consumer is
    // scheduled for the current cycle so that the previously stalled messages
    // will be observed before any younger messages that may arrive this cycle
    //
-    reanalyzeList(m_stall_msg_map[addr], curTick);
+    reanalyzeList(m_stall_msg_map[addr], current_time);
    m_stall_msg_map.erase(addr);
}

void
-MessageBuffer::reanalyzeAllMessages()
+MessageBuffer::reanalyzeAllMessages(Tick current_time)
{
    DPRINTF(RubyQueue, "ReanalyzeAllMessages\n");
-    Tick curTick = m_receiver->clockEdge();

    //
    // Put all stalled messages associated with this address back on the
    // prio heap. The reanalyzeList call will make sure the consumer is
    // scheduled for the current cycle so that the previously stalled messages
    // will be observed before any younger messages that may arrive this cycle.
    //
    for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
         map_iter != m_stall_msg_map.end(); ++map_iter) {
-        reanalyzeList(map_iter->second, curTick);
+        reanalyzeList(map_iter->second, current_time);
    }
    m_stall_msg_map.clear();
}

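Both reanalyze variants now take the wakeup time from the controller instead of
reading it through m_receiver. A hedged sketch (the trigger condition is
hypothetical):

    // Sketch: wake every stalled message, e.g. after a controller-wide state
    // change, scheduling them for the caller's current clock edge.
    void
    wakeAllStalled(ClockedObject *owner, MessageBuffer *in_port)
    {
        in_port->reanalyzeAllMessages(owner->clockEdge());
    }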
void
-MessageBuffer::stallMessage(Addr addr)
+MessageBuffer::stallMessage(Addr addr, Tick current_time)
{
    DPRINTF(RubyQueue, "Stalling due to %s\n", addr);
-    assert(isReady());
+    assert(isReady(current_time));
    assert(getOffset(addr) == 0);
    MsgPtr message = m_prio_heap.front();

-    dequeue();
+    dequeue(current_time);

    //
    // Note: no event is scheduled to analyze the map at a later time.
    // Instead the controller is responsible to call reanalyzeMessages when
    // these addresses change state.
    //
    (m_stall_msg_map[addr]).push_back(message);
}
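
A hedged sketch of the stall side, matching the note above: the controller parks
the head message and later wakes it with reanalyzeMessages() (the transient-state
scenario and names are hypothetical):

    // Sketch: if the head message hits a block in a transient state, stall it
    // under its address instead of consuming it.
    void
    stallHead(ClockedObject *owner, MessageBuffer *in_port, Addr addr)
    {
        in_port->stallMessage(addr, owner->clockEdge());
        // later, once the block settles:
        //     in_port->reanalyzeMessages(addr, owner->clockEdge());
    }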

--- 7 unchanged lines hidden ---

    }

    vector<MsgPtr> copy(m_prio_heap);
    sort_heap(copy.begin(), copy.end(), greater<MsgPtr>());
    ccprintf(out, "%s] %s", copy, name());
}

bool
-MessageBuffer::isReady() const
+MessageBuffer::isReady(Tick current_time) const
{
    return ((m_prio_heap.size() > 0) &&
-            (m_prio_heap.front()->getLastEnqueueTime() <= m_receiver->clockEdge()));
+            (m_prio_heap.front()->getLastEnqueueTime() <= current_time));
}

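isReady() is the guard most consumers check first, so it is where the caller's
clock shows up most often. A hedged consumer-loop sketch (the handler is
hypothetical):

    // Sketch: drain every message that has become visible by the caller's
    // current clock edge.
    void
    drain(ClockedObject *owner, MessageBuffer *in_port)
    {
        while (in_port->isReady(owner->clockEdge())) {
            const Message *msg = in_port->peek();
            // ... handle *msg ...
            in_port->dequeue(owner->clockEdge());
        }
    }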
bool
MessageBuffer::functionalRead(Packet *pkt)
{
    // Check the priority heap and read any messages that may
    // correspond to the address in the packet.
    for (unsigned int i = 0; i < m_prio_heap.size(); ++i) {

--- 58 unchanged lines hidden ---