/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;

--- 72 unchanged lines hidden ---

    // fast path when message buffers have infinite size
    if (m_max_size == 0) {
        return true;
    }

    // determine the correct size for the current cycle
    // pop operations shouldn't affect the network's visible size
    // until the scheduled cycle, but enqueue operations affect the
    // visible size immediately
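    // a worked example of the intended accounting (assuming the hidden
    // else-branch below adds m_msgs_this_cycle to m_size_at_cycle_start):
    // if the heap held 4 messages at the cycle edge, and this cycle saw
    // 2 pops and 1 enqueue, the network still sees 4 + 1 = 5 entries
    // until the next cycle edge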
    unsigned int current_size = 0;

    if (m_time_last_time_pop < m_sender->clockEdge()) {
        // no pops this cycle - heap size is correct
        current_size = m_prio_heap.size();
    } else {
        if (m_time_last_time_enqueue < m_sender->curCycle()) {

--- 131 unchanged lines hidden ---

    MsgPtr message = m_prio_heap.front();

    // get the delay cycles
    message->updateDelayedTicks(m_receiver->clockEdge());
    Cycles delayCycles =
        m_receiver->ticksToCycles(message->getDelayedTicks());
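    // (the delayed ticks track how long the message has waited in the
    // network, judging by the accessor names; the conversion above
    // expresses that wait in the receiver's clock domain)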
    // record the previous size and time so the current buffer size
    // isn't adjusted until the scheduled cycle
    if (m_time_last_time_pop < m_receiver->clockEdge()) {
        m_size_at_cycle_start = m_prio_heap.size();
        m_time_last_time_pop = m_receiver->clockEdge();
    }

    pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
             greater<MsgPtr>());
    m_prio_heap.pop_back();

--- 24 unchanged lines hidden ---

    node->setLastEnqueueTime(m_receiver->clockEdge(m_recycle_latency));
    m_prio_heap.back() = node;
    push_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
    m_consumer->
        scheduleEventAbsolute(m_receiver->clockEdge(m_recycle_latency));
}
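
// Re-queue a list of previously stalled messages: each message is
// stamped with schdTick and a fresh message counter, pushed back onto
// the priority heap, and the consumer is scheduled so the messages are
// examined at schdTick, ahead of any younger arrivals this cycle.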
void
MessageBuffer::reanalyzeList(list<MsgPtr> &lt, Tick schdTick)
{
    while (!lt.empty()) {
        m_msg_counter++;
        MsgPtr m = lt.front();
        m->setLastEnqueueTime(schdTick);
        m->setMsgCounter(m_msg_counter);

        m_prio_heap.push_back(m);
        push_heap(m_prio_heap.begin(), m_prio_heap.end(),
                  greater<MsgPtr>());

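        // greater<MsgPtr> keeps the vector as a min-heap, so the
        // smallest key (presumably last-enqueue time, then message
        // counter) sits at the front and is popped first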
        m_consumer->scheduleEventAbsolute(schdTick);
        lt.pop_front();
    }
}

void
MessageBuffer::reanalyzeMessages(const Address& addr)
{
    DPRINTF(RubyQueue, "ReanalyzeMessages\n");
    assert(m_stall_msg_map.count(addr) > 0);
    Tick curTick = m_receiver->clockEdge();

    //
    // Put all stalled messages associated with this address back on the
    // prio heap. The reanalyzeList call will make sure the consumer is
    // scheduled for the current cycle so that the previously stalled
    // messages will be observed before any younger messages that may
    // arrive this cycle.
    //
    reanalyzeList(m_stall_msg_map[addr], curTick);
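    // reanalyzeList drains the list until it is empty, so the map
    // entry holds nothing useful at this point and can be erased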
    m_stall_msg_map.erase(addr);
}

void
MessageBuffer::reanalyzeAllMessages()
{
    DPRINTF(RubyQueue, "ReanalyzeAllMessages\n");
    Tick curTick = m_receiver->clockEdge();

    //
    // Put all stalled messages back on the prio heap. The reanalyzeList
    // call will make sure the consumer is scheduled for the current
    // cycle so that the previously stalled messages will be observed
    // before any younger messages that may arrive this cycle.
    //
    for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
         map_iter != m_stall_msg_map.end(); ++map_iter) {
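        // drain each per-address stall list; the map itself is cleared
        // wholesale after the loop rather than entry by entry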
        reanalyzeList(map_iter->second, curTick);
    }
    m_stall_msg_map.clear();
}

void
MessageBuffer::stallMessage(const Address& addr)
{
    DPRINTF(RubyQueue, "Stalling due to %s\n", addr);

--- 92 unchanged lines hidden ---