1/* 2 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; --- 26 unchanged lines hidden (view full) --- 35#include "debug/RubyQueue.hh" 36#include "mem/ruby/network/MessageBuffer.hh" 37#include "mem/ruby/system/RubySystem.hh" 38 39using namespace std; 40using m5::stl_helpers::operator<<; 41 42MessageBuffer::MessageBuffer(const Params *p) |
// NOTE(review): m_stall_map_size is the running count of messages parked in
// m_stall_msg_map; it starts at zero here and is kept in sync by
// stallMessage() / reanalyzeMessages() / reanalyzeAllMessages() below.
43 : SimObject(p), m_stall_map_size(0), |
44 m_max_size(p->buffer_size), m_time_last_time_size_checked(0), 45 m_time_last_time_enqueue(0), m_time_last_time_pop(0), 46 m_last_arrival_time(0), m_strict_fifo(p->ordered), 47 m_randomization(p->randomization) 48{ 49 m_msg_counter = 0; 50 m_consumer = NULL; 51 m_size_last_time_size_checked = 0; --- 42 unchanged lines hidden (view full) --- 94 } else { 95 // both pops and enqueues occured this cycle - add new 96 // enqueued msgs to m_size_at_cycle_start 97 current_size = m_size_at_cycle_start + m_msgs_this_cycle; 98 } 99 } 100 101 // now compare the new size with our max size |
// Stalled messages still occupy space in this buffer even though they are
// off the prio heap, so the n requested slots must fit alongside both the
// in-flight messages and the stall map's contents.
102 if (current_size + m_stall_map_size + n <= m_max_size) {
103 return true; 104 } else { 105 DPRINTF(RubyQueue, "n: %d, current_size: %d, heap size: %d, " 106 "m_max_size: %d\n", 107 n, current_size, m_prio_heap.size(), m_max_size); 108 m_not_avail_count++; 109 return false; 110 } --- 173 unchanged lines hidden (view full) --- 284 assert(m_stall_msg_map.count(addr) > 0); 285 286 // 287 // Put all stalled messages associated with this address back on the 288 // prio heap. The reanalyzeList call will make sure the consumer is 289 // scheduled for the current cycle so that the previously stalled messages 290 // will be observed before any younger messages that may arrive this cycle 291 // |
// Deduct this address's stalled messages from the stall-map count *before*
// reanalyzeList() moves them back onto the prio heap, so the capacity check
// in areNSlotsAvailable() never double-counts them.
// NOTE(review): the assert below is only meaningful if m_stall_map_size is a
// signed type -- confirm its declaration in the header.
292 m_stall_map_size -= m_stall_msg_map[addr].size();
293 assert(m_stall_map_size >= 0); |
294 reanalyzeList(m_stall_msg_map[addr], current_time);
295 m_stall_msg_map.erase(addr);
296}
297 
298 void
299 MessageBuffer::reanalyzeAllMessages(Tick current_time)
300 {
301 DPRINTF(RubyQueue, "ReanalyzeAllMessages\n");
302 
303 //
304 // Put all stalled messages (for every stalled address) back on the
305 // prio heap. The reanalyzeList call will make sure the consumer is
306 // scheduled for the current cycle so that the previously stalled messages
307 // will be observed before any younger messages that may arrive this cycle.
308 //
309 for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
310 map_iter != m_stall_msg_map.end(); ++map_iter) { |
// Same bookkeeping as reanalyzeMessages(): remove each address's stalled
// messages from the running count before they are re-enqueued; the map
// itself is cleared once after the loop.
311 m_stall_map_size -= map_iter->second.size();
312 assert(m_stall_map_size >= 0); |
313 reanalyzeList(map_iter->second, current_time); 314 } 315 m_stall_msg_map.clear(); 316} 317 318void 319MessageBuffer::stallMessage(Addr addr, Tick current_time) 320{ --- 5 unchanged lines hidden (view full) --- 326 dequeue(current_time); 327 328 // 329 // Note: no event is scheduled to analyze the map at a later time. 330 // Instead the controller is responsible to call reanalyzeMessages when 331 // these addresses change state. 332 // 333 (m_stall_msg_map[addr]).push_back(message); |
// One more message is now parked in the stall map; this mirrors the
// decrements performed when the message is later reanalyzed.
334 m_stall_map_size++; |
335} 336 337void 338MessageBuffer::print(ostream& out) const 339{ 340 ccprintf(out, "[MessageBuffer: "); 341 if (m_consumer != NULL) { 342 ccprintf(out, " consumer-yes "); --- 61 unchanged lines hidden --- |