/* MessageBuffer.cc — gem5 revision 11796:315e133f45df
 *
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
272914SN/A */ 282914SN/A 292914SN/A#include "mem/ruby/network/MessageBuffer.hh" 302914SN/A 312914SN/A#include <cassert> 322914SN/A 332914SN/A#include "base/cprintf.hh" 342914SN/A#include "base/misc.hh" 352914SN/A#include "base/random.hh" 362914SN/A#include "base/stl_helpers.hh" 372914SN/A#include "debug/RubyQueue.hh" 382914SN/A#include "mem/ruby/system/RubySystem.hh" 392914SN/A 402914SN/Ausing namespace std; 418856SN/Ausing m5::stl_helpers::operator<<; 422914SN/A 432914SN/AMessageBuffer::MessageBuffer(const Params *p) 449356Snilay@cs.wisc.edu : SimObject(p), m_stall_map_size(0), 459152Satgutier@umich.edu m_max_size(p->buffer_size), m_time_last_time_size_checked(0), 468914Sandreas.hansson@arm.com m_time_last_time_enqueue(0), m_time_last_time_pop(0), 478914Sandreas.hansson@arm.com m_last_arrival_time(0), m_strict_fifo(p->ordered), 482914SN/A m_randomization(p->randomization) 495740SN/A{ 505740SN/A m_msg_counter = 0; 518975Sandreas.hansson@arm.com m_consumer = NULL; 529342SAndreas.Sandberg@arm.com m_size_last_time_size_checked = 0; 535740SN/A m_size_at_cycle_start = 0; 545740SN/A m_msgs_this_cycle = 0; 555740SN/A m_priority_rank = 0; 565740SN/A 578914Sandreas.hansson@arm.com m_stall_msg_map.clear(); 585740SN/A m_input_link_id = 0; 595740SN/A m_vnet_id = 0; 605740SN/A 618914Sandreas.hansson@arm.com m_buf_msgs = 0; 628914Sandreas.hansson@arm.com m_stall_time = 0; 638914Sandreas.hansson@arm.com} 648914Sandreas.hansson@arm.com 658914Sandreas.hansson@arm.comunsigned int 668914Sandreas.hansson@arm.comMessageBuffer::getSize(Tick curTime) 678914Sandreas.hansson@arm.com{ 688914Sandreas.hansson@arm.com if (m_time_last_time_size_checked != curTime) { 694929SN/A m_time_last_time_size_checked = curTime; 708914Sandreas.hansson@arm.com m_size_last_time_size_checked = m_prio_heap.size(); 713091SN/A } 728856SN/A 738856SN/A return m_size_last_time_size_checked; 744490SN/A} 754490SN/A 768856SN/Abool 773296SN/AMessageBuffer::areNSlotsAvailable(unsigned int n, Tick current_time) 
788856SN/A{ 798856SN/A 808856SN/A // fast path when message buffers have infinite size 818856SN/A if (m_max_size == 0) { 828856SN/A return true; 833284SN/A } 844929SN/A 858856SN/A // determine the correct size for the current cycle 868856SN/A // pop operations shouldn't effect the network's visible size 878856SN/A // until schd cycle, but enqueue operations effect the visible 884490SN/A // size immediately 893342SN/A unsigned int current_size = 0; 904490SN/A 918914Sandreas.hansson@arm.com if (m_time_last_time_pop < current_time) { 928708SN/A // no pops this cycle - heap size is correct 938856SN/A current_size = m_prio_heap.size(); 948856SN/A } else { 958708SN/A if (m_time_last_time_enqueue < current_time) { 968856SN/A // no enqueues this cycle - m_size_at_cycle_start is correct 978708SN/A current_size = m_size_at_cycle_start; 988708SN/A } else { 998708SN/A // both pops and enqueues occured this cycle - add new 1008856SN/A // enqueued msgs to m_size_at_cycle_start 1018914Sandreas.hansson@arm.com current_size = m_size_at_cycle_start + m_msgs_this_cycle; 1028856SN/A } 1038914Sandreas.hansson@arm.com } 1048708SN/A 1058708SN/A // now compare the new size with our max size 1062914SN/A if (current_size + m_stall_map_size + n <= m_max_size) { 1072914SN/A return true; 1088948Sandreas.hansson@arm.com } else { 1093403SN/A DPRINTF(RubyQueue, "n: %d, current_size: %d, heap size: %d, " 1109160Sandreas.hansson@arm.com "m_max_size: %d\n", 1119160Sandreas.hansson@arm.com n, current_size, m_prio_heap.size(), m_max_size); 1124492SN/A m_not_avail_count++; 1139163Sandreas.hansson@arm.com return false; 1149163Sandreas.hansson@arm.com } 1159163Sandreas.hansson@arm.com} 1168914Sandreas.hansson@arm.com 1178914Sandreas.hansson@arm.comconst Message* 1184666SN/AMessageBuffer::peek() const 1198914Sandreas.hansson@arm.com{ 1208914Sandreas.hansson@arm.com DPRINTF(RubyQueue, "Peeking at head of queue.\n"); 1218914Sandreas.hansson@arm.com const Message* msg_ptr = m_prio_heap.front().get(); 
1228948Sandreas.hansson@arm.com assert(msg_ptr); 1234666SN/A 1244666SN/A DPRINTF(RubyQueue, "Message: %s\n", (*msg_ptr)); 1254666SN/A return msg_ptr; 1264666SN/A} 1278914Sandreas.hansson@arm.com 1284666SN/A// FIXME - move me somewhere else 1298948Sandreas.hansson@arm.comTick 1303450SN/Arandom_time() 1313403SN/A{ 1323450SN/A Tick time = 1; 1338914Sandreas.hansson@arm.com time += random_mt.random(0, 3); // [0...3] 1344490SN/A if (random_mt.random(0, 7) == 0) { // 1 in 8 chance 1358914Sandreas.hansson@arm.com time += 100 + random_mt.random(1, 15); // 100 + [1...15] 1368914Sandreas.hansson@arm.com } 1378914Sandreas.hansson@arm.com return time; 1388948Sandreas.hansson@arm.com} 1393403SN/A 1403403SN/Avoid 1418914Sandreas.hansson@arm.comMessageBuffer::enqueue(MsgPtr message, Tick current_time, Tick delta) 1428856SN/A{ 1438856SN/A // record current time incase we have a pop that also adjusts my size 1448914Sandreas.hansson@arm.com if (m_time_last_time_enqueue < current_time) { 1458856SN/A m_msgs_this_cycle = 0; // first msg this cycle 1468856SN/A m_time_last_time_enqueue = current_time; 1478856SN/A } 1488856SN/A 1498856SN/A m_msg_counter++; 1508975Sandreas.hansson@arm.com m_msgs_this_cycle++; 1518975Sandreas.hansson@arm.com 1528975Sandreas.hansson@arm.com // Calculate the arrival time of the message, that is, the first 1538975Sandreas.hansson@arm.com // cycle the message can be dequeued. 
1548856SN/A assert(delta > 0); 1558856SN/A Tick arrival_time = 0; 1568856SN/A 1578856SN/A if (!RubySystem::getRandomization() || !m_randomization) { 1588856SN/A // No randomization 1598856SN/A arrival_time = current_time + delta; 1608856SN/A } else { 1618856SN/A // Randomization - ignore delta 1628856SN/A if (m_strict_fifo) { 1638856SN/A if (m_last_arrival_time < current_time) { 1648914Sandreas.hansson@arm.com m_last_arrival_time = current_time; 1658856SN/A } 1668856SN/A arrival_time = m_last_arrival_time + random_time(); 1678856SN/A } else { 1688856SN/A arrival_time = current_time + random_time(); 1698914Sandreas.hansson@arm.com } 1708856SN/A } 1718856SN/A 1728856SN/A // Check the arrival time 1738856SN/A assert(arrival_time > current_time); 1748914Sandreas.hansson@arm.com if (m_strict_fifo) { 1758856SN/A if (arrival_time < m_last_arrival_time) { 1768856SN/A panic("FIFO ordering violated: %s name: %s current time: %d " 1779342SAndreas.Sandberg@arm.com "delta: %d arrival_time: %d last arrival_time: %d\n", 1789152Satgutier@umich.edu *this, name(), current_time, delta, arrival_time, 1799152Satgutier@umich.edu m_last_arrival_time); 1809342SAndreas.Sandberg@arm.com } 1819342SAndreas.Sandberg@arm.com } 1828856SN/A 1838856SN/A // If running a cache trace, don't worry about the last arrival checks 1848856SN/A if (!RubySystem::getWarmupEnabled()) { 1854492SN/A m_last_arrival_time = arrival_time; 1863403SN/A } 1878914Sandreas.hansson@arm.com 1882914SN/A // compute the delay cycles and set enqueue time 1898914Sandreas.hansson@arm.com Message* msg_ptr = message.get(); 1908914Sandreas.hansson@arm.com assert(msg_ptr != NULL); 1918856SN/A 1924492SN/A assert(current_time >= msg_ptr->getLastEnqueueTime() && 1938856SN/A "ensure we aren't dequeued early"); 1948856SN/A 1958856SN/A msg_ptr->updateDelayedTicks(current_time); 1968856SN/A msg_ptr->setLastEnqueueTime(arrival_time); 1974492SN/A msg_ptr->setMsgCounter(m_msg_counter); 1984492SN/A 1994492SN/A // Insert the message into the 
priority heap 2004492SN/A m_prio_heap.push_back(message); 2018914Sandreas.hansson@arm.com push_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>()); 2024492SN/A // Increment the number of messages statistic 2034492SN/A m_buf_msgs++; 2044492SN/A 2052914SN/A DPRINTF(RubyQueue, "Enqueue arrival_time: %lld, Message: %s\n", 2062914SN/A arrival_time, *(message.get())); 2072914SN/A 2089342SAndreas.Sandberg@arm.com // Schedule the wakeup 2092914SN/A assert(m_consumer != NULL); 2108856SN/A m_consumer->scheduleEventAbsolute(arrival_time); 2112914SN/A m_consumer->storeEventInfo(m_vnet_id); 2129152Satgutier@umich.edu} 2139342SAndreas.Sandberg@arm.com 2142914SN/ATick 2152914SN/AMessageBuffer::dequeue(Tick current_time, bool decrement_messages) 2168975Sandreas.hansson@arm.com{ 2178975Sandreas.hansson@arm.com DPRINTF(RubyQueue, "Popping\n"); 2188975Sandreas.hansson@arm.com assert(isReady(current_time)); 2198975Sandreas.hansson@arm.com 2208975Sandreas.hansson@arm.com // get MsgPtr of the message about to be dequeued 2218975Sandreas.hansson@arm.com MsgPtr message = m_prio_heap.front(); 2228975Sandreas.hansson@arm.com 2238975Sandreas.hansson@arm.com // get the delay cycles 2248975Sandreas.hansson@arm.com message->updateDelayedTicks(current_time); 2258975Sandreas.hansson@arm.com Tick delay = message->getDelayedTicks(); 2268975Sandreas.hansson@arm.com 2278975Sandreas.hansson@arm.com m_stall_time = curTick() - message->getTime(); 2288975Sandreas.hansson@arm.com 2298975Sandreas.hansson@arm.com // record previous size and time so the current buffer size isn't 2308975Sandreas.hansson@arm.com // adjusted until schd cycle 2318975Sandreas.hansson@arm.com if (m_time_last_time_pop < current_time) { 2328975Sandreas.hansson@arm.com m_size_at_cycle_start = m_prio_heap.size(); 2338975Sandreas.hansson@arm.com m_time_last_time_pop = current_time; 2348975Sandreas.hansson@arm.com } 2358975Sandreas.hansson@arm.com 2368975Sandreas.hansson@arm.com pop_heap(m_prio_heap.begin(), 
m_prio_heap.end(), greater<MsgPtr>()); 2378975Sandreas.hansson@arm.com m_prio_heap.pop_back(); 2388975Sandreas.hansson@arm.com if (decrement_messages) { 2398975Sandreas.hansson@arm.com // If the message will be removed from the queue, decrement the 2408975Sandreas.hansson@arm.com // number of message in the queue. 2418975Sandreas.hansson@arm.com m_buf_msgs--; 2428975Sandreas.hansson@arm.com } 2438975Sandreas.hansson@arm.com 2448975Sandreas.hansson@arm.com return delay; 2458975Sandreas.hansson@arm.com} 246 247void 248MessageBuffer::clear() 249{ 250 m_prio_heap.clear(); 251 252 m_msg_counter = 0; 253 m_time_last_time_enqueue = 0; 254 m_time_last_time_pop = 0; 255 m_size_at_cycle_start = 0; 256 m_msgs_this_cycle = 0; 257} 258 259void 260MessageBuffer::recycle(Tick current_time, Tick recycle_latency) 261{ 262 DPRINTF(RubyQueue, "Recycling.\n"); 263 assert(isReady(current_time)); 264 MsgPtr node = m_prio_heap.front(); 265 pop_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>()); 266 267 Tick future_time = current_time + recycle_latency; 268 node->setLastEnqueueTime(future_time); 269 270 m_prio_heap.back() = node; 271 push_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>()); 272 m_consumer->scheduleEventAbsolute(future_time); 273} 274 275void 276MessageBuffer::reanalyzeList(list<MsgPtr> <, Tick schdTick) 277{ 278 while (!lt.empty()) { 279 m_msg_counter++; 280 MsgPtr m = lt.front(); 281 m->setLastEnqueueTime(schdTick); 282 m->setMsgCounter(m_msg_counter); 283 284 m_prio_heap.push_back(m); 285 push_heap(m_prio_heap.begin(), m_prio_heap.end(), 286 greater<MsgPtr>()); 287 288 m_consumer->scheduleEventAbsolute(schdTick); 289 lt.pop_front(); 290 } 291} 292 293void 294MessageBuffer::reanalyzeMessages(Addr addr, Tick current_time) 295{ 296 DPRINTF(RubyQueue, "ReanalyzeMessages %#x\n", addr); 297 assert(m_stall_msg_map.count(addr) > 0); 298 299 // 300 // Put all stalled messages associated with this address back on the 301 // prio heap. 
The reanalyzeList call will make sure the consumer is 302 // scheduled for the current cycle so that the previously stalled messages 303 // will be observed before any younger messages that may arrive this cycle 304 // 305 m_stall_map_size -= m_stall_msg_map[addr].size(); 306 assert(m_stall_map_size >= 0); 307 reanalyzeList(m_stall_msg_map[addr], current_time); 308 m_stall_msg_map.erase(addr); 309} 310 311void 312MessageBuffer::reanalyzeAllMessages(Tick current_time) 313{ 314 DPRINTF(RubyQueue, "ReanalyzeAllMessages\n"); 315 316 // 317 // Put all stalled messages associated with this address back on the 318 // prio heap. The reanalyzeList call will make sure the consumer is 319 // scheduled for the current cycle so that the previously stalled messages 320 // will be observed before any younger messages that may arrive this cycle. 321 // 322 for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin(); 323 map_iter != m_stall_msg_map.end(); ++map_iter) { 324 m_stall_map_size -= map_iter->second.size(); 325 assert(m_stall_map_size >= 0); 326 reanalyzeList(map_iter->second, current_time); 327 } 328 m_stall_msg_map.clear(); 329} 330 331void 332MessageBuffer::stallMessage(Addr addr, Tick current_time) 333{ 334 DPRINTF(RubyQueue, "Stalling due to %#x\n", addr); 335 assert(isReady(current_time)); 336 assert(getOffset(addr) == 0); 337 MsgPtr message = m_prio_heap.front(); 338 339 // Since the message will just be moved to stall map, indicate that the 340 // buffer should not decrement the m_buf_msgs statistic 341 dequeue(current_time, false); 342 343 // 344 // Note: no event is scheduled to analyze the map at a later time. 345 // Instead the controller is responsible to call reanalyzeMessages when 346 // these addresses change state. 
347 // 348 (m_stall_msg_map[addr]).push_back(message); 349 m_stall_map_size++; 350 m_stall_count++; 351} 352 353void 354MessageBuffer::print(ostream& out) const 355{ 356 ccprintf(out, "[MessageBuffer: "); 357 if (m_consumer != NULL) { 358 ccprintf(out, " consumer-yes "); 359 } 360 361 vector<MsgPtr> copy(m_prio_heap); 362 sort_heap(copy.begin(), copy.end(), greater<MsgPtr>()); 363 ccprintf(out, "%s] %s", copy, name()); 364} 365 366bool 367MessageBuffer::isReady(Tick current_time) const 368{ 369 return ((m_prio_heap.size() > 0) && 370 (m_prio_heap.front()->getLastEnqueueTime() <= current_time)); 371} 372 373void 374MessageBuffer::regStats() 375{ 376 m_not_avail_count 377 .name(name() + ".not_avail_count") 378 .desc("Number of times this buffer did not have N slots available") 379 .flags(Stats::nozero); 380 381 m_buf_msgs 382 .name(name() + ".avg_buf_msgs") 383 .desc("Average number of messages in buffer") 384 .flags(Stats::nozero); 385 386 m_stall_count 387 .name(name() + ".num_msg_stalls") 388 .desc("Number of times messages were stalled") 389 .flags(Stats::nozero); 390 391 m_occupancy 392 .name(name() + ".avg_buf_occ") 393 .desc("Average occupancy of buffer capacity") 394 .flags(Stats::nozero); 395 396 m_stall_time 397 .name(name() + ".avg_stall_time") 398 .desc("Average number of cycles messages are stalled in this MB") 399 .flags(Stats::nozero); 400 401 if (m_max_size > 0) { 402 m_occupancy = m_buf_msgs / m_max_size; 403 } else { 404 m_occupancy = 0; 405 } 406} 407 408uint32_t 409MessageBuffer::functionalWrite(Packet *pkt) 410{ 411 uint32_t num_functional_writes = 0; 412 413 // Check the priority heap and write any messages that may 414 // correspond to the address in the packet. 
415 for (unsigned int i = 0; i < m_prio_heap.size(); ++i) { 416 Message *msg = m_prio_heap[i].get(); 417 if (msg->functionalWrite(pkt)) { 418 num_functional_writes++; 419 } 420 } 421 422 // Check the stall queue and write any messages that may 423 // correspond to the address in the packet. 424 for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin(); 425 map_iter != m_stall_msg_map.end(); 426 ++map_iter) { 427 428 for (std::list<MsgPtr>::iterator it = (map_iter->second).begin(); 429 it != (map_iter->second).end(); ++it) { 430 431 Message *msg = (*it).get(); 432 if (msg->functionalWrite(pkt)) { 433 num_functional_writes++; 434 } 435 } 436 } 437 438 return num_functional_writes; 439} 440 441MessageBuffer * 442MessageBufferParams::create() 443{ 444 return new MessageBuffer(this); 445} 446