// Profiler.cc, revision 7565
1/* 2 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29/* 30 This file has been modified by Kevin Moore and Dan Nussbaum of the 31 Scalable Systems Research Group at Sun Microsystems Laboratories 32 (http://research.sun.com/scalable/) to support the Adaptive 33 Transactional Memory Test Platform (ATMTP). 
34 35 Please send email to atmtp-interest@sun.com with feedback, questions, or 36 to request future announcements about ATMTP. 37 38 ---------------------------------------------------------------------- 39 40 File modification date: 2008-02-23 41 42 ---------------------------------------------------------------------- 43*/ 44 45// Allows use of times() library call, which determines virtual runtime 46#include <sys/resource.h> 47#include <sys/times.h> 48 49#include <algorithm> 50 51#include "base/stl_helpers.hh" 52#include "base/str.hh" 53#include "mem/protocol/CacheMsg.hh" 54#include "mem/protocol/MachineType.hh" 55#include "mem/protocol/Protocol.hh" 56#include "mem/ruby/common/Debug.hh" 57#include "mem/ruby/network/Network.hh" 58#include "mem/ruby/profiler/AddressProfiler.hh" 59#include "mem/ruby/profiler/Profiler.hh" 60#include "mem/ruby/system/System.hh" 61#include "mem/ruby/system/System.hh" 62 63using namespace std; 64using m5::stl_helpers::operator<<; 65 66extern ostream* debug_cout_ptr; 67 68static double process_memory_total(); 69static double process_memory_resident(); 70 71Profiler::Profiler(const Params *p) 72 : SimObject(p) 73{ 74 m_inst_profiler_ptr = NULL; 75 m_address_profiler_ptr = NULL; 76 77 m_real_time_start_time = time(NULL); // Not reset in clearStats() 78 m_stats_period = 1000000; // Default 79 m_periodic_output_file_ptr = &cerr; 80 81 m_hot_lines = p->hot_lines; 82 m_all_instructions = p->all_instructions; 83 84 m_num_of_sequencers = p->num_of_sequencers; 85 86 m_hot_lines = false; 87 m_all_instructions = false; 88 89 m_address_profiler_ptr = new AddressProfiler(m_num_of_sequencers); 90 m_address_profiler_ptr->setHotLines(m_hot_lines); 91 m_address_profiler_ptr->setAllInstructions(m_all_instructions); 92 93 if (m_all_instructions) { 94 m_inst_profiler_ptr = new AddressProfiler(m_num_of_sequencers); 95 m_inst_profiler_ptr->setHotLines(m_hot_lines); 96 m_inst_profiler_ptr->setAllInstructions(m_all_instructions); 97 } 98} 99 
100Profiler::~Profiler() 101{ 102 if (m_periodic_output_file_ptr != &cerr) { 103 delete m_periodic_output_file_ptr; 104 } 105} 106 107void 108Profiler::wakeup() 109{ 110 // FIXME - avoid the repeated code 111 112 vector<integer_t> perProcCycleCount(m_num_of_sequencers); 113 114 for (int i = 0; i < m_num_of_sequencers; i++) { 115 perProcCycleCount[i] = 116 g_system_ptr->getCycleCount(i) - m_cycles_executed_at_start[i] + 1; 117 // The +1 allows us to avoid division by zero 118 } 119 120 ostream &out = *m_periodic_output_file_ptr; 121 122 out << "ruby_cycles: " << g_eventQueue_ptr->getTime()-m_ruby_start << endl 123 << "mbytes_resident: " << process_memory_resident() << endl 124 << "mbytes_total: " << process_memory_total() << endl; 125 126 if (process_memory_total() > 0) { 127 out << "resident_ratio: " 128 << process_memory_resident() / process_memory_total() << endl; 129 } 130 131 out << "miss_latency: " << m_allMissLatencyHistogram << endl; 132 133 out << endl; 134 135 if (m_all_instructions) { 136 m_inst_profiler_ptr->printStats(out); 137 } 138 139 //g_system_ptr->getNetwork()->printStats(out); 140 g_eventQueue_ptr->scheduleEvent(this, m_stats_period); 141} 142 143void 144Profiler::setPeriodicStatsFile(const string& filename) 145{ 146 cout << "Recording periodic statistics to file '" << filename << "' every " 147 << m_stats_period << " Ruby cycles" << endl; 148 149 if (m_periodic_output_file_ptr != &cerr) { 150 delete m_periodic_output_file_ptr; 151 } 152 153 m_periodic_output_file_ptr = new ofstream(filename.c_str()); 154 g_eventQueue_ptr->scheduleEvent(this, 1); 155} 156 157void 158Profiler::setPeriodicStatsInterval(integer_t period) 159{ 160 cout << "Recording periodic statistics every " << m_stats_period 161 << " Ruby cycles" << endl; 162 163 m_stats_period = period; 164 g_eventQueue_ptr->scheduleEvent(this, 1); 165} 166 167void 168Profiler::printConfig(ostream& out) const 169{ 170 out << endl; 171 out << "Profiler Configuration" << endl; 172 out << 
"----------------------" << endl; 173 out << "periodic_stats_period: " << m_stats_period << endl; 174} 175 176void 177Profiler::print(ostream& out) const 178{ 179 out << "[Profiler]"; 180} 181 182void 183Profiler::printStats(ostream& out, bool short_stats) 184{ 185 out << endl; 186 if (short_stats) { 187 out << "SHORT "; 188 } 189 out << "Profiler Stats" << endl; 190 out << "--------------" << endl; 191 192 time_t real_time_current = time(NULL); 193 double seconds = difftime(real_time_current, m_real_time_start_time); 194 double minutes = seconds / 60.0; 195 double hours = minutes / 60.0; 196 double days = hours / 24.0; 197 Time ruby_cycles = g_eventQueue_ptr->getTime()-m_ruby_start; 198 199 if (!short_stats) { 200 out << "Elapsed_time_in_seconds: " << seconds << endl; 201 out << "Elapsed_time_in_minutes: " << minutes << endl; 202 out << "Elapsed_time_in_hours: " << hours << endl; 203 out << "Elapsed_time_in_days: " << days << endl; 204 out << endl; 205 } 206 207 // print the virtual runtimes as well 208 struct tms vtime; 209 times(&vtime); 210 seconds = (vtime.tms_utime + vtime.tms_stime) / 100.0; 211 minutes = seconds / 60.0; 212 hours = minutes / 60.0; 213 days = hours / 24.0; 214 out << "Virtual_time_in_seconds: " << seconds << endl; 215 out << "Virtual_time_in_minutes: " << minutes << endl; 216 out << "Virtual_time_in_hours: " << hours << endl; 217 out << "Virtual_time_in_days: " << days << endl; 218 out << endl; 219 220 out << "Ruby_current_time: " << g_eventQueue_ptr->getTime() << endl; 221 out << "Ruby_start_time: " << m_ruby_start << endl; 222 out << "Ruby_cycles: " << ruby_cycles << endl; 223 out << endl; 224 225 if (!short_stats) { 226 out << "mbytes_resident: " << process_memory_resident() << endl; 227 out << "mbytes_total: " << process_memory_total() << endl; 228 if (process_memory_total() > 0) { 229 out << "resident_ratio: " 230 << process_memory_resident()/process_memory_total() << endl; 231 } 232 out << endl; 233 } 234 235 vector<integer_t> 
perProcCycleCount(m_num_of_sequencers); 236 237 for (int i = 0; i < m_num_of_sequencers; i++) { 238 perProcCycleCount[i] = 239 g_system_ptr->getCycleCount(i) - m_cycles_executed_at_start[i] + 1; 240 // The +1 allows us to avoid division by zero 241 } 242 243 out << "ruby_cycles_executed: " << perProcCycleCount << endl; 244 245 out << endl; 246 247 if (!short_stats) { 248 out << "Busy Controller Counts:" << endl; 249 for (int i = 0; i < MachineType_NUM; i++) { 250 int size = MachineType_base_count((MachineType)i); 251 for (int j = 0; j < size; j++) { 252 MachineID machID; 253 machID.type = (MachineType)i; 254 machID.num = j; 255 out << machID << ":" << m_busyControllerCount[i][j] << " "; 256 if ((j + 1) % 8 == 0) { 257 out << endl; 258 } 259 } 260 out << endl; 261 } 262 out << endl; 263 264 out << "Busy Bank Count:" << m_busyBankCount << endl; 265 out << endl; 266 267 out << "sequencer_requests_outstanding: " 268 << m_sequencer_requests << endl; 269 out << endl; 270 } 271 272 if (!short_stats) { 273 out << "All Non-Zero Cycle Demand Cache Accesses" << endl; 274 out << "----------------------------------------" << endl; 275 out << "miss_latency: " << m_allMissLatencyHistogram << endl; 276 for (int i = 0; i < m_missLatencyHistograms.size(); i++) { 277 if (m_missLatencyHistograms[i].size() > 0) { 278 out << "miss_latency_" << RubyRequestType(i) << ": " 279 << m_missLatencyHistograms[i] << endl; 280 } 281 } 282 for (int i = 0; i < m_machLatencyHistograms.size(); i++) { 283 if (m_machLatencyHistograms[i].size() > 0) { 284 out << "miss_latency_" << GenericMachineType(i) << ": " 285 << m_machLatencyHistograms[i] << endl; 286 } 287 } 288 289 out << "miss_latency_wCC_issue_to_initial_request: " 290 << m_wCCIssueToInitialRequestHistogram << endl; 291 out << "miss_latency_wCC_initial_forward_request: " 292 << m_wCCInitialRequestToForwardRequestHistogram << endl; 293 out << "miss_latency_wCC_forward_to_first_response: " 294 << m_wCCForwardRequestToFirstResponseHistogram << 
endl; 295 out << "miss_latency_wCC_first_response_to_completion: " 296 << m_wCCFirstResponseToCompleteHistogram << endl; 297 out << "imcomplete_wCC_Times: " << m_wCCIncompleteTimes << endl; 298 out << "miss_latency_dir_issue_to_initial_request: " 299 << m_dirIssueToInitialRequestHistogram << endl; 300 out << "miss_latency_dir_initial_forward_request: " 301 << m_dirInitialRequestToForwardRequestHistogram << endl; 302 out << "miss_latency_dir_forward_to_first_response: " 303 << m_dirForwardRequestToFirstResponseHistogram << endl; 304 out << "miss_latency_dir_first_response_to_completion: " 305 << m_dirFirstResponseToCompleteHistogram << endl; 306 out << "imcomplete_dir_Times: " << m_dirIncompleteTimes << endl; 307 308 for (int i = 0; i < m_missMachLatencyHistograms.size(); i++) { 309 for (int j = 0; j < m_missMachLatencyHistograms[i].size(); j++) { 310 if (m_missMachLatencyHistograms[i][j].size() > 0) { 311 out << "miss_latency_" << RubyRequestType(i) 312 << "_" << GenericMachineType(j) << ": " 313 << m_missMachLatencyHistograms[i][j] << endl; 314 } 315 } 316 } 317 318 out << endl; 319 320 out << "All Non-Zero Cycle SW Prefetch Requests" << endl; 321 out << "------------------------------------" << endl; 322 out << "prefetch_latency: " << m_allSWPrefetchLatencyHistogram << endl; 323 for (int i = 0; i < m_SWPrefetchLatencyHistograms.size(); i++) { 324 if (m_SWPrefetchLatencyHistograms[i].size() > 0) { 325 out << "prefetch_latency_" << CacheRequestType(i) << ": " 326 << m_SWPrefetchLatencyHistograms[i] << endl; 327 } 328 } 329 for (int i = 0; i < m_SWPrefetchMachLatencyHistograms.size(); i++) { 330 if (m_SWPrefetchMachLatencyHistograms[i].size() > 0) { 331 out << "prefetch_latency_" << GenericMachineType(i) << ": " 332 << m_SWPrefetchMachLatencyHistograms[i] << endl; 333 } 334 } 335 out << "prefetch_latency_L2Miss:" 336 << m_SWPrefetchL2MissLatencyHistogram << endl; 337 338 if (m_all_sharing_histogram.size() > 0) { 339 out << "all_sharing: " << m_all_sharing_histogram 
<< endl; 340 out << "read_sharing: " << m_read_sharing_histogram << endl; 341 out << "write_sharing: " << m_write_sharing_histogram << endl; 342 343 out << "all_sharing_percent: "; 344 m_all_sharing_histogram.printPercent(out); 345 out << endl; 346 347 out << "read_sharing_percent: "; 348 m_read_sharing_histogram.printPercent(out); 349 out << endl; 350 351 out << "write_sharing_percent: "; 352 m_write_sharing_histogram.printPercent(out); 353 out << endl; 354 355 int64 total_miss = m_cache_to_cache + m_memory_to_cache; 356 out << "all_misses: " << total_miss << endl; 357 out << "cache_to_cache_misses: " << m_cache_to_cache << endl; 358 out << "memory_to_cache_misses: " << m_memory_to_cache << endl; 359 out << "cache_to_cache_percent: " 360 << 100.0 * (double(m_cache_to_cache) / double(total_miss)) 361 << endl; 362 out << "memory_to_cache_percent: " 363 << 100.0 * (double(m_memory_to_cache) / double(total_miss)) 364 << endl; 365 out << endl; 366 } 367 368 if (m_outstanding_requests.size() > 0) { 369 out << "outstanding_requests: "; 370 m_outstanding_requests.printPercent(out); 371 out << endl; 372 out << endl; 373 } 374 } 375 376 if (!short_stats) { 377 out << "Request vs. 
RubySystem State Profile" << endl; 378 out << "--------------------------------" << endl; 379 out << endl; 380 381 map<string, int>::const_iterator i = m_requestProfileMap.begin(); 382 map<string, int>::const_iterator end = m_requestProfileMap.end(); 383 for (; i != end; ++i) { 384 const string &key = i->first; 385 int count = i->second; 386 387 double percent = (100.0 * double(count)) / double(m_requests); 388 vector<string> items; 389 tokenize(items, key, ':'); 390 vector<string>::iterator j = items.begin(); 391 vector<string>::iterator end = items.end(); 392 for (; j != end; ++i) 393 out << setw(10) << *j; 394 out << setw(11) << count; 395 out << setw(14) << percent << endl; 396 } 397 out << endl; 398 399 out << "filter_action: " << m_filter_action_histogram << endl; 400 401 if (!m_all_instructions) { 402 m_address_profiler_ptr->printStats(out); 403 } 404 405 if (m_all_instructions) { 406 m_inst_profiler_ptr->printStats(out); 407 } 408 409 out << endl; 410 out << "Message Delayed Cycles" << endl; 411 out << "----------------------" << endl; 412 out << "Total_delay_cycles: " << m_delayedCyclesHistogram << endl; 413 out << "Total_nonPF_delay_cycles: " 414 << m_delayedCyclesNonPFHistogram << endl; 415 for (int i = 0; i < m_delayedCyclesVCHistograms.size(); i++) { 416 out << " virtual_network_" << i << "_delay_cycles: " 417 << m_delayedCyclesVCHistograms[i] << endl; 418 } 419 420 printResourceUsage(out); 421 } 422} 423 424void 425Profiler::printResourceUsage(ostream& out) const 426{ 427 out << endl; 428 out << "Resource Usage" << endl; 429 out << "--------------" << endl; 430 431 integer_t pagesize = getpagesize(); // page size in bytes 432 out << "page_size: " << pagesize << endl; 433 434 rusage usage; 435 getrusage (RUSAGE_SELF, &usage); 436 437 out << "user_time: " << usage.ru_utime.tv_sec << endl; 438 out << "system_time: " << usage.ru_stime.tv_sec << endl; 439 out << "page_reclaims: " << usage.ru_minflt << endl; 440 out << "page_faults: " << usage.ru_majflt << 
endl; 441 out << "swaps: " << usage.ru_nswap << endl; 442 out << "block_inputs: " << usage.ru_inblock << endl; 443 out << "block_outputs: " << usage.ru_oublock << endl; 444} 445 446void 447Profiler::clearStats() 448{ 449 m_ruby_start = g_eventQueue_ptr->getTime(); 450 451 m_cycles_executed_at_start.resize(m_num_of_sequencers); 452 for (int i = 0; i < m_num_of_sequencers; i++) { 453 if (g_system_ptr == NULL) { 454 m_cycles_executed_at_start[i] = 0; 455 } else { 456 m_cycles_executed_at_start[i] = g_system_ptr->getCycleCount(i); 457 } 458 } 459 460 m_busyControllerCount.resize(MachineType_NUM); // all machines 461 for (int i = 0; i < MachineType_NUM; i++) { 462 int size = MachineType_base_count((MachineType)i); 463 m_busyControllerCount[i].resize(size); 464 for (int j = 0; j < size; j++) { 465 m_busyControllerCount[i][j] = 0; 466 } 467 } 468 m_busyBankCount = 0; 469 470 m_delayedCyclesHistogram.clear(); 471 m_delayedCyclesNonPFHistogram.clear(); 472 int size = RubySystem::getNetwork()->getNumberOfVirtualNetworks(); 473 m_delayedCyclesVCHistograms.resize(size); 474 for (int i = 0; i < size; i++) { 475 m_delayedCyclesVCHistograms[i].clear(); 476 } 477 478 m_missLatencyHistograms.resize(RubyRequestType_NUM); 479 for (int i = 0; i < m_missLatencyHistograms.size(); i++) { 480 m_missLatencyHistograms[i].clear(200); 481 } 482 m_machLatencyHistograms.resize(GenericMachineType_NUM+1); 483 for (int i = 0; i < m_machLatencyHistograms.size(); i++) { 484 m_machLatencyHistograms[i].clear(200); 485 } 486 m_missMachLatencyHistograms.resize(RubyRequestType_NUM); 487 for (int i = 0; i < m_missLatencyHistograms.size(); i++) { 488 m_missMachLatencyHistograms[i].resize(GenericMachineType_NUM+1); 489 for (int j = 0; j < m_missMachLatencyHistograms[i].size(); j++) { 490 m_missMachLatencyHistograms[i][j].clear(200); 491 } 492 } 493 m_allMissLatencyHistogram.clear(200); 494 m_wCCIssueToInitialRequestHistogram.clear(200); 495 m_wCCInitialRequestToForwardRequestHistogram.clear(200); 496 
m_wCCForwardRequestToFirstResponseHistogram.clear(200); 497 m_wCCFirstResponseToCompleteHistogram.clear(200); 498 m_wCCIncompleteTimes = 0; 499 m_dirIssueToInitialRequestHistogram.clear(200); 500 m_dirInitialRequestToForwardRequestHistogram.clear(200); 501 m_dirForwardRequestToFirstResponseHistogram.clear(200); 502 m_dirFirstResponseToCompleteHistogram.clear(200); 503 m_dirIncompleteTimes = 0; 504 505 m_SWPrefetchLatencyHistograms.resize(CacheRequestType_NUM); 506 for (int i = 0; i < m_SWPrefetchLatencyHistograms.size(); i++) { 507 m_SWPrefetchLatencyHistograms[i].clear(200); 508 } 509 m_SWPrefetchMachLatencyHistograms.resize(GenericMachineType_NUM+1); 510 for (int i = 0; i < m_SWPrefetchMachLatencyHistograms.size(); i++) { 511 m_SWPrefetchMachLatencyHistograms[i].clear(200); 512 } 513 m_allSWPrefetchLatencyHistogram.clear(200); 514 515 m_sequencer_requests.clear(); 516 m_read_sharing_histogram.clear(); 517 m_write_sharing_histogram.clear(); 518 m_all_sharing_histogram.clear(); 519 m_cache_to_cache = 0; 520 m_memory_to_cache = 0; 521 522 // clear HashMaps 523 m_requestProfileMap.clear(); 524 525 // count requests profiled 526 m_requests = 0; 527 528 m_outstanding_requests.clear(); 529 m_outstanding_persistent_requests.clear(); 530 531 // Flush the prefetches through the system - used so that there 532 // are no outstanding requests after stats are cleared 533 //g_eventQueue_ptr->triggerAllEvents(); 534 535 // update the start time 536 m_ruby_start = g_eventQueue_ptr->getTime(); 537} 538 539void 540Profiler::addAddressTraceSample(const CacheMsg& msg, NodeID id) 541{ 542 if (msg.getType() != CacheRequestType_IFETCH) { 543 // Note: The following line should be commented out if you 544 // want to use the special profiling that is part of the GS320 545 // protocol 546 547 // NOTE: Unless PROFILE_HOT_LINES is enabled, nothing will be 548 // profiled by the AddressProfiler 549 m_address_profiler_ptr-> 550 addTraceSample(msg.getLineAddress(), msg.getProgramCounter(), 551 
msg.getType(), msg.getAccessMode(), id, false); 552 } 553} 554 555void 556Profiler::profileSharing(const Address& addr, AccessType type, 557 NodeID requestor, const Set& sharers, 558 const Set& owner) 559{ 560 Set set_contacted(owner); 561 if (type == AccessType_Write) { 562 set_contacted.addSet(sharers); 563 } 564 set_contacted.remove(requestor); 565 int number_contacted = set_contacted.count(); 566 567 if (type == AccessType_Write) { 568 m_write_sharing_histogram.add(number_contacted); 569 } else { 570 m_read_sharing_histogram.add(number_contacted); 571 } 572 m_all_sharing_histogram.add(number_contacted); 573 574 if (number_contacted == 0) { 575 m_memory_to_cache++; 576 } else { 577 m_cache_to_cache++; 578 } 579} 580 581void 582Profiler::profileMsgDelay(int virtualNetwork, int delayCycles) 583{ 584 assert(virtualNetwork < m_delayedCyclesVCHistograms.size()); 585 m_delayedCyclesHistogram.add(delayCycles); 586 m_delayedCyclesVCHistograms[virtualNetwork].add(delayCycles); 587 if (virtualNetwork != 0) { 588 m_delayedCyclesNonPFHistogram.add(delayCycles); 589 } 590} 591 592// profiles original cache requests including PUTs 593void 594Profiler::profileRequest(const string& requestStr) 595{ 596 m_requests++; 597 598 // if it doesn't exist, conveniently, it will be created with the 599 // default value which is 0 600 m_requestProfileMap[requestStr]++; 601} 602 603void 604Profiler::controllerBusy(MachineID machID) 605{ 606 m_busyControllerCount[(int)machID.type][(int)machID.num]++; 607} 608 609void 610Profiler::profilePFWait(Time waitTime) 611{ 612 m_prefetchWaitHistogram.add(waitTime); 613} 614 615void 616Profiler::bankBusy() 617{ 618 m_busyBankCount++; 619} 620 621// non-zero cycle demand request 622void 623Profiler::missLatency(Time cycles, 624 RubyRequestType type, 625 const GenericMachineType respondingMach) 626{ 627 m_allMissLatencyHistogram.add(cycles); 628 m_missLatencyHistograms[type].add(cycles); 629 m_machLatencyHistograms[respondingMach].add(cycles); 630 
m_missMachLatencyHistograms[type][respondingMach].add(cycles); 631} 632 633void 634Profiler::missLatencyWcc(Time issuedTime, 635 Time initialRequestTime, 636 Time forwardRequestTime, 637 Time firstResponseTime, 638 Time completionTime) 639{ 640 if ((issuedTime <= initialRequestTime) && 641 (initialRequestTime <= forwardRequestTime) && 642 (forwardRequestTime <= firstResponseTime) && 643 (firstResponseTime <= completionTime)) { 644 m_wCCIssueToInitialRequestHistogram.add(initialRequestTime - issuedTime); 645 646 m_wCCInitialRequestToForwardRequestHistogram.add(forwardRequestTime - 647 initialRequestTime); 648 649 m_wCCForwardRequestToFirstResponseHistogram.add(firstResponseTime - 650 forwardRequestTime); 651 652 m_wCCFirstResponseToCompleteHistogram.add(completionTime - 653 firstResponseTime); 654 } else { 655 m_wCCIncompleteTimes++; 656 } 657} 658 659void 660Profiler::missLatencyDir(Time issuedTime, 661 Time initialRequestTime, 662 Time forwardRequestTime, 663 Time firstResponseTime, 664 Time completionTime) 665{ 666 if ((issuedTime <= initialRequestTime) && 667 (initialRequestTime <= forwardRequestTime) && 668 (forwardRequestTime <= firstResponseTime) && 669 (firstResponseTime <= completionTime)) { 670 m_dirIssueToInitialRequestHistogram.add(initialRequestTime - issuedTime); 671 672 m_dirInitialRequestToForwardRequestHistogram.add(forwardRequestTime - 673 initialRequestTime); 674 675 m_dirForwardRequestToFirstResponseHistogram.add(firstResponseTime - 676 forwardRequestTime); 677 678 m_dirFirstResponseToCompleteHistogram.add(completionTime - 679 firstResponseTime); 680 } else { 681 m_dirIncompleteTimes++; 682 } 683} 684 685// non-zero cycle prefetch request 686void 687Profiler::swPrefetchLatency(Time cycles, 688 CacheRequestType type, 689 const GenericMachineType respondingMach) 690{ 691 m_allSWPrefetchLatencyHistogram.add(cycles); 692 m_SWPrefetchLatencyHistograms[type].add(cycles); 693 m_SWPrefetchMachLatencyHistograms[respondingMach].add(cycles); 694 if 
(respondingMach == GenericMachineType_Directory || 695 respondingMach == GenericMachineType_NUM) { 696 m_SWPrefetchL2MissLatencyHistogram.add(cycles); 697 } 698} 699 700void 701Profiler::profileTransition(const string& component, NodeID version, 702 Address addr, const string& state, const string& event, 703 const string& next_state, const string& note) 704{ 705 const int EVENT_SPACES = 20; 706 const int ID_SPACES = 3; 707 const int TIME_SPACES = 7; 708 const int COMP_SPACES = 10; 709 const int STATE_SPACES = 6; 710 711 if (g_debug_ptr->getDebugTime() <= 0 || 712 g_eventQueue_ptr->getTime() < g_debug_ptr->getDebugTime()) 713 return; 714 715 ostream &out = *debug_cout_ptr; 716 out.flags(ios::right); 717 out << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " "; 718 out << setw(ID_SPACES) << version << " "; 719 out << setw(COMP_SPACES) << component; 720 out << setw(EVENT_SPACES) << event << " "; 721 722 out.flags(ios::right); 723 out << setw(STATE_SPACES) << state; 724 out << ">"; 725 out.flags(ios::left); 726 out << setw(STATE_SPACES) << next_state; 727 728 out << " " << addr << " " << note; 729 730 out << endl; 731} 732 733// Helper function 734static double 735process_memory_total() 736{ 737 // 4kB page size, 1024*1024 bytes per MB, 738 const double MULTIPLIER = 4096.0 / (1024.0 * 1024.0); 739 ifstream proc_file; 740 proc_file.open("/proc/self/statm"); 741 int total_size_in_pages = 0; 742 int res_size_in_pages = 0; 743 proc_file >> total_size_in_pages; 744 proc_file >> res_size_in_pages; 745 return double(total_size_in_pages) * MULTIPLIER; // size in megabytes 746} 747 748static double 749process_memory_resident() 750{ 751 // 4kB page size, 1024*1024 bytes per MB, 752 const double MULTIPLIER = 4096.0 / (1024.0 * 1024.0); 753 ifstream proc_file; 754 proc_file.open("/proc/self/statm"); 755 int total_size_in_pages = 0; 756 int res_size_in_pages = 0; 757 proc_file >> total_size_in_pages; 758 proc_file >> res_size_in_pages; 759 return double(res_size_in_pages) * 
MULTIPLIER; // size in megabytes 760} 761 762void 763Profiler::rubyWatch(int id) 764{ 765 uint64 tr = 0; 766 Address watch_address = Address(tr); 767 const int ID_SPACES = 3; 768 const int TIME_SPACES = 7; 769 770 ostream &out = *debug_cout_ptr; 771 772 out.flags(ios::right); 773 out << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " "; 774 out << setw(ID_SPACES) << id << " " 775 << "RUBY WATCH " << watch_address << endl; 776 777 // don't care about success or failure 778 m_watch_address_set.insert(watch_address); 779} 780 781bool 782Profiler::watchAddress(Address addr) 783{ 784 return m_watch_address_set.count(addr) > 0; 785} 786 787Profiler * 788RubyProfilerParams::create() 789{ 790 return new Profiler(this); 791} 792