1/* 2 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; --- 38 unchanged lines hidden (view full) --- 47 * 48 * Description: See Profiler.hh 49 * 50 * $Id$ 51 * 52 */ 53 54#include "mem/ruby/profiler/Profiler.hh" |
// NOTE(review): This file is a collapsed diff-viewer export. The numbers fused into each
// line ("75{", "76 m_name = name;") and the "--- N unchanged lines hidden ---" markers are
// artifacts of the export, not real source tokens; they are preserved byte-for-byte below.
55#include "mem/ruby/profiler/AddressProfiler.hh" 56#include "mem/ruby/system/System.hh" 57#include "mem/ruby/network/Network.hh" 58#include "mem/gems_common/PrioHeap.hh" 59#include "mem/protocol/CacheMsg.hh" 60#include "mem/protocol/Protocol.hh" 61#include "mem/gems_common/util.hh" 62#include "mem/gems_common/Map.hh" --- 4 unchanged lines hidden (view full) --- 67#include <sys/times.h> 68 69extern std::ostream * debug_cout_ptr; 70 71static double process_memory_total(); 72static double process_memory_resident(); 73 74Profiler::Profiler(const string & name)
// Constructor: records the profiler's name and allocates the request-profile map
// (a string -> count table filled in by profileRequest(), cleared by clearStats()).
75{ 76 m_name = name; 77 m_requestProfileMap_ptr = new Map<string, int>;
78
// Sub-profilers start out null; presumably they are allocated in init() — the
// allocation site is inside a hidden region of this view, so confirm against the full file.
79 m_inst_profiler_ptr = NULL; 80 m_address_profiler_ptr = NULL; 81
// Wall-clock start is captured once at construction (the inline comment notes it is
// deliberately NOT reset by clearStats()). Periodic stats default to every 1M cycles,
// written to cerr until init() installs a real output file.
// Destructor: frees the periodic output stream only when it was heap-allocated
// (i.e. not the shared &cerr default) and frees the request map.
// NOTE(review): m_inst_profiler_ptr / m_address_profiler_ptr are not deleted here —
// verify ownership in the full file; if this class news them in init(), this leaks.
82 m_real_time_start_time = time(NULL); // Not reset in clearStats() 83 m_stats_period = 1000000; // Default 84 m_periodic_output_file_ptr = &cerr; 85 86} 87 88Profiler::~Profiler() 89{ 90 if (m_periodic_output_file_ptr != &cerr) { 91 delete m_periodic_output_file_ptr; 92 }
93 delete m_requestProfileMap_ptr;
// init(): configures the profiler from argv and registers per-memory-controller
// profilers (body mostly hidden in this view).
// wakeup(): periodic stats dump — emits a snapshot of misses, cycles, transactions and
// memory usage to the periodic output stream. The FIXME notes it duplicates printStats logic.
94} 95 96void Profiler::init(const vector<string> & argv, vector<string> memory_control_names) 97{ 98 // added by SS 99 vector<string>::iterator it; 100 memory_control_profiler* mcp; 101 m_memory_control_names = memory_control_names; --- 55 unchanged lines hidden (view full) --- 157 m_inst_profiler_ptr -> setAllInstructions(m_all_instructions); 158 } 159} 160 161void Profiler::wakeup() 162{ 163 // FIXME - avoid the repeated code 164
// Per-sequencer cycle counts since the last clearStats().
165 Vector<integer_t> perProcCycleCount; 166 perProcCycleCount.setSize(RubySystem::getNumberOfSequencers()); 167 168 for(int i=0; i < RubySystem::getNumberOfSequencers(); i++) {
// The +1 below (per the original comment) keeps later per-cycle divisions from
// dividing by zero when a sequencer has not advanced.
169 perProcCycleCount[i] = g_system_ptr->getCycleCount(i) - m_cycles_executed_at_start[i] + 1; 170 // The +1 allows us to avoid division by zero 171 } 172 173 integer_t total_misses = m_perProcTotalMisses.sum();
// Aggregate totals across sequencers ("simics" naming is a GEMS-era holdover).
174 integer_t simics_cycles_executed = perProcCycleCount.sum(); 175 integer_t transactions_started = m_perProcStartTransaction.sum(); 176 integer_t transactions_ended = m_perProcEndTransaction.sum(); 177 178 (*m_periodic_output_file_ptr) << "ruby_cycles: " << g_eventQueue_ptr->getTime()-m_ruby_start << endl; 179 (*m_periodic_output_file_ptr) << "total_misses: " << total_misses << " " << m_perProcTotalMisses << endl;
180 (*m_periodic_output_file_ptr) << "simics_cycles_executed: " << simics_cycles_executed << " " << perProcCycleCount << endl; 181 (*m_periodic_output_file_ptr) << "transactions_started: " << transactions_started << " " << m_perProcStartTransaction << endl; 182 (*m_periodic_output_file_ptr) << "transactions_ended: " << transactions_ended << " " << m_perProcEndTransaction << endl;
// Process-memory stats; the explicit > 0 check guards the resident/total ratio
// against division by zero when /proc parsing yields 0.
183 (*m_periodic_output_file_ptr) << "mbytes_resident: " << process_memory_resident() << endl; 184 (*m_periodic_output_file_ptr) << "mbytes_total: " << process_memory_total() << endl; 185 if (process_memory_total() > 0) { 186 (*m_periodic_output_file_ptr) << "resident_ratio: " << process_memory_resident()/process_memory_total() << endl; 187 } 188 (*m_periodic_output_file_ptr) << "miss_latency: " << m_allMissLatencyHistogram << endl; 189 190 *m_periodic_output_file_ptr << endl; --- 68 unchanged lines hidden (view full) --- 259 times(&vtime); 260 seconds = (vtime.tms_utime + vtime.tms_stime) / 100.0; 261 minutes = seconds / 60.0; 262 hours = minutes / 60.0; 263 days = hours / 24.0; 264 out << "Virtual_time_in_seconds: " << seconds << endl; 265 out << "Virtual_time_in_minutes: " << minutes << endl; 266 out << "Virtual_time_in_hours: " << hours << endl;
// printStats() interior (signature hidden above in this collapsed view): dumps
// virtual (CPU) time, ruby time, memory usage, miss/transaction aggregates,
// per-memory-controller stats, and latency histograms to `out`.
// NOTE(review): the "/ 100.0" above converts times() ticks to seconds assuming a
// 100 Hz clock tick; POSIX says to use sysconf(_SC_CLK_TCK) — confirm on target platforms.
267 out << "Virtual_time_in_days: " << days << endl;
268 out << endl; 269 270 out << "Ruby_current_time: " << g_eventQueue_ptr->getTime() << endl; 271 out << "Ruby_start_time: " << m_ruby_start << endl; 272 out << "Ruby_cycles: " << ruby_cycles << endl; 273 out << endl; 274 275 if (!short_stats) { 276 out << "mbytes_resident: " << process_memory_resident() << endl; 277 out << "mbytes_total: " << process_memory_total() << endl; 278 if (process_memory_total() > 0) { 279 out << "resident_ratio: " << process_memory_resident()/process_memory_total() << endl; 280 } 281 out << endl; 282
283 } 284
// Per-sequencer derived metrics: cycles and misses per completed transaction.
285 Vector<integer_t> perProcCycleCount;
286 Vector<double> perProcCyclesPerTrans; 287 Vector<double> perProcMissesPerTrans; 288
289
290 perProcCycleCount.setSize(RubySystem::getNumberOfSequencers());
291 perProcCyclesPerTrans.setSize(RubySystem::getNumberOfSequencers()); 292 perProcMissesPerTrans.setSize(RubySystem::getNumberOfSequencers()); 293 294 for(int i=0; i < RubySystem::getNumberOfSequencers(); i++) {
295 perProcCycleCount[i] = g_system_ptr->getCycleCount(i) - m_cycles_executed_at_start[i] + 1; 296 // The +1 allows us to avoid division by zero
// A sequencer with zero ended transactions reports 0 rather than dividing by zero.
297 298 int trans = m_perProcEndTransaction[i]; 299 if (trans == 0) {
300 perProcCyclesPerTrans[i] = 0; 301 perProcMissesPerTrans[i] = 0; 302 } else {
303 perProcCyclesPerTrans[i] = ruby_cycles / double(trans); 304 perProcMissesPerTrans[i] = m_perProcTotalMisses[i] / double(trans); 305 } 306 } 307 308 integer_t total_misses = m_perProcTotalMisses.sum(); 309 integer_t user_misses = m_perProcUserMisses.sum(); 310 integer_t supervisor_misses = m_perProcSupervisorMisses.sum();
311 integer_t simics_cycles_executed = perProcCycleCount.sum(); 312 integer_t transactions_started = m_perProcStartTransaction.sum(); 313 integer_t transactions_ended = m_perProcEndTransaction.sum(); 314
// Aggregate versions of the per-proc metrics; both guarded against
// transactions_ended == 0 with the ternaries below.
315 double cycles_per_transaction = (transactions_ended != 0) ? (RubySystem::getNumberOfSequencers() * double(ruby_cycles)) / double(transactions_ended) : 0; 316 double misses_per_transaction = (transactions_ended != 0) ? double(total_misses) / double(transactions_ended) : 0; 317 318 out << "Total_misses: " << total_misses << endl; 319 out << "total_misses: " << total_misses << " " << m_perProcTotalMisses << endl; 320 out << "user_misses: " << user_misses << " " << m_perProcUserMisses << endl; 321 out << "supervisor_misses: " << supervisor_misses << " " << m_perProcSupervisorMisses << endl; 322 out << endl;
323 out << "ruby_cycles_executed: " << simics_cycles_executed << " " << perProcCycleCount << endl;
324 out << endl; 325 out << "transactions_started: " << transactions_started << " " << m_perProcStartTransaction << endl; 326 out << "transactions_ended: " << transactions_ended << " " << m_perProcEndTransaction << endl;
327 out << "cycles_per_transaction: " << cycles_per_transaction << " " << perProcCyclesPerTrans << endl; 328 out << "misses_per_transaction: " << misses_per_transaction << " " << perProcMissesPerTrans << endl; 329 330 out << endl; 331
// Per-memory-controller section. Locals shadow the profiler-struct field names
// (m_memReq etc.) — confusing but harmless; each is a snapshot copy.
// NOTE(review): the guard below is (m_memReq || m_memRefresh), but stallsPerReq divides
// by m_memReq — a controller that has only refreshed (m_memReq == 0) divides by zero
// (inf with IEEE doubles). Consider guarding the division on m_memReq alone.
332 out << endl; 333 334 vector<string>::iterator it; 335 336 for ( it=m_memory_control_names.begin() ; it < m_memory_control_names.end(); it++ ){ 337 long long int m_memReq = m_memory_control_profilers[(*it).c_str()] -> m_memReq; 338 long long int m_memRefresh = m_memory_control_profilers[(*it).c_str()] -> m_memRefresh; 339 long long int m_memInputQ = m_memory_control_profilers[(*it).c_str()] -> m_memInputQ; --- 9 unchanged lines hidden (view full) --- 349 long long int m_memTfawBusy = m_memory_control_profilers[(*it).c_str()] -> m_memTfawBusy; 350 long long int m_memReadWriteBusy = m_memory_control_profilers[(*it).c_str()] -> m_memReadWriteBusy; 351 long long int m_memDataBusBusy = m_memory_control_profilers[(*it).c_str()] -> m_memDataBusBusy; 352 Vector<long long int> m_memBankCount = m_memory_control_profilers[(*it).c_str()] -> m_memBankCount; 353 354 if (m_memReq || m_memRefresh) { // if there's a memory controller at all 355 long long int total_stalls = m_memInputQ + m_memBankQ + m_memWaitCycles; 356 double stallsPerReq = total_stalls * 1.0 / m_memReq;
357 out << "Memory control " << (*it) << ":" << endl;
358 out << " memory_total_requests: " << m_memReq << endl; // does not include refreshes 359 out << " memory_reads: " << m_memRead << endl; 360 out << " memory_writes: " << m_memWrite << endl; 361 out << " memory_refreshes: " << m_memRefresh << endl; 362 out << " memory_total_request_delays: " << total_stalls << endl; 363 out << " memory_delays_per_request: " << stallsPerReq << endl; 364 out << " memory_delays_in_input_queue: " << m_memInputQ << endl; 365 out << " memory_delays_behind_head_of_bank_queue: " << m_memBankQ << endl; --- 37 unchanged lines hidden (view full) --- 403 } 404 out << endl; 405 } 406 out << endl; 407 408 out << "Busy Bank Count:" << m_busyBankCount << endl; 409 out << endl; 410
411 out << "sequencer_requests_outstanding: " << m_sequencer_requests << endl;
// Long-form only (short_stats == false): non-zero-latency demand-miss histograms,
// broken down by request type and by responding machine type.
412 out << endl; 413 } 414 415 if (!short_stats) { 416 out << "All Non-Zero Cycle Demand Cache Accesses" << endl; 417 out << "----------------------------------------" << endl; 418 out << "miss_latency: " << m_allMissLatencyHistogram << endl; 419 for(int i=0; i<m_missLatencyHistograms.size(); i++) { 420 if (m_missLatencyHistograms[i].size() > 0) { 421 out << "miss_latency_" << RubyRequestType(i) << ": " << m_missLatencyHistograms[i] << endl; 422 } 423 } 424 for(int i=0; i<m_machLatencyHistograms.size(); i++) { 425 if (m_machLatencyHistograms[i].size() > 0) { 426 out << "miss_latency_" << GenericMachineType(i) << ": " << m_machLatencyHistograms[i] << endl; 427 } 428 }
// Same breakdown for software-prefetch requests.
429 430 out << endl; 431 432 out << "All Non-Zero Cycle SW Prefetch Requests" << endl; 433 out << "------------------------------------" << endl; 434 out << "prefetch_latency: " << m_allSWPrefetchLatencyHistogram << endl; 435 for(int i=0; i<m_SWPrefetchLatencyHistograms.size(); i++) { 436 if (m_SWPrefetchLatencyHistograms[i].size() > 0) { 437 out << "prefetch_latency_" << CacheRequestType(i) << ": " << m_SWPrefetchLatencyHistograms[i] << endl; 438 } 439 } 440 for(int i=0; i<m_SWPrefetchMachLatencyHistograms.size(); i++) { 441 if (m_SWPrefetchMachLatencyHistograms[i].size() > 0) { 442 out << "prefetch_latency_" << GenericMachineType(i) << ": " << m_SWPrefetchMachLatencyHistograms[i] << endl; 443 } 444 } 445 out << "prefetch_latency_L2Miss:" << m_SWPrefetchL2MissLatencyHistogram << endl; 446
// Sharing histograms are only printed when any sharing was recorded; the size()
// guard also keeps total_miss > 0 for the percentage divisions below (assuming
// sharing samples imply misses — confirm in the full file).
447 if (m_all_sharing_histogram.size() > 0) { 448 out << "all_sharing: " << m_all_sharing_histogram << endl; 449 out << "read_sharing: " << m_read_sharing_histogram << endl; 450 out << "write_sharing: " << m_write_sharing_histogram << endl; 451 452 out << "all_sharing_percent: "; m_all_sharing_histogram.printPercent(out); out << endl; 453 out << "read_sharing_percent: "; m_read_sharing_histogram.printPercent(out); out << endl; 454 out << "write_sharing_percent: "; m_write_sharing_histogram.printPercent(out); out << endl; 455 456 int64 total_miss = m_cache_to_cache + m_memory_to_cache; 457 out << "all_misses: " << total_miss << endl; 458 out << "cache_to_cache_misses: " << m_cache_to_cache << endl; 459 out << "memory_to_cache_misses: " << m_memory_to_cache << endl; 460 out << "cache_to_cache_percent: " << 100.0 * (double(m_cache_to_cache) / double(total_miss)) << endl; 461 out << "memory_to_cache_percent: " << 100.0 * (double(m_memory_to_cache) / double(total_miss)) << endl; 462 out << endl; 463 } 464
465 if (m_outstanding_requests.size() > 0) { 466 out << "outstanding_requests: "; m_outstanding_requests.printPercent(out); out << endl;
// Tail of printStats (request-state profile, rusage dump) and clearStats():
// resets every per-sequencer counter/histogram so stats restart from "now".
// The g_system_ptr NULL check lets clearStats run before the system exists
// (e.g. from the constructor path), baselining cycle counts at 0.
467 out << endl; 468 } 469 } 470 471 if (!short_stats) { 472 out << "Request vs. RubySystem State Profile" << endl; 473 out << "--------------------------------" << endl; 474 out << endl; --- 54 unchanged lines hidden (view full) --- 529 out << "page_faults: " << usage.ru_majflt << endl; 530 out << "swaps: " << usage.ru_nswap << endl; 531 out << "block_inputs: " << usage.ru_inblock << endl; 532 out << "block_outputs: " << usage.ru_oublock << endl; 533} 534 535void Profiler::clearStats() 536{
537 m_ruby_start = g_eventQueue_ptr->getTime(); 538
539 m_cycles_executed_at_start.setSize(RubySystem::getNumberOfSequencers()); 540 for (int i=0; i < RubySystem::getNumberOfSequencers(); i++) { 541 if (g_system_ptr == NULL) {
542 m_cycles_executed_at_start[i] = 0; 543 } else {
544 m_cycles_executed_at_start[i] = g_system_ptr->getCycleCount(i); 545 } 546 } 547 548 m_perProcTotalMisses.setSize(RubySystem::getNumberOfSequencers()); 549 m_perProcUserMisses.setSize(RubySystem::getNumberOfSequencers()); 550 m_perProcSupervisorMisses.setSize(RubySystem::getNumberOfSequencers()); 551 m_perProcStartTransaction.setSize(RubySystem::getNumberOfSequencers()); --- 18 unchanged lines hidden (view full) --- 570 571 m_delayedCyclesHistogram.clear(); 572 m_delayedCyclesNonPFHistogram.clear(); 573 m_delayedCyclesVCHistograms.setSize(RubySystem::getNetwork()->getNumberOfVirtualNetworks()); 574 for (int i = 0; i < RubySystem::getNetwork()->getNumberOfVirtualNetworks(); i++) { 575 m_delayedCyclesVCHistograms[i].clear(); 576 } 577
// Latency histograms are sized by enum cardinality; clear(200) presumably sets
// 200 buckets/bins — confirm against Histogram's API. GenericMachineType gets
// NUM+1 slots (the extra slot's meaning is not visible here).
578 m_missLatencyHistograms.setSize(RubyRequestType_NUM);
579 for(int i=0; i<m_missLatencyHistograms.size(); i++) { 580 m_missLatencyHistograms[i].clear(200); 581 } 582 m_machLatencyHistograms.setSize(GenericMachineType_NUM+1); 583 for(int i=0; i<m_machLatencyHistograms.size(); i++) { 584 m_machLatencyHistograms[i].clear(200); 585 } 586 m_allMissLatencyHistogram.clear(200);
587 588 m_SWPrefetchLatencyHistograms.setSize(CacheRequestType_NUM); 589 for(int i=0; i<m_SWPrefetchLatencyHistograms.size(); i++) { 590 m_SWPrefetchLatencyHistograms[i].clear(200); 591 } 592 m_SWPrefetchMachLatencyHistograms.setSize(GenericMachineType_NUM+1); 593 for(int i=0; i<m_SWPrefetchMachLatencyHistograms.size(); i++) { 594 m_SWPrefetchMachLatencyHistograms[i].clear(200); 595 } 596 m_allSWPrefetchLatencyHistogram.clear(200);
597
598 m_sequencer_requests.clear();
599 m_read_sharing_histogram.clear(); 600 m_write_sharing_histogram.clear(); 601 m_all_sharing_histogram.clear(); 602 m_cache_to_cache = 0; 603 m_memory_to_cache = 0; 604
605 // clear HashMaps 606 m_requestProfileMap_ptr->clear(); 607 608 // count requests profiled 609 m_requests = 0; 610
611 m_outstanding_requests.clear(); 612 m_outstanding_persistent_requests.clear(); 613
// Reset each registered memory controller's counters, then re-record the start
// time (m_ruby_start is set both at the top and bottom of clearStats — the second
// assignment is redundant unless the hidden middle section advances time).
614//added by SS 615 vector<string>::iterator it; 616 617 for ( it=m_memory_control_names.begin() ; it < m_memory_control_names.end(); it++ ){ 618 m_memory_control_profilers[(*it).c_str()] -> m_memReq = 0; 619 m_memory_control_profilers[(*it).c_str()] -> m_memBankBusy = 0; 620 m_memory_control_profilers[(*it).c_str()] -> m_memBusBusy = 0; 621 m_memory_control_profilers[(*it).c_str()] -> m_memTfawBusy = 0; --- 15 unchanged lines hidden (view full) --- 637 } 638 // Flush the prefetches through the system - used so that there are no outstanding requests after stats are cleared 639 //g_eventQueue_ptr->triggerAllEvents(); 640 641 // update the start time 642 m_ruby_start = g_eventQueue_ptr->getTime(); 643} 644
// addAddressTraceSample: forwards non-instruction-fetch accesses to the address
// profiler (the IFETCH filter is explicit below; the forwarding call itself is in
// a hidden region of this view).
// profileRequest (tail visible below): increments the per-request-string counter,
// inserting the key with count 1 on first sight.
645void Profiler::addAddressTraceSample(const CacheMsg& msg, NodeID id) 646{ 647 if (msg.getType() != CacheRequestType_IFETCH) { 648 649 // Note: The following line should be commented out if you want to 650 // use the special profiling that is part of the GS320 protocol 651 652 // NOTE: Unless PROFILE_HOT_LINES is enabled, nothing will be profiled by the AddressProfiler --- 41 unchanged lines hidden (view full) --- 694 695 if (m_requestProfileMap_ptr->exist(requestStr)) { 696 (m_requestProfileMap_ptr->lookup(requestStr))++; 697 } else { 698 m_requestProfileMap_ptr->add(requestStr, 1); 699 } 700} 701
// startTransaction/endTransaction: bump the per-cpu transaction counters.
// NOTE(review): `cpu` is used unchecked as a Vector index — callers are presumably
// trusted to pass a valid sequencer id; confirm.
// missLatency: records one non-zero-cycle demand miss into the aggregate histogram
// and the per-request-type histogram.
702void Profiler::startTransaction(int cpu) 703{ 704 m_perProcStartTransaction[cpu]++; 705} 706 707void Profiler::endTransaction(int cpu) 708{ 709 m_perProcEndTransaction[cpu]++; --- 14 unchanged lines hidden (view full) --- 724 m_busyBankCount++; 725} 726 727// non-zero cycle demand request 728void Profiler::missLatency(Time t, RubyRequestType type) 729{ 730 m_allMissLatencyHistogram.add(t); 731 m_missLatencyHistograms[type].add(t);
// swPrefetchLatency: same recording for software prefetches, additionally keyed by
// the responding machine type.
// process_memory_resident (tail below): parses the second field of /proc/self/statm
// (resident pages) and scales by MULTIPLIER to megabytes. Linux-specific; no check
// that the open/extract succeeded — on failure the 0-initialized locals yield 0.
732} 733 734// non-zero cycle prefetch request 735void Profiler::swPrefetchLatency(Time t, CacheRequestType type, GenericMachineType respondingMach) 736{ 737 m_allSWPrefetchLatencyHistogram.add(t); 738 m_SWPrefetchLatencyHistograms[type].add(t); 739 m_SWPrefetchMachLatencyHistograms[respondingMach].add(t); --- 52 unchanged lines hidden (view full) --- 792 proc_file.open("/proc/self/statm"); 793 int total_size_in_pages = 0; 794 int res_size_in_pages = 0; 795 proc_file >> total_size_in_pages; 796 proc_file >> res_size_in_pages; 797 return double(res_size_in_pages)*MULTIPLIER; // size in megabytes 798} 799
// rubyWatch: logs a watch event to the debug stream; the SIMICS register reads are
// stubbed out (tr is hard-coded 0), so watch_address is always Address(0) here.
// watchAddress: membership test against the watch list.
// NOTE(review): the if/else is a verbose form of
//   return m_watch_address_list_ptr->exist(addr);
800void Profiler::rubyWatch(int id){ 801 //int rn_g1 = 0;//SIMICS_get_register_number(id, "g1"); 802 uint64 tr = 0;//SIMICS_read_register(id, rn_g1); 803 Address watch_address = Address(tr); 804 const int ID_SPACES = 3; 805 const int TIME_SPACES = 7; 806 807 (* debug_cout_ptr).flags(ios::right); --- 10 unchanged lines hidden (view full) --- 818 819bool Profiler::watchAddress(Address addr){ 820 if (m_watch_address_list_ptr->exist(addr)) 821 return true; 822 else 823 return false; 824} 825
// getTotalTransactionsExecuted: total completed transactions across all sequencers.
826int64 Profiler::getTotalTransactionsExecuted() const { 827 return m_perProcEndTransaction.sum(); 828}  829
// MemoryControl hooks: each asserts the controller was registered in init() before
// bumping its counters (count(name) == 1 on the profiler map).
830// For MemoryControl: 831void Profiler::profileMemReq(string name, int bank) { 832// printf("name is %s", name.c_str()); 833 assert(m_memory_control_profilers.count(name) == 1); 834 m_memory_control_profilers[name] -> m_memReq++; 835 m_memory_control_profilers[name] -> m_memBankCount[bank]++; 836} 837void Profiler::profileMemBankBusy(string name) { assert(m_memory_control_profilers.count(name) == 1); m_memory_control_profilers[name] -> m_memBankBusy++; } --- 14 unchanged lines hidden ---