old ( 6374:11423b4639c0 )  new ( 6433:0f0f0fbef977 )
1/*
2 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;

--- 38 unchanged lines hidden ---

47 *
48 * Description: See Profiler.hh
49 *
50 * $Id$
51 *
52 */
53
54#include "mem/ruby/profiler/Profiler.hh"
55#include "mem/ruby/profiler/CacheProfiler.hh"
56#include "mem/ruby/profiler/AddressProfiler.hh"
57#include "mem/ruby/system/System.hh"
58#include "mem/ruby/network/Network.hh"
59#include "mem/gems_common/PrioHeap.hh"
60#include "mem/protocol/CacheMsg.hh"
61#include "mem/protocol/Protocol.hh"
62#include "mem/gems_common/util.hh"
63#include "mem/gems_common/Map.hh"

--- 4 unchanged lines hidden ---

68#include <sys/times.h>
69
70extern std::ostream * debug_cout_ptr;
71
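// Helpers that report the simulator process's total and resident memory
// footprint in megabytes (see the /proc/self/statm parsing near the end of
// this file).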
72static double process_memory_total();
73static double process_memory_resident();
74
75Profiler::Profiler(const string & name)
76 : m_conflicting_histogram(-1)
77{
78 m_name = name;
79 m_requestProfileMap_ptr = new Map<string, int>;
80 m_L1D_cache_profiler_ptr = new CacheProfiler("L1D_cache");
81 m_L1I_cache_profiler_ptr = new CacheProfiler("L1I_cache");
82
83 m_L2_cache_profiler_ptr = new CacheProfiler("L2_cache");
84
85 m_inst_profiler_ptr = NULL;
86 m_address_profiler_ptr = NULL;
87
88/*
89 m_address_profiler_ptr = new AddressProfiler;
90 m_inst_profiler_ptr = NULL;
91 if (m_all_instructions) {
92 m_inst_profiler_ptr = new AddressProfiler;
93 }
94*/
95 m_conflicting_map_ptr = new Map<Address, Time>;
96
97 m_real_time_start_time = time(NULL); // Not reset in clearStats()
98 m_stats_period = 1000000; // Default
99 m_periodic_output_file_ptr = &cerr;
100
101}
102
103Profiler::~Profiler()
104{
105 if (m_periodic_output_file_ptr != &cerr) {
106 delete m_periodic_output_file_ptr;
107 }
108 delete m_address_profiler_ptr;
109 delete m_L1D_cache_profiler_ptr;
110 delete m_L1I_cache_profiler_ptr;
111 delete m_L2_cache_profiler_ptr;
112 delete m_requestProfileMap_ptr;
113 delete m_conflicting_map_ptr;
114}
115
116void Profiler::init(const vector<string> & argv, vector<string> memory_control_names)
117{
118 // added by SS
119 vector<string>::iterator it;
120 memory_control_profiler* mcp;
121 m_memory_control_names = memory_control_names;

--- 55 unchanged lines hidden ---

177 m_inst_profiler_ptr -> setAllInstructions(m_all_instructions);
178 }
179}
180
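// Periodic statistics dump: each wakeup writes a snapshot of the counters
// below to m_periodic_output_file_ptr (cerr by default; see the constructor).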
181void Profiler::wakeup()
182{
183 // FIXME - avoid the repeated code
184
185 Vector<integer_t> perProcInstructionCount;
186 perProcInstructionCount.setSize(RubySystem::getNumberOfSequencers());
187
188 Vector<integer_t> perProcCycleCount;
189 perProcCycleCount.setSize(RubySystem::getNumberOfSequencers());
190
191 for(int i=0; i < RubySystem::getNumberOfSequencers(); i++) {
192 perProcInstructionCount[i] = g_system_ptr->getInstructionCount(i) - m_instructions_executed_at_start[i] + 1;
193 perProcCycleCount[i] = g_system_ptr->getCycleCount(i) - m_cycles_executed_at_start[i] + 1;
194 // The +1 allows us to avoid division by zero
195 }
196
197 integer_t total_misses = m_perProcTotalMisses.sum();
198 integer_t instruction_executed = perProcInstructionCount.sum();
199 integer_t simics_cycles_executed = perProcCycleCount.sum();
200 integer_t transactions_started = m_perProcStartTransaction.sum();
201 integer_t transactions_ended = m_perProcEndTransaction.sum();
202
203 (*m_periodic_output_file_ptr) << "ruby_cycles: " << g_eventQueue_ptr->getTime()-m_ruby_start << endl;
204 (*m_periodic_output_file_ptr) << "total_misses: " << total_misses << " " << m_perProcTotalMisses << endl;
205 (*m_periodic_output_file_ptr) << "instruction_executed: " << instruction_executed << " " << perProcInstructionCount << endl;
206 (*m_periodic_output_file_ptr) << "simics_cycles_executed: " << simics_cycles_executed << " " << perProcCycleCount << endl;
207 (*m_periodic_output_file_ptr) << "transactions_started: " << transactions_started << " " << m_perProcStartTransaction << endl;
208 (*m_periodic_output_file_ptr) << "transactions_ended: " << transactions_ended << " " << m_perProcEndTransaction << endl;
209 (*m_periodic_output_file_ptr) << "L1TBE_usage: " << m_L1tbeProfile << endl;
210 (*m_periodic_output_file_ptr) << "L2TBE_usage: " << m_L2tbeProfile << endl;
211 (*m_periodic_output_file_ptr) << "mbytes_resident: " << process_memory_resident() << endl;
212 (*m_periodic_output_file_ptr) << "mbytes_total: " << process_memory_total() << endl;
213 if (process_memory_total() > 0) {
214 (*m_periodic_output_file_ptr) << "resident_ratio: " << process_memory_resident()/process_memory_total() << endl;
215 }
216 (*m_periodic_output_file_ptr) << "miss_latency: " << m_allMissLatencyHistogram << endl;
217
218 *m_periodic_output_file_ptr << endl;

--- 68 unchanged lines hidden ---

287 times(&vtime);
288 seconds = (vtime.tms_utime + vtime.tms_stime) / 100.0;
289 minutes = seconds / 60.0;
290 hours = minutes / 60.0;
291 days = hours / 24.0;
292 out << "Virtual_time_in_seconds: " << seconds << endl;
293 out << "Virtual_time_in_minutes: " << minutes << endl;
294 out << "Virtual_time_in_hours: " << hours << endl;
 295  out << "Virtual_time_in_days: " << days << endl;
296 out << endl;
297
298 out << "Ruby_current_time: " << g_eventQueue_ptr->getTime() << endl;
299 out << "Ruby_start_time: " << m_ruby_start << endl;
300 out << "Ruby_cycles: " << ruby_cycles << endl;
301 out << endl;
302
303 if (!short_stats) {
304 out << "mbytes_resident: " << process_memory_resident() << endl;
305 out << "mbytes_total: " << process_memory_total() << endl;
306 if (process_memory_total() > 0) {
307 out << "resident_ratio: " << process_memory_resident()/process_memory_total() << endl;
308 }
309 out << endl;
310
311 if(m_num_BA_broadcasts + m_num_BA_unicasts != 0){
312 out << endl;
313 out << "Broadcast_percent: " << (float)m_num_BA_broadcasts/(m_num_BA_broadcasts+m_num_BA_unicasts) << endl;
314 }
315 }
316
317 Vector<integer_t> perProcInstructionCount;
318 Vector<integer_t> perProcCycleCount;
319 Vector<double> perProcCPI;
320 Vector<double> perProcMissesPerInsn;
321 Vector<double> perProcInsnPerTrans;
322 Vector<double> perProcCyclesPerTrans;
323 Vector<double> perProcMissesPerTrans;
324
325 perProcInstructionCount.setSize(RubySystem::getNumberOfSequencers());
326 perProcCycleCount.setSize(RubySystem::getNumberOfSequencers());
327 perProcCPI.setSize(RubySystem::getNumberOfSequencers());
328 perProcMissesPerInsn.setSize(RubySystem::getNumberOfSequencers());
329
330 perProcInsnPerTrans.setSize(RubySystem::getNumberOfSequencers());
331 perProcCyclesPerTrans.setSize(RubySystem::getNumberOfSequencers());
332 perProcMissesPerTrans.setSize(RubySystem::getNumberOfSequencers());
333
334 for(int i=0; i < RubySystem::getNumberOfSequencers(); i++) {
335 perProcInstructionCount[i] = g_system_ptr->getInstructionCount(i) - m_instructions_executed_at_start[i] + 1;
336 perProcCycleCount[i] = g_system_ptr->getCycleCount(i) - m_cycles_executed_at_start[i] + 1;
337 // The +1 allows us to avoid division by zero
338 perProcCPI[i] = double(ruby_cycles)/perProcInstructionCount[i];
339 perProcMissesPerInsn[i] = 1000.0 * (double(m_perProcTotalMisses[i]) / double(perProcInstructionCount[i]));
340
341 int trans = m_perProcEndTransaction[i];
342 if (trans == 0) {
343 perProcInsnPerTrans[i] = 0;
344 perProcCyclesPerTrans[i] = 0;
345 perProcMissesPerTrans[i] = 0;
346 } else {
347 perProcInsnPerTrans[i] = perProcInstructionCount[i] / double(trans);
348 perProcCyclesPerTrans[i] = ruby_cycles / double(trans);
349 perProcMissesPerTrans[i] = m_perProcTotalMisses[i] / double(trans);
350 }
351 }
352
353 integer_t total_misses = m_perProcTotalMisses.sum();
354 integer_t user_misses = m_perProcUserMisses.sum();
355 integer_t supervisor_misses = m_perProcSupervisorMisses.sum();
356 integer_t instruction_executed = perProcInstructionCount.sum();
357 integer_t simics_cycles_executed = perProcCycleCount.sum();
358 integer_t transactions_started = m_perProcStartTransaction.sum();
359 integer_t transactions_ended = m_perProcEndTransaction.sum();
360
361 double instructions_per_transaction = (transactions_ended != 0) ? double(instruction_executed) / double(transactions_ended) : 0;
362 double cycles_per_transaction = (transactions_ended != 0) ? (RubySystem::getNumberOfSequencers() * double(ruby_cycles)) / double(transactions_ended) : 0;
363 double misses_per_transaction = (transactions_ended != 0) ? double(total_misses) / double(transactions_ended) : 0;
364
365 out << "Total_misses: " << total_misses << endl;
366 out << "total_misses: " << total_misses << " " << m_perProcTotalMisses << endl;
367 out << "user_misses: " << user_misses << " " << m_perProcUserMisses << endl;
368 out << "supervisor_misses: " << supervisor_misses << " " << m_perProcSupervisorMisses << endl;
369 out << endl;
370 out << "instruction_executed: " << instruction_executed << " " << perProcInstructionCount << endl;
371 out << "ruby_cycles_executed: " << simics_cycles_executed << " " << perProcCycleCount << endl;
372 out << "cycles_per_instruction: " << (RubySystem::getNumberOfSequencers()*double(ruby_cycles))/double(instruction_executed) << " " << perProcCPI << endl;
373 out << "misses_per_thousand_instructions: " << 1000.0 * (double(total_misses) / double(instruction_executed)) << " " << perProcMissesPerInsn << endl;
374 out << endl;
375 out << "transactions_started: " << transactions_started << " " << m_perProcStartTransaction << endl;
376 out << "transactions_ended: " << transactions_ended << " " << m_perProcEndTransaction << endl;
377 out << "instructions_per_transaction: " << instructions_per_transaction << " " << perProcInsnPerTrans << endl;
378 out << "cycles_per_transaction: " << cycles_per_transaction << " " << perProcCyclesPerTrans << endl;
379 out << "misses_per_transaction: " << misses_per_transaction << " " << perProcMissesPerTrans << endl;
380
381 out << endl;
382
383 // m_L1D_cache_profiler_ptr->printStats(out);
384 // m_L1I_cache_profiler_ptr->printStats(out);
385 // m_L2_cache_profiler_ptr->printStats(out);
386
387 out << endl;
388
389 vector<string>::iterator it;
390
391 for ( it=m_memory_control_names.begin() ; it < m_memory_control_names.end(); it++ ){
392 long long int m_memReq = m_memory_control_profilers[(*it).c_str()] -> m_memReq;
393 long long int m_memRefresh = m_memory_control_profilers[(*it).c_str()] -> m_memRefresh;
394 long long int m_memInputQ = m_memory_control_profilers[(*it).c_str()] -> m_memInputQ;

--- 9 unchanged lines hidden ---

404 long long int m_memTfawBusy = m_memory_control_profilers[(*it).c_str()] -> m_memTfawBusy;
405 long long int m_memReadWriteBusy = m_memory_control_profilers[(*it).c_str()] -> m_memReadWriteBusy;
406 long long int m_memDataBusBusy = m_memory_control_profilers[(*it).c_str()] -> m_memDataBusBusy;
407 Vector<long long int> m_memBankCount = m_memory_control_profilers[(*it).c_str()] -> m_memBankCount;
408
409 if (m_memReq || m_memRefresh) { // if there's a memory controller at all
410 long long int total_stalls = m_memInputQ + m_memBankQ + m_memWaitCycles;
411 double stallsPerReq = total_stalls * 1.0 / m_memReq;
412 out << "Memory control:" << endl;
413 out << " memory_total_requests: " << m_memReq << endl; // does not include refreshes
414 out << " memory_reads: " << m_memRead << endl;
415 out << " memory_writes: " << m_memWrite << endl;
416 out << " memory_refreshes: " << m_memRefresh << endl;
417 out << " memory_total_request_delays: " << total_stalls << endl;
418 out << " memory_delays_per_request: " << stallsPerReq << endl;
419 out << " memory_delays_in_input_queue: " << m_memInputQ << endl;
420 out << " memory_delays_behind_head_of_bank_queue: " << m_memBankQ << endl;

--- 37 unchanged lines hidden ---

458 }
459 out << endl;
460 }
461 out << endl;
462
463 out << "Busy Bank Count:" << m_busyBankCount << endl;
464 out << endl;
465
466 out << "L1TBE_usage: " << m_L1tbeProfile << endl;
467 out << "L2TBE_usage: " << m_L2tbeProfile << endl;
468 out << "StopTable_usage: " << m_stopTableProfile << endl;
469 out << "sequencer_requests_outstanding: " << m_sequencer_requests << endl;
470 out << "store_buffer_size: " << m_store_buffer_size << endl;
471 out << "unique_blocks_in_store_buffer: " << m_store_buffer_blocks << endl;
472 out << endl;
473 }
474
475 if (!short_stats) {
476 out << "All Non-Zero Cycle Demand Cache Accesses" << endl;
477 out << "----------------------------------------" << endl;
478 out << "miss_latency: " << m_allMissLatencyHistogram << endl;
479 for(int i=0; i<m_missLatencyHistograms.size(); i++) {
480 if (m_missLatencyHistograms[i].size() > 0) {
481 out << "miss_latency_" << RubyRequestType(i) << ": " << m_missLatencyHistograms[i] << endl;
482 }
483 }
484 for(int i=0; i<m_machLatencyHistograms.size(); i++) {
485 if (m_machLatencyHistograms[i].size() > 0) {
486 out << "miss_latency_" << GenericMachineType(i) << ": " << m_machLatencyHistograms[i] << endl;
487 }
488 }
489 out << "miss_latency_L2Miss: " << m_L2MissLatencyHistogram << endl;
490
491 out << endl;
492
493 out << "All Non-Zero Cycle SW Prefetch Requests" << endl;
494 out << "------------------------------------" << endl;
495 out << "prefetch_latency: " << m_allSWPrefetchLatencyHistogram << endl;
496 for(int i=0; i<m_SWPrefetchLatencyHistograms.size(); i++) {
497 if (m_SWPrefetchLatencyHistograms[i].size() > 0) {
498 out << "prefetch_latency_" << CacheRequestType(i) << ": " << m_SWPrefetchLatencyHistograms[i] << endl;
499 }
500 }
501 for(int i=0; i<m_SWPrefetchMachLatencyHistograms.size(); i++) {
502 if (m_SWPrefetchMachLatencyHistograms[i].size() > 0) {
503 out << "prefetch_latency_" << GenericMachineType(i) << ": " << m_SWPrefetchMachLatencyHistograms[i] << endl;
504 }
505 }
 506  out << "prefetch_latency_L2Miss: " << m_SWPrefetchL2MissLatencyHistogram << endl;
507
508 out << "multicast_retries: " << m_multicast_retry_histogram << endl;
509 out << "gets_mask_prediction_count: " << m_gets_mask_prediction << endl;
510 out << "getx_mask_prediction_count: " << m_getx_mask_prediction << endl;
511 out << "explicit_training_mask: " << m_explicit_training_mask << endl;
512 out << endl;
513
514 if (m_all_sharing_histogram.size() > 0) {
515 out << "all_sharing: " << m_all_sharing_histogram << endl;
516 out << "read_sharing: " << m_read_sharing_histogram << endl;
517 out << "write_sharing: " << m_write_sharing_histogram << endl;
518
519 out << "all_sharing_percent: "; m_all_sharing_histogram.printPercent(out); out << endl;
520 out << "read_sharing_percent: "; m_read_sharing_histogram.printPercent(out); out << endl;
521 out << "write_sharing_percent: "; m_write_sharing_histogram.printPercent(out); out << endl;
522
523 int64 total_miss = m_cache_to_cache + m_memory_to_cache;
524 out << "all_misses: " << total_miss << endl;
525 out << "cache_to_cache_misses: " << m_cache_to_cache << endl;
526 out << "memory_to_cache_misses: " << m_memory_to_cache << endl;
527 out << "cache_to_cache_percent: " << 100.0 * (double(m_cache_to_cache) / double(total_miss)) << endl;
528 out << "memory_to_cache_percent: " << 100.0 * (double(m_memory_to_cache) / double(total_miss)) << endl;
529 out << endl;
530 }
531
532 if (m_conflicting_histogram.size() > 0) {
533 out << "conflicting_histogram: " << m_conflicting_histogram << endl;
534 out << "conflicting_histogram_percent: "; m_conflicting_histogram.printPercent(out); out << endl;
535 out << endl;
536 }
537
538 if (m_outstanding_requests.size() > 0) {
539 out << "outstanding_requests: "; m_outstanding_requests.printPercent(out); out << endl;
540 if (m_outstanding_persistent_requests.size() > 0) {
541 out << "outstanding_persistent_requests: "; m_outstanding_persistent_requests.printPercent(out); out << endl;
542 }
543 out << endl;
544 }
545 }
546
547 if (!short_stats) {
548 out << "Request vs. RubySystem State Profile" << endl;
549 out << "--------------------------------" << endl;
550 out << endl;

--- 54 unchanged lines hidden ---

605 out << "page_faults: " << usage.ru_majflt << endl;
606 out << "swaps: " << usage.ru_nswap << endl;
607 out << "block_inputs: " << usage.ru_inblock << endl;
608 out << "block_outputs: " << usage.ru_oublock << endl;
609}
610
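// Reset all statistics and re-snapshot the per-sequencer instruction and
// cycle counts, so that later reports are measured relative to this point.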
611void Profiler::clearStats()
612{
613 m_num_BA_unicasts = 0;
614 m_num_BA_broadcasts = 0;
615
616 m_ruby_start = g_eventQueue_ptr->getTime();
617
618 m_instructions_executed_at_start.setSize(RubySystem::getNumberOfSequencers());
619 m_cycles_executed_at_start.setSize(RubySystem::getNumberOfSequencers());
620 for (int i=0; i < RubySystem::getNumberOfSequencers(); i++) {
621 if (g_system_ptr == NULL) {
622 m_instructions_executed_at_start[i] = 0;
623 m_cycles_executed_at_start[i] = 0;
624 } else {
625 m_instructions_executed_at_start[i] = g_system_ptr->getInstructionCount(i);
626 m_cycles_executed_at_start[i] = g_system_ptr->getCycleCount(i);
627 }
628 }
629
630 m_perProcTotalMisses.setSize(RubySystem::getNumberOfSequencers());
631 m_perProcUserMisses.setSize(RubySystem::getNumberOfSequencers());
632 m_perProcSupervisorMisses.setSize(RubySystem::getNumberOfSequencers());
633 m_perProcStartTransaction.setSize(RubySystem::getNumberOfSequencers());

--- 18 unchanged lines hidden ---

652
653 m_delayedCyclesHistogram.clear();
654 m_delayedCyclesNonPFHistogram.clear();
655 m_delayedCyclesVCHistograms.setSize(RubySystem::getNetwork()->getNumberOfVirtualNetworks());
656 for (int i = 0; i < RubySystem::getNetwork()->getNumberOfVirtualNetworks(); i++) {
657 m_delayedCyclesVCHistograms[i].clear();
658 }
659
660 m_gets_mask_prediction.clear();
661 m_getx_mask_prediction.clear();
662 m_explicit_training_mask.clear();
663
664 m_missLatencyHistograms.setSize(CacheRequestType_NUM);
665 for(int i=0; i<m_missLatencyHistograms.size(); i++) {
666 m_missLatencyHistograms[i].clear(200);
667 }
668 m_machLatencyHistograms.setSize(GenericMachineType_NUM+1);
669 for(int i=0; i<m_machLatencyHistograms.size(); i++) {
670 m_machLatencyHistograms[i].clear(200);
671 }
672 m_allMissLatencyHistogram.clear(200);
673 m_L2MissLatencyHistogram.clear(200);
674
675 m_SWPrefetchLatencyHistograms.setSize(CacheRequestType_NUM);
676 for(int i=0; i<m_SWPrefetchLatencyHistograms.size(); i++) {
677 m_SWPrefetchLatencyHistograms[i].clear(200);
678 }
679 m_SWPrefetchMachLatencyHistograms.setSize(GenericMachineType_NUM+1);
680 for(int i=0; i<m_SWPrefetchMachLatencyHistograms.size(); i++) {
681 m_SWPrefetchMachLatencyHistograms[i].clear(200);
682 }
683 m_allSWPrefetchLatencyHistogram.clear(200);
684 m_SWPrefetchL2MissLatencyHistogram.clear(200);
685
686 m_multicast_retry_histogram.clear();
687
688 m_L1tbeProfile.clear();
689 m_L2tbeProfile.clear();
690 m_stopTableProfile.clear();
691 m_filter_action_histogram.clear();
692
693 m_sequencer_requests.clear();
694 m_store_buffer_size.clear();
695 m_store_buffer_blocks.clear();
696 m_read_sharing_histogram.clear();
697 m_write_sharing_histogram.clear();
698 m_all_sharing_histogram.clear();
699 m_cache_to_cache = 0;
700 m_memory_to_cache = 0;
701
702 m_predictions = 0;
703 m_predictionOpportunities = 0;
704 m_goodPredictions = 0;
705
706 // clear HashMaps
707 m_requestProfileMap_ptr->clear();
708
709 // count requests profiled
710 m_requests = 0;
711
712 // Conflicting requests
713 m_conflicting_map_ptr->clear();
714 m_conflicting_histogram.clear();
715
716 m_outstanding_requests.clear();
717 m_outstanding_persistent_requests.clear();
718
719 m_L1D_cache_profiler_ptr->clearStats();
720 m_L1I_cache_profiler_ptr->clearStats();
721 m_L2_cache_profiler_ptr->clearStats();
722
723 // for MemoryControl:
724/*
725 m_memReq = 0;
726 m_memBankBusy = 0;
727 m_memBusBusy = 0;
728 m_memTfawBusy = 0;
729 m_memReadWriteBusy = 0;
730 m_memDataBusBusy = 0;
731 m_memRefresh = 0;
732 m_memRead = 0;
733 m_memWrite = 0;
734 m_memWaitCycles = 0;
735 m_memInputQ = 0;
736 m_memBankQ = 0;
737 m_memArbWait = 0;
738 m_memRandBusy = 0;
739 m_memNotOld = 0;
740
741 for (int bank=0; bank < m_memBankCount.size(); bank++) {
742 m_memBankCount[bank] = 0;
743 }
744*/
745//added by SS
746 vector<string>::iterator it;
747
748 for ( it=m_memory_control_names.begin() ; it < m_memory_control_names.end(); it++ ){
749 m_memory_control_profilers[(*it).c_str()] -> m_memReq = 0;
750 m_memory_control_profilers[(*it).c_str()] -> m_memBankBusy = 0;
751 m_memory_control_profilers[(*it).c_str()] -> m_memBusBusy = 0;
752 m_memory_control_profilers[(*it).c_str()] -> m_memTfawBusy = 0;

--- 15 unchanged lines hidden ---

768 }
769 // Flush the prefetches through the system - used so that there are no outstanding requests after stats are cleared
770 //g_eventQueue_ptr->triggerAllEvents();
771
772 // update the start time
773 m_ruby_start = g_eventQueue_ptr->getTime();
774}
775
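// Record how long it has been since the previous request touched the same
// cache line; addr must already be a line address (asserted below).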
776void Profiler::profileConflictingRequests(const Address& addr)
777{
778 assert(addr == line_address(addr));
779 Time last_time = m_ruby_start;
780 if (m_conflicting_map_ptr->exist(addr)) {
781 last_time = m_conflicting_map_ptr->lookup(addr);
782 }
783 Time current_time = g_eventQueue_ptr->getTime();
784 assert (current_time - last_time > 0);
785 m_conflicting_histogram.add(current_time - last_time);
786 m_conflicting_map_ptr->add(addr, current_time);
787}
788
789void Profiler::addAddressTraceSample(const CacheMsg& msg, NodeID id)
790{
791 if (msg.getType() != CacheRequestType_IFETCH) {
792
793 // Note: The following line should be commented out if you want to
794 // use the special profiling that is part of the GS320 protocol
795
796 // NOTE: Unless PROFILE_HOT_LINES is enabled, nothing will be profiled by the AddressProfiler

--- 41 unchanged lines hidden ---

838
839 if (m_requestProfileMap_ptr->exist(requestStr)) {
840 (m_requestProfileMap_ptr->lookup(requestStr))++;
841 } else {
842 m_requestProfileMap_ptr->add(requestStr, 1);
843 }
844}
845
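// Bookkeeping for coherence-prediction accuracy: every call counts an
// opportunity; wasPredicted and wasGood bump the prediction and
// good-prediction counters.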
846void Profiler::recordPrediction(bool wasGood, bool wasPredicted)
847{
848 m_predictionOpportunities++;
849 if(wasPredicted){
850 m_predictions++;
851 if(wasGood){
852 m_goodPredictions++;
853 }
854 }
855}
856
857void Profiler::profileFilterAction(int action)
858{
859 m_filter_action_histogram.add(action);
860}
861
862void Profiler::profileMulticastRetry(const Address& addr, int count)
863{
864 m_multicast_retry_histogram.add(count);
865}
866
867void Profiler::startTransaction(int cpu)
868{
869 m_perProcStartTransaction[cpu]++;
870}
871
872void Profiler::endTransaction(int cpu)
873{
874 m_perProcEndTransaction[cpu]++;

--- 14 unchanged lines hidden ---

889 m_busyBankCount++;
890}
891
892// non-zero cycle demand request
893void Profiler::missLatency(Time t, RubyRequestType type)
894{
895 m_allMissLatencyHistogram.add(t);
896 m_missLatencyHistograms[type].add(t);
897 /*
898 m_machLatencyHistograms[respondingMach].add(t);
899 if(respondingMach == GenericMachineType_Directory || respondingMach == GenericMachineType_NUM) {
900 m_L2MissLatencyHistogram.add(t);
901 }
902 */
903}
904
905// non-zero cycle prefetch request
906void Profiler::swPrefetchLatency(Time t, CacheRequestType type, GenericMachineType respondingMach)
907{
908 m_allSWPrefetchLatencyHistogram.add(t);
909 m_SWPrefetchLatencyHistograms[type].add(t);
910 m_SWPrefetchMachLatencyHistograms[respondingMach].add(t);

--- 52 unchanged lines hidden ---

963 proc_file.open("/proc/self/statm");
964 int total_size_in_pages = 0;
965 int res_size_in_pages = 0;
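  // /proc/self/statm reports sizes in pages; MULTIPLIER (defined elsewhere
  // in this file) presumably scales the page count to megabytes.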
966 proc_file >> total_size_in_pages;
967 proc_file >> res_size_in_pages;
968 return double(res_size_in_pages)*MULTIPLIER; // size in megabytes
969}
970
971void Profiler::profileGetXMaskPrediction(const Set& pred_set)
972{
973 m_getx_mask_prediction.add(pred_set.count());
974}
975
976void Profiler::profileGetSMaskPrediction(const Set& pred_set)
977{
978 m_gets_mask_prediction.add(pred_set.count());
979}
980
981void Profiler::profileTrainingMask(const Set& pred_set)
982{
983 m_explicit_training_mask.add(pred_set.count());
984}
985
986int64 Profiler::getTotalInstructionsExecuted() const
987{
988 int64 sum = 1; // Starting at 1 allows us to avoid division by zero
989 for(int i=0; i < RubySystem::getNumberOfSequencers(); i++) {
990 sum += (g_system_ptr->getInstructionCount(i) - m_instructions_executed_at_start[i]);
991 }
992 return sum;
993}
994
995int64 Profiler::getTotalTransactionsExecuted() const
996{
997 int64 sum = m_perProcEndTransaction.sum();
998 if (sum > 0) {
999 return sum;
1000 } else {
1001 return 1; // Avoid division by zero errors
1002 }
1003}
1004
1005
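// Note: the SIMICS register read below is stubbed out, so tr stays 0 and the
// watched address is always Address(0) here.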
1006void Profiler::rubyWatch(int id){
1007 //int rn_g1 = 0;//SIMICS_get_register_number(id, "g1");
1008 uint64 tr = 0;//SIMICS_read_register(id, rn_g1);
1009 Address watch_address = Address(tr);
1010 const int ID_SPACES = 3;
1011 const int TIME_SPACES = 7;
1012
1013 (* debug_cout_ptr).flags(ios::right);

--- 10 unchanged lines hidden ---

1024
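// Returns whether addr is in the watch list (presumably populated by
// rubyWatch above).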
1025bool Profiler::watchAddress(Address addr){
1026 if (m_watch_address_list_ptr->exist(addr))
1027 return true;
1028 else
1029 return false;
1030}
1031
1032// For MemoryControl:
1033void Profiler::profileMemReq(string name, int bank) {
1034// printf("name is %s", name.c_str());
1035 assert(m_memory_control_profilers.count(name) == 1);
1036 m_memory_control_profilers[name] -> m_memReq++;
1037 m_memory_control_profilers[name] -> m_memBankCount[bank]++;
1038}
1039void Profiler::profileMemBankBusy(string name) { assert(m_memory_control_profilers.count(name) == 1); m_memory_control_profilers[name] -> m_memBankBusy++; }

--- 14 unchanged lines hidden ---