/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
   This file has been modified by Kevin Moore and Dan Nussbaum of the
   Scalable Systems Research Group at Sun Microsystems Laboratories
   (http://research.sun.com/scalable/) to support the Adaptive
   Transactional Memory Test Platform (ATMTP).

   Please send email to atmtp-interest@sun.com with feedback, questions, or
   to request future announcements about ATMTP.

   ----------------------------------------------------------------------

   File modification date: 2008-02-23

   ----------------------------------------------------------------------
*/

/*
 * Profiler.cc
 *
 * Description: See Profiler.h
 *
 * $Id$
 *
 */

#include "Profiler.hh"
#include "CacheProfiler.hh"
#include "AddressProfiler.hh"
#include "System.hh"
#include "Network.hh"
#include "PrioHeap.hh"
#include "CacheMsg.hh"
#include "Driver.hh"
#include "Protocol.hh"
#include "util.hh"
#include "Map.hh"
#include "Debug.hh"
#include "MachineType.hh"
// #include "TransactionInterfaceManager.hh"
#include "interface.hh"
//#include "XactVisualizer.hh"  //gem5:Arka for decommissioning log_tm
//#include "XactProfiler.hh"   //gem5:Arka for decommissioning log_tm

// extern "C" {
// #include "Rock.hh"
// }

// Allows use of times() library call, which determines virtual runtime
#include <sys/times.h>

extern std::ostream * debug_cout_ptr;
extern std::ostream * xact_cout_ptr;

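// Helpers, defined later in this file, that report the simulator process's
// total and resident memory footprint (reported as mbytes_total and
// mbytes_resident in the statistics output).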
static double process_memory_total();
static double process_memory_resident();

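// Construct the profiler: allocate the cache and address profilers plus the
// transactional-memory bookkeeping maps, point the periodic output streams at
// their defaults, and zero every counter via clearStats().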
Profiler::Profiler()
  : m_conflicting_histogram(-1)
{
  m_requestProfileMap_ptr = new Map<string, int>;
  m_L1D_cache_profiler_ptr = new CacheProfiler("L1D_cache");
  m_L1I_cache_profiler_ptr = new CacheProfiler("L1I_cache");

  m_L2_cache_profiler_ptr = new CacheProfiler("L2_cache");

  m_address_profiler_ptr = new AddressProfiler;
  m_inst_profiler_ptr = NULL;
  if (PROFILE_ALL_INSTRUCTIONS) {
    m_inst_profiler_ptr = new AddressProfiler;
  }

  //m_xact_profiler_ptr = new XactProfiler; //gem5:Arka for decommissioning of log_tm

  m_conflicting_map_ptr = new Map<Address, Time>;

  m_real_time_start_time = time(NULL); // Not reset in clearStats()
  m_stats_period = 1000000; // Default
  m_periodic_output_file_ptr = &cerr;
  m_xact_visualizer_ptr      = &cout;

  //---- begin XACT_MEM code
  m_xactExceptionMap_ptr = new Map<int, int>;
  m_procsInXactMap_ptr = new Map<int, int>;
  m_abortIDMap_ptr = new Map<int, int>;
  m_commitIDMap_ptr = new Map<int, int>;
  m_xactRetryIDMap_ptr = new Map<int, int>;
  m_xactCyclesIDMap_ptr = new Map<int, int>;
  m_xactReadSetIDMap_ptr = new Map<int, int>;
  m_xactWriteSetIDMap_ptr = new Map<int, int>;
  m_xactLoadMissIDMap_ptr = new Map<int, int>;
  m_xactStoreMissIDMap_ptr = new Map<int, int>;
  m_xactInstrCountIDMap_ptr = new Map<int, integer_t>;
  m_abortPCMap_ptr = new Map<Address, int>;
  m_abortAddressMap_ptr = new Map<Address, int>;
  m_nackXIDMap_ptr = new Map<int, int>;
  m_nackXIDPairMap_ptr = new Map<int, Map<int, int> * >;
  m_nackPCMap_ptr = new Map<Address, int>;
  m_watch_address_list_ptr = new Map<Address, int>;
  m_readSetMatch_ptr = new Map<Address, int>;
  m_readSetNoMatch_ptr = new Map<Address, int>;
  m_writeSetMatch_ptr = new Map<Address, int>;
  m_writeSetNoMatch_ptr = new Map<Address, int>;
  m_xactReadFilterBitsSetOnCommit = new Map<int, Histogram>;
  m_xactReadFilterBitsSetOnAbort = new Map<int, Histogram>;
  m_xactWriteFilterBitsSetOnCommit = new Map<int, Histogram>;
  m_xactWriteFilterBitsSetOnAbort = new Map<int, Histogram>;
  //---- end XACT_MEM code

  // for MemoryControl:
  m_memReq = 0;
  m_memBankBusy = 0;
  m_memBusBusy = 0;
  m_memReadWriteBusy = 0;
  m_memDataBusBusy = 0;
  m_memTfawBusy = 0;
  m_memRefresh = 0;
  m_memRead = 0;
  m_memWrite = 0;
  m_memWaitCycles = 0;
  m_memInputQ = 0;
  m_memBankQ = 0;
  m_memArbWait = 0;
  m_memRandBusy = 0;
  m_memNotOld = 0;

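  // Size the per-bank access counter. The product below is the number of
  // banks behind a single memory channel (banks per rank x ranks per DIMM
  // x DIMMs per channel), which is the range accesses_per_bank reports over.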
  int totalBanks = RubyConfig::banksPerRank()
                 * RubyConfig::ranksPerDimm()
                 * RubyConfig::dimmsPerChannel();
  m_memBankCount.setSize(totalBanks);

  clearStats();
}

Profiler::~Profiler()
{
  if (m_periodic_output_file_ptr != &cerr) {
    delete m_periodic_output_file_ptr;
  }
  delete m_address_profiler_ptr;
  delete m_L1D_cache_profiler_ptr;
  delete m_L1I_cache_profiler_ptr;
  delete m_L2_cache_profiler_ptr;
  //delete m_xact_profiler_ptr; //gem5:Arka for decommissioning of log_tm
  delete m_requestProfileMap_ptr;
  delete m_conflicting_map_ptr;
}

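// Periodic callback: dumps the statistics accumulated since the last
// clearStats() (misses, instructions, cycles, transactions, TBE usage,
// memory footprint) to the periodic output stream, then reschedules itself
// to fire again in m_stats_period Ruby cycles.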
void Profiler::wakeup()
{
  // FIXME - avoid the repeated code

  Vector<integer_t> perProcInstructionCount;
  perProcInstructionCount.setSize(RubyConfig::numberOfProcessors());

  Vector<integer_t> perProcCycleCount;
  perProcCycleCount.setSize(RubyConfig::numberOfProcessors());

  for(int i=0; i < RubyConfig::numberOfProcessors(); i++) {
    perProcInstructionCount[i] = g_system_ptr->getDriver()->getInstructionCount(i) - m_instructions_executed_at_start[i] + 1;
    perProcCycleCount[i] = g_system_ptr->getDriver()->getCycleCount(i) - m_cycles_executed_at_start[i] + 1;
    // The +1 allows us to avoid division by zero
  }

  integer_t total_misses = m_perProcTotalMisses.sum();
  integer_t instruction_executed = perProcInstructionCount.sum();
  integer_t simics_cycles_executed = perProcCycleCount.sum();
  integer_t transactions_started = m_perProcStartTransaction.sum();
  integer_t transactions_ended = m_perProcEndTransaction.sum();

  (*m_periodic_output_file_ptr) << "ruby_cycles: " << g_eventQueue_ptr->getTime()-m_ruby_start << endl;
  (*m_periodic_output_file_ptr) << "total_misses: " << total_misses << " " << m_perProcTotalMisses << endl;
  (*m_periodic_output_file_ptr) << "instruction_executed: " << instruction_executed << " " << perProcInstructionCount << endl;
  (*m_periodic_output_file_ptr) << "simics_cycles_executed: " << simics_cycles_executed << " " << perProcCycleCount << endl;
  (*m_periodic_output_file_ptr) << "transactions_started: " << transactions_started << " " << m_perProcStartTransaction << endl;
  (*m_periodic_output_file_ptr) << "transactions_ended: " << transactions_ended << " " << m_perProcEndTransaction << endl;
  (*m_periodic_output_file_ptr) << "L1TBE_usage: " << m_L1tbeProfile << endl;
  (*m_periodic_output_file_ptr) << "L2TBE_usage: " << m_L2tbeProfile << endl;
  (*m_periodic_output_file_ptr) << "mbytes_resident: " << process_memory_resident() << endl;
  (*m_periodic_output_file_ptr) << "mbytes_total: " << process_memory_total() << endl;
  if (process_memory_total() > 0) {
    (*m_periodic_output_file_ptr) << "resident_ratio: " << process_memory_resident()/process_memory_total() << endl;
  }
  (*m_periodic_output_file_ptr) << "miss_latency: " << m_allMissLatencyHistogram << endl;

  *m_periodic_output_file_ptr << endl;

  if (PROFILE_ALL_INSTRUCTIONS) {
    m_inst_profiler_ptr->printStats(*m_periodic_output_file_ptr);
  }

  //g_system_ptr->getNetwork()->printStats(*m_periodic_output_file_ptr);
  g_eventQueue_ptr->scheduleEvent(this, m_stats_period);
}

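// Redirect the periodic statistics dump to the named file and schedule the
// first wakeup. Any previously opened stats file is closed and freed here.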
void Profiler::setPeriodicStatsFile(const string& filename)
{
  cout << "Recording periodic statistics to file '" << filename << "' every "
       << m_stats_period << " Ruby cycles" << endl;

  if (m_periodic_output_file_ptr != &cerr) {
    delete m_periodic_output_file_ptr;
  }

  m_periodic_output_file_ptr = new ofstream(filename.c_str());
  g_eventQueue_ptr->scheduleEvent(this, 1);
}

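// Change the interval between periodic statistics dumps and schedule the
// next wakeup.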
void Profiler::setPeriodicStatsInterval(integer_t period)
{
  m_stats_period = period;
  cout << "Recording periodic statistics every " << m_stats_period << " Ruby cycles" << endl;
  g_eventQueue_ptr->scheduleEvent(this, 1);
}

void Profiler::printConfig(ostream& out) const
{
  out << endl;
  out << "Profiler Configuration" << endl;
  out << "----------------------" << endl;
  out << "periodic_stats_period: " << m_stats_period << endl;
}

void Profiler::print(ostream& out) const
{
  out << "[Profiler]";
}

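// Emit the full statistics report (or an abbreviated one when short_stats is
// true): elapsed real and virtual time, per-processor miss, instruction, and
// cycle counts, cache and memory-controller profiles, transactional-memory
// statistics, and finally host resource usage.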
void Profiler::printStats(ostream& out, bool short_stats)
{
  out << endl;
  if (short_stats) {
    out << "SHORT ";
  }
  out << "Profiler Stats" << endl;
  out << "--------------" << endl;

  time_t real_time_current = time(NULL);
  double seconds = difftime(real_time_current, m_real_time_start_time);
  double minutes = seconds/60.0;
  double hours = minutes/60.0;
  double days = hours/24.0;
  Time ruby_cycles = g_eventQueue_ptr->getTime()-m_ruby_start;

  if (!short_stats) {
    out << "Elapsed_time_in_seconds: " << seconds << endl;
    out << "Elapsed_time_in_minutes: " << minutes << endl;
    out << "Elapsed_time_in_hours: " << hours << endl;
    out << "Elapsed_time_in_days: " << days << endl;
    out << endl;
  }

  // print the virtual runtimes as well
  struct tms vtime;
  times(&vtime);
  seconds = (vtime.tms_utime + vtime.tms_stime) / 100.0;
  minutes = seconds / 60.0;
  hours = minutes / 60.0;
  days = hours / 24.0;
  out << "Virtual_time_in_seconds: " << seconds << endl;
  out << "Virtual_time_in_minutes: " << minutes << endl;
  out << "Virtual_time_in_hours:   " << hours << endl;
  out << "Virtual_time_in_days:    " << days << endl;
  out << endl;

  out << "Ruby_current_time: " << g_eventQueue_ptr->getTime() << endl;
  out << "Ruby_start_time: " << m_ruby_start << endl;
  out << "Ruby_cycles: " << ruby_cycles << endl;
  out << endl;

  if (!short_stats) {
    out << "mbytes_resident: " << process_memory_resident() << endl;
    out << "mbytes_total: " << process_memory_total() << endl;
    if (process_memory_total() > 0) {
      out << "resident_ratio: " << process_memory_resident()/process_memory_total() << endl;
    }
    out << endl;

    if(m_num_BA_broadcasts + m_num_BA_unicasts != 0){
      out << endl;
      out << "Broadcast_percent: " << (float)m_num_BA_broadcasts/(m_num_BA_broadcasts+m_num_BA_unicasts) << endl;
    }
  }

  Vector<integer_t> perProcInstructionCount;
  Vector<integer_t> perProcCycleCount;
  Vector<double> perProcCPI;
  Vector<double> perProcMissesPerInsn;
  Vector<double> perProcInsnPerTrans;
  Vector<double> perProcCyclesPerTrans;
  Vector<double> perProcMissesPerTrans;

  perProcInstructionCount.setSize(RubyConfig::numberOfProcessors());
  perProcCycleCount.setSize(RubyConfig::numberOfProcessors());
  perProcCPI.setSize(RubyConfig::numberOfProcessors());
  perProcMissesPerInsn.setSize(RubyConfig::numberOfProcessors());

  perProcInsnPerTrans.setSize(RubyConfig::numberOfProcessors());
  perProcCyclesPerTrans.setSize(RubyConfig::numberOfProcessors());
  perProcMissesPerTrans.setSize(RubyConfig::numberOfProcessors());

  for(int i=0; i < RubyConfig::numberOfProcessors(); i++) {
    perProcInstructionCount[i] = g_system_ptr->getDriver()->getInstructionCount(i) - m_instructions_executed_at_start[i] + 1;
    perProcCycleCount[i] = g_system_ptr->getDriver()->getCycleCount(i) - m_cycles_executed_at_start[i] + 1;
    // The +1 allows us to avoid division by zero
    perProcCPI[i] = double(ruby_cycles)/perProcInstructionCount[i];
    perProcMissesPerInsn[i] = 1000.0 * (double(m_perProcTotalMisses[i]) / double(perProcInstructionCount[i]));

    int trans = m_perProcEndTransaction[i];
    if (trans == 0) {
      perProcInsnPerTrans[i] = 0;
      perProcCyclesPerTrans[i] = 0;
      perProcMissesPerTrans[i] = 0;
    } else {
      perProcInsnPerTrans[i] = perProcInstructionCount[i] / double(trans);
      perProcCyclesPerTrans[i] = ruby_cycles / double(trans);
      perProcMissesPerTrans[i] = m_perProcTotalMisses[i] / double(trans);
    }
  }

  integer_t total_misses = m_perProcTotalMisses.sum();
  integer_t user_misses = m_perProcUserMisses.sum();
  integer_t supervisor_misses = m_perProcSupervisorMisses.sum();
  integer_t instruction_executed = perProcInstructionCount.sum();
  integer_t simics_cycles_executed = perProcCycleCount.sum();
  integer_t transactions_started = m_perProcStartTransaction.sum();
  integer_t transactions_ended = m_perProcEndTransaction.sum();

  double instructions_per_transaction = (transactions_ended != 0) ? double(instruction_executed) / double(transactions_ended) : 0;
  double cycles_per_transaction = (transactions_ended != 0) ? (RubyConfig::numberOfProcessors() * double(ruby_cycles)) / double(transactions_ended) : 0;
  double misses_per_transaction = (transactions_ended != 0) ? double(total_misses) / double(transactions_ended) : 0;

  out << "Total_misses: " << total_misses << endl;
  out << "total_misses: " << total_misses << " " << m_perProcTotalMisses << endl;
  out << "user_misses: " << user_misses << " " << m_perProcUserMisses << endl;
  out << "supervisor_misses: " << supervisor_misses << " " << m_perProcSupervisorMisses << endl;
  out << endl;
  out << "instruction_executed: " << instruction_executed << " " << perProcInstructionCount << endl;
  out << "simics_cycles_executed: " << simics_cycles_executed << " " << perProcCycleCount << endl;
  out << "cycles_per_instruction: " << (RubyConfig::numberOfProcessors()*double(ruby_cycles))/double(instruction_executed) << " " << perProcCPI << endl;
  out << "misses_per_thousand_instructions: " << 1000.0 * (double(total_misses) / double(instruction_executed)) << " " << perProcMissesPerInsn << endl;
  out << endl;
  out << "transactions_started: " << transactions_started << " " << m_perProcStartTransaction << endl;
  out << "transactions_ended: " << transactions_ended << " " << m_perProcEndTransaction << endl;
  out << "instructions_per_transaction: " << instructions_per_transaction << " " << perProcInsnPerTrans << endl;
  out << "cycles_per_transaction: " << cycles_per_transaction  << " " << perProcCyclesPerTrans << endl;
  out << "misses_per_transaction: " << misses_per_transaction << " " << perProcMissesPerTrans << endl;

  out << endl;

  m_L1D_cache_profiler_ptr->printStats(out);
  m_L1I_cache_profiler_ptr->printStats(out);
  m_L2_cache_profiler_ptr->printStats(out);

  out << endl;

  if (m_memReq || m_memRefresh) {    // if there's a memory controller at all
    long long int total_stalls = m_memInputQ + m_memBankQ + m_memWaitCycles;
    double stallsPerReq = total_stalls * 1.0 / m_memReq;
    out << "Memory control:" << endl;
    out << "  memory_total_requests: " << m_memReq << endl;  // does not include refreshes
    out << "  memory_reads: " << m_memRead << endl;
    out << "  memory_writes: " << m_memWrite << endl;
    out << "  memory_refreshes: " << m_memRefresh << endl;
    out << "  memory_total_request_delays: " << total_stalls << endl;
    out << "  memory_delays_per_request: " << stallsPerReq << endl;
    out << "  memory_delays_in_input_queue: " << m_memInputQ << endl;
    out << "  memory_delays_behind_head_of_bank_queue: " << m_memBankQ << endl;
    out << "  memory_delays_stalled_at_head_of_bank_queue: " << m_memWaitCycles << endl;
    // Note:  The following "memory stalls" entries are a breakdown of the
    // cycles which already showed up in m_memWaitCycles.  The order is
    // significant; it is the priority of attributing the cycles.
    // For example, bank_busy is before arbitration because if the bank was
    // busy, we didn't even check arbitration.
    // Note:  "not old enough" means that since we grouped waiting heads-of-queues
    // into batches to avoid starvation, a request in a newer batch
    // didn't try to arbitrate yet because there are older requests waiting.
    out << "  memory_stalls_for_bank_busy: " << m_memBankBusy << endl;
    out << "  memory_stalls_for_random_busy: " << m_memRandBusy << endl;
    out << "  memory_stalls_for_anti_starvation: " << m_memNotOld << endl;
    out << "  memory_stalls_for_arbitration: " << m_memArbWait << endl;
    out << "  memory_stalls_for_bus: " << m_memBusBusy << endl;
    out << "  memory_stalls_for_tfaw: " << m_memTfawBusy << endl;
    out << "  memory_stalls_for_read_write_turnaround: " << m_memReadWriteBusy << endl;
    out << "  memory_stalls_for_read_read_turnaround: " << m_memDataBusBusy << endl;
    out << "  accesses_per_bank: ";
    for (int bank=0; bank < m_memBankCount.size(); bank++) {
      out << m_memBankCount[bank] << "  ";
      //if ((bank % 8) == 7) out << "                     " << endl;
    }
    out << endl;
    out << endl;
  }

  if (!short_stats) {
    out << "Busy Controller Counts:" << endl;
    for(int i=0; i < MachineType_NUM; i++) {
      for(int j=0; j < MachineType_base_count((MachineType)i); j++) {
        MachineID machID;
        machID.type = (MachineType)i;
        machID.num = j;
        out << machID << ":" << m_busyControllerCount[i][j] << "  ";
        if ((j+1)%8 == 0) {
          out << endl;
        }
      }
      out << endl;
    }
    out << endl;

    out << "Busy Bank Count:" << m_busyBankCount << endl;
    out << endl;

    out << "L1TBE_usage: " << m_L1tbeProfile << endl;
    out << "L2TBE_usage: " << m_L2tbeProfile << endl;
    out << "StopTable_usage: " << m_stopTableProfile << endl;
    out << "sequencer_requests_outstanding: " << m_sequencer_requests << endl;
    out << "store_buffer_size: " << m_store_buffer_size << endl;
    out << "unique_blocks_in_store_buffer: " << m_store_buffer_blocks << endl;
    out << endl;
  }

  if (!short_stats) {
    out << "All Non-Zero Cycle Demand Cache Accesses" << endl;
    out << "----------------------------------------" << endl;
    out << "miss_latency: " << m_allMissLatencyHistogram << endl;
    for(int i=0; i<m_missLatencyHistograms.size(); i++) {
      if (m_missLatencyHistograms[i].size() > 0) {
        out << "miss_latency_" << CacheRequestType(i) << ": " << m_missLatencyHistograms[i] << endl;
      }
    }
    for(int i=0; i<m_machLatencyHistograms.size(); i++) {
      if (m_machLatencyHistograms[i].size() > 0) {
        out << "miss_latency_" << GenericMachineType(i) << ": " << m_machLatencyHistograms[i] << endl;
      }
    }
    out << "miss_latency_L2Miss: " << m_L2MissLatencyHistogram << endl;

    out << endl;

    out << "All Non-Zero Cycle SW Prefetch Requests" << endl;
    out << "------------------------------------" << endl;
    out << "prefetch_latency: " << m_allSWPrefetchLatencyHistogram << endl;
    for(int i=0; i<m_SWPrefetchLatencyHistograms.size(); i++) {
      if (m_SWPrefetchLatencyHistograms[i].size() > 0) {
        out << "prefetch_latency_" << CacheRequestType(i) << ": " << m_SWPrefetchLatencyHistograms[i] << endl;
      }
    }
    for(int i=0; i<m_SWPrefetchMachLatencyHistograms.size(); i++) {
      if (m_SWPrefetchMachLatencyHistograms[i].size() > 0) {
        out << "prefetch_latency_" << GenericMachineType(i) << ": " << m_SWPrefetchMachLatencyHistograms[i] << endl;
      }
    }
    out << "prefetch_latency_L2Miss: " << m_SWPrefetchL2MissLatencyHistogram << endl;

    out << "multicast_retries: " << m_multicast_retry_histogram << endl;
    out << "gets_mask_prediction_count: " << m_gets_mask_prediction << endl;
    out << "getx_mask_prediction_count: " << m_getx_mask_prediction << endl;
    out << "explicit_training_mask: " << m_explicit_training_mask << endl;
    out << endl;

    if (m_all_sharing_histogram.size() > 0) {
      out << "all_sharing: " << m_all_sharing_histogram << endl;
      out << "read_sharing: " << m_read_sharing_histogram << endl;
      out << "write_sharing: " << m_write_sharing_histogram << endl;

      out << "all_sharing_percent: "; m_all_sharing_histogram.printPercent(out); out << endl;
      out << "read_sharing_percent: "; m_read_sharing_histogram.printPercent(out); out << endl;
      out << "write_sharing_percent: "; m_write_sharing_histogram.printPercent(out); out << endl;

      int64 total_miss = m_cache_to_cache +  m_memory_to_cache;
      out << "all_misses: " << total_miss << endl;
      out << "cache_to_cache_misses: " << m_cache_to_cache << endl;
      out << "memory_to_cache_misses: " << m_memory_to_cache << endl;
      out << "cache_to_cache_percent: " << 100.0 * (double(m_cache_to_cache) / double(total_miss)) << endl;
      out << "memory_to_cache_percent: " << 100.0 * (double(m_memory_to_cache) / double(total_miss)) << endl;
      out << endl;
    }

    if (m_conflicting_histogram.size() > 0) {
      out << "conflicting_histogram: " << m_conflicting_histogram << endl;
      out << "conflicting_histogram_percent: "; m_conflicting_histogram.printPercent(out); out << endl;
      out << endl;
    }

    if (m_outstanding_requests.size() > 0) {
      out << "outstanding_requests: "; m_outstanding_requests.printPercent(out); out << endl;
      if (m_outstanding_persistent_requests.size() > 0) {
        out << "outstanding_persistent_requests: "; m_outstanding_persistent_requests.printPercent(out); out << endl;
      }
      out << endl;
    }
  }

  if (XACT_MEMORY){
    // Transactional Memory stats
    out << "Transactional Memory Stats:" << endl;
    out << "------- xact --------" << endl;
    out << "xact_size_dist: " << m_xactSizes << endl;
    out << "xact_instr_count: " << m_xactInstrCount << endl;
    out << "xact_time_dist: " << m_xactCycles << endl;
    out << "xact_log_size_dist: " << m_xactLogs << endl;
    out << "xact_read_set_size_dist: " << m_xactReads << endl;
    out << "xact_write_set_size_dist: " << m_xactWrites << endl;
    out << "xact_overflow_read_lines_dist: " << m_xactOverflowReads << endl;
    out << "xact_overflow_write_lines_dist: " << m_xactOverflowWrites << endl;
    out << "xact_overflow_read_set_size_dist: " << m_xactOverflowTotalReads << endl;
    out << "xact_overflow_write_set_size_dist: " << m_xactOverflowTotalWrites << endl;
    out << "xact_miss_load_dist: " << m_xactLoadMisses << endl;
    out << "xact_miss_store_dist: " << m_xactStoreMisses << endl;
    out << "xact_nacked: " << m_xactNacked << endl;
    out << "xact_retries:        " << m_xactRetries << endl;
    out << "xact_abort_delays: " << m_abortDelays << endl;
    out << "xact_aborts:        " << m_transactionAborts << endl;
    if (ATMTP_ENABLED) {
      out << "xact_log_overflows: " << m_transactionLogOverflows << endl;
      out << "xact_cache_overflows: " << m_transactionCacheOverflows << endl;
      out << "xact_unsup_inst_aborts: "   << m_transactionUnsupInsts << endl;
      out << "xact_save_rest_aborts: "   << m_transactionSaveRestAborts << endl;
    }
    out << "xact_writebacks:    " << m_transWBs << endl;
    out << "xact_extra_wbs:    " << m_extraWBs << endl;
    out << "xact_handler_startup_delay: " << m_abortStarupDelay << endl;
    out << "xact_handler_per_block_delay: " << m_abortPerBlockDelay << endl;
    out << "xact_inferred_aborts: " << m_inferredAborts << endl;
    //out << "xact_histogram: " << m_procsInXact << endl;

    if (!short_stats) {
      Vector<int> nackedXIDKeys = m_nackXIDMap_ptr->keys();
      nackedXIDKeys.sortVector();
      out << endl;
      int total_nacks = 0;
      out << "------- xact Nacks by XID --------" << endl;
      for(int i=0; i<nackedXIDKeys.size(); i++) {
        int key = nackedXIDKeys[i];
        int count = m_nackXIDMap_ptr->lookup(key);
        total_nacks += count;
        out << "xact " << key << " "
          << setw(6) << dec << count
          << endl;
      }
      out << "Total Nacks: " << total_nacks << endl;
      out << "---------------" << endl;
      out << endl;

      // Print XID Nack Pairs
      Vector<int> nackedXIDPairKeys = m_nackXIDPairMap_ptr->keys();
      nackedXIDPairKeys.sortVector();
      out << endl;
      total_nacks = 0;
      out << "------- xact Nacks by XID Pairs --------" << endl;
      for(int i=0; i<nackedXIDPairKeys.size(); i++) {
        int key = nackedXIDPairKeys[i];
        Map<int, int> * my_map  = m_nackXIDPairMap_ptr->lookup(key);
        Vector<int> my_keys = my_map->keys();
        my_keys.sortVector();
        for(int j=0; j<my_keys.size(); j++){
          int nid = my_keys[j];
          int count = my_map->lookup(nid);
          total_nacks += count;
          out << "xact " << key << " nacked by xact " <<  nid << " "
              << setw(6) << dec << count
              << endl;
        }
      }
      out << "Total Nacks: " << total_nacks << endl;
      out << "---------------" << endl;
      out << endl;


      Vector<Address> nackedPCKeys = m_nackPCMap_ptr->keys();
      nackedPCKeys.sortVector();
      out << endl;
      out << "------- xact Nacks by PC --------" << endl;
      for(int i=0; i<nackedPCKeys.size(); i++) {
        Address key = nackedPCKeys[i];
        int count = m_nackPCMap_ptr->lookup(key);
        out << "xact_Nack " << key << " "
          << setw(4) << dec << count
          << endl;
      }
      out << "---------------" << endl;
      out << endl;


      Vector<int> xactExceptionKeys = m_xactExceptionMap_ptr->keys();
      xactExceptionKeys.sortVector();
      out << "------- xact exceptions --------" << endl;
      for(int i=0; i<xactExceptionKeys.size(); i++) {
        int key = xactExceptionKeys[i];
        int count = m_xactExceptionMap_ptr->lookup(key);
        out << "xact_exception("
          << hex << key << "):"
          << setw(4) << dec << count
          << endl;
      }
      out << endl;
      out << "---------------" << endl;
      out << endl;

      Vector<int> abortIDKeys = m_abortIDMap_ptr->keys();
      abortIDKeys.sortVector();
      out << "------- xact abort by XID --------" << endl;
      for(int i=0; i<abortIDKeys.size(); i++) {
        int count = m_abortIDMap_ptr->lookup(abortIDKeys[i]);
        out << "xact_aborts("
          << dec << abortIDKeys[i] << "):"
          << setw(7) << count
          << endl;
      }
      out << endl;
      out << "---------------" << endl;
      out << endl;

      Vector<Address> abortedPCKeys = m_abortPCMap_ptr->keys();
      abortedPCKeys.sortVector();
      out << endl;
      out << "------- xact Aborts by PC --------" << endl;
      for(int i=0; i<abortedPCKeys.size(); i++) {
        Address key = abortedPCKeys[i];
        int count = m_abortPCMap_ptr->lookup(key);
        out << "xact_abort_pc " << key
          << setw(4) << dec << count
          << endl;
      }
      out << "---------------" << endl;
      out << endl;

      Vector<Address> abortedAddrKeys = m_abortAddressMap_ptr->keys();
      abortedAddrKeys.sortVector();
      out << endl;
      out << "------- xact Aborts by Address --------" << endl;
      for(int i=0; i<abortedAddrKeys.size(); i++) {
        Address key = abortedAddrKeys[i];
        int count = m_abortAddressMap_ptr->lookup(key);
        out << "xact_abort_address " << key
          << setw(4) << dec << count
          << endl;
      }
      out << "---------------" << endl;
      out << endl;
    } // !short_stats

    Vector<int> commitIDKeys = m_commitIDMap_ptr->keys();
    commitIDKeys.sortVector();
    out << "------- xact Commit Stats by XID --------" << endl;
    for(int i=0; i<commitIDKeys.size(); i++) {
      int count = m_commitIDMap_ptr->lookup(commitIDKeys[i]);
      double retry_count = (double)m_xactRetryIDMap_ptr->lookup(commitIDKeys[i]) / count;
      double cycles_count = (double)m_xactCyclesIDMap_ptr->lookup(commitIDKeys[i]) / count;
      double readset_count = (double)m_xactReadSetIDMap_ptr->lookup(commitIDKeys[i]) / count;
      double writeset_count = (double)m_xactWriteSetIDMap_ptr->lookup(commitIDKeys[i]) / count;
      double loadmiss_count = (double)m_xactLoadMissIDMap_ptr->lookup(commitIDKeys[i]) / count;
      double storemiss_count = (double)m_xactStoreMissIDMap_ptr->lookup(commitIDKeys[i]) / count;
      double instr_count = (double)m_xactInstrCountIDMap_ptr->lookup(commitIDKeys[i]) / count;
      out << "xact_stats id: "
          << dec << commitIDKeys[i]
          << " count: " << setw(7) << count
          << " Cycles: " << setw(7) << cycles_count
          << " Instr: " << setw(7) << instr_count
          << " ReadSet: " << setw(7) << readset_count
          << " WriteSet: " << setw(7) << writeset_count
          << " LoadMiss: " << setw(7) << loadmiss_count
          << " StoreMiss: " << setw(7) << storemiss_count
          << " Retry Count: " << setw(7) << retry_count
          << endl;
    }
    out << endl;
    out << "---------------" << endl;
    out << endl;

    if (!short_stats) {
      Vector<int> procsInXactKeys = m_procsInXactMap_ptr->keys();
      procsInXactKeys.sortVector();
      out << "------- xact histogram --------" << endl;
      for(int i=0; i<procsInXactKeys.size(); i++) {
        int count = m_procsInXactMap_ptr->lookup(procsInXactKeys[i]);
        int key = procsInXactKeys[i];
        out << "xact_histogram("
            << dec << key << "):"
            << setw(8) << count
            << endl;
      }
      out << endl;
      out << "---------------" << endl;
      out << endl;

      // Read/Write set Bloom filter stats
      //int false_reads = 0;
      long long int false_reads = m_readSetNoMatch;
      Vector<Address> fp_read_keys = m_readSetNoMatch_ptr->keys();
      out << "------- xact read set false positives -------" << endl;
      for(int i=0; i < fp_read_keys.size(); ++i){
        int count = m_readSetNoMatch_ptr->lookup(fp_read_keys[i]);
        //out << "read_false_positive( " << fp_read_keys[i] << " ): "
        //   << setw(8) << dec << count << endl;
        false_reads += count;
      }
      out << "Total read set false positives : " << setw(8) << false_reads << endl;
      out << "-----------------------" << endl;
      out << endl;

      //int matching_reads = 0;
      long long int matching_reads = m_readSetMatch;
      long long int empty_checks = m_readSetEmptyChecks;
      Vector<Address> read_keys = m_readSetMatch_ptr->keys();
      out << "------- xact read set matches -------" << endl;
      for(int i=0; i < read_keys.size(); ++i){
        int count = m_readSetMatch_ptr->lookup(read_keys[i]);
        //out << "read_match( " << read_keys[i] << " ): "
        //    << setw(8) << dec << count << endl;
        matching_reads += count;
      }
      out << "Total read set matches : " << setw(8) << matching_reads << endl;
      out << "Total read set empty checks : " << setw(8) << empty_checks << endl;
      double false_positive_pct = 0.0;
      if((false_reads + matching_reads)> 0){
        false_positive_pct = (1.0*false_reads)/(false_reads+matching_reads)*100.0;
      }
      out << "Read set false positives rate : " << false_positive_pct << "%" << endl;
      out << "-----------------------" << endl;
      out << endl;

      // for write set
      //int false_writes = 0;
      long long int false_writes = m_writeSetNoMatch;
      Vector<Address> fp_write_keys = m_writeSetNoMatch_ptr->keys();
      out << "------- xact write set false positives -------" << endl;
      for(int i=0; i < fp_write_keys.size(); ++i){
        int count = m_writeSetNoMatch_ptr->lookup(fp_write_keys[i]);
        //out << "write_false_positive( " << fp_write_keys[i] << " ): "
        //   << setw(8) << dec << count << endl;
        false_writes += count;
      }
      out << "Total write set false positives : " << setw(8) << false_writes << endl;
      out << "-----------------------" << endl;
      out << endl;

      //int matching_writes = 0;
      long long int matching_writes = m_writeSetMatch;
      empty_checks = m_writeSetEmptyChecks;
      Vector<Address> write_keys = m_writeSetMatch_ptr->keys();
      out << "------- xact write set matches -------" << endl;
      for(int i=0; i < write_keys.size(); ++i){
        int count = m_writeSetMatch_ptr->lookup(write_keys[i]);
        //out << "write_match( " << write_keys[i] << " ): "
        //    << setw(8) << dec << count << endl;
        matching_writes += count;
      }
      out << "Total write set matches : " << setw(8) << matching_writes << endl;
      out << "Total write set empty checks : " << setw(8) << empty_checks << endl;
      false_positive_pct = 0.0;
      if((matching_writes+false_writes) > 0){
        false_positive_pct = (1.0*false_writes)/(false_writes+matching_writes)*100.0;
      }
      out << "Write set false positives rate : " << false_positive_pct << "%" << endl;
      out << "-----------------------" << endl;
      out << endl;

      out << "----- Xact Signature Stats ------" << endl;
      Vector<int> xids = m_xactReadFilterBitsSetOnCommit->keys();
      for(int i=0; i < xids.size(); ++i){
        int xid = xids[i];
        out << "xid " << xid << " Read set bits set on commit: " << (m_xactReadFilterBitsSetOnCommit->lookup(xid)) << endl;
      }
      xids = m_xactWriteFilterBitsSetOnCommit->keys();
      for(int i=0; i < xids.size(); ++i){
        int xid = xids[i];
        out << "xid " << xid << " Write set bits set on commit: " << (m_xactWriteFilterBitsSetOnCommit->lookup(xid)) << endl;
      }
      xids = m_xactReadFilterBitsSetOnAbort->keys();
      for(int i=0; i < xids.size(); ++i){
        int xid = xids[i];
        out << "xid " << xid << " Read set bits set on abort: " << (m_xactReadFilterBitsSetOnAbort->lookup(xid)) << endl;
      }
      xids = m_xactWriteFilterBitsSetOnAbort->keys();
      for(int i=0; i < xids.size(); ++i){
        int xid = xids[i];
        out << "xid " << xid << " Write set bits set on abort: " << (m_xactWriteFilterBitsSetOnAbort->lookup(xid)) << endl;
      }
      out << endl;

      cout << "------- WATCHPOINTS --------" << endl;
      cout << "False Triggers : " << m_watchpointsFalsePositiveTrigger << endl;
      cout << "True  Triggers : " << m_watchpointsTrueTrigger << endl;
      cout << "Total Triggers : " << m_watchpointsTrueTrigger + m_watchpointsFalsePositiveTrigger << endl;
      cout << "---------------" << endl;
      cout << endl;
    } // !short_stats
    //m_xact_profiler_ptr->printStats(out, short_stats); // gem5:Arka for decommissioning of log_tm
  } // XACT_MEMORY

  if (!short_stats) {
    out << "Request vs. System State Profile" << endl;
    out << "--------------------------------" << endl;
    out << endl;

    Vector<string> requestProfileKeys = m_requestProfileMap_ptr->keys();
    requestProfileKeys.sortVector();

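    // Each key is a ':'-separated composite of request and state names;
    // string_split() (from util.hh) is assumed here to peel off and consume
    // one field per call, so the loop below prints one column per component.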
8296145Snate@binkert.org    for(int i=0; i<requestProfileKeys.size(); i++) {
8306145Snate@binkert.org      int temp_int = m_requestProfileMap_ptr->lookup(requestProfileKeys[i]);
8316145Snate@binkert.org      double percent = (100.0*double(temp_int))/double(m_requests);
8326145Snate@binkert.org      while (requestProfileKeys[i] != "") {
8336145Snate@binkert.org        out << setw(10) << string_split(requestProfileKeys[i], ':');
8346145Snate@binkert.org      }
8356145Snate@binkert.org      out << setw(11) << temp_int;
8366145Snate@binkert.org      out << setw(14) << percent << endl;
8376145Snate@binkert.org    }
8386145Snate@binkert.org    out << endl;
8396145Snate@binkert.org
8406145Snate@binkert.org    out << "filter_action: " << m_filter_action_histogram << endl;
8416145Snate@binkert.org
8426145Snate@binkert.org    if (!PROFILE_ALL_INSTRUCTIONS) {
8436145Snate@binkert.org      m_address_profiler_ptr->printStats(out);
8446145Snate@binkert.org    }
8456145Snate@binkert.org
8466145Snate@binkert.org    if (PROFILE_ALL_INSTRUCTIONS) {
8476145Snate@binkert.org      m_inst_profiler_ptr->printStats(out);
8486145Snate@binkert.org    }
8496145Snate@binkert.org
8506145Snate@binkert.org    out << endl;
8516145Snate@binkert.org    out << "Message Delayed Cycles" << endl;
8526145Snate@binkert.org    out << "----------------------" << endl;
8536145Snate@binkert.org    out << "Total_delay_cycles: " <<   m_delayedCyclesHistogram << endl;
8546145Snate@binkert.org    out << "Total_nonPF_delay_cycles: " << m_delayedCyclesNonPFHistogram << endl;
8556145Snate@binkert.org    for (int i = 0; i < m_delayedCyclesVCHistograms.size(); i++) {
8566145Snate@binkert.org      out << "  virtual_network_" << i << "_delay_cycles: " << m_delayedCyclesVCHistograms[i] << endl;
8576145Snate@binkert.org    }
8586145Snate@binkert.org
8596145Snate@binkert.org    printResourceUsage(out);
8606145Snate@binkert.org  }
8616145Snate@binkert.org
8626145Snate@binkert.org}
8636145Snate@binkert.org
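// Reports host-side resource usage of the simulator process itself (page size,
// user/system time, page faults, swaps, block I/O) via getpagesize() and getrusage().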
8646145Snate@binkert.orgvoid Profiler::printResourceUsage(ostream& out) const
8656145Snate@binkert.org{
8666145Snate@binkert.org  out << endl;
8676145Snate@binkert.org  out << "Resource Usage" << endl;
8686145Snate@binkert.org  out << "--------------" << endl;
8696145Snate@binkert.org
8706145Snate@binkert.org  integer_t pagesize = getpagesize(); // page size in bytes
8716145Snate@binkert.org  out << "page_size: " << pagesize << endl;
8726145Snate@binkert.org
8736145Snate@binkert.org  rusage usage;
8746145Snate@binkert.org  getrusage (RUSAGE_SELF, &usage);
8756145Snate@binkert.org
8766145Snate@binkert.org  out << "user_time: " << usage.ru_utime.tv_sec << endl;
8776145Snate@binkert.org  out << "system_time: " << usage.ru_stime.tv_sec << endl;
8786145Snate@binkert.org  out << "page_reclaims: " << usage.ru_minflt << endl;
8796145Snate@binkert.org  out << "page_faults: " << usage.ru_majflt << endl;
8806145Snate@binkert.org  out << "swaps: " << usage.ru_nswap << endl;
8816145Snate@binkert.org  out << "block_inputs: " << usage.ru_inblock << endl;
8826145Snate@binkert.org  out << "block_outputs: " << usage.ru_oublock << endl;
8836145Snate@binkert.org}
8846145Snate@binkert.org
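// Resets every statistic the profiler gathers and records the current event-queue
// time as the start of the new measurement interval.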
8856145Snate@binkert.orgvoid Profiler::clearStats()
8866145Snate@binkert.org{
8876145Snate@binkert.org  m_num_BA_unicasts = 0;
8886145Snate@binkert.org  m_num_BA_broadcasts = 0;
8896145Snate@binkert.org
8906145Snate@binkert.org  m_ruby_start = g_eventQueue_ptr->getTime();
8916145Snate@binkert.org
8926145Snate@binkert.org  m_instructions_executed_at_start.setSize(RubyConfig::numberOfProcessors());
8936145Snate@binkert.org  m_cycles_executed_at_start.setSize(RubyConfig::numberOfProcessors());
8946145Snate@binkert.org  for (int i=0; i < RubyConfig::numberOfProcessors(); i++) {
8956145Snate@binkert.org    if (g_system_ptr == NULL) {
8966145Snate@binkert.org      m_instructions_executed_at_start[i] = 0;
8976145Snate@binkert.org      m_cycles_executed_at_start[i] = 0;
8986145Snate@binkert.org    } else {
8996145Snate@binkert.org      m_instructions_executed_at_start[i] = g_system_ptr->getDriver()->getInstructionCount(i);
9006145Snate@binkert.org      m_cycles_executed_at_start[i] = g_system_ptr->getDriver()->getCycleCount(i);
9016145Snate@binkert.org    }
9026145Snate@binkert.org  }
9036145Snate@binkert.org
9046145Snate@binkert.org  m_perProcTotalMisses.setSize(RubyConfig::numberOfProcessors());
9056145Snate@binkert.org  m_perProcUserMisses.setSize(RubyConfig::numberOfProcessors());
9066145Snate@binkert.org  m_perProcSupervisorMisses.setSize(RubyConfig::numberOfProcessors());
9076145Snate@binkert.org  m_perProcStartTransaction.setSize(RubyConfig::numberOfProcessors());
9086145Snate@binkert.org  m_perProcEndTransaction.setSize(RubyConfig::numberOfProcessors());
9096145Snate@binkert.org
9106145Snate@binkert.org  for(int i=0; i < RubyConfig::numberOfProcessors(); i++) {
9116145Snate@binkert.org    m_perProcTotalMisses[i] = 0;
9126145Snate@binkert.org    m_perProcUserMisses[i] = 0;
9136145Snate@binkert.org    m_perProcSupervisorMisses[i] = 0;
9146145Snate@binkert.org    m_perProcStartTransaction[i] = 0;
9156145Snate@binkert.org    m_perProcEndTransaction[i] = 0;
9166145Snate@binkert.org  }
9176145Snate@binkert.org
9186145Snate@binkert.org  m_busyControllerCount.setSize(MachineType_NUM); // all machines
9196145Snate@binkert.org  for(int i=0; i < MachineType_NUM; i++) {
9206145Snate@binkert.org    m_busyControllerCount[i].setSize(MachineType_base_count((MachineType)i));
9216145Snate@binkert.org    for(int j=0; j < MachineType_base_count((MachineType)i); j++) {
9226145Snate@binkert.org      m_busyControllerCount[i][j] = 0;
9236145Snate@binkert.org    }
9246145Snate@binkert.org  }
9256145Snate@binkert.org  m_busyBankCount = 0;
9266145Snate@binkert.org
9276145Snate@binkert.org  m_delayedCyclesHistogram.clear();
9286145Snate@binkert.org  m_delayedCyclesNonPFHistogram.clear();
9296145Snate@binkert.org  m_delayedCyclesVCHistograms.setSize(NUMBER_OF_VIRTUAL_NETWORKS);
9306145Snate@binkert.org  for (int i = 0; i < NUMBER_OF_VIRTUAL_NETWORKS; i++) {
9316145Snate@binkert.org    m_delayedCyclesVCHistograms[i].clear();
9326145Snate@binkert.org  }
9336145Snate@binkert.org
9346145Snate@binkert.org  m_gets_mask_prediction.clear();
9356145Snate@binkert.org  m_getx_mask_prediction.clear();
9366145Snate@binkert.org  m_explicit_training_mask.clear();
9376145Snate@binkert.org
9386145Snate@binkert.org  m_missLatencyHistograms.setSize(CacheRequestType_NUM);
9396145Snate@binkert.org  for(int i=0; i<m_missLatencyHistograms.size(); i++) {
9406145Snate@binkert.org    m_missLatencyHistograms[i].clear(200);
9416145Snate@binkert.org  }
9426145Snate@binkert.org  m_machLatencyHistograms.setSize(GenericMachineType_NUM+1);
9436145Snate@binkert.org  for(int i=0; i<m_machLatencyHistograms.size(); i++) {
9446145Snate@binkert.org    m_machLatencyHistograms[i].clear(200);
9456145Snate@binkert.org  }
9466145Snate@binkert.org  m_allMissLatencyHistogram.clear(200);
9476145Snate@binkert.org  m_L2MissLatencyHistogram.clear(200);
9486145Snate@binkert.org
9496145Snate@binkert.org  m_SWPrefetchLatencyHistograms.setSize(CacheRequestType_NUM);
9506145Snate@binkert.org  for(int i=0; i<m_SWPrefetchLatencyHistograms.size(); i++) {
9516145Snate@binkert.org    m_SWPrefetchLatencyHistograms[i].clear(200);
9526145Snate@binkert.org  }
9536145Snate@binkert.org  m_SWPrefetchMachLatencyHistograms.setSize(GenericMachineType_NUM+1);
9546145Snate@binkert.org  for(int i=0; i<m_SWPrefetchMachLatencyHistograms.size(); i++) {
9556145Snate@binkert.org    m_SWPrefetchMachLatencyHistograms[i].clear(200);
9566145Snate@binkert.org  }
9576145Snate@binkert.org  m_allSWPrefetchLatencyHistogram.clear(200);
9586145Snate@binkert.org  m_SWPrefetchL2MissLatencyHistogram.clear(200);
9596145Snate@binkert.org
9606145Snate@binkert.org  m_multicast_retry_histogram.clear();
9616145Snate@binkert.org
9626145Snate@binkert.org  m_L1tbeProfile.clear();
9636145Snate@binkert.org  m_L2tbeProfile.clear();
9646145Snate@binkert.org  m_stopTableProfile.clear();
9656145Snate@binkert.org  m_filter_action_histogram.clear();
9666145Snate@binkert.org
9676145Snate@binkert.org  m_sequencer_requests.clear();
9686145Snate@binkert.org  m_store_buffer_size.clear();
9696145Snate@binkert.org  m_store_buffer_blocks.clear();
9706145Snate@binkert.org  m_read_sharing_histogram.clear();
9716145Snate@binkert.org  m_write_sharing_histogram.clear();
9726145Snate@binkert.org  m_all_sharing_histogram.clear();
9736145Snate@binkert.org  m_cache_to_cache = 0;
9746145Snate@binkert.org  m_memory_to_cache = 0;
9756145Snate@binkert.org
9766145Snate@binkert.org  m_predictions = 0;
9776145Snate@binkert.org  m_predictionOpportunities = 0;
9786145Snate@binkert.org  m_goodPredictions = 0;
9796145Snate@binkert.org
9806145Snate@binkert.org  // clear HashMaps
9816145Snate@binkert.org  m_requestProfileMap_ptr->clear();
9826145Snate@binkert.org
9836145Snate@binkert.org  // count requests profiled
9846145Snate@binkert.org  m_requests = 0;
9856145Snate@binkert.org
9866145Snate@binkert.org  // Conflicting requests
9876145Snate@binkert.org  m_conflicting_map_ptr->clear();
9886145Snate@binkert.org  m_conflicting_histogram.clear();
9896145Snate@binkert.org
9906145Snate@binkert.org  m_outstanding_requests.clear();
9916145Snate@binkert.org  m_outstanding_persistent_requests.clear();
9926145Snate@binkert.org
9936145Snate@binkert.org  m_L1D_cache_profiler_ptr->clearStats();
9946145Snate@binkert.org  m_L1I_cache_profiler_ptr->clearStats();
9956145Snate@binkert.org  m_L2_cache_profiler_ptr->clearStats();
9966145Snate@binkert.org  //m_xact_profiler_ptr->clearStats(); //gem5:Arka for decommissioning of log_tm
9976145Snate@binkert.org
9986145Snate@binkert.org  //---- begin XACT_MEM code
9996145Snate@binkert.org  ASSERT(m_xactExceptionMap_ptr != NULL);
10006145Snate@binkert.org  ASSERT(m_procsInXactMap_ptr != NULL);
10016145Snate@binkert.org  ASSERT(m_abortIDMap_ptr != NULL);
10026145Snate@binkert.org  ASSERT(m_abortPCMap_ptr != NULL);
10036145Snate@binkert.org  ASSERT( m_nackXIDMap_ptr != NULL);
10046145Snate@binkert.org  ASSERT(m_nackPCMap_ptr != NULL);
10056145Snate@binkert.org
10066145Snate@binkert.org  m_abortStarupDelay = -1;
10076145Snate@binkert.org  m_abortPerBlockDelay = -1;
10086145Snate@binkert.org  m_transWBs = 0;
10096145Snate@binkert.org  m_extraWBs = 0;
10106145Snate@binkert.org  m_transactionAborts = 0;
10116145Snate@binkert.org  m_transactionLogOverflows = 0;
10126145Snate@binkert.org  m_transactionCacheOverflows = 0;
10136145Snate@binkert.org  m_transactionUnsupInsts = 0;
10146145Snate@binkert.org  m_transactionSaveRestAborts = 0;
10156145Snate@binkert.org  m_inferredAborts = 0;
10166145Snate@binkert.org  m_xactNacked = 0;
10176145Snate@binkert.org
10186145Snate@binkert.org  m_xactLogs.clear();
10196145Snate@binkert.org  m_xactCycles.clear();
10206145Snate@binkert.org  m_xactReads.clear();
10216145Snate@binkert.org  m_xactWrites.clear();
10226145Snate@binkert.org  m_xactSizes.clear();
10236145Snate@binkert.org  m_abortDelays.clear();
10246145Snate@binkert.org  m_xactRetries.clear();
10256145Snate@binkert.org  m_xactOverflowReads.clear();
10266145Snate@binkert.org  m_xactOverflowWrites.clear();
10276145Snate@binkert.org  m_xactLoadMisses.clear();
10286145Snate@binkert.org  m_xactStoreMisses.clear();
10296145Snate@binkert.org  m_xactOverflowTotalReads.clear();
10306145Snate@binkert.org  m_xactOverflowTotalWrites.clear();
  m_xactInstrCount.clear();
10316145Snate@binkert.org
10326145Snate@binkert.org  m_xactExceptionMap_ptr->clear();
10336145Snate@binkert.org  m_procsInXactMap_ptr->clear();
10346145Snate@binkert.org  m_abortIDMap_ptr->clear();
10356145Snate@binkert.org  m_commitIDMap_ptr->clear();
10366145Snate@binkert.org  m_xactRetryIDMap_ptr->clear();
10376145Snate@binkert.org  m_xactCyclesIDMap_ptr->clear();
10386145Snate@binkert.org  m_xactReadSetIDMap_ptr->clear();
10396145Snate@binkert.org  m_xactWriteSetIDMap_ptr->clear();
10406145Snate@binkert.org  m_xactLoadMissIDMap_ptr->clear();
10416145Snate@binkert.org  m_xactStoreMissIDMap_ptr->clear();
10426145Snate@binkert.org  m_xactInstrCountIDMap_ptr->clear();
10436145Snate@binkert.org  m_abortPCMap_ptr->clear();
10446145Snate@binkert.org  m_abortAddressMap_ptr->clear();
10456145Snate@binkert.org  m_nackXIDMap_ptr->clear();
10466145Snate@binkert.org  m_nackXIDPairMap_ptr->clear();
10476145Snate@binkert.org  m_nackPCMap_ptr->clear();
10486145Snate@binkert.org
10496145Snate@binkert.org  m_xactReadFilterBitsSetOnCommit->clear();
10506145Snate@binkert.org  m_xactReadFilterBitsSetOnAbort->clear();
10516145Snate@binkert.org  m_xactWriteFilterBitsSetOnCommit->clear();
10526145Snate@binkert.org  m_xactWriteFilterBitsSetOnAbort->clear();
10536145Snate@binkert.org
10546145Snate@binkert.org  m_readSetEmptyChecks = 0;
10556145Snate@binkert.org  m_readSetMatch = 0;
10566145Snate@binkert.org  m_readSetNoMatch = 0;
10576145Snate@binkert.org  m_writeSetEmptyChecks = 0;
10586145Snate@binkert.org  m_writeSetMatch = 0;
10596145Snate@binkert.org  m_writeSetNoMatch = 0;
10606145Snate@binkert.org
10616145Snate@binkert.org  m_xact_visualizer_last = 0;
10626145Snate@binkert.org  m_watchpointsFalsePositiveTrigger = 0;
10636145Snate@binkert.org  m_watchpointsTrueTrigger = 0;
10646145Snate@binkert.org  //---- end XACT_MEM code
10656145Snate@binkert.org
10666145Snate@binkert.org  // for MemoryControl:
10676145Snate@binkert.org  m_memReq = 0;
10686145Snate@binkert.org  m_memBankBusy = 0;
10696145Snate@binkert.org  m_memBusBusy = 0;
10706145Snate@binkert.org  m_memTfawBusy = 0;
10716145Snate@binkert.org  m_memReadWriteBusy = 0;
10726145Snate@binkert.org  m_memDataBusBusy = 0;
10736145Snate@binkert.org  m_memRefresh = 0;
10746145Snate@binkert.org  m_memRead = 0;
10756145Snate@binkert.org  m_memWrite = 0;
10766145Snate@binkert.org  m_memWaitCycles = 0;
10776145Snate@binkert.org  m_memInputQ = 0;
10786145Snate@binkert.org  m_memBankQ = 0;
10796145Snate@binkert.org  m_memArbWait = 0;
10806145Snate@binkert.org  m_memRandBusy = 0;
10816145Snate@binkert.org  m_memNotOld = 0;
10826145Snate@binkert.org
10836145Snate@binkert.org  for (int bank=0; bank < m_memBankCount.size(); bank++) {
10846145Snate@binkert.org    m_memBankCount[bank] = 0;
10856145Snate@binkert.org  }
10866145Snate@binkert.org
10876145Snate@binkert.org  // Flush the prefetches through the system - used so that there are no outstanding requests after stats are cleared
10886145Snate@binkert.org  //g_eventQueue_ptr->triggerAllEvents();
10896145Snate@binkert.org
10906145Snate@binkert.org  // update the start time
10916145Snate@binkert.org  m_ruby_start = g_eventQueue_ptr->getTime();
10926145Snate@binkert.org}
10936145Snate@binkert.org
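// Routes a first-level request sample to the L1I or L1D cache profiler when a
// two-level cache protocol is in use; otherwise the sample is charged directly to L2.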
10946145Snate@binkert.orgvoid Profiler::addPrimaryStatSample(const CacheMsg& msg, NodeID id)
10956145Snate@binkert.org{
10966145Snate@binkert.org  if (Protocol::m_TwoLevelCache) {
10976145Snate@binkert.org    if (msg.getType() == CacheRequestType_IFETCH) {
10986145Snate@binkert.org      addL1IStatSample(msg, id);
10996145Snate@binkert.org    } else {
11006145Snate@binkert.org      addL1DStatSample(msg, id);
11016145Snate@binkert.org    }
11026145Snate@binkert.org    // profile the address after an L1 miss (outside of the processor for CMP)
11036145Snate@binkert.org    if (Protocol::m_CMP) {
11046145Snate@binkert.org      addAddressTraceSample(msg, id);
11056145Snate@binkert.org    }
11066145Snate@binkert.org  } else {
11076145Snate@binkert.org    addL2StatSample(CacheRequestType_to_GenericRequestType(msg.getType()),
11086145Snate@binkert.org                    msg.getAccessMode(), msg.getSize(), msg.getPrefetch(), id);
11096145Snate@binkert.org    addAddressTraceSample(msg, id);
11106145Snate@binkert.org  }
11116145Snate@binkert.org}
11126145Snate@binkert.org
11136145Snate@binkert.orgvoid Profiler::profileConflictingRequests(const Address& addr)
11146145Snate@binkert.org{
11156145Snate@binkert.org  assert(addr == line_address(addr));
11166145Snate@binkert.org  Time last_time = m_ruby_start;
11176145Snate@binkert.org  if (m_conflicting_map_ptr->exist(addr)) {
11186145Snate@binkert.org    last_time = m_conflicting_map_ptr->lookup(addr);
11196145Snate@binkert.org  }
11206145Snate@binkert.org  Time current_time = g_eventQueue_ptr->getTime();
11216145Snate@binkert.org  assert (current_time - last_time > 0);
11226145Snate@binkert.org  m_conflicting_histogram.add(current_time - last_time);
11236145Snate@binkert.org  m_conflicting_map_ptr->add(addr, current_time);
11246145Snate@binkert.org}
11256145Snate@binkert.org
11266145Snate@binkert.orgvoid Profiler::addSecondaryStatSample(CacheRequestType requestType, AccessModeType type, int msgSize, PrefetchBit pfBit, NodeID id)
11276145Snate@binkert.org{
11286145Snate@binkert.org  addSecondaryStatSample(CacheRequestType_to_GenericRequestType(requestType), type, msgSize, pfBit, id);
11296145Snate@binkert.org}
11306145Snate@binkert.org
11316145Snate@binkert.orgvoid Profiler::addSecondaryStatSample(GenericRequestType requestType, AccessModeType type, int msgSize, PrefetchBit pfBit, NodeID id)
11326145Snate@binkert.org{
11336145Snate@binkert.org  addL2StatSample(requestType, type, msgSize, pfBit, id);
11346145Snate@binkert.org}
11356145Snate@binkert.org
11366145Snate@binkert.orgvoid Profiler::addL2StatSample(GenericRequestType requestType, AccessModeType type, int msgSize, PrefetchBit pfBit, NodeID id)
11376145Snate@binkert.org{
11386145Snate@binkert.org  m_perProcTotalMisses[id]++;
11396145Snate@binkert.org  if (type == AccessModeType_SupervisorMode) {
11406145Snate@binkert.org    m_perProcSupervisorMisses[id]++;
11416145Snate@binkert.org  } else {
11426145Snate@binkert.org    m_perProcUserMisses[id]++;
11436145Snate@binkert.org  }
11446145Snate@binkert.org  m_L2_cache_profiler_ptr->addStatSample(requestType, type, msgSize, pfBit);
11456145Snate@binkert.org}
11466145Snate@binkert.org
11476145Snate@binkert.orgvoid Profiler::addL1DStatSample(const CacheMsg& msg, NodeID id)
11486145Snate@binkert.org{
11496145Snate@binkert.org  m_L1D_cache_profiler_ptr->addStatSample(CacheRequestType_to_GenericRequestType(msg.getType()),
11506145Snate@binkert.org                                          msg.getAccessMode(), msg.getSize(), msg.getPrefetch());
11516145Snate@binkert.org}
11526145Snate@binkert.org
11536145Snate@binkert.orgvoid Profiler::addL1IStatSample(const CacheMsg& msg, NodeID id)
11546145Snate@binkert.org{
11556145Snate@binkert.org  m_L1I_cache_profiler_ptr->addStatSample(CacheRequestType_to_GenericRequestType(msg.getType()),
11566145Snate@binkert.org                                          msg.getAccessMode(), msg.getSize(), msg.getPrefetch());
11576145Snate@binkert.org}
11586145Snate@binkert.org
11596145Snate@binkert.orgvoid Profiler::addAddressTraceSample(const CacheMsg& msg, NodeID id)
11606145Snate@binkert.org{
11616145Snate@binkert.org  if (msg.getType() != CacheRequestType_IFETCH) {
11626145Snate@binkert.org
11636145Snate@binkert.org    // Note: The following line should be commented out if you want to
11646145Snate@binkert.org    // use the special profiling that is part of the GS320 protocol
11656145Snate@binkert.org
11666145Snate@binkert.org    // NOTE: Unless PROFILE_HOT_LINES or PROFILE_ALL_INSTRUCTIONS are enabled, nothing will be profiled by the AddressProfiler
11676145Snate@binkert.org    m_address_profiler_ptr->addTraceSample(msg.getAddress(), msg.getProgramCounter(), msg.getType(), msg.getAccessMode(), id, false);
11686145Snate@binkert.org  }
11696145Snate@binkert.org}
11706145Snate@binkert.org
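// Classifies a request by how many other nodes were contacted (the owner, plus the
// sharers for writes, excluding the requestor): zero contacts counts as a
// memory-to-cache transfer, anything else as cache-to-cache.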
11716145Snate@binkert.orgvoid Profiler::profileSharing(const Address& addr, AccessType type, NodeID requestor, const Set& sharers, const Set& owner)
11726145Snate@binkert.org{
11736145Snate@binkert.org  Set set_contacted(owner);
11746145Snate@binkert.org  if (type == AccessType_Write) {
11756145Snate@binkert.org    set_contacted.addSet(sharers);
11766145Snate@binkert.org  }
11776145Snate@binkert.org  set_contacted.remove(requestor);
11786145Snate@binkert.org  int number_contacted = set_contacted.count();
11796145Snate@binkert.org
11806145Snate@binkert.org  if (type == AccessType_Write) {
11816145Snate@binkert.org    m_write_sharing_histogram.add(number_contacted);
11826145Snate@binkert.org  } else {
11836145Snate@binkert.org    m_read_sharing_histogram.add(number_contacted);
11846145Snate@binkert.org  }
11856145Snate@binkert.org  m_all_sharing_histogram.add(number_contacted);
11866145Snate@binkert.org
11876145Snate@binkert.org  if (number_contacted == 0) {
11886145Snate@binkert.org    m_memory_to_cache++;
11896145Snate@binkert.org  } else {
11906145Snate@binkert.org    m_cache_to_cache++;
11916145Snate@binkert.org  }
11926145Snate@binkert.org
11936145Snate@binkert.org}
11946145Snate@binkert.org
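// Records cycles a message was delayed in the network, both in aggregate and per
// virtual network; virtual network 0 is excluded from the non-PF histogram.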
11956145Snate@binkert.orgvoid Profiler::profileMsgDelay(int virtualNetwork, int delayCycles) {
11966145Snate@binkert.org  assert(virtualNetwork < m_delayedCyclesVCHistograms.size());
11976145Snate@binkert.org  m_delayedCyclesHistogram.add(delayCycles);
11986145Snate@binkert.org  m_delayedCyclesVCHistograms[virtualNetwork].add(delayCycles);
11996145Snate@binkert.org  if (virtualNetwork != 0) {
12006145Snate@binkert.org    m_delayedCyclesNonPFHistogram.add(delayCycles);
12016145Snate@binkert.org  }
12026145Snate@binkert.org}
12036145Snate@binkert.org
12046145Snate@binkert.org// profiles original cache requests including PUTs
12056145Snate@binkert.orgvoid Profiler::profileRequest(const string& requestStr)
12066145Snate@binkert.org{
12076145Snate@binkert.org  m_requests++;
12086145Snate@binkert.org
12096145Snate@binkert.org  if (m_requestProfileMap_ptr->exist(requestStr)) {
12106145Snate@binkert.org    (m_requestProfileMap_ptr->lookup(requestStr))++;
12116145Snate@binkert.org  } else {
12126145Snate@binkert.org    m_requestProfileMap_ptr->add(requestStr, 1);
12136145Snate@binkert.org  }
12146145Snate@binkert.org}
12156145Snate@binkert.org
12166145Snate@binkert.orgvoid Profiler::recordPrediction(bool wasGood, bool wasPredicted)
12176145Snate@binkert.org{
12186145Snate@binkert.org  m_predictionOpportunities++;
12196145Snate@binkert.org  if(wasPredicted){
12206145Snate@binkert.org    m_predictions++;
12216145Snate@binkert.org    if(wasGood){
12226145Snate@binkert.org      m_goodPredictions++;
12236145Snate@binkert.org    }
12246145Snate@binkert.org  }
12256145Snate@binkert.org}
12266145Snate@binkert.org
12276145Snate@binkert.orgvoid Profiler::profileFilterAction(int action)
12286145Snate@binkert.org{
12296145Snate@binkert.org  m_filter_action_histogram.add(action);
12306145Snate@binkert.org}
12316145Snate@binkert.org
12326145Snate@binkert.orgvoid Profiler::profileMulticastRetry(const Address& addr, int count)
12336145Snate@binkert.org{
12346145Snate@binkert.org  m_multicast_retry_histogram.add(count);
12356145Snate@binkert.org}
12366145Snate@binkert.org
12376145Snate@binkert.orgvoid Profiler::startTransaction(int cpu)
12386145Snate@binkert.org{
12396145Snate@binkert.org  m_perProcStartTransaction[cpu]++;
12406145Snate@binkert.org}
12416145Snate@binkert.org
12426145Snate@binkert.orgvoid Profiler::endTransaction(int cpu)
12436145Snate@binkert.org{
12446145Snate@binkert.org  m_perProcEndTransaction[cpu]++;
12456145Snate@binkert.org}
12466145Snate@binkert.org
12476145Snate@binkert.orgvoid Profiler::controllerBusy(MachineID machID)
12486145Snate@binkert.org{
12496145Snate@binkert.org  m_busyControllerCount[(int)machID.type][(int)machID.num]++;
12506145Snate@binkert.org}
12516145Snate@binkert.org
12526145Snate@binkert.orgvoid Profiler::profilePFWait(Time waitTime)
12536145Snate@binkert.org{
12546145Snate@binkert.org  m_prefetchWaitHistogram.add(waitTime);
12556145Snate@binkert.org}
12566145Snate@binkert.org
12576145Snate@binkert.orgvoid Profiler::bankBusy()
12586145Snate@binkert.org{
12596145Snate@binkert.org  m_busyBankCount++;
12606145Snate@binkert.org}
12616145Snate@binkert.org
12626145Snate@binkert.org// non-zero cycle demand request
12636145Snate@binkert.orgvoid Profiler::missLatency(Time t, CacheRequestType type, GenericMachineType respondingMach)
12646145Snate@binkert.org{
12656145Snate@binkert.org  m_allMissLatencyHistogram.add(t);
12666145Snate@binkert.org  m_missLatencyHistograms[type].add(t);
12676145Snate@binkert.org  m_machLatencyHistograms[respondingMach].add(t);
12686145Snate@binkert.org  if(respondingMach == GenericMachineType_Directory || respondingMach == GenericMachineType_NUM) {
12696145Snate@binkert.org    m_L2MissLatencyHistogram.add(t);
12706145Snate@binkert.org  }
12716145Snate@binkert.org}
12726145Snate@binkert.org
12736145Snate@binkert.org// non-zero cycle prefetch request
12746145Snate@binkert.orgvoid Profiler::swPrefetchLatency(Time t, CacheRequestType type, GenericMachineType respondingMach)
12756145Snate@binkert.org{
12766145Snate@binkert.org  m_allSWPrefetchLatencyHistogram.add(t);
12776145Snate@binkert.org  m_SWPrefetchLatencyHistograms[type].add(t);
12786145Snate@binkert.org  m_SWPrefetchMachLatencyHistograms[respondingMach].add(t);
12796145Snate@binkert.org  if(respondingMach == GenericMachineType_Directory || respondingMach == GenericMachineType_NUM) {
12806145Snate@binkert.org    m_SWPrefetchL2MissLatencyHistogram.add(t);
12816145Snate@binkert.org  }
12826145Snate@binkert.org}
12836145Snate@binkert.org
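// Emits one debug-trace line per controller transition (time, node, version,
// component, event, state -> next_state, address, note) once the configured
// debug start time has been reached.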
12846145Snate@binkert.orgvoid Profiler::profileTransition(const string& component, NodeID id, NodeID version, Address addr,
12856145Snate@binkert.org                                 const string& state, const string& event,
12866145Snate@binkert.org                                 const string& next_state, const string& note)
12876145Snate@binkert.org{
12886145Snate@binkert.org  const int EVENT_SPACES = 20;
12896145Snate@binkert.org  const int ID_SPACES = 3;
12906145Snate@binkert.org  const int TIME_SPACES = 7;
12916145Snate@binkert.org  const int COMP_SPACES = 10;
12926145Snate@binkert.org  const int STATE_SPACES = 6;
12936145Snate@binkert.org
12946145Snate@binkert.org  if ((g_debug_ptr->getDebugTime() > 0) &&
12956145Snate@binkert.org      (g_eventQueue_ptr->getTime() >= g_debug_ptr->getDebugTime())) {
12966145Snate@binkert.org    (* debug_cout_ptr).flags(ios::right);
12976145Snate@binkert.org    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
12986145Snate@binkert.org    (* debug_cout_ptr) << setw(ID_SPACES) << id << " ";
12996145Snate@binkert.org    (* debug_cout_ptr) << setw(ID_SPACES) << version << " ";
13006145Snate@binkert.org    (* debug_cout_ptr) << setw(COMP_SPACES) << component;
13016145Snate@binkert.org    (* debug_cout_ptr) << setw(EVENT_SPACES) << event << " ";
13026145Snate@binkert.org    for (int i=0; i < RubyConfig::numberOfProcessors(); i++) {
13036145Snate@binkert.org
13046145Snate@binkert.org      if (i == id) {
13056145Snate@binkert.org        (* debug_cout_ptr).flags(ios::right);
13066145Snate@binkert.org        (* debug_cout_ptr) << setw(STATE_SPACES) << state;
13076145Snate@binkert.org        (* debug_cout_ptr) << ">";
13086145Snate@binkert.org        (* debug_cout_ptr).flags(ios::left);
13096145Snate@binkert.org        (* debug_cout_ptr) << setw(STATE_SPACES) << next_state;
13106145Snate@binkert.org      } else {
13116145Snate@binkert.org        // cout << setw(STATE_SPACES) << " " << " " << setw(STATE_SPACES) << " ";
13126145Snate@binkert.org      }
13136145Snate@binkert.org    }
13146145Snate@binkert.org    (* debug_cout_ptr) << " " << addr << " " << note;
13156145Snate@binkert.org
13166145Snate@binkert.org    (* debug_cout_ptr) << endl;
13176145Snate@binkert.org  }
13186145Snate@binkert.org}
13196145Snate@binkert.org
13206145Snate@binkert.org// Helper functions: read /proc/self/statm (Linux-specific) to report the
// simulator process's total and resident memory footprints in megabytes.
13216145Snate@binkert.orgstatic double process_memory_total()
13226145Snate@binkert.org{
13236145Snate@binkert.org  const double MULTIPLIER = 4096.0/(1024.0*1024.0); // 4kB page size, 1024*1024 bytes per MB,
13246145Snate@binkert.org  ifstream proc_file;
13256145Snate@binkert.org  proc_file.open("/proc/self/statm");
13266145Snate@binkert.org  int total_size_in_pages = 0;
13276145Snate@binkert.org  int res_size_in_pages = 0;
13286145Snate@binkert.org  proc_file >> total_size_in_pages;
13296145Snate@binkert.org  proc_file >> res_size_in_pages;
13306145Snate@binkert.org  return double(total_size_in_pages)*MULTIPLIER; // size in megabytes
13316145Snate@binkert.org}
13326145Snate@binkert.org
13336145Snate@binkert.orgstatic double process_memory_resident()
13346145Snate@binkert.org{
13356145Snate@binkert.org  const double MULTIPLIER = 4096.0/(1024.0*1024.0); // 4kB page size, 1024*1024 bytes per MB,
13366145Snate@binkert.org  ifstream proc_file;
13376145Snate@binkert.org  proc_file.open("/proc/self/statm");
13386145Snate@binkert.org  int total_size_in_pages = 0;
13396145Snate@binkert.org  int res_size_in_pages = 0;
13406145Snate@binkert.org  proc_file >> total_size_in_pages;
13416145Snate@binkert.org  proc_file >> res_size_in_pages;
13426145Snate@binkert.org  return double(res_size_in_pages)*MULTIPLIER; // size in megabytes
13436145Snate@binkert.org}
13446145Snate@binkert.org
13456145Snate@binkert.orgvoid Profiler::profileGetXMaskPrediction(const Set& pred_set)
13466145Snate@binkert.org{
13476145Snate@binkert.org  m_getx_mask_prediction.add(pred_set.count());
13486145Snate@binkert.org}
13496145Snate@binkert.org
13506145Snate@binkert.orgvoid Profiler::profileGetSMaskPrediction(const Set& pred_set)
13516145Snate@binkert.org{
13526145Snate@binkert.org  m_gets_mask_prediction.add(pred_set.count());
13536145Snate@binkert.org}
13546145Snate@binkert.org
13556145Snate@binkert.orgvoid Profiler::profileTrainingMask(const Set& pred_set)
13566145Snate@binkert.org{
13576145Snate@binkert.org  m_explicit_training_mask.add(pred_set.count());
13586145Snate@binkert.org}
13596145Snate@binkert.org
13606145Snate@binkert.orgint64 Profiler::getTotalInstructionsExecuted() const
13616145Snate@binkert.org{
13626145Snate@binkert.org  int64 sum = 1;     // Starting at 1 allows us to avoid division by zero
13636145Snate@binkert.org  for(int i=0; i < RubyConfig::numberOfProcessors(); i++) {
13646145Snate@binkert.org    sum += (g_system_ptr->getDriver()->getInstructionCount(i) - m_instructions_executed_at_start[i]);
13656145Snate@binkert.org  }
13666145Snate@binkert.org  return sum;
13676145Snate@binkert.org}
13686145Snate@binkert.org
13696145Snate@binkert.orgint64 Profiler::getTotalTransactionsExecuted() const
13706145Snate@binkert.org{
13716145Snate@binkert.org  int64 sum = m_perProcEndTransaction.sum();
13726145Snate@binkert.org  if (sum > 0) {
13736145Snate@binkert.org    return sum;
13746145Snate@binkert.org  } else {
13756145Snate@binkert.org    return 1;  // Avoid division by zero errors
13766145Snate@binkert.org  }
13776145Snate@binkert.org}
13786145Snate@binkert.org
13796145Snate@binkert.org
13806145Snate@binkert.org// The following case statement converts CacheRequestTypes to GenericRequestTypes
13816145Snate@binkert.org// allowing all profiling to be done with a single enum type instead of slow strings
13826145Snate@binkert.orgGenericRequestType Profiler::CacheRequestType_to_GenericRequestType(const CacheRequestType& type) {
13836145Snate@binkert.org  switch (type) {
13846145Snate@binkert.org  case CacheRequestType_LD:
13856145Snate@binkert.org    return GenericRequestType_LD;
13866145Snate@binkert.org    break;
13876145Snate@binkert.org  case CacheRequestType_ST:
13886145Snate@binkert.org    return GenericRequestType_ST;
13896145Snate@binkert.org    break;
13906145Snate@binkert.org  case CacheRequestType_ATOMIC:
13916145Snate@binkert.org    return GenericRequestType_ATOMIC;
13926145Snate@binkert.org    break;
13936145Snate@binkert.org  case CacheRequestType_IFETCH:
13946145Snate@binkert.org    return GenericRequestType_IFETCH;
13956145Snate@binkert.org    break;
13966145Snate@binkert.org  case CacheRequestType_LD_XACT:
13976145Snate@binkert.org    return GenericRequestType_LD_XACT;
13986145Snate@binkert.org    break;
13996145Snate@binkert.org  case CacheRequestType_LDX_XACT:
14006145Snate@binkert.org    return GenericRequestType_LDX_XACT;
14016145Snate@binkert.org    break;
14026145Snate@binkert.org  case CacheRequestType_ST_XACT:
14036145Snate@binkert.org    return GenericRequestType_ST_XACT;
14046145Snate@binkert.org    break;
14056145Snate@binkert.org  case CacheRequestType_NULL:
14066145Snate@binkert.org    return GenericRequestType_NULL;
14076145Snate@binkert.org    break;
14086145Snate@binkert.org  default:
14096145Snate@binkert.org    ERROR_MSG("Unexpected cache request type");
    return GenericRequestType_NULL;  // unreachable: ERROR_MSG() aborts; keeps the compiler from warning about a missing return
14106145Snate@binkert.org  }
14116145Snate@binkert.org}
14126145Snate@binkert.org
14136145Snate@binkert.org//---- begin Transactional Memory CODE
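// Accumulates per-transaction statistics (log size, read/write set sizes, retries,
// useful cycles, load/store misses) in histograms and in per-XID commit counters.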
14146145Snate@binkert.orgvoid Profiler::profileTransaction(int size, int logSize, int readS, int writeS, int overflow_readS, int overflow_writeS, int retries, int useful_cycles, bool nacked, int loadMisses, int storeMisses, int instrCount, int xid){
14156145Snate@binkert.org  m_xactLogs.add(logSize);
14166145Snate@binkert.org  m_xactSizes.add(size);
14176145Snate@binkert.org  m_xactReads.add(readS);
14186145Snate@binkert.org  m_xactWrites.add(writeS);
14196145Snate@binkert.org  m_xactRetries.add(retries);
14206145Snate@binkert.org  m_xactCycles.add(useful_cycles);
14216145Snate@binkert.org  m_xactLoadMisses.add(loadMisses);
14226145Snate@binkert.org  m_xactStoreMisses.add(storeMisses);
14236145Snate@binkert.org  m_xactInstrCount.add(instrCount);
14246145Snate@binkert.org
14256145Snate@binkert.org  // was this transaction nacked?
14266145Snate@binkert.org  if(nacked){
14276145Snate@binkert.org    m_xactNacked++;
14286145Snate@binkert.org  }
14296145Snate@binkert.org
14306145Snate@binkert.org  // for overflowed transactions
14316145Snate@binkert.org  if(overflow_readS > 0 || overflow_writeS > 0){
14326145Snate@binkert.org    m_xactOverflowReads.add(overflow_readS);
14336145Snate@binkert.org    m_xactOverflowWrites.add(overflow_writeS);
14346145Snate@binkert.org    m_xactOverflowTotalReads.add(readS);
14356145Snate@binkert.org    m_xactOverflowTotalWrites.add(writeS);
14366145Snate@binkert.org  }
14376145Snate@binkert.org
14386145Snate@binkert.org  // Record commits by xid
14396145Snate@binkert.org  if(!m_commitIDMap_ptr->exist(xid)){
14406145Snate@binkert.org    m_commitIDMap_ptr->add(xid, 1);
14416145Snate@binkert.org    m_xactRetryIDMap_ptr->add(xid, retries);
14426145Snate@binkert.org    m_xactCyclesIDMap_ptr->add(xid, useful_cycles);
14436145Snate@binkert.org    m_xactReadSetIDMap_ptr->add(xid, readS);
14446145Snate@binkert.org    m_xactWriteSetIDMap_ptr->add(xid, writeS);
14456145Snate@binkert.org    m_xactLoadMissIDMap_ptr->add(xid, loadMisses);
14466145Snate@binkert.org    m_xactStoreMissIDMap_ptr->add(xid, storeMisses);
14476145Snate@binkert.org    m_xactInstrCountIDMap_ptr->add(xid, instrCount);
14486145Snate@binkert.org  } else {
14496145Snate@binkert.org    (m_commitIDMap_ptr->lookup(xid))++;
14506145Snate@binkert.org    (m_xactRetryIDMap_ptr->lookup(xid)) += retries;
14516145Snate@binkert.org    (m_xactCyclesIDMap_ptr->lookup(xid)) += useful_cycles;
14526145Snate@binkert.org    (m_xactReadSetIDMap_ptr->lookup(xid)) += readS;
14536145Snate@binkert.org    (m_xactWriteSetIDMap_ptr->lookup(xid)) += writeS;
14546145Snate@binkert.org    (m_xactLoadMissIDMap_ptr->lookup(xid)) += loadMisses;
14556145Snate@binkert.org    (m_xactStoreMissIDMap_ptr->lookup(xid)) += storeMisses;
14566145Snate@binkert.org    (m_xactInstrCountIDMap_ptr->lookup(xid)) += instrCount;
14576145Snate@binkert.org  }
14586145Snate@binkert.org}
14596145Snate@binkert.org
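// The transactional-memory trace routines below emit detailed per-event debug
// output (processor, thread, transaction id, PC, disassembled instruction) when
// PROFILE_XACT or a sufficient ATMTP_DEBUG_LEVEL is enabled.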
14606145Snate@binkert.orgvoid Profiler::profileBeginTransaction(NodeID id, int tid, int xid, int thread, Address pc, bool isOpen){
14616145Snate@binkert.org  //- if(PROFILE_XACT){
14626145Snate@binkert.org  if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 2)){
14636145Snate@binkert.org    const char* openStr = isOpen ? " OPEN" : " CLOSED";
14646145Snate@binkert.org    const int ID_SPACES = 3;
14656145Snate@binkert.org    const int TIME_SPACES = 7;
14666145Snate@binkert.org    physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
14676145Snate@binkert.org    integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
14686145Snate@binkert.org    const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
14696145Snate@binkert.org    // The actual processor number
14706145Snate@binkert.org    int proc_no = id*RubyConfig::numberofSMTThreads() + thread;
14716145Snate@binkert.org    (* debug_cout_ptr).flags(ios::right);
14726145Snate@binkert.org    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
14736145Snate@binkert.org    (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" <<  " TID " << tid
14746145Snate@binkert.org                       << " XACT BEGIN " << xid
14756145Snate@binkert.org                       << "  PC 0x" << hex << pc.getAddress()
14766145Snate@binkert.org                       << dec
14776145Snate@binkert.org                       << "  *PC 0x" << hex << myInst << dec
14786145Snate@binkert.org                       << " '" << myInstStr << "'"
14796145Snate@binkert.org                       << openStr
14806145Snate@binkert.org                       << endl;
14816145Snate@binkert.org  }
14826145Snate@binkert.org}
14836145Snate@binkert.org
14846145Snate@binkert.orgvoid Profiler::profileCommitTransaction(NodeID id, int tid, int xid, int thread, Address pc, bool isOpen){
14856145Snate@binkert.org  //- if(PROFILE_XACT){
14866145Snate@binkert.org  if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 2)){
14876145Snate@binkert.org    const char* openStr = isOpen ? " OPEN" : " CLOSED";
14886145Snate@binkert.org    const int ID_SPACES = 3;
14896145Snate@binkert.org    const int TIME_SPACES = 7;
14906145Snate@binkert.org    physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
14916145Snate@binkert.org    integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
14926145Snate@binkert.org    const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
14936145Snate@binkert.org    // The actual processor number
14946145Snate@binkert.org    int proc_no = id*RubyConfig::numberofSMTThreads() + thread;
14956145Snate@binkert.org    (* debug_cout_ptr).flags(ios::right);
14966145Snate@binkert.org    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
14976145Snate@binkert.org    (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" << " TID " << tid
14986145Snate@binkert.org                       << " XACT COMMIT " << xid
14996145Snate@binkert.org                       << "  PC 0x" << hex << pc.getAddress()
15006145Snate@binkert.org                       << dec
15016145Snate@binkert.org                       << "  *PC 0x" << hex << myInst << dec
15026145Snate@binkert.org                       << " '" << myInstStr << "'"
15036145Snate@binkert.org                       << openStr
15046145Snate@binkert.org                       << endl;
15056145Snate@binkert.org  }
15066145Snate@binkert.org
15076145Snate@binkert.org}
15086145Snate@binkert.org
15096145Snate@binkert.org// for profiling overflows
15106145Snate@binkert.orgvoid Profiler::profileLoadOverflow(NodeID id, int tid, int xid, int thread, Address addr, bool l1_overflow){
15116145Snate@binkert.org  //- if(PROFILE_XACT){
15126145Snate@binkert.org  if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
15136145Snate@binkert.org    const int ID_SPACES = 3;
15146145Snate@binkert.org    const int TIME_SPACES = 7;
15156145Snate@binkert.org    string overflow_str = " XACT LOAD L1 OVERFLOW ";
15166145Snate@binkert.org    if(!l1_overflow){
15176145Snate@binkert.org      overflow_str = " XACT LOAD L2 OVERFLOW ";
15186145Snate@binkert.org    }
15196145Snate@binkert.org    // The actual processor number
15206145Snate@binkert.org    int proc_no = id*RubyConfig::numberofSMTThreads() + thread;
15216145Snate@binkert.org    (* debug_cout_ptr).flags(ios::right);
15226145Snate@binkert.org    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
15236145Snate@binkert.org    (* debug_cout_ptr) << setw(ID_SPACES)  << proc_no << " [" << id << "," << thread << "]" <<  " TID " << tid
15246145Snate@binkert.org                       << overflow_str << xid
15256145Snate@binkert.org                       << "  ADDR " << addr
15266145Snate@binkert.org                       << endl;
15276145Snate@binkert.org  }
15286145Snate@binkert.org}
15296145Snate@binkert.org
15306145Snate@binkert.org// for profiling overflows
15316145Snate@binkert.orgvoid Profiler::profileStoreOverflow(NodeID id, int tid, int xid, int thread, Address addr, bool l1_overflow){
15326145Snate@binkert.org  //- if(PROFILE_XACT){
15336145Snate@binkert.org  if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
15346145Snate@binkert.org    const int ID_SPACES = 3;
15356145Snate@binkert.org    const int TIME_SPACES = 7;
15366145Snate@binkert.org    string overflow_str = " XACT STORE L1 OVERFLOW ";
15376145Snate@binkert.org    if(!l1_overflow){
15386145Snate@binkert.org      overflow_str = " XACT STORE L2 OVERFLOW ";
15396145Snate@binkert.org    }
15406145Snate@binkert.org    // The actual processor number
15416145Snate@binkert.org    int proc_no = id*RubyConfig::numberofSMTThreads() + thread;
15426145Snate@binkert.org    (* debug_cout_ptr).flags(ios::right);
15436145Snate@binkert.org    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
15446145Snate@binkert.org    (* debug_cout_ptr) << setw(ID_SPACES)  << proc_no << " [" << id << "," << thread << "]" <<  " TID " << tid
15456145Snate@binkert.org                       << overflow_str << xid
15466145Snate@binkert.org                       << "  ADDR " << addr
15476145Snate@binkert.org                       << endl;
15486145Snate@binkert.org  }
15496145Snate@binkert.org}
15506145Snate@binkert.org
15516145Snate@binkert.orgvoid Profiler::profileLoadTransaction(NodeID id, int tid, int xid, int thread, Address addr, Address logicalAddress, Address pc){
15526145Snate@binkert.org  //- if(PROFILE_XACT){
15536145Snate@binkert.org  if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 3)){
15546145Snate@binkert.org    const int ID_SPACES = 3;
15556145Snate@binkert.org    const int TIME_SPACES = 7;
15566145Snate@binkert.org    physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
15576145Snate@binkert.org    integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
15586145Snate@binkert.org    const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
15596145Snate@binkert.org    // The actual processor number
15606145Snate@binkert.org    int proc_no = id*RubyConfig::numberofSMTThreads() + thread;
15616145Snate@binkert.org    (* debug_cout_ptr).flags(ios::right);
15626145Snate@binkert.org    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
15636145Snate@binkert.org    (* debug_cout_ptr) << setw(ID_SPACES)  << proc_no << " [" << id << "," << thread << "]" <<  " TID " << tid
15646145Snate@binkert.org                       << " XACT LOAD " << xid
15656145Snate@binkert.org                       << " " << addr
15666145Snate@binkert.org                       << " VA " << logicalAddress
15676145Snate@binkert.org                       << " PC " << pc
15686145Snate@binkert.org                       << "  *PC 0x" << hex << myInst << dec
15696145Snate@binkert.org                       << " '" << myInstStr << "'"
15706145Snate@binkert.org      //<< " VAL 0x" << hex << SIMICS_read_physical_memory(proc_no, SIMICS_translate_data_address(proc_no, logicalAddress), 4) << dec
15716145Snate@binkert.org                       << " VAL 0x" << hex << g_system_ptr->getDriver()->readPhysicalMemory(proc_no, addr.getAddress(), 4) << dec
15726145Snate@binkert.org                       << endl;
15736145Snate@binkert.org  }
15746145Snate@binkert.org}
15756145Snate@binkert.org
15766145Snate@binkert.orgvoid Profiler::profileLoad(NodeID id, int tid, int xid, int thread, Address addr, Address logicalAddress, Address pc){
15776145Snate@binkert.org  if(PROFILE_NONXACT){
15786145Snate@binkert.org    const int ID_SPACES = 3;
15796145Snate@binkert.org    const int TIME_SPACES = 7;
15806145Snate@binkert.org    // The actual processor number
15816145Snate@binkert.org    int proc_no = id*RubyConfig::numberofSMTThreads() + thread;
15826145Snate@binkert.org    (* debug_cout_ptr).flags(ios::right);
15836145Snate@binkert.org    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
15846145Snate@binkert.org    (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" <<  " TID " << tid
15856145Snate@binkert.org                       << " LOAD " << xid
15866145Snate@binkert.org                       << " " << addr
15876145Snate@binkert.org                       << " VA " << logicalAddress
15886145Snate@binkert.org                       << " PC " << pc
15896145Snate@binkert.org      //<< " VAL 0x" << hex << SIMICS_read_physical_memory(proc_no, SIMICS_translate_data_address(proc_no, logicalAddress), 4) << dec
15906145Snate@binkert.org                       << " VAL 0x" << hex << g_system_ptr->getDriver()->readPhysicalMemory(proc_no, addr.getAddress(), 4) << dec
15916145Snate@binkert.org                       << endl;
15926145Snate@binkert.org  }
15936145Snate@binkert.org}
15946145Snate@binkert.org
15956145Snate@binkert.orgvoid Profiler::profileStoreTransaction(NodeID id, int tid, int xid, int thread, Address addr, Address logicalAddress, Address pc){
15966145Snate@binkert.org  //- if(PROFILE_XACT){
15976145Snate@binkert.org  if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 3)){
15986145Snate@binkert.org    const int ID_SPACES = 3;
15996145Snate@binkert.org    const int TIME_SPACES = 7;
16006145Snate@binkert.org    physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
16016145Snate@binkert.org    integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
16026145Snate@binkert.org    const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
16036145Snate@binkert.org    // The actual processor number
16046145Snate@binkert.org    int proc_no = id*RubyConfig::numberofSMTThreads() + thread;
16056145Snate@binkert.org    (* debug_cout_ptr).flags(ios::right);
16066145Snate@binkert.org    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
16076145Snate@binkert.org    (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" <<  " TID " << tid
16086145Snate@binkert.org                       << " XACT STORE " << xid
16096145Snate@binkert.org                       << " " << addr
16106145Snate@binkert.org                       << " VA " << logicalAddress
16116145Snate@binkert.org                       << " PC " << pc
16126145Snate@binkert.org                       << "  *PC 0x" << hex << myInst << dec
16136145Snate@binkert.org                       << " '" << myInstStr << "'"
16146145Snate@binkert.org                       << endl;
16156145Snate@binkert.org  }
16166145Snate@binkert.org}
16176145Snate@binkert.org
16186145Snate@binkert.orgvoid Profiler::profileStore(NodeID id, int tid, int xid, int thread, Address addr, Address logicalAddress, Address pc){
16196145Snate@binkert.org  if(PROFILE_NONXACT){
16206145Snate@binkert.org    const int ID_SPACES = 3;
16216145Snate@binkert.org    const int TIME_SPACES = 7;
16226145Snate@binkert.org    // The actual processor number
16236145Snate@binkert.org    int proc_no = id*RubyConfig::numberofSMTThreads() + thread;
16246145Snate@binkert.org    (* debug_cout_ptr).flags(ios::right);
16256145Snate@binkert.org    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
16266145Snate@binkert.org    (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" <<  " TID " << tid
16276145Snate@binkert.org                       << " STORE " << xid
16286145Snate@binkert.org                       << " " << addr
16296145Snate@binkert.org                       << " VA " << logicalAddress
16306145Snate@binkert.org                       << " PC " << pc
16316145Snate@binkert.org                       << endl;
16326145Snate@binkert.org  }
16336145Snate@binkert.org}
16346145Snate@binkert.org
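// Records a NACK seen by a transactional request, keyed by transaction id, by
// (xid, nacker-xid) pair, and by PC.  Note that the nacker XID lookup is currently
// stubbed out to 0 and guarded by assert(0), so this path is not expected to run.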
16356145Snate@binkert.orgvoid Profiler::profileNack(NodeID id, int tid, int xid, int thread, int nacking_thread, NodeID nackedBy, Address addr, Address logicalAddress, Address pc, uint64 seq_ts, uint64 nack_ts, bool possibleCycle){
16366145Snate@binkert.org  int nid = 0; // g_system_ptr->getChip(nackedBy/RubyConfig::numberOfProcsPerChip())->getTransactionInterfaceManager(nackedBy%RubyConfig::numberOfProcsPerChip())->getXID(nacking_thread);
16376145Snate@binkert.org  assert(0);
16386145Snate@binkert.org  //- if(PROFILE_XACT){
16396145Snate@binkert.org  if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
16406145Snate@binkert.org    const int ID_SPACES = 3;
16416145Snate@binkert.org    const int TIME_SPACES = 7;
16426145Snate@binkert.org    physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
16436145Snate@binkert.org    integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
16446145Snate@binkert.org    const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
16456145Snate@binkert.org    // The actual processor number
16466145Snate@binkert.org    int proc_no = id*g_NUM_SMT_THREADS + thread;
16476145Snate@binkert.org    int nack_proc_no = nackedBy*g_NUM_SMT_THREADS + nacking_thread;
16486145Snate@binkert.org    Address nack_pc = SIMICS_get_program_counter(nack_proc_no);
16496145Snate@binkert.org    (* debug_cout_ptr).flags(ios::right);
16506145Snate@binkert.org    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
16516145Snate@binkert.org    (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" <<  " TID " << tid
16526145Snate@binkert.org                       << " XACT NACK " << xid
16536145Snate@binkert.org                       << " by " << nack_proc_no
16546145Snate@binkert.org                       << " [ " << nackedBy
16556145Snate@binkert.org                       << ", " << nacking_thread
16566145Snate@binkert.org                       << " ]"
16576145Snate@binkert.org                       << " NID: " << nid
16586145Snate@binkert.org                       << " " << addr
16596145Snate@binkert.org                       << " VA " << logicalAddress
16606145Snate@binkert.org                       << "  PC " << pc
16616145Snate@binkert.org                       << "  *PC 0x" << hex << myInst << dec
16626145Snate@binkert.org                       << " '" << myInstStr << "'"
16636145Snate@binkert.org                       << " NackerPC " << nack_pc
16646145Snate@binkert.org                       << "  my_ts " << seq_ts
16656145Snate@binkert.org                       << "  nack_ts " << nack_ts
16666145Snate@binkert.org                       << " possible_cycle " << possibleCycle
16676145Snate@binkert.org                       << endl;
16686145Snate@binkert.org  }
16696145Snate@binkert.org
16706145Snate@binkert.org  // Record nacks by xid
16716145Snate@binkert.org  if(!m_nackXIDMap_ptr->exist(xid)){
16726145Snate@binkert.org    m_nackXIDMap_ptr->add(xid, 1);
16736145Snate@binkert.org  } else {
16746145Snate@binkert.org    (m_nackXIDMap_ptr->lookup(xid))++;
16756145Snate@binkert.org  }
16766145Snate@binkert.org
16776145Snate@binkert.org  // Record nack ID pairs by xid
16786145Snate@binkert.org  if(!m_nackXIDPairMap_ptr->exist(xid)){
16796145Snate@binkert.org    Map<int, int> * new_map = new Map<int, int>;
16806145Snate@binkert.org    new_map->add(nid, 1);
16816145Snate@binkert.org    m_nackXIDPairMap_ptr->add(xid, new_map);
16826145Snate@binkert.org  }
16836145Snate@binkert.org  else{
16846145Snate@binkert.org    // retrieve existing map
16856145Snate@binkert.org    Map<int, int> * my_map = m_nackXIDPairMap_ptr->lookup(xid);
16866145Snate@binkert.org    if(!my_map->exist(nid)){
16876145Snate@binkert.org      my_map->add(nid, 1);
16886145Snate@binkert.org    }
16896145Snate@binkert.org    else{
16906145Snate@binkert.org      (my_map->lookup(nid))++;
16916145Snate@binkert.org    }
16926145Snate@binkert.org  }
16936145Snate@binkert.org
16946145Snate@binkert.org  // Record nacks by pc
16956145Snate@binkert.org  if(!m_nackPCMap_ptr->exist(pc)){
16966145Snate@binkert.org    m_nackPCMap_ptr->add(pc, 1);
16976145Snate@binkert.org  } else {
16986145Snate@binkert.org    (m_nackPCMap_ptr->lookup(pc))++;
16996145Snate@binkert.org  }
17006145Snate@binkert.org}
17016145Snate@binkert.org
17026145Snate@binkert.orgvoid Profiler::profileExposedConflict(NodeID id, int xid, int thread, Address addr, Address pc){
17036145Snate@binkert.org  //if(PROFILE_XACT){
17046145Snate@binkert.org    const int ID_SPACES = 3;
17056145Snate@binkert.org    const int TIME_SPACES = 7;
17066145Snate@binkert.org    // The actual processor number
17076145Snate@binkert.org    int proc_no = id*g_NUM_SMT_THREADS + thread;
17086145Snate@binkert.org    (* debug_cout_ptr).flags(ios::right);
17096145Snate@binkert.org    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
17106145Snate@binkert.org    (* debug_cout_ptr) << setw(ID_SPACES)  << proc_no << " [" << id << "," << thread << "]" << " "
17116145Snate@binkert.org                       << " EXPOSED ACTION CONFLICT " << xid
17126145Snate@binkert.org                       << "  ADDR " << addr
17136145Snate@binkert.org                       << "  PC " << pc
17146145Snate@binkert.org                       << endl;
17156145Snate@binkert.org    //}
17166145Snate@binkert.org}
17176145Snate@binkert.org
17186145Snate@binkert.orgvoid Profiler::profileInferredAbort(){
17196145Snate@binkert.org  m_inferredAborts++;
17206145Snate@binkert.org}
17216145Snate@binkert.org
17226145Snate@binkert.orgvoid Profiler::profileAbortDelayConstants(int startupDelay, int perBlock){
17236145Snate@binkert.org  m_abortStarupDelay = startupDelay;
17246145Snate@binkert.org  m_abortPerBlockDelay = perBlock;
17256145Snate@binkert.org}
17266145Snate@binkert.org
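// Records a transaction abort: prints a debug trace when enabled and updates the
// abort counters keyed by transaction id, PC, and address, plus the delay histogram.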
17276145Snate@binkert.orgvoid Profiler::profileAbortTransaction(NodeID id, int tid, int xid, int thread, int delay, int abortingThread, int abortingProc, Address addr, Address pc){
17286145Snate@binkert.org  const int ID_SPACES = 3;
17296145Snate@binkert.org  const int TIME_SPACES = 7;
17306145Snate@binkert.org  int abortingXID = -1;
17316145Snate@binkert.org  // The actual processor number
17326145Snate@binkert.org  int proc_no = id*g_NUM_SMT_THREADS + thread;
17336145Snate@binkert.org  // We are passed the physical processor number; compute the logical aborting proc_no.
17346145Snate@binkert.org  int logical_abort_proc_no = abortingProc/g_NUM_SMT_THREADS;
17356145Snate@binkert.org  if(abortingProc >= 0){
17366145Snate@binkert.org    AbstractChip * c = g_system_ptr->getChip(logical_abort_proc_no/RubyConfig::numberOfProcsPerChip());
17376145Snate@binkert.org    abortingXID = 0; // c->getTransactionInterfaceManager(logical_abort_proc_no%RubyConfig::numberOfProcsPerChip())->getXID(abortingThread);
17386145Snate@binkert.org    assert(0);
17396145Snate@binkert.org  }
17406145Snate@binkert.org  //- if(PROFILE_XACT){
17416145Snate@binkert.org  if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
17426145Snate@binkert.org    physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
17436145Snate@binkert.org    integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
17446145Snate@binkert.org    const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
17456145Snate@binkert.org    (* debug_cout_ptr).flags(ios::right);
17466145Snate@binkert.org    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
17476145Snate@binkert.org    (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" <<  " TID " << tid
17486145Snate@binkert.org                       << " XACT ABORT " << xid
17496145Snate@binkert.org                       << " caused by " << abortingProc
17506145Snate@binkert.org                       << " [ " << logical_abort_proc_no
17516145Snate@binkert.org                       << ", " << abortingThread
17526145Snate@binkert.org                       << " ]"
17536145Snate@binkert.org                       << " xid: " << abortingXID << " "
17546145Snate@binkert.org                       << " address: " << addr
17556145Snate@binkert.org                       << " delay: " << delay
17566145Snate@binkert.org                       << "  PC " << pc
17576145Snate@binkert.org                       << "  *PC 0x" << hex << myInst << dec
17586145Snate@binkert.org                       << " '" << myInstStr << "'"
17596145Snate@binkert.org                       << endl;
17606145Snate@binkert.org  }
17616145Snate@binkert.org  m_transactionAborts++;
17626145Snate@binkert.org
17636145Snate@binkert.org  // Record aborts by xid
17646145Snate@binkert.org  if(!m_abortIDMap_ptr->exist(xid)){
17656145Snate@binkert.org    m_abortIDMap_ptr->add(xid, 1);
17666145Snate@binkert.org  } else {
17676145Snate@binkert.org    (m_abortIDMap_ptr->lookup(xid))++;
17686145Snate@binkert.org  }
17696145Snate@binkert.org  m_abortDelays.add(delay);
17706145Snate@binkert.org
17716145Snate@binkert.org  // Record aborts by pc
17726145Snate@binkert.org  if(!m_abortPCMap_ptr->exist(pc)){
17736145Snate@binkert.org    m_abortPCMap_ptr->add(pc, 1);
17746145Snate@binkert.org  } else {
17756145Snate@binkert.org    (m_abortPCMap_ptr->lookup(pc))++;
17766145Snate@binkert.org  }
17776145Snate@binkert.org
17786145Snate@binkert.org  // Record aborts by address
17796145Snate@binkert.org  if(!m_abortAddressMap_ptr->exist(addr)){
17806145Snate@binkert.org    m_abortAddressMap_ptr->add(addr, 1);
17816145Snate@binkert.org  } else {
17826145Snate@binkert.org    (m_abortAddressMap_ptr->lookup(addr))++;
17836145Snate@binkert.org  }
17846145Snate@binkert.org}
17856145Snate@binkert.org
17866145Snate@binkert.orgvoid Profiler::profileTransWB(){
17876145Snate@binkert.org  m_transWBs++;
17886145Snate@binkert.org}
17896145Snate@binkert.org
17906145Snate@binkert.orgvoid Profiler::profileExtraWB(){
17916145Snate@binkert.org  m_extraWBs++;
17926145Snate@binkert.org}
17936145Snate@binkert.org
17946145Snate@binkert.orgvoid Profiler::profileXactChange(int procs, int cycles){
17956145Snate@binkert.org  if(!m_procsInXactMap_ptr->exist(procs)){
17966145Snate@binkert.org    m_procsInXactMap_ptr->add(procs, cycles);
17976145Snate@binkert.org  } else {
17986145Snate@binkert.org    (m_procsInXactMap_ptr->lookup(procs)) += cycles;
17996145Snate@binkert.org  }
18006145Snate@binkert.org}
18016145Snate@binkert.org
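// Compares a Bloom-filter read-set lookup against a perfect filter: mismatches are
// counted as false positives, matches as agreements, and checks against an empty
// signature are tallied separately and otherwise ignored.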
void Profiler::profileReadSet(Address addr, bool bf_filter_result, bool perfect_filter_result, NodeID id, int thread){
  // do NOT count instances when signature is empty!
  if(!bf_filter_result && !perfect_filter_result){
    m_readSetEmptyChecks++;
    return;
  }

  if(bf_filter_result != perfect_filter_result){
    m_readSetNoMatch++;
    /*
    // we have a false positive
    if(!m_readSetNoMatch_ptr->exist(addr)){
      m_readSetNoMatch_ptr->add(addr, 1);
    }
    else{
      (m_readSetNoMatch_ptr->lookup(addr))++;
    }
    */
  }
  else{
    m_readSetMatch++;
    /*
    // Bloom filter agrees with perfect filter
    if(!m_readSetMatch_ptr->exist(addr)){
      m_readSetMatch_ptr->add(addr, 1);
    }
    else{
      (m_readSetMatch_ptr->lookup(addr))++;
    }
    */
  }
}

void Profiler::profileRemoteReadSet(Address addr, bool bf_filter_result, bool perfect_filter_result, NodeID id, int thread){
  if(bf_filter_result != perfect_filter_result){
    // we have a false positive
    if(!m_remoteReadSetNoMatch_ptr->exist(addr)){
      m_remoteReadSetNoMatch_ptr->add(addr, 1);
    }
    else{
      (m_remoteReadSetNoMatch_ptr->lookup(addr))++;
    }
  }
  else{
    // Bloom filter agrees with perfect filter
    if(!m_remoteReadSetMatch_ptr->exist(addr)){
      m_remoteReadSetMatch_ptr->add(addr, 1);
    }
    else{
      (m_remoteReadSetMatch_ptr->lookup(addr))++;
    }
  }
}

void Profiler::profileWriteSet(Address addr, bool bf_filter_result, bool perfect_filter_result, NodeID id, int thread){
  // do NOT count instances when signature is empty!
  if(!bf_filter_result && !perfect_filter_result){
    m_writeSetEmptyChecks++;
    return;
  }

  if(bf_filter_result != perfect_filter_result){
    m_writeSetNoMatch++;
    /*
    // we have a false positive
    if(!m_writeSetNoMatch_ptr->exist(addr)){
      m_writeSetNoMatch_ptr->add(addr, 1);
    }
    else{
      (m_writeSetNoMatch_ptr->lookup(addr))++;
    }
    */
  }
  else{
    m_writeSetMatch++;
    /*
    // Bloom filter agrees with perfect filter
    if(!m_writeSetMatch_ptr->exist(addr)){
      m_writeSetMatch_ptr->add(addr, 1);
    }
    else{
      (m_writeSetMatch_ptr->lookup(addr))++;
    }
    */
  }
}

void Profiler::profileRemoteWriteSet(Address addr, bool bf_filter_result, bool perfect_filter_result, NodeID id, int thread){
  if(bf_filter_result != perfect_filter_result){
    // we have a false positive
    if(!m_remoteWriteSetNoMatch_ptr->exist(addr)){
      m_remoteWriteSetNoMatch_ptr->add(addr, 1);
    }
    else{
      (m_remoteWriteSetNoMatch_ptr->lookup(addr))++;
    }
  }
  else{
    // Bloom filter agrees with perfect filter
    if(!m_remoteWriteSetMatch_ptr->exist(addr)){
      m_remoteWriteSetMatch_ptr->add(addr, 1);
    }
    else{
      (m_remoteWriteSetMatch_ptr->lookup(addr))++;
    }
  }
}

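// Transaction overflow hooks: each call counts one occurrence (log overflow or cache
// overflow) and, when PROFILE_XACT or ATMTP debugging is enabled, emits a trace line
// with the cycle, processor, address, PC, and disassembled faulting instruction.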
void Profiler::profileTransactionLogOverflow(NodeID id, Address addr, Address pc){
  if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
    const int ID_SPACES = 3;
    const int TIME_SPACES = 7;
    physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
    integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
    const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
    (* debug_cout_ptr).flags(ios::right);
    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
    (* debug_cout_ptr) << setw(ID_SPACES) << id << " "
                       << " XACT LOG OVERFLOW"
                       << "  ADDR " << addr
                       << "  PC " << pc
                       << "  *PC 0x" << hex << myInst << dec
                       << " '" << myInstStr << "'"
                       << endl;
  }
  m_transactionLogOverflows++;
}

void Profiler::profileTransactionCacheOverflow(NodeID id, Address addr, Address pc){
  if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
    const int ID_SPACES = 3;
    const int TIME_SPACES = 7;
    physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
    integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
    const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
    (* debug_cout_ptr).flags(ios::right);
    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
    (* debug_cout_ptr) << setw(ID_SPACES) << id << " "
                       << " XACT CACHE OVERFLOW "
                       << "  ADDR " << addr
                       << "  PC " << pc
                       << "  *PC 0x" << hex << myInst << dec
                       << " '" << myInstStr << "'"
                       << endl;
  }
  m_transactionCacheOverflows++;
}

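// Trace-only hook: when transaction profiling or ATMTP debugging is enabled, prints
// the cps value observed at the given PC along with the disassembled instruction.
// No counters are updated here.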
void Profiler::profileGetCPS(NodeID id, uint32 cps, Address pc){
  if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
    const int ID_SPACES = 3;
    const int TIME_SPACES = 7;
    physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
    integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
    const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);

    (* debug_cout_ptr).flags(ios::right);
    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
    (* debug_cout_ptr) << setw(ID_SPACES) << id << " "
                       << " XACT GET CPS"
                       << "  PC " << pc
                       << "  *PC 0x" << hex << myInst << dec
                       << " '" << myInstStr << "'"
                       << "  CPS 0x" << hex << cps << dec
                       << endl;
  }
}
//---- end Transactional Memory CODE

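// Exception profiling: profileExceptionStart() tallies, per trap value, exceptions that
// begin while a transaction is active, and (with PROFILE_EXCEPTIONS set) both the
// _START and _DONE hooks print the trap value, trap level, PC/NPC, and, where it can be
// disassembled, the excepting instruction. Non-transactional events are skipped unless
// PROFILE_NONXACT is enabled.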
void Profiler::profileExceptionStart(bool xact, NodeID id, int thread, int val, int trap_level, uinteger_t pc, uinteger_t npc){
  if(xact){
    if(!m_xactExceptionMap_ptr->exist(val)){
      m_xactExceptionMap_ptr->add(val, 1);
    } else {
      (m_xactExceptionMap_ptr->lookup(val))++;
    }
  }

  if (!xact && !PROFILE_NONXACT) return;

  if(PROFILE_EXCEPTIONS){
    const int ID_SPACES = 3;
    const int TIME_SPACES = 7;
    // The actual processor number
    int proc_no = id*g_NUM_SMT_THREADS + thread;

    // get the excepting instruction; left NULL for instruction TLB misses (trap 0x64)
    // and untranslatable PCs, which cannot be disassembled here
    const char * instruction = NULL;
    physical_address_t addr = SIMICS_translate_address( proc_no, Address(pc));
    if(val != 0x64 && addr != 0x0){
      instruction = SIMICS_disassemble_physical( proc_no, addr );
    }

    (* debug_cout_ptr).flags(ios::right);
    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
    (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << " ]" << " ";
    if (xact)
      (* debug_cout_ptr) << " XACT Exception(";
    else
      (* debug_cout_ptr) << "      Exception(";

    (* debug_cout_ptr) << hex << val << dec << ")_START--Trap Level " << trap_level
                       << "--(PC=0x" << hex << pc << ", " << npc << ")"
                       << dec;

    if(val != 0x64 && addr != 0x0){
      (* debug_cout_ptr) << " instruction = " << instruction;
    }
    else{
      (* debug_cout_ptr) << " instruction = INSTRUCTION TLB MISS";
    }
    (* debug_cout_ptr) << dec << endl;
  }
}

void Profiler::profileExceptionDone(bool xact, NodeID id, int thread, int val, int trap_level, uinteger_t pc, uinteger_t npc, uinteger_t tpc, uinteger_t tnpc){
  if (!xact && !PROFILE_NONXACT) return;

  if (PROFILE_EXCEPTIONS){
    const int ID_SPACES = 3;
    const int TIME_SPACES = 7;
    // The actual processor number
    int proc_no = id*g_NUM_SMT_THREADS + thread;

    // get the excepting instruction
    // NOTE: the disassembled instruction is currently not included in the _DONE
    // trace line printed below.
    const char * instruction;
    instruction = SIMICS_disassemble_physical( proc_no, SIMICS_translate_address( proc_no, Address(pc) ) );

    (* debug_cout_ptr).flags(ios::right);
    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
    (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << " ]" << " ";
    if (xact)
      (* debug_cout_ptr) << " XACT Exception(";
    else
      (* debug_cout_ptr) << "      Exception(";

    (* debug_cout_ptr) << hex << val << dec << ")_DONE--Trap Level " << trap_level
                       << "--(PC=0x" << hex << pc << ", " << npc << dec << ")"
                       << "--(TPC=0x" << hex << tpc << ", " << tnpc << dec << ")"
                       << endl;
  }
}

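// rubyWatch() registers a watch address supplied by the simulated program: it reads the
// target from SPARC register %g1 (presumably placed there by the caller before the hook
// fires), prints it, and records it in m_watch_address_list_ptr, which watchAddress()
// consults.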
void Profiler::rubyWatch(int id){
    int rn_g1 = SIMICS_get_register_number(id, "g1");
    uint64 tr = SIMICS_read_register(id, rn_g1);
    Address watch_address = Address(tr);
    const int ID_SPACES = 3;
    const int TIME_SPACES = 7;

    (* debug_cout_ptr).flags(ios::right);
    (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
    (* debug_cout_ptr) << setw(ID_SPACES) << id << " "
                       << "RUBY WATCH "
                       << watch_address
                       << endl;

    if(!m_watch_address_list_ptr->exist(watch_address)){
      m_watch_address_list_ptr->add(watch_address, 1);
    }
}

bool Profiler::watchAddress(Address addr){
    return m_watch_address_list_ptr->exist(addr);
}

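// Per-transaction (xid) histograms of how many read- or write-filter bits were set when
// the transaction committed versus when it aborted.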
void Profiler::profileReadFilterBitsSet(int xid, int bits, bool isCommit) {
  if (isCommit) {
    if(!m_xactReadFilterBitsSetOnCommit->exist(xid)){
      Histogram hist;
      hist.add(bits);
      m_xactReadFilterBitsSetOnCommit->add(xid, hist);
    }
    else{
      (m_xactReadFilterBitsSetOnCommit->lookup(xid)).add(bits);
    }
  } else {
    if(!m_xactReadFilterBitsSetOnAbort->exist(xid)){
      Histogram hist;
      hist.add(bits);
      m_xactReadFilterBitsSetOnAbort->add(xid, hist);
    }
    else{
      (m_xactReadFilterBitsSetOnAbort->lookup(xid)).add(bits);
    }
  }
}

void Profiler::profileWriteFilterBitsSet(int xid, int bits, bool isCommit) {
  if (isCommit) {
    if(!m_xactWriteFilterBitsSetOnCommit->exist(xid)){
      Histogram hist;
      hist.add(bits);
      m_xactWriteFilterBitsSetOnCommit->add(xid, hist);
    }
    else{
      (m_xactWriteFilterBitsSetOnCommit->lookup(xid)).add(bits);
    }
  } else {
    if(!m_xactWriteFilterBitsSetOnAbort->exist(xid)){
      Histogram hist;
      hist.add(bits);
      m_xactWriteFilterBitsSetOnAbort->add(xid, hist);
    }
    else{
      (m_xactWriteFilterBitsSetOnAbort->lookup(xid)).add(bits);
    }
  }
}
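// The transaction-visualizer routines below were disabled when log_tm was
// decommissioned (see the note inside the comment block); they are kept for reference.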
/*
                        //gem5: Arka, for decommissioning log_tm

void Profiler::setXactVisualizerFile(char * filename){
  if ( (filename == NULL) ||
       (!strcmp(filename, "none")) ) {
    m_xact_visualizer_ptr = &cout;
    return;
  }

  if (m_xact_visualizer.is_open() ) {
    m_xact_visualizer.close ();
  }
  m_xact_visualizer.open (filename, std::ios::out);
  if (! m_xact_visualizer.is_open() ) {
    cerr << "setXactVisualizer: can't open file " << filename << endl;
  }
  else {
    m_xact_visualizer_ptr = &m_xact_visualizer;
  }
  cout << "setXactVisualizer file " << filename << endl;
}

void Profiler::printTransactionState(bool can_skip){
  if (!XACT_VISUALIZER) return;
  int num_processors = RubyConfig::numberOfProcessors() * RubyConfig::numberofSMTThreads();

  if (!g_system_ptr->getXactVisualizer()->existXactActivity() && can_skip)
    return;

  if (can_skip && ((g_eventQueue_ptr->getTime()/10000) <= m_xact_visualizer_last))
    return;

  Vector<char> xactStateVector = g_system_ptr->getXactVisualizer()->getTransactionStateVector();
  for (int i = 0 ; i < num_processors; i++){
    (* m_xact_visualizer_ptr) << xactStateVector[i] << " ";
  }
  (* m_xact_visualizer_ptr) << "   " << g_eventQueue_ptr->getTime() << endl;
  m_xact_visualizer_last = g_eventQueue_ptr->getTime() / 10000;
}
*/
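// Counters for the watchpoint mechanism: triggers that turned out to be false
// positives versus genuine hits.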
void Profiler::watchpointsFalsePositiveTrigger()
{
  m_watchpointsFalsePositiveTrigger++;
}

void Profiler::watchpointsTrueTrigger()
{
  m_watchpointsTrueTrigger++;
}

// For MemoryControl:
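// These are thin event hooks driven by the memory controller model: per-bank request
// counts plus busy, stall, and queue-occupancy cycle accumulators.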
void Profiler::profileMemReq(int bank) {
  m_memReq++;
  m_memBankCount[bank]++;
}
void Profiler::profileMemBankBusy() { m_memBankBusy++; }
void Profiler::profileMemBusBusy() { m_memBusBusy++; }
void Profiler::profileMemReadWriteBusy() { m_memReadWriteBusy++; }
void Profiler::profileMemDataBusBusy() { m_memDataBusBusy++; }
void Profiler::profileMemTfawBusy() { m_memTfawBusy++; }
void Profiler::profileMemRefresh() { m_memRefresh++; }
void Profiler::profileMemRead() { m_memRead++; }
void Profiler::profileMemWrite() { m_memWrite++; }
void Profiler::profileMemWaitCycles(int cycles) { m_memWaitCycles += cycles; }
void Profiler::profileMemInputQ(int cycles) { m_memInputQ += cycles; }
void Profiler::profileMemBankQ(int cycles) { m_memBankQ += cycles; }
void Profiler::profileMemArbWait(int cycles) { m_memArbWait += cycles; }
void Profiler::profileMemRandBusy() { m_memRandBusy++; }
void Profiler::profileMemNotOld() { m_memNotOld++; }


//----------- ATMTP -------------------//

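// ATMTP abort-cause hooks: each records an instruction that forces the current
// transaction to abort and, with PROFILE_XACT or ATMTP debugging enabled, prints the
// offending PC and its disassembly. TCC and unsupported instructions share the
// m_transactionUnsupInsts counter; save and restore share m_transactionSaveRestAborts.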
void Profiler::profileTransactionTCC(NodeID id, Address pc){
  if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
    physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
    integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
    const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);

    const int ID_SPACES = 3;
    const int TIME_SPACES = 7;
    cout.flags(ios::right);
    cout << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
    cout << setw(ID_SPACES) << id << " "
         << " XACT Aborting! Executed TCC "
         << "  PC: " << pc
         << "  *PC: 0x" << hex << myInst << dec
         << " '" << myInstStr << "'"
         << endl;
  }
  m_transactionUnsupInsts++;
}

void Profiler::profileTransactionUnsupInst(NodeID id, Address pc){
  if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
    physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
    integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
    const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);

    const int ID_SPACES = 3;
    const int TIME_SPACES = 7;
    cout.flags(ios::right);
    cout << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
    cout << setw(ID_SPACES) << id << " "
         << " XACT Aborting! Executed Unsupported Instruction "
         << "  PC: " << pc
         << "  *PC: 0x" << hex << myInst << dec
         << " '" << myInstStr << "'"
         << endl;
  }
  m_transactionUnsupInsts++;
}

void Profiler::profileTransactionSaveInst(NodeID id, Address pc){
  if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
    physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
    integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
    const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);

    const int ID_SPACES = 3;
    const int TIME_SPACES = 7;
    cout.flags(ios::right);
    cout << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
    cout << setw(ID_SPACES) << id << " "
         << " XACT Aborting! Executed Save Instruction "
         << "  PC: " << pc
         << "  *PC: 0x" << hex << myInst << dec
         << " '" << myInstStr << "'"
         << endl;
  }
  m_transactionSaveRestAborts++;
}

void Profiler::profileTransactionRestoreInst(NodeID id, Address pc){
  if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
    physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
    integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
    const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);

    const int ID_SPACES = 3;
    const int TIME_SPACES = 7;
    cout.flags(ios::right);
    cout << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
    cout << setw(ID_SPACES) << id << " "
         << " XACT Aborting! Executed Restore Instruction "
         << "  PC: " << pc
         << "  *PC: 0x" << hex << myInst << dec
         << " '" << myInstStr << "'"
         << endl;
  }
  m_transactionSaveRestAborts++;
}

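// Trace-only: with PROFILE_EXCEPTIONS set, dumps the timer state (TICK/TICK_CMPR,
// STICK/STICK_CMPR), trap level, PIL, PSTATE, and PC/NPC at a timer interrupt.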
void Profiler::profileTimerInterrupt(NodeID id,
                                     uinteger_t tick, uinteger_t tick_cmpr,
                                     uinteger_t stick, uinteger_t stick_cmpr,
                                     int trap_level,
                                     uinteger_t pc, uinteger_t npc,
                                     uinteger_t pstate, int pil){
  if (PROFILE_EXCEPTIONS) {
    const int ID_SPACES = 3;
    const int TIME_SPACES = 7;
    cout.flags(ios::right);
    cout << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
    cout << setw(ID_SPACES) << id << " ";
    cout << hex << "Timer--(Tick=0x" << tick << ", TckCmp=0x" << tick_cmpr
         << ", STick=0x" << stick << ", STickCmp=0x" << stick_cmpr
         << ")--(PC=" << pc << ", " << npc
         << dec << ")--(TL=" << trap_level << ", pil=" << pil
         << hex << ", pstate=0x" << pstate
         << dec << ")" << endl;
  }
}