Sequencer.cc: diff between revisions 13399:98f54e365584 (old) and 13974:af47a3ae0f6b (new)
 /*
  * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
  * met: redistributions of source code must retain the above copyright
  * notice, this list of conditions and the following disclaimer;

--- 46 unchanged lines hidden ---

 Sequencer::Sequencer(const Params *p)
     : RubyPort(p), m_IncompleteTimes(MachineType_NUM),
       deadlockCheckEvent([this]{ wakeup(); }, "Sequencer deadlock check")
 {
     m_outstanding_count = 0;

     m_instCache_ptr = p->icache;
     m_dataCache_ptr = p->dcache;
-    m_data_cache_hit_latency = p->dcache_hit_latency;
-    m_inst_cache_hit_latency = p->icache_hit_latency;
     m_max_outstanding_requests = p->max_outstanding_requests;
     m_deadlock_threshold = p->deadlock_threshold;

     m_coreId = p->coreid; // for tracking the two CorePair sequencers
     assert(m_max_outstanding_requests > 0);
     assert(m_deadlock_threshold > 0);
     assert(m_instCache_ptr != NULL);
     assert(m_dataCache_ptr != NULL);
-    assert(m_data_cache_hit_latency > 0);
-    assert(m_inst_cache_hit_latency > 0);

     m_runningGarnetStandalone = p->garnet_standalone;
 }

 Sequencer::~Sequencer()
 {
 }

--- 562 unchanged lines hidden ---

                                       RubyAccessMode_Supervisor, pkt,
                                       PrefetchBit_No, proc_id, core_id);

     DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %s\n",
              curTick(), m_version, "Seq", "Begin", "", "",
              printAddress(msg->getPhysicalAddress()),
              RubyRequestType_to_string(secondary_type));

-    // The Sequencer currently assesses instruction and data cache hit latency
-    // for the top-level caches at the beginning of a memory access.
-    // TODO: Eventually, this latency should be moved to represent the actual
-    // cache access latency portion of the memory access. This will require
-    // changing cache controller protocol files to assess the latency on the
-    // access response path.
-    Cycles latency(0); // Initialize to zero to catch misconfigured latency
-    if (secondary_type == RubyRequestType_IFETCH)
-        latency = m_inst_cache_hit_latency;
-    else
-        latency = m_data_cache_hit_latency;
-
-    // Send the message to the cache controller
+    Tick latency = cyclesToTicks(
+        m_controller->mandatoryQueueLatency(secondary_type));
     assert(latency > 0);

     assert(m_mandatory_q_ptr != NULL);
-    m_mandatory_q_ptr->enqueue(msg, clockEdge(), cyclesToTicks(latency));
+    m_mandatory_q_ptr->enqueue(msg, clockEdge(), latency);
 }

 template <class KEY, class VALUE>
 std::ostream &
 operator<<(ostream &out, const std::unordered_map<KEY, VALUE> &map)
 {
     auto i = map.begin();
     auto end = map.end();

--- 117 unchanged lines hidden ---
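The substance of the change is that the Sequencer stops choosing between its own icache/dcache hit-latency parameters and instead asks its cache controller, via mandatoryQueueLatency(secondary_type), how many cycles to charge before enqueueing on the mandatory queue, converting the result to ticks with cyclesToTicks(). The sketch below is a minimal standalone model of that inversion of responsibility; the class and member names are invented for illustration and are not the gem5 API.

// Standalone model of the latency-selection change above -- NOT gem5 code.
// ReqType, DemoController, DemoL1Controller and the latency fields are
// illustrative stand-ins for RubyRequestType, the Sequencer's controller,
// and the removed icache/dcache hit-latency parameters.
#include <cstdint>
#include <iostream>

enum class ReqType { Ifetch, LoadStore };
using Cycles = std::uint64_t;

// New scheme: the controller owns the mandatory-queue latency and a
// protocol-specific controller can specialize it per request type.
struct DemoController {
    virtual ~DemoController() = default;
    virtual Cycles mandatoryQueueLatency(ReqType) const { return 1; }
};

// A controller that reproduces the old Sequencer behaviour: instruction
// fetches are charged one latency, loads/stores another.
struct DemoL1Controller : DemoController {
    Cycles inst_hit_latency = 1;
    Cycles data_hit_latency = 2;
    Cycles mandatoryQueueLatency(ReqType t) const override {
        return t == ReqType::Ifetch ? inst_hit_latency : data_hit_latency;
    }
};

int main()
{
    DemoL1Controller ctrl;
    // The old code selected between m_inst_cache_hit_latency and
    // m_data_cache_hit_latency inside the Sequencer; the new code simply
    // asks the controller, as modelled here.
    std::cout << "ifetch: " << ctrl.mandatoryQueueLatency(ReqType::Ifetch)
              << " cycles, load/store: "
              << ctrl.mandatoryQueueLatency(ReqType::LoadStore)
              << " cycles\n";
    return 0;
}

Pushing the decision into the controller is in line with the removed TODO comment: the protocol side, rather than the Sequencer, now decides how much latency a mandatory-queue enqueue should be charged.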