Sequencer.cc revision 11321:02e930db812d
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "arch/x86/ldstflags.hh"
#include "base/misc.hh"
#include "base/str.hh"
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubySequencer.hh"
#include "debug/RubyStats.hh"
#include "mem/protocol/PrefetchBit.hh"
#include "mem/protocol/RubyAccessMode.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/packet.hh"
#include "sim/system.hh"

using namespace std;

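// Factory method invoked through the RubySequencerParams object to construct
// the Sequencer SimObject from its configuration parameters.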
Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

Sequencer::Sequencer(const Params *p)
    : RubyPort(p), m_IncompleteTimes(MachineType_NUM), deadlockCheckEvent(this)
{
    m_outstanding_count = 0;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_data_cache_hit_latency = p->dcache_hit_latency;
    m_inst_cache_hit_latency = p->icache_hit_latency;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    m_coreId = p->coreid; // for tracking the two CorePair sequencers
    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);
    assert(m_data_cache_hit_latency > 0);
    assert(m_inst_cache_hit_latency > 0);

    m_usingNetworkTester = p->using_network_tester;
}

Sequencer::~Sequencer()
{
}

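// Deadlock-check event handler.  Scans every outstanding read and write
// request and panics if any of them has been outstanding for longer than
// m_deadlock_threshold cycles; the check is rescheduled as long as requests
// remain outstanding.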
void
Sequencer::wakeup()
{
    assert(drainState() != DrainState::Draining);

    // Check for deadlock of any of the requests
    Cycles current_time = curCycle();

    // Check across all outstanding requests
    int total_outstanding = 0;

    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_readRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->pkt->getAddr(), m_readRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) - (request->issue_time * clockPeriod()));
    }

    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->pkt->getAddr(), m_writeRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) - (request->issue_time * clockPeriod()));
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }
}

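// Reset all latency histograms and per-machine delay statistics gathered by
// this sequencer.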
void Sequencer::resetStats()
{
    m_latencyHist.reset();
    m_hitLatencyHist.reset();
    m_missLatencyHist.reset();
    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist[i]->reset();
        m_hitTypeLatencyHist[i]->reset();
        m_missTypeLatencyHist[i]->reset();
        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i][j]->reset();
            m_missTypeMachLatencyHist[i][j]->reset();
        }
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_missMachLatencyHist[i]->reset();
        m_hitMachLatencyHist[i]->reset();

        m_IssueToInitialDelayHist[i]->reset();
        m_InitialToForwardDelayHist[i]->reset();
        m_ForwardToFirstResponseDelayHist[i]->reset();
        m_FirstResponseToCompletionDelayHist[i]->reset();

        m_IncompleteTimes[i] = 0;
    }
}

// Insert the request into the correct request table.  Return
// RequestStatus_Aliased if a request for the same cache line is already
// outstanding, otherwise RequestStatus_Ready.
RequestStatus
Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
{
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    // See if we should schedule a deadlock check
    if (!deadlockCheckEvent.scheduled() &&
        drainState() != DrainState::Draining) {
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }

    Addr line_addr = makeLineAddress(pkt->getAddr());
    // Create a default entry mapping the address to NULL; the cast is
    // there to make gcc 4.4 happy
    RequestTable::value_type default_entry(line_addr,
                                           (SequencerRequest*) NULL);

    if ((request_type == RubyRequestType_ST) ||
        (request_type == RubyRequestType_RMW_Read) ||
        (request_type == RubyRequestType_RMW_Write) ||
        (request_type == RubyRequestType_Load_Linked) ||
        (request_type == RubyRequestType_Store_Conditional) ||
        (request_type == RubyRequestType_Locked_RMW_Read) ||
        (request_type == RubyRequestType_Locked_RMW_Write) ||
        (request_type == RubyRequestType_FLUSH)) {

        // Check if there is any outstanding read request for the same
        // cache line.
        if (m_readRequestTable.count(line_addr) > 0) {
            m_store_waiting_on_load++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(default_entry);
        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding write request for the cache line
            m_store_waiting_on_store++;
            return RequestStatus_Aliased;
        }
    } else {
        // Check if there is any outstanding write request for the same
        // cache line.
        if (m_writeRequestTable.count(line_addr) > 0) {
            m_load_waiting_on_store++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(default_entry);

        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding read request for the cache line
            m_load_waiting_on_load++;
            return RequestStatus_Aliased;
        }
    }

    m_outstandReqHist.sample(m_outstanding_count);
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    return RequestStatus_Ready;
}

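// Account for a request that has just been erased from one of the request
// tables.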
void
Sequencer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}

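// Clear any LL/SC lock held on the given line, e.g. when the controller
// loses coherence permissions before the matching store conditional arrives.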
void
Sequencer::invalidateSC(Addr address)
{
    AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
    // The controller has lost the coherence permissions, hence the lock
    // on the cache line maintained by the cache should be cleared.
    if (e && e->isLocked(m_version)) {
        e->clearLocked();
    }
}

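// Perform LL/SC bookkeeping for the given line.  Returns false only when a
// store conditional finds that the line is no longer locked; all other
// request types return true.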
bool
Sequencer::handleLlsc(Addr address, SequencerRequest* request)
{
    AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
    if (!e)
        return true;

    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    bool success = true;
    if (request->m_type == RubyRequestType_Store_Conditional) {
        if (!e->isLocked(m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        e->clearLocked();
    } else if (request->m_type == RubyRequestType_Load_Linked) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        e->setLocked(m_version);
    } else if (e->isLocked(m_version)) {
        //
        // Normal writes should clear the locked address
        //
        e->clearLocked();
    }
    return success;
}

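// Record latency statistics for a completed demand access, separating hits
// from external (miss) responses.  When the per-stage timestamps are
// monotonic, the individual issue/forward/response delays are also sampled;
// otherwise the access is counted in m_IncompleteTimes.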
void
Sequencer::recordMissLatency(const Cycles cycles, const RubyRequestType type,
                             const MachineType respondingMach,
                             bool isExternalHit, Cycles issuedTime,
                             Cycles initialRequestTime,
                             Cycles forwardRequestTime,
                             Cycles firstResponseTime, Cycles completionTime)
{
    m_latencyHist.sample(cycles);
    m_typeLatencyHist[type]->sample(cycles);

    if (isExternalHit) {
        m_missLatencyHist.sample(cycles);
        m_missTypeLatencyHist[type]->sample(cycles);

        if (respondingMach != MachineType_NUM) {
            m_missMachLatencyHist[respondingMach]->sample(cycles);
            m_missTypeMachLatencyHist[type][respondingMach]->sample(cycles);

            if ((issuedTime <= initialRequestTime) &&
                (initialRequestTime <= forwardRequestTime) &&
                (forwardRequestTime <= firstResponseTime) &&
                (firstResponseTime <= completionTime)) {

                m_IssueToInitialDelayHist[respondingMach]->sample(
                    initialRequestTime - issuedTime);
                m_InitialToForwardDelayHist[respondingMach]->sample(
                    forwardRequestTime - initialRequestTime);
                m_ForwardToFirstResponseDelayHist[respondingMach]->sample(
                    firstResponseTime - forwardRequestTime);
                m_FirstResponseToCompletionDelayHist[respondingMach]->sample(
                    completionTime - firstResponseTime);
            } else {
                m_IncompleteTimes[respondingMach]++;
            }
        }
    } else {
        m_hitLatencyHist.sample(cycles);
        m_hitTypeLatencyHist[type]->sample(cycles);

        if (respondingMach != MachineType_NUM) {
            m_hitMachLatencyHist[respondingMach]->sample(cycles);
            m_hitTypeMachLatencyHist[type][respondingMach]->sample(cycles);
        }
    }
}

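// Completion callback for write-class requests (stores, atomics, RMWs,
// LL/SC, and flushes).  Removes the request from the write table, applies
// LL/SC handling, and forwards to hitCallback().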
void
Sequencer::writeCallback(Addr address, DataBlock& data,
                         const bool externalHit, const MachineType mach,
                         const Cycles initialRequestTime,
                         const Cycles forwardRequestTime,
                         const Cycles firstResponseTime)
{
    assert(address == makeLineAddress(address));
    assert(m_writeRequestTable.count(makeLineAddress(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    SequencerRequest* request = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_ST) ||
           (request->m_type == RubyRequestType_ATOMIC) ||
           (request->m_type == RubyRequestType_RMW_Read) ||
           (request->m_type == RubyRequestType_RMW_Write) ||
           (request->m_type == RubyRequestType_Load_Linked) ||
           (request->m_type == RubyRequestType_Store_Conditional) ||
           (request->m_type == RubyRequestType_Locked_RMW_Read) ||
           (request->m_type == RubyRequestType_Locked_RMW_Write) ||
           (request->m_type == RubyRequestType_FLUSH));

    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    // Not valid for the Network_test protocol.
    //
    bool success = true;
    if (!m_usingNetworkTester)
        success = handleLlsc(address, request);

    if (request->m_type == RubyRequestType_Locked_RMW_Read) {
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->m_type == RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, data, success, mach, externalHit,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

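// Completion callback for loads and instruction fetches.  Removes the
// request from the read table and forwards to hitCallback().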
void
Sequencer::readCallback(Addr address, DataBlock& data,
                        bool externalHit, const MachineType mach,
                        Cycles initialRequestTime,
                        Cycles forwardRequestTime,
                        Cycles firstResponseTime)
{
    assert(address == makeLineAddress(address));
    assert(m_readRequestTable.count(makeLineAddress(address)));

    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    SequencerRequest* request = i->second;

    m_readRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_LD) ||
           (request->m_type == RubyRequestType_IFETCH));

    hitCallback(request, data, true, mach, externalHit,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

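// Common completion path for all requests: profiles the total latency,
// copies data between the packet and the data block, updates the RubyTester
// state if present, and finally hands the packet back to the port (or to the
// cache recorder during warmup/cooldown).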
void
Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data,
                       bool llscSuccess,
                       const MachineType mach, const bool externalHit,
                       const Cycles initialRequestTime,
                       const Cycles forwardRequestTime,
                       const Cycles firstResponseTime)
{
    warn_once("Replacement policy updates recently became the responsibility "
              "of SLICC state machines. Make sure to setMRU() near callbacks "
              "in .sm files!");

    PacketPtr pkt = srequest->pkt;
    Addr request_address(pkt->getAddr());
    RubyRequestType type = srequest->m_type;
    Cycles issued_time = srequest->issue_time;

    assert(curCycle() >= issued_time);
    Cycles total_latency = curCycle() - issued_time;

    // Profile the latency for all demand accesses.
    recordMissLatency(total_latency, type, mach, externalHit, issued_time,
                      initialRequestTime, forwardRequestTime,
                      firstResponseTime, curCycle());

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %d cycles\n",
             curTick(), m_version, "Seq",
             llscSuccess ? "Done" : "SC_Failed", "", "",
             printAddress(request_address), total_latency);

    // Update the data unless it is a non-data-carrying flush
    if (RubySystem::getWarmupEnabled()) {
        data.setData(pkt->getConstPtr<uint8_t>(),
                     getOffset(request_address), pkt->getSize());
    } else if (!pkt->isFlush()) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked)) {
            memcpy(pkt->getPtr<uint8_t>(),
                   data.getData(getOffset(request_address), pkt->getSize()),
                   pkt->getSize());
            DPRINTF(RubySequencer, "read data %s\n", data);
        } else {
            data.setData(pkt->getConstPtr<uint8_t>(),
                         getOffset(request_address), pkt->getSize());
            DPRINTF(RubySequencer, "set data %s\n", data);
        }
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data.  The tester will later access
    // this state.
    if (m_usingRubyTester) {
        DPRINTF(RubySequencer, "hitCallback %s 0x%x using RubyTester\n",
                pkt->cmdString(), pkt->getAddr());
        RubyTester::SenderState* testerSenderState =
            pkt->findNextSenderState<RubyTester::SenderState>();
        assert(testerSenderState);
        testerSenderState->subBlock.mergeFrom(data);
    }

    delete srequest;

    RubySystem *rs = m_ruby_system;
    if (RubySystem::getWarmupEnabled()) {
        assert(pkt->req);
        delete pkt->req;
        delete pkt;
        rs->m_cache_recorder->enqueueNextFetchRequest();
    } else if (RubySystem::getCooldownEnabled()) {
        delete pkt;
        rs->m_cache_recorder->enqueueNextFlushRequest();
    } else {
        ruby_hit_callback(pkt);
        testDrainComplete();
    }
}

bool
Sequencer::empty() const
{
    return m_writeRequestTable.empty() && m_readRequestTable.empty();
}

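// Entry point for memory requests arriving from the CPU-side ports.
// Classifies the packet into primary and secondary Ruby request types,
// inserts it into the appropriate request table, and issues it to the cache
// controller.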
RequestStatus
Sequencer::makeRequest(PacketPtr pkt)
{
    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    RubyRequestType primary_type = RubyRequestType_NULL;
    RubyRequestType secondary_type = RubyRequestType_NULL;

    if (pkt->isLLSC()) {
        //
        // Alpha LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics. In
        // particular, by identifying the operations as atomic, the protocol
        // should understand that migratory sharing optimizations should not
        // be performed (i.e. a load between the LL and SC should not steal
        // away exclusive permission).
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing SC\n");
            primary_type = RubyRequestType_Store_Conditional;
        } else {
            DPRINTF(RubySequencer, "Issuing LL\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Load_Linked;
        }
        secondary_type = RubyRequestType_ATOMIC;
    } else if (pkt->req->isLockedRMW()) {
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
            primary_type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Locked_RMW_Read;
        }
        secondary_type = RubyRequestType_ST;
    } else {
        if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                primary_type = secondary_type = RubyRequestType_IFETCH;
            } else {
                bool storeCheck = false;
                // Only x86 needs the store check
                if (system->getArch() == Arch::X86ISA) {
                    uint32_t flags = pkt->req->getFlags();
                    storeCheck = flags &
                        (X86ISA::StoreCheck << X86ISA::FlagShift);
                }
                if (storeCheck) {
                    primary_type = RubyRequestType_RMW_Read;
                    secondary_type = RubyRequestType_ST;
                } else {
                    primary_type = secondary_type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isWrite()) {
            //
            // Note: M5 packets do not differentiate ST from RMW_Write
            //
            primary_type = secondary_type = RubyRequestType_ST;
        } else if (pkt->isFlush()) {
            primary_type = secondary_type = RubyRequestType_FLUSH;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    RequestStatus status = insertRequest(pkt, primary_type);
    if (status != RequestStatus_Ready)
        return status;

    issueRequest(pkt, secondary_type);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}

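// Construct a RubyRequest for the packet and enqueue it on the mandatory
// queue after charging the L1 instruction or data cache hit latency.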
void
Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
{
    assert(pkt != NULL);
    ContextID proc_id = pkt->req->hasContextId() ?
        pkt->req->contextId() : InvalidContextID;

    ContextID core_id = coreId();

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    // Check if the packet has data; for example, prefetch and flush
    // requests do not.
    std::shared_ptr<RubyRequest> msg =
        std::make_shared<RubyRequest>(clockEdge(), pkt->getAddr(),
                                      pkt->isFlush() ?
                                      nullptr : pkt->getPtr<uint8_t>(),
                                      pkt->getSize(), pc, secondary_type,
                                      RubyAccessMode_Supervisor, pkt,
                                      PrefetchBit_No, proc_id, core_id);

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %s\n",
            curTick(), m_version, "Seq", "Begin", "", "",
            printAddress(msg->getPhysicalAddress()),
            RubyRequestType_to_string(secondary_type));

    // The Sequencer currently assesses instruction and data cache hit latency
    // for the top-level caches at the beginning of a memory access.
    // TODO: Eventually, this latency should be moved to represent the actual
    // cache access latency portion of the memory access. This will require
    // changing cache controller protocol files to assess the latency on the
    // access response path.
    Cycles latency(0);  // Initialize to zero to catch misconfigured latency
    if (secondary_type == RubyRequestType_IFETCH)
        latency = m_inst_cache_hit_latency;
    else
        latency = m_data_cache_hit_latency;

    // Send the message to the cache controller
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, clockEdge(), cyclesToTicks(latency));
}

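// Helper for printing the read/write request tables in Sequencer::print().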
template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const std::unordered_map<KEY, VALUE> &map)
{
    auto i = map.begin();
    auto end = map.end();

    out << "[";
    for (; i != end; ++i)
        out << " " << i->first << "=" << i->second;
    out << " ]";

    return out;
}

void
Sequencer::print(ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", read request table: " << m_readRequestTable
        << ", write request table: " << m_writeRequestTable
        << "]";
}

// This can be called from setState whenever coherence permissions are
// upgraded.  When invoked, coherence violations will be checked for the
// given block.
void
Sequencer::checkCoherence(Addr addr)
{
#ifdef CHECK_COHERENCE
    m_ruby_system->checkGlobalCoherenceInvariant(addr);
#endif
}

void
Sequencer::recordRequestType(SequencerRequestType requestType)
{
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            SequencerRequestType_to_string(requestType));
}


void
Sequencer::evictionCallback(Addr address)
{
    ruby_eviction_callback(address);
}

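// Register and initialize all per-sequencer statistics.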
void
Sequencer::regStats()
{
    m_store_waiting_on_load
        .name(name() + ".store_waiting_on_load")
        .desc("Number of times a store aliased with a pending load")
        .flags(Stats::nozero);
    m_store_waiting_on_store
        .name(name() + ".store_waiting_on_store")
        .desc("Number of times a store aliased with a pending store")
        .flags(Stats::nozero);
    m_load_waiting_on_load
        .name(name() + ".load_waiting_on_load")
        .desc("Number of times a load aliased with a pending load")
        .flags(Stats::nozero);
    m_load_waiting_on_store
        .name(name() + ".load_waiting_on_store")
        .desc("Number of times a load aliased with a pending store")
        .flags(Stats::nozero);

    // These statistical variables are not for display.
    // The profiler will collate these across different
    // sequencers and display those collated statistics.
    m_outstandReqHist.init(10);
    m_latencyHist.init(10);
    m_hitLatencyHist.init(10);
    m_missLatencyHist.init(10);

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist.push_back(new Stats::Histogram());
        m_typeLatencyHist[i]->init(10);

        m_hitTypeLatencyHist.push_back(new Stats::Histogram());
        m_hitTypeLatencyHist[i]->init(10);

        m_missTypeLatencyHist.push_back(new Stats::Histogram());
        m_missTypeLatencyHist[i]->init(10);
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_hitMachLatencyHist.push_back(new Stats::Histogram());
        m_hitMachLatencyHist[i]->init(10);

        m_missMachLatencyHist.push_back(new Stats::Histogram());
        m_missMachLatencyHist[i]->init(10);

        m_IssueToInitialDelayHist.push_back(new Stats::Histogram());
        m_IssueToInitialDelayHist[i]->init(10);

        m_InitialToForwardDelayHist.push_back(new Stats::Histogram());
        m_InitialToForwardDelayHist[i]->init(10);

        m_ForwardToFirstResponseDelayHist.push_back(new Stats::Histogram());
        m_ForwardToFirstResponseDelayHist[i]->init(10);

        m_FirstResponseToCompletionDelayHist.push_back(new Stats::Histogram());
        m_FirstResponseToCompletionDelayHist[i]->init(10);
    }

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_hitTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());
        m_missTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());

        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_hitTypeMachLatencyHist[i][j]->init(10);

            m_missTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_missTypeMachLatencyHist[i][j]->init(10);
        }
    }
}