/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/system/Sequencer.hh"

#include "arch/x86/ldstflags.hh"
#include "base/logging.hh"
#include "base/str.hh"
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubySequencer.hh"
#include "debug/RubyStats.hh"
#include "mem/packet.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/protocol/PrefetchBit.hh"
#include "mem/ruby/protocol/RubyAccessMode.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "sim/system.hh"

using namespace std;

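// Factory method used by the generated RubySequencerParams class to
// construct a Sequencer from its Python configuration parameters.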
Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

Sequencer::Sequencer(const Params *p)
    : RubyPort(p), m_IncompleteTimes(MachineType_NUM),
      deadlockCheckEvent([this]{ wakeup(); }, "Sequencer deadlock check")
{
    m_outstanding_count = 0;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    m_coreId = p->coreid; // for tracking the two CorePair sequencers
    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);

    m_runningGarnetStandalone = p->garnet_standalone;
}

Sequencer::~Sequencer()
{
}

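// Deadlock check, triggered by deadlockCheckEvent: panic if any outstanding
// request has been waiting longer than m_deadlock_threshold cycles, and
// reschedule the check while requests remain outstanding.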
void
Sequencer::wakeup()
{
    assert(drainState() != DrainState::Draining);

    // Check for deadlock of any of the requests
    Cycles current_time = curCycle();

    // Check across all outstanding requests
    int total_outstanding = 0;

    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_readRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->pkt->getAddr(), m_readRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) - (request->issue_time * clockPeriod()));
    }

    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->pkt->getAddr(), m_writeRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) - (request->issue_time * clockPeriod()));
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }
}

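// Reset all latency histograms and per-machine delay counters.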
void Sequencer::resetStats()
{
    m_latencyHist.reset();
    m_hitLatencyHist.reset();
    m_missLatencyHist.reset();
    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist[i]->reset();
        m_hitTypeLatencyHist[i]->reset();
        m_missTypeLatencyHist[i]->reset();
        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i][j]->reset();
            m_missTypeMachLatencyHist[i][j]->reset();
        }
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_missMachLatencyHist[i]->reset();
        m_hitMachLatencyHist[i]->reset();

        m_IssueToInitialDelayHist[i]->reset();
        m_InitialToForwardDelayHist[i]->reset();
        m_ForwardToFirstResponseDelayHist[i]->reset();
        m_FirstResponseToCompletionDelayHist[i]->reset();

        m_IncompleteTimes[i] = 0;
    }
}

// Insert the request into the appropriate request table. Returns
// RequestStatus_Aliased if the request conflicts with an outstanding request
// for the same cache line (or the line is blocked for a Locked_RMW),
// otherwise RequestStatus_Ready.
RequestStatus
Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
{
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    // See if we should schedule a deadlock check
    if (!deadlockCheckEvent.scheduled() &&
        drainState() != DrainState::Draining) {
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }

    Addr line_addr = makeLineAddress(pkt->getAddr());

    // Check if the line is blocked for a Locked_RMW
    if (m_controller->isBlocked(line_addr) &&
        (request_type != RubyRequestType_Locked_RMW_Write)) {
        // Return that this request's cache line address aliases with
        // a prior request that locked the cache line. The request cannot
        // proceed until the cache line is unlocked by a Locked_RMW_Write
        return RequestStatus_Aliased;
    }

    // Create a default entry, mapping the address to NULL, the cast is
    // there to make gcc 4.4 happy
    RequestTable::value_type default_entry(line_addr,
                                           (SequencerRequest*) NULL);

    if ((request_type == RubyRequestType_ST) ||
        (request_type == RubyRequestType_RMW_Read) ||
        (request_type == RubyRequestType_RMW_Write) ||
        (request_type == RubyRequestType_Load_Linked) ||
        (request_type == RubyRequestType_Store_Conditional) ||
        (request_type == RubyRequestType_Locked_RMW_Read) ||
        (request_type == RubyRequestType_Locked_RMW_Write) ||
        (request_type == RubyRequestType_FLUSH)) {

        // Check if there is any outstanding read request for the same
        // cache line.
        if (m_readRequestTable.count(line_addr) > 0) {
            m_store_waiting_on_load++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(default_entry);
        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding write request for the cache line
            m_store_waiting_on_store++;
            return RequestStatus_Aliased;
        }
    } else {
        // Check if there is any outstanding write request for the same
        // cache line.
        if (m_writeRequestTable.count(line_addr) > 0) {
            m_load_waiting_on_store++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(default_entry);

        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding read request for the cache line
            m_load_waiting_on_load++;
            return RequestStatus_Aliased;
        }
    }

    m_outstandReqHist.sample(m_outstanding_count);
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    return RequestStatus_Ready;
}

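// Called after a request has been erased from one of the request tables;
// keeps m_outstanding_count consistent with the table sizes.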
void
Sequencer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}

void
Sequencer::invalidateSC(Addr address)
{
    AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
    // The controller has lost the coherence permissions, hence the lock
    // on the cache line maintained by the cache should be cleared.
    if (e && e->isLocked(m_version)) {
        e->clearLocked();
    }
}

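// Perform the lock bookkeeping for LL/SC requests: LL locks the line, SC
// succeeds only if the line is still locked (reporting the outcome through
// the packet's extra data), and ordinary writes clear any existing lock.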
bool
Sequencer::handleLlsc(Addr address, SequencerRequest* request)
{
    AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
    if (!e)
        return true;

    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    bool success = true;
    if (request->m_type == RubyRequestType_Store_Conditional) {
        if (!e->isLocked(m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        e->clearLocked();
    } else if (request->m_type == RubyRequestType_Load_Linked) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        e->setLocked(m_version);
    } else if (e->isLocked(m_version)) {
        //
        // Normal writes should clear the locked address
        //
        e->clearLocked();
    }
    return success;
}

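// Profile the latency of a completed request. External hits (misses) also
// record the per-machine breakdown of issue/forward/response delays when the
// recorded timestamps are monotonically ordered; otherwise the sample is
// counted as incomplete.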
void
Sequencer::recordMissLatency(const Cycles cycles, const RubyRequestType type,
                             const MachineType respondingMach,
                             bool isExternalHit, Cycles issuedTime,
                             Cycles initialRequestTime,
                             Cycles forwardRequestTime,
                             Cycles firstResponseTime, Cycles completionTime)
{
    m_latencyHist.sample(cycles);
    m_typeLatencyHist[type]->sample(cycles);

    if (isExternalHit) {
        m_missLatencyHist.sample(cycles);
        m_missTypeLatencyHist[type]->sample(cycles);

        if (respondingMach != MachineType_NUM) {
            m_missMachLatencyHist[respondingMach]->sample(cycles);
            m_missTypeMachLatencyHist[type][respondingMach]->sample(cycles);

            if ((issuedTime <= initialRequestTime) &&
                (initialRequestTime <= forwardRequestTime) &&
                (forwardRequestTime <= firstResponseTime) &&
                (firstResponseTime <= completionTime)) {

                m_IssueToInitialDelayHist[respondingMach]->sample(
                    initialRequestTime - issuedTime);
                m_InitialToForwardDelayHist[respondingMach]->sample(
                    forwardRequestTime - initialRequestTime);
                m_ForwardToFirstResponseDelayHist[respondingMach]->sample(
                    firstResponseTime - forwardRequestTime);
                m_FirstResponseToCompletionDelayHist[respondingMach]->sample(
                    completionTime - firstResponseTime);
            } else {
                m_IncompleteTimes[respondingMach]++;
            }
        }
    } else {
        m_hitLatencyHist.sample(cycles);
        m_hitTypeLatencyHist[type]->sample(cycles);

        if (respondingMach != MachineType_NUM) {
            m_hitMachLatencyHist[respondingMach]->sample(cycles);
            m_hitTypeMachLatencyHist[type][respondingMach]->sample(cycles);
        }
    }
}

void
Sequencer::writeCallback(Addr address, DataBlock& data,
                         const bool externalHit, const MachineType mach,
                         const Cycles initialRequestTime,
                         const Cycles forwardRequestTime,
                         const Cycles firstResponseTime)
{
    assert(address == makeLineAddress(address));
    assert(m_writeRequestTable.count(makeLineAddress(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    SequencerRequest* request = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_ST) ||
           (request->m_type == RubyRequestType_ATOMIC) ||
           (request->m_type == RubyRequestType_RMW_Read) ||
           (request->m_type == RubyRequestType_RMW_Write) ||
           (request->m_type == RubyRequestType_Load_Linked) ||
           (request->m_type == RubyRequestType_Store_Conditional) ||
           (request->m_type == RubyRequestType_Locked_RMW_Read) ||
           (request->m_type == RubyRequestType_Locked_RMW_Write) ||
           (request->m_type == RubyRequestType_FLUSH));

    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    // Not valid for the Garnet_standalone protocol
    //
    bool success = true;
    if (!m_runningGarnetStandalone)
        success = handleLlsc(address, request);

    // Handle SLICC block_on behavior for Locked_RMW accesses. NOTE: the
    // address variable here is assumed to be a line address, so when
    // blocking buffers, must check line addresses.
    if (request->m_type == RubyRequestType_Locked_RMW_Read) {
        // blockOnQueue blocks all first-level cache controller queues
        // waiting on memory accesses for the specified address that go to
        // the specified queue. In this case, a Locked_RMW_Write must go to
        // the mandatory_q before unblocking the first-level controller.
        // This will block standard loads, stores, ifetches, etc.
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->m_type == RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, data, success, mach, externalHit,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

void
Sequencer::readCallback(Addr address, DataBlock& data,
                        bool externalHit, const MachineType mach,
                        Cycles initialRequestTime,
                        Cycles forwardRequestTime,
                        Cycles firstResponseTime)
{
    assert(address == makeLineAddress(address));
    assert(m_readRequestTable.count(makeLineAddress(address)));

    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    SequencerRequest* request = i->second;

    m_readRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_LD) ||
           (request->m_type == RubyRequestType_IFETCH));

    hitCallback(request, data, true, mach, externalHit,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

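// Common completion path for reads and writes: profile the latency, copy
// data between the packet and the cache block, update the RubyTester state
// if present, and hand the packet back to the port (or to the cache recorder
// during warmup/cooldown).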
void
Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data,
                       bool llscSuccess,
                       const MachineType mach, const bool externalHit,
                       const Cycles initialRequestTime,
                       const Cycles forwardRequestTime,
                       const Cycles firstResponseTime)
{
    warn_once("Replacement policy updates recently became the responsibility "
              "of SLICC state machines. Make sure to setMRU() near callbacks "
              "in .sm files!");

    PacketPtr pkt = srequest->pkt;
    Addr request_address(pkt->getAddr());
    RubyRequestType type = srequest->m_type;
    Cycles issued_time = srequest->issue_time;

    assert(curCycle() >= issued_time);
    Cycles total_latency = curCycle() - issued_time;

    // Profile the latency for all demand accesses.
    recordMissLatency(total_latency, type, mach, externalHit, issued_time,
                      initialRequestTime, forwardRequestTime,
                      firstResponseTime, curCycle());

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %d cycles\n",
             curTick(), m_version, "Seq",
             llscSuccess ? "Done" : "SC_Failed", "", "",
             printAddress(request_address), total_latency);

    // update the data unless it is a non-data-carrying flush
    if (RubySystem::getWarmupEnabled()) {
        data.setData(pkt->getConstPtr<uint8_t>(),
                     getOffset(request_address), pkt->getSize());
    } else if (!pkt->isFlush()) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked)) {
            pkt->setData(
                data.getData(getOffset(request_address), pkt->getSize()));
            DPRINTF(RubySequencer, "read data %s\n", data);
        } else if (pkt->req->isSwap()) {
            std::vector<uint8_t> overwrite_val(pkt->getSize());
            pkt->writeData(&overwrite_val[0]);
            pkt->setData(
                data.getData(getOffset(request_address), pkt->getSize()));
            data.setData(&overwrite_val[0],
                         getOffset(request_address), pkt->getSize());
            DPRINTF(RubySequencer, "swap data %s\n", data);
        } else if (type != RubyRequestType_Store_Conditional || llscSuccess) {
            // Types of stores set the actual data here, apart from
            // failed Store Conditional requests
            data.setData(pkt->getConstPtr<uint8_t>(),
                         getOffset(request_address), pkt->getSize());
            DPRINTF(RubySequencer, "set data %s\n", data);
        }
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data.  The tester will later access
    // this state.
    if (m_usingRubyTester) {
        DPRINTF(RubySequencer, "hitCallback %s 0x%x using RubyTester\n",
                pkt->cmdString(), pkt->getAddr());
        RubyTester::SenderState* testerSenderState =
            pkt->findNextSenderState<RubyTester::SenderState>();
        assert(testerSenderState);
        testerSenderState->subBlock.mergeFrom(data);
    }

    delete srequest;

    RubySystem *rs = m_ruby_system;
    if (RubySystem::getWarmupEnabled()) {
        assert(pkt->req);
        delete pkt;
        rs->m_cache_recorder->enqueueNextFetchRequest();
    } else if (RubySystem::getCooldownEnabled()) {
        delete pkt;
        rs->m_cache_recorder->enqueueNextFlushRequest();
    } else {
        ruby_hit_callback(pkt);
        testDrainComplete();
    }
}

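// True when no read or write requests are outstanding.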
bool
Sequencer::empty() const
{
    return m_writeRequestTable.empty() && m_readRequestTable.empty();
}

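// Entry point from the RubyPort: classify the packet into primary and
// secondary Ruby request types, insert it into the appropriate request
// table, and issue it to the cache controller.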
RequestStatus
Sequencer::makeRequest(PacketPtr pkt)
{
    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    RubyRequestType primary_type = RubyRequestType_NULL;
    RubyRequestType secondary_type = RubyRequestType_NULL;

    if (pkt->isLLSC()) {
        //
        // Alpha LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics. In
        // particular, by identifying the operations as atomic, the protocol
        // should understand that migratory sharing optimizations should not
        // be performed (i.e. a load between the LL and SC should not steal
        // away exclusive permission).
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing SC\n");
            primary_type = RubyRequestType_Store_Conditional;
        } else {
            DPRINTF(RubySequencer, "Issuing LL\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Load_Linked;
        }
        secondary_type = RubyRequestType_ATOMIC;
    } else if (pkt->req->isLockedRMW()) {
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
            primary_type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Locked_RMW_Read;
        }
        secondary_type = RubyRequestType_ST;
    } else {
        //
        // To support SwapReq, we need to check isWrite() first: a SwapReq
        // should always be treated like a write, but since a SwapReq implies
        // both isWrite() and isRead() are true, check isWrite() first here.
        //
        if (pkt->isWrite()) {
            //
            // Note: M5 packets do not differentiate ST from RMW_Write
            //
            primary_type = secondary_type = RubyRequestType_ST;
        } else if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                primary_type = secondary_type = RubyRequestType_IFETCH;
            } else {
                bool storeCheck = false;
                // only x86 needs the store check
                if (system->getArch() == Arch::X86ISA) {
                    uint32_t flags = pkt->req->getFlags();
                    storeCheck = flags &
                        (X86ISA::StoreCheck << X86ISA::FlagShift);
                }
                if (storeCheck) {
                    primary_type = RubyRequestType_RMW_Read;
                    secondary_type = RubyRequestType_ST;
                } else {
                    primary_type = secondary_type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isFlush()) {
            primary_type = secondary_type = RubyRequestType_FLUSH;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    RequestStatus status = insertRequest(pkt, primary_type);
    if (status != RequestStatus_Ready)
        return status;

    issueRequest(pkt, secondary_type);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}

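// Wrap the packet in a RubyRequest and enqueue it on the controller's
// mandatory queue after the queue's configured latency.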
void
Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
{
    assert(pkt != NULL);
    ContextID proc_id = pkt->req->hasContextId() ?
        pkt->req->contextId() : InvalidContextID;

    ContextID core_id = coreId();

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    // Check whether the packet carries data; for example, prefetch and
    // flush requests do not.
    std::shared_ptr<RubyRequest> msg =
        std::make_shared<RubyRequest>(clockEdge(), pkt->getAddr(),
                                      pkt->isFlush() ?
                                      nullptr : pkt->getPtr<uint8_t>(),
                                      pkt->getSize(), pc, secondary_type,
                                      RubyAccessMode_Supervisor, pkt,
                                      PrefetchBit_No, proc_id, core_id);

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %s\n",
            curTick(), m_version, "Seq", "Begin", "", "",
            printAddress(msg->getPhysicalAddress()),
            RubyRequestType_to_string(secondary_type));

    Tick latency = cyclesToTicks(
                        m_controller->mandatoryQueueLatency(secondary_type));
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, clockEdge(), latency);
}

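// Stream-insertion helper used by print() to dump the contents of a
// request table.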
template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const std::unordered_map<KEY, VALUE> &map)
{
    auto i = map.begin();
    auto end = map.end();

    out << "[";
    for (; i != end; ++i)
        out << " " << i->first << "=" << i->second;
    out << " ]";

    return out;
}

void
Sequencer::print(ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", read request table: " << m_readRequestTable
        << ", write request table: " << m_writeRequestTable
        << "]";
}

// This can be called from setState whenever coherence permissions are
// upgraded; when invoked, coherence violations will be checked for the
// given block.
void
Sequencer::checkCoherence(Addr addr)
{
}

void
Sequencer::recordRequestType(SequencerRequestType requestType) {
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            SequencerRequestType_to_string(requestType));
}


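// Forward an eviction notification for the given block through the RubyPort
// eviction callback.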
void
Sequencer::evictionCallback(Addr address)
{
    ruby_eviction_callback(address);
}

void
Sequencer::regStats()
{
    RubyPort::regStats();

    m_store_waiting_on_load
        .name(name() + ".store_waiting_on_load")
        .desc("Number of times a store aliased with a pending load")
        .flags(Stats::nozero);
    m_store_waiting_on_store
        .name(name() + ".store_waiting_on_store")
        .desc("Number of times a store aliased with a pending store")
        .flags(Stats::nozero);
    m_load_waiting_on_load
        .name(name() + ".load_waiting_on_load")
        .desc("Number of times a load aliased with a pending load")
        .flags(Stats::nozero);
    m_load_waiting_on_store
        .name(name() + ".load_waiting_on_store")
        .desc("Number of times a load aliased with a pending store")
        .flags(Stats::nozero);

    // These statistical variables are not for display.
    // The profiler will collate these across different
    // sequencers and display those collated statistics.
    m_outstandReqHist.init(10);
    m_latencyHist.init(10);
    m_hitLatencyHist.init(10);
    m_missLatencyHist.init(10);

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist.push_back(new Stats::Histogram());
        m_typeLatencyHist[i]->init(10);

        m_hitTypeLatencyHist.push_back(new Stats::Histogram());
        m_hitTypeLatencyHist[i]->init(10);

        m_missTypeLatencyHist.push_back(new Stats::Histogram());
        m_missTypeLatencyHist[i]->init(10);
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_hitMachLatencyHist.push_back(new Stats::Histogram());
        m_hitMachLatencyHist[i]->init(10);

        m_missMachLatencyHist.push_back(new Stats::Histogram());
        m_missMachLatencyHist[i]->init(10);

        m_IssueToInitialDelayHist.push_back(new Stats::Histogram());
        m_IssueToInitialDelayHist[i]->init(10);

        m_InitialToForwardDelayHist.push_back(new Stats::Histogram());
        m_InitialToForwardDelayHist[i]->init(10);

        m_ForwardToFirstResponseDelayHist.push_back(new Stats::Histogram());
        m_ForwardToFirstResponseDelayHist[i]->init(10);

        m_FirstResponseToCompletionDelayHist.push_back(new Stats::Histogram());
        m_FirstResponseToCompletionDelayHist[i]->init(10);
    }

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_hitTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());
        m_missTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());

        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_hitTypeMachLatencyHist[i][j]->init(10);

            m_missTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_missTypeMachLatencyHist[i][j]->init(10);
        }
    }
}