Sequencer.cc revision 11519:bf08fb8ccf4b
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "arch/x86/ldstflags.hh"
#include "base/misc.hh"
#include "base/str.hh"
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubySequencer.hh"
#include "debug/RubyStats.hh"
#include "mem/protocol/PrefetchBit.hh"
#include "mem/protocol/RubyAccessMode.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/packet.hh"
#include "sim/system.hh"

using namespace std;

Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

Sequencer::Sequencer(const Params *p)
    : RubyPort(p), m_IncompleteTimes(MachineType_NUM), deadlockCheckEvent(this)
{
    m_outstanding_count = 0;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_data_cache_hit_latency = p->dcache_hit_latency;
    m_inst_cache_hit_latency = p->icache_hit_latency;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    m_coreId = p->coreid; // for tracking the two CorePair sequencers
    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);
    assert(m_data_cache_hit_latency > 0);
    assert(m_inst_cache_hit_latency > 0);

    m_usingNetworkTester = p->using_network_tester;
}

Sequencer::~Sequencer()
{
}

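// Deadlock-check event handler: scan both request tables and panic if any
// outstanding request has been waiting for m_deadlock_threshold cycles or
// more; otherwise keep rescheduling while requests remain outstanding.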
void
Sequencer::wakeup()
{
    assert(drainState() != DrainState::Draining);

    // Check for deadlock of any of the requests
    Cycles current_time = curCycle();

    // Check across all outstanding requests
    int total_outstanding = 0;

    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_readRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->pkt->getAddr(), m_readRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) - (request->issue_time * clockPeriod()));
    }

    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->pkt->getAddr(), m_writeRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) - (request->issue_time * clockPeriod()));
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }
}

void Sequencer::resetStats()
{
    m_latencyHist.reset();
    m_hitLatencyHist.reset();
    m_missLatencyHist.reset();
    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist[i]->reset();
        m_hitTypeLatencyHist[i]->reset();
        m_missTypeLatencyHist[i]->reset();
        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i][j]->reset();
            m_missTypeMachLatencyHist[i][j]->reset();
        }
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_missMachLatencyHist[i]->reset();
        m_hitMachLatencyHist[i]->reset();

        m_IssueToInitialDelayHist[i]->reset();
        m_InitialToForwardDelayHist[i]->reset();
        m_ForwardToFirstResponseDelayHist[i]->reset();
        m_FirstResponseToCompletionDelayHist[i]->reset();

        m_IncompleteTimes[i] = 0;
    }
}

// Insert the request into the correct request table. Return
// RequestStatus_Aliased if the cache line already has an outstanding or
// blocked request, otherwise RequestStatus_Ready.
RequestStatus
Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
{
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    // See if we should schedule a deadlock check
    if (!deadlockCheckEvent.scheduled() &&
        drainState() != DrainState::Draining) {
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }

    Addr line_addr = makeLineAddress(pkt->getAddr());

    // Check if the line is blocked for a Locked_RMW
    if (m_controller->isBlocked(line_addr) &&
        (request_type != RubyRequestType_Locked_RMW_Write)) {
        // Return that this request's cache line address aliases with
        // a prior request that locked the cache line. The request cannot
        // proceed until the cache line is unlocked by a Locked_RMW_Write
        return RequestStatus_Aliased;
    }

    // Create a default entry, mapping the address to NULL, the cast is
    // there to make gcc 4.4 happy
    RequestTable::value_type default_entry(line_addr,
                                           (SequencerRequest*) NULL);

    if ((request_type == RubyRequestType_ST) ||
        (request_type == RubyRequestType_RMW_Read) ||
        (request_type == RubyRequestType_RMW_Write) ||
        (request_type == RubyRequestType_Load_Linked) ||
        (request_type == RubyRequestType_Store_Conditional) ||
        (request_type == RubyRequestType_Locked_RMW_Read) ||
        (request_type == RubyRequestType_Locked_RMW_Write) ||
        (request_type == RubyRequestType_FLUSH)) {

        // Check if there is any outstanding read request for the same
        // cache line.
        if (m_readRequestTable.count(line_addr) > 0) {
            m_store_waiting_on_load++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(default_entry);
        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding write request for the cache line
            m_store_waiting_on_store++;
            return RequestStatus_Aliased;
        }
    } else {
        // Check if there is any outstanding write request for the same
        // cache line.
        if (m_writeRequestTable.count(line_addr) > 0) {
            m_load_waiting_on_store++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(default_entry);

        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding read request for the cache line
            m_load_waiting_on_load++;
            return RequestStatus_Aliased;
        }
    }

    m_outstandReqHist.sample(m_outstanding_count);
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    return RequestStatus_Ready;
}

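// Decrement the outstanding-request count after an entry has been erased
// from one of the request tables.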
void
Sequencer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}

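// Called when the controller loses coherence permissions on a line that may
// hold an LL/SC lock; clearing the lock makes a later store-conditional fail.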
void
Sequencer::invalidateSC(Addr address)
{
    AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
    // The controller has lost the coherence permissions, hence the lock
    // on the cache line maintained by the cache should be cleared.
    if (e && e->isLocked(m_version)) {
        e->clearLocked();
    }
}

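// Implement LL/SC semantics on top of the per-line lock bit kept by the data
// cache: LL sets the lock, SC succeeds only while the lock is still held,
// and other writes clear it. Returns false only for a failed SC.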
bool
Sequencer::handleLlsc(Addr address, SequencerRequest* request)
{
    AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
    if (!e)
        return true;

    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    bool success = true;
    if (request->m_type == RubyRequestType_Store_Conditional) {
        if (!e->isLocked(m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        e->clearLocked();
    } else if (request->m_type == RubyRequestType_Load_Linked) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        e->setLocked(m_version);
    } else if (e->isLocked(m_version)) {
        //
        // Normal writes should clear the locked address
        //
        e->clearLocked();
    }
    return success;
}

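// Sample the latency histograms for a completed request. External hits are
// further broken down by responding machine and, when the recorded
// timestamps are monotonic, into issue/forward/response/completion
// segments; otherwise the request is counted in m_IncompleteTimes.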
void
Sequencer::recordMissLatency(const Cycles cycles, const RubyRequestType type,
                             const MachineType respondingMach,
                             bool isExternalHit, Cycles issuedTime,
                             Cycles initialRequestTime,
                             Cycles forwardRequestTime,
                             Cycles firstResponseTime, Cycles completionTime)
{
    m_latencyHist.sample(cycles);
    m_typeLatencyHist[type]->sample(cycles);

    if (isExternalHit) {
        m_missLatencyHist.sample(cycles);
        m_missTypeLatencyHist[type]->sample(cycles);

        if (respondingMach != MachineType_NUM) {
            m_missMachLatencyHist[respondingMach]->sample(cycles);
            m_missTypeMachLatencyHist[type][respondingMach]->sample(cycles);

            if ((issuedTime <= initialRequestTime) &&
                (initialRequestTime <= forwardRequestTime) &&
                (forwardRequestTime <= firstResponseTime) &&
                (firstResponseTime <= completionTime)) {

                m_IssueToInitialDelayHist[respondingMach]->sample(
                    initialRequestTime - issuedTime);
                m_InitialToForwardDelayHist[respondingMach]->sample(
                    forwardRequestTime - initialRequestTime);
                m_ForwardToFirstResponseDelayHist[respondingMach]->sample(
                    firstResponseTime - forwardRequestTime);
                m_FirstResponseToCompletionDelayHist[respondingMach]->sample(
                    completionTime - firstResponseTime);
            } else {
                m_IncompleteTimes[respondingMach]++;
            }
        }
    } else {
        m_hitLatencyHist.sample(cycles);
        m_hitTypeLatencyHist[type]->sample(cycles);

        if (respondingMach != MachineType_NUM) {
            m_hitMachLatencyHist[respondingMach]->sample(cycles);
            m_hitTypeMachLatencyHist[type][respondingMach]->sample(cycles);
        }
    }
}

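// Completion callback for write-class requests: remove the entry from the
// write request table, apply LL/SC handling, manage Locked_RMW blocking of
// the mandatory queue, and pass the data on to hitCallback.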
void
Sequencer::writeCallback(Addr address, DataBlock& data,
                         const bool externalHit, const MachineType mach,
                         const Cycles initialRequestTime,
                         const Cycles forwardRequestTime,
                         const Cycles firstResponseTime)
{
    assert(address == makeLineAddress(address));
    assert(m_writeRequestTable.count(makeLineAddress(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    SequencerRequest* request = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_ST) ||
           (request->m_type == RubyRequestType_ATOMIC) ||
           (request->m_type == RubyRequestType_RMW_Read) ||
           (request->m_type == RubyRequestType_RMW_Write) ||
           (request->m_type == RubyRequestType_Load_Linked) ||
           (request->m_type == RubyRequestType_Store_Conditional) ||
           (request->m_type == RubyRequestType_Locked_RMW_Read) ||
           (request->m_type == RubyRequestType_Locked_RMW_Write) ||
           (request->m_type == RubyRequestType_FLUSH));

    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    // Not valid for the Network_test protocol
    //
    bool success = true;
    if (!m_usingNetworkTester)
        success = handleLlsc(address, request);

    // Handle SLICC block_on behavior for Locked_RMW accesses. NOTE: the
    // address variable here is assumed to be a line address, so when
    // blocking buffers, must check line addresses.
    if (request->m_type == RubyRequestType_Locked_RMW_Read) {
        // blockOnQueue blocks all first-level cache controller queues
        // waiting on memory accesses for the specified address that go to
        // the specified queue. In this case, a Locked_RMW_Write must go to
        // the mandatory_q before unblocking the first-level controller.
        // This will block standard loads, stores, ifetches, etc.
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->m_type == RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, data, success, mach, externalHit,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

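// Completion callback for loads and instruction fetches: remove the entry
// from the read request table and pass the data on to hitCallback.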
void
Sequencer::readCallback(Addr address, DataBlock& data,
                        bool externalHit, const MachineType mach,
                        Cycles initialRequestTime,
                        Cycles forwardRequestTime,
                        Cycles firstResponseTime)
{
    assert(address == makeLineAddress(address));
    assert(m_readRequestTable.count(makeLineAddress(address)));

    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    SequencerRequest* request = i->second;

    m_readRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_LD) ||
           (request->m_type == RubyRequestType_IFETCH));

    hitCallback(request, data, true, mach, externalHit,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

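// Common completion path: record latency, copy data between the packet and
// the cache block (or update the block during warmup), update any RubyTester
// sender state, and finish the access via ruby_hit_callback or the cache
// recorder during warmup/cooldown.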
void
Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data,
                       bool llscSuccess,
                       const MachineType mach, const bool externalHit,
                       const Cycles initialRequestTime,
                       const Cycles forwardRequestTime,
                       const Cycles firstResponseTime)
{
    warn_once("Replacement policy updates recently became the responsibility "
              "of SLICC state machines. Make sure to setMRU() near callbacks "
              "in .sm files!");

    PacketPtr pkt = srequest->pkt;
    Addr request_address(pkt->getAddr());
    RubyRequestType type = srequest->m_type;
    Cycles issued_time = srequest->issue_time;

    assert(curCycle() >= issued_time);
    Cycles total_latency = curCycle() - issued_time;

    // Profile the latency for all demand accesses.
    recordMissLatency(total_latency, type, mach, externalHit, issued_time,
                      initialRequestTime, forwardRequestTime,
                      firstResponseTime, curCycle());

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %d cycles\n",
             curTick(), m_version, "Seq",
             llscSuccess ? "Done" : "SC_Failed", "", "",
             printAddress(request_address), total_latency);

    // update the data unless it is a non-data-carrying flush
    if (RubySystem::getWarmupEnabled()) {
        data.setData(pkt->getConstPtr<uint8_t>(),
                     getOffset(request_address), pkt->getSize());
    } else if (!pkt->isFlush()) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked)) {
            memcpy(pkt->getPtr<uint8_t>(),
                   data.getData(getOffset(request_address), pkt->getSize()),
                   pkt->getSize());
            DPRINTF(RubySequencer, "read data %s\n", data);
        } else if (pkt->req->isSwap()) {
            std::vector<uint8_t> overwrite_val(pkt->getSize());
            memcpy(&overwrite_val[0], pkt->getConstPtr<uint8_t>(),
                   pkt->getSize());
            memcpy(pkt->getPtr<uint8_t>(),
                   data.getData(getOffset(request_address), pkt->getSize()),
                   pkt->getSize());
            data.setData(&overwrite_val[0],
                         getOffset(request_address), pkt->getSize());
            DPRINTF(RubySequencer, "swap data %s\n", data);
        } else {
            data.setData(pkt->getConstPtr<uint8_t>(),
                         getOffset(request_address), pkt->getSize());
            DPRINTF(RubySequencer, "set data %s\n", data);
        }
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data.  The tester will later access
    // this state.
    if (m_usingRubyTester) {
        DPRINTF(RubySequencer, "hitCallback %s 0x%x using RubyTester\n",
                pkt->cmdString(), pkt->getAddr());
        RubyTester::SenderState* testerSenderState =
            pkt->findNextSenderState<RubyTester::SenderState>();
        assert(testerSenderState);
        testerSenderState->subBlock.mergeFrom(data);
    }

    delete srequest;

    RubySystem *rs = m_ruby_system;
    if (RubySystem::getWarmupEnabled()) {
        assert(pkt->req);
        delete pkt->req;
        delete pkt;
        rs->m_cache_recorder->enqueueNextFetchRequest();
    } else if (RubySystem::getCooldownEnabled()) {
        delete pkt;
        rs->m_cache_recorder->enqueueNextFlushRequest();
    } else {
        ruby_hit_callback(pkt);
        testDrainComplete();
    }
}

bool
Sequencer::empty() const
{
    return m_writeRequestTable.empty() && m_readRequestTable.empty();
}

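// Entry point from the RubyPort: map the packet onto primary and secondary
// RubyRequestTypes, insert it into the appropriate request table, and issue
// it to the cache controller unless it aliases with a pending request.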
RequestStatus
Sequencer::makeRequest(PacketPtr pkt)
{
    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    RubyRequestType primary_type = RubyRequestType_NULL;
    RubyRequestType secondary_type = RubyRequestType_NULL;

    if (pkt->isLLSC()) {
        //
        // Alpha LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics. In
        // particular, by identifying the operations as atomic, the protocol
        // should understand that migratory sharing optimizations should not
        // be performed (i.e. a load between the LL and SC should not steal
        // away exclusive permission).
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing SC\n");
            primary_type = RubyRequestType_Store_Conditional;
        } else {
            DPRINTF(RubySequencer, "Issuing LL\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Load_Linked;
        }
        secondary_type = RubyRequestType_ATOMIC;
    } else if (pkt->req->isLockedRMW()) {
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
            primary_type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Locked_RMW_Read;
        }
        secondary_type = RubyRequestType_ST;
    } else {
        //
        // To support SwapReq, we need to check isWrite() first: a SwapReq
        // should always be treated like a write, but since a SwapReq implies
        // both isWrite() and isRead() are true, check isWrite() first here.
        //
        if (pkt->isWrite()) {
            //
            // Note: M5 packets do not differentiate ST from RMW_Write
            //
            primary_type = secondary_type = RubyRequestType_ST;
        } else if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                primary_type = secondary_type = RubyRequestType_IFETCH;
            } else {
                bool storeCheck = false;
                // only X86 needs the store check
                if (system->getArch() == Arch::X86ISA) {
                    uint32_t flags = pkt->req->getFlags();
                    storeCheck = flags &
                        (X86ISA::StoreCheck << X86ISA::FlagShift);
                }
                if (storeCheck) {
                    primary_type = RubyRequestType_RMW_Read;
                    secondary_type = RubyRequestType_ST;
                } else {
                    primary_type = secondary_type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isFlush()) {
            primary_type = secondary_type = RubyRequestType_FLUSH;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    RequestStatus status = insertRequest(pkt, primary_type);
    if (status != RequestStatus_Ready)
        return status;

    issueRequest(pkt, secondary_type);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}

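// Build a RubyRequest for the packet and enqueue it on the mandatory queue
// after the configured L1 instruction- or data-cache hit latency.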
void
Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
{
    assert(pkt != NULL);
    ContextID proc_id = pkt->req->hasContextId() ?
        pkt->req->contextId() : InvalidContextID;

    ContextID core_id = coreId();

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    // check if the packet has data as for example prefetch and flush
    // requests do not
    std::shared_ptr<RubyRequest> msg =
        std::make_shared<RubyRequest>(clockEdge(), pkt->getAddr(),
                                      pkt->isFlush() ?
                                      nullptr : pkt->getPtr<uint8_t>(),
                                      pkt->getSize(), pc, secondary_type,
                                      RubyAccessMode_Supervisor, pkt,
                                      PrefetchBit_No, proc_id, core_id);

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %s\n",
            curTick(), m_version, "Seq", "Begin", "", "",
            printAddress(msg->getPhysicalAddress()),
            RubyRequestType_to_string(secondary_type));

    // The Sequencer currently assesses instruction and data cache hit latency
    // for the top-level caches at the beginning of a memory access.
    // TODO: Eventually, this latency should be moved to represent the actual
    // cache access latency portion of the memory access. This will require
    // changing cache controller protocol files to assess the latency on the
    // access response path.
    Cycles latency(0);  // Initialize to zero to catch misconfigured latency
    if (secondary_type == RubyRequestType_IFETCH)
        latency = m_inst_cache_hit_latency;
    else
        latency = m_data_cache_hit_latency;

    // Send the message to the cache controller
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, clockEdge(), cyclesToTicks(latency));
}

template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const std::unordered_map<KEY, VALUE> &map)
{
    auto i = map.begin();
    auto end = map.end();

    out << "[";
    for (; i != end; ++i)
        out << " " << i->first << "=" << i->second;
    out << " ]";

    return out;
}

void
Sequencer::print(ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", read request table: " << m_readRequestTable
        << ", write request table: " << m_writeRequestTable
        << "]";
}

// This can be called from setState whenever coherence permissions are
// upgraded; when invoked, coherence violations will be checked for the
// given block.
void
Sequencer::checkCoherence(Addr addr)
{
#ifdef CHECK_COHERENCE
    m_ruby_system->checkGlobalCoherenceInvariant(addr);
#endif
}

void
Sequencer::recordRequestType(SequencerRequestType requestType) {
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            SequencerRequestType_to_string(requestType));
}


void
Sequencer::evictionCallback(Addr address)
{
    ruby_eviction_callback(address);
}

void
Sequencer::regStats()
{
    m_store_waiting_on_load
        .name(name() + ".store_waiting_on_load")
        .desc("Number of times a store aliased with a pending load")
        .flags(Stats::nozero);
    m_store_waiting_on_store
        .name(name() + ".store_waiting_on_store")
        .desc("Number of times a store aliased with a pending store")
        .flags(Stats::nozero);
    m_load_waiting_on_load
        .name(name() + ".load_waiting_on_load")
        .desc("Number of times a load aliased with a pending load")
        .flags(Stats::nozero);
    m_load_waiting_on_store
        .name(name() + ".load_waiting_on_store")
        .desc("Number of times a load aliased with a pending store")
        .flags(Stats::nozero);

    // These statistical variables are not for display.
    // The profiler will collate these across different
    // sequencers and display those collated statistics.
    m_outstandReqHist.init(10);
    m_latencyHist.init(10);
    m_hitLatencyHist.init(10);
    m_missLatencyHist.init(10);

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist.push_back(new Stats::Histogram());
        m_typeLatencyHist[i]->init(10);

        m_hitTypeLatencyHist.push_back(new Stats::Histogram());
        m_hitTypeLatencyHist[i]->init(10);

        m_missTypeLatencyHist.push_back(new Stats::Histogram());
        m_missTypeLatencyHist[i]->init(10);
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_hitMachLatencyHist.push_back(new Stats::Histogram());
        m_hitMachLatencyHist[i]->init(10);

        m_missMachLatencyHist.push_back(new Stats::Histogram());
        m_missMachLatencyHist[i]->init(10);

        m_IssueToInitialDelayHist.push_back(new Stats::Histogram());
        m_IssueToInitialDelayHist[i]->init(10);

        m_InitialToForwardDelayHist.push_back(new Stats::Histogram());
        m_InitialToForwardDelayHist[i]->init(10);

        m_ForwardToFirstResponseDelayHist.push_back(new Stats::Histogram());
        m_ForwardToFirstResponseDelayHist[i]->init(10);

        m_FirstResponseToCompletionDelayHist.push_back(new Stats::Histogram());
        m_FirstResponseToCompletionDelayHist[i]->init(10);
    }

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_hitTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());
        m_missTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());

        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_hitTypeMachLatencyHist[i][j]->init(10);

            m_missTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_missTypeMachLatencyHist[i][j]->init(10);
        }
    }
}