Sequencer.cc revision 11109:bf3d0f56a6ba
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "arch/x86/ldstflags.hh"
#include "base/misc.hh"
#include "base/str.hh"
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubySequencer.hh"
#include "debug/RubyStats.hh"
#include "mem/protocol/PrefetchBit.hh"
#include "mem/protocol/RubyAccessMode.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/packet.hh"
#include "sim/system.hh"

using namespace std;

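// Factory hook on the generated parameter class: gem5 constructs the
// Sequencer SimObject from its Python configuration through this method.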
Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

Sequencer::Sequencer(const Params *p)
    : RubyPort(p), m_IncompleteTimes(MachineType_NUM), deadlockCheckEvent(this)
{
    m_outstanding_count = 0;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_data_cache_hit_latency = p->dcache_hit_latency;
    m_inst_cache_hit_latency = p->icache_hit_latency;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);
    assert(m_data_cache_hit_latency > 0);
    assert(m_inst_cache_hit_latency > 0);

    m_usingNetworkTester = p->using_network_tester;
}

Sequencer::~Sequencer()
{
}

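// Handler for the periodic deadlock-check event: scan both request tables
// and panic if any request has been outstanding for longer than the
// configured deadlock threshold; otherwise re-arm the check while requests
// remain outstanding.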
void
Sequencer::wakeup()
{
    assert(drainState() != DrainState::Draining);

    // Check for deadlock of any of the requests
    Cycles current_time = curCycle();

    // Check across all outstanding requests
    int total_outstanding = 0;

    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_readRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->pkt->getAddr(), m_readRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) - (request->issue_time * clockPeriod()));
    }

    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->pkt->getAddr(), m_writeRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) - (request->issue_time * clockPeriod()));
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }
}

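// Reset every latency and delay histogram owned by this sequencer, along
// with the per-machine incomplete-time counters.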
void Sequencer::resetStats()
{
    m_latencyHist.reset();
    m_hitLatencyHist.reset();
    m_missLatencyHist.reset();
    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist[i]->reset();
        m_hitTypeLatencyHist[i]->reset();
        m_missTypeLatencyHist[i]->reset();
        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i][j]->reset();
            m_missTypeMachLatencyHist[i][j]->reset();
        }
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_missMachLatencyHist[i]->reset();
        m_hitMachLatencyHist[i]->reset();

        m_IssueToInitialDelayHist[i]->reset();
        m_InitialToForwardDelayHist[i]->reset();
        m_ForwardToFirstResponseDelayHist[i]->reset();
        m_FirstResponseToCompletionDelayHist[i]->reset();

        m_IncompleteTimes[i] = 0;
    }
}

// Insert the request into the appropriate request table. Return
// RequestStatus_Aliased if a request for the same cache line is already
// outstanding, otherwise RequestStatus_Ready.
RequestStatus
Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
{
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    // See if we should schedule a deadlock check
    if (!deadlockCheckEvent.scheduled() &&
        drainState() != DrainState::Draining) {
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }

    Addr line_addr = makeLineAddress(pkt->getAddr());
    // Create a default entry, mapping the address to NULL, the cast is
    // there to make gcc 4.4 happy
    RequestTable::value_type default_entry(line_addr,
                                           (SequencerRequest*) NULL);

    if ((request_type == RubyRequestType_ST) ||
        (request_type == RubyRequestType_RMW_Read) ||
        (request_type == RubyRequestType_RMW_Write) ||
        (request_type == RubyRequestType_Load_Linked) ||
        (request_type == RubyRequestType_Store_Conditional) ||
        (request_type == RubyRequestType_Locked_RMW_Read) ||
        (request_type == RubyRequestType_Locked_RMW_Write) ||
        (request_type == RubyRequestType_FLUSH)) {

        // Check if there is any outstanding read request for the same
        // cache line.
        if (m_readRequestTable.count(line_addr) > 0) {
            m_store_waiting_on_load++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(default_entry);
        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding write request for the cache line
            m_store_waiting_on_store++;
            return RequestStatus_Aliased;
        }
    } else {
        // Check if there is any outstanding write request for the same
        // cache line.
        if (m_writeRequestTable.count(line_addr) > 0) {
            m_load_waiting_on_store++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(default_entry);

        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding read request for the cache line
            m_load_waiting_on_load++;
            return RequestStatus_Aliased;
        }
    }

    m_outstandReqHist.sample(m_outstanding_count);
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    return RequestStatus_Ready;
}

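// Bookkeeping helper: decrement the outstanding-request count after an
// entry has been erased from one of the request tables.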
void
Sequencer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}

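// Erase a request from whichever table holds it: the write table for
// store-class requests (including RMW, LL/SC, and locked RMW), the read
// table otherwise.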
void
Sequencer::removeRequest(SequencerRequest* srequest)
{
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());

    Addr line_addr = makeLineAddress(srequest->pkt->getAddr());
    if ((srequest->m_type == RubyRequestType_ST) ||
        (srequest->m_type == RubyRequestType_RMW_Read) ||
        (srequest->m_type == RubyRequestType_RMW_Write) ||
        (srequest->m_type == RubyRequestType_Load_Linked) ||
        (srequest->m_type == RubyRequestType_Store_Conditional) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Read) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Write)) {
        m_writeRequestTable.erase(line_addr);
    } else {
        m_readRequestTable.erase(line_addr);
    }

    markRemoved();
}

void
Sequencer::invalidateSC(Addr address)
{
    AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
    // The controller has lost the coherence permissions, hence the lock
    // on the cache line maintained by the cache should be cleared.
    if (e && e->isLocked(m_version)) {
        e->clearLocked();
    }
}

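// Apply LL/SC semantics to the target line: an LL locks the line, an SC
// succeeds only if the lock is still held (reporting the outcome through
// the packet's extra data), and any other write clears a stale lock.
// Returns false only for a failed store-conditional.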
bool
Sequencer::handleLlsc(Addr address, SequencerRequest* request)
{
    AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
    if (!e)
        return true;

    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    bool success = true;
    if (request->m_type == RubyRequestType_Store_Conditional) {
        if (!e->isLocked(m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        e->clearLocked();
    } else if (request->m_type == RubyRequestType_Load_Linked) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        e->setLocked(m_version);
    } else if (e->isLocked(m_version)) {
        //
        // Normal writes should clear the locked address
        //
        e->clearLocked();
    }
    return success;
}

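// Sample the latency histograms for a completed request. The per-machine
// delay breakdown is only recorded when the issue, initial, forward, first
// response, and completion timestamps are monotonically ordered; otherwise
// the access is counted in m_IncompleteTimes.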
void
Sequencer::recordMissLatency(const Cycles cycles, const RubyRequestType type,
                             const MachineType respondingMach,
                             bool isExternalHit, Cycles issuedTime,
                             Cycles initialRequestTime,
                             Cycles forwardRequestTime,
                             Cycles firstResponseTime, Cycles completionTime)
{
    m_latencyHist.sample(cycles);
    m_typeLatencyHist[type]->sample(cycles);

    if (isExternalHit) {
        m_missLatencyHist.sample(cycles);
        m_missTypeLatencyHist[type]->sample(cycles);

        if (respondingMach != MachineType_NUM) {
            m_missMachLatencyHist[respondingMach]->sample(cycles);
            m_missTypeMachLatencyHist[type][respondingMach]->sample(cycles);

            if ((issuedTime <= initialRequestTime) &&
                (initialRequestTime <= forwardRequestTime) &&
                (forwardRequestTime <= firstResponseTime) &&
                (firstResponseTime <= completionTime)) {

                m_IssueToInitialDelayHist[respondingMach]->sample(
                    initialRequestTime - issuedTime);
                m_InitialToForwardDelayHist[respondingMach]->sample(
                    forwardRequestTime - initialRequestTime);
                m_ForwardToFirstResponseDelayHist[respondingMach]->sample(
                    firstResponseTime - forwardRequestTime);
                m_FirstResponseToCompletionDelayHist[respondingMach]->sample(
                    completionTime - firstResponseTime);
            } else {
                m_IncompleteTimes[respondingMach]++;
            }
        }
    } else {
        m_hitLatencyHist.sample(cycles);
        m_hitTypeLatencyHist[type]->sample(cycles);

        if (respondingMach != MachineType_NUM) {
            m_hitMachLatencyHist[respondingMach]->sample(cycles);
            m_hitTypeMachLatencyHist[type][respondingMach]->sample(cycles);
        }
    }
}

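// Completion callback from the cache controller for a store-class request:
// retire the entry from the write request table, apply LL/SC handling, and
// manage blocking of the mandatory queue around locked RMW sequences before
// handing the result to hitCallback().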
void
Sequencer::writeCallback(Addr address, DataBlock& data,
                         const bool externalHit, const MachineType mach,
                         const Cycles initialRequestTime,
                         const Cycles forwardRequestTime,
                         const Cycles firstResponseTime)
{
    assert(address == makeLineAddress(address));
    assert(m_writeRequestTable.count(makeLineAddress(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    SequencerRequest* request = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_ST) ||
           (request->m_type == RubyRequestType_ATOMIC) ||
           (request->m_type == RubyRequestType_RMW_Read) ||
           (request->m_type == RubyRequestType_RMW_Write) ||
           (request->m_type == RubyRequestType_Load_Linked) ||
           (request->m_type == RubyRequestType_Store_Conditional) ||
           (request->m_type == RubyRequestType_Locked_RMW_Read) ||
           (request->m_type == RubyRequestType_Locked_RMW_Write) ||
           (request->m_type == RubyRequestType_FLUSH));

    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    // Not valid for Network_test protocol
    //
    bool success = true;
    if (!m_usingNetworkTester)
        success = handleLlsc(address, request);

    if (request->m_type == RubyRequestType_Locked_RMW_Read) {
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->m_type == RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, data, success, mach, externalHit,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

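// Completion callback from the cache controller for a load or instruction
// fetch: retire the entry from the read request table and hand the data to
// hitCallback().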
void
Sequencer::readCallback(Addr address, DataBlock& data,
                        bool externalHit, const MachineType mach,
                        Cycles initialRequestTime,
                        Cycles forwardRequestTime,
                        Cycles firstResponseTime)
{
    assert(address == makeLineAddress(address));
    assert(m_readRequestTable.count(makeLineAddress(address)));

    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    SequencerRequest* request = i->second;

    m_readRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_LD) ||
           (request->m_type == RubyRequestType_IFETCH));

    hitCallback(request, data, true, mach, externalHit,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

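// Common completion path for reads and writes: profile the request latency,
// copy data between the packet and the cache data block, update the
// RubyTester's sub-block when the tester is in use, and finally return the
// packet to the CPU (or to the cache recorder during warmup/cooldown).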
void
Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data,
                       bool llscSuccess,
                       const MachineType mach, const bool externalHit,
                       const Cycles initialRequestTime,
                       const Cycles forwardRequestTime,
                       const Cycles firstResponseTime)
{
    warn_once("Replacement policy updates recently became the responsibility "
              "of SLICC state machines. Make sure to setMRU() near callbacks "
              "in .sm files!");

    PacketPtr pkt = srequest->pkt;
    Addr request_address(pkt->getAddr());
    RubyRequestType type = srequest->m_type;
    Cycles issued_time = srequest->issue_time;

    assert(curCycle() >= issued_time);
    Cycles total_latency = curCycle() - issued_time;

    // Profile the latency for all demand accesses.
    recordMissLatency(total_latency, type, mach, externalHit, issued_time,
                      initialRequestTime, forwardRequestTime,
                      firstResponseTime, curCycle());

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %d cycles\n",
             curTick(), m_version, "Seq",
             llscSuccess ? "Done" : "SC_Failed", "", "",
             request_address, total_latency);

    // update the data unless it is a non-data-carrying flush
    if (RubySystem::getWarmupEnabled()) {
        data.setData(pkt->getConstPtr<uint8_t>(),
                     getOffset(request_address), pkt->getSize());
    } else if (!pkt->isFlush()) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked)) {
            memcpy(pkt->getPtr<uint8_t>(),
                   data.getData(getOffset(request_address), pkt->getSize()),
                   pkt->getSize());
            DPRINTF(RubySequencer, "read data %s\n", data);
        } else {
            data.setData(pkt->getConstPtr<uint8_t>(),
                         getOffset(request_address), pkt->getSize());
            DPRINTF(RubySequencer, "set data %s\n", data);
        }
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data.  The tester will later access
    // this state.
    if (m_usingRubyTester) {
        DPRINTF(RubySequencer, "hitCallback %s 0x%x using RubyTester\n",
                pkt->cmdString(), pkt->getAddr());
        RubyTester::SenderState* testerSenderState =
            pkt->findNextSenderState<RubyTester::SenderState>();
        assert(testerSenderState);
        testerSenderState->subBlock.mergeFrom(data);
    }

    delete srequest;

    RubySystem *rs = m_ruby_system;
    if (RubySystem::getWarmupEnabled()) {
        assert(pkt->req);
        delete pkt->req;
        delete pkt;
        rs->m_cache_recorder->enqueueNextFetchRequest();
    } else if (RubySystem::getCooldownEnabled()) {
        delete pkt;
        rs->m_cache_recorder->enqueueNextFlushRequest();
    } else {
        ruby_hit_callback(pkt);
    }
}

bool
Sequencer::empty() const
{
    return m_writeRequestTable.empty() && m_readRequestTable.empty();
}

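// Entry point from RubyPort for a new CPU request: classify the packet into
// primary and secondary Ruby request types, record it in a request table,
// and issue it to the cache controller. Returns BufferFull when too many
// requests are outstanding and Aliased when the line is already in flight.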
RequestStatus
Sequencer::makeRequest(PacketPtr pkt)
{
    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    RubyRequestType primary_type = RubyRequestType_NULL;
    RubyRequestType secondary_type = RubyRequestType_NULL;

    if (pkt->isLLSC()) {
        //
        // Alpha LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics. In
        // particular, by identifying the operations as atomic, the protocol
        // should understand that migratory sharing optimizations should not
        // be performed (i.e. a load between the LL and SC should not steal
        // away exclusive permission).
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing SC\n");
            primary_type = RubyRequestType_Store_Conditional;
        } else {
            DPRINTF(RubySequencer, "Issuing LL\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Load_Linked;
        }
        secondary_type = RubyRequestType_ATOMIC;
    } else if (pkt->req->isLockedRMW()) {
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
            primary_type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Locked_RMW_Read;
        }
        secondary_type = RubyRequestType_ST;
    } else {
        if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                primary_type = secondary_type = RubyRequestType_IFETCH;
            } else {
                bool storeCheck = false;
                // only X86 needs the store check
                if (system->getArch() == Arch::X86ISA) {
                    uint32_t flags = pkt->req->getFlags();
                    storeCheck = flags &
                        (X86ISA::StoreCheck << X86ISA::FlagShift);
                }
                if (storeCheck) {
                    primary_type = RubyRequestType_RMW_Read;
                    secondary_type = RubyRequestType_ST;
                } else {
                    primary_type = secondary_type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isWrite()) {
            //
            // Note: M5 packets do not differentiate ST from RMW_Write
            //
            primary_type = secondary_type = RubyRequestType_ST;
        } else if (pkt->isFlush()) {
            primary_type = secondary_type = RubyRequestType_FLUSH;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    RequestStatus status = insertRequest(pkt, primary_type);
    if (status != RequestStatus_Ready)
        return status;

    issueRequest(pkt, secondary_type);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}

void
Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
{
    assert(pkt != NULL);
    ContextID proc_id = pkt->req->hasContextId() ?
        pkt->req->contextId() : InvalidContextID;

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    // Check if the packet has data; prefetch and flush requests, for
    // example, do not.
    std::shared_ptr<RubyRequest> msg =
        std::make_shared<RubyRequest>(clockEdge(), pkt->getAddr(),
                                      pkt->isFlush() ?
                                      nullptr : pkt->getPtr<uint8_t>(),
                                      pkt->getSize(), pc, secondary_type,
                                      RubyAccessMode_Supervisor, pkt,
                                      PrefetchBit_No, proc_id);

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %s\n",
            curTick(), m_version, "Seq", "Begin", "", "",
            msg->getPhysicalAddress(),
            RubyRequestType_to_string(secondary_type));

    // The Sequencer currently assesses instruction and data cache hit latency
    // for the top-level caches at the beginning of a memory access.
    // TODO: Eventually, this latency should be moved to represent the actual
    // cache access latency portion of the memory access. This will require
    // changing cache controller protocol files to assess the latency on the
    // access response path.
    Cycles latency(0);  // Initialize to zero to catch misconfigured latency
    if (secondary_type == RubyRequestType_IFETCH)
        latency = m_inst_cache_hit_latency;
    else
        latency = m_data_cache_hit_latency;

    // Send the message to the cache controller
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, latency);
}

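// Stream-insertion helper used by Sequencer::print() to dump the contents
// of a request table.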
template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map)
{
    typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin();
    typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end();

    out << "[";
    for (; i != end; ++i)
        out << " " << i->first << "=" << i->second;
    out << " ]";

    return out;
}

void
Sequencer::print(ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", read request table: " << m_readRequestTable
        << ", write request table: " << m_writeRequestTable
        << "]";
}

// This can be called from setState whenever coherence permissions are
// upgraded. When invoked, coherence violations will be checked for the
// given block.
void
Sequencer::checkCoherence(Addr addr)
{
#ifdef CHECK_COHERENCE
    m_ruby_system->checkGlobalCoherenceInvariant(addr);
#endif
}

void
Sequencer::recordRequestType(SequencerRequestType requestType) {
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            SequencerRequestType_to_string(requestType));
}


void
Sequencer::evictionCallback(Addr address)
{
    ruby_eviction_callback(address);
}

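// Register per-sequencer statistics: the aliasing counters plus the latency
// and delay histograms, broken down by request type and responding machine.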
void
Sequencer::regStats()
{
    m_store_waiting_on_load
        .name(name() + ".store_waiting_on_load")
        .desc("Number of times a store aliased with a pending load")
        .flags(Stats::nozero);
    m_store_waiting_on_store
        .name(name() + ".store_waiting_on_store")
        .desc("Number of times a store aliased with a pending store")
        .flags(Stats::nozero);
    m_load_waiting_on_load
        .name(name() + ".load_waiting_on_load")
        .desc("Number of times a load aliased with a pending load")
        .flags(Stats::nozero);
    m_load_waiting_on_store
        .name(name() + ".load_waiting_on_store")
        .desc("Number of times a load aliased with a pending store")
        .flags(Stats::nozero);

    // These statistical variables are not for display.
    // The profiler will collate these across different
    // sequencers and display those collated statistics.
    m_outstandReqHist.init(10);
    m_latencyHist.init(10);
    m_hitLatencyHist.init(10);
    m_missLatencyHist.init(10);

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist.push_back(new Stats::Histogram());
        m_typeLatencyHist[i]->init(10);

        m_hitTypeLatencyHist.push_back(new Stats::Histogram());
        m_hitTypeLatencyHist[i]->init(10);

        m_missTypeLatencyHist.push_back(new Stats::Histogram());
        m_missTypeLatencyHist[i]->init(10);
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_hitMachLatencyHist.push_back(new Stats::Histogram());
        m_hitMachLatencyHist[i]->init(10);

        m_missMachLatencyHist.push_back(new Stats::Histogram());
        m_missMachLatencyHist[i]->init(10);

        m_IssueToInitialDelayHist.push_back(new Stats::Histogram());
        m_IssueToInitialDelayHist[i]->init(10);

        m_InitialToForwardDelayHist.push_back(new Stats::Histogram());
        m_InitialToForwardDelayHist[i]->init(10);

        m_ForwardToFirstResponseDelayHist.push_back(new Stats::Histogram());
        m_ForwardToFirstResponseDelayHist[i]->init(10);

        m_FirstResponseToCompletionDelayHist.push_back(new Stats::Histogram());
        m_FirstResponseToCompletionDelayHist[i]->init(10);
    }

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_hitTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());
        m_missTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());

        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_hitTypeMachLatencyHist[i][j]->init(10);

            m_missTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_missTypeMachLatencyHist[i][j]->init(10);
        }
    }
}
