Sequencer.cc revision 10913:38dbdeea7f1f
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

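// The Sequencer sits at the boundary between gem5's port/packet world and
// the Ruby memory system: it classifies incoming packets into RubyRequest
// messages, tracks at most one outstanding read and one outstanding write
// per cache line, and completes requests when the protocol controller
// invokes its read/write callbacks.
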
#include "arch/x86/ldstflags.hh"
#include "base/misc.hh"
#include "base/str.hh"
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubySequencer.hh"
#include "debug/RubyStats.hh"
#include "mem/protocol/PrefetchBit.hh"
#include "mem/protocol/RubyAccessMode.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/packet.hh"
#include "sim/system.hh"

using namespace std;

Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

Sequencer::Sequencer(const Params *p)
    : RubyPort(p), m_IncompleteTimes(MachineType_NUM), deadlockCheckEvent(this)
{
    m_outstanding_count = 0;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);

    m_usingNetworkTester = p->using_network_tester;
}

Sequencer::~Sequencer()
{
}

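// Deadlock watchdog: this event is (re)scheduled every m_deadlock_threshold
// cycles while requests are outstanding.  If any read or write request has
// been pending for longer than the threshold, the simulation is aborted with
// a panic that describes the stuck request.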
void
Sequencer::wakeup()
{
    assert(drainState() != DrainState::Draining);

    // Check for deadlock of any of the requests
    Cycles current_time = curCycle();

    // Check across all outstanding requests
    int total_outstanding = 0;

    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_readRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              Address(request->pkt->getAddr()), m_readRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) - (request->issue_time * clockPeriod()));
    }

    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              Address(request->pkt->getAddr()), m_writeRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) - (request->issue_time * clockPeriod()));
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }
}

void
Sequencer::resetStats()
{
    m_latencyHist.reset();
    m_hitLatencyHist.reset();
    m_missLatencyHist.reset();
    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist[i]->reset();
        m_hitTypeLatencyHist[i]->reset();
        m_missTypeLatencyHist[i]->reset();
        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i][j]->reset();
            m_missTypeMachLatencyHist[i][j]->reset();
        }
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_missMachLatencyHist[i]->reset();
        m_hitMachLatencyHist[i]->reset();

        m_IssueToInitialDelayHist[i]->reset();
        m_InitialToForwardDelayHist[i]->reset();
        m_ForwardToFirstResponseDelayHist[i]->reset();
        m_FirstResponseToCompletionDelayHist[i]->reset();

        m_IncompleteTimes[i] = 0;
    }
}

void
Sequencer::printProgress(ostream& out) const
{
#if 0
    int total_demand = 0;
    out << "Sequencer Stats Version " << m_version << endl;
    out << "Current time = " << g_system_ptr->getTime() << endl;
    out << "---------------" << endl;
    out << "outstanding requests" << endl;

    out << "proc " << m_Read
        << " version Requests = " << m_readRequestTable.size() << endl;

    // print the request table
    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        out << "\tRequest[ " << i << " ] = " << request->type
            << " Address " << rkeys[i]
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    out << "proc " << m_version
        << " Write Requests = " << m_writeRequestTable.size << endl;

    // print the request table
    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        out << "\tRequest[ " << i << " ] = " << request.getType()
            << " Address " << wkeys[i]
            << " Posted " << request.getTime()
            << " PF " << request.getPrefetch() << endl;
        if (request.getPrefetch() == PrefetchBit_No) {
            total_demand++;
        }
    }

    out << endl;

    out << "Total Number Outstanding: " << m_outstanding_count << endl
        << "Total Number Demand     : " << total_demand << endl
        << "Total Number Prefetches : " << m_outstanding_count - total_demand
        << endl << endl << endl;
#endif
}

// Insert the request into the read or write request table, keyed by cache
// line address.  Returns RequestStatus_Aliased if a request for the same
// line is already outstanding, otherwise RequestStatus_Ready.
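// An aliased request is not inserted; the caller propagates the status so
// that the packet can be retried later.  This is also where the deadlock
// check event is first scheduled once the sequencer has work outstanding.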
RequestStatus
Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
{
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    // See if we should schedule a deadlock check
    if (!deadlockCheckEvent.scheduled() &&
        drainState() != DrainState::Draining) {
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }

    Address line_addr(pkt->getAddr());
    line_addr.makeLineAddress();
    // Create a default entry, mapping the address to NULL, the cast is
    // there to make gcc 4.4 happy
    RequestTable::value_type default_entry(line_addr,
                                           (SequencerRequest*) NULL);

    if ((request_type == RubyRequestType_ST) ||
        (request_type == RubyRequestType_RMW_Read) ||
        (request_type == RubyRequestType_RMW_Write) ||
        (request_type == RubyRequestType_Load_Linked) ||
        (request_type == RubyRequestType_Store_Conditional) ||
        (request_type == RubyRequestType_Locked_RMW_Read) ||
        (request_type == RubyRequestType_Locked_RMW_Write) ||
        (request_type == RubyRequestType_FLUSH)) {

        // Check if there is any outstanding read request for the same
        // cache line.
        if (m_readRequestTable.count(line_addr) > 0) {
            m_store_waiting_on_load++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(default_entry);
        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding write request for the cache line
            m_store_waiting_on_store++;
            return RequestStatus_Aliased;
        }
    } else {
        // Check if there is any outstanding write request for the same
        // cache line.
        if (m_writeRequestTable.count(line_addr) > 0) {
            m_load_waiting_on_store++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(default_entry);

        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding read request for the cache line
            m_load_waiting_on_load++;
            return RequestStatus_Aliased;
        }
    }

    m_outstandReqHist.sample(m_outstanding_count);
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    return RequestStatus_Ready;
}

void
Sequencer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}

void
Sequencer::removeRequest(SequencerRequest* srequest)
{
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());

    Address line_addr(srequest->pkt->getAddr());
    line_addr.makeLineAddress();
    if ((srequest->m_type == RubyRequestType_ST) ||
        (srequest->m_type == RubyRequestType_RMW_Read) ||
        (srequest->m_type == RubyRequestType_RMW_Write) ||
        (srequest->m_type == RubyRequestType_Load_Linked) ||
        (srequest->m_type == RubyRequestType_Store_Conditional) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Read) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Write)) {
        m_writeRequestTable.erase(line_addr);
    } else {
        m_readRequestTable.erase(line_addr);
    }

    markRemoved();
}

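// Called when the controller loses permission on a line that may hold an
// LL/SC lock.  Clearing the lock bit in the data cache ensures that a
// pending Store_Conditional to that line will fail.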
void
Sequencer::invalidateSC(const Address& address)
{
    RequestTable::iterator i = m_writeRequestTable.find(address);
    if (i != m_writeRequestTable.end()) {
        SequencerRequest* request = i->second;
        // The controller has lost the coherence permissions, hence the lock
        // on the cache line maintained by the cache should be cleared.
        if (request->m_type == RubyRequestType_Store_Conditional) {
            m_dataCache_ptr->clearLocked(address);
        }
    }
}

bool
Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
{
    //
    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    //
    bool success = true;
    if (request->m_type == RubyRequestType_Store_Conditional) {
        if (!m_dataCache_ptr->isLocked(address, m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        m_dataCache_ptr->clearLocked(address);
    } else if (request->m_type == RubyRequestType_Load_Linked) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        m_dataCache_ptr->setLocked(address, m_version);
    } else if ((m_dataCache_ptr->isTagPresent(address)) &&
               (m_dataCache_ptr->isLocked(address, m_version))) {
        //
        // Normal writes should clear the locked address
        //
        m_dataCache_ptr->clearLocked(address);
    }
    return success;
}

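// Record the latency profile for a completed request.  The total latency is
// always sampled; the hit/miss histograms are selected by isExternalHit, and
// the per-stage delay histograms (issue -> initial -> forward -> first
// response -> completion) are sampled only when the protocol supplied a
// monotonically non-decreasing set of timestamps.  Otherwise the request is
// counted in m_IncompleteTimes.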
void
Sequencer::recordMissLatency(const Cycles cycles, const RubyRequestType type,
                             const MachineType respondingMach,
                             bool isExternalHit, Cycles issuedTime,
                             Cycles initialRequestTime,
                             Cycles forwardRequestTime,
                             Cycles firstResponseTime, Cycles completionTime)
{
    m_latencyHist.sample(cycles);
    m_typeLatencyHist[type]->sample(cycles);

    if (isExternalHit) {
        m_missLatencyHist.sample(cycles);
        m_missTypeLatencyHist[type]->sample(cycles);

        if (respondingMach != MachineType_NUM) {
            m_missMachLatencyHist[respondingMach]->sample(cycles);
            m_missTypeMachLatencyHist[type][respondingMach]->sample(cycles);

            if ((issuedTime <= initialRequestTime) &&
                (initialRequestTime <= forwardRequestTime) &&
                (forwardRequestTime <= firstResponseTime) &&
                (firstResponseTime <= completionTime)) {

                m_IssueToInitialDelayHist[respondingMach]->sample(
                    initialRequestTime - issuedTime);
                m_InitialToForwardDelayHist[respondingMach]->sample(
                    forwardRequestTime - initialRequestTime);
                m_ForwardToFirstResponseDelayHist[respondingMach]->sample(
                    firstResponseTime - forwardRequestTime);
                m_FirstResponseToCompletionDelayHist[respondingMach]->sample(
                    completionTime - firstResponseTime);
            } else {
                m_IncompleteTimes[respondingMach]++;
            }
        }
    } else {
        m_hitLatencyHist.sample(cycles);
        m_hitTypeLatencyHist[type]->sample(cycles);

        if (respondingMach != MachineType_NUM) {
            m_hitMachLatencyHist[respondingMach]->sample(cycles);
            m_hitTypeMachLatencyHist[type][respondingMach]->sample(cycles);
        }
    }
}

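// Completion callback for store-class requests, invoked by the protocol's
// cache controller.  It removes the request from the write table, applies
// LL/SC and locked-RMW side effects, and then hands the data to
// hitCallback().  readCallback() below is the analogous path for loads and
// instruction fetches.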
void
Sequencer::writeCallback(const Address& address, DataBlock& data,
                         const bool externalHit, const MachineType mach,
                         const Cycles initialRequestTime,
                         const Cycles forwardRequestTime,
                         const Cycles firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_writeRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    SequencerRequest* request = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_ST) ||
           (request->m_type == RubyRequestType_ATOMIC) ||
           (request->m_type == RubyRequestType_RMW_Read) ||
           (request->m_type == RubyRequestType_RMW_Write) ||
           (request->m_type == RubyRequestType_Load_Linked) ||
           (request->m_type == RubyRequestType_Store_Conditional) ||
           (request->m_type == RubyRequestType_Locked_RMW_Read) ||
           (request->m_type == RubyRequestType_Locked_RMW_Write) ||
           (request->m_type == RubyRequestType_FLUSH));

    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    // Not valid for the Network_test protocol.
    //
    bool success = true;
    if (!m_usingNetworkTester)
        success = handleLlsc(address, request);

    if (request->m_type == RubyRequestType_Locked_RMW_Read) {
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->m_type == RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, data, success, mach, externalHit,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

void
Sequencer::readCallback(const Address& address, DataBlock& data,
                        bool externalHit, const MachineType mach,
                        Cycles initialRequestTime,
                        Cycles forwardRequestTime,
                        Cycles firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_readRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    SequencerRequest* request = i->second;

    m_readRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_LD) ||
           (request->m_type == RubyRequestType_IFETCH));

    hitCallback(request, data, true, mach, externalHit,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

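// Common completion path for reads and writes: touch the cache block as
// most recently used, record latency, copy data between the packet and the
// DataBlock, and finally either feed the cache recorder (during warmup or
// cooldown) or send the response back through the RubyPort.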
void
Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data,
                       bool llscSuccess,
                       const MachineType mach, const bool externalHit,
                       const Cycles initialRequestTime,
                       const Cycles forwardRequestTime,
                       const Cycles firstResponseTime)
{
    PacketPtr pkt = srequest->pkt;
    Address request_address(pkt->getAddr());
    Address request_line_address(pkt->getAddr());
    request_line_address.makeLineAddress();
    RubyRequestType type = srequest->m_type;
    Cycles issued_time = srequest->issue_time;

    // Set this cache entry to the most recently used
    if (type == RubyRequestType_IFETCH) {
        m_instCache_ptr->setMRU(request_line_address);
    } else {
        m_dataCache_ptr->setMRU(request_line_address);
    }

    assert(curCycle() >= issued_time);
    Cycles total_latency = curCycle() - issued_time;

    // Profile the latency for all demand accesses.
    recordMissLatency(total_latency, type, mach, externalHit, issued_time,
                      initialRequestTime, forwardRequestTime,
                      firstResponseTime, curCycle());

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
             curTick(), m_version, "Seq",
             llscSuccess ? "Done" : "SC_Failed", "", "",
             request_address, total_latency);

    // update the data unless it is a non-data-carrying flush
    if (RubySystem::getWarmupEnabled()) {
        data.setData(pkt->getConstPtr<uint8_t>(),
                     request_address.getOffset(), pkt->getSize());
    } else if (!pkt->isFlush()) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked)) {
            memcpy(pkt->getPtr<uint8_t>(),
                   data.getData(request_address.getOffset(), pkt->getSize()),
                   pkt->getSize());
        } else {
            data.setData(pkt->getConstPtr<uint8_t>(),
                         request_address.getOffset(), pkt->getSize());
        }
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data.  The tester will later access
    // this state.
    if (m_usingRubyTester) {
        DPRINTF(RubySequencer, "hitCallback %s 0x%x using RubyTester\n",
                pkt->cmdString(), pkt->getAddr());
        RubyTester::SenderState* testerSenderState =
            pkt->findNextSenderState<RubyTester::SenderState>();
        assert(testerSenderState);
        testerSenderState->subBlock.mergeFrom(data);
    }

    delete srequest;

    if (RubySystem::getWarmupEnabled()) {
        assert(pkt->req);
        delete pkt->req;
        delete pkt;
        g_system_ptr->m_cache_recorder->enqueueNextFetchRequest();
    } else if (RubySystem::getCooldownEnabled()) {
        delete pkt;
        g_system_ptr->m_cache_recorder->enqueueNextFlushRequest();
    } else {
        ruby_hit_callback(pkt);
    }
}

bool
Sequencer::empty() const
{
    return m_writeRequestTable.empty() && m_readRequestTable.empty();
}

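// Entry point from the RubyPort: classify the packet into a primary type
// (used to track the request here) and a secondary type (sent to the
// protocol), insert it into the request table, and issue it.  Any status
// other than RequestStatus_Issued means the packet was not accepted and is
// expected to be retried by the caller.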
RequestStatus
Sequencer::makeRequest(PacketPtr pkt)
{
    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    RubyRequestType primary_type = RubyRequestType_NULL;
    RubyRequestType secondary_type = RubyRequestType_NULL;

    if (pkt->isLLSC()) {
        //
        // Alpha LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics. In
        // particular, by identifying the operations as atomic, the protocol
        // should understand that migratory sharing optimizations should not
        // be performed (i.e. a load between the LL and SC should not steal
        // away exclusive permission).
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing SC\n");
            primary_type = RubyRequestType_Store_Conditional;
        } else {
            DPRINTF(RubySequencer, "Issuing LL\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Load_Linked;
        }
        secondary_type = RubyRequestType_ATOMIC;
    } else if (pkt->req->isLockedRMW()) {
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
            primary_type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Locked_RMW_Read;
        }
        secondary_type = RubyRequestType_ST;
    } else {
        if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                primary_type = secondary_type = RubyRequestType_IFETCH;
            } else {
                bool storeCheck = false;
                // only x86 needs the store check
                if (system->getArch() == Arch::X86ISA) {
                    uint32_t flags = pkt->req->getFlags();
                    storeCheck = flags &
                        (X86ISA::StoreCheck << X86ISA::FlagShift);
                }
                if (storeCheck) {
                    primary_type = RubyRequestType_RMW_Read;
                    secondary_type = RubyRequestType_ST;
                } else {
                    primary_type = secondary_type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isWrite()) {
            //
            // Note: M5 packets do not differentiate ST from RMW_Write
            //
            primary_type = secondary_type = RubyRequestType_ST;
        } else if (pkt->isFlush()) {
            primary_type = secondary_type = RubyRequestType_FLUSH;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    RequestStatus status = insertRequest(pkt, primary_type);
    if (status != RequestStatus_Ready)
        return status;

    issueRequest(pkt, secondary_type);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}

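// Build the RubyRequest message for the protocol and enqueue it on the
// mandatory queue, charged with the access latency of the cache that will
// service it (instruction cache for fetches, data cache otherwise).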
void
Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
{
    assert(pkt != NULL);
    int proc_id = -1;
    if (pkt->req->hasContextId()) {
        proc_id = pkt->req->contextId();
    }

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    // check if the packet has data as for example prefetch and flush
    // requests do not
    std::shared_ptr<RubyRequest> msg =
        std::make_shared<RubyRequest>(clockEdge(), pkt->getAddr(),
                                      pkt->isFlush() ?
                                      nullptr : pkt->getPtr<uint8_t>(),
                                      pkt->getSize(), pc, secondary_type,
                                      RubyAccessMode_Supervisor, pkt,
                                      PrefetchBit_No, proc_id);

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %s\n",
            curTick(), m_version, "Seq", "Begin", "", "",
            msg->getPhysicalAddress(),
            RubyRequestType_to_string(secondary_type));

    Cycles latency(0);  // initialized to a null value

    if (secondary_type == RubyRequestType_IFETCH)
        latency = m_instCache_ptr->getLatency();
    else
        latency = m_dataCache_ptr->getLatency();

    // Send the message to the cache controller
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, latency);
}

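// Pretty-printer for the request tables (hash_map keyed by line address),
// used by Sequencer::print() below.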
template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map)
{
    typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin();
    typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end();

    out << "[";
    for (; i != end; ++i)
        out << " " << i->first << "=" << i->second;
    out << " ]";

    return out;
}

void
Sequencer::print(ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", read request table: " << m_readRequestTable
        << ", write request table: " << m_writeRequestTable
        << "]";
}

// This can be called from setState whenever coherence permissions are
// upgraded.  When invoked, coherence violations will be checked for the
// given block.
void
Sequencer::checkCoherence(const Address& addr)
{
#ifdef CHECK_COHERENCE
    g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}

void
Sequencer::recordRequestType(SequencerRequestType requestType)
{
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            SequencerRequestType_to_string(requestType));
}

void
Sequencer::evictionCallback(const Address& address)
{
    ruby_eviction_callback(address);
}

void
Sequencer::regStats()
{
    m_store_waiting_on_load
        .name(name() + ".store_waiting_on_load")
        .desc("Number of times a store aliased with a pending load")
        .flags(Stats::nozero);
    m_store_waiting_on_store
        .name(name() + ".store_waiting_on_store")
        .desc("Number of times a store aliased with a pending store")
        .flags(Stats::nozero);
    m_load_waiting_on_load
        .name(name() + ".load_waiting_on_load")
        .desc("Number of times a load aliased with a pending load")
        .flags(Stats::nozero);
    m_load_waiting_on_store
        .name(name() + ".load_waiting_on_store")
        .desc("Number of times a load aliased with a pending store")
        .flags(Stats::nozero);

    // These statistical variables are not for display.
    // The profiler will collate these across different
    // sequencers and display those collated statistics.
    m_outstandReqHist.init(10);
    m_latencyHist.init(10);
    m_hitLatencyHist.init(10);
    m_missLatencyHist.init(10);

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist.push_back(new Stats::Histogram());
        m_typeLatencyHist[i]->init(10);

        m_hitTypeLatencyHist.push_back(new Stats::Histogram());
        m_hitTypeLatencyHist[i]->init(10);

        m_missTypeLatencyHist.push_back(new Stats::Histogram());
        m_missTypeLatencyHist[i]->init(10);
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_hitMachLatencyHist.push_back(new Stats::Histogram());
        m_hitMachLatencyHist[i]->init(10);

        m_missMachLatencyHist.push_back(new Stats::Histogram());
        m_missMachLatencyHist[i]->init(10);

        m_IssueToInitialDelayHist.push_back(new Stats::Histogram());
        m_IssueToInitialDelayHist[i]->init(10);

        m_InitialToForwardDelayHist.push_back(new Stats::Histogram());
        m_InitialToForwardDelayHist[i]->init(10);

        m_ForwardToFirstResponseDelayHist.push_back(new Stats::Histogram());
        m_ForwardToFirstResponseDelayHist[i]->init(10);

        m_FirstResponseToCompletionDelayHist.push_back(new Stats::Histogram());
        m_FirstResponseToCompletionDelayHist[i]->init(10);
    }

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_hitTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());
        m_missTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());

        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_hitTypeMachLatencyHist[i][j]->init(10);

            m_missTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_missTypeMachLatencyHist[i][j]->init(10);
        }
    }
}