Sequencer.cc revision 9773:915be89faf30
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "base/misc.hh"
#include "base/str.hh"
#include "config/the_isa.hh"
#if THE_ISA == X86_ISA
#include "arch/x86/insts/microldstop.hh"
#endif // X86_ISA
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubySequencer.hh"
#include "debug/RubyStats.hh"
#include "mem/protocol/PrefetchBit.hh"
#include "mem/protocol/RubyAccessMode.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/packet.hh"

using namespace std;

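// Factory method invoked through the Python configuration system: each
// RubySequencerParams object generated from the config script constructs
// its corresponding Sequencer SimObject here.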
Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

Sequencer::Sequencer(const Params *p)
    : RubyPort(p), deadlockCheckEvent(this)
{
    m_store_waiting_on_load_cycles = 0;
    m_store_waiting_on_store_cycles = 0;
    m_load_waiting_on_store_cycles = 0;
    m_load_waiting_on_load_cycles = 0;

    m_outstanding_count = 0;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);

    m_usingNetworkTester = p->using_network_tester;
}

Sequencer::~Sequencer()
{
}

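// Deadlock watchdog: wakeup() is the handler for deadlockCheckEvent. It
// scans both outstanding-request tables and panics if any request has been
// in flight for at least m_deadlock_threshold cycles; while requests remain
// outstanding, the event is rescheduled.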
void
Sequencer::wakeup()
{
    assert(getDrainState() != Drainable::Draining);

    // Check for deadlock of any of the requests
    Cycles current_time = curCycle();

    // Check across all outstanding requests
    int total_outstanding = 0;

    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_readRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              Address(request->pkt->getAddr()), m_readRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) - (request->issue_time * clockPeriod()));
    }

    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              Address(request->pkt->getAddr()), m_writeRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) - (request->issue_time * clockPeriod()));
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }
}

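// Reset all latency statistics. Three histogram families are maintained:
// one over all requests, one for requests serviced by the cache attached
// to this sequencer ("hit"), and one for requests serviced externally
// ("miss"), each further broken down by request type and by the machine
// type that responded.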
void
Sequencer::clearStats()
{
    m_outstandReqHist.clear();

    // Initialize the histograms that track latency of all requests
    m_latencyHist.clear(20);
    m_typeLatencyHist.resize(RubyRequestType_NUM);
    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist[i].clear(20);
    }

    // Initialize the histograms that track latency of requests that
    // hit in the cache attached to the sequencer.
    m_hitLatencyHist.clear(20);
    m_hitTypeLatencyHist.resize(RubyRequestType_NUM);
    m_hitTypeMachLatencyHist.resize(RubyRequestType_NUM);

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_hitTypeLatencyHist[i].clear(20);
        m_hitTypeMachLatencyHist[i].resize(MachineType_NUM);
        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i][j].clear(20);
        }
    }

    // Initialize the histograms that track the latency of requests that
    // missed in the cache attached to the sequencer.
    m_missLatencyHist.clear(20);
    m_missTypeLatencyHist.resize(RubyRequestType_NUM);
    m_missTypeMachLatencyHist.resize(RubyRequestType_NUM);

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_missTypeLatencyHist[i].clear(20);
        m_missTypeMachLatencyHist[i].resize(MachineType_NUM);
        for (int j = 0; j < MachineType_NUM; j++) {
            m_missTypeMachLatencyHist[i][j].clear(20);
        }
    }

    m_hitMachLatencyHist.resize(MachineType_NUM);
    m_missMachLatencyHist.resize(MachineType_NUM);
    m_IssueToInitialDelayHist.resize(MachineType_NUM);
    m_InitialToForwardDelayHist.resize(MachineType_NUM);
    m_ForwardToFirstResponseDelayHist.resize(MachineType_NUM);
    m_FirstResponseToCompletionDelayHist.resize(MachineType_NUM);
    m_IncompleteTimes.resize(MachineType_NUM);

    for (int i = 0; i < MachineType_NUM; i++) {
        m_missMachLatencyHist[i].clear(20);
        m_hitMachLatencyHist[i].clear(20);

        m_IssueToInitialDelayHist[i].clear(20);
        m_InitialToForwardDelayHist[i].clear(20);
        m_ForwardToFirstResponseDelayHist[i].clear(20);
        m_FirstResponseToCompletionDelayHist[i].clear(20);

        m_IncompleteTimes[i] = 0;
    }
}

void
Sequencer::printStats(ostream & out) const
{
    out << "Sequencer: " << m_name << endl
        << "  store_waiting_on_load_cycles: "
        << m_store_waiting_on_load_cycles << endl
        << "  store_waiting_on_store_cycles: "
        << m_store_waiting_on_store_cycles << endl
        << "  load_waiting_on_load_cycles: "
        << m_load_waiting_on_load_cycles << endl
        << "  load_waiting_on_store_cycles: "
        << m_load_waiting_on_store_cycles << endl;
}

void
Sequencer::printProgress(ostream& out) const
{
#if 0
    int total_demand = 0;
    out << "Sequencer Stats Version " << m_version << endl;
    out << "Current time = " << curCycle() << endl;
    out << "---------------" << endl;
    out << "outstanding requests" << endl;

    out << "proc " << m_version
        << " Read Requests = " << m_readRequestTable.size() << endl;

    // print the read request table
    RequestTable::const_iterator read = m_readRequestTable.begin();
    RequestTable::const_iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        out << "\tRequest[ " << read->first << " ] = " << request->m_type
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    out << "proc " << m_version
        << " Write Requests = " << m_writeRequestTable.size() << endl;

    // print the write request table
    RequestTable::const_iterator write = m_writeRequestTable.begin();
    RequestTable::const_iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        out << "\tRequest[ " << write->first << " ] = " << request->m_type
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        // The sequencer issues demand requests only, so every entry
        // counts as demand.
        total_demand++;
    }

    out << endl;

    out << "Total Number Outstanding: " << m_outstanding_count << endl
        << "Total Number Demand     : " << total_demand << endl
        << "Total Number Prefetches : " << m_outstanding_count - total_demand
        << endl << endl << endl;
#endif
}

// Insert the request into the appropriate request table. Returns
// RequestStatus_Aliased if a request for the same cache line is already
// outstanding, otherwise inserts the request and returns
// RequestStatus_Ready.
RequestStatus
Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
{
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    // See if we should schedule a deadlock check
    if (!deadlockCheckEvent.scheduled() &&
        getDrainState() != Drainable::Draining) {
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }

    Address line_addr(pkt->getAddr());
    line_addr.makeLineAddress();
    // Create a default entry, mapping the address to NULL; the cast is
    // there to make gcc 4.4 happy
    RequestTable::value_type default_entry(line_addr,
                                           (SequencerRequest*) NULL);

    if ((request_type == RubyRequestType_ST) ||
        (request_type == RubyRequestType_RMW_Read) ||
        (request_type == RubyRequestType_RMW_Write) ||
        (request_type == RubyRequestType_Load_Linked) ||
        (request_type == RubyRequestType_Store_Conditional) ||
        (request_type == RubyRequestType_Locked_RMW_Read) ||
        (request_type == RubyRequestType_Locked_RMW_Write) ||
        (request_type == RubyRequestType_FLUSH)) {

        // Check if there is any outstanding read request for the same
        // cache line.
        if (m_readRequestTable.count(line_addr) > 0) {
            m_store_waiting_on_load_cycles++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(default_entry);
        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding write request for the cache line
            m_store_waiting_on_store_cycles++;
            return RequestStatus_Aliased;
        }
    } else {
        // Check if there is any outstanding write request for the same
        // cache line.
        if (m_writeRequestTable.count(line_addr) > 0) {
            m_load_waiting_on_store_cycles++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(default_entry);

        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding read request for the cache line
            m_load_waiting_on_load_cycles++;
            return RequestStatus_Aliased;
        }
    }

    m_outstandReqHist.add(m_outstanding_count);
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    return RequestStatus_Ready;
}

void
Sequencer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}

void
Sequencer::removeRequest(SequencerRequest* srequest)
{
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());

    Address line_addr(srequest->pkt->getAddr());
    line_addr.makeLineAddress();
    if ((srequest->m_type == RubyRequestType_ST) ||
        (srequest->m_type == RubyRequestType_RMW_Read) ||
        (srequest->m_type == RubyRequestType_RMW_Write) ||
        (srequest->m_type == RubyRequestType_Load_Linked) ||
        (srequest->m_type == RubyRequestType_Store_Conditional) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Read) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Write)) {
        m_writeRequestTable.erase(line_addr);
    } else {
        m_readRequestTable.erase(line_addr);
    }

    markRemoved();
}

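// Called when the controller loses coherence permission on a line. If a
// Store_Conditional is still outstanding for that line, clear the lock bit
// in the data cache so that the SC will subsequently fail.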
void
Sequencer::invalidateSC(const Address& address)
{
    RequestTable::iterator i = m_writeRequestTable.find(address);
    if (i != m_writeRequestTable.end()) {
        SequencerRequest* request = i->second;
        // The controller has lost the coherence permissions, hence the lock
        // on the cache line maintained by the cache should be cleared.
        if (request->m_type == RubyRequestType_Store_Conditional) {
            m_dataCache_ptr->clearLocked(address);
        }
    }
}

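// A rough sketch of how a load-locked/store-conditional pair flows through
// the sequencer (illustrative only; the exact callback sequence depends on
// the protocol):
//
//   1. LL packet -> makeRequest() issues it as Load_Linked (secondary
//      type ATOMIC); on completion writeCallback() calls handleLlsc(),
//      which locks the line via setLocked().
//   2. SC packet -> makeRequest() issues it as Store_Conditional; on
//      completion handleLlsc() checks isLocked(): on failure the packet's
//      extra data is set to 0, on success to 1, and the lock is cleared
//      either way.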
bool
Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
{
    //
    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    //
    bool success = true;
    if (request->m_type == RubyRequestType_Store_Conditional) {
        if (!m_dataCache_ptr->isLocked(address, m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        m_dataCache_ptr->clearLocked(address);
    } else if (request->m_type == RubyRequestType_Load_Linked) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        m_dataCache_ptr->setLocked(address, m_version);
    } else if ((m_dataCache_ptr->isTagPresent(address)) &&
               (m_dataCache_ptr->isLocked(address, m_version))) {
        //
        // Normal writes should clear the locked address
        //
        m_dataCache_ptr->clearLocked(address);
    }
    return success;
}

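// Record the end-to-end latency of a completed request and, for external
// hits, the four component delays (issue->initial request, initial->
// forward, forward->first response, first response->completion). The
// component histograms are only updated when the timestamps are
// monotonically ordered; otherwise the sample is counted in
// m_IncompleteTimes.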
void
Sequencer::recordMissLatency(const Cycles cycles, const RubyRequestType type,
                             const MachineType respondingMach,
                             bool isExternalHit, Cycles issuedTime,
                             Cycles initialRequestTime,
                             Cycles forwardRequestTime,
                             Cycles firstResponseTime, Cycles completionTime)
{
    m_latencyHist.add(cycles);
    m_typeLatencyHist[type].add(cycles);

    if (isExternalHit) {
        m_missLatencyHist.add(cycles);
        m_missTypeLatencyHist[type].add(cycles);

        if (respondingMach != MachineType_NUM) {
            m_missMachLatencyHist[respondingMach].add(cycles);
            m_missTypeMachLatencyHist[type][respondingMach].add(cycles);

            if ((issuedTime <= initialRequestTime) &&
                (initialRequestTime <= forwardRequestTime) &&
                (forwardRequestTime <= firstResponseTime) &&
                (firstResponseTime <= completionTime)) {

                m_IssueToInitialDelayHist[respondingMach].add(
                    initialRequestTime - issuedTime);
                m_InitialToForwardDelayHist[respondingMach].add(
                    forwardRequestTime - initialRequestTime);
                m_ForwardToFirstResponseDelayHist[respondingMach].add(
                    firstResponseTime - forwardRequestTime);
                m_FirstResponseToCompletionDelayHist[respondingMach].add(
                    completionTime - firstResponseTime);
            } else {
                m_IncompleteTimes[respondingMach]++;
            }
        }
    } else {
        m_hitLatencyHist.add(cycles);
        m_hitTypeLatencyHist[type].add(cycles);

        if (respondingMach != MachineType_NUM) {
            m_hitMachLatencyHist[respondingMach].add(cycles);
            m_hitTypeMachLatencyHist[type][respondingMach].add(cycles);
        }
    }
}

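// writeCallback() and readCallback() below are invoked by the generated
// SLICC protocol code when the cache controller completes a request. They
// look the request up in the appropriate table, retire it, and hand the
// result to hitCallback().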
void
Sequencer::writeCallback(const Address& address, DataBlock& data,
                         const bool externalHit, const MachineType mach,
                         const Cycles initialRequestTime,
                         const Cycles forwardRequestTime,
                         const Cycles firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_writeRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    SequencerRequest* request = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_ST) ||
           (request->m_type == RubyRequestType_ATOMIC) ||
           (request->m_type == RubyRequestType_RMW_Read) ||
           (request->m_type == RubyRequestType_RMW_Write) ||
           (request->m_type == RubyRequestType_Load_Linked) ||
           (request->m_type == RubyRequestType_Store_Conditional) ||
           (request->m_type == RubyRequestType_Locked_RMW_Read) ||
           (request->m_type == RubyRequestType_Locked_RMW_Write) ||
           (request->m_type == RubyRequestType_FLUSH));

    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    // Not valid for the Network_test protocol
    //
    bool success = true;
    if (!m_usingNetworkTester)
        success = handleLlsc(address, request);

    if (request->m_type == RubyRequestType_Locked_RMW_Read) {
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->m_type == RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, data, success, mach, externalHit,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

void
Sequencer::readCallback(const Address& address, DataBlock& data,
                        bool externalHit, const MachineType mach,
                        Cycles initialRequestTime,
                        Cycles forwardRequestTime,
                        Cycles firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_readRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    SequencerRequest* request = i->second;

    m_readRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_LD) ||
           (request->m_type == RubyRequestType_IFETCH));

    hitCallback(request, data, true, mach, externalHit,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

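// Final completion step shared by reads and writes: update cache
// replacement state, profile the latency, move data between the Ruby
// DataBlock and the gem5 packet, service any RubyTester state, and hand
// the packet back to the port (or to the cache recorder during
// warmup/cooldown).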
void
Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data,
                       bool llscSuccess,
                       const MachineType mach, const bool externalHit,
                       const Cycles initialRequestTime,
                       const Cycles forwardRequestTime,
                       const Cycles firstResponseTime)
{
    PacketPtr pkt = srequest->pkt;
    Address request_address(pkt->getAddr());
    Address request_line_address(pkt->getAddr());
    request_line_address.makeLineAddress();
    RubyRequestType type = srequest->m_type;
    Cycles issued_time = srequest->issue_time;

    // Set this cache entry to the most recently used
    if (type == RubyRequestType_IFETCH) {
        m_instCache_ptr->setMRU(request_line_address);
    } else {
        m_dataCache_ptr->setMRU(request_line_address);
    }

    assert(curCycle() >= issued_time);
    Cycles total_latency = curCycle() - issued_time;

    // Profile the latency for all demand accesses.
    recordMissLatency(total_latency, type, mach, externalHit, issued_time,
                      initialRequestTime, forwardRequestTime,
                      firstResponseTime, curCycle());

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
             curTick(), m_version, "Seq",
             llscSuccess ? "Done" : "SC_Failed", "", "",
             request_address, total_latency);

    // Update the data: loads copy bytes out of the Ruby DataBlock into
    // the packet, stores copy the packet's bytes into the DataBlock.
    if (g_system_ptr->m_warmup_enabled) {
        assert(pkt->getPtr<uint8_t>(false) != NULL);
        data.setData(pkt->getPtr<uint8_t>(false),
                     request_address.getOffset(), pkt->getSize());
    } else if (pkt->getPtr<uint8_t>(true) != NULL) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked)) {
            memcpy(pkt->getPtr<uint8_t>(true),
                   data.getData(request_address.getOffset(), pkt->getSize()),
                   pkt->getSize());
        } else {
            data.setData(pkt->getPtr<uint8_t>(true),
                         request_address.getOffset(), pkt->getSize());
        }
    } else {
        DPRINTF(MemoryAccess,
                "WARNING.  Data not transferred from Ruby to M5 for type %s\n",
                RubyRequestType_to_string(type));
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data.  The tester will later access
    // this state.
    // Note: RubyPort will access its sender state before the
    // RubyTester.
    if (m_usingRubyTester) {
        RubyPort::SenderState *reqSenderState =
            safe_cast<RubyPort::SenderState*>(pkt->senderState);
        // @todo This is a dangerous assumption on nothing else
        // modifying the senderState
        RubyTester::SenderState* testerSenderState =
            safe_cast<RubyTester::SenderState*>(reqSenderState->predecessor);
        testerSenderState->subBlock.mergeFrom(data);
    }

    delete srequest;

    if (g_system_ptr->m_warmup_enabled) {
        assert(pkt->req);
        delete pkt->req;
        delete pkt;
        g_system_ptr->m_cache_recorder->enqueueNextFetchRequest();
    } else if (g_system_ptr->m_cooldown_enabled) {
        delete pkt;
        g_system_ptr->m_cache_recorder->enqueueNextFlushRequest();
    } else {
        ruby_hit_callback(pkt);
    }
}

bool
Sequencer::empty() const
{
    return m_writeRequestTable.empty() && m_readRequestTable.empty();
}

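// Entry point from the port: classify the packet into a primary request
// type (recorded in the request tables and used to interpret the response)
// and a secondary type (what is actually sent to the cache controller),
// then insert and issue it.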
RequestStatus
Sequencer::makeRequest(PacketPtr pkt)
{
    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    RubyRequestType primary_type = RubyRequestType_NULL;
    RubyRequestType secondary_type = RubyRequestType_NULL;

    if (pkt->isLLSC()) {
        //
        // Alpha LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics. In
        // particular, by identifying the operations as atomic, the protocol
        // should understand that migratory sharing optimizations should not
        // be performed (i.e. a load between the LL and SC should not steal
        // away exclusive permission).
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing SC\n");
            primary_type = RubyRequestType_Store_Conditional;
        } else {
            DPRINTF(RubySequencer, "Issuing LL\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Load_Linked;
        }
        secondary_type = RubyRequestType_ATOMIC;
    } else if (pkt->req->isLocked()) {
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
            primary_type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Locked_RMW_Read;
        }
        secondary_type = RubyRequestType_ST;
    } else {
        if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                primary_type = secondary_type = RubyRequestType_IFETCH;
            } else {
#if THE_ISA == X86_ISA
                uint32_t flags = pkt->req->getFlags();
                bool storeCheck = flags &
                        (TheISA::StoreCheck << TheISA::FlagShift);
#else
                bool storeCheck = false;
#endif // X86_ISA
                if (storeCheck) {
                    primary_type = RubyRequestType_RMW_Read;
                    secondary_type = RubyRequestType_ST;
                } else {
                    primary_type = secondary_type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isWrite()) {
            //
            // Note: M5 packets do not differentiate ST from RMW_Write
            //
            primary_type = secondary_type = RubyRequestType_ST;
        } else if (pkt->isFlush()) {
            primary_type = secondary_type = RubyRequestType_FLUSH;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    RequestStatus status = insertRequest(pkt, primary_type);
    if (status != RequestStatus_Ready)
        return status;

    issueRequest(pkt, secondary_type);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}

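// Package the packet into a RubyRequest and enqueue it on the mandatory
// queue with the appropriate cache access latency; the cache controller
// picks it up from there.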
void
Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
{
    assert(pkt != NULL);
    int proc_id = -1;
    if (pkt->req->hasContextId()) {
        proc_id = pkt->req->contextId();
    }

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    RubyRequest *msg = new RubyRequest(clockEdge(), pkt->getAddr(),
                                       pkt->getPtr<uint8_t>(true),
                                       pkt->getSize(), pc, secondary_type,
                                       RubyAccessMode_Supervisor, pkt,
                                       PrefetchBit_No, proc_id);

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %s\n",
            curTick(), m_version, "Seq", "Begin", "", "",
            msg->getPhysicalAddress(),
            RubyRequestType_to_string(secondary_type));

    Cycles latency(0);  // initialized to zero; set below from the cache latency

    if (secondary_type == RubyRequestType_IFETCH)
        latency = m_instCache_ptr->getLatency();
    else
        latency = m_dataCache_ptr->getLatency();

    // Send the message to the cache controller
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, latency);
}

template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map)
{
    typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin();
    typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end();

    out << "[";
    for (; i != end; ++i)
        out << " " << i->first << "=" << i->second;
    out << " ]";

    return out;
}

void
Sequencer::print(ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", read request table: " << m_readRequestTable
        << ", write request table: " << m_writeRequestTable
        << "]";
}

// This can be called from setState whenever coherence permissions are
// upgraded; when invoked, coherence violations will be checked for the
// given block.
void
Sequencer::checkCoherence(const Address& addr)
{
#ifdef CHECK_COHERENCE
    g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}

void
Sequencer::recordRequestType(SequencerRequestType requestType)
{
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            SequencerRequestType_to_string(requestType));
}

void
Sequencer::evictionCallback(const Address& address)
{
    ruby_eviction_callback(address);
}