Sequencer.cc revision 9224:b0539d08bda8
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "base/misc.hh"
#include "base/str.hh"
#include "config/the_isa.hh"
#if THE_ISA == X86_ISA
#include "arch/x86/insts/microldstop.hh"
#endif // X86_ISA
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubySequencer.hh"
#include "debug/RubyStats.hh"
#include "mem/protocol/PrefetchBit.hh"
#include "mem/protocol/RubyAccessMode.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/packet.hh"

using namespace std;

Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

Sequencer::Sequencer(const Params *p)
    : RubyPort(p), deadlockCheckEvent(this)
{
    m_store_waiting_on_load_cycles = 0;
    m_store_waiting_on_store_cycles = 0;
    m_load_waiting_on_store_cycles = 0;
    m_load_waiting_on_load_cycles = 0;

    m_outstanding_count = 0;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);

    m_usingNetworkTester = p->using_network_tester;
}

Sequencer::~Sequencer()
{
}

void
Sequencer::wakeup()
{
    // Check for deadlock of any of the requests
    Time current_time = g_system_ptr->getTime();

    // Check across all outstanding requests
    int total_outstanding = 0;

    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
             "version: %d request.paddr: 0x%x m_readRequestTable: %d "
             "current time: %u issue_time: %d difference: %d\n", m_version,
             Address(request->pkt->getAddr()), m_readRequestTable.size(),
             current_time, request->issue_time,
             current_time - request->issue_time);
    }

    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
             "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
             "current time: %u issue_time: %d difference: %d\n", m_version,
             Address(request->pkt->getAddr()), m_writeRequestTable.size(),
             current_time, request->issue_time,
             current_time - request->issue_time);
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent,
            g_system_ptr->clockPeriod() * m_deadlock_threshold + curTick());
    }
}
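
// Note (comment added for clarity; the numbers below are an assumed
// example, not taken from this file): the check above re-arms itself
// m_deadlock_threshold Ruby cycles into the future, converted to ticks
// via clockPeriod().  For instance, with a 1 GHz Ruby clock
// (clockPeriod() == 1000 ticks at gem5's default 1 THz tick rate) and an
// assumed threshold of 500000 cycles, each wakeup() is scheduled
// 500000 * 1000 == 500,000,000 ticks (0.5 ms of simulated time) ahead:
//
//     schedule(deadlockCheckEvent,
//         g_system_ptr->clockPeriod() * m_deadlock_threshold + curTick());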

void
Sequencer::printStats(ostream & out) const
{
    out << "Sequencer: " << m_name << endl
        << "  store_waiting_on_load_cycles: "
        << m_store_waiting_on_load_cycles << endl
        << "  store_waiting_on_store_cycles: "
        << m_store_waiting_on_store_cycles << endl
        << "  load_waiting_on_load_cycles: "
        << m_load_waiting_on_load_cycles << endl
        << "  load_waiting_on_store_cycles: "
        << m_load_waiting_on_store_cycles << endl;
}

void
Sequencer::printProgress(ostream& out) const
{
#if 0
    int total_demand = 0;
    out << "Sequencer Stats Version " << m_version << endl;
    out << "Current time = " << g_system_ptr->getTime() << endl;
    out << "---------------" << endl;
    out << "outstanding requests" << endl;

    out << "proc " << m_version
        << " Read Requests = " << m_readRequestTable.size() << endl;

    // print the read request table
    RequestTable::const_iterator read = m_readRequestTable.begin();
    RequestTable::const_iterator read_end = m_readRequestTable.end();
    int i = 0;
    for (; read != read_end; ++read, ++i) {
        SequencerRequest* request = read->second;
        out << "\tRequest[ " << i << " ] = " << request->m_type
            << " Address " << read->first
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    out << "proc " << m_version
        << " Write Requests = " << m_writeRequestTable.size() << endl;

    // print the write request table
    RequestTable::const_iterator write = m_writeRequestTable.begin();
    RequestTable::const_iterator write_end = m_writeRequestTable.end();
    i = 0;
    for (; write != write_end; ++write, ++i) {
        SequencerRequest* request = write->second;
        out << "\tRequest[ " << i << " ] = " << request->m_type
            << " Address " << write->first
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    out << endl;

    out << "Total Number Outstanding: " << m_outstanding_count << endl
        << "Total Number Demand     : " << total_demand << endl
        << "Total Number Prefetches : " << m_outstanding_count - total_demand
        << endl << endl << endl;
#endif
}

// Insert the request into the correct request table.  Return
// RequestStatus_Aliased if a request for the same cache line is already
// outstanding, RequestStatus_Ready otherwise.
RequestStatus
Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
{
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    // See if we should schedule a deadlock check
    if (deadlockCheckEvent.scheduled() == false) {
        schedule(deadlockCheckEvent,
            g_system_ptr->clockPeriod() * m_deadlock_threshold + curTick());
    }

    Address line_addr(pkt->getAddr());
    line_addr.makeLineAddress();
    // Create a default entry, mapping the address to NULL, the cast is
    // there to make gcc 4.4 happy
    RequestTable::value_type default_entry(line_addr,
                                           (SequencerRequest*) NULL);

    if ((request_type == RubyRequestType_ST) ||
        (request_type == RubyRequestType_RMW_Read) ||
        (request_type == RubyRequestType_RMW_Write) ||
        (request_type == RubyRequestType_Load_Linked) ||
        (request_type == RubyRequestType_Store_Conditional) ||
        (request_type == RubyRequestType_Locked_RMW_Read) ||
        (request_type == RubyRequestType_Locked_RMW_Write) ||
        (request_type == RubyRequestType_FLUSH)) {

        // Check if there is any outstanding read request for the same
        // cache line.
        if (m_readRequestTable.count(line_addr) > 0) {
            m_store_waiting_on_load_cycles++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(default_entry);
        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type,
                                             g_system_ptr->getTime());
            m_outstanding_count++;
        } else {
            // There is an outstanding write request for the cache line
            m_store_waiting_on_store_cycles++;
            return RequestStatus_Aliased;
        }
    } else {
        // Check if there is any outstanding write request for the same
        // cache line.
        if (m_writeRequestTable.count(line_addr) > 0) {
            m_load_waiting_on_store_cycles++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(default_entry);

        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type,
                                             g_system_ptr->getTime());
            m_outstanding_count++;
        } else {
            // There is an outstanding read request for the cache line
            m_load_waiting_on_load_cycles++;
            return RequestStatus_Aliased;
        }
    }

    g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    return RequestStatus_Ready;
}
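
// Example (illustrative sketch, not part of the original file): callers
// that receive RequestStatus_Aliased from insertRequest() are expected
// to retry the packet once the conflicting request for the same cache
// line completes.  The pendingPackets queue below is hypothetical; in
// gem5 this buffering happens above the Sequencer, in the port layer.
//
//     RequestStatus status = sequencer->makeRequest(pkt);
//     if (status == RequestStatus_Aliased) {
//         // Another request to this cache line is outstanding; defer.
//         pendingPackets.push_back(pkt);
//     }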

void
Sequencer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}

void
Sequencer::removeRequest(SequencerRequest* srequest)
{
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());

    Address line_addr(srequest->pkt->getAddr());
    line_addr.makeLineAddress();
    if ((srequest->m_type == RubyRequestType_ST) ||
        (srequest->m_type == RubyRequestType_RMW_Read) ||
        (srequest->m_type == RubyRequestType_RMW_Write) ||
        (srequest->m_type == RubyRequestType_Load_Linked) ||
        (srequest->m_type == RubyRequestType_Store_Conditional) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Read) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Write)) {
        m_writeRequestTable.erase(line_addr);
    } else {
        m_readRequestTable.erase(line_addr);
    }

    markRemoved();
}

bool
Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
{
    //
    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    //
    bool success = true;
    if (request->m_type == RubyRequestType_Store_Conditional) {
        if (!m_dataCache_ptr->isLocked(address, m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        m_dataCache_ptr->clearLocked(address);
    } else if (request->m_type == RubyRequestType_Load_Linked) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        m_dataCache_ptr->setLocked(address, m_version);
    } else if ((m_dataCache_ptr->isTagPresent(address)) &&
               (m_dataCache_ptr->isLocked(address, m_version))) {
        //
        // Normal writes should clear the locked address
        //
        m_dataCache_ptr->clearLocked(address);
    }
    return success;
}
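
// Example (illustrative sketch, not part of the original file): a CPU
// model observes the SC outcome recorded by handleLlsc() through the
// request's extra data once the response packet returns.
// Request::getExtraData() is the standard gem5 accessor; the retry
// logic is only a sketch.
//
//     // After the store-conditional's response arrives at the CPU:
//     bool sc_succeeded = (pkt->req->getExtraData() != 0);
//     if (!sc_succeeded) {
//         // The lock was lost between LL and SC; redo the LL/SC pair.
//     }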

void
Sequencer::writeCallback(const Address& address, DataBlock& data)
{
    writeCallback(address, GenericMachineType_NULL, data);
}

void
Sequencer::writeCallback(const Address& address,
                         GenericMachineType mach,
                         DataBlock& data)
{
    writeCallback(address, mach, data, 0, 0, 0);
}

void
Sequencer::writeCallback(const Address& address,
                         GenericMachineType mach,
                         DataBlock& data,
                         Time initialRequestTime,
                         Time forwardRequestTime,
                         Time firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_writeRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    SequencerRequest* request = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_ST) ||
           (request->m_type == RubyRequestType_ATOMIC) ||
           (request->m_type == RubyRequestType_RMW_Read) ||
           (request->m_type == RubyRequestType_RMW_Write) ||
           (request->m_type == RubyRequestType_Load_Linked) ||
           (request->m_type == RubyRequestType_Store_Conditional) ||
           (request->m_type == RubyRequestType_Locked_RMW_Read) ||
           (request->m_type == RubyRequestType_Locked_RMW_Write) ||
           (request->m_type == RubyRequestType_FLUSH));

    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    // Not valid for the Network_test protocol
    //
    bool success = true;
    if (!m_usingNetworkTester)
        success = handleLlsc(address, request);

    if (request->m_type == RubyRequestType_Locked_RMW_Read) {
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->m_type == RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, mach, data, success,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

void
Sequencer::readCallback(const Address& address, DataBlock& data)
{
    readCallback(address, GenericMachineType_NULL, data);
}

void
Sequencer::readCallback(const Address& address,
                        GenericMachineType mach,
                        DataBlock& data)
{
    readCallback(address, mach, data, 0, 0, 0);
}

void
Sequencer::readCallback(const Address& address,
                        GenericMachineType mach,
                        DataBlock& data,
                        Time initialRequestTime,
                        Time forwardRequestTime,
                        Time firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_readRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    SequencerRequest* request = i->second;

    m_readRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_LD) ||
           (request->m_type == RubyRequestType_IFETCH));

    hitCallback(request, mach, data, true,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

void
Sequencer::hitCallback(SequencerRequest* srequest,
                       GenericMachineType mach,
                       DataBlock& data,
                       bool success,
                       Time initialRequestTime,
                       Time forwardRequestTime,
                       Time firstResponseTime)
{
    PacketPtr pkt = srequest->pkt;
    Address request_address(pkt->getAddr());
    Address request_line_address(pkt->getAddr());
    request_line_address.makeLineAddress();
    RubyRequestType type = srequest->m_type;
    Time issued_time = srequest->issue_time;

    // Set this cache entry to the most recently used
    if (type == RubyRequestType_IFETCH) {
        m_instCache_ptr->setMRU(request_line_address);
    } else {
        m_dataCache_ptr->setMRU(request_line_address);
    }

    assert(g_system_ptr->getTime() >= issued_time);
    Time miss_latency = g_system_ptr->getTime() - issued_time;

    // Profile the miss latency for all non-zero demand misses
    if (miss_latency != 0) {
        g_system_ptr->getProfiler()->missLatency(miss_latency, type, mach);

        if (mach == GenericMachineType_L1Cache_wCC) {
            g_system_ptr->getProfiler()->missLatencyWcc(issued_time,
                                                   initialRequestTime,
                                                   forwardRequestTime,
                                                   firstResponseTime,
                                                   g_system_ptr->getTime());
        }

        if (mach == GenericMachineType_Directory) {
            g_system_ptr->getProfiler()->missLatencyDir(issued_time,
                                                   initialRequestTime,
                                                   forwardRequestTime,
                                                   firstResponseTime,
                                                   g_system_ptr->getTime());
        }

        DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
                 curTick(), m_version, "Seq",
                 success ? "Done" : "SC_Failed", "", "",
                 request_address, miss_latency);
    }

    // update the data
    if (g_system_ptr->m_warmup_enabled) {
        assert(pkt->getPtr<uint8_t>(false) != NULL);
        data.setData(pkt->getPtr<uint8_t>(false),
                     request_address.getOffset(), pkt->getSize());
    } else if (pkt->getPtr<uint8_t>(true) != NULL) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked)) {
            memcpy(pkt->getPtr<uint8_t>(true),
                   data.getData(request_address.getOffset(), pkt->getSize()),
                   pkt->getSize());
        } else {
            data.setData(pkt->getPtr<uint8_t>(true),
                         request_address.getOffset(), pkt->getSize());
        }
    } else {
        DPRINTF(MemoryAccess,
                "WARNING.  Data not transferred from Ruby to M5 for type %s\n",
                RubyRequestType_to_string(type));
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data.  The tester will later access
    // this state.
    // Note: RubyPort will access its sender state before the
    // RubyTester.
    if (m_usingRubyTester) {
        RubyPort::SenderState *requestSenderState =
            safe_cast<RubyPort::SenderState*>(pkt->senderState);
        RubyTester::SenderState* testerSenderState =
            safe_cast<RubyTester::SenderState*>(requestSenderState->saved);
        testerSenderState->subBlock->mergeFrom(data);
    }

    delete srequest;

    if (g_system_ptr->m_warmup_enabled) {
        delete pkt;
        g_system_ptr->m_cache_recorder->enqueueNextFetchRequest();
    } else if (g_system_ptr->m_cooldown_enabled) {
        delete pkt;
        g_system_ptr->m_cache_recorder->enqueueNextFlushRequest();
    } else {
        ruby_hit_callback(pkt);
    }
}
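
// Illustrative summary (comment added for clarity; mirrors the
// data-update logic in hitCallback() above): the copy direction depends
// on the request type.  Read-like types (LD, IFETCH, RMW_Read,
// Locked_RMW_Read, Load_Linked) copy the Ruby DataBlock into the packet;
// everything else copies the packet payload into the DataBlock.
// `isReadLike` is a hypothetical helper used only for this sketch:
//
//     int offset = request_address.getOffset();
//     if (isReadLike(type)) {
//         memcpy(pkt->getPtr<uint8_t>(true),
//                data.getData(offset, pkt->getSize()), pkt->getSize());
//     } else {
//         data.setData(pkt->getPtr<uint8_t>(true), offset, pkt->getSize());
//     }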

bool
Sequencer::empty() const
{
    return m_writeRequestTable.empty() && m_readRequestTable.empty();
}

RequestStatus
Sequencer::makeRequest(PacketPtr pkt)
{
    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    RubyRequestType primary_type = RubyRequestType_NULL;
    RubyRequestType secondary_type = RubyRequestType_NULL;

    if (pkt->isLLSC()) {
        //
        // Alpha LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics. In
        // particular, by identifying the operations as atomic, the protocol
        // should understand that migratory sharing optimizations should not
        // be performed (i.e. a load between the LL and SC should not steal
        // away exclusive permission).
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing SC\n");
            primary_type = RubyRequestType_Store_Conditional;
        } else {
            DPRINTF(RubySequencer, "Issuing LL\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Load_Linked;
        }
        secondary_type = RubyRequestType_ATOMIC;
    } else if (pkt->req->isLocked()) {
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
            primary_type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Locked_RMW_Read;
        }
        secondary_type = RubyRequestType_ST;
    } else {
        if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                primary_type = secondary_type = RubyRequestType_IFETCH;
            } else {
#if THE_ISA == X86_ISA
                uint32_t flags = pkt->req->getFlags();
                bool storeCheck = flags &
                        (TheISA::StoreCheck << TheISA::FlagShift);
#else
                bool storeCheck = false;
#endif // X86_ISA
                if (storeCheck) {
                    primary_type = RubyRequestType_RMW_Read;
                    secondary_type = RubyRequestType_ST;
                } else {
                    primary_type = secondary_type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isWrite()) {
            //
            // Note: M5 packets do not differentiate ST from RMW_Write
            //
            primary_type = secondary_type = RubyRequestType_ST;
        } else if (pkt->isFlush()) {
            primary_type = secondary_type = RubyRequestType_FLUSH;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    RequestStatus status = insertRequest(pkt, primary_type);
    if (status != RequestStatus_Ready)
        return status;

    issueRequest(pkt, secondary_type);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}
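
// Summary of the request-type mapping performed by makeRequest() above
// (comment added for clarity; derived directly from the code):
//
//     packet attribute           primary_type            secondary_type
//     ------------------------   ---------------------   --------------
//     LLSC + write (SC)          Store_Conditional       ATOMIC
//     LLSC + read  (LL)          Load_Linked             ATOMIC
//     locked + write             Locked_RMW_Write        ST
//     locked + read              Locked_RMW_Read         ST
//     read, inst fetch           IFETCH                  IFETCH
//     read, x86 store check      RMW_Read                ST
//     read, plain                LD                      LD
//     write                      ST                      ST
//     flush                      FLUSH                   FLUSH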

void
Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
{
    assert(pkt != NULL);
    int proc_id = -1;
    if (pkt->req->hasContextId()) {
        proc_id = pkt->req->contextId();
    }

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    RubyRequest *msg = new RubyRequest(pkt->getAddr(),
                                       pkt->getPtr<uint8_t>(true),
                                       pkt->getSize(), pc, secondary_type,
                                       RubyAccessMode_Supervisor, pkt,
                                       PrefetchBit_No, proc_id);

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %s\n",
            curTick(), m_version, "Seq", "Begin", "", "",
            msg->getPhysicalAddress(),
            RubyRequestType_to_string(secondary_type));

    Time latency = 0;  // initialized to an invalid value; set below

    if (secondary_type == RubyRequestType_IFETCH)
        latency = m_instCache_ptr->getLatency();
    else
        latency = m_dataCache_ptr->getLatency();

    // Send the message to the cache controller
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, latency);
}

template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map)
{
    typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin();
    typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end();

    out << "[";
    for (; i != end; ++i)
        out << " " << i->first << "=" << i->second;
    out << " ]";

    return out;
}
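
// Example (illustrative, not part of the original file): the templated
// operator<< above is what lets Sequencer::print() below stream the
// request tables directly.  A minimal standalone use:
//
//     m5::hash_map<Address, SequencerRequest*> table;
//     std::cout << table << std::endl;   // prints "[ ]" for an empty map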

void
Sequencer::print(ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", read request table: " << m_readRequestTable
        << ", write request table: " << m_writeRequestTable
        << "]";
}

// This can be called from setState whenever coherence permissions are
// upgraded.  When invoked, coherence violations will be checked for the
// given block.
void
Sequencer::checkCoherence(const Address& addr)
{
#ifdef CHECK_COHERENCE
    g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}

void
Sequencer::recordRequestType(SequencerRequestType requestType)
{
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            SequencerRequestType_to_string(requestType));
}

void
Sequencer::evictionCallback(const Address& address)
{
    ruby_eviction_callback(address);
}
738