Sequencer.cc revision 9632:476febc1aff0
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "base/misc.hh"
#include "base/str.hh"
#include "config/the_isa.hh"
#if THE_ISA == X86_ISA
#include "arch/x86/insts/microldstop.hh"
#endif // X86_ISA
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubySequencer.hh"
#include "debug/RubyStats.hh"
#include "mem/protocol/PrefetchBit.hh"
#include "mem/protocol/RubyAccessMode.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/packet.hh"

using namespace std;

Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

Sequencer::Sequencer(const Params *p)
    : RubyPort(p), deadlockCheckEvent(this)
{
    m_store_waiting_on_load_cycles = 0;
    m_store_waiting_on_store_cycles = 0;
    m_load_waiting_on_store_cycles = 0;
    m_load_waiting_on_load_cycles = 0;

    m_outstanding_count = 0;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);

    m_usingNetworkTester = p->using_network_tester;
}
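// Note: icache, dcache, max_outstanding_requests, deadlock_threshold and
// using_network_tester above all arrive through RubySequencerParams, i.e.
// they are set from the Python configuration of the RubySequencer object
// at instantiation time.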

Sequencer::~Sequencer()
{
}

void
Sequencer::wakeup()
{
    assert(getDrainState() != Drainable::Draining);

    // Check for deadlock of any of the requests
    Cycles current_time = curCycle();

    // Check across all outstanding requests
    int total_outstanding = 0;

    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_readRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              Address(request->pkt->getAddr()), m_readRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) - (request->issue_time * clockPeriod()));
    }

    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              Address(request->pkt->getAddr()), m_writeRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) - (request->issue_time * clockPeriod()));
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }
}
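// Illustrative timing of the check above: with a deadlock_threshold of T
// cycles, the event re-schedules itself every T cycles while requests are
// outstanding, and a request only trips the panic once it has been in
// flight for at least T cycles since its issue_time.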

void
Sequencer::clearStats()
{
    m_outstandReqHist.clear();
}

void
Sequencer::printStats(ostream & out) const
{
    out << "Sequencer: " << m_name << endl
        << "  store_waiting_on_load_cycles: "
        << m_store_waiting_on_load_cycles << endl
        << "  store_waiting_on_store_cycles: "
        << m_store_waiting_on_store_cycles << endl
        << "  load_waiting_on_load_cycles: "
        << m_load_waiting_on_load_cycles << endl
        << "  load_waiting_on_store_cycles: "
        << m_load_waiting_on_store_cycles << endl;
}

void
Sequencer::printProgress(ostream& out) const
{
#if 0
    int total_demand = 0;
    out << "Sequencer Stats Version " << m_version << endl;
    out << "Current time = " << g_system_ptr->getTime() << endl;
    out << "---------------" << endl;
    out << "outstanding requests" << endl;

    out << "proc " << m_version
        << " Read Requests = " << m_readRequestTable.size() << endl;

    // print the read request table
    RequestTable::const_iterator read = m_readRequestTable.begin();
    RequestTable::const_iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        out << "\tRequest[ " << read->first << " ] = " << request->m_type
            << " Address " << read->first
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    out << "proc " << m_version
        << " Write Requests = " << m_writeRequestTable.size() << endl;

    // print the write request table
    RequestTable::const_iterator write = m_writeRequestTable.begin();
    RequestTable::const_iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        out << "\tRequest[ " << write->first << " ] = " << request->m_type
            << " Address " << write->first
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        // The sequencer only issues demand requests (see issueRequest),
        // so every outstanding write counts as demand.
        total_demand++;
    }

    out << endl;

    out << "Total Number Outstanding: " << m_outstanding_count << endl
        << "Total Number Demand     : " << total_demand << endl
        << "Total Number Prefetches : " << m_outstanding_count - total_demand
        << endl << endl << endl;
#endif
}

// Insert the request into the appropriate request table. Return
// RequestStatus_Aliased if a request for the same cache line is already
// outstanding, otherwise RequestStatus_Ready.
RequestStatus
Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
{
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    // See if we should schedule a deadlock check
    if (!deadlockCheckEvent.scheduled() &&
        getDrainState() != Drainable::Draining) {
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }

    Address line_addr(pkt->getAddr());
    line_addr.makeLineAddress();
    // Create a default entry, mapping the address to NULL, the cast is
    // there to make gcc 4.4 happy
    RequestTable::value_type default_entry(line_addr,
                                           (SequencerRequest*) NULL);

    if ((request_type == RubyRequestType_ST) ||
        (request_type == RubyRequestType_RMW_Read) ||
        (request_type == RubyRequestType_RMW_Write) ||
        (request_type == RubyRequestType_Load_Linked) ||
        (request_type == RubyRequestType_Store_Conditional) ||
        (request_type == RubyRequestType_Locked_RMW_Read) ||
        (request_type == RubyRequestType_Locked_RMW_Write) ||
        (request_type == RubyRequestType_FLUSH)) {

        // Check if there is any outstanding read request for the same
        // cache line.
        if (m_readRequestTable.count(line_addr) > 0) {
            m_store_waiting_on_load_cycles++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(default_entry);
        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding write request for the cache line
            m_store_waiting_on_store_cycles++;
            return RequestStatus_Aliased;
        }
    } else {
        // Check if there is any outstanding write request for the same
        // cache line.
        if (m_writeRequestTable.count(line_addr) > 0) {
            m_load_waiting_on_store_cycles++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(default_entry);

        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding read request for the cache line
            m_load_waiting_on_load_cycles++;
            return RequestStatus_Aliased;
        }
    }

    m_outstandReqHist.add(m_outstanding_count);
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    return RequestStatus_Ready;
}
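// Example of the aliasing rules above (illustrative addresses): with a load
// to line 0x1000 outstanding, a store to any address in that line (say
// 0x1008) returns RequestStatus_Aliased and bumps
// m_store_waiting_on_load_cycles; the caller is expected to retry later.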

void
Sequencer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}

void
Sequencer::removeRequest(SequencerRequest* srequest)
{
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());

    Address line_addr(srequest->pkt->getAddr());
    line_addr.makeLineAddress();
    if ((srequest->m_type == RubyRequestType_ST) ||
        (srequest->m_type == RubyRequestType_RMW_Read) ||
        (srequest->m_type == RubyRequestType_RMW_Write) ||
        (srequest->m_type == RubyRequestType_Load_Linked) ||
        (srequest->m_type == RubyRequestType_Store_Conditional) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Read) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Write)) {
        m_writeRequestTable.erase(line_addr);
    } else {
        m_readRequestTable.erase(line_addr);
    }

    markRemoved();
}

void
Sequencer::invalidateSC(const Address& address)
{
    RequestTable::iterator i = m_writeRequestTable.find(address);
    if (i != m_writeRequestTable.end()) {
        SequencerRequest* request = i->second;
        // The controller has lost the coherence permissions, hence the lock
        // on the cache line maintained by the cache should be cleared.
        if (request->m_type == RubyRequestType_Store_Conditional) {
            m_dataCache_ptr->clearLocked(address);
        }
    }
}

bool
Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
{
    //
    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    //
    bool success = true;
    if (request->m_type == RubyRequestType_Store_Conditional) {
        if (!m_dataCache_ptr->isLocked(address, m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        m_dataCache_ptr->clearLocked(address);
    } else if (request->m_type == RubyRequestType_Load_Linked) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        m_dataCache_ptr->setLocked(address, m_version);
    } else if ((m_dataCache_ptr->isTagPresent(address)) &&
               (m_dataCache_ptr->isLocked(address, m_version))) {
        //
        // Normal writes should clear the locked address
        //
        m_dataCache_ptr->clearLocked(address);
    }
    return success;
}
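// Illustrative LL/SC sequence as handled above (hypothetical address):
//   LL 0x1000 -> setLocked(0x1000, m_version); LL always succeeds
//   (another writer takes the line -> invalidateSC() clears the lock)
//   SC 0x1000 -> isLocked() now fails; setExtraData(0); SC fails
// Had the lock survived, the SC would call setExtraData(1) and succeed;
// either way the SC clears the lock.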

void
Sequencer::writeCallback(const Address& address, DataBlock& data)
{
    writeCallback(address, GenericMachineType_NULL, data);
}

void
Sequencer::writeCallback(const Address& address,
                         GenericMachineType mach,
                         DataBlock& data)
{
    writeCallback(address, mach, data, Cycles(0), Cycles(0), Cycles(0));
}

void
Sequencer::writeCallback(const Address& address,
                         GenericMachineType mach,
                         DataBlock& data,
                         Cycles initialRequestTime,
                         Cycles forwardRequestTime,
                         Cycles firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_writeRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    SequencerRequest* request = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_ST) ||
           (request->m_type == RubyRequestType_ATOMIC) ||
           (request->m_type == RubyRequestType_RMW_Read) ||
           (request->m_type == RubyRequestType_RMW_Write) ||
           (request->m_type == RubyRequestType_Load_Linked) ||
           (request->m_type == RubyRequestType_Store_Conditional) ||
           (request->m_type == RubyRequestType_Locked_RMW_Read) ||
           (request->m_type == RubyRequestType_Locked_RMW_Write) ||
           (request->m_type == RubyRequestType_FLUSH));

    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    // Not valid for the Network_test protocol.
    //
    bool success = true;
    if (!m_usingNetworkTester)
        success = handleLlsc(address, request);

    if (request->m_type == RubyRequestType_Locked_RMW_Read) {
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->m_type == RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, mach, data, success,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

void
Sequencer::readCallback(const Address& address, DataBlock& data)
{
    readCallback(address, GenericMachineType_NULL, data);
}

void
Sequencer::readCallback(const Address& address,
                        GenericMachineType mach,
                        DataBlock& data)
{
    readCallback(address, mach, data, Cycles(0), Cycles(0), Cycles(0));
}

void
Sequencer::readCallback(const Address& address,
                        GenericMachineType mach,
                        DataBlock& data,
                        Cycles initialRequestTime,
                        Cycles forwardRequestTime,
                        Cycles firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_readRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    SequencerRequest* request = i->second;

    m_readRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_LD) ||
           (request->m_type == RubyRequestType_IFETCH));

    hitCallback(request, mach, data, true,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

void
Sequencer::hitCallback(SequencerRequest* srequest,
                       GenericMachineType mach,
                       DataBlock& data,
                       bool success,
                       Cycles initialRequestTime,
                       Cycles forwardRequestTime,
                       Cycles firstResponseTime)
{
    PacketPtr pkt = srequest->pkt;
    Address request_address(pkt->getAddr());
    Address request_line_address(pkt->getAddr());
    request_line_address.makeLineAddress();
    RubyRequestType type = srequest->m_type;
    Cycles issued_time = srequest->issue_time;

    // Set this cache entry to the most recently used
    if (type == RubyRequestType_IFETCH) {
        m_instCache_ptr->setMRU(request_line_address);
    } else {
        m_dataCache_ptr->setMRU(request_line_address);
    }

    assert(curCycle() >= issued_time);
    Cycles miss_latency = curCycle() - issued_time;

    // Profile the miss latency for all non-zero demand misses
    if (miss_latency != 0) {
        g_system_ptr->getProfiler()->missLatency(miss_latency, type, mach);

        if (mach == GenericMachineType_L1Cache_wCC) {
            g_system_ptr->getProfiler()->missLatencyWcc(issued_time,
                initialRequestTime, forwardRequestTime,
                firstResponseTime, curCycle());
        }

        if (mach == GenericMachineType_Directory) {
            g_system_ptr->getProfiler()->missLatencyDir(issued_time,
                initialRequestTime, forwardRequestTime,
                firstResponseTime, curCycle());
        }

        DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
                 curTick(), m_version, "Seq",
                 success ? "Done" : "SC_Failed", "", "",
                 request_address, miss_latency);
    }

    // update the data
    if (g_system_ptr->m_warmup_enabled) {
        assert(pkt->getPtr<uint8_t>(false) != NULL);
        data.setData(pkt->getPtr<uint8_t>(false),
                     request_address.getOffset(), pkt->getSize());
    } else if (pkt->getPtr<uint8_t>(true) != NULL) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked)) {
            memcpy(pkt->getPtr<uint8_t>(true),
                   data.getData(request_address.getOffset(), pkt->getSize()),
                   pkt->getSize());
        } else {
            data.setData(pkt->getPtr<uint8_t>(true),
                         request_address.getOffset(), pkt->getSize());
        }
    } else {
        DPRINTF(MemoryAccess,
                "WARNING.  Data not transferred from Ruby to M5 for type %s\n",
                RubyRequestType_to_string(type));
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data.  The tester will later access
    // this state.
    // Note: RubyPort will access its sender state before the
    // RubyTester.
    if (m_usingRubyTester) {
        RubyPort::SenderState *reqSenderState =
            safe_cast<RubyPort::SenderState*>(pkt->senderState);
        // @todo This is a dangerous assumption on nothing else
        // modifying the senderState
        RubyTester::SenderState* testerSenderState =
            safe_cast<RubyTester::SenderState*>(reqSenderState->predecessor);
        testerSenderState->subBlock.mergeFrom(data);
    }

    delete srequest;

    if (g_system_ptr->m_warmup_enabled) {
        assert(pkt->req);
        delete pkt->req;
        delete pkt;
        g_system_ptr->m_cache_recorder->enqueueNextFetchRequest();
    } else if (g_system_ptr->m_cooldown_enabled) {
        delete pkt;
        g_system_ptr->m_cache_recorder->enqueueNextFlushRequest();
    } else {
        ruby_hit_callback(pkt);
    }
}
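// Data movement summary for hitCallback above: read-like types (LD, IFETCH,
// RMW_Read, Locked_RMW_Read, Load_Linked) copy from the Ruby DataBlock into
// the packet; other types with valid data (stores, atomics) copy the
// packet's data into the DataBlock so the protocol sees the new value.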

bool
Sequencer::empty() const
{
    return m_writeRequestTable.empty() && m_readRequestTable.empty();
}

RequestStatus
Sequencer::makeRequest(PacketPtr pkt)
{
    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    RubyRequestType primary_type = RubyRequestType_NULL;
    RubyRequestType secondary_type = RubyRequestType_NULL;

    if (pkt->isLLSC()) {
        //
        // Alpha LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics. In
        // particular, by identifying the operations as atomic, the protocol
        // should understand that migratory sharing optimizations should not
        // be performed (i.e. a load between the LL and SC should not steal
        // away exclusive permission).
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing SC\n");
            primary_type = RubyRequestType_Store_Conditional;
        } else {
            DPRINTF(RubySequencer, "Issuing LL\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Load_Linked;
        }
        secondary_type = RubyRequestType_ATOMIC;
    } else if (pkt->req->isLocked()) {
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
            primary_type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Locked_RMW_Read;
        }
        secondary_type = RubyRequestType_ST;
    } else {
        if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                primary_type = secondary_type = RubyRequestType_IFETCH;
            } else {
#if THE_ISA == X86_ISA
                uint32_t flags = pkt->req->getFlags();
                bool storeCheck = flags &
                        (TheISA::StoreCheck << TheISA::FlagShift);
#else
                bool storeCheck = false;
#endif // X86_ISA
                if (storeCheck) {
                    primary_type = RubyRequestType_RMW_Read;
                    secondary_type = RubyRequestType_ST;
                } else {
                    primary_type = secondary_type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isWrite()) {
            //
            // Note: M5 packets do not differentiate ST from RMW_Write
            //
            primary_type = secondary_type = RubyRequestType_ST;
        } else if (pkt->isFlush()) {
            primary_type = secondary_type = RubyRequestType_FLUSH;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    RequestStatus status = insertRequest(pkt, primary_type);
    if (status != RequestStatus_Ready)
        return status;

    issueRequest(pkt, secondary_type);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}
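// Summary of the packet-to-RubyRequestType mapping above
// (primary type / secondary type):
//   LL/SC read            -> Load_Linked / ATOMIC
//   LL/SC write           -> Store_Conditional / ATOMIC
//   x86 locked read       -> Locked_RMW_Read / ST
//   x86 locked write      -> Locked_RMW_Write / ST
//   inst fetch            -> IFETCH / IFETCH
//   x86 store-check read  -> RMW_Read / ST
//   plain read            -> LD / LD
//   plain write           -> ST / ST
//   flush                 -> FLUSH / FLUSH
// The primary type keys the request tables; the secondary type is what is
// actually sent to the protocol in issueRequest below.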

void
Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
{
    assert(pkt != NULL);
    int proc_id = -1;
    if (pkt->req->hasContextId()) {
        proc_id = pkt->req->contextId();
    }

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    RubyRequest *msg = new RubyRequest(clockEdge(), pkt->getAddr(),
                                       pkt->getPtr<uint8_t>(true),
                                       pkt->getSize(), pc, secondary_type,
                                       RubyAccessMode_Supervisor, pkt,
                                       PrefetchBit_No, proc_id);

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %s\n",
            curTick(), m_version, "Seq", "Begin", "", "",
            msg->getPhysicalAddress(),
            RubyRequestType_to_string(secondary_type));

    Cycles latency(0);  // initialized to a null value

    if (secondary_type == RubyRequestType_IFETCH)
        latency = m_instCache_ptr->getLatency();
    else
        latency = m_dataCache_ptr->getLatency();

    // Send the message to the cache controller
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, latency);
}
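// The enqueue latency above models the L1 cache access time: the
// SLICC-generated controller pops the RubyRequest off the mandatory queue
// after that many cycles and begins the coherence transaction.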

template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map)
{
    typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin();
    typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end();

    out << "[";
    for (; i != end; ++i)
        out << " " << i->first << "=" << i->second;
    out << " ]";

    return out;
}
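// The operator<< above exists so Sequencer::print below can stream the
// request tables directly; a table with one entry prints roughly as
// "[ <line address>=<SequencerRequest pointer> ]".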

void
Sequencer::print(ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", read request table: " << m_readRequestTable
        << ", write request table: " << m_writeRequestTable
        << "]";
}

// This can be called from setState whenever coherence permissions are
// upgraded; when invoked, coherence violations will be checked for the
// given block.
void
Sequencer::checkCoherence(const Address& addr)
{
#ifdef CHECK_COHERENCE
    g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}

void
Sequencer::recordRequestType(SequencerRequestType requestType)
{
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            SequencerRequestType_to_string(requestType));
}

void
Sequencer::evictionCallback(const Address& address)
{
    ruby_eviction_callback(address);
}
754