/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "base/misc.hh"
#include "base/str.hh"
#include "config/the_isa.hh"
#if THE_ISA == X86_ISA
#include "arch/x86/insts/microldstop.hh"
#endif // X86_ISA
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubySequencer.hh"
#include "mem/protocol/PrefetchBit.hh"
#include "mem/protocol/RubyAccessMode.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/packet.hh"
#include "params/RubySequencer.hh"

using namespace std;

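// Factory hook used by gem5's Python-based configuration system: each
// SimObject's generated params class provides a create() method that
// builds the corresponding C++ object once its parameters have been
// assigned.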
Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

Sequencer::Sequencer(const Params *p)
    : RubyPort(p), deadlockCheckEvent(this)
{
    m_store_waiting_on_load_cycles = 0;
    m_store_waiting_on_store_cycles = 0;
    m_load_waiting_on_store_cycles = 0;
    m_load_waiting_on_load_cycles = 0;

    m_outstanding_count = 0;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);

    m_usingNetworkTester = p->using_network_tester;
}

Sequencer::~Sequencer()
{
}

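// wakeup() is the handler for deadlockCheckEvent: it walks both request
// tables and panics if any outstanding request has been waiting longer
// than m_deadlock_threshold cycles, then re-arms itself for as long as
// requests remain outstanding.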
void
Sequencer::wakeup()
{
    // Check for deadlock of any of the requests
    Time current_time = g_eventQueue_ptr->getTime();

    // Check across all outstanding requests
    int total_outstanding = 0;

    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
             "version: %d request.paddr: 0x%x m_readRequestTable: %d "
             "current time: %u issue_time: %d difference: %d\n", m_version,
             Address(request->pkt->getAddr()), m_readRequestTable.size(),
             current_time, request->issue_time,
             current_time - request->issue_time);
    }

    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
             "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
             "current time: %u issue_time: %d difference: %d\n", m_version,
             Address(request->pkt->getAddr()), m_writeRequestTable.size(),
             current_time, request->issue_time,
             current_time - request->issue_time);
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent,
                 m_deadlock_threshold * g_eventQueue_ptr->getClock() +
                 curTick());
    }
}

void
Sequencer::printStats(ostream & out) const
{
    out << "Sequencer: " << m_name << endl
        << "  store_waiting_on_load_cycles: "
        << m_store_waiting_on_load_cycles << endl
        << "  store_waiting_on_store_cycles: "
        << m_store_waiting_on_store_cycles << endl
        << "  load_waiting_on_load_cycles: "
        << m_load_waiting_on_load_cycles << endl
        << "  load_waiting_on_store_cycles: "
        << m_load_waiting_on_store_cycles << endl;
}

void
Sequencer::printProgress(ostream& out) const
{
#if 0
    int total_demand = 0;
    out << "Sequencer Stats Version " << m_version << endl;
    out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
    out << "---------------" << endl;
    out << "outstanding requests" << endl;

    out << "proc " << m_version
        << " Read Requests = " << m_readRequestTable.size() << endl;

    // print the read request table
    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        out << "\tRequest[ " << read->first << " ] = " << request->m_type
            << " Address " << read->first
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    out << "proc " << m_version
        << " Write Requests = " << m_writeRequestTable.size() << endl;

    // print the write request table
    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        out << "\tRequest[ " << write->first << " ] = " << request->m_type
            << " Address " << write->first
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        // All requests issued through the sequencer are demand requests.
        total_demand++;
    }

    out << endl;

    out << "Total Number Outstanding: " << m_outstanding_count << endl
        << "Total Number Demand     : " << total_demand << endl
        << "Total Number Prefetches : " << m_outstanding_count - total_demand
        << endl << endl << endl;
#endif
}

void
Sequencer::printConfig(ostream& out) const
{
    out << "Sequencer config: " << m_name << endl
        << "  controller: " << m_controller->getName() << endl
        << "  version: " << m_version << endl
        << "  max_outstanding_requests: " << m_max_outstanding_requests << endl
        << "  deadlock_threshold: " << m_deadlock_threshold << endl;
}

// Insert the request into the correct request table.  Returns
// RequestStatus_Aliased if a request for the same cache line is already
// outstanding, otherwise RequestStatus_Ready.
RequestStatus
Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
{
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    // See if we should schedule a deadlock check
    if (deadlockCheckEvent.scheduled() == false) {
        // The threshold is in cycles; convert to ticks as in wakeup()
        schedule(deadlockCheckEvent,
                 m_deadlock_threshold * g_eventQueue_ptr->getClock() +
                 curTick());
    }

    Address line_addr(pkt->getAddr());
    line_addr.makeLineAddress();
    if ((request_type == RubyRequestType_ST) ||
        (request_type == RubyRequestType_RMW_Read) ||
        (request_type == RubyRequestType_RMW_Write) ||
        (request_type == RubyRequestType_Load_Linked) ||
        (request_type == RubyRequestType_Store_Conditional) ||
        (request_type == RubyRequestType_Locked_RMW_Read) ||
        (request_type == RubyRequestType_Locked_RMW_Write) ||
        (request_type == RubyRequestType_FLUSH)) {

        // Check if there is any outstanding read request for the same
        // cache line.
        if (m_readRequestTable.count(line_addr) > 0) {
            m_store_waiting_on_load_cycles++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(RequestTable::value_type(line_addr, 0));
        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type,
                                             g_eventQueue_ptr->getTime());
            m_outstanding_count++;
        } else {
            // There is an outstanding write request for the cache line
            m_store_waiting_on_store_cycles++;
            return RequestStatus_Aliased;
        }
    } else {
        // Check if there is any outstanding write request for the same
        // cache line.
        if (m_writeRequestTable.count(line_addr) > 0) {
            m_load_waiting_on_store_cycles++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(RequestTable::value_type(line_addr, 0));

        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type,
                                             g_eventQueue_ptr->getTime());
            m_outstanding_count++;
        } else {
            // There is an outstanding read request for the cache line
            m_load_waiting_on_load_cycles++;
            return RequestStatus_Aliased;
        }
    }

    g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    return RequestStatus_Ready;
}
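
// Note on aliasing: both tables are keyed by cache-line address, so at most
// one read and one write may be outstanding per line.  A colliding request
// returns RequestStatus_Aliased; the caller is responsible for retrying it
// once the earlier request for that line has completed.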

void
Sequencer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}

void
Sequencer::removeRequest(SequencerRequest* srequest)
{
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());

    Address line_addr(srequest->pkt->getAddr());
    line_addr.makeLineAddress();
    if ((srequest->m_type == RubyRequestType_ST) ||
        (srequest->m_type == RubyRequestType_RMW_Read) ||
        (srequest->m_type == RubyRequestType_RMW_Write) ||
        (srequest->m_type == RubyRequestType_Load_Linked) ||
        (srequest->m_type == RubyRequestType_Store_Conditional) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Read) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Write) ||
        (srequest->m_type == RubyRequestType_FLUSH)) {
        // Must match the write-table types in insertRequest(), including
        // FLUSH, so the request is erased from the table it was put in.
        m_writeRequestTable.erase(line_addr);
    } else {
        m_readRequestTable.erase(line_addr);
    }

    markRemoved();
}

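// Illustrative example (not simulator code): the classic Alpha lock-acquire
// loop built from LL/SC, which is the access pattern handleLlsc() below has
// to support.  The line is locked per context: setLocked() records
// m_version, and an SC succeeds only if isLocked(address, m_version) still
// holds when the store arrives.
//
//   try:  ldl_l   t0, 0(a0)     # load-linked: lock the line
//         bne     t0, try       # lock already held by someone, spin
//         lda     t0, 1(zero)
//         stl_c   t0, 0(a0)     # store-conditional: t0 = 1 iff it succeeded
//         beq     t0, try       # SC failed (line lock lost), retry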
bool
Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
{
    //
    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    //
    bool success = true;
    if (request->m_type == RubyRequestType_Store_Conditional) {
        if (!m_dataCache_ptr->isLocked(address, m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        m_dataCache_ptr->clearLocked(address);
    } else if (request->m_type == RubyRequestType_Load_Linked) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        m_dataCache_ptr->setLocked(address, m_version);
    } else if ((m_dataCache_ptr->isTagPresent(address)) &&
               (m_dataCache_ptr->isLocked(address, m_version))) {
        //
        // Normal writes should clear the locked address
        //
        m_dataCache_ptr->clearLocked(address);
    }
    return success;
}

void
Sequencer::writeCallback(const Address& address, DataBlock& data)
{
    writeCallback(address, GenericMachineType_NULL, data);
}

void
Sequencer::writeCallback(const Address& address,
                         GenericMachineType mach,
                         DataBlock& data)
{
    writeCallback(address, mach, data, 0, 0, 0);
}

void
Sequencer::writeCallback(const Address& address,
                         GenericMachineType mach,
                         DataBlock& data,
                         Time initialRequestTime,
                         Time forwardRequestTime,
                         Time firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_writeRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    SequencerRequest* request = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_ST) ||
           (request->m_type == RubyRequestType_ATOMIC) ||
           (request->m_type == RubyRequestType_RMW_Read) ||
           (request->m_type == RubyRequestType_RMW_Write) ||
           (request->m_type == RubyRequestType_Load_Linked) ||
           (request->m_type == RubyRequestType_Store_Conditional) ||
           (request->m_type == RubyRequestType_Locked_RMW_Read) ||
           (request->m_type == RubyRequestType_Locked_RMW_Write) ||
           (request->m_type == RubyRequestType_FLUSH));

    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    // Not valid for the Network_test protocol.
    //
    bool success = true;
    if (!m_usingNetworkTester)
        success = handleLlsc(address, request);

    if (request->m_type == RubyRequestType_Locked_RMW_Read) {
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->m_type == RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, mach, data, success,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

void
Sequencer::readCallback(const Address& address, DataBlock& data)
{
    readCallback(address, GenericMachineType_NULL, data);
}

void
Sequencer::readCallback(const Address& address,
                        GenericMachineType mach,
                        DataBlock& data)
{
    readCallback(address, mach, data, 0, 0, 0);
}

void
Sequencer::readCallback(const Address& address,
                        GenericMachineType mach,
                        DataBlock& data,
                        Time initialRequestTime,
                        Time forwardRequestTime,
                        Time firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_readRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    SequencerRequest* request = i->second;

    m_readRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_LD) ||
           (request->m_type == RubyRequestType_IFETCH));

    hitCallback(request, mach, data, true,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

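// Common completion path for reads and writes: update the cache's MRU
// state, profile the miss latency, copy data between the Ruby DataBlock
// and the M5 packet, and hand the packet back to RubyPort.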
void
Sequencer::hitCallback(SequencerRequest* srequest,
                       GenericMachineType mach,
                       DataBlock& data,
                       bool success,
                       Time initialRequestTime,
                       Time forwardRequestTime,
                       Time firstResponseTime)
{
    PacketPtr pkt = srequest->pkt;
    Address request_address(pkt->getAddr());
    Address request_line_address(pkt->getAddr());
    request_line_address.makeLineAddress();
    RubyRequestType type = srequest->m_type;
    Time issued_time = srequest->issue_time;

    // Set this cache entry to the most recently used
    if (type == RubyRequestType_IFETCH) {
        if (m_instCache_ptr->isTagPresent(request_line_address))
            m_instCache_ptr->setMRU(request_line_address);
    } else {
        if (m_dataCache_ptr->isTagPresent(request_line_address))
            m_dataCache_ptr->setMRU(request_line_address);
    }

    assert(g_eventQueue_ptr->getTime() >= issued_time);
    Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

    // Profile the miss latency for all non-zero demand misses
    if (miss_latency != 0) {
        g_system_ptr->getProfiler()->missLatency(miss_latency, type, mach);

        if (mach == GenericMachineType_L1Cache_wCC) {
            g_system_ptr->getProfiler()->missLatencyWcc(issued_time,
                                                   initialRequestTime,
                                                   forwardRequestTime,
                                                   firstResponseTime,
                                                   g_eventQueue_ptr->getTime());
        }

        if (mach == GenericMachineType_Directory) {
            g_system_ptr->getProfiler()->missLatencyDir(issued_time,
                                                   initialRequestTime,
                                                   forwardRequestTime,
                                                   firstResponseTime,
                                                   g_eventQueue_ptr->getTime());
        }

        DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
                 curTick(), m_version, "Seq",
                 success ? "Done" : "SC_Failed", "", "",
                 request_address, miss_latency);
    }

    // Update the data: loads copy from the Ruby DataBlock into the packet,
    // stores copy from the packet into the DataBlock.
    if (pkt->getPtr<uint8_t>(true) != NULL) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked)) {
            memcpy(pkt->getPtr<uint8_t>(true),
                   data.getData(request_address.getOffset(), pkt->getSize()),
                   pkt->getSize());
        } else {
            data.setData(pkt->getPtr<uint8_t>(true),
                         request_address.getOffset(), pkt->getSize());
        }
    } else {
        DPRINTF(MemoryAccess,
                "WARNING.  Data not transferred from Ruby to M5 for type %s\n",
                RubyRequestType_to_string(type));
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data.  The tester will later access
    // this state.
    // Note: RubyPort will access its sender state before the
    // RubyTester.
    if (m_usingRubyTester) {
        RubyPort::SenderState *requestSenderState =
            safe_cast<RubyPort::SenderState*>(pkt->senderState);
        RubyTester::SenderState* testerSenderState =
            safe_cast<RubyTester::SenderState*>(requestSenderState->saved);
        testerSenderState->subBlock->mergeFrom(data);
    }

    ruby_hit_callback(pkt);
    delete srequest;
}

bool
Sequencer::empty() const
{
    return m_writeRequestTable.empty() && m_readRequestTable.empty();
}

RequestStatus
Sequencer::makeRequest(PacketPtr pkt)
{
    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    RubyRequestType primary_type = RubyRequestType_NULL;
    RubyRequestType secondary_type = RubyRequestType_NULL;

    if (pkt->isLLSC()) {
        //
        // Alpha LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics. In
        // particular, by identifying the operations as atomic, the protocol
        // should understand that migratory sharing optimizations should not
        // be performed (i.e. a load between the LL and SC should not steal
        // away exclusive permission).
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing SC\n");
            primary_type = RubyRequestType_Store_Conditional;
        } else {
            DPRINTF(RubySequencer, "Issuing LL\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Load_Linked;
        }
        secondary_type = RubyRequestType_ATOMIC;
    } else if (pkt->req->isLocked()) {
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
            primary_type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Locked_RMW_Read;
        }
        secondary_type = RubyRequestType_ST;
    } else {
        if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                primary_type = secondary_type = RubyRequestType_IFETCH;
            } else {
#if THE_ISA == X86_ISA
                uint32_t flags = pkt->req->getFlags();
                bool storeCheck = flags &
                        (TheISA::StoreCheck << TheISA::FlagShift);
#else
                bool storeCheck = false;
#endif // X86_ISA
                if (storeCheck) {
                    primary_type = RubyRequestType_RMW_Read;
                    secondary_type = RubyRequestType_ST;
                } else {
                    primary_type = secondary_type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isWrite()) {
            //
            // Note: M5 packets do not differentiate ST from RMW_Write
            //
            primary_type = secondary_type = RubyRequestType_ST;
        } else if (pkt->isFlush()) {
            primary_type = secondary_type = RubyRequestType_FLUSH;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    RequestStatus status = insertRequest(pkt, primary_type);
    if (status != RequestStatus_Ready)
        return status;

    issueRequest(pkt, secondary_type);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}

void
Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
{
    int proc_id = -1;
    if (pkt != NULL && pkt->req->hasContextId()) {
        proc_id = pkt->req->contextId();
    }

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    RubyRequest *msg = new RubyRequest(pkt->getAddr(),
                                       pkt->getPtr<uint8_t>(true),
                                       pkt->getSize(), pc, secondary_type,
                                       RubyAccessMode_Supervisor, pkt,
                                       PrefetchBit_No, proc_id);

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %s\n",
            curTick(), m_version, "Seq", "Begin", "", "",
            msg->getPhysicalAddress(),
            RubyRequestType_to_string(secondary_type));

    Time latency = 0;  // initialized to an invalid value; set below

    if (secondary_type == RubyRequestType_IFETCH)
        latency = m_instCache_ptr->getLatency();
    else
        latency = m_dataCache_ptr->getLatency();

    // Send the message to the cache controller
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, latency);
}

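// Debug-printing helper used by Sequencer::print() below: dumps the contents
// of a request table (an m5::hash_map) as "[ key=value ... ]".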
template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map)
{
    typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin();
    typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end();

    out << "[";
    for (; i != end; ++i)
        out << " " << i->first << "=" << i->second;
    out << " ]";

    return out;
}

void
Sequencer::print(ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", read request table: " << m_readRequestTable
        << ", write request table: " << m_writeRequestTable
        << "]";
}

// This may be called from setState whenever coherence permissions are
// upgraded; when invoked, coherence violations will be checked for the
// given block.
void
Sequencer::checkCoherence(const Address& addr)
{
#ifdef CHECK_COHERENCE
    g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}
725