/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "base/misc.hh"
#include "base/str.hh"
#include "config/the_isa.hh"
#if THE_ISA == X86_ISA
#include "arch/x86/insts/microldstop.hh"
#endif // X86_ISA
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubySequencer.hh"
#include "debug/RubyStats.hh"
#include "mem/packet.hh"
#include "mem/protocol/PrefetchBit.hh"
#include "mem/protocol/RubyAccessMode.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"

using namespace std;

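// create() is the factory invoked through the generated RubySequencerParams.
// A minimal Python-side configuration sketch (illustrative only; the
// parameter names below are the ones this file reads from p->, plus the
// RubyPort version parameter):
//   seq = RubySequencer(version = i, icache = l1i_cache, dcache = l1d_cache,
//                       max_outstanding_requests = 16,
//                       deadlock_threshold = 500000)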
Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

Sequencer::Sequencer(const Params *p)
    : RubyPort(p), m_IncompleteTimes(MachineType_NUM), deadlockCheckEvent(this)
{
    m_outstanding_count = 0;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);

    m_usingNetworkTester = p->using_network_tester;
}

Sequencer::~Sequencer()
{
}

void
Sequencer::wakeup()
{
    assert(getDrainState() != Drainable::Draining);

    // Check for deadlock of any of the requests
    Cycles current_time = curCycle();

    // Check across all outstanding requests
    int total_outstanding = 0;

    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_readRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              Address(request->pkt->getAddr()), m_readRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) -
              (request->issue_time * clockPeriod()));
    }

    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              Address(request->pkt->getAddr()), m_writeRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) -
              (request->issue_time * clockPeriod()));
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }
}

void
Sequencer::resetStats()
{
    m_latencyHist.reset();
    m_hitLatencyHist.reset();
    m_missLatencyHist.reset();
    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist[i]->reset();
        m_hitTypeLatencyHist[i]->reset();
        m_missTypeLatencyHist[i]->reset();
        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i][j]->reset();
            m_missTypeMachLatencyHist[i][j]->reset();
        }
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_missMachLatencyHist[i]->reset();
        m_hitMachLatencyHist[i]->reset();

        m_IssueToInitialDelayHist[i]->reset();
        m_InitialToForwardDelayHist[i]->reset();
        m_ForwardToFirstResponseDelayHist[i]->reset();
        m_FirstResponseToCompletionDelayHist[i]->reset();

        m_IncompleteTimes[i] = 0;
    }
}

void
Sequencer::printProgress(ostream& out) const
{
#if 0
    int total_demand = 0;
    out << "Sequencer Stats Version " << m_version << endl;
    out << "Current time = " << g_system_ptr->getTime() << endl;
    out << "---------------" << endl;
    out << "outstanding requests" << endl;

    out << "proc " << m_version
        << " Read Requests = " << m_readRequestTable.size() << endl;

    // print the read request table
    RequestTable::const_iterator read = m_readRequestTable.begin();
    RequestTable::const_iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        out << "\tRequest[ " << read->first << " ] = " << request->m_type
            << " Address " << read->first
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    out << "proc " << m_version
        << " Write Requests = " << m_writeRequestTable.size() << endl;

    // print the write request table
    RequestTable::const_iterator write = m_writeRequestTable.begin();
    RequestTable::const_iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        out << "\tRequest[ " << write->first << " ] = " << request->m_type
            << " Address " << write->first
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    out << endl;

    out << "Total Number Outstanding: " << m_outstanding_count << endl
        << "Total Number Demand     : " << total_demand << endl
        << "Total Number Prefetches : " << m_outstanding_count - total_demand
        << endl << endl << endl;
#endif
}

// Insert the request into the appropriate request table.  Return
// RequestStatus_Aliased if a request for the same cache line is already
// outstanding, otherwise RequestStatus_Ready.
RequestStatus
Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
{
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    // See if we should schedule a deadlock check
    if (!deadlockCheckEvent.scheduled() &&
        getDrainState() != Drainable::Draining) {
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }

    Address line_addr(pkt->getAddr());
    line_addr.makeLineAddress();
    // Create a default entry, mapping the address to NULL; the cast is
    // there to make gcc 4.4 happy
    RequestTable::value_type default_entry(line_addr,
                                           (SequencerRequest*) NULL);

    if ((request_type == RubyRequestType_ST) ||
        (request_type == RubyRequestType_RMW_Read) ||
        (request_type == RubyRequestType_RMW_Write) ||
        (request_type == RubyRequestType_Load_Linked) ||
        (request_type == RubyRequestType_Store_Conditional) ||
        (request_type == RubyRequestType_Locked_RMW_Read) ||
        (request_type == RubyRequestType_Locked_RMW_Write) ||
        (request_type == RubyRequestType_FLUSH)) {

        // Check if there is any outstanding read request for the same
        // cache line.
        if (m_readRequestTable.count(line_addr) > 0) {
            m_store_waiting_on_load++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(default_entry);
        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding write request for the cache line
            m_store_waiting_on_store++;
            return RequestStatus_Aliased;
        }
    } else {
        // Check if there is any outstanding write request for the same
        // cache line.
        if (m_writeRequestTable.count(line_addr) > 0) {
            m_load_waiting_on_store++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(default_entry);

        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding read request for the cache line
            m_load_waiting_on_load++;
            return RequestStatus_Aliased;
        }
    }

    m_outstandReqHist.sample(m_outstanding_count);
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    return RequestStatus_Ready;
}

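// Bookkeeping only: the caller is expected to have already erased the
// entry from its request table.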
void
Sequencer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}

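// Erase srequest's line address from whichever request table holds it
// (write-class types live in m_writeRequestTable, everything else in
// m_readRequestTable) and update the outstanding count.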
void
Sequencer::removeRequest(SequencerRequest* srequest)
{
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());

    Address line_addr(srequest->pkt->getAddr());
    line_addr.makeLineAddress();
    if ((srequest->m_type == RubyRequestType_ST) ||
        (srequest->m_type == RubyRequestType_RMW_Read) ||
        (srequest->m_type == RubyRequestType_RMW_Write) ||
        (srequest->m_type == RubyRequestType_Load_Linked) ||
        (srequest->m_type == RubyRequestType_Store_Conditional) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Read) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Write)) {
        m_writeRequestTable.erase(line_addr);
    } else {
        m_readRequestTable.erase(line_addr);
    }

    markRemoved();
}

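// Called when the controller loses coherence permissions on a locked line;
// clearing the lock guarantees that any pending store conditional to that
// line will fail.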
void
Sequencer::invalidateSC(const Address& address)
{
    RequestTable::iterator i = m_writeRequestTable.find(address);
    if (i != m_writeRequestTable.end()) {
        SequencerRequest* request = i->second;
        // The controller has lost the coherence permissions, hence the lock
        // on the cache line maintained by the cache should be cleared.
        if (request->m_type == RubyRequestType_Store_Conditional) {
            m_dataCache_ptr->clearLocked(address);
        }
    }
}

bool
Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
{
    //
    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
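    //
    // Illustrative only (not part of this file): the Alpha-style retry
    // loop this path supports looks like
    //
    //   retry: ldq_l  r1, 0(r2)   ; LL -> setLocked(line, m_version)
    //          addq   r1, 1, r1
    //          stq_c  r1, 0(r2)   ; SC -> succeeds only if still locked
    //          beq    r1, retry   ; r1 == 0 signals SC failure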
    //
    bool success = true;
    if (request->m_type == RubyRequestType_Store_Conditional) {
        if (!m_dataCache_ptr->isLocked(address, m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        m_dataCache_ptr->clearLocked(address);
    } else if (request->m_type == RubyRequestType_Load_Linked) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        m_dataCache_ptr->setLocked(address, m_version);
    } else if ((m_dataCache_ptr->isTagPresent(address)) &&
               (m_dataCache_ptr->isLocked(address, m_version))) {
        //
        // Normal writes should clear the locked address
        //
        m_dataCache_ptr->clearLocked(address);
    }
    return success;
}

void
Sequencer::recordMissLatency(const Cycles cycles, const RubyRequestType type,
                             const MachineType respondingMach,
                             bool isExternalHit, Cycles issuedTime,
                             Cycles initialRequestTime,
                             Cycles forwardRequestTime,
                             Cycles firstResponseTime, Cycles completionTime)
{
    m_latencyHist.sample(cycles);
    m_typeLatencyHist[type]->sample(cycles);

    if (isExternalHit) {
        m_missLatencyHist.sample(cycles);
        m_missTypeLatencyHist[type]->sample(cycles);

        if (respondingMach != MachineType_NUM) {
            m_missMachLatencyHist[respondingMach]->sample(cycles);
            m_missTypeMachLatencyHist[type][respondingMach]->sample(cycles);

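            // Expected ordering of the per-request timestamps:
            //   issued <= initialRequest <= forward <= firstResponse
            //          <= completion
            // Requests whose timestamps violate this ordering are only
            // counted in m_IncompleteTimes.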
            if ((issuedTime <= initialRequestTime) &&
                (initialRequestTime <= forwardRequestTime) &&
                (forwardRequestTime <= firstResponseTime) &&
                (firstResponseTime <= completionTime)) {

                m_IssueToInitialDelayHist[respondingMach]->sample(
                    initialRequestTime - issuedTime);
                m_InitialToForwardDelayHist[respondingMach]->sample(
                    forwardRequestTime - initialRequestTime);
                m_ForwardToFirstResponseDelayHist[respondingMach]->sample(
                    firstResponseTime - forwardRequestTime);
                m_FirstResponseToCompletionDelayHist[respondingMach]->sample(
                    completionTime - firstResponseTime);
            } else {
                m_IncompleteTimes[respondingMach]++;
            }
        }
    } else {
        m_hitLatencyHist.sample(cycles);
        m_hitTypeLatencyHist[type]->sample(cycles);

        if (respondingMach != MachineType_NUM) {
            m_hitMachLatencyHist[respondingMach]->sample(cycles);
            m_hitTypeMachLatencyHist[type][respondingMach]->sample(cycles);
        }
    }
}

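// Invoked by the cache controller when a write-class request completes:
// removes the request from the write table, resolves any LL/SC state, and
// hands the data block back to the CPU through hitCallback().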
void
Sequencer::writeCallback(const Address& address, DataBlock& data,
                         const bool externalHit, const MachineType mach,
                         const Cycles initialRequestTime,
                         const Cycles forwardRequestTime,
                         const Cycles firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_writeRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    SequencerRequest* request = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_ST) ||
           (request->m_type == RubyRequestType_ATOMIC) ||
           (request->m_type == RubyRequestType_RMW_Read) ||
           (request->m_type == RubyRequestType_RMW_Write) ||
           (request->m_type == RubyRequestType_Load_Linked) ||
           (request->m_type == RubyRequestType_Store_Conditional) ||
           (request->m_type == RubyRequestType_Locked_RMW_Read) ||
           (request->m_type == RubyRequestType_Locked_RMW_Write) ||
           (request->m_type == RubyRequestType_FLUSH));

    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    // Not valid for the Network_test protocol.
    //
    bool success = true;
    if (!m_usingNetworkTester)
        success = handleLlsc(address, request);

    if (request->m_type == RubyRequestType_Locked_RMW_Read) {
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->m_type == RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, data, success, mach, externalHit,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

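// Invoked by the cache controller when a load or instruction fetch
// completes: removes the request from the read table and passes the data
// block back through hitCallback().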
void
Sequencer::readCallback(const Address& address, DataBlock& data,
                        bool externalHit, const MachineType mach,
                        Cycles initialRequestTime,
                        Cycles forwardRequestTime,
                        Cycles firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_readRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    SequencerRequest* request = i->second;

    m_readRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_LD) ||
           (request->m_type == RubyRequestType_IFETCH));

    hitCallback(request, data, true, mach, externalHit,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

void
Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data,
                       bool llscSuccess,
                       const MachineType mach, const bool externalHit,
                       const Cycles initialRequestTime,
                       const Cycles forwardRequestTime,
                       const Cycles firstResponseTime)
{
    PacketPtr pkt = srequest->pkt;
    Address request_address(pkt->getAddr());
    Address request_line_address(pkt->getAddr());
    request_line_address.makeLineAddress();
    RubyRequestType type = srequest->m_type;
    Cycles issued_time = srequest->issue_time;

    // Set this cache entry to the most recently used
    if (type == RubyRequestType_IFETCH) {
        m_instCache_ptr->setMRU(request_line_address);
    } else {
        m_dataCache_ptr->setMRU(request_line_address);
    }

    assert(curCycle() >= issued_time);
    Cycles total_latency = curCycle() - issued_time;

    // Profile the latency for all demand accesses.
    recordMissLatency(total_latency, type, mach, externalHit, issued_time,
                      initialRequestTime, forwardRequestTime,
                      firstResponseTime, curCycle());

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
             curTick(), m_version, "Seq",
             llscSuccess ? "Done" : "SC_Failed", "", "",
             request_address, total_latency);

    // update the data
    if (g_system_ptr->m_warmup_enabled) {
        assert(pkt->getPtr<uint8_t>(false) != NULL);
        data.setData(pkt->getPtr<uint8_t>(false),
                     request_address.getOffset(), pkt->getSize());
    } else if (pkt->getPtr<uint8_t>(true) != NULL) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked)) {
            memcpy(pkt->getPtr<uint8_t>(true),
                   data.getData(request_address.getOffset(), pkt->getSize()),
                   pkt->getSize());
        } else {
            data.setData(pkt->getPtr<uint8_t>(true),
                         request_address.getOffset(), pkt->getSize());
        }
    } else {
        DPRINTF(MemoryAccess,
                "WARNING.  Data not transferred from Ruby to M5 for type %s\n",
                RubyRequestType_to_string(type));
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data.  The tester will later access
    // this state.
    // Note: RubyPort will access its sender state before the
    // RubyTester.
    if (m_usingRubyTester) {
        RubyPort::SenderState *reqSenderState =
            safe_cast<RubyPort::SenderState*>(pkt->senderState);
        // @todo This is a dangerous assumption on nothing else
        // modifying the senderState
        RubyTester::SenderState* testerSenderState =
            safe_cast<RubyTester::SenderState*>(reqSenderState->predecessor);
        testerSenderState->subBlock.mergeFrom(data);
    }

    delete srequest;

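    // During cache warmup and cooldown the packets originate from the
    // CacheRecorder rather than a CPU, so they are reclaimed here and the
    // recorder is prompted for its next request instead of returning the
    // packet through ruby_hit_callback().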
    if (g_system_ptr->m_warmup_enabled) {
        assert(pkt->req);
        delete pkt->req;
        delete pkt;
        g_system_ptr->m_cache_recorder->enqueueNextFetchRequest();
    } else if (g_system_ptr->m_cooldown_enabled) {
        delete pkt;
        g_system_ptr->m_cache_recorder->enqueueNextFlushRequest();
    } else {
        ruby_hit_callback(pkt);
    }
}

bool
Sequencer::empty() const
{
    return m_writeRequestTable.empty() && m_readRequestTable.empty();
}

RequestStatus
Sequencer::makeRequest(PacketPtr pkt)
{
    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    RubyRequestType primary_type = RubyRequestType_NULL;
    RubyRequestType secondary_type = RubyRequestType_NULL;

    if (pkt->isLLSC()) {
        //
        // Alpha LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics. In
        // particular, by identifying the operations as atomic, the protocol
        // should understand that migratory sharing optimizations should not
        // be performed (i.e. a load between the LL and SC should not steal
        // away exclusive permission).
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing SC\n");
            primary_type = RubyRequestType_Store_Conditional;
        } else {
            DPRINTF(RubySequencer, "Issuing LL\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Load_Linked;
        }
        secondary_type = RubyRequestType_ATOMIC;
    } else if (pkt->req->isLocked()) {
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
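        //
        // For example (illustrative decomposition, assumed rather than
        // defined in this file): a locked RMW such as "lock incl (%rax)"
        // reaches the sequencer as a Locked_RMW_Read followed by a
        // Locked_RMW_Write to the same line; writeCallback() above keeps
        // the line blocked on the mandatory queue between the two halves.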
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
            primary_type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Locked_RMW_Read;
        }
        secondary_type = RubyRequestType_ST;
    } else {
        if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                primary_type = secondary_type = RubyRequestType_IFETCH;
            } else {
#if THE_ISA == X86_ISA
                uint32_t flags = pkt->req->getFlags();
                bool storeCheck = flags &
                        (TheISA::StoreCheck << TheISA::FlagShift);
#else
                bool storeCheck = false;
#endif // X86_ISA
                if (storeCheck) {
                    primary_type = RubyRequestType_RMW_Read;
                    secondary_type = RubyRequestType_ST;
                } else {
                    primary_type = secondary_type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isWrite()) {
            //
            // Note: M5 packets do not differentiate ST from RMW_Write
            //
            primary_type = secondary_type = RubyRequestType_ST;
        } else if (pkt->isFlush()) {
            primary_type = secondary_type = RubyRequestType_FLUSH;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    RequestStatus status = insertRequest(pkt, primary_type);
    if (status != RequestStatus_Ready)
        return status;

    issueRequest(pkt, secondary_type);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}

void
Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
{
    assert(pkt != NULL);
    int proc_id = -1;
    if (pkt->req->hasContextId()) {
        proc_id = pkt->req->contextId();
    }

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    RubyRequest *msg = new RubyRequest(clockEdge(), pkt->getAddr(),
                                       pkt->getPtr<uint8_t>(true),
                                       pkt->getSize(), pc, secondary_type,
                                       RubyAccessMode_Supervisor, pkt,
                                       PrefetchBit_No, proc_id);

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %s\n",
            curTick(), m_version, "Seq", "Begin", "", "",
            msg->getPhysicalAddress(),
            RubyRequestType_to_string(secondary_type));

    Cycles latency(0);  // initialized to a null value

    if (secondary_type == RubyRequestType_IFETCH)
        latency = m_instCache_ptr->getLatency();
    else
        latency = m_dataCache_ptr->getLatency();

    // Send the message to the cache controller
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, latency);
}

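// Helper so Sequencer::print() below can stream the request tables.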
template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map)
{
    typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin();
    typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end();

    out << "[";
    for (; i != end; ++i)
        out << " " << i->first << "=" << i->second;
    out << " ]";

    return out;
}

void
Sequencer::print(ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", read request table: " << m_readRequestTable
        << ", write request table: " << m_writeRequestTable
        << "]";
}

// This can be called from setState whenever coherence permissions are
// upgraded; when invoked, coherence violations will be checked for the
// given block.
void
Sequencer::checkCoherence(const Address& addr)
{
#ifdef CHECK_COHERENCE
    g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}

void
Sequencer::recordRequestType(SequencerRequestType requestType)
{
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            SequencerRequestType_to_string(requestType));
}

void
Sequencer::evictionCallback(const Address& address)
{
    ruby_eviction_callback(address);
}

void
Sequencer::regStats()
{
    m_store_waiting_on_load
        .name(name() + ".store_waiting_on_load")
        .desc("Number of times a store aliased with a pending load")
        .flags(Stats::nozero);
    m_store_waiting_on_store
        .name(name() + ".store_waiting_on_store")
        .desc("Number of times a store aliased with a pending store")
        .flags(Stats::nozero);
    m_load_waiting_on_load
        .name(name() + ".load_waiting_on_load")
        .desc("Number of times a load aliased with a pending load")
        .flags(Stats::nozero);
    m_load_waiting_on_store
        .name(name() + ".load_waiting_on_store")
        .desc("Number of times a load aliased with a pending store")
        .flags(Stats::nozero);

    // These statistical variables are not for display.
    // The profiler will collate these across different
    // sequencers and display those collated statistics.
    m_outstandReqHist.init(10);
    m_latencyHist.init(10);
    m_hitLatencyHist.init(10);
    m_missLatencyHist.init(10);

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist.push_back(new Stats::Histogram());
        m_typeLatencyHist[i]->init(10);

        m_hitTypeLatencyHist.push_back(new Stats::Histogram());
        m_hitTypeLatencyHist[i]->init(10);

        m_missTypeLatencyHist.push_back(new Stats::Histogram());
        m_missTypeLatencyHist[i]->init(10);
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_hitMachLatencyHist.push_back(new Stats::Histogram());
        m_hitMachLatencyHist[i]->init(10);

        m_missMachLatencyHist.push_back(new Stats::Histogram());
        m_missMachLatencyHist[i]->init(10);

        m_IssueToInitialDelayHist.push_back(new Stats::Histogram());
        m_IssueToInitialDelayHist[i]->init(10);

        m_InitialToForwardDelayHist.push_back(new Stats::Histogram());
        m_InitialToForwardDelayHist[i]->init(10);

        m_ForwardToFirstResponseDelayHist.push_back(new Stats::Histogram());
        m_ForwardToFirstResponseDelayHist[i]->init(10);

        m_FirstResponseToCompletionDelayHist.push_back(new Stats::Histogram());
        m_FirstResponseToCompletionDelayHist[i]->init(10);
    }

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_hitTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());
        m_missTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());

        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_hitTypeMachLatencyHist[i][j]->init(10);

            m_missTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_missTypeMachLatencyHist[i][j]->init(10);
        }
    }
}
835