Sequencer.cc revision 6922:1620cffaa3b6

/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/libruby.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "cpu/rubytest/RubyTester.hh"

#include "params/RubySequencer.hh"

Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

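// Construct the Sequencer from the generated RubySequencerParams: grab the
// I- and D-cache pointers and the outstanding-request and deadlock limits,
// and zero the aliasing-stall counters reported by printStats().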
Sequencer::Sequencer(const Params *p)
    : RubyPort(p), deadlockCheckEvent(this)
{
    m_store_waiting_on_load_cycles = 0;
    m_store_waiting_on_store_cycles = 0;
    m_load_waiting_on_store_cycles = 0;
    m_load_waiting_on_load_cycles = 0;

    m_outstanding_count = 0;

    m_max_outstanding_requests = 0;
    m_deadlock_threshold = 0;
    m_instCache_ptr = NULL;
    m_dataCache_ptr = NULL;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;
    m_usingRubyTester = p->using_ruby_tester;

    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);
}

Sequencer::~Sequencer() {
}

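// Deadlock-check handler: scan both request tables and abort the
// simulation if any request has been outstanding for at least
// m_deadlock_threshold cycles; reschedule while requests remain.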
void Sequencer::wakeup() {
  // Check for deadlock of any of the requests
  Time current_time = g_eventQueue_ptr->getTime();

  // Check across all outstanding requests
  int total_outstanding = 0;

  Vector<Address> keys = m_readRequestTable.keys();
  for (int i=0; i<keys.size(); i++) {
    SequencerRequest* request = m_readRequestTable.lookup(keys[i]);
    if (current_time - request->issue_time >= m_deadlock_threshold) {
      WARN_MSG("Possible Deadlock detected");
      WARN_EXPR(request);
      WARN_EXPR(m_version);
      WARN_EXPR(request->ruby_request.paddr);
      WARN_EXPR(keys.size());
      WARN_EXPR(current_time);
      WARN_EXPR(request->issue_time);
      WARN_EXPR(current_time - request->issue_time);
      ERROR_MSG("Aborting");
    }
  }

  keys = m_writeRequestTable.keys();
  for (int i=0; i<keys.size(); i++) {
    SequencerRequest* request = m_writeRequestTable.lookup(keys[i]);
    if (current_time - request->issue_time >= m_deadlock_threshold) {
      WARN_MSG("Possible Deadlock detected");
      WARN_EXPR(request);
      WARN_EXPR(m_version);
      WARN_EXPR(current_time);
      WARN_EXPR(request->issue_time);
      WARN_EXPR(current_time - request->issue_time);
      WARN_EXPR(keys.size());
      ERROR_MSG("Aborting");
    }
  }
  total_outstanding += m_writeRequestTable.size() + m_readRequestTable.size();

  assert(m_outstanding_count == total_outstanding);

  // If there are still outstanding requests, keep checking
  if (m_outstanding_count > 0) {
    schedule(deadlockCheckEvent,
             (m_deadlock_threshold * g_eventQueue_ptr->getClock()) + curTick);
  }
}

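// Dump the per-sequencer aliasing-stall counters accumulated by
// getRequestStatus().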
void Sequencer::printStats(ostream & out) const {
  out << "Sequencer: " << m_name << endl;
  out << "  store_waiting_on_load_cycles: " << m_store_waiting_on_load_cycles << endl;
  out << "  store_waiting_on_store_cycles: " << m_store_waiting_on_store_cycles << endl;
  out << "  load_waiting_on_load_cycles: " << m_load_waiting_on_load_cycles << endl;
  out << "  load_waiting_on_store_cycles: " << m_load_waiting_on_store_cycles << endl;
}

void Sequencer::printProgress(ostream& out) const {
  /*
  int total_demand = 0;
  out << "Sequencer Stats Version " << m_version << endl;
  out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
  out << "---------------" << endl;
  out << "outstanding requests" << endl;

  Vector<Address> rkeys = m_readRequestTable.keys();
  int read_size = rkeys.size();
  out << "proc " << m_version << " Read Requests = " << read_size << endl;
  // print the request table
  for(int i=0; i < read_size; ++i){
    SequencerRequest * request = m_readRequestTable.lookup(rkeys[i]);
    out << "\tRequest[ " << i << " ] = " << request->type << " Address " << rkeys[i]  << " Posted " << request->issue_time << " PF " << PrefetchBit_No << endl;
    total_demand++;
  }

  Vector<Address> wkeys = m_writeRequestTable.keys();
  int write_size = wkeys.size();
  out << "proc " << m_version << " Write Requests = " << write_size << endl;
  // print the request table
  for(int i=0; i < write_size; ++i){
      CacheMsg & request = m_writeRequestTable.lookup(wkeys[i]);
      out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i]  << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
      if( request.getPrefetch() == PrefetchBit_No ){
        total_demand++;
      }
  }

  out << endl;

  out << "Total Number Outstanding: " << m_outstanding_count << endl;
  out << "Total Number Demand     : " << total_demand << endl;
  out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl;
  out << endl;
  out << endl;
  */
}

void Sequencer::printConfig(ostream& out) const {
  out << "Sequencer config: " << m_name << endl;
  out << "  controller: " << m_controller->getName() << endl;
  out << "  version: " << m_version << endl;
  out << "  max_outstanding_requests: " << m_max_outstanding_requests << endl;
  out << "  deadlock_threshold: " << m_deadlock_threshold << endl;
}

// Insert the request into the correct request table.  Return true if
// the entry was already present.
bool Sequencer::insertRequest(SequencerRequest* request) {
  int total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();

  assert(m_outstanding_count == total_outstanding);

  // See if we should schedule a deadlock check
  if (deadlockCheckEvent.scheduled() == false) {
    // Scale the cycle-based threshold by the clock so the check is
    // scheduled in ticks, matching the rescheduling done in wakeup()
    schedule(deadlockCheckEvent,
             (m_deadlock_threshold * g_eventQueue_ptr->getClock()) + curTick);
  }

  Address line_addr(request->ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((request->ruby_request.type == RubyRequestType_ST) ||
      (request->ruby_request.type == RubyRequestType_RMW_Read) ||
      (request->ruby_request.type == RubyRequestType_RMW_Write) ||
      (request->ruby_request.type == RubyRequestType_Locked_Read) ||
      (request->ruby_request.type == RubyRequestType_Locked_Write)) {
    if (m_writeRequestTable.exist(line_addr)) {
      m_writeRequestTable.lookup(line_addr) = request;
      //      return true;
      assert(0); // drh5: isn't this an error?  do you lose the initial request?
    }
    m_writeRequestTable.allocate(line_addr);
    m_writeRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  } else {
    if (m_readRequestTable.exist(line_addr)) {
      m_readRequestTable.lookup(line_addr) = request;
      //      return true;
      assert(0); // drh5: isn't this an error?  do you lose the initial request?
    }
    m_readRequestTable.allocate(line_addr);
    m_readRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  }

  g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);

  total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
  assert(m_outstanding_count == total_outstanding);

  return false;
}

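// Remove a completed request from whichever table holds it and decrement
// the outstanding count.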
void Sequencer::removeRequest(SequencerRequest* srequest) {
  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());

  const RubyRequest & ruby_request = srequest->ruby_request;
  Address line_addr(ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((ruby_request.type == RubyRequestType_ST) ||
      (ruby_request.type == RubyRequestType_RMW_Read) ||
      (ruby_request.type == RubyRequestType_RMW_Write) ||
      (ruby_request.type == RubyRequestType_Locked_Read) ||
      (ruby_request.type == RubyRequestType_Locked_Write)) {
    m_writeRequestTable.deallocate(line_addr);
  } else {
    m_readRequestTable.deallocate(line_addr);
  }
  m_outstanding_count--;

  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
}

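// Completion callback for store-type requests.  Besides the common hit
// handling, this applies the per-type side effects: Locked_Read sets the
// LL/SC lock bit, and the RMW pair blocks and later unblocks the
// mandatory queue so the read-modify-write appears atomic.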
void Sequencer::writeCallback(const Address& address, DataBlock& data) {
  assert(address == line_address(address));
  assert(m_writeRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_writeRequestTable.lookup(address);

  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_ST) ||
         (request->ruby_request.type == RubyRequestType_RMW_Read) ||
         (request->ruby_request.type == RubyRequestType_RMW_Write) ||
         (request->ruby_request.type == RubyRequestType_Locked_Read) ||
         (request->ruby_request.type == RubyRequestType_Locked_Write));

  if (request->ruby_request.type == RubyRequestType_Locked_Read) {
    m_dataCache_ptr->setLocked(address, m_version);
  }
  else if (request->ruby_request.type == RubyRequestType_RMW_Read) {
    m_controller->blockOnQueue(address, m_mandatory_q_ptr);
  }
  else if (request->ruby_request.type == RubyRequestType_RMW_Write) {
    m_controller->unblock(address);
  }

  hitCallback(request, data);
}

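// Completion callback for load-type requests (LD, IFETCH, RMW_Read).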
void Sequencer::readCallback(const Address& address, DataBlock& data) {
  assert(address == line_address(address));
  assert(m_readRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_readRequestTable.lookup(address);
  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_LD) ||
         (request->ruby_request.type == RubyRequestType_RMW_Read) ||
         (request->ruby_request.type == RubyRequestType_IFETCH));

  hitCallback(request, data);
}

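// Common completion path: touch the line's MRU state, profile the miss
// latency, copy data between the DataBlock and the requester's buffer,
// and hand the packet back to the port.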
void Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data) {
  const RubyRequest & ruby_request = srequest->ruby_request;
  Address request_address(ruby_request.paddr);
  Address request_line_address(ruby_request.paddr);
  request_line_address.makeLineAddress();
  RubyRequestType type = ruby_request.type;
  Time issued_time = srequest->issue_time;

  // Set this cache entry to the most recently used
  if (type == RubyRequestType_IFETCH) {
    if (m_instCache_ptr->isTagPresent(request_line_address))
      m_instCache_ptr->setMRU(request_line_address);
  } else {
    if (m_dataCache_ptr->isTagPresent(request_line_address))
      m_dataCache_ptr->setMRU(request_line_address);
  }

  assert(g_eventQueue_ptr->getTime() >= issued_time);
  Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

  // Profile the miss latency for all non-zero demand misses
  if (miss_latency != 0) {
    g_system_ptr->getProfiler()->missLatency(miss_latency, type);

    if (Debug::getProtocolTrace()) {
      g_system_ptr->getProfiler()->profileTransition("Seq", m_version,
          Address(ruby_request.paddr), "", "Done", "",
          int_to_string(miss_latency) + " cycles");
    }
  }
  /*
  if (request.getPrefetch() == PrefetchBit_Yes) {
    return; // Ignore the prefetch
  }
  */

  // Update the data: loads copy out of the block, stores copy into it
  if (ruby_request.data != NULL) {
    if ((type == RubyRequestType_LD) ||
        (type == RubyRequestType_IFETCH) ||
        (type == RubyRequestType_RMW_Read)) {
      memcpy(ruby_request.data,
             data.getData(request_address.getOffset(), ruby_request.len),
             ruby_request.len);
    } else {
      data.setData(ruby_request.data, request_address.getOffset(),
                   ruby_request.len);
    }
  }

  //
  // If using the RubyTester, update the RubyTester sender state's subBlock
  // with the received data.  The tester will later access this state.
  // Note: RubyPort will access its sender state before the RubyTester.
  //
  if (m_usingRubyTester) {
      RubyTester::SenderState* testerSenderState;
      testerSenderState = safe_cast<RubyTester::SenderState*>(
          safe_cast<RubyPort::SenderState*>(ruby_request.pkt->senderState)->saved);
      testerSenderState->subBlock->mergeFrom(data);
  }

  ruby_hit_callback(ruby_request.pkt);
  delete srequest;
}

// Check whether the new request aliases an outstanding load or store, or
// whether the sequencer is already full.  Returns RequestStatus_Aliased,
// RequestStatus_BufferFull, or RequestStatus_Ready accordingly.
RequestStatus Sequencer::getRequestStatus(const RubyRequest& request) {
  bool is_outstanding_store = m_writeRequestTable.exist(line_address(Address(request.paddr)));
  bool is_outstanding_load = m_readRequestTable.exist(line_address(Address(request.paddr)));
  if (is_outstanding_store) {
    if ((request.type == RubyRequestType_LD) ||
        (request.type == RubyRequestType_IFETCH) ||
        (request.type == RubyRequestType_RMW_Read)) {
      m_store_waiting_on_load_cycles++;
    } else {
      m_store_waiting_on_store_cycles++;
    }
    return RequestStatus_Aliased;
  } else if (is_outstanding_load) {
    if ((request.type == RubyRequestType_ST) ||
        (request.type == RubyRequestType_RMW_Write)) {
      m_load_waiting_on_store_cycles++;
    } else {
      m_load_waiting_on_load_cycles++;
    }
    return RequestStatus_Aliased;
  }

  if (m_outstanding_count >= m_max_outstanding_requests) {
    return RequestStatus_BufferFull;
  }

  return RequestStatus_Ready;
}

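// True once both request tables have drained.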
bool Sequencer::empty() const {
  return (m_writeRequestTable.size() == 0) && (m_readRequestTable.size() == 0);
}

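// Entry point from the RubyPort: reject aliased or over-threshold
// requests, record the request in a table, resolve the LL/SC lock bit for
// store-conditionals, and issue everything else to the cache controller.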
RequestStatus Sequencer::makeRequest(const RubyRequest & request)
{
  assert(Address(request.paddr).getOffset() + request.len <=
         RubySystem::getBlockSizeBytes());
  RequestStatus status = getRequestStatus(request);
  if (status == RequestStatus_Ready) {
    SequencerRequest *srequest = new SequencerRequest(request,
                                                  g_eventQueue_ptr->getTime());
    bool found = insertRequest(srequest);
    if (!found) {
      if (request.type == RubyRequestType_Locked_Write) {
        // NOTE: it is OK to check the locked flag here as the mandatory
        // queue will be checked first, ensuring that nothing comes between
        // checking the flag and servicing the store
        if (!m_dataCache_ptr->isLocked(line_address(Address(request.paddr)), m_version)) {
          return RequestStatus_LlscFailed;
        }
        else {
          m_dataCache_ptr->clearLocked(line_address(Address(request.paddr)));
        }
      }
      issueRequest(request);

      // TODO: issue hardware prefetches here
      return RequestStatus_Issued;
    }
    else {
        panic("Sequencer::makeRequest should never be called if the request "
              "is already outstanding\n");
        return RequestStatus_NULL;
    }
  } else {
    return status;
  }
}

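// Translate the RubyRequest into the CacheMsg consumed by the SLICC
// protocol and enqueue it on the mandatory queue after the appropriate
// cache tag-lookup latency.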
void Sequencer::issueRequest(const RubyRequest& request) {
  // TODO: get rid of CacheMsg, CacheRequestType, and AccessModeType, and
  // have SLICC use RubyRequest and subtypes natively
  CacheRequestType ctype;
  switch(request.type) {
  case RubyRequestType_IFETCH:
    ctype = CacheRequestType_IFETCH;
    break;
  case RubyRequestType_LD:
    ctype = CacheRequestType_LD;
    break;
  case RubyRequestType_ST:
    ctype = CacheRequestType_ST;
    break;
  case RubyRequestType_Locked_Read:
  case RubyRequestType_Locked_Write:
  case RubyRequestType_RMW_Read:
  case RubyRequestType_RMW_Write:
    ctype = CacheRequestType_ATOMIC;
    break;
  default:
    assert(0);
  }
  AccessModeType amtype;
  switch(request.access_mode){
  case RubyAccessMode_User:
    amtype = AccessModeType_UserMode;
    break;
  case RubyAccessMode_Supervisor:
    amtype = AccessModeType_SupervisorMode;
    break;
  case RubyAccessMode_Device:
    amtype = AccessModeType_UserMode;
    break;
  default:
    assert(0);
  }
  Address line_addr(request.paddr);
  line_addr.makeLineAddress();
  CacheMsg msg(line_addr, Address(request.paddr), ctype,
               Address(request.pc), amtype, request.len, PrefetchBit_No,
               request.proc_id);

  if (Debug::getProtocolTrace()) {
    g_system_ptr->getProfiler()->profileTransition("Seq", m_version,
        Address(request.paddr), "", "Begin", "",
        RubyRequestType_to_string(request.type));
  }

  if (g_system_ptr->getTracer()->traceEnabled()) {
    g_system_ptr->getTracer()->traceRequest(this, line_addr,
        Address(request.pc), request.type, g_eventQueue_ptr->getTime());
  }

  Time latency = 0;  // initialized to a null value

  if (request.type == RubyRequestType_IFETCH)
    latency = m_instCache_ptr->getLatency();
  else
    latency = m_dataCache_ptr->getLatency();

  // Send the message to the cache controller
  assert(latency > 0);

  assert(m_mandatory_q_ptr != NULL);
  m_mandatory_q_ptr->enqueue(msg, latency);
}
499bool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
500                               AccessModeType access_mode,
501                               int size, DataBlock*& data_ptr) {
502  if (type == CacheRequestType_IFETCH) {
503    return m_instCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
504  } else {
505    return m_dataCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
506  }
507}
508*/
509
void Sequencer::print(ostream& out) const {
  out << "[Sequencer: " << m_version
      << ", outstanding requests: " << m_outstanding_count;

  out << ", read request table: " << m_readRequestTable
      << ", write request table: " << m_writeRequestTable;
  out << "]";
}

// This can be called from setState whenever coherence permissions are
// upgraded; when invoked, the given block is checked for coherence
// violations.
void Sequencer::checkCoherence(const Address& addr) {
#ifdef CHECK_COHERENCE
  g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}