Sequencer.cc revision 6893

/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/libruby.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"

#include "params/RubySequencer.hh"

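// Sentinel returned by makeRequest() when a store-conditional (locked
// write) fails because the lock bit set by the preceding locked read has
// since been cleared.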
#define LLSC_FAIL -2
long int already = 0;

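// Factory hook used by the generated RubySequencer SimObject params to
// construct a Sequencer instance.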
Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

Sequencer::Sequencer(const Params *p)
    : RubyPort(p), deadlockCheckEvent(this)
{
    m_store_waiting_on_load_cycles = 0;
    m_store_waiting_on_store_cycles = 0;
    m_load_waiting_on_store_cycles = 0;
    m_load_waiting_on_load_cycles = 0;

    m_outstanding_count = 0;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);
}

Sequencer::~Sequencer() {
}

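// Periodic deadlock check: scan every outstanding read and write request
// and abort the simulation if any has been waiting for at least
// m_deadlock_threshold cycles.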
void Sequencer::wakeup() {
  // Check for deadlock of any of the requests
  Time current_time = g_eventQueue_ptr->getTime();

  // Check across all outstanding requests
  int total_outstanding = 0;

  Vector<Address> keys = m_readRequestTable.keys();
  for (int i=0; i<keys.size(); i++) {
    SequencerRequest* request = m_readRequestTable.lookup(keys[i]);
    if (current_time - request->issue_time >= m_deadlock_threshold) {
      WARN_MSG("Possible Deadlock detected");
      WARN_EXPR(request);
      WARN_EXPR(m_version);
      WARN_EXPR(request->ruby_request.paddr);
      WARN_EXPR(keys.size());
      WARN_EXPR(current_time);
      WARN_EXPR(request->issue_time);
      WARN_EXPR(current_time - request->issue_time);
      ERROR_MSG("Aborting");
    }
  }

  keys = m_writeRequestTable.keys();
  for (int i=0; i<keys.size(); i++) {
    SequencerRequest* request = m_writeRequestTable.lookup(keys[i]);
    if (current_time - request->issue_time >= m_deadlock_threshold) {
      WARN_MSG("Possible Deadlock detected");
      WARN_EXPR(request);
      WARN_EXPR(m_version);
      WARN_EXPR(current_time);
      WARN_EXPR(request->issue_time);
      WARN_EXPR(current_time - request->issue_time);
      WARN_EXPR(keys.size());
      ERROR_MSG("Aborting");
    }
  }
  total_outstanding += m_writeRequestTable.size() + m_readRequestTable.size();

  assert(m_outstanding_count == total_outstanding);

  // If there are still outstanding requests, keep checking
  if (m_outstanding_count > 0) {
    schedule(deadlockCheckEvent,
             (m_deadlock_threshold * g_eventQueue_ptr->getClock()) + curTick);
  }
}

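// Dump the aliasing-stall counters accumulated in isReady().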
void Sequencer::printStats(ostream & out) const {
  out << "Sequencer: " << m_name << endl;
  out << "  store_waiting_on_load_cycles: " << m_store_waiting_on_load_cycles << endl;
  out << "  store_waiting_on_store_cycles: " << m_store_waiting_on_store_cycles << endl;
  out << "  load_waiting_on_load_cycles: " << m_load_waiting_on_load_cycles << endl;
  out << "  load_waiting_on_store_cycles: " << m_load_waiting_on_store_cycles << endl;
}

void Sequencer::printProgress(ostream& out) const {
  /*
  int total_demand = 0;
  out << "Sequencer Stats Version " << m_version << endl;
  out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
  out << "---------------" << endl;
  out << "outstanding requests" << endl;

  Vector<Address> rkeys = m_readRequestTable.keys();
  int read_size = rkeys.size();
  out << "proc " << m_version << " Read Requests = " << read_size << endl;
  // print the request table
  for(int i=0; i < read_size; ++i){
    SequencerRequest * request = m_readRequestTable.lookup(rkeys[i]);
    out << "\tRequest[ " << i << " ] = " << request->type << " Address " << rkeys[i]  << " Posted " << request->issue_time << " PF " << PrefetchBit_No << endl;
    total_demand++;
  }

  Vector<Address> wkeys = m_writeRequestTable.keys();
  int write_size = wkeys.size();
  out << "proc " << m_version << " Write Requests = " << write_size << endl;
  // print the request table
  for(int i=0; i < write_size; ++i){
      CacheMsg & request = m_writeRequestTable.lookup(wkeys[i]);
      out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i]  << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
      if( request.getPrefetch() == PrefetchBit_No ){
        total_demand++;
      }
  }

  out << endl;

  out << "Total Number Outstanding: " << m_outstanding_count << endl;
  out << "Total Number Demand     : " << total_demand << endl;
  out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl;
  out << endl;
  out << endl;
  */
}

void Sequencer::printConfig(ostream& out) const {
  out << "Sequencer config: " << m_name << endl;
  out << "  controller: " << m_controller->getName() << endl;
  out << "  version: " << m_version << endl;
  out << "  max_outstanding_requests: " << m_max_outstanding_requests << endl;
  out << "  deadlock_threshold: " << m_deadlock_threshold << endl;
}

// Insert the request into the read or write request table, keyed by line
// address.  Returns true if an entry for that line was already present;
// in the current implementation an aliased entry is treated as a fatal
// error, so a successful insert always returns false.
bool Sequencer::insertRequest(SequencerRequest* request) {
  int total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();

  assert(m_outstanding_count == total_outstanding);

  // See if we should schedule a deadlock check
  if (deadlockCheckEvent.scheduled() == false) {
    schedule(deadlockCheckEvent,
             (m_deadlock_threshold * g_eventQueue_ptr->getClock()) + curTick);
  }

  Address line_addr(request->ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((request->ruby_request.type == RubyRequestType_ST) ||
      (request->ruby_request.type == RubyRequestType_RMW_Read) ||
      (request->ruby_request.type == RubyRequestType_RMW_Write) ||
      (request->ruby_request.type == RubyRequestType_Locked_Read) ||
      (request->ruby_request.type == RubyRequestType_Locked_Write)) {
    if (m_writeRequestTable.exist(line_addr)) {
      m_writeRequestTable.lookup(line_addr) = request;
      //      return true;
      assert(0); // drh5: isn't this an error?  do you lose the initial request?
    }
    m_writeRequestTable.allocate(line_addr);
    m_writeRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  } else {
    if (m_readRequestTable.exist(line_addr)) {
      m_readRequestTable.lookup(line_addr) = request;
      //      return true;
      assert(0); // drh5: isn't this an error?  do you lose the initial request?
    }
    m_readRequestTable.allocate(line_addr);
    m_readRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  }

  g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);

  total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
  assert(m_outstanding_count == total_outstanding);

  return false;
}

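// Remove a completed request from whichever table holds it (stores, RMWs,
// and locked accesses live in the write table; loads and instruction
// fetches in the read table) and decrement the outstanding count.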
void Sequencer::removeRequest(SequencerRequest* srequest) {
  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());

  const RubyRequest & ruby_request = srequest->ruby_request;
  Address line_addr(ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((ruby_request.type == RubyRequestType_ST) ||
      (ruby_request.type == RubyRequestType_RMW_Read) ||
      (ruby_request.type == RubyRequestType_RMW_Write) ||
      (ruby_request.type == RubyRequestType_Locked_Read) ||
      (ruby_request.type == RubyRequestType_Locked_Write)) {
    m_writeRequestTable.deallocate(line_addr);
  } else {
    m_readRequestTable.deallocate(line_addr);
  }
  m_outstanding_count--;

  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
}

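// Completion callback from the cache controller for stores and atomics.
// Performs the side effects of locked and RMW accesses (setting the lock
// bit, blocking/unblocking the mandatory queue) before the common hit path.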
void Sequencer::writeCallback(const Address& address, DataBlock& data) {
  assert(address == line_address(address));
  assert(m_writeRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_writeRequestTable.lookup(address);

  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_ST) ||
         (request->ruby_request.type == RubyRequestType_RMW_Read) ||
         (request->ruby_request.type == RubyRequestType_RMW_Write) ||
         (request->ruby_request.type == RubyRequestType_Locked_Read) ||
         (request->ruby_request.type == RubyRequestType_Locked_Write));

  if (request->ruby_request.type == RubyRequestType_Locked_Read) {
    m_dataCache_ptr->setLocked(address, m_version);
  }
  else if (request->ruby_request.type == RubyRequestType_RMW_Read) {
    m_controller->blockOnQueue(address, m_mandatory_q_ptr);
  }
  else if (request->ruby_request.type == RubyRequestType_RMW_Write) {
    m_controller->unblock(address);
  }

  hitCallback(request, data);
}

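// Completion callback from the cache controller for loads and
// instruction fetches.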
void Sequencer::readCallback(const Address& address, DataBlock& data) {
  assert(address == line_address(address));
  assert(m_readRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_readRequestTable.lookup(address);
  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_LD) ||
         (request->ruby_request.type == RubyRequestType_RMW_Read) ||
         (request->ruby_request.type == RubyRequestType_IFETCH));

  hitCallback(request, data);
}

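// Common completion path for reads and writes: update MRU state, profile
// the miss latency, copy data between the request buffer and the data
// block, and notify the client through the registered hit callback.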
void Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data) {
  const RubyRequest & ruby_request = srequest->ruby_request;
  Address request_address(ruby_request.paddr);
  Address request_line_address(ruby_request.paddr);
  request_line_address.makeLineAddress();
  RubyRequestType type = ruby_request.type;
  Time issued_time = srequest->issue_time;

  // Set this cache entry to the most recently used
  if (type == RubyRequestType_IFETCH) {
    if (m_instCache_ptr->isTagPresent(request_line_address))
      m_instCache_ptr->setMRU(request_line_address);
  } else {
    if (m_dataCache_ptr->isTagPresent(request_line_address))
      m_dataCache_ptr->setMRU(request_line_address);
  }

  assert(g_eventQueue_ptr->getTime() >= issued_time);
  Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

  // Profile the miss latency for all non-zero demand misses
  if (miss_latency != 0) {
    g_system_ptr->getProfiler()->missLatency(miss_latency, type);

    if (Debug::getProtocolTrace()) {
      g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(ruby_request.paddr),
                                                     "", "Done", "", int_to_string(miss_latency)+" cycles");
    }
  }
  /*
  if (request.getPrefetch() == PrefetchBit_Yes) {
    return; // Ignore the prefetch
  }
  */

  // Update the data: copy out of the block for reads, into it for writes
  if (ruby_request.data != NULL) {
    if ((type == RubyRequestType_LD) ||
        (type == RubyRequestType_IFETCH) ||
        (type == RubyRequestType_RMW_Read)) {
      memcpy(ruby_request.data, data.getData(request_address.getOffset(), ruby_request.len), ruby_request.len);
    } else {
      data.setData(ruby_request.data, request_address.getOffset(), ruby_request.len);
    }
  }

  m_hit_callback(srequest->id);
  delete srequest;
}

// Returns a positive value if the request can be issued now.  Returns
// LIBRUBY_ALIASED_REQUEST if a request to the same cache line is already
// outstanding, or LIBRUBY_BUFFER_FULL if the sequencer has reached its
// maximum number of outstanding requests.
int Sequencer::isReady(const RubyRequest& request) {
  bool is_outstanding_store = m_writeRequestTable.exist(line_address(Address(request.paddr)));
  bool is_outstanding_load = m_readRequestTable.exist(line_address(Address(request.paddr)));
  if ( is_outstanding_store ) {
    if ((request.type == RubyRequestType_LD) ||
        (request.type == RubyRequestType_IFETCH) ||
        (request.type == RubyRequestType_RMW_Read)) {
      m_store_waiting_on_load_cycles++;
    } else {
      m_store_waiting_on_store_cycles++;
    }
    return LIBRUBY_ALIASED_REQUEST;
  } else if ( is_outstanding_load ) {
    if ((request.type == RubyRequestType_ST) ||
        (request.type == RubyRequestType_RMW_Write) ) {
      m_load_waiting_on_store_cycles++;
    } else {
      m_load_waiting_on_load_cycles++;
    }
    return LIBRUBY_ALIASED_REQUEST;
  }

  if (m_outstanding_count >= m_max_outstanding_requests) {
    return LIBRUBY_BUFFER_FULL;
  }

  return 1;
}

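// True when no read or write requests are outstanding.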
bool Sequencer::empty() const {
  return (m_writeRequestTable.size() == 0) && (m_readRequestTable.size() == 0);
}

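// Entry point for new requests from the libruby interface.  On success,
// returns a positive unique request ID; the request completes later via
// readCallback()/writeCallback() and the registered hit callback.  Returns
// LLSC_FAIL for a store-conditional whose lock has been lost, or the
// non-positive status code from isReady() if the request cannot be
// accepted yet.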
385
386int64_t Sequencer::makeRequest(const RubyRequest & request)
387{
388  assert(Address(request.paddr).getOffset() + request.len <= RubySystem::getBlockSizeBytes());
389  int ready = isReady(request);
390  if (ready > 0) {
391    int64_t id = makeUniqueRequestID();
392    SequencerRequest *srequest = new SequencerRequest(request, id, g_eventQueue_ptr->getTime());
393    bool found = insertRequest(srequest);
394    if (!found) {
395      if (request.type == RubyRequestType_Locked_Write) {
396        // NOTE: it is OK to check the locked flag here as the mandatory queue will be checked first
397        // ensuring that nothing comes between checking the flag and servicing the store
398        if (!m_dataCache_ptr->isLocked(line_address(Address(request.paddr)), m_version)) {
399          return LLSC_FAIL;
400        }
401        else {
402          m_dataCache_ptr->clearLocked(line_address(Address(request.paddr)));
403        }
404      }
405      issueRequest(request);
406
407      // TODO: issue hardware prefetches here
408      return id;
409    }
410    else {
411      assert(0);
412      return 0;
413    }
414  } else {
415    return ready;
416  }
417}
418
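// Translate the RubyRequest into a CacheMsg and enqueue it on the
// mandatory queue, charging the appropriate cache-lookup latency.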
void Sequencer::issueRequest(const RubyRequest& request) {
  // TODO: get rid of CacheMsg, CacheRequestType, and AccessModeType, &
  // have SLICC use RubyRequest and subtypes natively
  CacheRequestType ctype;
  switch(request.type) {
  case RubyRequestType_IFETCH:
    ctype = CacheRequestType_IFETCH;
    break;
  case RubyRequestType_LD:
    ctype = CacheRequestType_LD;
    break;
  case RubyRequestType_ST:
    ctype = CacheRequestType_ST;
    break;
  case RubyRequestType_Locked_Read:
  case RubyRequestType_Locked_Write:
  case RubyRequestType_RMW_Read:
  case RubyRequestType_RMW_Write:
    ctype = CacheRequestType_ATOMIC;
    break;
  default:
    assert(0);
  }
  AccessModeType amtype;
  switch(request.access_mode){
  case RubyAccessMode_User:
    amtype = AccessModeType_UserMode;
    break;
  case RubyAccessMode_Supervisor:
    amtype = AccessModeType_SupervisorMode;
    break;
  case RubyAccessMode_Device:
    amtype = AccessModeType_UserMode;
    break;
  default:
    assert(0);
  }
  Address line_addr(request.paddr);
  line_addr.makeLineAddress();
  CacheMsg msg(line_addr, Address(request.paddr), ctype, Address(request.pc), amtype, request.len, PrefetchBit_No, request.proc_id);

  if (Debug::getProtocolTrace()) {
    g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(request.paddr),
                                                   "", "Begin", "", RubyRequestType_to_string(request.type));
  }

  if (g_system_ptr->getTracer()->traceEnabled()) {
    g_system_ptr->getTracer()->traceRequest(this, line_addr, Address(request.pc),
                                            request.type, g_eventQueue_ptr->getTime());
  }

  Time latency = 0;  // overwritten below based on the request type

  if (request.type == RubyRequestType_IFETCH)
    latency = m_instCache_ptr->getLatency();
  else
    latency = m_dataCache_ptr->getLatency();

  // Send the message to the cache controller
  assert(latency > 0);

  assert(m_mandatory_q_ptr != NULL);
  m_mandatory_q_ptr->enqueue(msg, latency);
}

/*
bool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
                               AccessModeType access_mode,
                               int size, DataBlock*& data_ptr) {
  if (type == CacheRequestType_IFETCH) {
    return m_instCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
  } else {
    return m_dataCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
  }
}
*/

void Sequencer::print(ostream& out) const {
  out << "[Sequencer: " << m_version
      << ", outstanding requests: " << m_outstanding_count
      << ", read request table: " << m_readRequestTable
      << ", write request table: " << m_writeRequestTable
      << "]";
}

// This can be called from setState whenever coherence permissions are
// upgraded; when invoked, coherence violations will be checked for the
// given block.
void Sequencer::checkCoherence(const Address& addr) {
#ifdef CHECK_COHERENCE
  g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}