Sequencer.cc revision 6347:a532849ca78f

/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"

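// The Sequencer sits between a processor model and its L1 cache
// controller: it accepts RubyRequests, records them in per-cache-line
// read and write request tables, forwards them to the controller's
// mandatory queue, and invokes the registered hit callback when the
// protocol signals completion.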
//Sequencer::Sequencer(int core_id, MessageBuffer* mandatory_q)

Sequencer::Sequencer(const string & name)
  :RubyPort(name)
{
}

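// init() consumes a flat list of "key value" string pairs supplied by
// RubySystem; the recognized keys are controller, icache, dcache,
// version, max_outstanding_requests, and deadlock_threshold, and every
// one of them must be present (the asserts at the end enforce this).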
void Sequencer::init(const vector<string> & argv)
{
  m_deadlock_check_scheduled = false;
  m_outstanding_count = 0;

  m_max_outstanding_requests = 0;
  m_deadlock_threshold = 0;
  m_version = -1;
  m_instCache_ptr = NULL;
  m_dataCache_ptr = NULL;
  m_controller = NULL;
  for (size_t i=0; i<argv.size(); i+=2) {
    if ( argv[i] == "controller") {
      m_controller = RubySystem::getController(argv[i+1]); // argv[i+1] is the controller name, e.g. "L1Cache"
      m_mandatory_q_ptr = m_controller->getMandatoryQueue();
    } else if ( argv[i] == "icache")
      m_instCache_ptr = RubySystem::getCache(argv[i+1]);
    else if ( argv[i] == "dcache")
      m_dataCache_ptr = RubySystem::getCache(argv[i+1]);
    else if ( argv[i] == "version")
      m_version = atoi(argv[i+1].c_str());
    else if ( argv[i] == "max_outstanding_requests")
      m_max_outstanding_requests = atoi(argv[i+1].c_str());
    else if ( argv[i] == "deadlock_threshold")
      m_deadlock_threshold = atoi(argv[i+1].c_str());
    else {
      cerr << "WARNING: Sequencer: Unknown configuration parameter: " << argv[i] << endl;
      assert(false);
    }
  }
  assert(m_max_outstanding_requests > 0);
  assert(m_deadlock_threshold > 0);
  assert(m_version > -1);
  assert(m_instCache_ptr != NULL);
  assert(m_dataCache_ptr != NULL);
  assert(m_controller != NULL);
}

Sequencer::~Sequencer() {
}

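// wakeup() acts as a deadlock watchdog: it is scheduled every
// m_deadlock_threshold cycles while requests are outstanding, and it
// aborts the simulation if any request has been pending at least that
// long.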
void Sequencer::wakeup() {
  // Check for deadlock of any of the requests
  Time current_time = g_eventQueue_ptr->getTime();

  // Check across all outstanding requests
  int total_outstanding = 0;

  Vector<Address> keys = m_readRequestTable.keys();
  for (int i=0; i<keys.size(); i++) {
    SequencerRequest* request = m_readRequestTable.lookup(keys[i]);
    if (current_time - request->issue_time >= m_deadlock_threshold) {
      WARN_MSG("Possible Deadlock detected");
      WARN_EXPR(request);
      WARN_EXPR(m_version);
      WARN_EXPR(keys.size());
      WARN_EXPR(current_time);
      WARN_EXPR(request->issue_time);
      WARN_EXPR(current_time - request->issue_time);
      ERROR_MSG("Aborting");
    }
  }

  keys = m_writeRequestTable.keys();
  for (int i=0; i<keys.size(); i++) {
    SequencerRequest* request = m_writeRequestTable.lookup(keys[i]);
    if (current_time - request->issue_time >= m_deadlock_threshold) {
      WARN_MSG("Possible Deadlock detected");
      WARN_EXPR(request);
      WARN_EXPR(m_version);
      WARN_EXPR(current_time);
      WARN_EXPR(request->issue_time);
      WARN_EXPR(current_time - request->issue_time);
      WARN_EXPR(keys.size());
      ERROR_MSG("Aborting");
    }
  }
  total_outstanding += m_writeRequestTable.size() + m_readRequestTable.size();

  assert(m_outstanding_count == total_outstanding);

  if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking
    g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
  } else {
    m_deadlock_check_scheduled = false;
  }
}

void Sequencer::printProgress(ostream& out) const{
  /*
  int total_demand = 0;
  out << "Sequencer Stats Version " << m_version << endl;
  out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
  out << "---------------" << endl;
  out << "outstanding requests" << endl;

  Vector<Address> rkeys = m_readRequestTable.keys();
  int read_size = rkeys.size();
  out << "proc " << m_version << " Read Requests = " << read_size << endl;
  // print the request table
  for(int i=0; i < read_size; ++i){
    SequencerRequest * request = m_readRequestTable.lookup(rkeys[i]);
    out << "\tRequest[ " << i << " ] = " << request->type << " Address " << rkeys[i]  << " Posted " << request->issue_time << " PF " << PrefetchBit_No << endl;
    total_demand++;
  }

  Vector<Address> wkeys = m_writeRequestTable.keys();
  int write_size = wkeys.size();
  out << "proc " << m_version << " Write Requests = " << write_size << endl;
  // print the request table
  for(int i=0; i < write_size; ++i){
      CacheMsg & request = m_writeRequestTable.lookup(wkeys[i]);
      out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i]  << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
      if( request.getPrefetch() == PrefetchBit_No ){
        total_demand++;
      }
  }

  out << endl;

  out << "Total Number Outstanding: " << m_outstanding_count << endl;
  out << "Total Number Demand     : " << total_demand << endl;
  out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl;
  out << endl;
  out << endl;
  */
}

void Sequencer::printConfig(ostream& out) const {
  out << "Sequencer config: " << m_name << endl;
  out << "  controller: " << m_controller->getName() << endl;
  out << "  version: " << m_version << endl;
  out << "  max_outstanding_requests: " << m_max_outstanding_requests << endl;
  out << "  deadlock_threshold: " << m_deadlock_threshold << endl;
}

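// Both request tables are keyed by cache-line address, so the sequencer
// tracks at most one outstanding request per line per table; a duplicate
// insertion currently trips the assert below rather than returning true.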
// Insert the request on the correct request table.  Return true if
// the entry was already present.
bool Sequencer::insertRequest(SequencerRequest* request) {
  int total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();

  assert(m_outstanding_count == total_outstanding);

  // See if we should schedule a deadlock check
  if (m_deadlock_check_scheduled == false) {
    g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
    m_deadlock_check_scheduled = true;
  }

  Address line_addr(request->ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((request->ruby_request.type == RubyRequestType_ST) ||
      (request->ruby_request.type == RubyRequestType_RMW_Read) ||
      (request->ruby_request.type == RubyRequestType_RMW_Write)) {
    if (m_writeRequestTable.exist(line_addr)) {
      m_writeRequestTable.lookup(line_addr) = request;
      //      return true;
      assert(0); // drh5: isn't this an error?  do you lose the initial request?
    }
    m_writeRequestTable.allocate(line_addr);
    m_writeRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  } else {
    if (m_readRequestTable.exist(line_addr)) {
      m_readRequestTable.lookup(line_addr) = request;
      //      return true;
      assert(0); // drh5: isn't this an error?  do you lose the initial request?
    }
    m_readRequestTable.allocate(line_addr);
    m_readRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  }

  g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);

  total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
  assert(m_outstanding_count == total_outstanding);

  return false;
}

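// removeRequest() is the inverse of insertRequest(): it drops the entry
// from whichever table holds it and decrements the outstanding count.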
void Sequencer::removeRequest(SequencerRequest* srequest) {

  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());

  const RubyRequest & ruby_request = srequest->ruby_request;
  Address line_addr(ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((ruby_request.type == RubyRequestType_ST) ||
      (ruby_request.type == RubyRequestType_RMW_Read) ||
      (ruby_request.type == RubyRequestType_RMW_Write)) {
    m_writeRequestTable.deallocate(line_addr);
  } else {
    m_readRequestTable.deallocate(line_addr);
  }
  m_outstanding_count--;

  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
}

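// writeCallback() is invoked by the cache controller when a store or
// atomic completes.  The RMW_Read half of an atomic locks the line in
// the data cache (load-linked style); the RMW_Write half succeeds only
// if that lock is still held (store-conditional style), which is why a
// contended atomic can livelock, as the comment below notes.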
void Sequencer::writeCallback(const Address& address, DataBlock& data) {

  assert(address == line_address(address));
  assert(m_writeRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_writeRequestTable.lookup(address);
  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_ST) ||
         (request->ruby_request.type == RubyRequestType_RMW_Read) ||
         (request->ruby_request.type == RubyRequestType_RMW_Write));
  // POLINA: the assumption is that atomics are only on data cache and not instruction cache
  if (request->ruby_request.type == RubyRequestType_RMW_Read) {
    m_dataCache_ptr->setLocked(address, m_version);
  }
  else if (request->ruby_request.type == RubyRequestType_RMW_Write) {
    if (m_dataCache_ptr->isLocked(address, m_version)) {
      // if we are holding the lock for this
      request->ruby_request.atomic_success = true;
      m_dataCache_ptr->clearLocked(address);
    }
    else {
      // if we are not holding the lock for this
      request->ruby_request.atomic_success = false;
    }

    // can have livelock
  }

  hitCallback(request, data);
}

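// readCallback() is the load/ifetch counterpart of writeCallback().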
void Sequencer::readCallback(const Address& address, DataBlock& data) {

  assert(address == line_address(address));
  assert(m_readRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_readRequestTable.lookup(address);
  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_LD) ||
         (request->ruby_request.type == RubyRequestType_IFETCH));

  hitCallback(request, data);
}

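// hitCallback() finishes a request: it touches the line to update LRU
// state, profiles the miss latency, copies data between the request
// buffer and the cache block (out for loads/ifetches, in for writes),
// then notifies the front-end and frees the request.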
void Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data) {
  const RubyRequest & ruby_request = srequest->ruby_request;
  Address request_address(ruby_request.paddr);
  Address request_line_address(ruby_request.paddr);
  request_line_address.makeLineAddress();
  RubyRequestType type = ruby_request.type;
  Time issued_time = srequest->issue_time;

  // Set this cache entry to the most recently used
  if (type == RubyRequestType_IFETCH) {
    if (m_instCache_ptr->isTagPresent(request_line_address) )
      m_instCache_ptr->setMRU(request_line_address);
  } else {
    if (m_dataCache_ptr->isTagPresent(request_line_address) )
      m_dataCache_ptr->setMRU(request_line_address);
  }

  assert(g_eventQueue_ptr->getTime() >= issued_time);
  Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

  // Profile the miss latency for all non-zero demand misses
  if (miss_latency != 0) {
    g_system_ptr->getProfiler()->missLatency(miss_latency, type);

    if (Debug::getProtocolTrace()) {
      g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(ruby_request.paddr),
                                                     "", "Done", "", int_to_string(miss_latency)+" cycles");
    }
  }
  /*
  if (request.getPrefetch() == PrefetchBit_Yes) {
    return; // Ignore the prefetch
  }
  */

  // update the data
  if (ruby_request.data != NULL) {
    if ((type == RubyRequestType_LD) ||
        (type == RubyRequestType_IFETCH)) {
      memcpy(ruby_request.data, data.getData(request_address.getOffset(), ruby_request.len), ruby_request.len);
    } else {
      data.setData(ruby_request.data, request_address.getOffset(), ruby_request.len);
    }
  }

  m_hit_callback(srequest->id);
  delete srequest;
}

// Returns true if the sequencer can accept this request, i.e. it is not
// at its outstanding-request limit and has no outstanding request to the
// same cache line.
bool Sequencer::isReady(const RubyRequest& request) const {
  // POLINA: check if we are currently flushing the write buffer, if so Ruby is returned as not ready
  // to simulate stalling of the front-end
  // Do we stall all the sequencers? If it is atomic instruction - yes!
  if (m_outstanding_count >= m_max_outstanding_requests) {
    return false;
  }

  if( m_writeRequestTable.exist(line_address(Address(request.paddr))) ||
      m_readRequestTable.exist(line_address(Address(request.paddr))) ){
    //cout << "OUTSTANDING REQUEST EXISTS " << p << " VER " << m_version << endl;
    //printProgress(cout);
    return false;
  }

  return true;
}

bool Sequencer::empty() const {
  return (m_writeRequestTable.size() == 0) && (m_readRequestTable.size() == 0);
}

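// makeRequest() is the sequencer's public entry point.  A hypothetical
// caller (a sketch only; `sequencer` and `req` are illustrative names,
// and the real caller is the CPU-side glue that registered
// m_hit_callback) would look like:
//
//   int64_t id = sequencer->makeRequest(req);
//   if (id == -1) {
//     // not ready: table conflict or too many outstanding requests;
//     // the front-end must stall and retry this request later
//   }
//   // otherwise m_hit_callback(id) fires when the request completes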
int64_t Sequencer::makeRequest(const RubyRequest & request)
{
  assert(Address(request.paddr).getOffset() + request.len <= RubySystem::getBlockSizeBytes());
  if (isReady(request)) {
    int64_t id = makeUniqueRequestID();
    SequencerRequest *srequest = new SequencerRequest(request, id, g_eventQueue_ptr->getTime());
    bool found = insertRequest(srequest);
    if (!found)
      issueRequest(request);

    // TODO: issue hardware prefetches here
    return id;
  }
  else {
    return -1;
  }
}

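// issueRequest() translates the RubyRequest into the CacheMsg the SLICC
// protocol consumes and enqueues it on the controller's mandatory queue,
// delayed by the appropriate L1 cache access latency.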
void Sequencer::issueRequest(const RubyRequest& request) {

  // TODO: get rid of CacheMsg, CacheRequestType, and AccessModeType, & have SLICC use RubyRequest and subtypes natively
  CacheRequestType ctype;
  switch(request.type) {
  case RubyRequestType_IFETCH:
    ctype = CacheRequestType_IFETCH;
    break;
  case RubyRequestType_LD:
    ctype = CacheRequestType_LD;
    break;
  case RubyRequestType_ST:
    ctype = CacheRequestType_ST;
    break;
  case RubyRequestType_RMW_Read:
  case RubyRequestType_RMW_Write:
    ctype = CacheRequestType_ATOMIC;
    break;
  default:
    assert(0);
  }
  AccessModeType amtype;
  switch(request.access_mode){
  case RubyAccessMode_User:
    amtype = AccessModeType_UserMode;
    break;
  case RubyAccessMode_Supervisor:
    amtype = AccessModeType_SupervisorMode;
    break;
  case RubyAccessMode_Device:
    amtype = AccessModeType_UserMode;
    break;
  default:
    assert(0);
  }
  Address line_addr(request.paddr);
  line_addr.makeLineAddress();
  CacheMsg msg(line_addr, Address(request.paddr), ctype, Address(request.pc), amtype, request.len, PrefetchBit_No);

  if (Debug::getProtocolTrace()) {
    g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(request.paddr),
                                                   "", "Begin", "", RubyRequestType_to_string(request.type));
  }

  if (g_system_ptr->getTracer()->traceEnabled()) {
    g_system_ptr->getTracer()->traceRequest(m_name, line_addr, Address(request.pc),
                                            request.type, g_eventQueue_ptr->getTime());
  }

  Time latency = 0;  // initialized to a null value

  if (request.type == RubyRequestType_IFETCH)
    latency = m_instCache_ptr->getLatency();
  else
    latency = m_dataCache_ptr->getLatency();

  // Send the message to the cache controller
  assert(latency > 0);

  m_mandatory_q_ptr->enqueue(msg, latency);
}

/*
bool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
                               AccessModeType access_mode,
                               int size, DataBlock*& data_ptr) {
  if (type == CacheRequestType_IFETCH) {
    return m_instCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
  } else {
    return m_dataCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
  }
}
*/

void Sequencer::print(ostream& out) const {
  out << "[Sequencer: " << m_version
      << ", outstanding requests: " << m_outstanding_count;

  out << ", read request table: " << m_readRequestTable
      << ", write request table: " << m_writeRequestTable;
  out << "]";
}

// this can be called from setState whenever coherence permissions are upgraded
// when invoked, coherence violations will be checked for the given block
void Sequencer::checkCoherence(const Address& addr) {
#ifdef CHECK_COHERENCE
  g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}

/*
bool Sequencer::getRubyMemoryValue(const Address& addr, char* value,
                                   unsigned int size_in_bytes )
{
    bool found = false;
    const Address lineAddr = line_address(addr);
    DataBlock data;
    PhysAddress paddr(addr);
    DataBlock* dataPtr = &data;
    Chip* n = dynamic_cast<Chip*>(m_chip_ptr);

    MachineID l2_mach = map_L2ChipId_to_L2Cache(addr, m_chip_ptr->getID() );
    int l2_ver = l2_mach.num%RubyConfig::numberOfL2CachePerChip();

    if (Protocol::m_TwoLevelCache) {
      if(Protocol::m_CMP){
        assert(n->m_L2Cache_L2cacheMemory_vec[l2_ver] != NULL);
      }
      else{
        assert(n->m_L1Cache_cacheMemory_vec[m_version] != NULL);
      }
    }

    if (n->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_IFETCH, dataPtr)){
      n->m_L1Cache_L1IcacheMemory_vec[m_version]->getMemoryValue(addr, value, size_in_bytes);
      found = true;
    } else if (n->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
      n->m_L1Cache_L1DcacheMemory_vec[m_version]->getMemoryValue(addr, value, size_in_bytes);
      found = true;
    } else if (Protocol::m_CMP && n->m_L2Cache_L2cacheMemory_vec[l2_ver]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
      n->m_L2Cache_L2cacheMemory_vec[l2_ver]->getMemoryValue(addr, value, size_in_bytes);
      found = true;
    // } else if (n->TBE_TABLE_MEMBER_VARIABLE->isPresent(lineAddr)){
//       ASSERT(n->TBE_TABLE_MEMBER_VARIABLE->isPresent(lineAddr));
//       L1Cache_TBE tbeEntry = n->TBE_TABLE_MEMBER_VARIABLE->lookup(lineAddr);

//       int offset = addr.getOffset();
//       for(int i=0; i<size_in_bytes; ++i){
//         value[i] = tbeEntry.getDataBlk().getByte(offset + i);
//       }

//       found = true;
    } else {
      // Address not found
      //cout << "  " << m_chip_ptr->getID() << " NOT IN CACHE, Value at Directory is: " << (int) value[0] << endl;
      n = dynamic_cast<Chip*>(g_system_ptr->getChip(map_Address_to_DirectoryNode(addr)/RubyConfig::numberOfDirectoryPerChip()));
      int dir_version = map_Address_to_DirectoryNode(addr)%RubyConfig::numberOfDirectoryPerChip();
      for(unsigned int i=0; i<size_in_bytes; ++i){
        int offset = addr.getOffset();
        value[i] = n->m_Directory_directory_vec[dir_version]->lookup(lineAddr).m_DataBlk.getByte(offset + i);
      }
      // Address not found
      //WARN_MSG("Couldn't find address");
      //WARN_EXPR(addr);
      found = false;
    }
    return true;
}

bool Sequencer::setRubyMemoryValue(const Address& addr, char *value,
                                   unsigned int size_in_bytes) {
  char test_buffer[64];

  // idea here is that the coherent cache should find the
  // latest data, then update it
  bool found = false;
  const Address lineAddr = line_address(addr);
  PhysAddress paddr(addr);
  DataBlock data;
  DataBlock* dataPtr = &data;
  Chip* n = dynamic_cast<Chip*>(m_chip_ptr);

  MachineID l2_mach = map_L2ChipId_to_L2Cache(addr, m_chip_ptr->getID() );
  int l2_ver = l2_mach.num%RubyConfig::numberOfL2CachePerChip();

  assert(n->m_L1Cache_L1IcacheMemory_vec[m_version] != NULL);
  assert(n->m_L1Cache_L1DcacheMemory_vec[m_version] != NULL);
  if (Protocol::m_TwoLevelCache) {
    if(Protocol::m_CMP){
      assert(n->m_L2Cache_L2cacheMemory_vec[l2_ver] != NULL);
    }
    else{
      assert(n->m_L1Cache_cacheMemory_vec[m_version] != NULL);
    }
  }

  if (n->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_IFETCH, dataPtr)){
    n->m_L1Cache_L1IcacheMemory_vec[m_version]->setMemoryValue(addr, value, size_in_bytes);
    found = true;
  } else if (n->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
    n->m_L1Cache_L1DcacheMemory_vec[m_version]->setMemoryValue(addr, value, size_in_bytes);
    found = true;
  } else if (Protocol::m_CMP && n->m_L2Cache_L2cacheMemory_vec[l2_ver]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
    n->m_L2Cache_L2cacheMemory_vec[l2_ver]->setMemoryValue(addr, value, size_in_bytes);
    found = true;
  } else {
    // Address not found
    n = dynamic_cast<Chip*>(g_system_ptr->getChip(map_Address_to_DirectoryNode(addr)/RubyConfig::numberOfDirectoryPerChip()));
    int dir_version = map_Address_to_DirectoryNode(addr)%RubyConfig::numberOfDirectoryPerChip();
    for(unsigned int i=0; i<size_in_bytes; ++i){
      int offset = addr.getOffset();
      n->m_Directory_directory_vec[dir_version]->lookup(lineAddr).m_DataBlk.setByte(offset + i, value[i]);
    }
    found = false;
  }

  if (found){
    found = getRubyMemoryValue(addr, test_buffer, size_in_bytes);
    assert(found);
    if(value[0] != test_buffer[0]){
      WARN_EXPR((int) value[0]);
      WARN_EXPR((int) test_buffer[0]);
      ERROR_MSG("setRubyMemoryValue failed to set value.");
    }
  }

  return true;
}
*/
/*

void
Sequencer::rubyMemAccess(const uint64 paddr, char* data, const int len, const AccessType type)
{
  if ( type == AccessType_Read || type == AccessType_Write ) {
    // need to break up the packet data
    uint64 guest_ptr = paddr;
    Vector<DataBlock*> datablocks;
    while (paddr + len != guest_ptr) {
      Address addr(guest_ptr);
      Address line_addr = line_address(addr);

      int bytes_copied;
      if (addr.getOffset() == 0) {
        bytes_copied = (guest_ptr + RubyConfig::dataBlockBytes() > paddr + len)?
          (paddr + len - guest_ptr):
          RubyConfig::dataBlockBytes();
      } else {
        bytes_copied = RubyConfig::dataBlockBytes() - addr.getOffset();
        if (guest_ptr + bytes_copied > paddr + len)
          bytes_copied = paddr + len - guest_ptr;
      }

      // first we need to find all data blocks that have to be updated for a write
      // and the highest block for a read
     for(int i=0;i<RubyConfig::numberOfProcessors();i++) {
        if (Protocol::m_TwoLevelCache){
          if(m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[i]->isTagPresent(line_address(addr)))
            datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[i]->lookup(line_addr).getDataBlk());
          if(m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[i]->isTagPresent(line_address(addr)))
            datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[i]->lookup(line_addr).getDataBlk());
        } else {
          if(m_chip_ptr->m_L1Cache_cacheMemory_vec[i]->isTagPresent(line_address(addr)))
            datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_cacheMemory_vec[i]->lookup(line_addr).getDataBlk());
        }
      }
      if (Protocol::m_TwoLevelCache){
        int l2_bank = map_L2ChipId_to_L2Cache(addr, 0).num; // TODO: ONLY WORKS WITH CMP!!!
        if (m_chip_ptr->m_L2Cache_L2cacheMemory_vec[l2_bank]->isTagPresent(line_address(Address(paddr)))) {
          datablocks.insertAtBottom(&m_chip_ptr->m_L2Cache_L2cacheMemory_vec[l2_bank]->lookup(addr).getDataBlk());
        }
      }
      assert(dynamic_cast<Chip*>(m_chip_ptr)->m_Directory_directory_vec.size() > map_Address_to_DirectoryNode(addr));
      DirectoryMemory* dir = dynamic_cast<Chip*>(m_chip_ptr)->m_Directory_directory_vec[map_Address_to_DirectoryNode(addr)];
      Directory_Entry& entry = dir->lookup(line_addr);
      datablocks.insertAtBottom(&entry.getDataBlk());

      if (pkt->isRead()){
        datablocks[0]->copyData(pkt_data, addr.getOffset(), bytes_copied);
      } else {// pkt->isWrite() {
        for (int i=0;i<datablocks.size();i++)
          datablocks[i]->setData(pkt_data, addr.getOffset(), bytes_copied);
      }

      guest_ptr += bytes_copied;
      pkt_data += bytes_copied;
      datablocks.clear();
    }
}

*/