Sequencer.cc revision 6348:374e1d9b0660

/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"

//Sequencer::Sequencer(int core_id, MessageBuffer* mandatory_q)

Sequencer::Sequencer(const string & name)
  :RubyPort(name)
{
}

void Sequencer::init(const vector<string> & argv)
{
  m_deadlock_check_scheduled = false;
  m_outstanding_count = 0;

  m_max_outstanding_requests = 0;
  m_deadlock_threshold = 0;
  m_version = -1;
  m_instCache_ptr = NULL;
  m_dataCache_ptr = NULL;
  m_controller = NULL;
  for (size_t i=0; i<argv.size(); i+=2) {
    if ( argv[i] == "controller") {
      m_controller = RubySystem::getController(argv[i+1]); // argv[i+1] is the controller name, e.g. "L1Cache"
      m_mandatory_q_ptr = m_controller->getMandatoryQueue();
    } else if ( argv[i] == "icache")
      m_instCache_ptr = RubySystem::getCache(argv[i+1]);
    else if ( argv[i] == "dcache")
      m_dataCache_ptr = RubySystem::getCache(argv[i+1]);
    else if ( argv[i] == "version")
      m_version = atoi(argv[i+1].c_str());
    else if ( argv[i] == "max_outstanding_requests")
      m_max_outstanding_requests = atoi(argv[i+1].c_str());
    else if ( argv[i] == "deadlock_threshold")
      m_deadlock_threshold = atoi(argv[i+1].c_str());
    else {
      cerr << "WARNING: Sequencer: Unknown configuration parameter: " << argv[i] << endl;
      assert(false);
    }
  }
  assert(m_max_outstanding_requests > 0);
  assert(m_deadlock_threshold > 0);
  assert(m_version > -1);
  assert(m_instCache_ptr != NULL);
  assert(m_dataCache_ptr != NULL);
  assert(m_controller != NULL);
}
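
/*
 * Configuration sketch (illustrative only; the argument values are
 * hypothetical, supplied in practice by the configuration front-end):
 * init() consumes a flat key/value vector, so a caller might build
 *
 *   vector<string> argv;
 *   argv.push_back("controller");               argv.push_back("L1Cache-0");
 *   argv.push_back("icache");                   argv.push_back("L1Cache-0_icache");
 *   argv.push_back("dcache");                   argv.push_back("L1Cache-0_dcache");
 *   argv.push_back("version");                  argv.push_back("0");
 *   argv.push_back("max_outstanding_requests"); argv.push_back("16");
 *   argv.push_back("deadlock_threshold");       argv.push_back("500000");
 *   sequencer->init(argv);
 *
 * Any unrecognized key trips the assert(false) above.
 */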

Sequencer::~Sequencer() {

}

void Sequencer::wakeup() {
  // Check for deadlock of any of the requests
  Time current_time = g_eventQueue_ptr->getTime();

  // Check across all outstanding requests
  int total_outstanding = 0;

  Vector<Address> keys = m_readRequestTable.keys();
  for (int i=0; i<keys.size(); i++) {
    SequencerRequest* request = m_readRequestTable.lookup(keys[i]);
    if (current_time - request->issue_time >= m_deadlock_threshold) {
      WARN_MSG("Possible Deadlock detected");
      WARN_EXPR(request);
      WARN_EXPR(m_version);
      WARN_EXPR(keys.size());
      WARN_EXPR(current_time);
      WARN_EXPR(request->issue_time);
      WARN_EXPR(current_time - request->issue_time);
      ERROR_MSG("Aborting");
    }
  }

  keys = m_writeRequestTable.keys();
  for (int i=0; i<keys.size(); i++) {
    SequencerRequest* request = m_writeRequestTable.lookup(keys[i]);
    if (current_time - request->issue_time >= m_deadlock_threshold) {
      WARN_MSG("Possible Deadlock detected");
      WARN_EXPR(request);
      WARN_EXPR(m_version);
      WARN_EXPR(current_time);
      WARN_EXPR(request->issue_time);
      WARN_EXPR(current_time - request->issue_time);
      WARN_EXPR(keys.size());
      ERROR_MSG("Aborting");
    }
  }
  total_outstanding += m_writeRequestTable.size() + m_readRequestTable.size();

  assert(m_outstanding_count == total_outstanding);

  if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking
    g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
  } else {
    m_deadlock_check_scheduled = false;
  }
}
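
/*
 * Timing sketch for the deadlock check (a worked example of ours, not from
 * the source): with deadlock_threshold = 500000, wakeup() re-arms itself
 * every 500000 cycles while anything is outstanding, and a request issued at
 * cycle T is flagged by the first check at which its age reaches the
 * threshold.  That check can land anywhere in [T + 500000, T + 1000000), so
 * a stuck request is reported at most two thresholds after issue.
 */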

void Sequencer::printProgress(ostream& out) const{
  /*
  int total_demand = 0;
  out << "Sequencer Stats Version " << m_version << endl;
  out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
  out << "---------------" << endl;
  out << "outstanding requests" << endl;

  Vector<Address> rkeys = m_readRequestTable.keys();
  int read_size = rkeys.size();
  out << "proc " << m_version << " Read Requests = " << read_size << endl;
  // print the request table
  for(int i=0; i < read_size; ++i){
    SequencerRequest * request = m_readRequestTable.lookup(rkeys[i]);
    out << "\tRequest[ " << i << " ] = " << request->type << " Address " << rkeys[i]  << " Posted " << request->issue_time << " PF " << PrefetchBit_No << endl;
    total_demand++;
  }

  Vector<Address> wkeys = m_writeRequestTable.keys();
  int write_size = wkeys.size();
  out << "proc " << m_version << " Write Requests = " << write_size << endl;
  // print the request table
  for(int i=0; i < write_size; ++i){
    SequencerRequest * request = m_writeRequestTable.lookup(wkeys[i]);
    out << "\tRequest[ " << i << " ] = " << request->type << " Address " << wkeys[i]  << " Posted " << request->issue_time << " PF " << PrefetchBit_No << endl;
    total_demand++;
  }

  out << endl;

  out << "Total Number Outstanding: " << m_outstanding_count << endl;
  out << "Total Number Demand     : " << total_demand << endl;
  out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl;
  out << endl;
  out << endl;
  */
}

void Sequencer::printConfig(ostream& out) const {
  out << "Sequencer config: " << m_name << endl;
  out << "  controller: " << m_controller->getName() << endl;
  out << "  version: " << m_version << endl;
  out << "  max_outstanding_requests: " << m_max_outstanding_requests << endl;
  out << "  deadlock_threshold: " << m_deadlock_threshold << endl;
}
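
/*
 * Example output (the format follows directly from the statements above;
 * the names and values are invented):
 *
 *   Sequencer config: Sequencer-0
 *     controller: L1Cache-0
 *     version: 0
 *     max_outstanding_requests: 16
 *     deadlock_threshold: 500000
 */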

// Insert the request into the correct request table.  Returns true if the
// entry was already present (currently unreachable: a duplicate entry trips
// the assert(0) below instead).
bool Sequencer::insertRequest(SequencerRequest* request) {
  int total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();

  assert(m_outstanding_count == total_outstanding);

  // See if we should schedule a deadlock check
  if (m_deadlock_check_scheduled == false) {
    g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
    m_deadlock_check_scheduled = true;
  }

  Address line_addr(request->ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((request->ruby_request.type == RubyRequestType_ST) ||
      (request->ruby_request.type == RubyRequestType_RMW_Read) ||
      (request->ruby_request.type == RubyRequestType_RMW_Write)) {
    if (m_writeRequestTable.exist(line_addr)) {
      m_writeRequestTable.lookup(line_addr) = request;
      //      return true;
      assert(0); // drh5: isn't this an error?  do you lose the initial request?
    }
    m_writeRequestTable.allocate(line_addr);
    m_writeRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  } else {
    if (m_readRequestTable.exist(line_addr)) {
      m_readRequestTable.lookup(line_addr) = request;
      //      return true;
      assert(0); // drh5: isn't this an error?  do you lose the initial request?
    }
    m_readRequestTable.allocate(line_addr);
    m_readRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  }

  g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);

  total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
  assert(m_outstanding_count == total_outstanding);

  return false;
}
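
/*
 * Table bookkeeping, worked through with invented addresses: with 64-byte
 * blocks, a store to 0x1004 and an RMW_Read to 0x1030 both map to line
 * 0x1000, so a second insertRequest() to that line would find it already in
 * m_writeRequestTable and hit the assert(0) above.  In normal operation
 * isReady() rejects same-line requests before makeRequest() ever gets here.
 */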

void Sequencer::removeRequest(SequencerRequest* srequest) {

  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());

  const RubyRequest & ruby_request = srequest->ruby_request;
  Address line_addr(ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((ruby_request.type == RubyRequestType_ST) ||
      (ruby_request.type == RubyRequestType_RMW_Read) ||
      (ruby_request.type == RubyRequestType_RMW_Write)) {
    m_writeRequestTable.deallocate(line_addr);
  } else {
    m_readRequestTable.deallocate(line_addr);
  }
  m_outstanding_count--;

  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
}

void Sequencer::writeCallback(const Address& address, DataBlock& data) {

  assert(address == line_address(address));
  assert(m_writeRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_writeRequestTable.lookup(address);
  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_ST) ||
         (request->ruby_request.type == RubyRequestType_RMW_Read) ||
         (request->ruby_request.type == RubyRequestType_RMW_Write));
  // POLINA: the assumption is that atomics go only to the data cache, never the instruction cache
  if (request->ruby_request.type == RubyRequestType_RMW_Read) {
    m_dataCache_ptr->setLocked(address, m_version);
  }
  else if (request->ruby_request.type == RubyRequestType_RMW_Write) {
    if (m_dataCache_ptr->isLocked(address, m_version)) {
      // we are still holding the lock for this line, so the atomic succeeds
      request->ruby_request.atomic_success = true;
      m_dataCache_ptr->clearLocked(address);
    }
    else {
      // we lost the lock for this line, so the atomic fails
      request->ruby_request.atomic_success = false;
    }

    // note: a request that keeps losing its lock can retry forever, so livelock is possible
  }

  hitCallback(request, data);
}
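
/*
 * Atomic (RMW) handshake as implemented above; the load-locked /
 * store-conditional analogy is ours, not the original authors':
 *
 *   1. RMW_Read completes  -> setLocked(line, m_version): this sequencer
 *      now holds the line's lock.
 *   2. If the protocol takes the line away before the write, the lock is
 *      expected to be cleared elsewhere (not in this file).
 *   3. RMW_Write completes -> isLocked(line, m_version)?
 *        yes: atomic_success = true,  clearLocked(line)
 *        no:  atomic_success = false, and the caller must retry
 *             (hence the livelock note above).
 */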

void Sequencer::readCallback(const Address& address, DataBlock& data) {

  assert(address == line_address(address));
  assert(m_readRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_readRequestTable.lookup(address);
  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_LD) ||
         (request->ruby_request.type == RubyRequestType_IFETCH));

  hitCallback(request, data);
}

void Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data) {
  const RubyRequest & ruby_request = srequest->ruby_request;
  Address request_address(ruby_request.paddr);
  Address request_line_address(ruby_request.paddr);
  request_line_address.makeLineAddress();
  RubyRequestType type = ruby_request.type;
  Time issued_time = srequest->issue_time;

  // Set this cache entry to the most recently used
  if (type == RubyRequestType_IFETCH) {
    if (m_instCache_ptr->isTagPresent(request_line_address) )
      m_instCache_ptr->setMRU(request_line_address);
  } else {
    if (m_dataCache_ptr->isTagPresent(request_line_address) )
      m_dataCache_ptr->setMRU(request_line_address);
  }

  assert(g_eventQueue_ptr->getTime() >= issued_time);
  Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

  // Profile the miss latency for all non-zero demand misses
  if (miss_latency != 0) {
    g_system_ptr->getProfiler()->missLatency(miss_latency, type);

    if (Debug::getProtocolTrace()) {
      g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(ruby_request.paddr),
                                                     "", "Done", "", int_to_string(miss_latency)+" cycles");
    }
  }
  /*
  if (request.getPrefetch() == PrefetchBit_Yes) {
    return; // Ignore the prefetch
  }
  */

  // update the data
  if (ruby_request.data != NULL) {
    if ((type == RubyRequestType_LD) ||
        (type == RubyRequestType_IFETCH)) {
      memcpy(ruby_request.data, data.getData(request_address.getOffset(), ruby_request.len), ruby_request.len);
    } else {
      data.setData(ruby_request.data, request_address.getOffset(), ruby_request.len);
    }
  }

  m_hit_callback(srequest->id);
  delete srequest;
}
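
/*
 * Data-copy arithmetic, worked through with invented values: a 4-byte load
 * at paddr 0x1104 with 64-byte blocks gives request_address.getOffset() ==
 * 0x04, so the memcpy above copies bytes [4, 8) of the line into
 * ruby_request.data; stores run the opposite direction through
 * data.setData().  makeRequest() asserts offset + len <= block size, so a
 * copy never straddles a line boundary.
 */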

// Returns false if the sequencer is at its limit of outstanding requests or
// already has a load or store outstanding for this cache line.
bool Sequencer::isReady(const RubyRequest& request) const {
  // POLINA: check whether we are currently flushing the write buffer; if so,
  // Ruby reports itself not ready, to simulate stalling of the front-end.
  // Do we stall all the sequencers? If it is an atomic instruction - yes!
  if (m_outstanding_count >= m_max_outstanding_requests) {
    return false;
  }

  if( m_writeRequestTable.exist(line_address(Address(request.paddr))) ||
      m_readRequestTable.exist(line_address(Address(request.paddr))) ){
    //cout << "OUTSTANDING REQUEST EXISTS " << p << " VER " << m_version << endl;
    //printProgress(cout);
    return false;
  }

  return true;
}

bool Sequencer::empty() const {
  return (m_writeRequestTable.size() == 0) && (m_readRequestTable.size() == 0);
}

int64_t Sequencer::makeRequest(const RubyRequest & request)
{
  assert(Address(request.paddr).getOffset() + request.len <= RubySystem::getBlockSizeBytes());
  if (isReady(request)) {
    int64_t id = makeUniqueRequestID();
    SequencerRequest *srequest = new SequencerRequest(request, id, g_eventQueue_ptr->getTime());
    bool found = insertRequest(srequest);
    if (!found)
      issueRequest(request);

    // TODO: issue hardware prefetches here
    return id;
  }
  else {
    return -1;
  }
}
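
/*
 * Caller-side sketch (hypothetical driver code, not part of this file): the
 * -1 return is the back-pressure signal, so a CPU model typically holds the
 * request and retries on a later cycle, then matches the completion against
 * the returned id when m_hit_callback(id) fires:
 *
 *   int64_t id = sequencer->makeRequest(ruby_request);
 *   if (id == -1) {
 *     // sequencer full or line busy: stall and retry next cycle
 *   } else {
 *     // remember id; the hit callback will deliver it on completion
 *   }
 */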

void Sequencer::issueRequest(const RubyRequest& request) {

  // TODO: get rid of CacheMsg, CacheRequestType, and AccessModeType, & have SLICC use RubyRequest and subtypes natively
  CacheRequestType ctype;
  switch(request.type) {
  case RubyRequestType_IFETCH:
    ctype = CacheRequestType_IFETCH;
    break;
  case RubyRequestType_LD:
    ctype = CacheRequestType_LD;
    break;
  case RubyRequestType_ST:
    ctype = CacheRequestType_ST;
    break;
  case RubyRequestType_RMW_Read:
    ctype = CacheRequestType_ATOMIC;
    break;
  case RubyRequestType_RMW_Write:
    ctype = CacheRequestType_ATOMIC;
    break;
  default:
    assert(0);
  }
  AccessModeType amtype;
  switch(request.access_mode){
  case RubyAccessMode_User:
    amtype = AccessModeType_UserMode;
    break;
  case RubyAccessMode_Supervisor:
    amtype = AccessModeType_SupervisorMode;
    break;
  case RubyAccessMode_Device:
    amtype = AccessModeType_UserMode;
    break;
  default:
    assert(0);
  }
  Address line_addr(request.paddr);
  line_addr.makeLineAddress();
  CacheMsg msg(line_addr, Address(request.paddr), ctype, Address(request.pc), amtype, request.len, PrefetchBit_No);

  if (Debug::getProtocolTrace()) {
    g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(request.paddr),
                                                   "", "Begin", "", RubyRequestType_to_string(request.type));
  }

  if (g_system_ptr->getTracer()->traceEnabled()) {
    g_system_ptr->getTracer()->traceRequest(m_name, line_addr, Address(request.pc),
                                            request.type, g_eventQueue_ptr->getTime());
  }

  Time latency = 0;  // initialized to a null value

  if (request.type == RubyRequestType_IFETCH)
    latency = m_instCache_ptr->getLatency();
  else
    latency = m_dataCache_ptr->getLatency();

  // Send the message to the cache controller
  assert(latency > 0);

  m_mandatory_q_ptr->enqueue(msg, latency);
}
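/*
 * The translation above is many-to-one; restating the switch statements as a
 * table:
 *
 *   RubyRequestType_IFETCH    -> CacheRequestType_IFETCH
 *   RubyRequestType_LD        -> CacheRequestType_LD
 *   RubyRequestType_ST        -> CacheRequestType_ST
 *   RubyRequestType_RMW_Read  -> CacheRequestType_ATOMIC
 *   RubyRequestType_RMW_Write -> CacheRequestType_ATOMIC
 *   RubyAccessMode_Device     -> AccessModeType_UserMode
 *
 * Note that the RMW read/write distinction is not carried in the CacheMsg
 * type; only the Sequencer's lock handling (writeCallback) tells them apart.
 */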
/*
bool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
                               AccessModeType access_mode,
                               int size, DataBlock*& data_ptr) {
  if (type == CacheRequestType_IFETCH) {
    return m_instCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
  } else {
    return m_dataCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
  }
}
*/

void Sequencer::print(ostream& out) const {
  out << "[Sequencer: " << m_version
      << ", outstanding requests: " << m_outstanding_count;

  out << ", read request table: " << m_readRequestTable
      << ", write request table: " << m_writeRequestTable;
  out << "]";
}

// This can be called from setState whenever coherence permissions are upgraded;
// when invoked, coherence violations will be checked for the given block.
void Sequencer::checkCoherence(const Address& addr) {
#ifdef CHECK_COHERENCE
  g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}
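
/*
 * The check above compiles to a no-op unless CHECK_COHERENCE is defined at
 * build time (e.g. by passing -DCHECK_COHERENCE to the compiler; how that is
 * wired into the build system is outside this file).  When enabled, each
 * call checks the global coherence invariant for the block, which is slow
 * but useful when bringing up a new protocol.
 */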

/*
bool Sequencer::getRubyMemoryValue(const Address& addr, char* value,
                                   unsigned int size_in_bytes )
{
    bool found = false;
    const Address lineAddr = line_address(addr);
    DataBlock data;
    PhysAddress paddr(addr);
    DataBlock* dataPtr = &data;
    Chip* n = dynamic_cast<Chip*>(m_chip_ptr);

    MachineID l2_mach = map_L2ChipId_to_L2Cache(addr, m_chip_ptr->getID() );
    int l2_ver = l2_mach.num%RubyConfig::numberOfL2CachePerChip();

    if (Protocol::m_TwoLevelCache) {
      if(Protocol::m_CMP){
        assert(n->m_L2Cache_L2cacheMemory_vec[l2_ver] != NULL);
      }
      else{
        assert(n->m_L1Cache_cacheMemory_vec[m_version] != NULL);
      }
    }

    if (n->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_IFETCH, dataPtr)){
      n->m_L1Cache_L1IcacheMemory_vec[m_version]->getMemoryValue(addr, value, size_in_bytes);
      found = true;
    } else if (n->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
      n->m_L1Cache_L1DcacheMemory_vec[m_version]->getMemoryValue(addr, value, size_in_bytes);
      found = true;
    } else if (Protocol::m_CMP && n->m_L2Cache_L2cacheMemory_vec[l2_ver]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
      n->m_L2Cache_L2cacheMemory_vec[l2_ver]->getMemoryValue(addr, value, size_in_bytes);
      found = true;
    // } else if (n->TBE_TABLE_MEMBER_VARIABLE->isPresent(lineAddr)){
//       ASSERT(n->TBE_TABLE_MEMBER_VARIABLE->isPresent(lineAddr));
//       L1Cache_TBE tbeEntry = n->TBE_TABLE_MEMBER_VARIABLE->lookup(lineAddr);

//       int offset = addr.getOffset();
//       for(int i=0; i<size_in_bytes; ++i){
//         value[i] = tbeEntry.getDataBlk().getByte(offset + i);
//       }

//       found = true;
    } else {
      // Address not found
      //cout << "  " << m_chip_ptr->getID() << " NOT IN CACHE, Value at Directory is: " << (int) value[0] << endl;
      n = dynamic_cast<Chip*>(g_system_ptr->getChip(map_Address_to_DirectoryNode(addr)/RubyConfig::numberOfDirectoryPerChip()));
      int dir_version = map_Address_to_DirectoryNode(addr)%RubyConfig::numberOfDirectoryPerChip();
      for(unsigned int i=0; i<size_in_bytes; ++i){
        int offset = addr.getOffset();
        value[i] = n->m_Directory_directory_vec[dir_version]->lookup(lineAddr).m_DataBlk.getByte(offset + i);
      }
      // Address not found
      //WARN_MSG("Couldn't find address");
      //WARN_EXPR(addr);
      found = false;
    }
    return true;
}

bool Sequencer::setRubyMemoryValue(const Address& addr, char *value,
                                   unsigned int size_in_bytes) {
  char test_buffer[64];

  // the idea here is that a coherent cache should find the
  // latest data, then update it
  bool found = false;
  const Address lineAddr = line_address(addr);
  PhysAddress paddr(addr);
  DataBlock data;
  DataBlock* dataPtr = &data;
  Chip* n = dynamic_cast<Chip*>(m_chip_ptr);

  MachineID l2_mach = map_L2ChipId_to_L2Cache(addr, m_chip_ptr->getID() );
  int l2_ver = l2_mach.num%RubyConfig::numberOfL2CachePerChip();

  assert(n->m_L1Cache_L1IcacheMemory_vec[m_version] != NULL);
  assert(n->m_L1Cache_L1DcacheMemory_vec[m_version] != NULL);
  if (Protocol::m_TwoLevelCache) {
    if(Protocol::m_CMP){
      assert(n->m_L2Cache_L2cacheMemory_vec[l2_ver] != NULL);
    }
    else{
      assert(n->m_L1Cache_cacheMemory_vec[m_version] != NULL);
    }
  }

  if (n->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_IFETCH, dataPtr)){
    n->m_L1Cache_L1IcacheMemory_vec[m_version]->setMemoryValue(addr, value, size_in_bytes);
    found = true;
  } else if (n->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
    n->m_L1Cache_L1DcacheMemory_vec[m_version]->setMemoryValue(addr, value, size_in_bytes);
    found = true;
  } else if (Protocol::m_CMP && n->m_L2Cache_L2cacheMemory_vec[l2_ver]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
    n->m_L2Cache_L2cacheMemory_vec[l2_ver]->setMemoryValue(addr, value, size_in_bytes);
    found = true;
  } else {
    // Address not found
    n = dynamic_cast<Chip*>(g_system_ptr->getChip(map_Address_to_DirectoryNode(addr)/RubyConfig::numberOfDirectoryPerChip()));
    int dir_version = map_Address_to_DirectoryNode(addr)%RubyConfig::numberOfDirectoryPerChip();
    for(unsigned int i=0; i<size_in_bytes; ++i){
      int offset = addr.getOffset();
      n->m_Directory_directory_vec[dir_version]->lookup(lineAddr).m_DataBlk.setByte(offset + i, value[i]);
    }
    found = false;
  }

  if (found){
    found = getRubyMemoryValue(addr, test_buffer, size_in_bytes);
    assert(found);
    if(value[0] != test_buffer[0]){
      WARN_EXPR((int) value[0]);
      WARN_EXPR((int) test_buffer[0]);
      ERROR_MSG("setRubyMemoryValue failed to set value.");
    }
  }

  return true;
}
*/
/*

void
Sequencer::rubyMemAccess(const uint64 paddr, char* data, const int len, const AccessType type)
{
  if ( type == AccessType_Read || type == AccessType_Write ) {
    // need to break up the packet data
    uint64 guest_ptr = paddr;
    char* pkt_data = data;
    Vector<DataBlock*> datablocks;
    while (paddr + len != guest_ptr) {
      Address addr(guest_ptr);
      Address line_addr = line_address(addr);

      int bytes_copied;
      if (addr.getOffset() == 0) {
        bytes_copied = (guest_ptr + RubyConfig::dataBlockBytes() > paddr + len)?
          (paddr + len - guest_ptr):
          RubyConfig::dataBlockBytes();
      } else {
        bytes_copied = RubyConfig::dataBlockBytes() - addr.getOffset();
        if (guest_ptr + bytes_copied > paddr + len)
          bytes_copied = paddr + len - guest_ptr;
      }

      // first we need to find all data blocks that have to be updated for a write
      // and the highest block for a read
      for(int i=0;i<RubyConfig::numberOfProcessors();i++) {
        if (Protocol::m_TwoLevelCache){
          if(m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[i]->isTagPresent(line_addr))
            datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[i]->lookup(line_addr).getDataBlk());
          if(m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[i]->isTagPresent(line_addr))
            datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[i]->lookup(line_addr).getDataBlk());
        } else {
          if(m_chip_ptr->m_L1Cache_cacheMemory_vec[i]->isTagPresent(line_addr))
            datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_cacheMemory_vec[i]->lookup(line_addr).getDataBlk());
        }
      }
      if (Protocol::m_TwoLevelCache){
        int l2_bank = map_L2ChipId_to_L2Cache(addr, 0).num; // TODO: ONLY WORKS WITH CMP!!!
        if (m_chip_ptr->m_L2Cache_L2cacheMemory_vec[l2_bank]->isTagPresent(line_addr)) {
          datablocks.insertAtBottom(&m_chip_ptr->m_L2Cache_L2cacheMemory_vec[l2_bank]->lookup(line_addr).getDataBlk());
        }
      }
      assert(dynamic_cast<Chip*>(m_chip_ptr)->m_Directory_directory_vec.size() > map_Address_to_DirectoryNode(addr));
      DirectoryMemory* dir = dynamic_cast<Chip*>(m_chip_ptr)->m_Directory_directory_vec[map_Address_to_DirectoryNode(addr)];
      Directory_Entry& entry = dir->lookup(line_addr);
      datablocks.insertAtBottom(&entry.getDataBlk());

      if (type == AccessType_Read){
        datablocks[0]->copyData(pkt_data, addr.getOffset(), bytes_copied);
      } else { // type == AccessType_Write
        for (int i=0;i<datablocks.size();i++)
          datablocks[i]->setData(pkt_data, addr.getOffset(), bytes_copied);
      }

      guest_ptr += bytes_copied;
      pkt_data += bytes_copied;
      datablocks.clear();
    }
  }
}
*/
661