Sequencer.cc revision 6285:ce086eca1ede

/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"

//Sequencer::Sequencer(int core_id, MessageBuffer* mandatory_q)

Sequencer::Sequencer(const string & name)
  :RubyPort(name)
{
}

void Sequencer::init(const vector<string> & argv)
{
  m_deadlock_check_scheduled = false;
  m_outstanding_count = 0;

  m_max_outstanding_requests = 0;
  m_deadlock_threshold = 0;
  m_version = -1;
  m_instCache_ptr = NULL;
  m_dataCache_ptr = NULL;
  m_controller = NULL;
  for (size_t i=0; i<argv.size(); i+=2) {
    if ( argv[i] == "controller") {
      m_controller = RubySystem::getController(argv[i+1]); // e.g. argv[i+1] = "L1Cache"
      m_mandatory_q_ptr = m_controller->getMandatoryQueue();
    } else if ( argv[i] == "icache")
      m_instCache_ptr = RubySystem::getCache(argv[i+1]);
    else if ( argv[i] == "dcache")
      m_dataCache_ptr = RubySystem::getCache(argv[i+1]);
    else if ( argv[i] == "version")
      m_version = atoi(argv[i+1].c_str());
    else if ( argv[i] == "max_outstanding_requests")
      m_max_outstanding_requests = atoi(argv[i+1].c_str());
    else if ( argv[i] == "deadlock_threshold")
      m_deadlock_threshold = atoi(argv[i+1].c_str());
    else {
      cerr << "WARNING: Sequencer: Unknown configuration parameter: " << argv[i] << endl;
      assert(false);
    }
  }
  assert(m_max_outstanding_requests > 0);
  assert(m_deadlock_threshold > 0);
  assert(m_version > -1);
  assert(m_instCache_ptr != NULL);
  assert(m_dataCache_ptr != NULL);
  assert(m_controller != NULL);
}
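
/*
 * Illustrative sketch of the flat key/value argv that init() parses above.
 * The parameter names are the ones recognized by init(); the values
 * ("L1Cache", "16", "50000", ...) are hypothetical.
 *
 *   vector<string> argv;
 *   argv.push_back("controller");               argv.push_back("L1Cache");
 *   argv.push_back("icache");                   argv.push_back("L1Cache_I");
 *   argv.push_back("dcache");                   argv.push_back("L1Cache_D");
 *   argv.push_back("version");                  argv.push_back("0");
 *   argv.push_back("max_outstanding_requests"); argv.push_back("16");
 *   argv.push_back("deadlock_threshold");       argv.push_back("50000");
 *   sequencer->init(argv);
 */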

Sequencer::~Sequencer() {

}

void Sequencer::wakeup() {
  // Check for deadlock of any of the requests
  Time current_time = g_eventQueue_ptr->getTime();

  // Check across all outstanding requests
  int total_outstanding = 0;

  Vector<Address> keys = m_readRequestTable.keys();
  for (int i=0; i<keys.size(); i++) {
    SequencerRequest* request = m_readRequestTable.lookup(keys[i]);
    if (current_time - request->issue_time >= m_deadlock_threshold) {
      WARN_MSG("Possible Deadlock detected");
      WARN_EXPR(request);
      WARN_EXPR(m_version);
      WARN_EXPR(keys.size());
      WARN_EXPR(current_time);
      WARN_EXPR(request->issue_time);
      WARN_EXPR(current_time - request->issue_time);
      ERROR_MSG("Aborting");
    }
  }

  keys = m_writeRequestTable.keys();
  for (int i=0; i<keys.size(); i++) {
    SequencerRequest* request = m_writeRequestTable.lookup(keys[i]);
    if (current_time - request->issue_time >= m_deadlock_threshold) {
      WARN_MSG("Possible Deadlock detected");
      WARN_EXPR(request);
      WARN_EXPR(m_version);
      WARN_EXPR(current_time);
      WARN_EXPR(request->issue_time);
      WARN_EXPR(current_time - request->issue_time);
      WARN_EXPR(keys.size());
      ERROR_MSG("Aborting");
    }
  }
  total_outstanding += m_writeRequestTable.size() + m_readRequestTable.size();

  assert(m_outstanding_count == total_outstanding);

  if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking
    g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
  } else {
    m_deadlock_check_scheduled = false;
  }
}
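
/*
 * Illustrative sketch of the deadlock check above, with a hypothetical
 * m_deadlock_threshold of 50000 cycles: a request issued at cycle 1000 is
 * flagged once a wakeup() sees
 *
 *   current_time - issue_time = 51000 - 1000 = 50000 >= 50000
 *
 * wakeup() re-arms itself every m_deadlock_threshold cycles while requests
 * remain outstanding, so an overdue request is reported at most one
 * threshold interval after it crosses the limit.
 */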

void Sequencer::printProgress(ostream& out) const{
  /*
  int total_demand = 0;
  out << "Sequencer Stats Version " << m_version << endl;
  out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
  out << "---------------" << endl;
  out << "outstanding requests" << endl;

  Vector<Address> rkeys = m_readRequestTable.keys();
  int read_size = rkeys.size();
  out << "proc " << m_version << " Read Requests = " << read_size << endl;
  // print the request table
  for(int i=0; i < read_size; ++i){
    SequencerRequest * request = m_readRequestTable.lookup(rkeys[i]);
    out << "\tRequest[ " << i << " ] = " << request->type << " Address " << rkeys[i]  << " Posted " << request->issue_time << " PF " << PrefetchBit_No << endl;
    total_demand++;
  }

  Vector<Address> wkeys = m_writeRequestTable.keys();
  int write_size = wkeys.size();
  out << "proc " << m_version << " Write Requests = " << write_size << endl;
  // print the request table
  for(int i=0; i < write_size; ++i){
      CacheMsg & request = m_writeRequestTable.lookup(wkeys[i]);
      out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i]  << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
      if( request.getPrefetch() == PrefetchBit_No ){
        total_demand++;
      }
  }

  out << endl;

  out << "Total Number Outstanding: " << m_outstanding_count << endl;
  out << "Total Number Demand     : " << total_demand << endl;
  out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl;
  out << endl;
  out << endl;
  */
}

void Sequencer::printConfig(ostream& out) const {
  out << "Sequencer config: " << m_name << endl;
  out << "  controller: " << m_controller->getName() << endl;
  out << "  version: " << m_version << endl;
  out << "  max_outstanding_requests: " << m_max_outstanding_requests << endl;
  out << "  deadlock_threshold: " << m_deadlock_threshold << endl;
}

// Insert the request into the correct request table.  Returns true if
// the entry was already present (currently a duplicate entry trips an
// assertion instead; see the drh5 comments below).
bool Sequencer::insertRequest(SequencerRequest* request) {
  int total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();

  assert(m_outstanding_count == total_outstanding);

  // See if we should schedule a deadlock check
  if (m_deadlock_check_scheduled == false) {
    g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
    m_deadlock_check_scheduled = true;
  }

  Address line_addr(request->ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((request->ruby_request.type == RubyRequestType_ST) ||
      (request->ruby_request.type == RubyRequestType_RMW)) {
    if (m_writeRequestTable.exist(line_addr)) {
      m_writeRequestTable.lookup(line_addr) = request;
      //      return true;
      assert(0); // drh5: isn't this an error?  do you lose the initial request?
    }
    m_writeRequestTable.allocate(line_addr);
    m_writeRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  } else {
    if (m_readRequestTable.exist(line_addr)) {
      m_readRequestTable.lookup(line_addr) = request;
      //      return true;
      assert(0); // drh5: isn't this an error?  do you lose the initial request?
    }
    m_readRequestTable.allocate(line_addr);
    m_readRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  }

  g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);

  total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
  assert(m_outstanding_count == total_outstanding);

  return false;
}
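
/*
 * Illustrative sketch: both tables are keyed by *line* address, so two
 * requests to the same cache line collide even when their byte addresses
 * differ.  With hypothetical 64-byte lines:
 *
 *   store to 0x1044  -> line 0x1040 allocated in m_writeRequestTable
 *   store to 0x1078  -> line 0x1040 already exists -> assert(0) above fires
 *
 * isReady() rejects such requests before makeRequest() calls
 * insertRequest(), which is why the duplicate path is treated as fatal.
 */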

void Sequencer::removeRequest(SequencerRequest* srequest) {

  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());

  const RubyRequest & ruby_request = srequest->ruby_request;
  Address line_addr(ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((ruby_request.type == RubyRequestType_ST) ||
      (ruby_request.type == RubyRequestType_RMW)) {
    m_writeRequestTable.deallocate(line_addr);
  } else {
    m_readRequestTable.deallocate(line_addr);
  }
  m_outstanding_count--;

  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
}

void Sequencer::writeCallback(const Address& address, DataBlock& data) {

  assert(address == line_address(address));
  assert(m_writeRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_writeRequestTable.lookup(address);
  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_ST) ||
         (request->ruby_request.type == RubyRequestType_RMW));

  hitCallback(request, data);
}

void Sequencer::readCallback(const Address& address, DataBlock& data) {

  assert(address == line_address(address));
  assert(m_readRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_readRequestTable.lookup(address);
  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_LD) ||
         (request->ruby_request.type == RubyRequestType_IFETCH));

  hitCallback(request, data);
}

void Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data) {
  const RubyRequest & ruby_request = srequest->ruby_request;
  int size = ruby_request.len;
  Address request_address(ruby_request.paddr);
  Address request_line_address(ruby_request.paddr);
  request_line_address.makeLineAddress();
  RubyRequestType type = ruby_request.type;
  Time issued_time = srequest->issue_time;

  // Set this cache entry to the most recently used
  if (type == RubyRequestType_IFETCH) {
    if (m_instCache_ptr->isTagPresent(request_line_address) )
      m_instCache_ptr->setMRU(request_line_address);
  } else {
    if (m_dataCache_ptr->isTagPresent(request_line_address) )
      m_dataCache_ptr->setMRU(request_line_address);
  }

  assert(g_eventQueue_ptr->getTime() >= issued_time);
  Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

  // Profile the miss latency for all non-zero demand misses
  if (miss_latency != 0) {
    g_system_ptr->getProfiler()->missLatency(miss_latency, type);

    if (Debug::getProtocolTrace()) {
      g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(ruby_request.paddr),
                                                     "", "Done", "", int_to_string(miss_latency)+" cycles");
    }
  }
  /*
  if (request.getPrefetch() == PrefetchBit_Yes) {
    return; // Ignore the prefetch
  }
  */

  // update the data
  if (ruby_request.data != NULL) {
    if ((type == RubyRequestType_LD) ||
        (type == RubyRequestType_IFETCH)) {
      memcpy(ruby_request.data, data.getData(request_address.getOffset(), ruby_request.len), ruby_request.len);
    } else {
      data.setData(ruby_request.data, request_address.getOffset(), ruby_request.len);
    }
  }

  m_hit_callback(srequest->id);
  delete srequest;
}
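
/*
 * Illustrative sketch of the offset-based copy above, for a hypothetical
 * 4-byte load at paddr 0x1044 with 64-byte lines:
 *
 *   line address = 0x1040, request_address.getOffset() = 0x1044 mod 64 = 4
 *   load/ifetch: memcpy(ruby_request.data, data.getData(4, 4), 4);  // line -> CPU buffer
 *   store/rmw:   data.setData(ruby_request.data, 4, 4);             // CPU buffer -> line
 */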

// Returns false if the sequencer cannot accept the request: either it has
// hit its outstanding-request limit, or a load or store to the same cache
// line is already outstanding.
bool Sequencer::isReady(const RubyRequest& request) const {
  // POLINA: check whether we are currently flushing the write buffer; if so,
  // Ruby is reported as not ready to simulate stalling of the front-end.
  // Do we stall all the sequencers? If it is an atomic instruction - yes!
  if (m_outstanding_count >= m_max_outstanding_requests) {
    return false;
  }

  if( m_writeRequestTable.exist(line_address(Address(request.paddr))) ||
      m_readRequestTable.exist(line_address(Address(request.paddr))) ){
    //cout << "OUTSTANDING REQUEST EXISTS " << p << " VER " << m_version << endl;
    //printProgress(cout);
    return false;
  }

  return true;
}

bool Sequencer::empty() const {
  return (m_writeRequestTable.size() == 0) && (m_readRequestTable.size() == 0);
}

int64_t Sequencer::makeRequest(const RubyRequest & request)
{
  assert(Address(request.paddr).getOffset() + request.len <= RubySystem::getBlockSizeBytes());
  if (isReady(request)) {
    int64_t id = makeUniqueRequestID();
    SequencerRequest *srequest = new SequencerRequest(request, id, g_eventQueue_ptr->getTime());
    bool found = insertRequest(srequest);
    if (!found)
      issueRequest(request);

    // TODO: issue hardware prefetches here
    return id;
  }
  else {
    return -1;
  }
}
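
/*
 * Illustrative caller sketch.  The RubyRequest constructor arguments shown
 * are assumptions (the real signature lives in RubyRequest's header); the
 * addresses and sizes are hypothetical.  Note the assert above: a request
 * must not cross a cache-line boundary.
 *
 *   uint8_t buffer[4];
 *   RubyRequest req(0x1044, buffer, 4, pc, RubyRequestType_LD, RubyAccessMode_User);
 *   int64_t id = sequencer->makeRequest(req);
 *   if (id == -1) {
 *     // not ready: limit reached or the line is already outstanding; retry later
 *   }
 *   // on success, completion arrives asynchronously: readCallback()/writeCallback()
 *   // fires hitCallback(), which invokes m_hit_callback(id)
 */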

void Sequencer::issueRequest(const RubyRequest& request) {

  // TODO: get rid of CacheMsg, CacheRequestType, and AccessModeType, & have SLICC use RubyRequest and subtypes natively
  CacheRequestType ctype;
  switch(request.type) {
  case RubyRequestType_IFETCH:
    ctype = CacheRequestType_IFETCH;
    break;
  case RubyRequestType_LD:
    ctype = CacheRequestType_LD;
    break;
  case RubyRequestType_ST:
    ctype = CacheRequestType_ST;
    break;
  case RubyRequestType_RMW:
    ctype = CacheRequestType_ATOMIC;
    break;
  default:
    assert(0);
  }
  AccessModeType amtype;
  switch(request.access_mode){
  case RubyAccessMode_User:
    amtype = AccessModeType_UserMode;
    break;
  case RubyAccessMode_Supervisor:
    amtype = AccessModeType_SupervisorMode;
    break;
  case RubyAccessMode_Device:
    amtype = AccessModeType_UserMode;
    break;
  default:
    assert(0);
  }
  Address line_addr(request.paddr);
  line_addr.makeLineAddress();
  CacheMsg msg(line_addr, Address(request.paddr), ctype, Address(request.pc), amtype, request.len, PrefetchBit_No);

  if (Debug::getProtocolTrace()) {
    g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(request.paddr),
                                                   "", "Begin", "", RubyRequestType_to_string(request.type));
  }

  if (g_system_ptr->getTracer()->traceEnabled()) {
    g_system_ptr->getTracer()->traceRequest(m_name, line_addr, Address(request.pc),
                                            request.type, g_eventQueue_ptr->getTime());
  }

  Time latency = 0;  // initialized to zero; a valid latency must be set below

  if (request.type == RubyRequestType_IFETCH)
    latency = m_instCache_ptr->getLatency();
  else
    latency = m_dataCache_ptr->getLatency();

  // Send the message to the cache controller
  assert(latency > 0);

  m_mandatory_q_ptr->enqueue(msg, latency);
}
/*
bool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
                               AccessModeType access_mode,
                               int size, DataBlock*& data_ptr) {
  if (type == CacheRequestType_IFETCH) {
    return m_instCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
  } else {
    return m_dataCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
  }
}
*/

void Sequencer::print(ostream& out) const {
  out << "[Sequencer: " << m_version
      << ", outstanding requests: " << m_outstanding_count;

  out << ", read request table: " << m_readRequestTable
      << ", write request table: " << m_writeRequestTable;
  out << "]";
}

// this can be called from setState whenever coherence permissions are upgraded;
// when invoked, coherence violations will be checked for the given block
void Sequencer::checkCoherence(const Address& addr) {
#ifdef CHECK_COHERENCE
  g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}

/*
bool Sequencer::getRubyMemoryValue(const Address& addr, char* value,
                                   unsigned int size_in_bytes )
{
    bool found = false;
    const Address lineAddr = line_address(addr);
    DataBlock data;
    PhysAddress paddr(addr);
    DataBlock* dataPtr = &data;

    MachineID l2_mach = map_L2ChipId_to_L2Cache(addr, m_chip_ptr->getID() );
    int l2_ver = l2_mach.num%RubyConfig::numberOfL2CachePerChip();

    if (Protocol::m_TwoLevelCache) {
      if(Protocol::m_CMP){
        assert(n->m_L2Cache_L2cacheMemory_vec[l2_ver] != NULL);
      }
      else{
        assert(n->m_L1Cache_cacheMemory_vec[m_version] != NULL);
      }
    }

    if (n->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_IFETCH, dataPtr)){
      n->m_L1Cache_L1IcacheMemory_vec[m_version]->getMemoryValue(addr, value, size_in_bytes);
      found = true;
    } else if (n->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
      n->m_L1Cache_L1DcacheMemory_vec[m_version]->getMemoryValue(addr, value, size_in_bytes);
      found = true;
    } else if (Protocol::m_CMP && n->m_L2Cache_L2cacheMemory_vec[l2_ver]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
      n->m_L2Cache_L2cacheMemory_vec[l2_ver]->getMemoryValue(addr, value, size_in_bytes);
      found = true;
    // } else if (n->TBE_TABLE_MEMBER_VARIABLE->isPresent(lineAddr)){
//       ASSERT(n->TBE_TABLE_MEMBER_VARIABLE->isPresent(lineAddr));
//       L1Cache_TBE tbeEntry = n->TBE_TABLE_MEMBER_VARIABLE->lookup(lineAddr);

//       int offset = addr.getOffset();
//       for(int i=0; i<size_in_bytes; ++i){
//         value[i] = tbeEntry.getDataBlk().getByte(offset + i);
//       }

//       found = true;
    } else {
      // Address not found
      //cout << "  " << m_chip_ptr->getID() << " NOT IN CACHE, Value at Directory is: " << (int) value[0] << endl;
      n = dynamic_cast<Chip*>(g_system_ptr->getChip(map_Address_to_DirectoryNode(addr)/RubyConfig::numberOfDirectoryPerChip()));
      int dir_version = map_Address_to_DirectoryNode(addr)%RubyConfig::numberOfDirectoryPerChip();
      for(unsigned int i=0; i<size_in_bytes; ++i){
        int offset = addr.getOffset();
        value[i] = n->m_Directory_directory_vec[dir_version]->lookup(lineAddr).m_DataBlk.getByte(offset + i);
      }
      // Address not found
      //WARN_MSG("Couldn't find address");
      //WARN_EXPR(addr);
      found = false;
    }
    return true;
}

bool Sequencer::setRubyMemoryValue(const Address& addr, char *value,
                                   unsigned int size_in_bytes) {
  char test_buffer[64];

  // the idea here is that a coherent cache should find the
  // latest data, then update it
  bool found = false;
  const Address lineAddr = line_address(addr);
  PhysAddress paddr(addr);
  DataBlock data;
  DataBlock* dataPtr = &data;
  Chip* n = dynamic_cast<Chip*>(m_chip_ptr);

  MachineID l2_mach = map_L2ChipId_to_L2Cache(addr, m_chip_ptr->getID() );
  int l2_ver = l2_mach.num%RubyConfig::numberOfL2CachePerChip();

  assert(n->m_L1Cache_L1IcacheMemory_vec[m_version] != NULL);
  assert(n->m_L1Cache_L1DcacheMemory_vec[m_version] != NULL);
  if (Protocol::m_TwoLevelCache) {
    if(Protocol::m_CMP){
      assert(n->m_L2Cache_L2cacheMemory_vec[l2_ver] != NULL);
    }
    else{
      assert(n->m_L1Cache_cacheMemory_vec[m_version] != NULL);
    }
  }

  if (n->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_IFETCH, dataPtr)){
    n->m_L1Cache_L1IcacheMemory_vec[m_version]->setMemoryValue(addr, value, size_in_bytes);
    found = true;
  } else if (n->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
    n->m_L1Cache_L1DcacheMemory_vec[m_version]->setMemoryValue(addr, value, size_in_bytes);
    found = true;
  } else if (Protocol::m_CMP && n->m_L2Cache_L2cacheMemory_vec[l2_ver]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
    n->m_L2Cache_L2cacheMemory_vec[l2_ver]->setMemoryValue(addr, value, size_in_bytes);
    found = true;
  } else {
    // Address not found
    n = dynamic_cast<Chip*>(g_system_ptr->getChip(map_Address_to_DirectoryNode(addr)/RubyConfig::numberOfDirectoryPerChip()));
    int dir_version = map_Address_to_DirectoryNode(addr)%RubyConfig::numberOfDirectoryPerChip();
    for(unsigned int i=0; i<size_in_bytes; ++i){
      int offset = addr.getOffset();
      n->m_Directory_directory_vec[dir_version]->lookup(lineAddr).m_DataBlk.setByte(offset + i, value[i]);
    }
    found = false;
  }

  if (found){
    found = getRubyMemoryValue(addr, test_buffer, size_in_bytes);
    assert(found);
    if(value[0] != test_buffer[0]){
      WARN_EXPR((int) value[0]);
      WARN_EXPR((int) test_buffer[0]);
      ERROR_MSG("setRubyMemoryValue failed to set value.");
    }
  }

  return true;
}
*/
/*

void
Sequencer::rubyMemAccess(const uint64 paddr, char* data, const int len, const AccessType type)
{
  if ( type == AccessType_Read || type == AccessType_Write ) {
    // need to break up the packet data
    uint64 guest_ptr = paddr;
    Vector<DataBlock*> datablocks;
    while (paddr + len != guest_ptr) {
      Address addr(guest_ptr);
      Address line_addr = line_address(addr);

      int bytes_copied;
      if (addr.getOffset() == 0) {
        bytes_copied = (guest_ptr + RubyConfig::dataBlockBytes() > paddr + len)?
          (paddr + len - guest_ptr):
          RubyConfig::dataBlockBytes();
      } else {
        bytes_copied = RubyConfig::dataBlockBytes() - addr.getOffset();
        if (guest_ptr + bytes_copied > paddr + len)
          bytes_copied = paddr + len - guest_ptr;
      }

      // first we need to find all data blocks that have to be updated for a write
      // and the highest block for a read
      for(int i=0;i<RubyConfig::numberOfProcessors();i++) {
        if (Protocol::m_TwoLevelCache){
          if(m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[i]->isTagPresent(line_address(addr)))
            datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[i]->lookup(line_addr).getDataBlk());
          if(m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[i]->isTagPresent(line_address(addr)))
            datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[i]->lookup(line_addr).getDataBlk());
        } else {
          if(m_chip_ptr->m_L1Cache_cacheMemory_vec[i]->isTagPresent(line_address(addr)))
            datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_cacheMemory_vec[i]->lookup(line_addr).getDataBlk());
        }
      }
      if (Protocol::m_TwoLevelCache){
        int l2_bank = map_L2ChipId_to_L2Cache(addr, 0).num; // TODO: ONLY WORKS WITH CMP!!!
        if (m_chip_ptr->m_L2Cache_L2cacheMemory_vec[l2_bank]->isTagPresent(line_address(Address(paddr)))) {
          datablocks.insertAtBottom(&m_chip_ptr->m_L2Cache_L2cacheMemory_vec[l2_bank]->lookup(addr).getDataBlk());
        }
      }
      assert(dynamic_cast<Chip*>(m_chip_ptr)->m_Directory_directory_vec.size() > map_Address_to_DirectoryNode(addr));
      DirectoryMemory* dir = dynamic_cast<Chip*>(m_chip_ptr)->m_Directory_directory_vec[map_Address_to_DirectoryNode(addr)];
      Directory_Entry& entry = dir->lookup(line_addr);
      datablocks.insertAtBottom(&entry.getDataBlk());

      if (pkt->isRead()){
        datablocks[0]->copyData(pkt_data, addr.getOffset(), bytes_copied);
      } else {// pkt->isWrite() {
        for (int i=0;i<datablocks.size();i++)
          datablocks[i]->setData(pkt_data, addr.getOffset(), bytes_copied);
      }

      guest_ptr += bytes_copied;
      pkt_data += bytes_copied;
      datablocks.clear();
    }
  }
}

*/