// Sequencer.cc -- revision 6355:79464d8a4d2f
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"

//Sequencer::Sequencer(int core_id, MessageBuffer* mandatory_q)

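// Returned by makeRequest() when a Locked_Write (store-conditional) request
// finds that its line is no longer locked, i.e. the reservation was lost.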
#define LLSC_FAIL -2

Sequencer::Sequencer(const string & name)
  :RubyPort(name)
{
}

void Sequencer::init(const vector<string> & argv)
{
  m_deadlock_check_scheduled = false;
  m_outstanding_count = 0;

  m_max_outstanding_requests = 0;
  m_deadlock_threshold = 0;
  m_version = -1;
  m_instCache_ptr = NULL;
  m_dataCache_ptr = NULL;
  m_controller = NULL;
  for (size_t i=0; i<argv.size(); i+=2) {
    if ( argv[i] == "controller") {
      m_controller = RubySystem::getController(argv[i+1]); // args[i] = "L1Cache"
      m_mandatory_q_ptr = m_controller->getMandatoryQueue();
    } else if ( argv[i] == "icache")
      m_instCache_ptr = RubySystem::getCache(argv[i+1]);
    else if ( argv[i] == "dcache")
      m_dataCache_ptr = RubySystem::getCache(argv[i+1]);
    else if ( argv[i] == "version")
      m_version = atoi(argv[i+1].c_str());
    else if ( argv[i] == "max_outstanding_requests")
      m_max_outstanding_requests = atoi(argv[i+1].c_str());
    else if ( argv[i] == "deadlock_threshold")
      m_deadlock_threshold = atoi(argv[i+1].c_str());
    else {
      cerr << "WARNING: Sequencer: Unknown configuration parameter: " << argv[i] << endl;
      assert(false);
    }
  }
  assert(m_max_outstanding_requests > 0);
  assert(m_deadlock_threshold > 0);
  assert(m_version > -1);
  assert(m_instCache_ptr != NULL);
  assert(m_dataCache_ptr != NULL);
  assert(m_controller != NULL);
}

Sequencer::~Sequencer() {

}

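// Deadlock-check event handler.  Scans every outstanding read and write
// request and aborts the simulation if any of them has been waiting for
// m_deadlock_threshold cycles or more; re-schedules itself as long as
// requests remain outstanding.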
void Sequencer::wakeup() {
  // Check for deadlock of any of the requests
  Time current_time = g_eventQueue_ptr->getTime();

  // Check across all outstanding requests
  int total_outstanding = 0;

  Vector<Address> keys = m_readRequestTable.keys();
  for (int i=0; i<keys.size(); i++) {
    SequencerRequest* request = m_readRequestTable.lookup(keys[i]);
    if (current_time - request->issue_time >= m_deadlock_threshold) {
      WARN_MSG("Possible Deadlock detected");
      WARN_EXPR(request);
      WARN_EXPR(m_version);
      WARN_EXPR(keys.size());
      WARN_EXPR(current_time);
      WARN_EXPR(request->issue_time);
      WARN_EXPR(current_time - request->issue_time);
      ERROR_MSG("Aborting");
    }
  }

  keys = m_writeRequestTable.keys();
  for (int i=0; i<keys.size(); i++) {
    SequencerRequest* request = m_writeRequestTable.lookup(keys[i]);
    if (current_time - request->issue_time >= m_deadlock_threshold) {
      WARN_MSG("Possible Deadlock detected");
      WARN_EXPR(request);
      WARN_EXPR(m_version);
      WARN_EXPR(current_time);
      WARN_EXPR(request->issue_time);
      WARN_EXPR(current_time - request->issue_time);
      WARN_EXPR(keys.size());
      ERROR_MSG("Aborting");
    }
  }
  total_outstanding += m_writeRequestTable.size() + m_readRequestTable.size();

  assert(m_outstanding_count == total_outstanding);

  if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking
    g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
  } else {
    m_deadlock_check_scheduled = false;
  }
}

void Sequencer::printProgress(ostream& out) const {
  /*
  int total_demand = 0;
  out << "Sequencer Stats Version " << m_version << endl;
  out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
  out << "---------------" << endl;
  out << "outstanding requests" << endl;

  Vector<Address> rkeys = m_readRequestTable.keys();
  int read_size = rkeys.size();
  out << "proc " << m_version << " Read Requests = " << read_size << endl;
  // print the request table
  for(int i=0; i < read_size; ++i){
    SequencerRequest * request = m_readRequestTable.lookup(rkeys[i]);
    out << "\tRequest[ " << i << " ] = " << request->type << " Address " << rkeys[i]  << " Posted " << request->issue_time << " PF " << PrefetchBit_No << endl;
    total_demand++;
  }

  Vector<Address> wkeys = m_writeRequestTable.keys();
  int write_size = wkeys.size();
  out << "proc " << m_version << " Write Requests = " << write_size << endl;
  // print the request table
  for(int i=0; i < write_size; ++i){
      CacheMsg & request = m_writeRequestTable.lookup(wkeys[i]);
      out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i]  << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
      if( request.getPrefetch() == PrefetchBit_No ){
        total_demand++;
      }
  }

  out << endl;

  out << "Total Number Outstanding: " << m_outstanding_count << endl;
  out << "Total Number Demand     : " << total_demand << endl;
  out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl;
  out << endl;
  out << endl;
  */
}

void Sequencer::printConfig(ostream& out) const {
  out << "Sequencer config: " << m_name << endl;
  out << "  controller: " << m_controller->getName() << endl;
  out << "  version: " << m_version << endl;
  out << "  max_outstanding_requests: " << m_max_outstanding_requests << endl;
  out << "  deadlock_threshold: " << m_deadlock_threshold << endl;
}

// Insert the request on the correct request table.  Would return true if the
// entry was already present, but that case is currently treated as an error
// (see the asserts below).
bool Sequencer::insertRequest(SequencerRequest* request) {
  int total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();

  assert(m_outstanding_count == total_outstanding);

  // See if we should schedule a deadlock check
  if (m_deadlock_check_scheduled == false) {
    g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
    m_deadlock_check_scheduled = true;
  }

  Address line_addr(request->ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((request->ruby_request.type == RubyRequestType_ST) ||
      (request->ruby_request.type == RubyRequestType_RMW_Read) ||
      (request->ruby_request.type == RubyRequestType_RMW_Write) ||
      (request->ruby_request.type == RubyRequestType_Locked_Read) ||
      (request->ruby_request.type == RubyRequestType_Locked_Write)) {
    if (m_writeRequestTable.exist(line_addr)) {
      m_writeRequestTable.lookup(line_addr) = request;
      //      return true;
      assert(0); // drh5: isn't this an error?  do you lose the initial request?
    }
    m_writeRequestTable.allocate(line_addr);
    m_writeRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  } else {
    if (m_readRequestTable.exist(line_addr)) {
      m_readRequestTable.lookup(line_addr) = request;
      //      return true;
      assert(0); // drh5: isn't this an error?  do you lose the initial request?
    }
    m_readRequestTable.allocate(line_addr);
    m_readRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  }

  g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);

  total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
  assert(m_outstanding_count == total_outstanding);

  return false;
}

void Sequencer::removeRequest(SequencerRequest* srequest) {

  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());

  const RubyRequest & ruby_request = srequest->ruby_request;
  Address line_addr(ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((ruby_request.type == RubyRequestType_ST) ||
      (ruby_request.type == RubyRequestType_RMW_Read) ||
      (ruby_request.type == RubyRequestType_RMW_Write) ||
      (ruby_request.type == RubyRequestType_Locked_Read) ||
      (ruby_request.type == RubyRequestType_Locked_Write)) {
    m_writeRequestTable.deallocate(line_addr);
  } else {
    m_readRequestTable.deallocate(line_addr);
  }
  m_outstanding_count--;

  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
}

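// Completion path for store-class requests (ST, RMW, and locked accesses):
// removes the request from the write request table, sets the lock bit for a
// Locked_Read, and hands the data back through hitCallback().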
void Sequencer::writeCallback(const Address& address, DataBlock& data) {

  assert(address == line_address(address));
  assert(m_writeRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_writeRequestTable.lookup(address);
  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_ST) ||
         (request->ruby_request.type == RubyRequestType_RMW_Read) ||
         (request->ruby_request.type == RubyRequestType_RMW_Write) ||
         (request->ruby_request.type == RubyRequestType_Locked_Read) ||
         (request->ruby_request.type == RubyRequestType_Locked_Write));
  // POLINA: the assumption is that atomics are only on data cache and not instruction cache
  if (request->ruby_request.type == RubyRequestType_Locked_Read) {
    m_dataCache_ptr->setLocked(address, m_version);
  }

  hitCallback(request, data);
}

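// Completion path for LD and IFETCH requests: removes the request from the
// read request table and hands the data back through hitCallback().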
void Sequencer::readCallback(const Address& address, DataBlock& data) {

  assert(address == line_address(address));
  assert(m_readRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_readRequestTable.lookup(address);
  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_LD) ||
         (request->ruby_request.type == RubyRequestType_IFETCH));

  hitCallback(request, data);
}

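// Common tail for read and write completions: touches the line as most
// recently used, profiles the miss latency, copies data between the request
// buffer and the data block, and finally notifies the requestor through
// m_hit_callback before freeing the SequencerRequest.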
void Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data) {
  const RubyRequest & ruby_request = srequest->ruby_request;
  Address request_address(ruby_request.paddr);
  Address request_line_address(ruby_request.paddr);
  request_line_address.makeLineAddress();
  RubyRequestType type = ruby_request.type;
  Time issued_time = srequest->issue_time;

  // Set this cache entry to the most recently used
  if (type == RubyRequestType_IFETCH) {
    if (m_instCache_ptr->isTagPresent(request_line_address) )
      m_instCache_ptr->setMRU(request_line_address);
  } else {
    if (m_dataCache_ptr->isTagPresent(request_line_address) )
      m_dataCache_ptr->setMRU(request_line_address);
  }

  assert(g_eventQueue_ptr->getTime() >= issued_time);
  Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

  // Profile the miss latency for all non-zero demand misses
  if (miss_latency != 0) {
    g_system_ptr->getProfiler()->missLatency(miss_latency, type);

    if (Debug::getProtocolTrace()) {
      g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(ruby_request.paddr),
                                                     "", "Done", "", int_to_string(miss_latency)+" cycles");
    }
  }
  /*
  if (request.getPrefetch() == PrefetchBit_Yes) {
    return; // Ignore the prefetch
  }
  */

  // update the data
  if (ruby_request.data != NULL) {
    if ((type == RubyRequestType_LD) ||
        (type == RubyRequestType_IFETCH)) {
      memcpy(ruby_request.data, data.getData(request_address.getOffset(), ruby_request.len), ruby_request.len);
    } else {
      data.setData(ruby_request.data, request_address.getOffset(), ruby_request.len);
    }
  }

  m_hit_callback(srequest->id);
  delete srequest;
}

// Returns false if the sequencer is already at its maximum number of
// outstanding requests, or if it already has a load or store outstanding to
// the same cache line; returns true if the request can be accepted.
bool Sequencer::isReady(const RubyRequest& request) const {
  // POLINA: check if we are currently flushing the write buffer; if so, Ruby is reported as not ready
  // to simulate stalling of the front-end
  // Do we stall all the sequencers? If it is an atomic instruction - yes!
  if (m_outstanding_count >= m_max_outstanding_requests) {
    return false;
  }

  if( m_writeRequestTable.exist(line_address(Address(request.paddr))) ||
      m_readRequestTable.exist(line_address(Address(request.paddr))) ){
    //cout << "OUTSTANDING REQUEST EXISTS " << p << " VER " << m_version << endl;
    //printProgress(cout);
    return false;
  }

  return true;
}

bool Sequencer::empty() const {
  return (m_writeRequestTable.size() == 0) && (m_readRequestTable.size() == 0);
}

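// Entry point for new memory requests.  Returns the unique id assigned to the
// request, -1 if the sequencer cannot accept it right now (see isReady()), or
// LLSC_FAIL if a Locked_Write finds that its reservation has been lost.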
int64_t Sequencer::makeRequest(const RubyRequest & request)
{
  assert(Address(request.paddr).getOffset() + request.len <= RubySystem::getBlockSizeBytes());
  if (isReady(request)) {
    int64_t id = makeUniqueRequestID();
    SequencerRequest *srequest = new SequencerRequest(request, id, g_eventQueue_ptr->getTime());
    bool found = insertRequest(srequest);
    if (!found) {
      if (request.type == RubyRequestType_Locked_Write) {
        // NOTE: it is OK to check the locked flag here as the mandatory queue will be checked first
        // ensuring that nothing comes between checking the flag and servicing the store
        if (!m_dataCache_ptr->isLocked(line_address(Address(request.paddr)), m_version)) {
          return LLSC_FAIL;
        }
        else {
          m_dataCache_ptr->clearLocked(line_address(Address(request.paddr)));
        }
      }
      issueRequest(request);
    }

    // TODO: issue hardware prefetches here
    return id;
  }
  else {
    return -1;
  }
}

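// Translates the RubyRequest into a CacheMsg and enqueues it on the
// controller's mandatory queue after the appropriate L1 cache's hit latency.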
void Sequencer::issueRequest(const RubyRequest& request) {

  // TODO: get rid of CacheMsg, CacheRequestType, and AccessModeType, and have SLICC use RubyRequest and subtypes natively
  CacheRequestType ctype;
  switch(request.type) {
  case RubyRequestType_IFETCH:
    ctype = CacheRequestType_IFETCH;
    break;
  case RubyRequestType_LD:
    ctype = CacheRequestType_LD;
    break;
  case RubyRequestType_ST:
    ctype = CacheRequestType_ST;
    break;
  case RubyRequestType_Locked_Read:
    ctype = CacheRequestType_ST;
    break;
  case RubyRequestType_Locked_Write:
    ctype = CacheRequestType_ST;
    break;
  case RubyRequestType_RMW_Read:
    ctype = CacheRequestType_ATOMIC;
    break;
  case RubyRequestType_RMW_Write:
    ctype = CacheRequestType_ATOMIC;
    break;
  default:
    assert(0);
  }
  AccessModeType amtype;
  switch(request.access_mode){
  case RubyAccessMode_User:
    amtype = AccessModeType_UserMode;
    break;
  case RubyAccessMode_Supervisor:
    amtype = AccessModeType_SupervisorMode;
    break;
  case RubyAccessMode_Device:
    amtype = AccessModeType_UserMode;
    break;
  default:
    assert(0);
  }
  Address line_addr(request.paddr);
  line_addr.makeLineAddress();
  CacheMsg msg(line_addr, Address(request.paddr), ctype, Address(request.pc), amtype, request.len, PrefetchBit_No);

  if (Debug::getProtocolTrace()) {
    g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(request.paddr),
                                                   "", "Begin", "", RubyRequestType_to_string(request.type));
  }

  if (g_system_ptr->getTracer()->traceEnabled()) {
    g_system_ptr->getTracer()->traceRequest(m_name, line_addr, Address(request.pc),
                                            request.type, g_eventQueue_ptr->getTime());
  }

  Time latency = 0;  // initialized to an invalid value; set below

  if (request.type == RubyRequestType_IFETCH)
    latency = m_instCache_ptr->getLatency();
  else
    latency = m_dataCache_ptr->getLatency();

  // Send the message to the cache controller
  assert(latency > 0);

  m_mandatory_q_ptr->enqueue(msg, latency);
}
/*
bool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
                               AccessModeType access_mode,
                               int size, DataBlock*& data_ptr) {
  if (type == CacheRequestType_IFETCH) {
    return m_instCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
  } else {
    return m_dataCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
  }
}
*/

void Sequencer::print(ostream& out) const {
  out << "[Sequencer: " << m_version
      << ", outstanding requests: " << m_outstanding_count;

  out << ", read request table: " << m_readRequestTable
      << ", write request table: " << m_writeRequestTable;
  out << "]";
}

// this can be called from setState whenever coherence permissions are upgraded
// when invoked, coherence violations will be checked for the given block
void Sequencer::checkCoherence(const Address& addr) {
#ifdef CHECK_COHERENCE
  g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}

/*
bool Sequencer::getRubyMemoryValue(const Address& addr, char* value,
                                   unsigned int size_in_bytes )
{
    bool found = false;
    const Address lineAddr = line_address(addr);
    DataBlock data;
    PhysAddress paddr(addr);
    DataBlock* dataPtr = &data;

    MachineID l2_mach = map_L2ChipId_to_L2Cache(addr, m_chip_ptr->getID() );
    int l2_ver = l2_mach.num%RubyConfig::numberOfL2CachePerChip();

    if (Protocol::m_TwoLevelCache) {
      if(Protocol::m_CMP){
        assert(n->m_L2Cache_L2cacheMemory_vec[l2_ver] != NULL);
      }
      else{
        assert(n->m_L1Cache_cacheMemory_vec[m_version] != NULL);
      }
    }

    if (n->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_IFETCH, dataPtr)){
      n->m_L1Cache_L1IcacheMemory_vec[m_version]->getMemoryValue(addr, value, size_in_bytes);
      found = true;
    } else if (n->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
      n->m_L1Cache_L1DcacheMemory_vec[m_version]->getMemoryValue(addr, value, size_in_bytes);
      found = true;
    } else if (Protocol::m_CMP && n->m_L2Cache_L2cacheMemory_vec[l2_ver]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
      n->m_L2Cache_L2cacheMemory_vec[l2_ver]->getMemoryValue(addr, value, size_in_bytes);
      found = true;
    // } else if (n->TBE_TABLE_MEMBER_VARIABLE->isPresent(lineAddr)){
//       ASSERT(n->TBE_TABLE_MEMBER_VARIABLE->isPresent(lineAddr));
//       L1Cache_TBE tbeEntry = n->TBE_TABLE_MEMBER_VARIABLE->lookup(lineAddr);

//       int offset = addr.getOffset();
//       for(int i=0; i<size_in_bytes; ++i){
//         value[i] = tbeEntry.getDataBlk().getByte(offset + i);
//       }

//       found = true;
    } else {
      // Address not found
      //cout << "  " << m_chip_ptr->getID() << " NOT IN CACHE, Value at Directory is: " << (int) value[0] << endl;
      n = dynamic_cast<Chip*>(g_system_ptr->getChip(map_Address_to_DirectoryNode(addr)/RubyConfig::numberOfDirectoryPerChip()));
      int dir_version = map_Address_to_DirectoryNode(addr)%RubyConfig::numberOfDirectoryPerChip();
      for(unsigned int i=0; i<size_in_bytes; ++i){
        int offset = addr.getOffset();
        value[i] = n->m_Directory_directory_vec[dir_version]->lookup(lineAddr).m_DataBlk.getByte(offset + i);
      }
      // Address not found
      //WARN_MSG("Couldn't find address");
      //WARN_EXPR(addr);
      found = false;
    }
    return true;
}

bool Sequencer::setRubyMemoryValue(const Address& addr, char *value,
                                   unsigned int size_in_bytes) {
  char test_buffer[64];

  // idea here is that the coherent cache should find the
  // latest data, then update it
  bool found = false;
  const Address lineAddr = line_address(addr);
  PhysAddress paddr(addr);
  DataBlock data;
  DataBlock* dataPtr = &data;
  Chip* n = dynamic_cast<Chip*>(m_chip_ptr);

  MachineID l2_mach = map_L2ChipId_to_L2Cache(addr, m_chip_ptr->getID() );
  int l2_ver = l2_mach.num%RubyConfig::numberOfL2CachePerChip();

  assert(n->m_L1Cache_L1IcacheMemory_vec[m_version] != NULL);
  assert(n->m_L1Cache_L1DcacheMemory_vec[m_version] != NULL);
  if (Protocol::m_TwoLevelCache) {
    if(Protocol::m_CMP){
      assert(n->m_L2Cache_L2cacheMemory_vec[l2_ver] != NULL);
    }
    else{
      assert(n->m_L1Cache_cacheMemory_vec[m_version] != NULL);
    }
  }

  if (n->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_IFETCH, dataPtr)){
    n->m_L1Cache_L1IcacheMemory_vec[m_version]->setMemoryValue(addr, value, size_in_bytes);
    found = true;
  } else if (n->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
    n->m_L1Cache_L1DcacheMemory_vec[m_version]->setMemoryValue(addr, value, size_in_bytes);
    found = true;
  } else if (Protocol::m_CMP && n->m_L2Cache_L2cacheMemory_vec[l2_ver]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
    n->m_L2Cache_L2cacheMemory_vec[l2_ver]->setMemoryValue(addr, value, size_in_bytes);
    found = true;
  } else {
    // Address not found
    n = dynamic_cast<Chip*>(g_system_ptr->getChip(map_Address_to_DirectoryNode(addr)/RubyConfig::numberOfDirectoryPerChip()));
    int dir_version = map_Address_to_DirectoryNode(addr)%RubyConfig::numberOfDirectoryPerChip();
    for(unsigned int i=0; i<size_in_bytes; ++i){
      int offset = addr.getOffset();
      n->m_Directory_directory_vec[dir_version]->lookup(lineAddr).m_DataBlk.setByte(offset + i, value[i]);
    }
    found = false;
  }

  if (found){
    found = getRubyMemoryValue(addr, test_buffer, size_in_bytes);
    assert(found);
    if(value[0] != test_buffer[0]){
      WARN_EXPR((int) value[0]);
      WARN_EXPR((int) test_buffer[0]);
      ERROR_MSG("setRubyMemoryValue failed to set value.");
    }
  }

  return true;
}
*/
612
613void
614Sequencer::rubyMemAccess(const uint64 paddr, char* data, const int len, const AccessType type)
615{
616  if ( type == AccessType_Read || type == AccessType_Write ) {
617    // need to break up the packet data
618    uint64 guest_ptr = paddr;
619    Vector<DataBlock*> datablocks;
620    while (paddr + len != guest_ptr) {
621      Address addr(guest_ptr);
622      Address line_addr = line_address(addr);
623
624      int bytes_copied;
625      if (addr.getOffset() == 0) {
626        bytes_copied = (guest_ptr + RubyConfig::dataBlockBytes() > paddr + len)?
627          (paddr + len - guest_ptr):
628          RubyConfig::dataBlockBytes();
629      } else {
630        bytes_copied = RubyConfig::dataBlockBytes() - addr.getOffset();
631        if (guest_ptr + bytes_copied > paddr + len)
632          bytes_copied = paddr + len - guest_ptr;
633      }
634
635      // first we need to find all data blocks that have to be updated for a write
636      // and the highest block for a read
637     for(int i=0;i<RubyConfig::numberOfProcessors();i++) {
638        if (Protocol::m_TwoLevelCache){
639          if(m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[i]->isTagPresent(line_address(addr)))
640            datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[i]->lookup(line_addr).getDataBlk());
641          if(m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[i]->isTagPresent(line_address(addr)))
642            datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[i]->lookup(line_addr).getDataBlk());
643        } else {
644          if(m_chip_ptr->m_L1Cache_cacheMemory_vec[i]->isTagPresent(line_address(addr)))
645            datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_cacheMemory_vec[i]->lookup(line_addr).getDataBlk());
646        }
647      }
648      if (Protocol::m_TwoLevelCache){
649        int l2_bank = map_L2ChipId_to_L2Cache(addr, 0).num; // TODO: ONLY WORKS WITH CMP!!!
650        if (m_chip_ptr->m_L2Cache_L2cacheMemory_vec[l2_bank]->isTagPresent(line_address(Address(paddr)))) {
651          datablocks.insertAtBottom(&m_chip_ptr->m_L2Cache_L2cacheMemory_vec[l2_bank]->lookup(addr).getDataBlk());
652        }
653      }
654      assert(dynamic_cast<Chip*>(m_chip_ptr)->m_Directory_directory_vec.size() > map_Address_to_DirectoryNode(addr));
655      DirectoryMemory* dir = dynamic_cast<Chip*>(m_chip_ptr)->m_Directory_directory_vec[map_Address_to_DirectoryNode(addr)];
656      Directory_Entry& entry = dir->lookup(line_addr);
657      datablocks.insertAtBottom(&entry.getDataBlk());
658
659      if (pkt->isRead()){
660        datablocks[0]->copyData(pkt_data, addr.getOffset(), bytes_copied);
661      } else {// pkt->isWrite() {
662        for (int i=0;i<datablocks.size();i++)
663          datablocks[i]->setData(pkt_data, addr.getOffset(), bytes_copied);
664      }
665
666      guest_ptr += bytes_copied;
667      pkt_data += bytes_copied;
668      datablocks.clear();
669    }
670}
671
672*/
673