
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id: Sequencer.C 1.131 2006/11/06 17:41:01-06:00 bobba@gratiano.cs.wisc.edu $
 *
 */

#include "Global.hh"
#include "Sequencer.hh"
#include "System.hh"
#include "Protocol.hh"
#include "Profiler.hh"
#include "CacheMemory.hh"
#include "RubyConfig.hh"
//#include "Tracer.hh"
#include "AbstractChip.hh"
#include "Chip.hh"
#include "Tester.hh"
#include "SubBlock.hh"
#include "Map.hh"
#include "interface.hh"
//#include "XactCommitArbiter.hh"
// #include "TransactionInterfaceManager.hh"
//#include "TransactionVersionManager.hh"
//#include "LazyTransactionVersionManager.hh"

//#define XACT_MGR g_system_ptr->getChip(m_chip_ptr->getID())->getTransactionInterfaceManager(m_version)

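// The Sequencer sits between the driver (Simics or the Tester) and the
// cache controller; it tracks outstanding requests in per-SMT-thread
// read and write request tables.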
Sequencer::Sequencer(AbstractChip* chip_ptr, int version) {
  m_chip_ptr = chip_ptr;
  m_version = version;

  m_deadlock_check_scheduled = false;
  m_outstanding_count = 0;

  int smt_threads = RubyConfig::numberofSMTThreads();
  m_writeRequestTable_ptr = new Map<Address, CacheMsg>*[smt_threads];
  m_readRequestTable_ptr = new Map<Address, CacheMsg>*[smt_threads];

  for(int p=0; p < smt_threads; ++p){
    m_writeRequestTable_ptr[p] = new Map<Address, CacheMsg>;
    m_readRequestTable_ptr[p] = new Map<Address, CacheMsg>;
  }

}

Sequencer::~Sequencer() {
  int smt_threads = RubyConfig::numberofSMTThreads();
  for(int i=0; i < smt_threads; ++i){
    if(m_writeRequestTable_ptr[i]){
      delete m_writeRequestTable_ptr[i];
    }
    if(m_readRequestTable_ptr[i]){
      delete m_readRequestTable_ptr[i];
    }
  }
  if(m_writeRequestTable_ptr){
    delete [] m_writeRequestTable_ptr;
  }
  if(m_readRequestTable_ptr){
    delete [] m_readRequestTable_ptr;
  }
}

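// wakeup() is scheduled whenever requests are outstanding; it flags any
// request older than g_DEADLOCK_THRESHOLD as a possible deadlock and
// re-schedules itself while requests remain.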
void Sequencer::wakeup() {
  // Check for deadlock of any of the requests
  Time current_time = g_eventQueue_ptr->getTime();
  bool deadlock = false;

  // Check across all outstanding requests
  int smt_threads = RubyConfig::numberofSMTThreads();
  int total_outstanding = 0;
  for(int p=0; p < smt_threads; ++p){
    Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
    for (int i=0; i<keys.size(); i++) {
      CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
      if (current_time - request.getTime() >= g_DEADLOCK_THRESHOLD) {
        WARN_MSG("Possible Deadlock detected");
        WARN_EXPR(request);
        WARN_EXPR(m_chip_ptr->getID());
        WARN_EXPR(m_version);
        WARN_EXPR(keys.size());
        WARN_EXPR(current_time);
        WARN_EXPR(request.getTime());
        WARN_EXPR(current_time - request.getTime());
        WARN_EXPR(*m_readRequestTable_ptr[p]);
        ERROR_MSG("Aborting");
        deadlock = true;
      }
    }

    keys = m_writeRequestTable_ptr[p]->keys();
    for (int i=0; i<keys.size(); i++) {
      CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
      if (current_time - request.getTime() >= g_DEADLOCK_THRESHOLD) {
        WARN_MSG("Possible Deadlock detected");
        WARN_EXPR(request);
        WARN_EXPR(m_chip_ptr->getID());
        WARN_EXPR(m_version);
        WARN_EXPR(current_time);
        WARN_EXPR(request.getTime());
        WARN_EXPR(current_time - request.getTime());
        WARN_EXPR(keys.size());
        WARN_EXPR(*m_writeRequestTable_ptr[p]);
        ERROR_MSG("Aborting");
        deadlock = true;
      }
    }
    total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
  }  // across all request tables
  assert(m_outstanding_count == total_outstanding);

  if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking
    g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
  } else {
    m_deadlock_check_scheduled = false;
  }
}

// returns the total number of requests
int Sequencer::getNumberOutstanding(){
  return m_outstanding_count;
}

// returns the total number of demand requests
int Sequencer::getNumberOutstandingDemand(){
  int smt_threads = RubyConfig::numberofSMTThreads();
  int total_demand = 0;
  for(int p=0; p < smt_threads; ++p){
    Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
    for (int i=0; i< keys.size(); i++) {
      CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
      // don't count transactional begin/commit requests
      if(request.getType() != CacheRequestType_BEGIN_XACT && request.getType() != CacheRequestType_COMMIT_XACT){
        if(request.getPrefetch() == PrefetchBit_No){
          total_demand++;
        }
      }
    }

    keys = m_writeRequestTable_ptr[p]->keys();
    for (int i=0; i< keys.size(); i++) {
      CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
      if(request.getPrefetch() == PrefetchBit_No){
        total_demand++;
      }
    }
  }

  return total_demand;
}

int Sequencer::getNumberOutstandingPrefetch(){
  int smt_threads = RubyConfig::numberofSMTThreads();
  int total_prefetch = 0;
  for(int p=0; p < smt_threads; ++p){
    Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
    for (int i=0; i< keys.size(); i++) {
      CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
      if(request.getPrefetch() == PrefetchBit_Yes){
        total_prefetch++;
      }
    }

    keys = m_writeRequestTable_ptr[p]->keys();
    for (int i=0; i< keys.size(); i++) {
      CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
      if(request.getPrefetch() == PrefetchBit_Yes){
        total_prefetch++;
      }
    }
  }

  return total_prefetch;
}

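// returns whether the outstanding request for this cache line is a software prefetch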
bool Sequencer::isPrefetchRequest(const Address & lineaddr){
  int smt_threads = RubyConfig::numberofSMTThreads();
  for(int p=0; p < smt_threads; ++p){
    // check load requests
    Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
    for (int i=0; i< keys.size(); i++) {
      CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
      if(line_address(request.getAddress()) == lineaddr){
        if(request.getPrefetch() == PrefetchBit_Yes){
          return true;
        }
        else{
          return false;
        }
      }
    }

    // check store requests
    keys = m_writeRequestTable_ptr[p]->keys();
    for (int i=0; i< keys.size(); i++) {
      CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
      if(line_address(request.getAddress()) == lineaddr){
        if(request.getPrefetch() == PrefetchBit_Yes){
          return true;
        }
        else{
          return false;
        }
      }
    }
  }
  // we should've found a matching request
  cout << "isPrefetchRequest() ERROR request NOT FOUND : " << lineaddr << endl;
  printProgress(cout);
  assert(0);
}

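// returns the access mode (user/supervisor) of the outstanding request for addr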
AccessModeType Sequencer::getAccessModeOfRequest(Address addr, int thread){
  if(m_readRequestTable_ptr[thread]->exist(line_address(addr))){
    CacheMsg& request = m_readRequestTable_ptr[thread]->lookup(addr);
    return request.getAccessMode();
  } else if(m_writeRequestTable_ptr[thread]->exist(line_address(addr))){
    CacheMsg& request = m_writeRequestTable_ptr[thread]->lookup(addr);
    return request.getAccessMode();
  } else {
    printProgress(cout);
    ERROR_MSG("Request not found in RequestTables");
  }
}

Address Sequencer::getLogicalAddressOfRequest(Address addr, int thread){
  assert(thread >= 0);
  if(m_readRequestTable_ptr[thread]->exist(line_address(addr))){
    CacheMsg& request = m_readRequestTable_ptr[thread]->lookup(addr);
    return request.getLogicalAddress();
  } else if(m_writeRequestTable_ptr[thread]->exist(line_address(addr))){
    CacheMsg& request = m_writeRequestTable_ptr[thread]->lookup(addr);
    return request.getLogicalAddress();
  } else {
    printProgress(cout);
    WARN_MSG("Request not found in RequestTables");
    WARN_MSG(addr);
    WARN_MSG(thread);
    ASSERT(0);
  }
}

// returns the ThreadID of the request
int Sequencer::getRequestThreadID(const Address & addr){
  int smt_threads = RubyConfig::numberofSMTThreads();
  int thread = -1;
  int num_found = 0;
  for(int p=0; p < smt_threads; ++p){
    if(m_readRequestTable_ptr[p]->exist(addr)){
      num_found++;
      thread = p;
    }
    if(m_writeRequestTable_ptr[p]->exist(addr)){
      num_found++;
      thread = p;
    }
  }
  if(num_found != 1){
    cout << "getRequestThreadID ERROR too many matching requests addr = " << addr << endl;
    printProgress(cout);
  }
  ASSERT(num_found == 1);
  ASSERT(thread != -1);

  return thread;
}

// given a line address, return the request's physical address
Address Sequencer::getRequestPhysicalAddress(const Address & lineaddr){
  int smt_threads = RubyConfig::numberofSMTThreads();
  Address physaddr;
  int num_found = 0;
  for(int p=0; p < smt_threads; ++p){
    if(m_readRequestTable_ptr[p]->exist(lineaddr)){
      num_found++;
      physaddr = (m_readRequestTable_ptr[p]->lookup(lineaddr)).getAddress();
    }
    if(m_writeRequestTable_ptr[p]->exist(lineaddr)){
      num_found++;
      physaddr = (m_writeRequestTable_ptr[p]->lookup(lineaddr)).getAddress();
    }
  }
  if(num_found != 1){
    cout << "getRequestPhysicalAddress ERROR too many matching requests addr = " << lineaddr << endl;
    printProgress(cout);
  }
  ASSERT(num_found == 1);

  return physaddr;
}

void Sequencer::printProgress(ostream& out) const{

  int total_demand = 0;
  out << "Sequencer Stats Version " << m_version << endl;
  out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
  out << "---------------" << endl;
  out << "outstanding requests" << endl;

  int smt_threads = RubyConfig::numberofSMTThreads();
  for(int p=0; p < smt_threads; ++p){
    Vector<Address> rkeys = m_readRequestTable_ptr[p]->keys();
    int read_size = rkeys.size();
    out << "proc " << m_chip_ptr->getID() << " thread " << p << " Read Requests = " << read_size << endl;
    // print the request table
    for(int i=0; i < read_size; ++i){
      CacheMsg & request = m_readRequestTable_ptr[p]->lookup(rkeys[i]);
      out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << rkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
      if( request.getPrefetch() == PrefetchBit_No ){
        total_demand++;
      }
    }

    Vector<Address> wkeys = m_writeRequestTable_ptr[p]->keys();
    int write_size = wkeys.size();
    out << "proc " << m_chip_ptr->getID() << " thread " << p << " Write Requests = " << write_size << endl;
    // print the request table
    for(int i=0; i < write_size; ++i){
      CacheMsg & request = m_writeRequestTable_ptr[p]->lookup(wkeys[i]);
      out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
      if( request.getPrefetch() == PrefetchBit_No ){
        total_demand++;
      }
    }

    out << endl;
  }
  out << "Total Number Outstanding: " << m_outstanding_count << endl;
  out << "Total Number Demand     : " << total_demand << endl;
  out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl;
  out << endl;
  out << endl;

}

void Sequencer::printConfig(ostream& out) {
  if (TSO) {
    out << "sequencer: Sequencer - TSO" << endl;
  } else {
    out << "sequencer: Sequencer - SC" << endl;
  }
  out << "  max_outstanding_requests: " << g_SEQUENCER_OUTSTANDING_REQUESTS << endl;
}

bool Sequencer::empty() const {
  return m_outstanding_count == 0;
}

// Insert the request on the correct request table.  Return true if
// the entry was already present.
bool Sequencer::insertRequest(const CacheMsg& request) {
  int thread = request.getThreadID();
  assert(thread >= 0);
  int total_outstanding = 0;
  int smt_threads = RubyConfig::numberofSMTThreads();
  for(int p=0; p < smt_threads; ++p){
    total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
  }
  assert(m_outstanding_count == total_outstanding);

  // See if we should schedule a deadlock check
  if (m_deadlock_check_scheduled == false) {
    g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
    m_deadlock_check_scheduled = true;
  }

  if ((request.getType() == CacheRequestType_ST) ||
      (request.getType() == CacheRequestType_ST_XACT) ||
      (request.getType() == CacheRequestType_LDX_XACT) ||
      (request.getType() == CacheRequestType_ATOMIC)) {
    if (m_writeRequestTable_ptr[thread]->exist(line_address(request.getAddress()))) {
      m_writeRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
      return true;
    }
    m_writeRequestTable_ptr[thread]->allocate(line_address(request.getAddress()));
    m_writeRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
    m_outstanding_count++;
  } else {
    if (m_readRequestTable_ptr[thread]->exist(line_address(request.getAddress()))) {
      m_readRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
      return true;
    }
    m_readRequestTable_ptr[thread]->allocate(line_address(request.getAddress()));
    m_readRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
    m_outstanding_count++;
  }

  g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);

  total_outstanding = 0;
  for(int p=0; p < smt_threads; ++p){
    total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
  }

  assert(m_outstanding_count == total_outstanding);
  return false;
}

void Sequencer::removeRequest(const CacheMsg& request) {
  int thread = request.getThreadID();
  assert(thread >= 0);
  int total_outstanding = 0;
  int smt_threads = RubyConfig::numberofSMTThreads();
  for(int p=0; p < smt_threads; ++p){
    total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
  }
  assert(m_outstanding_count == total_outstanding);

  if ((request.getType() == CacheRequestType_ST) ||
      (request.getType() == CacheRequestType_ST_XACT) ||
      (request.getType() == CacheRequestType_LDX_XACT) ||
      (request.getType() == CacheRequestType_ATOMIC)) {
    m_writeRequestTable_ptr[thread]->deallocate(line_address(request.getAddress()));
  } else {
    m_readRequestTable_ptr[thread]->deallocate(line_address(request.getAddress()));
  }
  m_outstanding_count--;

  total_outstanding = 0;
  for(int p=0; p < smt_threads; ++p){
    total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
  }
  assert(m_outstanding_count == total_outstanding);
}

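// Write completion callbacks: the short forms locate the oldest thread with
// a matching outstanding store and forward to the full overload, which
// removes the request and invokes hitCallback.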
void Sequencer::writeCallback(const Address& address) {
  DataBlock data;
  writeCallback(address, data);
}

void Sequencer::writeCallback(const Address& address, DataBlock& data) {
  // process oldest thread first
  int thread = -1;
  Time oldest_time = 0;
  int smt_threads = RubyConfig::numberofSMTThreads();
  for(int t=0; t < smt_threads; ++t){
    if(m_writeRequestTable_ptr[t]->exist(address)){
      CacheMsg & request = m_writeRequestTable_ptr[t]->lookup(address);
      if(thread == -1 || (request.getTime() < oldest_time) ){
        thread = t;
        oldest_time = request.getTime();
      }
    }
  }
  // make sure we found an oldest thread
  ASSERT(thread != -1);

  CacheMsg & request = m_writeRequestTable_ptr[thread]->lookup(address);

  writeCallback(address, data, GenericMachineType_NULL, PrefetchBit_No, thread);
}

void Sequencer::writeCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, PrefetchBit pf, int thread) {

  assert(address == line_address(address));
  assert(thread >= 0);
  assert(m_writeRequestTable_ptr[thread]->exist(line_address(address)));

  writeCallback(address, data, respondingMach, thread);

}

void Sequencer::writeCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, int thread) {
  assert(address == line_address(address));
  assert(m_writeRequestTable_ptr[thread]->exist(line_address(address)));
  CacheMsg request = m_writeRequestTable_ptr[thread]->lookup(address);
  assert( request.getThreadID() == thread);
  removeRequest(request);

  assert((request.getType() == CacheRequestType_ST) ||
         (request.getType() == CacheRequestType_ST_XACT) ||
         (request.getType() == CacheRequestType_LDX_XACT) ||
         (request.getType() == CacheRequestType_ATOMIC));

  hitCallback(request, data, respondingMach, thread);

}

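// Read completion callbacks, mirroring the write callbacks above.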
void Sequencer::readCallback(const Address& address) {
  DataBlock data;
  readCallback(address, data);
}

void Sequencer::readCallback(const Address& address, DataBlock& data) {
  // process oldest thread first
  int thread = -1;
  Time oldest_time = 0;
  int smt_threads = RubyConfig::numberofSMTThreads();
  for(int t=0; t < smt_threads; ++t){
    if(m_readRequestTable_ptr[t]->exist(address)){
      CacheMsg & request = m_readRequestTable_ptr[t]->lookup(address);
      if(thread == -1 || (request.getTime() < oldest_time) ){
        thread = t;
        oldest_time = request.getTime();
      }
    }
  }
  // make sure we found an oldest thread
  ASSERT(thread != -1);

  CacheMsg & request = m_readRequestTable_ptr[thread]->lookup(address);

  readCallback(address, data, GenericMachineType_NULL, PrefetchBit_No, thread);
}

void Sequencer::readCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, PrefetchBit pf, int thread) {

  assert(address == line_address(address));
  assert(m_readRequestTable_ptr[thread]->exist(line_address(address)));

  readCallback(address, data, respondingMach, thread);
}

void Sequencer::readCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, int thread) {
  assert(address == line_address(address));
  assert(m_readRequestTable_ptr[thread]->exist(line_address(address)));

  CacheMsg request = m_readRequestTable_ptr[thread]->lookup(address);
  assert( request.getThreadID() == thread );
  removeRequest(request);

  assert((request.getType() == CacheRequestType_LD) ||
         (request.getType() == CacheRequestType_LD_XACT) ||
         (request.getType() == CacheRequestType_IFETCH)
         );

  hitCallback(request, data, respondingMach, thread);
}

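// hitCallback finishes a request: it updates L1 MRU state, profiles the miss
// latency, merges the cache line data into a SubBlock, and hands the result
// back to the driver (store data is then merged back into the DataBlock).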
void Sequencer::hitCallback(const CacheMsg& request, DataBlock& data, GenericMachineType respondingMach, int thread) {
  int size = request.getSize();
  Address request_address = request.getAddress();
  Address request_logical_address = request.getLogicalAddress();
  Address request_line_address = line_address(request_address);
  CacheRequestType type = request.getType();
  int threadID = request.getThreadID();
  Time issued_time = request.getTime();
  int logical_proc_no = ((m_chip_ptr->getID() * RubyConfig::numberOfProcsPerChip()) + m_version) * RubyConfig::numberofSMTThreads() + threadID;

  DEBUG_MSG(SEQUENCER_COMP, MedPrio, size);

  // Set this cache entry to the most recently used
  if (type == CacheRequestType_IFETCH) {
    if (Protocol::m_TwoLevelCache) {
      if (m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
        m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[m_version]->setMRU(request_line_address);
      }
    }
    else {
      if (m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
        m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->setMRU(request_line_address);
      }
    }
  } else {
    if (Protocol::m_TwoLevelCache) {
      if (m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
        m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[m_version]->setMRU(request_line_address);
      }
    }
    else {
      if (m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
        m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->setMRU(request_line_address);
      }
    }
  }

  assert(g_eventQueue_ptr->getTime() >= issued_time);
  Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

  if (PROTOCOL_DEBUG_TRACE) {
    g_system_ptr->getProfiler()->profileTransition("Seq", (m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version), -1, request.getAddress(), "", "Done", "",
                                                   int_to_string(miss_latency)+" cycles "+GenericMachineType_to_string(respondingMach)+" "+CacheRequestType_to_string(request.getType())+" "+PrefetchBit_to_string(request.getPrefetch()));
  }

  DEBUG_MSG(SEQUENCER_COMP, MedPrio, request_address);
  DEBUG_MSG(SEQUENCER_COMP, MedPrio, request.getPrefetch());
  if (request.getPrefetch() == PrefetchBit_Yes) {
    DEBUG_MSG(SEQUENCER_COMP, MedPrio, "return");
    g_system_ptr->getProfiler()->swPrefetchLatency(miss_latency, type, respondingMach);
    return; // Ignore the software prefetch, don't callback the driver
  }

  // Profile the miss latency for all non-zero demand misses
  if (miss_latency != 0) {
    g_system_ptr->getProfiler()->missLatency(miss_latency, type, respondingMach);

#if 0
    uinteger_t tick = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "tick"));
    uinteger_t tick_cmpr = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "tick_cmpr"));
    uinteger_t stick = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "stick"));
    uinteger_t stick_cmpr = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "stick_cmpr"));
    cout << "END PROC " << m_version << hex << " tick = " << tick << " tick_cmpr = " << tick_cmpr << " stick = " << stick << " stick_cmpr = " << stick_cmpr << " cycle = "<< g_eventQueue_ptr->getTime() << dec << endl;
#endif

  }

  bool write =
    (type == CacheRequestType_ST) ||
    (type == CacheRequestType_ST_XACT) ||
    (type == CacheRequestType_LDX_XACT) ||
    (type == CacheRequestType_ATOMIC);

  if (TSO && write) {
    m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->callBack(line_address(request.getAddress()), data);
  } else {

    // Copy the correct bytes out of the cache line into the subblock
    SubBlock subblock(request_address, request_logical_address, size);
    subblock.mergeFrom(data);  // copy the correct bytes from DataBlock in the SubBlock

    // Scan the store buffer to see if there are any outstanding stores we need to collect
    if (TSO) {
      m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->updateSubBlock(subblock);
    }

    // Call into the Driver (Tester or Simics) and let it read and/or modify the sub-block
    g_system_ptr->getDriver()->hitCallback(m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version, subblock, type, threadID);

    // If the request was a Store or Atomic, apply the changes in the SubBlock to the DataBlock
    // (This is only triggered for the non-TSO case)
    if (write) {
      assert(!TSO);
      subblock.mergeTo(data);  // copy the correct bytes from SubBlock into the DataBlock
    }
  }
}

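// Conflict callbacks are the transactional-memory counterparts of the read
// and write callbacks above: the outstanding request is removed and reported
// to the driver as a conflict instead of a hit.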
void Sequencer::readConflictCallback(const Address& address) {
  // process oldest thread first
  int thread = -1;
  Time oldest_time = 0;
  int smt_threads = RubyConfig::numberofSMTThreads();
  for(int t=0; t < smt_threads; ++t){
    if(m_readRequestTable_ptr[t]->exist(address)){
      CacheMsg & request = m_readRequestTable_ptr[t]->lookup(address);
      if(thread == -1 || (request.getTime() < oldest_time) ){
        thread = t;
        oldest_time = request.getTime();
      }
    }
  }
  // make sure we found an oldest thread
  ASSERT(thread != -1);

  CacheMsg & request = m_readRequestTable_ptr[thread]->lookup(address);

  readConflictCallback(address, GenericMachineType_NULL, thread);
}

void Sequencer::readConflictCallback(const Address& address, GenericMachineType respondingMach, int thread) {
  assert(address == line_address(address));
  assert(m_readRequestTable_ptr[thread]->exist(line_address(address)));

  CacheMsg request = m_readRequestTable_ptr[thread]->lookup(address);
  assert( request.getThreadID() == thread );
  removeRequest(request);

  assert((request.getType() == CacheRequestType_LD) ||
         (request.getType() == CacheRequestType_LD_XACT) ||
         (request.getType() == CacheRequestType_IFETCH)
         );

  conflictCallback(request, respondingMach, thread);
}

void Sequencer::writeConflictCallback(const Address& address) {
  // process oldest thread first
  int thread = -1;
  Time oldest_time = 0;
  int smt_threads = RubyConfig::numberofSMTThreads();
  for(int t=0; t < smt_threads; ++t){
    if(m_writeRequestTable_ptr[t]->exist(address)){
      CacheMsg & request = m_writeRequestTable_ptr[t]->lookup(address);
      if(thread == -1 || (request.getTime() < oldest_time) ){
        thread = t;
        oldest_time = request.getTime();
      }
    }
  }
  // make sure we found an oldest thread
  ASSERT(thread != -1);

  CacheMsg & request = m_writeRequestTable_ptr[thread]->lookup(address);

  writeConflictCallback(address, GenericMachineType_NULL, thread);
}

void Sequencer::writeConflictCallback(const Address& address, GenericMachineType respondingMach, int thread) {
  assert(address == line_address(address));
  assert(m_writeRequestTable_ptr[thread]->exist(line_address(address)));
  CacheMsg request = m_writeRequestTable_ptr[thread]->lookup(address);
  assert( request.getThreadID() == thread);
  removeRequest(request);

  assert((request.getType() == CacheRequestType_ST) ||
         (request.getType() == CacheRequestType_ST_XACT) ||
         (request.getType() == CacheRequestType_LDX_XACT) ||
         (request.getType() == CacheRequestType_ATOMIC));

  conflictCallback(request, respondingMach, thread);

}

void Sequencer::conflictCallback(const CacheMsg& request, GenericMachineType respondingMach, int thread) {
  assert(XACT_MEMORY);
  int size = request.getSize();
  Address request_address = request.getAddress();
  Address request_logical_address = request.getLogicalAddress();
  Address request_line_address = line_address(request_address);
  CacheRequestType type = request.getType();
  int threadID = request.getThreadID();
  Time issued_time = request.getTime();
  int logical_proc_no = ((m_chip_ptr->getID() * RubyConfig::numberOfProcsPerChip()) + m_version) * RubyConfig::numberofSMTThreads() + threadID;

  DEBUG_MSG(SEQUENCER_COMP, MedPrio, size);

  assert(g_eventQueue_ptr->getTime() >= issued_time);
  Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

  if (PROTOCOL_DEBUG_TRACE) {
    g_system_ptr->getProfiler()->profileTransition("Seq", (m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version), -1, request.getAddress(), "", "Conflict", "",
                                                   int_to_string(miss_latency)+" cycles "+GenericMachineType_to_string(respondingMach)+" "+CacheRequestType_to_string(request.getType())+" "+PrefetchBit_to_string(request.getPrefetch()));
  }

  DEBUG_MSG(SEQUENCER_COMP, MedPrio, request_address);
  DEBUG_MSG(SEQUENCER_COMP, MedPrio, request.getPrefetch());
  if (request.getPrefetch() == PrefetchBit_Yes) {
    DEBUG_MSG(SEQUENCER_COMP, MedPrio, "return");
    g_system_ptr->getProfiler()->swPrefetchLatency(miss_latency, type, respondingMach);
    return; // Ignore the software prefetch, don't callback the driver
  }

  bool write =
    (type == CacheRequestType_ST) ||
    (type == CacheRequestType_ST_XACT) ||
    (type == CacheRequestType_LDX_XACT) ||
    (type == CacheRequestType_ATOMIC);

  // Copy the correct bytes out of the cache line into the subblock
  SubBlock subblock(request_address, request_logical_address, size);

  // Call into the Driver (Tester or Simics)
  g_system_ptr->getDriver()->conflictCallback(m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version, subblock, type, threadID);

  // If the request was a Store or Atomic, apply the changes in the SubBlock to the DataBlock
  // (This is only triggered for the non-TSO case)
  if (write) {
    assert(!TSO);
  }
}

void Sequencer::printDebug(){
  // notify driver of debug
  g_system_ptr->getDriver()->printDebug();
}

// Returns false if the sequencer cannot accept the request right now:
// either the maximum number of outstanding requests has been reached, or a
// load or store for the same line is already outstanding.
bool
Sequencer::isReady(const Packet* pkt) const
{

  int cpu_number = pkt->req->contextId();
  la_t logical_addr = pkt->req->getVaddr();
  pa_t physical_addr = pkt->req->getPaddr();
  CacheRequestType type_of_request;
  if ( pkt->req->isInstFetch() ) {
    type_of_request = CacheRequestType_IFETCH;
  } else if ( pkt->req->isLocked() || pkt->req->isSwap() ) {
    type_of_request = CacheRequestType_ATOMIC;
  } else if ( pkt->isRead() ) {
    type_of_request = CacheRequestType_LD;
  } else if ( pkt->isWrite() ) {
    type_of_request = CacheRequestType_ST;
  } else {
    assert(false);
  }
  int thread = pkt->req->threadId();

  CacheMsg request(Address( physical_addr ),
                   Address( physical_addr ),
                   type_of_request,
                   Address(0),
                   AccessModeType_UserMode,   // User/supervisor mode
                   0,   // Size in bytes of request
                   PrefetchBit_No,  // Not a prefetch
                   0,              // Version number
                   Address(logical_addr),   // Virtual Address
                   thread,         // SMT thread
                   0,          // TM specific - timestamp of memory request
                   false      // TM specific - whether request is part of escape action
                   );
  return isReady(request);
}

bool
Sequencer::isReady(const CacheMsg& request) const
{
  if (m_outstanding_count >= g_SEQUENCER_OUTSTANDING_REQUESTS) {
    //cout << "TOO MANY OUTSTANDING: " << m_outstanding_count << " " << g_SEQUENCER_OUTSTANDING_REQUESTS << " VER " << m_version << endl;
    //printProgress(cout);
    return false;
  }

  // This code allows reads to be performed even when we have a write
  // request outstanding for the line
  bool write =
    (request.getType() == CacheRequestType_ST) ||
    (request.getType() == CacheRequestType_ST_XACT) ||
    (request.getType() == CacheRequestType_LDX_XACT) ||
    (request.getType() == CacheRequestType_ATOMIC);

  // LUKE - disallow more than one request type per address
  //        INVARIANT: at most one request type per address, per processor
  int smt_threads = RubyConfig::numberofSMTThreads();
  for(int p=0; p < smt_threads; ++p){
    if( m_writeRequestTable_ptr[p]->exist(line_address(request.getAddress())) ||
        m_readRequestTable_ptr[p]->exist(line_address(request.getAddress())) ){
      //cout << "OUTSTANDING REQUEST EXISTS " << p << " VER " << m_version << endl;
      //printProgress(cout);
      return false;
    }
  }

  if (TSO) {
    return m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->isReady();
  }
  return true;
}

// Called by Driver (Simics or Tester).
void
Sequencer::makeRequest(const Packet* pkt, void* data)
{
  int cpu_number = pkt->req->contextId();
  la_t logical_addr = pkt->req->getVaddr();
  pa_t physical_addr = pkt->req->getPaddr();
  int request_size = pkt->getSize();
  CacheRequestType type_of_request;
  if ( pkt->req->isInstFetch() ) {
    type_of_request = CacheRequestType_IFETCH;
  } else if ( pkt->req->isLocked() || pkt->req->isSwap() ) {
    type_of_request = CacheRequestType_ATOMIC;
  } else if ( pkt->isRead() ) {
    type_of_request = CacheRequestType_LD;
  } else if ( pkt->isWrite() ) {
    type_of_request = CacheRequestType_ST;
  } else {
    assert(false);
  }
  la_t virtual_pc = pkt->req->getPC();
  int isPriv = false;  // TODO: get permission data
  int thread = pkt->req->threadId();

  AccessModeType access_mode = AccessModeType_UserMode; // TODO: get actual permission

  CacheMsg request(Address( physical_addr ),
                   Address( physical_addr ),
                   type_of_request,
                   Address(virtual_pc),
                   access_mode,    // User/supervisor mode
                   request_size,   // Size in bytes of request
                   PrefetchBit_No, // Not a prefetch
                   0,              // Version number
                   Address(logical_addr),   // Virtual Address
                   thread,         // SMT thread
                   0,          // TM specific - timestamp of memory request
                   false      // TM specific - whether request is part of escape action
                   );
  makeRequest(request);
}

void
Sequencer::makeRequest(const CacheMsg& request)
{
  bool write = (request.getType() == CacheRequestType_ST) ||
    (request.getType() == CacheRequestType_ST_XACT) ||
    (request.getType() == CacheRequestType_LDX_XACT) ||
    (request.getType() == CacheRequestType_ATOMIC);

  if (TSO && (request.getPrefetch() == PrefetchBit_No) && write) {
    assert(m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->isReady());
    m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->insertStore(request);
    return;
  }

  bool hit = doRequest(request);

}

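// doRequest first tries the single-cycle L1 fast path (and, under TSO, the
// store buffer); on a miss it falls through to issueRequest.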
bool Sequencer::doRequest(const CacheMsg& request) {
  bool hit = false;
  // Check the fast path
  DataBlock* data_ptr;

  int thread = request.getThreadID();

  hit = tryCacheAccess(line_address(request.getAddress()),
                       request.getType(),
                       request.getProgramCounter(),
                       request.getAccessMode(),
                       request.getSize(),
                       data_ptr);

  if (hit && (request.getType() == CacheRequestType_IFETCH || !REMOVE_SINGLE_CYCLE_DCACHE_FAST_PATH) ) {
    DEBUG_MSG(SEQUENCER_COMP, MedPrio, "Fast path hit");
    hitCallback(request, *data_ptr, GenericMachineType_L1Cache, thread);
    return true;
  }

#if 0
  uinteger_t tick = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "tick"));
  uinteger_t tick_cmpr = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "tick_cmpr"));
  uinteger_t stick = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "stick"));
  uinteger_t stick_cmpr = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "stick_cmpr"));
  cout << "START PROC " << m_version << hex << " tick = " << tick << " tick_cmpr = " << tick_cmpr << " stick = " << stick << " stick_cmpr = " << stick_cmpr << " cycle = "<< g_eventQueue_ptr->getTime() << dec << endl;
#endif

  if (TSO && (request.getType() == CacheRequestType_LD || request.getType() == CacheRequestType_IFETCH)) {

    // See if we can satisfy the load entirely from the store buffer
    SubBlock subblock(line_address(request.getAddress()), request.getSize());
    if (m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->trySubBlock(subblock)) {
      DataBlock dummy;
      hitCallback(request, dummy, GenericMachineType_NULL, thread);  // Call with an 'empty' datablock, since the data is in the store buffer
      return true;
    }
  }

  DEBUG_MSG(SEQUENCER_COMP, MedPrio, "Fast path miss");
  issueRequest(request);
  return hit;
}

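// issueRequest inserts the request into the request tables and, if it was not
// already outstanding, enqueues it on the L1 cache controller's mandatory queue.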
void Sequencer::issueRequest(const CacheMsg& request) {
  bool found = insertRequest(request);

  if (!found) {
    CacheMsg msg = request;
    msg.getAddress() = line_address(request.getAddress()); // Make line address

    // Fast Path L1 misses are profiled here - all non-fast path misses are profiled within the generated protocol code
    if (!REMOVE_SINGLE_CYCLE_DCACHE_FAST_PATH) {
      g_system_ptr->getProfiler()->addPrimaryStatSample(msg, m_chip_ptr->getID());
    }

    if (PROTOCOL_DEBUG_TRACE) {
      g_system_ptr->getProfiler()->profileTransition("Seq", (m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip() + m_version), -1, msg.getAddress(), "", "Begin", "", CacheRequestType_to_string(request.getType()));
    }

#if 0
    // Commented out by nate binkert because I removed the trace stuff
    if (g_system_ptr->getTracer()->traceEnabled()) {
      g_system_ptr->getTracer()->traceRequest((m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version), msg.getAddress(), msg.getProgramCounter(),
                                              msg.getType(), g_eventQueue_ptr->getTime());
    }
#endif

    Time latency = 0;  // initialized to a null value

    latency = SEQUENCER_TO_CONTROLLER_LATENCY;

    // Send the message to the cache controller
    assert(latency > 0);
    m_chip_ptr->m_L1Cache_mandatoryQueue_vec[m_version]->enqueue(msg, latency);

  } // !found
}

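// probes the appropriate L1 cache (I- or D-side, or the unified cache in a
// one-level configuration) for the requested line.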
bool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
                               const Address& pc, AccessModeType access_mode,
                               int size, DataBlock*& data_ptr) {
  if (type == CacheRequestType_IFETCH) {
    if (Protocol::m_TwoLevelCache) {
      return m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
    }
    else {
      return m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
    }
  } else {
    if (Protocol::m_TwoLevelCache) {
      return m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
    }
    else {
      return m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
    }
  }
}

void Sequencer::resetRequestTime(const Address& addr, int thread){
  assert(thread >= 0);
  // reset both load and store requests, if they exist
  if(m_readRequestTable_ptr[thread]->exist(line_address(addr))){
    CacheMsg& request = m_readRequestTable_ptr[thread]->lookup(addr);
    if( request.m_AccessMode != AccessModeType_UserMode){
      cout << "resetRequestTime ERROR read request addr = " << addr << " thread = "<< thread << " is SUPERVISOR MODE" << endl;
      printProgress(cout);
    }
    //ASSERT(request.m_AccessMode == AccessModeType_UserMode);
    request.setTime(g_eventQueue_ptr->getTime());
  }
  if(m_writeRequestTable_ptr[thread]->exist(line_address(addr))){
    CacheMsg& request = m_writeRequestTable_ptr[thread]->lookup(addr);
    if( request.m_AccessMode != AccessModeType_UserMode){
      cout << "resetRequestTime ERROR write request addr = " << addr << " thread = "<< thread << " is SUPERVISOR MODE" << endl;
      printProgress(cout);
    }
    //ASSERT(request.m_AccessMode == AccessModeType_UserMode);
    request.setTime(g_eventQueue_ptr->getTime());
  }
}

// removes load request from queue
void Sequencer::removeLoadRequest(const Address & addr, int thread){
  removeRequest(getReadRequest(addr, thread));
}

void Sequencer::removeStoreRequest(const Address & addr, int thread){
  removeRequest(getWriteRequest(addr, thread));
}

// returns the read CacheMsg
CacheMsg & Sequencer::getReadRequest( const Address & addr, int thread ){
  Address temp = addr;
  assert(thread >= 0);
  assert(temp == line_address(temp));
  assert(m_readRequestTable_ptr[thread]->exist(addr));
  return m_readRequestTable_ptr[thread]->lookup(addr);
}

CacheMsg & Sequencer::getWriteRequest( const Address & addr, int thread){
  Address temp = addr;
  assert(thread >= 0);
  assert(temp == line_address(temp));
  assert(m_writeRequestTable_ptr[thread]->exist(addr));
  return m_writeRequestTable_ptr[thread]->lookup(addr);
}

void Sequencer::print(ostream& out) const {
  out << "[Sequencer: " << m_chip_ptr->getID()
      << ", outstanding requests: " << m_outstanding_count;

  int smt_threads = RubyConfig::numberofSMTThreads();
  for(int p=0; p < smt_threads; ++p){
    out << ", read request table[ " << p << " ]: " << *m_readRequestTable_ptr[p]
        << ", write request table[ " << p << " ]: " << *m_writeRequestTable_ptr[p];
  }
  out << "]";
}

// this can be called from setState whenever coherence permissions are upgraded
// when invoked, coherence violations will be checked for the given block
void Sequencer::checkCoherence(const Address& addr) {
#ifdef CHECK_COHERENCE
  g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}

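// Reads the current value of addr directly from the cache hierarchy, falling
// back to the directory when no cache holds the line; setRubyMemoryValue uses
// it below to verify that an update took effect.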
bool Sequencer::getRubyMemoryValue(const Address& addr, char* value,
                                   unsigned int size_in_bytes ) {
  if(g_SIMICS){
    for(unsigned int i=0; i < size_in_bytes; i++) {
      value[i] = SIMICS_read_physical_memory( m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version,
                                              addr.getAddress() + i, 1 );
    }
    return false; // Do nothing?
  } else {
    bool found = false;
    const Address lineAddr = line_address(addr);
    DataBlock data;
    PhysAddress paddr(addr);
    DataBlock* dataPtr = &data;
    Chip* n = dynamic_cast<Chip*>(m_chip_ptr);
    // LUKE - use variable names instead of macros
    assert(n->m_L1Cache_L1IcacheMemory_vec[m_version] != NULL);
    assert(n->m_L1Cache_L1DcacheMemory_vec[m_version] != NULL);

    MachineID l2_mach = map_L2ChipId_to_L2Cache(addr, m_chip_ptr->getID() );
    int l2_ver = l2_mach.num%RubyConfig::numberOfL2CachePerChip();

    if (Protocol::m_TwoLevelCache) {
      if(Protocol::m_CMP){
        assert(n->m_L2Cache_L2cacheMemory_vec[l2_ver] != NULL);
      }
      else{
        assert(n->m_L1Cache_cacheMemory_vec[m_version] != NULL);
      }
    }

    if (n->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_IFETCH, dataPtr)){
      n->m_L1Cache_L1IcacheMemory_vec[m_version]->getMemoryValue(addr, value, size_in_bytes);
      found = true;
    } else if (n->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
      n->m_L1Cache_L1DcacheMemory_vec[m_version]->getMemoryValue(addr, value, size_in_bytes);
      found = true;
    } else if (Protocol::m_CMP && n->m_L2Cache_L2cacheMemory_vec[l2_ver]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
      n->m_L2Cache_L2cacheMemory_vec[l2_ver]->getMemoryValue(addr, value, size_in_bytes);
      found = true;
      //    } else if (n->TBE_TABLE_MEMBER_VARIABLE->isPresent(lineAddr)){
      //      ASSERT(n->TBE_TABLE_MEMBER_VARIABLE->isPresent(lineAddr));
      //      L1Cache_TBE tbeEntry = n->TBE_TABLE_MEMBER_VARIABLE->lookup(lineAddr);

      //      int offset = addr.getOffset();
      //      for(int i=0; i<size_in_bytes; ++i){
      //        value[i] = tbeEntry.getDataBlk().getByte(offset + i);
      //      }

      //      found = true;
    } else {
      // Address not found
      //cout << "  " << m_chip_ptr->getID() << " NOT IN CACHE, Value at Directory is: " << (int) value[0] << endl;
      n = dynamic_cast<Chip*>(g_system_ptr->getChip(map_Address_to_DirectoryNode(addr)/RubyConfig::numberOfDirectoryPerChip()));
      int dir_version = map_Address_to_DirectoryNode(addr)%RubyConfig::numberOfDirectoryPerChip();
      for(unsigned int i=0; i<size_in_bytes; ++i){
        int offset = addr.getOffset();
        value[i] = n->m_Directory_directory_vec[dir_version]->lookup(lineAddr).m_DataBlk.getByte(offset + i);
      }
      // Address not found
      //WARN_MSG("Couldn't find address");
      //WARN_EXPR(addr);
      found = false;
    }
    return true;
  }
}

bool Sequencer::setRubyMemoryValue(const Address& addr, char *value,
                                   unsigned int size_in_bytes) {
  char test_buffer[64];

  if(g_SIMICS){
    return false; // Do nothing?
  } else {
    // idea here is that the coherent cache should find the
    // latest data, then update it
    bool found = false;
    const Address lineAddr = line_address(addr);
    PhysAddress paddr(addr);
    DataBlock data;
    DataBlock* dataPtr = &data;
    Chip* n = dynamic_cast<Chip*>(m_chip_ptr);

    MachineID l2_mach = map_L2ChipId_to_L2Cache(addr, m_chip_ptr->getID() );
    int l2_ver = l2_mach.num%RubyConfig::numberOfL2CachePerChip();
    // LUKE - use variable names instead of macros
    //cout << "number of L2caches per chip = " << RubyConfig::numberOfL2CachePerChip(m_version) << endl;
    //cout << "L1I cache vec size = " << n->m_L1Cache_L1IcacheMemory_vec.size() << endl;
    //cout << "L1D cache vec size = " << n->m_L1Cache_L1DcacheMemory_vec.size() << endl;
    //cout << "L1cache_cachememory size = " << n->m_L1Cache_cacheMemory_vec.size() << endl;
    //cout << "L1cache_l2cachememory size = " << n->m_L1Cache_L2cacheMemory_vec.size() << endl;
    //     if (Protocol::m_TwoLevelCache) {
    //       if(Protocol::m_CMP){
    //         cout << "CMP L2 cache vec size = " << n->m_L2Cache_L2cacheMemory_vec.size() << endl;
    //       }
    //       else{
    //         cout << "L2 cache vec size = " << n->m_L1Cache_cacheMemory_vec.size() << endl;
    //       }
    //     }

    assert(n->m_L1Cache_L1IcacheMemory_vec[m_version] != NULL);
    assert(n->m_L1Cache_L1DcacheMemory_vec[m_version] != NULL);
    if (Protocol::m_TwoLevelCache) {
      if(Protocol::m_CMP){
        assert(n->m_L2Cache_L2cacheMemory_vec[l2_ver] != NULL);
      }
      else{
        assert(n->m_L1Cache_cacheMemory_vec[m_version] != NULL);
      }
    }

    if (n->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_IFETCH, dataPtr)){
      n->m_L1Cache_L1IcacheMemory_vec[m_version]->setMemoryValue(addr, value, size_in_bytes);
      found = true;
    } else if (n->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
      n->m_L1Cache_L1DcacheMemory_vec[m_version]->setMemoryValue(addr, value, size_in_bytes);
      found = true;
    } else if (Protocol::m_CMP && n->m_L2Cache_L2cacheMemory_vec[l2_ver]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
      n->m_L2Cache_L2cacheMemory_vec[l2_ver]->setMemoryValue(addr, value, size_in_bytes);
      found = true;
      //    } else if (n->TBE_TABLE_MEMBER_VARIABLE->isTagPresent(lineAddr)){
      //      L1Cache_TBE& tbeEntry = n->TBE_TABLE_MEMBER_VARIABLE->lookup(lineAddr);
      //      DataBlock tmpData;
      //      int offset = addr.getOffset();
      //      for(int i=0; i<size_in_bytes; ++i){
      //        tmpData.setByte(offset + i, value[i]);
      //      }
      //      tbeEntry.setDataBlk(tmpData);
      //      tbeEntry.setDirty(true);
    } else {
      // Address not found
      n = dynamic_cast<Chip*>(g_system_ptr->getChip(map_Address_to_DirectoryNode(addr)/RubyConfig::numberOfDirectoryPerChip()));
      int dir_version = map_Address_to_DirectoryNode(addr)%RubyConfig::numberOfDirectoryPerChip();
      for(unsigned int i=0; i<size_in_bytes; ++i){
        int offset = addr.getOffset();
        n->m_Directory_directory_vec[dir_version]->lookup(lineAddr).m_DataBlk.setByte(offset + i, value[i]);
      }
      found = false;
    }

    if (found){
      found = getRubyMemoryValue(addr, test_buffer, size_in_bytes);
      assert(found);
      if(value[0] != test_buffer[0]){
        WARN_EXPR((int) value[0]);
        WARN_EXPR((int) test_buffer[0]);
        ERROR_MSG("setRubyMemoryValue failed to set value.");
      }
    }

    return true;
  }
}