// Sequencer.cc revision 7632:acf43d6bbc18
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */ 28 29#include "base/str.hh" 30#include "cpu/testers/rubytest/RubyTester.hh" 31#include "mem/protocol/CacheMsg.hh" 32#include "mem/protocol/Protocol.hh" 33#include "mem/protocol/Protocol.hh" 34#include "mem/ruby/buffers/MessageBuffer.hh" 35#include "mem/ruby/common/Global.hh" 36#include "mem/ruby/common/SubBlock.hh" 37#include "mem/ruby/libruby.hh" 38#include "mem/ruby/profiler/Profiler.hh" 39#include "mem/ruby/recorder/Tracer.hh" 40#include "mem/ruby/slicc_interface/AbstractController.hh" 41#include "mem/ruby/system/CacheMemory.hh" 42#include "mem/ruby/system/Sequencer.hh" 43#include "mem/ruby/system/System.hh" 44#include "mem/packet.hh" 45#include "params/RubySequencer.hh" 46 47using namespace std; 48 49Sequencer * 50RubySequencerParams::create() 51{ 52 return new Sequencer(this); 53} 54 55Sequencer::Sequencer(const Params *p) 56 : RubyPort(p), deadlockCheckEvent(this) 57{ 58 m_store_waiting_on_load_cycles = 0; 59 m_store_waiting_on_store_cycles = 0; 60 m_load_waiting_on_store_cycles = 0; 61 m_load_waiting_on_load_cycles = 0; 62 63 m_outstanding_count = 0; 64 65 m_max_outstanding_requests = 0; 66 m_deadlock_threshold = 0; 67 m_instCache_ptr = NULL; 68 m_dataCache_ptr = NULL; 69 70 m_instCache_ptr = p->icache; 71 m_dataCache_ptr = p->dcache; 72 m_max_outstanding_requests = p->max_outstanding_requests; 73 m_deadlock_threshold = p->deadlock_threshold; 74 m_usingRubyTester = p->using_ruby_tester; 75 76 assert(m_max_outstanding_requests > 0); 77 assert(m_deadlock_threshold > 0); 78 assert(m_instCache_ptr != NULL); 79 assert(m_dataCache_ptr != NULL); 80} 81 82Sequencer::~Sequencer() 83{ 84} 85 86void 87Sequencer::wakeup() 88{ 89 // Check for deadlock of any of the requests 90 Time current_time = g_eventQueue_ptr->getTime(); 91 92 // Check across all outstanding requests 93 int total_outstanding = 0; 94 95 RequestTable::iterator read = m_readRequestTable.begin(); 96 RequestTable::iterator read_end = m_readRequestTable.end(); 97 for (; read != read_end; ++read) { 98 
SequencerRequest* request = read->second; 99 if (current_time - request->issue_time < m_deadlock_threshold) 100 continue; 101 102 WARN_MSG("Possible Deadlock detected"); 103 WARN_EXPR(m_version); 104 WARN_EXPR(request->ruby_request.paddr); 105 WARN_EXPR(m_readRequestTable.size()); 106 WARN_EXPR(current_time); 107 WARN_EXPR(request->issue_time); 108 WARN_EXPR(current_time - request->issue_time); 109 ERROR_MSG("Aborting"); 110 } 111 112 RequestTable::iterator write = m_writeRequestTable.begin(); 113 RequestTable::iterator write_end = m_writeRequestTable.end(); 114 for (; write != write_end; ++write) { 115 SequencerRequest* request = write->second; 116 if (current_time - request->issue_time < m_deadlock_threshold) 117 continue; 118 119 WARN_MSG("Possible Deadlock detected"); 120 WARN_EXPR(m_version); 121 WARN_EXPR(request->ruby_request.paddr); 122 WARN_EXPR(current_time); 123 WARN_EXPR(request->issue_time); 124 WARN_EXPR(current_time - request->issue_time); 125 WARN_EXPR(m_writeRequestTable.size()); 126 ERROR_MSG("Aborting"); 127 } 128 129 total_outstanding += m_writeRequestTable.size(); 130 total_outstanding += m_readRequestTable.size(); 131 132 assert(m_outstanding_count == total_outstanding); 133 134 if (m_outstanding_count > 0) { 135 // If there are still outstanding requests, keep checking 136 schedule(deadlockCheckEvent, 137 m_deadlock_threshold * g_eventQueue_ptr->getClock() + 138 curTick); 139 } 140} 141 142void 143Sequencer::printStats(ostream & out) const 144{ 145 out << "Sequencer: " << m_name << endl 146 << " store_waiting_on_load_cycles: " 147 << m_store_waiting_on_load_cycles << endl 148 << " store_waiting_on_store_cycles: " 149 << m_store_waiting_on_store_cycles << endl 150 << " load_waiting_on_load_cycles: " 151 << m_load_waiting_on_load_cycles << endl 152 << " load_waiting_on_store_cycles: " 153 << m_load_waiting_on_store_cycles << endl; 154} 155 156void 157Sequencer::printProgress(ostream& out) const 158{ 159#if 0 160 int total_demand = 0; 161 out << 
"Sequencer Stats Version " << m_version << endl; 162 out << "Current time = " << g_eventQueue_ptr->getTime() << endl; 163 out << "---------------" << endl; 164 out << "outstanding requests" << endl; 165 166 out << "proc " << m_Read 167 << " version Requests = " << m_readRequestTable.size() << endl; 168 169 // print the request table 170 RequestTable::iterator read = m_readRequestTable.begin(); 171 RequestTable::iterator read_end = m_readRequestTable.end(); 172 for (; read != read_end; ++read) { 173 SequencerRequest* request = read->second; 174 out << "\tRequest[ " << i << " ] = " << request->type 175 << " Address " << rkeys[i] 176 << " Posted " << request->issue_time 177 << " PF " << PrefetchBit_No << endl; 178 total_demand++; 179 } 180 181 out << "proc " << m_version 182 << " Write Requests = " << m_writeRequestTable.size << endl; 183 184 // print the request table 185 RequestTable::iterator write = m_writeRequestTable.begin(); 186 RequestTable::iterator write_end = m_writeRequestTable.end(); 187 for (; write != write_end; ++write) { 188 SequencerRequest* request = write->second; 189 out << "\tRequest[ " << i << " ] = " << request.getType() 190 << " Address " << wkeys[i] 191 << " Posted " << request.getTime() 192 << " PF " << request.getPrefetch() << endl; 193 if (request.getPrefetch() == PrefetchBit_No) { 194 total_demand++; 195 } 196 } 197 198 out << endl; 199 200 out << "Total Number Outstanding: " << m_outstanding_count << endl 201 << "Total Number Demand : " << total_demand << endl 202 << "Total Number Prefetches : " << m_outstanding_count - total_demand 203 << endl << endl << endl; 204#endif 205} 206 207void 208Sequencer::printConfig(ostream& out) const 209{ 210 out << "Seqeuncer config: " << m_name << endl 211 << " controller: " << m_controller->getName() << endl 212 << " version: " << m_version << endl 213 << " max_outstanding_requests: " << m_max_outstanding_requests << endl 214 << " deadlock_threshold: " << m_deadlock_threshold << endl; 215} 216 217// 
Insert the request on the correct request table. Return true if 218// the entry was already present. 219bool 220Sequencer::insertRequest(SequencerRequest* request) 221{ 222 int total_outstanding = 223 m_writeRequestTable.size() + m_readRequestTable.size(); 224 225 assert(m_outstanding_count == total_outstanding); 226 227 // See if we should schedule a deadlock check 228 if (deadlockCheckEvent.scheduled() == false) { 229 schedule(deadlockCheckEvent, m_deadlock_threshold + curTick); 230 } 231 232 Address line_addr(request->ruby_request.paddr); 233 line_addr.makeLineAddress(); 234 if ((request->ruby_request.type == RubyRequestType_ST) || 235 (request->ruby_request.type == RubyRequestType_RMW_Read) || 236 (request->ruby_request.type == RubyRequestType_RMW_Write) || 237 (request->ruby_request.type == RubyRequestType_Locked_Read) || 238 (request->ruby_request.type == RubyRequestType_Locked_Write)) { 239 pair<RequestTable::iterator, bool> r = 240 m_writeRequestTable.insert(RequestTable::value_type(line_addr, 0)); 241 bool success = r.second; 242 RequestTable::iterator i = r.first; 243 if (!success) { 244 i->second = request; 245 // return true; 246 247 // drh5: isn't this an error? do you lose the initial request? 248 assert(0); 249 } 250 i->second = request; 251 m_outstanding_count++; 252 } else { 253 pair<RequestTable::iterator, bool> r = 254 m_readRequestTable.insert(RequestTable::value_type(line_addr, 0)); 255 bool success = r.second; 256 RequestTable::iterator i = r.first; 257 if (!success) { 258 i->second = request; 259 // return true; 260 261 // drh5: isn't this an error? do you lose the initial request? 
262 assert(0); 263 } 264 i->second = request; 265 m_outstanding_count++; 266 } 267 268 g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count); 269 270 total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size(); 271 assert(m_outstanding_count == total_outstanding); 272 273 return false; 274} 275 276void 277Sequencer::markRemoved() 278{ 279 m_outstanding_count--; 280 assert(m_outstanding_count == 281 m_writeRequestTable.size() + m_readRequestTable.size()); 282} 283 284void 285Sequencer::removeRequest(SequencerRequest* srequest) 286{ 287 assert(m_outstanding_count == 288 m_writeRequestTable.size() + m_readRequestTable.size()); 289 290 const RubyRequest & ruby_request = srequest->ruby_request; 291 Address line_addr(ruby_request.paddr); 292 line_addr.makeLineAddress(); 293 if ((ruby_request.type == RubyRequestType_ST) || 294 (ruby_request.type == RubyRequestType_RMW_Read) || 295 (ruby_request.type == RubyRequestType_RMW_Write) || 296 (ruby_request.type == RubyRequestType_Locked_Read) || 297 (ruby_request.type == RubyRequestType_Locked_Write)) { 298 m_writeRequestTable.erase(line_addr); 299 } else { 300 m_readRequestTable.erase(line_addr); 301 } 302 303 markRemoved(); 304} 305 306bool 307Sequencer::handleLlsc(const Address& address, SequencerRequest* request) 308{ 309 // 310 // The success flag indicates whether the LLSC operation was successful. 311 // LL ops will always succeed, but SC may fail if the cache line is no 312 // longer locked. 313 // 314 bool success = true; 315 if (request->ruby_request.type == RubyRequestType_Locked_Write) { 316 if (!m_dataCache_ptr->isLocked(address, m_version)) { 317 // 318 // For failed SC requests, indicate the failure to the cpu by 319 // setting the extra data to zero. 320 // 321 request->ruby_request.pkt->req->setExtraData(0); 322 success = false; 323 } else { 324 // 325 // For successful SC requests, indicate the success to the cpu by 326 // setting the extra data to one. 
327 // 328 request->ruby_request.pkt->req->setExtraData(1); 329 } 330 // 331 // Independent of success, all SC operations must clear the lock 332 // 333 m_dataCache_ptr->clearLocked(address); 334 } else if (request->ruby_request.type == RubyRequestType_Locked_Read) { 335 // 336 // Note: To fully follow Alpha LLSC semantics, should the LL clear any 337 // previously locked cache lines? 338 // 339 m_dataCache_ptr->setLocked(address, m_version); 340 } else if (m_dataCache_ptr->isLocked(address, m_version)) { 341 // 342 // Normal writes should clear the locked address 343 // 344 m_dataCache_ptr->clearLocked(address); 345 } 346 return success; 347} 348 349void 350Sequencer::writeCallback(const Address& address, DataBlock& data) 351{ 352 writeCallback(address, GenericMachineType_NULL, data); 353} 354 355void 356Sequencer::writeCallback(const Address& address, 357 GenericMachineType mach, 358 DataBlock& data) 359{ 360 writeCallback(address, mach, data, 0, 0, 0); 361} 362 363void 364Sequencer::writeCallback(const Address& address, 365 GenericMachineType mach, 366 DataBlock& data, 367 Time initialRequestTime, 368 Time forwardRequestTime, 369 Time firstResponseTime) 370{ 371 assert(address == line_address(address)); 372 assert(m_writeRequestTable.count(line_address(address))); 373 374 RequestTable::iterator i = m_writeRequestTable.find(address); 375 assert(i != m_writeRequestTable.end()); 376 SequencerRequest* request = i->second; 377 378 m_writeRequestTable.erase(i); 379 markRemoved(); 380 381 assert((request->ruby_request.type == RubyRequestType_ST) || 382 (request->ruby_request.type == RubyRequestType_RMW_Read) || 383 (request->ruby_request.type == RubyRequestType_RMW_Write) || 384 (request->ruby_request.type == RubyRequestType_Locked_Read) || 385 (request->ruby_request.type == RubyRequestType_Locked_Write)); 386 387 // 388 // For Alpha, properly handle LL, SC, and write requests with respect to 389 // locked cache blocks. 
390 // 391 bool success = handleLlsc(address, request); 392 393 if (request->ruby_request.type == RubyRequestType_RMW_Read) { 394 m_controller->blockOnQueue(address, m_mandatory_q_ptr); 395 } else if (request->ruby_request.type == RubyRequestType_RMW_Write) { 396 m_controller->unblock(address); 397 } 398 399 hitCallback(request, mach, data, success, 400 initialRequestTime, forwardRequestTime, firstResponseTime); 401} 402 403void 404Sequencer::readCallback(const Address& address, DataBlock& data) 405{ 406 readCallback(address, GenericMachineType_NULL, data); 407} 408 409void 410Sequencer::readCallback(const Address& address, 411 GenericMachineType mach, 412 DataBlock& data) 413{ 414 readCallback(address, mach, data, 0, 0, 0); 415} 416 417void 418Sequencer::readCallback(const Address& address, 419 GenericMachineType mach, 420 DataBlock& data, 421 Time initialRequestTime, 422 Time forwardRequestTime, 423 Time firstResponseTime) 424{ 425 assert(address == line_address(address)); 426 assert(m_readRequestTable.count(line_address(address))); 427 428 RequestTable::iterator i = m_readRequestTable.find(address); 429 assert(i != m_readRequestTable.end()); 430 SequencerRequest* request = i->second; 431 432 m_readRequestTable.erase(i); 433 markRemoved(); 434 435 assert((request->ruby_request.type == RubyRequestType_LD) || 436 (request->ruby_request.type == RubyRequestType_RMW_Read) || 437 (request->ruby_request.type == RubyRequestType_IFETCH)); 438 439 hitCallback(request, mach, data, true, 440 initialRequestTime, forwardRequestTime, firstResponseTime); 441} 442 443void 444Sequencer::hitCallback(SequencerRequest* srequest, 445 GenericMachineType mach, 446 DataBlock& data, 447 bool success, 448 Time initialRequestTime, 449 Time forwardRequestTime, 450 Time firstResponseTime) 451{ 452 const RubyRequest & ruby_request = srequest->ruby_request; 453 Address request_address(ruby_request.paddr); 454 Address request_line_address(ruby_request.paddr); 455 
request_line_address.makeLineAddress(); 456 RubyRequestType type = ruby_request.type; 457 Time issued_time = srequest->issue_time; 458 459 // Set this cache entry to the most recently used 460 if (type == RubyRequestType_IFETCH) { 461 if (m_instCache_ptr->isTagPresent(request_line_address)) 462 m_instCache_ptr->setMRU(request_line_address); 463 } else { 464 if (m_dataCache_ptr->isTagPresent(request_line_address)) 465 m_dataCache_ptr->setMRU(request_line_address); 466 } 467 468 assert(g_eventQueue_ptr->getTime() >= issued_time); 469 Time miss_latency = g_eventQueue_ptr->getTime() - issued_time; 470 471 // Profile the miss latency for all non-zero demand misses 472 if (miss_latency != 0) { 473 g_system_ptr->getProfiler()->missLatency(miss_latency, type, mach); 474 475 if (mach == GenericMachineType_L1Cache_wCC) { 476 g_system_ptr->getProfiler()->missLatencyWcc(issued_time, 477 initialRequestTime, 478 forwardRequestTime, 479 firstResponseTime, 480 g_eventQueue_ptr->getTime()); 481 } 482 483 if (mach == GenericMachineType_Directory) { 484 g_system_ptr->getProfiler()->missLatencyDir(issued_time, 485 initialRequestTime, 486 forwardRequestTime, 487 firstResponseTime, 488 g_eventQueue_ptr->getTime()); 489 } 490 491 if (Debug::getProtocolTrace()) { 492 if (success) { 493 g_system_ptr->getProfiler()-> 494 profileTransition("Seq", m_version, 495 Address(ruby_request.paddr), "", "Done", "", 496 csprintf("%d cycles", miss_latency)); 497 } else { 498 g_system_ptr->getProfiler()-> 499 profileTransition("Seq", m_version, 500 Address(ruby_request.paddr), "", "SC_Failed", "", 501 csprintf("%d cycles", miss_latency)); 502 } 503 } 504 } 505#if 0 506 if (request.getPrefetch() == PrefetchBit_Yes) { 507 return; // Ignore the prefetch 508 } 509#endif 510 511 // update the data 512 if (ruby_request.data != NULL) { 513 if ((type == RubyRequestType_LD) || 514 (type == RubyRequestType_IFETCH) || 515 (type == RubyRequestType_RMW_Read) || 516 (type == RubyRequestType_Locked_Read)) { 517 518 
memcpy(ruby_request.data, 519 data.getData(request_address.getOffset(), ruby_request.len), 520 ruby_request.len); 521 } else { 522 data.setData(ruby_request.data, request_address.getOffset(), 523 ruby_request.len); 524 } 525 } else { 526 DPRINTF(MemoryAccess, 527 "WARNING. Data not transfered from Ruby to M5 for type %s\n", 528 RubyRequestType_to_string(type)); 529 } 530 531 // If using the RubyTester, update the RubyTester sender state's 532 // subBlock with the recieved data. The tester will later access 533 // this state. 534 // Note: RubyPort will access it's sender state before the 535 // RubyTester. 536 if (m_usingRubyTester) { 537 RubyPort::SenderState *requestSenderState = 538 safe_cast<RubyPort::SenderState*>(ruby_request.pkt->senderState); 539 RubyTester::SenderState* testerSenderState = 540 safe_cast<RubyTester::SenderState*>(requestSenderState->saved); 541 testerSenderState->subBlock->mergeFrom(data); 542 } 543 544 ruby_hit_callback(ruby_request.pkt); 545 delete srequest; 546} 547 548// Returns true if the sequencer already has a load or store outstanding 549RequestStatus 550Sequencer::getRequestStatus(const RubyRequest& request) 551{ 552 bool is_outstanding_store = 553 !!m_writeRequestTable.count(line_address(Address(request.paddr))); 554 bool is_outstanding_load = 555 !!m_readRequestTable.count(line_address(Address(request.paddr))); 556 if (is_outstanding_store) { 557 if ((request.type == RubyRequestType_LD) || 558 (request.type == RubyRequestType_IFETCH) || 559 (request.type == RubyRequestType_RMW_Read)) { 560 m_store_waiting_on_load_cycles++; 561 } else { 562 m_store_waiting_on_store_cycles++; 563 } 564 return RequestStatus_Aliased; 565 } else if (is_outstanding_load) { 566 if ((request.type == RubyRequestType_ST) || 567 (request.type == RubyRequestType_RMW_Write)) { 568 m_load_waiting_on_store_cycles++; 569 } else { 570 m_load_waiting_on_load_cycles++; 571 } 572 return RequestStatus_Aliased; 573 } 574 575 if (m_outstanding_count >= 
m_max_outstanding_requests) { 576 return RequestStatus_BufferFull; 577 } 578 579 return RequestStatus_Ready; 580} 581 582bool 583Sequencer::empty() const 584{ 585 return m_writeRequestTable.empty() && m_readRequestTable.empty(); 586} 587 588RequestStatus 589Sequencer::makeRequest(const RubyRequest &request) 590{ 591 assert(Address(request.paddr).getOffset() + request.len <= 592 RubySystem::getBlockSizeBytes()); 593 RequestStatus status = getRequestStatus(request); 594 if (status != RequestStatus_Ready) 595 return status; 596 597 SequencerRequest *srequest = 598 new SequencerRequest(request, g_eventQueue_ptr->getTime()); 599 bool found = insertRequest(srequest); 600 if (found) { 601 panic("Sequencer::makeRequest should never be called if the " 602 "request is already outstanding\n"); 603 return RequestStatus_NULL; 604 } 605 606 issueRequest(request); 607 608 // TODO: issue hardware prefetches here 609 return RequestStatus_Issued; 610} 611 612void 613Sequencer::issueRequest(const RubyRequest& request) 614{ 615 // TODO: get rid of CacheMsg, CacheRequestType, and 616 // AccessModeTYpe, & have SLICC use RubyRequest and subtypes 617 // natively 618 CacheRequestType ctype; 619 switch(request.type) { 620 case RubyRequestType_IFETCH: 621 ctype = CacheRequestType_IFETCH; 622 break; 623 case RubyRequestType_LD: 624 ctype = CacheRequestType_LD; 625 break; 626 case RubyRequestType_ST: 627 ctype = CacheRequestType_ST; 628 break; 629 case RubyRequestType_Locked_Read: 630 case RubyRequestType_Locked_Write: 631 ctype = CacheRequestType_ATOMIC; 632 break; 633 case RubyRequestType_RMW_Read: 634 ctype = CacheRequestType_ATOMIC; 635 break; 636 case RubyRequestType_RMW_Write: 637 ctype = CacheRequestType_ATOMIC; 638 break; 639 default: 640 assert(0); 641 } 642 643 AccessModeType amtype; 644 switch(request.access_mode){ 645 case RubyAccessMode_User: 646 amtype = AccessModeType_UserMode; 647 break; 648 case RubyAccessMode_Supervisor: 649 amtype = AccessModeType_SupervisorMode; 650 break; 
651 case RubyAccessMode_Device: 652 amtype = AccessModeType_UserMode; 653 break; 654 default: 655 assert(0); 656 } 657 658 Address line_addr(request.paddr); 659 line_addr.makeLineAddress(); 660 CacheMsg *msg = new CacheMsg(line_addr, Address(request.paddr), ctype, 661 Address(request.pc), amtype, request.len, PrefetchBit_No, 662 request.proc_id); 663 664 if (Debug::getProtocolTrace()) { 665 g_system_ptr->getProfiler()-> 666 profileTransition("Seq", m_version, Address(request.paddr), 667 "", "Begin", "", 668 RubyRequestType_to_string(request.type)); 669 } 670 671 if (g_system_ptr->getTracer()->traceEnabled()) { 672 g_system_ptr->getTracer()-> 673 traceRequest(this, line_addr, Address(request.pc), 674 request.type, g_eventQueue_ptr->getTime()); 675 } 676 677 Time latency = 0; // initialzed to an null value 678 679 if (request.type == RubyRequestType_IFETCH) 680 latency = m_instCache_ptr->getLatency(); 681 else 682 latency = m_dataCache_ptr->getLatency(); 683 684 // Send the message to the cache controller 685 assert(latency > 0); 686 687 assert(m_mandatory_q_ptr != NULL); 688 m_mandatory_q_ptr->enqueue(msg, latency); 689} 690 691#if 0 692bool 693Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type, 694 AccessModeType access_mode, 695 int size, DataBlock*& data_ptr) 696{ 697 CacheMemory *cache = 698 (type == CacheRequestType_IFETCH) ? 
m_instCache_ptr : m_dataCache_ptr; 699 700 return cache->tryCacheAccess(line_address(addr), type, data_ptr); 701} 702#endif 703 704template <class KEY, class VALUE> 705std::ostream & 706operator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map) 707{ 708 typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin(); 709 typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end(); 710 711 out << "["; 712 for (; i != end; ++i) 713 out << " " << i->first << "=" << i->second; 714 out << " ]"; 715 716 return out; 717} 718 719void 720Sequencer::print(ostream& out) const 721{ 722 out << "[Sequencer: " << m_version 723 << ", outstanding requests: " << m_outstanding_count 724 << ", read request table: " << m_readRequestTable 725 << ", write request table: " << m_writeRequestTable 726 << "]"; 727} 728 729// this can be called from setState whenever coherence permissions are 730// upgraded when invoked, coherence violations will be checked for the 731// given block 732void 733Sequencer::checkCoherence(const Address& addr) 734{ 735#ifdef CHECK_COHERENCE 736 g_system_ptr->checkGlobalCoherenceInvariant(addr); 737#endif 738} 739