Sequencer.cc revision 8171:19444b1f092c
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "base/str.hh"
#include "base/misc.hh"
#include "cpu/testers/rubytest/RubyTester.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/packet.hh"
#include "params/RubySequencer.hh"

using namespace std;

Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

Sequencer::Sequencer(const Params *p)
    : RubyPort(p), deadlockCheckEvent(this)
{
    m_store_waiting_on_load_cycles = 0;
    m_store_waiting_on_store_cycles = 0;
    m_load_waiting_on_store_cycles = 0;
    m_load_waiting_on_load_cycles = 0;

    m_outstanding_count = 0;

    m_max_outstanding_requests = 0;
    m_deadlock_threshold = 0;
    m_instCache_ptr = NULL;
    m_dataCache_ptr = NULL;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);

    m_usingNetworkTester = p->using_network_tester;
}

Sequencer::~Sequencer()
{
}

void
Sequencer::wakeup()
{
    // Check for deadlock of any of the requests
    Time current_time = g_eventQueue_ptr->getTime();

    // Check across all outstanding requests
    int total_outstanding = 0;

    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_readRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->ruby_request.paddr, m_readRequestTable.size(),
              current_time, request->issue_time,
              current_time - request->issue_time);
    }

    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->ruby_request.paddr, m_writeRequestTable.size(),
              current_time, request->issue_time,
              current_time - request->issue_time);
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent,
                 m_deadlock_threshold * g_eventQueue_ptr->getClock() +
                 curTick());
    }
}

void
Sequencer::printStats(ostream & out) const
{
    out << "Sequencer: " << m_name << endl
        << "  store_waiting_on_load_cycles: "
        << m_store_waiting_on_load_cycles << endl
        << "  store_waiting_on_store_cycles: "
        << m_store_waiting_on_store_cycles << endl
        << "  load_waiting_on_load_cycles: "
        << m_load_waiting_on_load_cycles << endl
        << "  load_waiting_on_store_cycles: "
        << m_load_waiting_on_store_cycles << endl;
}

void
Sequencer::printProgress(ostream& out) const
{
#if 0
    int total_demand = 0;
    out << "Sequencer Stats Version " << m_version << endl;
    out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
    out << "---------------" << endl;
    out << "outstanding requests" << endl;

    out << "proc " << m_Read
        << " version Requests = " << m_readRequestTable.size() << endl;

    // print the request table
    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        out << "\tRequest[ " << i << " ] = " << request->type
            << " Address " << rkeys[i]
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    out << "proc " << m_version
        << " Write Requests = " << m_writeRequestTable.size << endl;

    // print the request table
    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        out << "\tRequest[ " << i << " ] = " << request.getType()
            << " Address " << wkeys[i]
            << " Posted " << request.getTime()
            << " PF " << request.getPrefetch() << endl;
        if (request.getPrefetch() == PrefetchBit_No) {
            total_demand++;
        }
    }

    out << endl;

    out << "Total Number Outstanding: " << m_outstanding_count << endl
        << "Total Number Demand     : " << total_demand << endl
        << "Total Number Prefetches : " << m_outstanding_count - total_demand
        << endl << endl << endl;
#endif
}

void
Sequencer::printConfig(ostream& out) const
{
    out << "Sequencer config: " << m_name << endl
        << "  controller: " << m_controller->getName() << endl
        << "  version: " << m_version << endl
        << "  max_outstanding_requests: " << m_max_outstanding_requests << endl
        << "  deadlock_threshold: " << m_deadlock_threshold << endl;
}

// Insert the request on the correct request table.  Return true if
// the entry was already present.
bool
Sequencer::insertRequest(SequencerRequest* request)
{
    int total_outstanding =
        m_writeRequestTable.size() + m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    // See if we should schedule a deadlock check
    if (deadlockCheckEvent.scheduled() == false) {
        schedule(deadlockCheckEvent, m_deadlock_threshold + curTick());
    }

    Address line_addr(request->ruby_request.paddr);
    line_addr.makeLineAddress();
    if ((request->ruby_request.type == RubyRequestType_ST) ||
        (request->ruby_request.type == RubyRequestType_RMW_Read) ||
        (request->ruby_request.type == RubyRequestType_RMW_Write) ||
        (request->ruby_request.type == RubyRequestType_Load_Linked) ||
        (request->ruby_request.type == RubyRequestType_Store_Conditional) ||
        (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
        (request->ruby_request.type == RubyRequestType_Locked_RMW_Write)) {
        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(RequestTable::value_type(line_addr, 0));
        bool success = r.second;
        RequestTable::iterator i = r.first;
        if (!success) {
            i->second = request;
            // return true;

            // drh5: isn't this an error?  do you lose the initial request?
            assert(0);
        }
        i->second = request;
        m_outstanding_count++;
    } else {
        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(RequestTable::value_type(line_addr, 0));
        bool success = r.second;
        RequestTable::iterator i = r.first;
        if (!success) {
            i->second = request;
            // return true;

            // drh5: isn't this an error?  do you lose the initial request?
            assert(0);
        }
        i->second = request;
        m_outstanding_count++;
    }

    g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);

    total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
    assert(m_outstanding_count == total_outstanding);

    return false;
}

void
Sequencer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}

void
Sequencer::removeRequest(SequencerRequest* srequest)
{
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());

    const RubyRequest & ruby_request = srequest->ruby_request;
    Address line_addr(ruby_request.paddr);
    line_addr.makeLineAddress();
    if ((ruby_request.type == RubyRequestType_ST) ||
        (ruby_request.type == RubyRequestType_RMW_Read) ||
        (ruby_request.type == RubyRequestType_RMW_Write) ||
        (ruby_request.type == RubyRequestType_Load_Linked) ||
        (ruby_request.type == RubyRequestType_Store_Conditional) ||
        (ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
        (ruby_request.type == RubyRequestType_Locked_RMW_Write)) {
        m_writeRequestTable.erase(line_addr);
    } else {
        m_readRequestTable.erase(line_addr);
    }

    markRemoved();
}

bool
Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
{
    //
    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    //
    bool success = true;
    if (request->ruby_request.type == RubyRequestType_Store_Conditional) {
        if (!m_dataCache_ptr->isLocked(address, m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->ruby_request.pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->ruby_request.pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        m_dataCache_ptr->clearLocked(address);
    } else if (request->ruby_request.type == RubyRequestType_Load_Linked) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        m_dataCache_ptr->setLocked(address, m_version);
    } else if (m_dataCache_ptr->isLocked(address, m_version)) {
        //
        // Normal writes should clear the locked address
        //
        m_dataCache_ptr->clearLocked(address);
    }
    return success;
}

void
Sequencer::writeCallback(const Address& address, DataBlock& data)
{
    writeCallback(address, GenericMachineType_NULL, data);
}

void
Sequencer::writeCallback(const Address& address,
                         GenericMachineType mach,
                         DataBlock& data)
{
    writeCallback(address, mach, data, 0, 0, 0);
}

void
Sequencer::writeCallback(const Address& address,
                         GenericMachineType mach,
                         DataBlock& data,
                         Time initialRequestTime,
                         Time forwardRequestTime,
                         Time firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_writeRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    SequencerRequest* request = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->ruby_request.type == RubyRequestType_ST) ||
           (request->ruby_request.type == RubyRequestType_RMW_Read) ||
           (request->ruby_request.type == RubyRequestType_RMW_Write) ||
           (request->ruby_request.type == RubyRequestType_Load_Linked) ||
           (request->ruby_request.type == RubyRequestType_Store_Conditional) ||
           (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
           (request->ruby_request.type == RubyRequestType_Locked_RMW_Write));

    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    // Not valid for Network_test protocol
    //
    bool success = true;
    if (!m_usingNetworkTester)
        success = handleLlsc(address, request);

    if (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) {
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->ruby_request.type == RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, mach, data, success,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

void
Sequencer::readCallback(const Address& address, DataBlock& data)
{
    readCallback(address, GenericMachineType_NULL, data);
}

void
Sequencer::readCallback(const Address& address,
                        GenericMachineType mach,
                        DataBlock& data)
{
    readCallback(address, mach, data, 0, 0, 0);
}

void
Sequencer::readCallback(const Address& address,
                        GenericMachineType mach,
                        DataBlock& data,
                        Time initialRequestTime,
                        Time forwardRequestTime,
                        Time firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_readRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    SequencerRequest* request = i->second;

    m_readRequestTable.erase(i);
    markRemoved();

    assert((request->ruby_request.type == RubyRequestType_LD) ||
           (request->ruby_request.type == RubyRequestType_IFETCH));

    hitCallback(request, mach, data, true,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

void
Sequencer::hitCallback(SequencerRequest* srequest,
                       GenericMachineType mach,
                       DataBlock& data,
                       bool success,
                       Time initialRequestTime,
                       Time forwardRequestTime,
                       Time firstResponseTime)
{
    const RubyRequest & ruby_request = srequest->ruby_request;
    Address request_address(ruby_request.paddr);
    Address request_line_address(ruby_request.paddr);
    request_line_address.makeLineAddress();
    RubyRequestType type = ruby_request.type;
    Time issued_time = srequest->issue_time;

    // Set this cache entry to the most recently used
    if (type == RubyRequestType_IFETCH) {
        if (m_instCache_ptr->isTagPresent(request_line_address))
            m_instCache_ptr->setMRU(request_line_address);
    } else {
        if (m_dataCache_ptr->isTagPresent(request_line_address))
            m_dataCache_ptr->setMRU(request_line_address);
    }

    assert(g_eventQueue_ptr->getTime() >= issued_time);
    Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

    // Profile the miss latency for all non-zero demand misses
    if (miss_latency != 0) {
        g_system_ptr->getProfiler()->missLatency(miss_latency, type, mach);

        if (mach == GenericMachineType_L1Cache_wCC) {
            g_system_ptr->getProfiler()->missLatencyWcc(issued_time,
                                                        initialRequestTime,
                                                        forwardRequestTime,
                                                        firstResponseTime,
                                                        g_eventQueue_ptr->getTime());
        }

        if (mach == GenericMachineType_Directory) {
            g_system_ptr->getProfiler()->missLatencyDir(issued_time,
                                                        initialRequestTime,
                                                        forwardRequestTime,
                                                        firstResponseTime,
                                                        g_eventQueue_ptr->getTime());
        }

        DPRINTFR(ProtocolTrace, "%7s %3s %10s%20s %6s>%-6s %s %d cycles\n",
                 g_eventQueue_ptr->getTime(), m_version, "Seq",
                 success ? "Done" : "SC_Failed", "", "",
                 Address(ruby_request.paddr), miss_latency);
    }
#if 0
    if (request.getPrefetch() == PrefetchBit_Yes) {
        return; // Ignore the prefetch
    }
#endif

    // update the data
    if (ruby_request.data != NULL) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked)) {
            memcpy(ruby_request.data,
                   data.getData(request_address.getOffset(), ruby_request.len),
                   ruby_request.len);
        } else {
            data.setData(ruby_request.data, request_address.getOffset(),
                         ruby_request.len);
        }
    } else {
        DPRINTF(MemoryAccess,
                "WARNING.  Data not transferred from Ruby to M5 for type %s\n",
                RubyRequestType_to_string(type));
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data.  The tester will later access
    // this state.
    // Note: RubyPort will access its sender state before the
    // RubyTester.
    if (m_usingRubyTester) {
        RubyPort::SenderState *requestSenderState =
            safe_cast<RubyPort::SenderState*>(ruby_request.pkt->senderState);
        RubyTester::SenderState* testerSenderState =
            safe_cast<RubyTester::SenderState*>(requestSenderState->saved);
        testerSenderState->subBlock->mergeFrom(data);
    }

    ruby_hit_callback(ruby_request.pkt);
    delete srequest;
}

// Returns RequestStatus_Aliased if the sequencer already has a load or store
// outstanding to the same cache line.
RequestStatus
Sequencer::getRequestStatus(const RubyRequest& request)
{
    bool is_outstanding_store =
        !!m_writeRequestTable.count(line_address(Address(request.paddr)));
    bool is_outstanding_load =
        !!m_readRequestTable.count(line_address(Address(request.paddr)));
    if (is_outstanding_store) {
        if ((request.type == RubyRequestType_LD) ||
            (request.type == RubyRequestType_IFETCH) ||
            (request.type == RubyRequestType_RMW_Read)) {
            m_store_waiting_on_load_cycles++;
        } else {
            m_store_waiting_on_store_cycles++;
        }
        return RequestStatus_Aliased;
    } else if (is_outstanding_load) {
        if ((request.type == RubyRequestType_ST) ||
            (request.type == RubyRequestType_RMW_Write)) {
            m_load_waiting_on_store_cycles++;
        } else {
            m_load_waiting_on_load_cycles++;
        }
        return RequestStatus_Aliased;
    }

    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    return RequestStatus_Ready;
}

bool
Sequencer::empty() const
{
    return m_writeRequestTable.empty() && m_readRequestTable.empty();
}

RequestStatus
Sequencer::makeRequest(const RubyRequest &request)
{
    assert(Address(request.paddr).getOffset() + request.len <=
           RubySystem::getBlockSizeBytes());
    RequestStatus status = getRequestStatus(request);
    if (status != RequestStatus_Ready)
        return status;

    SequencerRequest *srequest =
        new SequencerRequest(request, g_eventQueue_ptr->getTime());
    bool found = insertRequest(srequest);
    if (found) {
        panic("Sequencer::makeRequest should never be called if the "
              "request is already outstanding\n");
        return RequestStatus_NULL;
    }

    issueRequest(request);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}

void
Sequencer::issueRequest(const RubyRequest& request)
{
    // TODO: get rid of CacheMsg, RubyRequestType, and
    // AccessModeType, & have SLICC use RubyRequest and subtypes
    // natively
    RubyRequestType ctype;
    switch (request.type) {
      case RubyRequestType_IFETCH:
        ctype = RubyRequestType_IFETCH;
        break;
      case RubyRequestType_LD:
        ctype = RubyRequestType_LD;
        break;
      case RubyRequestType_ST:
      case RubyRequestType_RMW_Read:
      case RubyRequestType_RMW_Write:
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
        //
      case RubyRequestType_Locked_RMW_Read:
      case RubyRequestType_Locked_RMW_Write:
        ctype = RubyRequestType_ST;
        break;
      //
      // Alpha LL/SC instructions need to be handled carefully by the cache
      // coherence protocol to ensure they follow the proper semantics.  In
      // particular, by identifying the operations as atomic, the protocol
      // should understand that migratory sharing optimizations should not be
      // performed (i.e. a load between the LL and SC should not steal away
      // exclusive permission).
      //
      case RubyRequestType_Load_Linked:
      case RubyRequestType_Store_Conditional:
        ctype = RubyRequestType_ATOMIC;
        break;
      default:
        assert(0);
    }

    RubyAccessMode amtype;
    switch (request.access_mode) {
      case RubyAccessMode_User:
        amtype = RubyAccessMode_User;
        break;
      case RubyAccessMode_Supervisor:
        amtype = RubyAccessMode_Supervisor;
        break;
      case RubyAccessMode_Device:
        amtype = RubyAccessMode_User;
        break;
      default:
        assert(0);
    }

    Address line_addr(request.paddr);
    line_addr.makeLineAddress();
    CacheMsg *msg = new CacheMsg(line_addr, Address(request.paddr), ctype,
        Address(request.pc), amtype, request.len, PrefetchBit_No,
        request.proc_id);

    DPRINTFR(ProtocolTrace, "%7s %3s %10s%20s %6s>%-6s %s %s\n",
             g_eventQueue_ptr->getTime(), m_version, "Seq", "Begin", "", "",
             Address(request.paddr), RubyRequestType_to_string(request.type));

    Time latency = 0;  // initialized to a null value

    if (request.type == RubyRequestType_IFETCH)
        latency = m_instCache_ptr->getLatency();
    else
        latency = m_dataCache_ptr->getLatency();

    // Send the message to the cache controller
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, latency);
}

#if 0
bool
Sequencer::tryCacheAccess(const Address& addr, RubyRequestType type,
                          RubyAccessMode access_mode,
                          int size, DataBlock*& data_ptr)
{
    CacheMemory *cache =
        (type == RubyRequestType_IFETCH) ? m_instCache_ptr : m_dataCache_ptr;

    return cache->tryCacheAccess(line_address(addr), type, data_ptr);
}
#endif

template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map)
{
    typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin();
    typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end();

    out << "[";
    for (; i != end; ++i)
        out << " " << i->first << "=" << i->second;
    out << " ]";

    return out;
}

void
Sequencer::print(ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", read request table: " << m_readRequestTable
        << ", write request table: " << m_writeRequestTable
        << "]";
}

// This can be called from setState whenever coherence permissions are
// upgraded; when invoked, coherence violations will be checked for the
// given block.
void
Sequencer::checkCoherence(const Address& addr)
{
#ifdef CHECK_COHERENCE
    g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}