// Sequencer.cc revision 9245
1/* 2 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include "base/misc.hh"
#include "base/str.hh"
#include "config/the_isa.hh"
#if THE_ISA == X86_ISA
#include "arch/x86/insts/microldstop.hh"
#endif // X86_ISA
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubySequencer.hh"
#include "debug/RubyStats.hh"
#include "mem/protocol/PrefetchBit.hh"
#include "mem/protocol/RubyAccessMode.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/packet.hh"

using namespace std;

// Param-object factory: builds the Sequencer SimObject from its
// Python-generated parameter struct.
Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

// Construct a Sequencer. Zeroes the aliasing-stall statistics, caches the
// L1 I/D cache pointers and the outstanding-request / deadlock limits from
// the params, and records whether the network tester is in use (which
// disables LL/SC handling in writeCallback).
Sequencer::Sequencer(const Params *p)
    : RubyPort(p), deadlockCheckEvent(this)
{
    m_store_waiting_on_load_cycles = 0;
    m_store_waiting_on_store_cycles = 0;
    m_load_waiting_on_store_cycles = 0;
    m_load_waiting_on_load_cycles = 0;

    m_outstanding_count = 0;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);

    m_usingNetworkTester = p->using_network_tester;
}

Sequencer::~Sequencer()
{
}

// Periodic deadlock-check event handler. Panics if any outstanding read or
// write request has been waiting for at least m_deadlock_threshold cycles;
// otherwise re-schedules itself while requests remain outstanding.
void
Sequencer::wakeup()
{
    assert(getState() != SimObject::Draining);

    // Check for deadlock of any of the requests
    Time current_time = g_system_ptr->getTime();

    // Check across all outstanding requests
    int total_outstanding = 0;

    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_readRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              Address(request->pkt->getAddr()), m_readRequestTable.size(),
              current_time, request->issue_time,
              current_time - request->issue_time);
    }

    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              Address(request->pkt->getAddr()), m_writeRequestTable.size(),
              current_time, request->issue_time,
              current_time - request->issue_time);
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    // The counter must always agree with the table sizes.
    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent,
                 g_system_ptr->clockPeriod() * m_deadlock_threshold +
                 curTick());
    }
}

// Dump the aliasing-stall cycle counters accumulated by insertRequest.
void
Sequencer::printStats(ostream & out) const
{
    out << "Sequencer: " << m_name << endl
        << "  store_waiting_on_load_cycles: "
        << m_store_waiting_on_load_cycles << endl
        << "  store_waiting_on_store_cycles: "
        << m_store_waiting_on_store_cycles << endl
        << "  load_waiting_on_load_cycles: "
        << m_load_waiting_on_load_cycles << endl
        << "  load_waiting_on_store_cycles: "
        << m_load_waiting_on_store_cycles << endl;
}

// Progress dump; the body is disabled (#if 0) and references stale
// identifiers (i, rkeys, wkeys) from an older table implementation, so it
// would not compile if re-enabled as-is.
void
Sequencer::printProgress(ostream& out) const
{
#if 0
    int total_demand = 0;
    out << "Sequencer Stats Version " << m_version << endl;
    out << "Current time = " << g_system_ptr->getTime() << endl;
    out << "---------------" << endl;
    out << "outstanding requests" << endl;

    out << "proc " << m_Read
        << " version Requests = " << m_readRequestTable.size() << endl;

    // print the request table
    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        out << "\tRequest[ " << i << " ] = " << request->type
            << " Address " << rkeys[i]
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    out << "proc " << m_version
        << " Write Requests = " << m_writeRequestTable.size << endl;

    // print the request table
    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        out << "\tRequest[ " << i << " ] = " << request.getType()
            << " Address " << wkeys[i]
            << " Posted " << request.getTime()
            << " PF " << request.getPrefetch() << endl;
        if (request.getPrefetch() == PrefetchBit_No) {
            total_demand++;
        }
    }

    out << endl;

    out << "Total Number Outstanding: " << m_outstanding_count << endl
        << "Total Number Demand     : " << total_demand << endl
        << "Total Number Prefetches : " << m_outstanding_count - total_demand
        << endl << endl << endl;
#endif
}

// Insert the request on the correct request table (writes vs. reads).
// Returns RequestStatus_Aliased if a request for the same cache line is
// already outstanding, RequestStatus_Ready otherwise.
RequestStatus
Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
{
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    // See if we should schedule a deadlock check
    if (!deadlockCheckEvent.scheduled() &&
        getState() != SimObject::Draining) {
        schedule(deadlockCheckEvent,
                 g_system_ptr->clockPeriod() * m_deadlock_threshold +
                 curTick());
    }

    // Requests are tracked per cache line, not per byte address.
    Address line_addr(pkt->getAddr());
    line_addr.makeLineAddress();
    // Create a default entry, mapping the address to NULL, the cast is
    // there to make gcc 4.4 happy
    RequestTable::value_type default_entry(line_addr,
                                           (SequencerRequest*) NULL);

    // Store-like requests (including atomics, LL/SC, locked RMW, and
    // flushes) go to the write table; everything else to the read table.
    if ((request_type == RubyRequestType_ST) ||
        (request_type == RubyRequestType_RMW_Read) ||
        (request_type == RubyRequestType_RMW_Write) ||
        (request_type == RubyRequestType_Load_Linked) ||
        (request_type == RubyRequestType_Store_Conditional) ||
        (request_type == RubyRequestType_Locked_RMW_Read) ||
        (request_type == RubyRequestType_Locked_RMW_Write) ||
        (request_type == RubyRequestType_FLUSH)) {

        // Check if there is any outstanding read request for the same
        // cache line.
        if (m_readRequestTable.count(line_addr) > 0) {
            m_store_waiting_on_load_cycles++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(default_entry);
        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type,
                                             g_system_ptr->getTime());
            m_outstanding_count++;
        } else {
            // There is an outstanding write request for the cache line
            m_store_waiting_on_store_cycles++;
            return RequestStatus_Aliased;
        }
    } else {
        // Check if there is any outstanding write request for the same
        // cache line.
        if (m_writeRequestTable.count(line_addr) > 0) {
            m_load_waiting_on_store_cycles++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(default_entry);

        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type,
                                             g_system_ptr->getTime());
            m_outstanding_count++;
        } else {
            // There is an outstanding read request for the cache line
            m_load_waiting_on_load_cycles++;
            return RequestStatus_Aliased;
        }
    }

    g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    return RequestStatus_Ready;
}

// Decrement the outstanding-request counter after a table erase; asserts
// the counter still matches the combined table sizes.
void
Sequencer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}

// Remove a request from whichever table insertRequest placed it in.
// Note: the type list here omits RubyRequestType_FLUSH, which
// insertRequest places in the write table — TODO confirm whether this
// path is ever taken for flushes.
void
Sequencer::removeRequest(SequencerRequest* srequest)
{
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());

    Address line_addr(srequest->pkt->getAddr());
    line_addr.makeLineAddress();
    if ((srequest->m_type == RubyRequestType_ST) ||
        (srequest->m_type == RubyRequestType_RMW_Read) ||
        (srequest->m_type == RubyRequestType_RMW_Write) ||
        (srequest->m_type == RubyRequestType_Load_Linked) ||
        (srequest->m_type == RubyRequestType_Store_Conditional) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Read) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Write)) {
        m_writeRequestTable.erase(line_addr);
    } else {
        m_readRequestTable.erase(line_addr);
    }

    markRemoved();
}

// Apply load-linked/store-conditional semantics for a completed request.
// Returns false only for a store-conditional that lost its lock; the
// result is reported to the CPU via the request's extra data (0 = fail,
// 1 = success).
bool
Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
{
    //
    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    //
    bool success = true;
    if (request->m_type == RubyRequestType_Store_Conditional) {
        if (!m_dataCache_ptr->isLocked(address, m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        m_dataCache_ptr->clearLocked(address);
    } else if (request->m_type == RubyRequestType_Load_Linked) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        m_dataCache_ptr->setLocked(address, m_version);
    } else if ((m_dataCache_ptr->isTagPresent(address)) &&
               (m_dataCache_ptr->isLocked(address, m_version))) {
        //
        // Normal writes should clear the locked address
        //
        m_dataCache_ptr->clearLocked(address);
    }
    return success;
}

// Convenience overload: write completion with no machine type and no
// timing information.
void
Sequencer::writeCallback(const Address& address, DataBlock& data)
{
    writeCallback(address, GenericMachineType_NULL, data);
}

// Convenience overload: write completion with zeroed timing information.
void
Sequencer::writeCallback(const Address& address,
                         GenericMachineType mach,
                         DataBlock& data)
{
    writeCallback(address, mach, data, 0, 0, 0);
}

// Completion path for a write-class request: retires the entry from the
// write table, applies LL/SC semantics, manages mandatory-queue blocking
// for locked RMW sequences, and forwards to hitCallback.
void
Sequencer::writeCallback(const Address& address,
                         GenericMachineType mach,
                         DataBlock& data,
                         Time initialRequestTime,
                         Time forwardRequestTime,
                         Time firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_writeRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    SequencerRequest* request = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_ST) ||
           (request->m_type == RubyRequestType_ATOMIC) ||
           (request->m_type == RubyRequestType_RMW_Read) ||
           (request->m_type == RubyRequestType_RMW_Write) ||
           (request->m_type == RubyRequestType_Load_Linked) ||
           (request->m_type == RubyRequestType_Store_Conditional) ||
           (request->m_type == RubyRequestType_Locked_RMW_Read) ||
           (request->m_type == RubyRequestType_Locked_RMW_Write) ||
           (request->m_type == RubyRequestType_FLUSH));


    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    // Not valid for Network_test protocol
    //
    bool success = true;
    if (!m_usingNetworkTester)
        success = handleLlsc(address, request);

    // A locked RMW read blocks the mandatory queue until the matching
    // locked RMW write unblocks it, keeping the sequence atomic.
    if (request->m_type == RubyRequestType_Locked_RMW_Read) {
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->m_type == RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, mach, data, success,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

// Convenience overload: read completion with no machine type and no
// timing information.
void
Sequencer::readCallback(const Address& address, DataBlock& data)
{
    readCallback(address, GenericMachineType_NULL, data);
}

// Convenience overload: read completion with zeroed timing information.
void
Sequencer::readCallback(const Address& address,
                        GenericMachineType mach,
                        DataBlock& data)
{
    readCallback(address, mach, data, 0, 0, 0);
}

// Completion path for a read-class request (LD or IFETCH): retires the
// entry from the read table and forwards to hitCallback.
void
Sequencer::readCallback(const Address& address,
                        GenericMachineType mach,
                        DataBlock& data,
                        Time initialRequestTime,
                        Time forwardRequestTime,
                        Time firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_readRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    SequencerRequest* request = i->second;

    m_readRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_LD) ||
           (request->m_type == RubyRequestType_IFETCH));

    hitCallback(request, mach, data, true,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

// Common completion tail for reads and writes: updates cache MRU state,
// profiles miss latency, moves data between the Ruby DataBlock and the
// M5 packet (direction depends on request type), notifies the RubyTester
// if present, frees the request, and finally hands the packet back to the
// CPU (or to the cache recorder during warmup/cooldown).
void
Sequencer::hitCallback(SequencerRequest* srequest,
                       GenericMachineType mach,
                       DataBlock& data,
                       bool success,
                       Time initialRequestTime,
                       Time forwardRequestTime,
                       Time firstResponseTime)
{
    PacketPtr pkt = srequest->pkt;
    Address request_address(pkt->getAddr());
    Address request_line_address(pkt->getAddr());
    request_line_address.makeLineAddress();
    RubyRequestType type = srequest->m_type;
    Time issued_time = srequest->issue_time;

    // Set this cache entry to the most recently used
    if (type == RubyRequestType_IFETCH) {
        m_instCache_ptr->setMRU(request_line_address);
    } else {
        m_dataCache_ptr->setMRU(request_line_address);
    }

    assert(g_system_ptr->getTime() >= issued_time);
    Time miss_latency = g_system_ptr->getTime() - issued_time;

    // Profile the miss latency for all non-zero demand misses
    if (miss_latency != 0) {
        g_system_ptr->getProfiler()->missLatency(miss_latency, type, mach);

        if (mach == GenericMachineType_L1Cache_wCC) {
            g_system_ptr->getProfiler()->missLatencyWcc(issued_time,
                                                   initialRequestTime,
                                                   forwardRequestTime,
                                                   firstResponseTime,
                                                   g_system_ptr->getTime());
        }

        if (mach == GenericMachineType_Directory) {
            g_system_ptr->getProfiler()->missLatencyDir(issued_time,
                                                   initialRequestTime,
                                                   forwardRequestTime,
                                                   firstResponseTime,
                                                   g_system_ptr->getTime());
        }

        DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
                 curTick(), m_version, "Seq",
                 success ? "Done" : "SC_Failed", "", "",
                 request_address, miss_latency);
    }

    // update the data
    if (g_system_ptr->m_warmup_enabled) {
        // During warmup the packet data pointer may be unallocated; copy
        // from the packet into the Ruby data block.
        assert(pkt->getPtr<uint8_t>(false) != NULL);
        data.setData(pkt->getPtr<uint8_t>(false),
                     request_address.getOffset(), pkt->getSize());
    } else if (pkt->getPtr<uint8_t>(true) != NULL) {
        // Reads copy Ruby data into the packet; writes copy packet data
        // into the Ruby data block.
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked)) {
            memcpy(pkt->getPtr<uint8_t>(true),
                   data.getData(request_address.getOffset(), pkt->getSize()),
                   pkt->getSize());
        } else {
            data.setData(pkt->getPtr<uint8_t>(true),
                         request_address.getOffset(), pkt->getSize());
        }
    } else {
        DPRINTF(MemoryAccess,
                "WARNING. Data not transfered from Ruby to M5 for type %s\n",
                RubyRequestType_to_string(type));
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data. The tester will later access
    // this state.
    // Note: RubyPort will access its sender state before the
    // RubyTester.
    if (m_usingRubyTester) {
        RubyPort::SenderState *requestSenderState =
            safe_cast<RubyPort::SenderState*>(pkt->senderState);
        RubyTester::SenderState* testerSenderState =
            safe_cast<RubyTester::SenderState*>(requestSenderState->saved);
        testerSenderState->subBlock->mergeFrom(data);
    }

    delete srequest;

    // During warmup/cooldown the packet belongs to the cache recorder,
    // not a CPU, so it is consumed here and the next recorder request is
    // kicked off instead of replying upstream.
    if (g_system_ptr->m_warmup_enabled) {
        delete pkt;
        g_system_ptr->m_cache_recorder->enqueueNextFetchRequest();
    } else if (g_system_ptr->m_cooldown_enabled) {
        delete pkt;
        g_system_ptr->m_cache_recorder->enqueueNextFlushRequest();
    } else {
        ruby_hit_callback(pkt);
    }
}

// True when no requests are outstanding in either table.
bool
Sequencer::empty() const
{
    return m_writeRequestTable.empty() && m_readRequestTable.empty();
}

// Entry point from RubyPort: classify the packet into a primary type
// (tracked in the request tables) and a secondary type (sent to the
// protocol), insert it, and issue it. Returns BufferFull, Aliased, or
// Issued.
RequestStatus
Sequencer::makeRequest(PacketPtr pkt)
{
    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    RubyRequestType primary_type = RubyRequestType_NULL;
    RubyRequestType secondary_type = RubyRequestType_NULL;

    if (pkt->isLLSC()) {
        //
        // Alpha LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics. In
        // particular, by identifying the operations as atomic, the protocol
        // should understand that migratory sharing optimizations should not
        // be performed (i.e. a load between the LL and SC should not steal
        // away exclusive permission).
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing SC\n");
            primary_type = RubyRequestType_Store_Conditional;
        } else {
            DPRINTF(RubySequencer, "Issuing LL\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Load_Linked;
        }
        secondary_type = RubyRequestType_ATOMIC;
    } else if (pkt->req->isLocked()) {
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
            primary_type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Locked_RMW_Read;
        }
        secondary_type = RubyRequestType_ST;
    } else {
        if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                primary_type = secondary_type = RubyRequestType_IFETCH;
            } else {
#if THE_ISA == X86_ISA
                // x86 store-check loads must be treated as RMW reads so
                // the protocol acquires exclusive permission.
                uint32_t flags = pkt->req->getFlags();
                bool storeCheck = flags &
                        (TheISA::StoreCheck << TheISA::FlagShift);
#else
                bool storeCheck = false;
#endif // X86_ISA
                if (storeCheck) {
                    primary_type = RubyRequestType_RMW_Read;
                    secondary_type = RubyRequestType_ST;
                } else {
                    primary_type = secondary_type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isWrite()) {
            //
            // Note: M5 packets do not differentiate ST from RMW_Write
            //
            primary_type = secondary_type = RubyRequestType_ST;
        } else if (pkt->isFlush()) {
            primary_type = secondary_type = RubyRequestType_FLUSH;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    RequestStatus status = insertRequest(pkt, primary_type);
    if (status != RequestStatus_Ready)
        return status;

    issueRequest(pkt, secondary_type);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}

// Build a RubyRequest from the packet and enqueue it on the mandatory
// queue with the appropriate L1 (I or D) cache latency.
void
Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
{
    assert(pkt != NULL);
    int proc_id = -1;
    if (pkt->req->hasContextId()) {
        proc_id = pkt->req->contextId();
    }

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    RubyRequest *msg = new RubyRequest(pkt->getAddr(),
                                       pkt->getPtr<uint8_t>(true),
                                       pkt->getSize(), pc, secondary_type,
                                       RubyAccessMode_Supervisor, pkt,
                                       PrefetchBit_No, proc_id);

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %s\n",
             curTick(), m_version, "Seq", "Begin", "", "",
             msg->getPhysicalAddress(),
             RubyRequestType_to_string(secondary_type));

    Time latency = 0;  // initialized to a null value; set below

    if (secondary_type == RubyRequestType_IFETCH)
        latency = m_instCache_ptr->getLatency();
    else
        latency = m_dataCache_ptr->getLatency();

    // Send the message to the cache controller
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, latency);
}

// Stream a request table as "[ key=value ... ]" for Sequencer::print.
template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map)
{
    typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin();
    typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end();

    out << "[";
    for (; i != end; ++i)
        out << " " << i->first << "=" << i->second;
    out << " ]";

    return out;
}

// Debug dump of the sequencer's version and both request tables.
void
Sequencer::print(ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", read request table: " << m_readRequestTable
        << ", write request table: " << m_writeRequestTable
        << "]";
}

// this can be called from setState whenever coherence permissions are
// upgraded; when invoked, coherence violations will be checked for the
// given block
void
Sequencer::checkCoherence(const Address& addr)
{
#ifdef CHECK_COHERENCE
    g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}

// Record a sequencer-level statistic event for the RubyStats debug flag.
void
Sequencer::recordRequestType(SequencerRequestType requestType) {
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            SequencerRequestType_to_string(requestType));
}


// Forward a protocol-initiated eviction to the CPU-side port (e.g. to
// invalidate LL/SC monitors).
void
Sequencer::evictionCallback(const Address& address)
{
    ruby_eviction_callback(address);
}