Sequencer.cc revision 9542
1/* 2 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29#include "base/misc.hh" 30#include "base/str.hh" 31#include "config/the_isa.hh" 32#if THE_ISA == X86_ISA 33#include "arch/x86/insts/microldstop.hh" 34#endif // X86_ISA 35#include "cpu/testers/rubytest/RubyTester.hh" 36#include "debug/MemoryAccess.hh" 37#include "debug/ProtocolTrace.hh" 38#include "debug/RubySequencer.hh" 39#include "debug/RubyStats.hh" 40#include "mem/protocol/PrefetchBit.hh" 41#include "mem/protocol/RubyAccessMode.hh" 42#include "mem/ruby/common/Global.hh" 43#include "mem/ruby/profiler/Profiler.hh" 44#include "mem/ruby/slicc_interface/RubyRequest.hh" 45#include "mem/ruby/system/Sequencer.hh" 46#include "mem/ruby/system/System.hh" 47#include "mem/packet.hh" 48 49using namespace std; 50 51Sequencer * 52RubySequencerParams::create() 53{ 54 return new Sequencer(this); 55} 56 57Sequencer::Sequencer(const Params *p) 58 : RubyPort(p), deadlockCheckEvent(this) 59{ 60 m_store_waiting_on_load_cycles = 0; 61 m_store_waiting_on_store_cycles = 0; 62 m_load_waiting_on_store_cycles = 0; 63 m_load_waiting_on_load_cycles = 0; 64 65 m_outstanding_count = 0; 66 67 m_instCache_ptr = p->icache; 68 m_dataCache_ptr = p->dcache; 69 m_max_outstanding_requests = p->max_outstanding_requests; 70 m_deadlock_threshold = p->deadlock_threshold; 71 72 assert(m_max_outstanding_requests > 0); 73 assert(m_deadlock_threshold > 0); 74 assert(m_instCache_ptr != NULL); 75 assert(m_dataCache_ptr != NULL); 76 77 m_usingNetworkTester = p->using_network_tester; 78} 79 80Sequencer::~Sequencer() 81{ 82} 83 84void 85Sequencer::wakeup() 86{ 87 assert(getDrainState() != Drainable::Draining); 88 89 // Check for deadlock of any of the requests 90 Cycles current_time = curCycle(); 91 92 // Check across all outstanding requests 93 int total_outstanding = 0; 94 95 RequestTable::iterator read = m_readRequestTable.begin(); 96 RequestTable::iterator read_end = m_readRequestTable.end(); 97 for (; read != read_end; ++read) { 98 SequencerRequest* request = read->second; 99 if (current_time - request->issue_time < m_deadlock_threshold) 100 continue; 101 102 panic("Possible Deadlock detected. Aborting!\n" 103 "version: %d request.paddr: 0x%x m_readRequestTable: %d " 104 "current time: %u issue_time: %d difference: %d\n", m_version, 105 Address(request->pkt->getAddr()), m_readRequestTable.size(), 106 current_time * clockPeriod(), request->issue_time * clockPeriod(), 107 (current_time * clockPeriod()) - (request->issue_time * clockPeriod())); 108 } 109 110 RequestTable::iterator write = m_writeRequestTable.begin(); 111 RequestTable::iterator write_end = m_writeRequestTable.end(); 112 for (; write != write_end; ++write) { 113 SequencerRequest* request = write->second; 114 if (current_time - request->issue_time < m_deadlock_threshold) 115 continue; 116 117 panic("Possible Deadlock detected. Aborting!\n" 118 "version: %d request.paddr: 0x%x m_writeRequestTable: %d " 119 "current time: %u issue_time: %d difference: %d\n", m_version, 120 Address(request->pkt->getAddr()), m_writeRequestTable.size(), 121 current_time * clockPeriod(), request->issue_time * clockPeriod(), 122 (current_time * clockPeriod()) - (request->issue_time * clockPeriod())); 123 } 124 125 total_outstanding += m_writeRequestTable.size(); 126 total_outstanding += m_readRequestTable.size(); 127 128 assert(m_outstanding_count == total_outstanding); 129 130 if (m_outstanding_count > 0) { 131 // If there are still outstanding requests, keep checking 132 schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold)); 133 } 134} 135 136void 137Sequencer::printStats(ostream & out) const 138{ 139 out << "Sequencer: " << m_name << endl 140 << " store_waiting_on_load_cycles: " 141 << m_store_waiting_on_load_cycles << endl 142 << " store_waiting_on_store_cycles: " 143 << m_store_waiting_on_store_cycles << endl 144 << " load_waiting_on_load_cycles: " 145 << m_load_waiting_on_load_cycles << endl 146 << " load_waiting_on_store_cycles: " 147 << m_load_waiting_on_store_cycles << endl; 148} 149 150void 151Sequencer::printProgress(ostream& out) const 152{ 153#if 0 154 int total_demand = 0; 155 out << "Sequencer Stats Version " << m_version << endl; 156 out << "Current time = " << g_system_ptr->getTime() << endl; 157 out << "---------------" << endl; 158 out << "outstanding requests" << endl; 159 160 out << "proc " << m_Read 161 << " version Requests = " << m_readRequestTable.size() << endl; 162 163 // print the request table 164 RequestTable::iterator read = m_readRequestTable.begin(); 165 RequestTable::iterator read_end = m_readRequestTable.end(); 166 for (; read != read_end; ++read) { 167 SequencerRequest* request = read->second; 168 out << "\tRequest[ " << i << " ] = " << request->type 169 << " Address " << rkeys[i] 170 << " Posted " << request->issue_time 171 << " PF " << PrefetchBit_No << endl; 172 total_demand++; 173 } 174 175 out << "proc " << m_version 176 << " Write Requests = " << m_writeRequestTable.size << endl; 177 178 // print the request table 179 RequestTable::iterator write = m_writeRequestTable.begin(); 180 RequestTable::iterator write_end = m_writeRequestTable.end(); 181 for (; write != write_end; ++write) { 182 SequencerRequest* request = write->second; 183 out << "\tRequest[ " << i << " ] = " << request.getType() 184 << " Address " << wkeys[i] 185 << " Posted " << request.getTime() 186 << " PF " << request.getPrefetch() << endl; 187 if (request.getPrefetch() == PrefetchBit_No) { 188 total_demand++; 189 } 190 } 191 192 out << endl; 193 194 out << "Total Number Outstanding: " << m_outstanding_count << endl 195 << "Total Number Demand : " << total_demand << endl 196 << "Total Number Prefetches : " << m_outstanding_count - total_demand 197 << endl << endl << endl; 198#endif 199} 200 201// Insert the request on the correct request table. Return true if 202// the entry was already present. 203RequestStatus 204Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type) 205{ 206 assert(m_outstanding_count == 207 (m_writeRequestTable.size() + m_readRequestTable.size())); 208 209 // See if we should schedule a deadlock check 210 if (!deadlockCheckEvent.scheduled() && 211 getDrainState() != Drainable::Draining) { 212 schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold)); 213 } 214 215 Address line_addr(pkt->getAddr()); 216 line_addr.makeLineAddress(); 217 // Create a default entry, mapping the address to NULL, the cast is 218 // there to make gcc 4.4 happy 219 RequestTable::value_type default_entry(line_addr, 220 (SequencerRequest*) NULL); 221 222 if ((request_type == RubyRequestType_ST) || 223 (request_type == RubyRequestType_RMW_Read) || 224 (request_type == RubyRequestType_RMW_Write) || 225 (request_type == RubyRequestType_Load_Linked) || 226 (request_type == RubyRequestType_Store_Conditional) || 227 (request_type == RubyRequestType_Locked_RMW_Read) || 228 (request_type == RubyRequestType_Locked_RMW_Write) || 229 (request_type == RubyRequestType_FLUSH)) { 230 231 // Check if there is any outstanding read request for the same 232 // cache line. 233 if (m_readRequestTable.count(line_addr) > 0) { 234 m_store_waiting_on_load_cycles++; 235 return RequestStatus_Aliased; 236 } 237 238 pair<RequestTable::iterator, bool> r = 239 m_writeRequestTable.insert(default_entry); 240 if (r.second) { 241 RequestTable::iterator i = r.first; 242 i->second = new SequencerRequest(pkt, request_type, curCycle()); 243 m_outstanding_count++; 244 } else { 245 // There is an outstanding write request for the cache line 246 m_store_waiting_on_store_cycles++; 247 return RequestStatus_Aliased; 248 } 249 } else { 250 // Check if there is any outstanding write request for the same 251 // cache line. 252 if (m_writeRequestTable.count(line_addr) > 0) { 253 m_load_waiting_on_store_cycles++; 254 return RequestStatus_Aliased; 255 } 256 257 pair<RequestTable::iterator, bool> r = 258 m_readRequestTable.insert(default_entry); 259 260 if (r.second) { 261 RequestTable::iterator i = r.first; 262 i->second = new SequencerRequest(pkt, request_type, curCycle()); 263 m_outstanding_count++; 264 } else { 265 // There is an outstanding read request for the cache line 266 m_load_waiting_on_load_cycles++; 267 return RequestStatus_Aliased; 268 } 269 } 270 271 g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count); 272 assert(m_outstanding_count == 273 (m_writeRequestTable.size() + m_readRequestTable.size())); 274 275 return RequestStatus_Ready; 276} 277 278void 279Sequencer::markRemoved() 280{ 281 m_outstanding_count--; 282 assert(m_outstanding_count == 283 m_writeRequestTable.size() + m_readRequestTable.size()); 284} 285 286void 287Sequencer::removeRequest(SequencerRequest* srequest) 288{ 289 assert(m_outstanding_count == 290 m_writeRequestTable.size() + m_readRequestTable.size()); 291 292 Address line_addr(srequest->pkt->getAddr()); 293 line_addr.makeLineAddress(); 294 if ((srequest->m_type == RubyRequestType_ST) || 295 (srequest->m_type == RubyRequestType_RMW_Read) || 296 (srequest->m_type == RubyRequestType_RMW_Write) || 297 (srequest->m_type == RubyRequestType_Load_Linked) || 298 (srequest->m_type == RubyRequestType_Store_Conditional) || 299 (srequest->m_type == RubyRequestType_Locked_RMW_Read) || 300 (srequest->m_type == RubyRequestType_Locked_RMW_Write)) { 301 m_writeRequestTable.erase(line_addr); 302 } else { 303 m_readRequestTable.erase(line_addr); 304 } 305 306 markRemoved(); 307} 308 309bool 310Sequencer::handleLlsc(const Address& address, SequencerRequest* request) 311{ 312 // 313 // The success flag indicates whether the LLSC operation was successful. 314 // LL ops will always succeed, but SC may fail if the cache line is no 315 // longer locked. 316 // 317 bool success = true; 318 if (request->m_type == RubyRequestType_Store_Conditional) { 319 if (!m_dataCache_ptr->isLocked(address, m_version)) { 320 // 321 // For failed SC requests, indicate the failure to the cpu by 322 // setting the extra data to zero. 323 // 324 request->pkt->req->setExtraData(0); 325 success = false; 326 } else { 327 // 328 // For successful SC requests, indicate the success to the cpu by 329 // setting the extra data to one. 330 // 331 request->pkt->req->setExtraData(1); 332 } 333 // 334 // Independent of success, all SC operations must clear the lock 335 // 336 m_dataCache_ptr->clearLocked(address); 337 } else if (request->m_type == RubyRequestType_Load_Linked) { 338 // 339 // Note: To fully follow Alpha LLSC semantics, should the LL clear any 340 // previously locked cache lines? 341 // 342 m_dataCache_ptr->setLocked(address, m_version); 343 } else if ((m_dataCache_ptr->isTagPresent(address)) && 344 (m_dataCache_ptr->isLocked(address, m_version))) { 345 // 346 // Normal writes should clear the locked address 347 // 348 m_dataCache_ptr->clearLocked(address); 349 } 350 return success; 351} 352 353void 354Sequencer::writeCallback(const Address& address, DataBlock& data) 355{ 356 writeCallback(address, GenericMachineType_NULL, data); 357} 358 359void 360Sequencer::writeCallback(const Address& address, 361 GenericMachineType mach, 362 DataBlock& data) 363{ 364 writeCallback(address, mach, data, Cycles(0), Cycles(0), Cycles(0)); 365} 366 367void 368Sequencer::writeCallback(const Address& address, 369 GenericMachineType mach, 370 DataBlock& data, 371 Cycles initialRequestTime, 372 Cycles forwardRequestTime, 373 Cycles firstResponseTime) 374{ 375 assert(address == line_address(address)); 376 assert(m_writeRequestTable.count(line_address(address))); 377 378 RequestTable::iterator i = m_writeRequestTable.find(address); 379 assert(i != m_writeRequestTable.end()); 380 SequencerRequest* request = i->second; 381 382 m_writeRequestTable.erase(i); 383 markRemoved(); 384 385 assert((request->m_type == RubyRequestType_ST) || 386 (request->m_type == RubyRequestType_ATOMIC) || 387 (request->m_type == RubyRequestType_RMW_Read) || 388 (request->m_type == RubyRequestType_RMW_Write) || 389 (request->m_type == RubyRequestType_Load_Linked) || 390 (request->m_type == RubyRequestType_Store_Conditional) || 391 (request->m_type == RubyRequestType_Locked_RMW_Read) || 392 (request->m_type == RubyRequestType_Locked_RMW_Write) || 393 (request->m_type == RubyRequestType_FLUSH)); 394 395 396 // 397 // For Alpha, properly handle LL, SC, and write requests with respect to 398 // locked cache blocks. 399 // 400 // Not valid for Network_test protocl 401 // 402 bool success = true; 403 if(!m_usingNetworkTester) 404 success = handleLlsc(address, request); 405 406 if (request->m_type == RubyRequestType_Locked_RMW_Read) { 407 m_controller->blockOnQueue(address, m_mandatory_q_ptr); 408 } else if (request->m_type == RubyRequestType_Locked_RMW_Write) { 409 m_controller->unblock(address); 410 } 411 412 hitCallback(request, mach, data, success, 413 initialRequestTime, forwardRequestTime, firstResponseTime); 414} 415 416void 417Sequencer::readCallback(const Address& address, DataBlock& data) 418{ 419 readCallback(address, GenericMachineType_NULL, data); 420} 421 422void 423Sequencer::readCallback(const Address& address, 424 GenericMachineType mach, 425 DataBlock& data) 426{ 427 readCallback(address, mach, data, Cycles(0), Cycles(0), Cycles(0)); 428} 429 430void 431Sequencer::readCallback(const Address& address, 432 GenericMachineType mach, 433 DataBlock& data, 434 Cycles initialRequestTime, 435 Cycles forwardRequestTime, 436 Cycles firstResponseTime) 437{ 438 assert(address == line_address(address)); 439 assert(m_readRequestTable.count(line_address(address))); 440 441 RequestTable::iterator i = m_readRequestTable.find(address); 442 assert(i != m_readRequestTable.end()); 443 SequencerRequest* request = i->second; 444 445 m_readRequestTable.erase(i); 446 markRemoved(); 447 448 assert((request->m_type == RubyRequestType_LD) || 449 (request->m_type == RubyRequestType_IFETCH)); 450 451 hitCallback(request, mach, data, true, 452 initialRequestTime, forwardRequestTime, firstResponseTime); 453} 454 455void 456Sequencer::hitCallback(SequencerRequest* srequest, 457 GenericMachineType mach, 458 DataBlock& data, 459 bool success, 460 Cycles initialRequestTime, 461 Cycles forwardRequestTime, 462 Cycles firstResponseTime) 463{ 464 PacketPtr pkt = srequest->pkt; 465 Address request_address(pkt->getAddr()); 466 Address request_line_address(pkt->getAddr()); 467 request_line_address.makeLineAddress(); 468 RubyRequestType type = srequest->m_type; 469 Cycles issued_time = srequest->issue_time; 470 471 // Set this cache entry to the most recently used 472 if (type == RubyRequestType_IFETCH) { 473 m_instCache_ptr->setMRU(request_line_address); 474 } else { 475 m_dataCache_ptr->setMRU(request_line_address); 476 } 477 478 assert(curCycle() >= issued_time); 479 Cycles miss_latency = curCycle() - issued_time; 480 481 // Profile the miss latency for all non-zero demand misses 482 if (miss_latency != 0) { 483 g_system_ptr->getProfiler()->missLatency(miss_latency, type, mach); 484 485 if (mach == GenericMachineType_L1Cache_wCC) { 486 g_system_ptr->getProfiler()->missLatencyWcc(issued_time, 487 initialRequestTime, forwardRequestTime, 488 firstResponseTime, curCycle()); 489 } 490 491 if (mach == GenericMachineType_Directory) { 492 g_system_ptr->getProfiler()->missLatencyDir(issued_time, 493 initialRequestTime, forwardRequestTime, 494 firstResponseTime, curCycle()); 495 } 496 497 DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n", 498 curTick(), m_version, "Seq", 499 success ? "Done" : "SC_Failed", "", "", 500 request_address, miss_latency); 501 } 502 503 // update the data 504 if (g_system_ptr->m_warmup_enabled) { 505 assert(pkt->getPtr<uint8_t>(false) != NULL); 506 data.setData(pkt->getPtr<uint8_t>(false), 507 request_address.getOffset(), pkt->getSize()); 508 } else if (pkt->getPtr<uint8_t>(true) != NULL) { 509 if ((type == RubyRequestType_LD) || 510 (type == RubyRequestType_IFETCH) || 511 (type == RubyRequestType_RMW_Read) || 512 (type == RubyRequestType_Locked_RMW_Read) || 513 (type == RubyRequestType_Load_Linked)) { 514 memcpy(pkt->getPtr<uint8_t>(true), 515 data.getData(request_address.getOffset(), pkt->getSize()), 516 pkt->getSize()); 517 } else { 518 data.setData(pkt->getPtr<uint8_t>(true), 519 request_address.getOffset(), pkt->getSize()); 520 } 521 } else { 522 DPRINTF(MemoryAccess, 523 "WARNING. Data not transfered from Ruby to M5 for type %s\n", 524 RubyRequestType_to_string(type)); 525 } 526 527 // If using the RubyTester, update the RubyTester sender state's 528 // subBlock with the recieved data. The tester will later access 529 // this state. 530 // Note: RubyPort will access it's sender state before the 531 // RubyTester. 532 if (m_usingRubyTester) { 533 RubyPort::SenderState *reqSenderState = 534 safe_cast<RubyPort::SenderState*>(pkt->senderState); 535 // @todo This is a dangerous assumption on nothing else 536 // modifying the senderState 537 RubyTester::SenderState* testerSenderState = 538 safe_cast<RubyTester::SenderState*>(reqSenderState->predecessor); 539 testerSenderState->subBlock.mergeFrom(data); 540 } 541 542 delete srequest; 543 544 if (g_system_ptr->m_warmup_enabled) { 545 delete pkt; 546 g_system_ptr->m_cache_recorder->enqueueNextFetchRequest(); 547 } else if (g_system_ptr->m_cooldown_enabled) { 548 delete pkt; 549 g_system_ptr->m_cache_recorder->enqueueNextFlushRequest(); 550 } else { 551 ruby_hit_callback(pkt); 552 } 553} 554 555bool 556Sequencer::empty() const 557{ 558 return m_writeRequestTable.empty() && m_readRequestTable.empty(); 559} 560 561RequestStatus 562Sequencer::makeRequest(PacketPtr pkt) 563{ 564 if (m_outstanding_count >= m_max_outstanding_requests) { 565 return RequestStatus_BufferFull; 566 } 567 568 RubyRequestType primary_type = RubyRequestType_NULL; 569 RubyRequestType secondary_type = RubyRequestType_NULL; 570 571 if (pkt->isLLSC()) { 572 // 573 // Alpha LL/SC instructions need to be handled carefully by the cache 574 // coherence protocol to ensure they follow the proper semantics. In 575 // particular, by identifying the operations as atomic, the protocol 576 // should understand that migratory sharing optimizations should not 577 // be performed (i.e. a load between the LL and SC should not steal 578 // away exclusive permission). 579 // 580 if (pkt->isWrite()) { 581 DPRINTF(RubySequencer, "Issuing SC\n"); 582 primary_type = RubyRequestType_Store_Conditional; 583 } else { 584 DPRINTF(RubySequencer, "Issuing LL\n"); 585 assert(pkt->isRead()); 586 primary_type = RubyRequestType_Load_Linked; 587 } 588 secondary_type = RubyRequestType_ATOMIC; 589 } else if (pkt->req->isLocked()) { 590 // 591 // x86 locked instructions are translated to store cache coherence 592 // requests because these requests should always be treated as read 593 // exclusive operations and should leverage any migratory sharing 594 // optimization built into the protocol. 595 // 596 if (pkt->isWrite()) { 597 DPRINTF(RubySequencer, "Issuing Locked RMW Write\n"); 598 primary_type = RubyRequestType_Locked_RMW_Write; 599 } else { 600 DPRINTF(RubySequencer, "Issuing Locked RMW Read\n"); 601 assert(pkt->isRead()); 602 primary_type = RubyRequestType_Locked_RMW_Read; 603 } 604 secondary_type = RubyRequestType_ST; 605 } else { 606 if (pkt->isRead()) { 607 if (pkt->req->isInstFetch()) { 608 primary_type = secondary_type = RubyRequestType_IFETCH; 609 } else { 610#if THE_ISA == X86_ISA 611 uint32_t flags = pkt->req->getFlags(); 612 bool storeCheck = flags & 613 (TheISA::StoreCheck << TheISA::FlagShift); 614#else 615 bool storeCheck = false; 616#endif // X86_ISA 617 if (storeCheck) { 618 primary_type = RubyRequestType_RMW_Read; 619 secondary_type = RubyRequestType_ST; 620 } else { 621 primary_type = secondary_type = RubyRequestType_LD; 622 } 623 } 624 } else if (pkt->isWrite()) { 625 // 626 // Note: M5 packets do not differentiate ST from RMW_Write 627 // 628 primary_type = secondary_type = RubyRequestType_ST; 629 } else if (pkt->isFlush()) { 630 primary_type = secondary_type = RubyRequestType_FLUSH; 631 } else { 632 panic("Unsupported ruby packet type\n"); 633 } 634 } 635 636 RequestStatus status = insertRequest(pkt, primary_type); 637 if (status != RequestStatus_Ready) 638 return status; 639 640 issueRequest(pkt, secondary_type); 641 642 // TODO: issue hardware prefetches here 643 return RequestStatus_Issued; 644} 645 646void 647Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type) 648{ 649 assert(pkt != NULL); 650 int proc_id = -1; 651 if (pkt->req->hasContextId()) { 652 proc_id = pkt->req->contextId(); 653 } 654 655 // If valid, copy the pc to the ruby request 656 Addr pc = 0; 657 if (pkt->req->hasPC()) { 658 pc = pkt->req->getPC(); 659 } 660 661 RubyRequest *msg = new RubyRequest(clockEdge(), pkt->getAddr(), 662 pkt->getPtr<uint8_t>(true), 663 pkt->getSize(), pc, secondary_type, 664 RubyAccessMode_Supervisor, pkt, 665 PrefetchBit_No, proc_id); 666 667 DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %s\n", 668 curTick(), m_version, "Seq", "Begin", "", "", 669 msg->getPhysicalAddress(), 670 RubyRequestType_to_string(secondary_type)); 671 672 Cycles latency(0); // initialzed to an null value 673 674 if (secondary_type == RubyRequestType_IFETCH) 675 latency = m_instCache_ptr->getLatency(); 676 else 677 latency = m_dataCache_ptr->getLatency(); 678 679 // Send the message to the cache controller 680 assert(latency > 0); 681 682 assert(m_mandatory_q_ptr != NULL); 683 m_mandatory_q_ptr->enqueue(msg, latency); 684} 685 686template <class KEY, class VALUE> 687std::ostream & 688operator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map) 689{ 690 typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin(); 691 typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end(); 692 693 out << "["; 694 for (; i != end; ++i) 695 out << " " << i->first << "=" << i->second; 696 out << " ]"; 697 698 return out; 699} 700 701void 702Sequencer::print(ostream& out) const 703{ 704 out << "[Sequencer: " << m_version 705 << ", outstanding requests: " << m_outstanding_count 706 << ", read request table: " << m_readRequestTable 707 << ", write request table: " << m_writeRequestTable 708 << "]"; 709} 710 711// this can be called from setState whenever coherence permissions are 712// upgraded when invoked, coherence violations will be checked for the 713// given block 714void 715Sequencer::checkCoherence(const Address& addr) 716{ 717#ifdef CHECK_COHERENCE 718 g_system_ptr->checkGlobalCoherenceInvariant(addr); 719#endif 720} 721 722void 723Sequencer::recordRequestType(SequencerRequestType requestType) { 724 DPRINTF(RubyStats, "Recorded statistic: %s\n", 725 SequencerRequestType_to_string(requestType)); 726} 727 728 729void 730Sequencer::evictionCallback(const Address& address) 731{ 732 ruby_eviction_callback(address); 733} 734