// Sequencer.cc revision 8967:fc2c4db64ded
1/* 2 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 */ 28 29#include "base/misc.hh" 30#include "base/str.hh" 31#include "config/the_isa.hh" 32#if THE_ISA == X86_ISA 33#include "arch/x86/insts/microldstop.hh" 34#endif // X86_ISA 35#include "cpu/testers/rubytest/RubyTester.hh" 36#include "debug/MemoryAccess.hh" 37#include "debug/ProtocolTrace.hh" 38#include "debug/RubySequencer.hh" 39#include "mem/protocol/PrefetchBit.hh" 40#include "mem/protocol/RubyAccessMode.hh" 41#include "mem/ruby/buffers/MessageBuffer.hh" 42#include "mem/ruby/common/Global.hh" 43#include "mem/ruby/profiler/Profiler.hh" 44#include "mem/ruby/slicc_interface/RubyRequest.hh" 45#include "mem/ruby/system/CacheMemory.hh" 46#include "mem/ruby/system/Sequencer.hh" 47#include "mem/ruby/system/System.hh" 48#include "mem/packet.hh" 49#include "params/RubySequencer.hh" 50 51using namespace std; 52 53Sequencer * 54RubySequencerParams::create() 55{ 56 return new Sequencer(this); 57} 58 59Sequencer::Sequencer(const Params *p) 60 : RubyPort(p), deadlockCheckEvent(this) 61{ 62 m_store_waiting_on_load_cycles = 0; 63 m_store_waiting_on_store_cycles = 0; 64 m_load_waiting_on_store_cycles = 0; 65 m_load_waiting_on_load_cycles = 0; 66 67 m_outstanding_count = 0; 68 69 m_instCache_ptr = p->icache; 70 m_dataCache_ptr = p->dcache; 71 m_max_outstanding_requests = p->max_outstanding_requests; 72 m_deadlock_threshold = p->deadlock_threshold; 73 74 assert(m_max_outstanding_requests > 0); 75 assert(m_deadlock_threshold > 0); 76 assert(m_instCache_ptr != NULL); 77 assert(m_dataCache_ptr != NULL); 78 79 m_usingNetworkTester = p->using_network_tester; 80} 81 82Sequencer::~Sequencer() 83{ 84} 85 86void 87Sequencer::wakeup() 88{ 89 // Check for deadlock of any of the requests 90 Time current_time = g_eventQueue_ptr->getTime(); 91 92 // Check across all outstanding requests 93 int total_outstanding = 0; 94 95 RequestTable::iterator read = m_readRequestTable.begin(); 96 RequestTable::iterator read_end = m_readRequestTable.end(); 97 for (; read != read_end; ++read) { 98 
SequencerRequest* request = read->second; 99 if (current_time - request->issue_time < m_deadlock_threshold) 100 continue; 101 102 panic("Possible Deadlock detected. Aborting!\n" 103 "version: %d request.paddr: 0x%x m_readRequestTable: %d " 104 "current time: %u issue_time: %d difference: %d\n", m_version, 105 Address(request->pkt->getAddr()), m_readRequestTable.size(), 106 current_time, request->issue_time, 107 current_time - request->issue_time); 108 } 109 110 RequestTable::iterator write = m_writeRequestTable.begin(); 111 RequestTable::iterator write_end = m_writeRequestTable.end(); 112 for (; write != write_end; ++write) { 113 SequencerRequest* request = write->second; 114 if (current_time - request->issue_time < m_deadlock_threshold) 115 continue; 116 117 panic("Possible Deadlock detected. Aborting!\n" 118 "version: %d request.paddr: 0x%x m_writeRequestTable: %d " 119 "current time: %u issue_time: %d difference: %d\n", m_version, 120 Address(request->pkt->getAddr()), m_writeRequestTable.size(), 121 current_time, request->issue_time, 122 current_time - request->issue_time); 123 } 124 125 total_outstanding += m_writeRequestTable.size(); 126 total_outstanding += m_readRequestTable.size(); 127 128 assert(m_outstanding_count == total_outstanding); 129 130 if (m_outstanding_count > 0) { 131 // If there are still outstanding requests, keep checking 132 schedule(deadlockCheckEvent, 133 m_deadlock_threshold * g_eventQueue_ptr->getClock() + 134 curTick()); 135 } 136} 137 138void 139Sequencer::printStats(ostream & out) const 140{ 141 out << "Sequencer: " << m_name << endl 142 << " store_waiting_on_load_cycles: " 143 << m_store_waiting_on_load_cycles << endl 144 << " store_waiting_on_store_cycles: " 145 << m_store_waiting_on_store_cycles << endl 146 << " load_waiting_on_load_cycles: " 147 << m_load_waiting_on_load_cycles << endl 148 << " load_waiting_on_store_cycles: " 149 << m_load_waiting_on_store_cycles << endl; 150} 151 152void 153Sequencer::printProgress(ostream& out) 
const 154{ 155#if 0 156 int total_demand = 0; 157 out << "Sequencer Stats Version " << m_version << endl; 158 out << "Current time = " << g_eventQueue_ptr->getTime() << endl; 159 out << "---------------" << endl; 160 out << "outstanding requests" << endl; 161 162 out << "proc " << m_Read 163 << " version Requests = " << m_readRequestTable.size() << endl; 164 165 // print the request table 166 RequestTable::iterator read = m_readRequestTable.begin(); 167 RequestTable::iterator read_end = m_readRequestTable.end(); 168 for (; read != read_end; ++read) { 169 SequencerRequest* request = read->second; 170 out << "\tRequest[ " << i << " ] = " << request->type 171 << " Address " << rkeys[i] 172 << " Posted " << request->issue_time 173 << " PF " << PrefetchBit_No << endl; 174 total_demand++; 175 } 176 177 out << "proc " << m_version 178 << " Write Requests = " << m_writeRequestTable.size << endl; 179 180 // print the request table 181 RequestTable::iterator write = m_writeRequestTable.begin(); 182 RequestTable::iterator write_end = m_writeRequestTable.end(); 183 for (; write != write_end; ++write) { 184 SequencerRequest* request = write->second; 185 out << "\tRequest[ " << i << " ] = " << request.getType() 186 << " Address " << wkeys[i] 187 << " Posted " << request.getTime() 188 << " PF " << request.getPrefetch() << endl; 189 if (request.getPrefetch() == PrefetchBit_No) { 190 total_demand++; 191 } 192 } 193 194 out << endl; 195 196 out << "Total Number Outstanding: " << m_outstanding_count << endl 197 << "Total Number Demand : " << total_demand << endl 198 << "Total Number Prefetches : " << m_outstanding_count - total_demand 199 << endl << endl << endl; 200#endif 201} 202 203void 204Sequencer::printConfig(ostream& out) const 205{ 206 out << "Seqeuncer config: " << m_name << endl 207 << " controller: " << m_controller->getName() << endl 208 << " version: " << m_version << endl 209 << " max_outstanding_requests: " << m_max_outstanding_requests << endl 210 << " 
deadlock_threshold: " << m_deadlock_threshold << endl; 211} 212 213// Insert the request on the correct request table. Return true if 214// the entry was already present. 215RequestStatus 216Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type) 217{ 218 assert(m_outstanding_count == 219 (m_writeRequestTable.size() + m_readRequestTable.size())); 220 221 // See if we should schedule a deadlock check 222 if (deadlockCheckEvent.scheduled() == false) { 223 schedule(deadlockCheckEvent, m_deadlock_threshold + curTick()); 224 } 225 226 Address line_addr(pkt->getAddr()); 227 line_addr.makeLineAddress(); 228 if ((request_type == RubyRequestType_ST) || 229 (request_type == RubyRequestType_RMW_Read) || 230 (request_type == RubyRequestType_RMW_Write) || 231 (request_type == RubyRequestType_Load_Linked) || 232 (request_type == RubyRequestType_Store_Conditional) || 233 (request_type == RubyRequestType_Locked_RMW_Read) || 234 (request_type == RubyRequestType_Locked_RMW_Write) || 235 (request_type == RubyRequestType_FLUSH)) { 236 237 // Check if there is any outstanding read request for the same 238 // cache line. 239 if (m_readRequestTable.count(line_addr) > 0) { 240 m_store_waiting_on_load_cycles++; 241 return RequestStatus_Aliased; 242 } 243 244 pair<RequestTable::iterator, bool> r = 245 m_writeRequestTable.insert(RequestTable::value_type(line_addr, 0)); 246 if (r.second) { 247 RequestTable::iterator i = r.first; 248 i->second = new SequencerRequest(pkt, request_type, 249 g_eventQueue_ptr->getTime()); 250 m_outstanding_count++; 251 } else { 252 // There is an outstanding write request for the cache line 253 m_store_waiting_on_store_cycles++; 254 return RequestStatus_Aliased; 255 } 256 } else { 257 // Check if there is any outstanding write request for the same 258 // cache line. 
259 if (m_writeRequestTable.count(line_addr) > 0) { 260 m_load_waiting_on_store_cycles++; 261 return RequestStatus_Aliased; 262 } 263 264 pair<RequestTable::iterator, bool> r = 265 m_readRequestTable.insert(RequestTable::value_type(line_addr, 0)); 266 267 if (r.second) { 268 RequestTable::iterator i = r.first; 269 i->second = new SequencerRequest(pkt, request_type, 270 g_eventQueue_ptr->getTime()); 271 m_outstanding_count++; 272 } else { 273 // There is an outstanding read request for the cache line 274 m_load_waiting_on_load_cycles++; 275 return RequestStatus_Aliased; 276 } 277 } 278 279 g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count); 280 assert(m_outstanding_count == 281 (m_writeRequestTable.size() + m_readRequestTable.size())); 282 283 return RequestStatus_Ready; 284} 285 286void 287Sequencer::markRemoved() 288{ 289 m_outstanding_count--; 290 assert(m_outstanding_count == 291 m_writeRequestTable.size() + m_readRequestTable.size()); 292} 293 294void 295Sequencer::removeRequest(SequencerRequest* srequest) 296{ 297 assert(m_outstanding_count == 298 m_writeRequestTable.size() + m_readRequestTable.size()); 299 300 Address line_addr(srequest->pkt->getAddr()); 301 line_addr.makeLineAddress(); 302 if ((srequest->m_type == RubyRequestType_ST) || 303 (srequest->m_type == RubyRequestType_RMW_Read) || 304 (srequest->m_type == RubyRequestType_RMW_Write) || 305 (srequest->m_type == RubyRequestType_Load_Linked) || 306 (srequest->m_type == RubyRequestType_Store_Conditional) || 307 (srequest->m_type == RubyRequestType_Locked_RMW_Read) || 308 (srequest->m_type == RubyRequestType_Locked_RMW_Write)) { 309 m_writeRequestTable.erase(line_addr); 310 } else { 311 m_readRequestTable.erase(line_addr); 312 } 313 314 markRemoved(); 315} 316 317bool 318Sequencer::handleLlsc(const Address& address, SequencerRequest* request) 319{ 320 // 321 // The success flag indicates whether the LLSC operation was successful. 
322 // LL ops will always succeed, but SC may fail if the cache line is no 323 // longer locked. 324 // 325 bool success = true; 326 if (request->m_type == RubyRequestType_Store_Conditional) { 327 if (!m_dataCache_ptr->isLocked(address, m_version)) { 328 // 329 // For failed SC requests, indicate the failure to the cpu by 330 // setting the extra data to zero. 331 // 332 request->pkt->req->setExtraData(0); 333 success = false; 334 } else { 335 // 336 // For successful SC requests, indicate the success to the cpu by 337 // setting the extra data to one. 338 // 339 request->pkt->req->setExtraData(1); 340 } 341 // 342 // Independent of success, all SC operations must clear the lock 343 // 344 m_dataCache_ptr->clearLocked(address); 345 } else if (request->m_type == RubyRequestType_Load_Linked) { 346 // 347 // Note: To fully follow Alpha LLSC semantics, should the LL clear any 348 // previously locked cache lines? 349 // 350 m_dataCache_ptr->setLocked(address, m_version); 351 } else if ((m_dataCache_ptr->isTagPresent(address)) && 352 (m_dataCache_ptr->isLocked(address, m_version))) { 353 // 354 // Normal writes should clear the locked address 355 // 356 m_dataCache_ptr->clearLocked(address); 357 } 358 return success; 359} 360 361void 362Sequencer::writeCallback(const Address& address, DataBlock& data) 363{ 364 writeCallback(address, GenericMachineType_NULL, data); 365} 366 367void 368Sequencer::writeCallback(const Address& address, 369 GenericMachineType mach, 370 DataBlock& data) 371{ 372 writeCallback(address, mach, data, 0, 0, 0); 373} 374 375void 376Sequencer::writeCallback(const Address& address, 377 GenericMachineType mach, 378 DataBlock& data, 379 Time initialRequestTime, 380 Time forwardRequestTime, 381 Time firstResponseTime) 382{ 383 assert(address == line_address(address)); 384 assert(m_writeRequestTable.count(line_address(address))); 385 386 RequestTable::iterator i = m_writeRequestTable.find(address); 387 assert(i != m_writeRequestTable.end()); 388 
SequencerRequest* request = i->second; 389 390 m_writeRequestTable.erase(i); 391 markRemoved(); 392 393 assert((request->m_type == RubyRequestType_ST) || 394 (request->m_type == RubyRequestType_ATOMIC) || 395 (request->m_type == RubyRequestType_RMW_Read) || 396 (request->m_type == RubyRequestType_RMW_Write) || 397 (request->m_type == RubyRequestType_Load_Linked) || 398 (request->m_type == RubyRequestType_Store_Conditional) || 399 (request->m_type == RubyRequestType_Locked_RMW_Read) || 400 (request->m_type == RubyRequestType_Locked_RMW_Write) || 401 (request->m_type == RubyRequestType_FLUSH)); 402 403 404 // 405 // For Alpha, properly handle LL, SC, and write requests with respect to 406 // locked cache blocks. 407 // 408 // Not valid for Network_test protocl 409 // 410 bool success = true; 411 if(!m_usingNetworkTester) 412 success = handleLlsc(address, request); 413 414 if (request->m_type == RubyRequestType_Locked_RMW_Read) { 415 m_controller->blockOnQueue(address, m_mandatory_q_ptr); 416 } else if (request->m_type == RubyRequestType_Locked_RMW_Write) { 417 m_controller->unblock(address); 418 } 419 420 hitCallback(request, mach, data, success, 421 initialRequestTime, forwardRequestTime, firstResponseTime); 422} 423 424void 425Sequencer::readCallback(const Address& address, DataBlock& data) 426{ 427 readCallback(address, GenericMachineType_NULL, data); 428} 429 430void 431Sequencer::readCallback(const Address& address, 432 GenericMachineType mach, 433 DataBlock& data) 434{ 435 readCallback(address, mach, data, 0, 0, 0); 436} 437 438void 439Sequencer::readCallback(const Address& address, 440 GenericMachineType mach, 441 DataBlock& data, 442 Time initialRequestTime, 443 Time forwardRequestTime, 444 Time firstResponseTime) 445{ 446 assert(address == line_address(address)); 447 assert(m_readRequestTable.count(line_address(address))); 448 449 RequestTable::iterator i = m_readRequestTable.find(address); 450 assert(i != m_readRequestTable.end()); 451 SequencerRequest* 
request = i->second; 452 453 m_readRequestTable.erase(i); 454 markRemoved(); 455 456 assert((request->m_type == RubyRequestType_LD) || 457 (request->m_type == RubyRequestType_IFETCH)); 458 459 hitCallback(request, mach, data, true, 460 initialRequestTime, forwardRequestTime, firstResponseTime); 461} 462 463void 464Sequencer::hitCallback(SequencerRequest* srequest, 465 GenericMachineType mach, 466 DataBlock& data, 467 bool success, 468 Time initialRequestTime, 469 Time forwardRequestTime, 470 Time firstResponseTime) 471{ 472 PacketPtr pkt = srequest->pkt; 473 Address request_address(pkt->getAddr()); 474 Address request_line_address(pkt->getAddr()); 475 request_line_address.makeLineAddress(); 476 RubyRequestType type = srequest->m_type; 477 Time issued_time = srequest->issue_time; 478 479 // Set this cache entry to the most recently used 480 if (type == RubyRequestType_IFETCH) { 481 m_instCache_ptr->setMRU(request_line_address); 482 } else { 483 m_dataCache_ptr->setMRU(request_line_address); 484 } 485 486 assert(g_eventQueue_ptr->getTime() >= issued_time); 487 Time miss_latency = g_eventQueue_ptr->getTime() - issued_time; 488 489 // Profile the miss latency for all non-zero demand misses 490 if (miss_latency != 0) { 491 g_system_ptr->getProfiler()->missLatency(miss_latency, type, mach); 492 493 if (mach == GenericMachineType_L1Cache_wCC) { 494 g_system_ptr->getProfiler()->missLatencyWcc(issued_time, 495 initialRequestTime, 496 forwardRequestTime, 497 firstResponseTime, 498 g_eventQueue_ptr->getTime()); 499 } 500 501 if (mach == GenericMachineType_Directory) { 502 g_system_ptr->getProfiler()->missLatencyDir(issued_time, 503 initialRequestTime, 504 forwardRequestTime, 505 firstResponseTime, 506 g_eventQueue_ptr->getTime()); 507 } 508 509 DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n", 510 curTick(), m_version, "Seq", 511 success ? 
"Done" : "SC_Failed", "", "", 512 request_address, miss_latency); 513 } 514 515 // update the data 516 if (g_system_ptr->m_warmup_enabled) { 517 assert(pkt->getPtr<uint8_t>(false) != NULL); 518 data.setData(pkt->getPtr<uint8_t>(false), 519 request_address.getOffset(), pkt->getSize()); 520 } else if (pkt->getPtr<uint8_t>(true) != NULL) { 521 if ((type == RubyRequestType_LD) || 522 (type == RubyRequestType_IFETCH) || 523 (type == RubyRequestType_RMW_Read) || 524 (type == RubyRequestType_Locked_RMW_Read) || 525 (type == RubyRequestType_Load_Linked)) { 526 memcpy(pkt->getPtr<uint8_t>(true), 527 data.getData(request_address.getOffset(), pkt->getSize()), 528 pkt->getSize()); 529 } else { 530 data.setData(pkt->getPtr<uint8_t>(true), 531 request_address.getOffset(), pkt->getSize()); 532 } 533 } else { 534 DPRINTF(MemoryAccess, 535 "WARNING. Data not transfered from Ruby to M5 for type %s\n", 536 RubyRequestType_to_string(type)); 537 } 538 539 // If using the RubyTester, update the RubyTester sender state's 540 // subBlock with the recieved data. The tester will later access 541 // this state. 542 // Note: RubyPort will access it's sender state before the 543 // RubyTester. 
544 if (m_usingRubyTester) { 545 RubyPort::SenderState *requestSenderState = 546 safe_cast<RubyPort::SenderState*>(pkt->senderState); 547 RubyTester::SenderState* testerSenderState = 548 safe_cast<RubyTester::SenderState*>(requestSenderState->saved); 549 testerSenderState->subBlock->mergeFrom(data); 550 } 551 552 delete srequest; 553 554 if (g_system_ptr->m_warmup_enabled) { 555 delete pkt; 556 g_system_ptr->m_cache_recorder->enqueueNextFetchRequest(); 557 } else if (g_system_ptr->m_cooldown_enabled) { 558 delete pkt; 559 g_system_ptr->m_cache_recorder->enqueueNextFlushRequest(); 560 } else { 561 ruby_hit_callback(pkt); 562 } 563} 564 565bool 566Sequencer::empty() const 567{ 568 return m_writeRequestTable.empty() && m_readRequestTable.empty(); 569} 570 571RequestStatus 572Sequencer::makeRequest(PacketPtr pkt) 573{ 574 if (m_outstanding_count >= m_max_outstanding_requests) { 575 return RequestStatus_BufferFull; 576 } 577 578 RubyRequestType primary_type = RubyRequestType_NULL; 579 RubyRequestType secondary_type = RubyRequestType_NULL; 580 581 if (pkt->isLLSC()) { 582 // 583 // Alpha LL/SC instructions need to be handled carefully by the cache 584 // coherence protocol to ensure they follow the proper semantics. In 585 // particular, by identifying the operations as atomic, the protocol 586 // should understand that migratory sharing optimizations should not 587 // be performed (i.e. a load between the LL and SC should not steal 588 // away exclusive permission). 
589 // 590 if (pkt->isWrite()) { 591 DPRINTF(RubySequencer, "Issuing SC\n"); 592 primary_type = RubyRequestType_Store_Conditional; 593 } else { 594 DPRINTF(RubySequencer, "Issuing LL\n"); 595 assert(pkt->isRead()); 596 primary_type = RubyRequestType_Load_Linked; 597 } 598 secondary_type = RubyRequestType_ATOMIC; 599 } else if (pkt->req->isLocked()) { 600 // 601 // x86 locked instructions are translated to store cache coherence 602 // requests because these requests should always be treated as read 603 // exclusive operations and should leverage any migratory sharing 604 // optimization built into the protocol. 605 // 606 if (pkt->isWrite()) { 607 DPRINTF(RubySequencer, "Issuing Locked RMW Write\n"); 608 primary_type = RubyRequestType_Locked_RMW_Write; 609 } else { 610 DPRINTF(RubySequencer, "Issuing Locked RMW Read\n"); 611 assert(pkt->isRead()); 612 primary_type = RubyRequestType_Locked_RMW_Read; 613 } 614 secondary_type = RubyRequestType_ST; 615 } else { 616 if (pkt->isRead()) { 617 if (pkt->req->isInstFetch()) { 618 primary_type = secondary_type = RubyRequestType_IFETCH; 619 } else { 620#if THE_ISA == X86_ISA 621 uint32_t flags = pkt->req->getFlags(); 622 bool storeCheck = flags & 623 (TheISA::StoreCheck << TheISA::FlagShift); 624#else 625 bool storeCheck = false; 626#endif // X86_ISA 627 if (storeCheck) { 628 primary_type = RubyRequestType_RMW_Read; 629 secondary_type = RubyRequestType_ST; 630 } else { 631 primary_type = secondary_type = RubyRequestType_LD; 632 } 633 } 634 } else if (pkt->isWrite()) { 635 // 636 // Note: M5 packets do not differentiate ST from RMW_Write 637 // 638 primary_type = secondary_type = RubyRequestType_ST; 639 } else if (pkt->isFlush()) { 640 primary_type = secondary_type = RubyRequestType_FLUSH; 641 } else { 642 panic("Unsupported ruby packet type\n"); 643 } 644 } 645 646 RequestStatus status = insertRequest(pkt, primary_type); 647 if (status != RequestStatus_Ready) 648 return status; 649 650 issueRequest(pkt, secondary_type); 651 652 
// TODO: issue hardware prefetches here 653 return RequestStatus_Issued; 654} 655 656void 657Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type) 658{ 659 int proc_id = -1; 660 if (pkt != NULL && pkt->req->hasContextId()) { 661 proc_id = pkt->req->contextId(); 662 } 663 664 // If valid, copy the pc to the ruby request 665 Addr pc = 0; 666 if (pkt->req->hasPC()) { 667 pc = pkt->req->getPC(); 668 } 669 670 RubyRequest *msg = new RubyRequest(pkt->getAddr(), 671 pkt->getPtr<uint8_t>(true), 672 pkt->getSize(), pc, secondary_type, 673 RubyAccessMode_Supervisor, pkt, 674 PrefetchBit_No, proc_id); 675 676 DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %s\n", 677 curTick(), m_version, "Seq", "Begin", "", "", 678 msg->getPhysicalAddress(), 679 RubyRequestType_to_string(secondary_type)); 680 681 Time latency = 0; // initialzed to an null value 682 683 if (secondary_type == RubyRequestType_IFETCH) 684 latency = m_instCache_ptr->getLatency(); 685 else 686 latency = m_dataCache_ptr->getLatency(); 687 688 // Send the message to the cache controller 689 assert(latency > 0); 690 691 assert(m_mandatory_q_ptr != NULL); 692 m_mandatory_q_ptr->enqueue(msg, latency); 693} 694 695template <class KEY, class VALUE> 696std::ostream & 697operator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map) 698{ 699 typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin(); 700 typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end(); 701 702 out << "["; 703 for (; i != end; ++i) 704 out << " " << i->first << "=" << i->second; 705 out << " ]"; 706 707 return out; 708} 709 710void 711Sequencer::print(ostream& out) const 712{ 713 out << "[Sequencer: " << m_version 714 << ", outstanding requests: " << m_outstanding_count 715 << ", read request table: " << m_readRequestTable 716 << ", write request table: " << m_writeRequestTable 717 << "]"; 718} 719 720// this can be called from setState whenever coherence permissions are 721// upgraded when invoked, coherence 
violations will be checked for the 722// given block 723void 724Sequencer::checkCoherence(const Address& addr) 725{ 726#ifdef CHECK_COHERENCE 727 g_system_ptr->checkGlobalCoherenceInvariant(addr); 728#endif 729} 730 731void 732Sequencer::evictionCallback(const Address& address) 733{ 734 ruby_eviction_callback(address); 735} 736