// Sequencer.cc revision 9104

/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
27 */ 28 29#include "base/misc.hh" 30#include "base/str.hh" 31#include "config/the_isa.hh" 32#if THE_ISA == X86_ISA 33#include "arch/x86/insts/microldstop.hh" 34#endif // X86_ISA 35#include "cpu/testers/rubytest/RubyTester.hh" 36#include "debug/MemoryAccess.hh" 37#include "debug/ProtocolTrace.hh" 38#include "debug/RubySequencer.hh" 39#include "debug/RubyStats.hh" 40#include "mem/protocol/PrefetchBit.hh" 41#include "mem/protocol/RubyAccessMode.hh" 42#include "mem/ruby/buffers/MessageBuffer.hh" 43#include "mem/ruby/common/Global.hh" 44#include "mem/ruby/profiler/Profiler.hh" 45#include "mem/ruby/slicc_interface/RubyRequest.hh" 46#include "mem/ruby/system/CacheMemory.hh" 47#include "mem/ruby/system/Sequencer.hh" 48#include "mem/ruby/system/System.hh" 49#include "mem/packet.hh" 50#include "params/RubySequencer.hh" 51 52using namespace std; 53 54Sequencer * 55RubySequencerParams::create() 56{ 57 return new Sequencer(this); 58} 59 60Sequencer::Sequencer(const Params *p) 61 : RubyPort(p), deadlockCheckEvent(this) 62{ 63 m_store_waiting_on_load_cycles = 0; 64 m_store_waiting_on_store_cycles = 0; 65 m_load_waiting_on_store_cycles = 0; 66 m_load_waiting_on_load_cycles = 0; 67 68 m_outstanding_count = 0; 69 70 m_instCache_ptr = p->icache; 71 m_dataCache_ptr = p->dcache; 72 m_max_outstanding_requests = p->max_outstanding_requests; 73 m_deadlock_threshold = p->deadlock_threshold; 74 75 assert(m_max_outstanding_requests > 0); 76 assert(m_deadlock_threshold > 0); 77 assert(m_instCache_ptr != NULL); 78 assert(m_dataCache_ptr != NULL); 79 80 m_usingNetworkTester = p->using_network_tester; 81} 82 83Sequencer::~Sequencer() 84{ 85} 86 87void 88Sequencer::wakeup() 89{ 90 // Check for deadlock of any of the requests 91 Time current_time = g_eventQueue_ptr->getTime(); 92 93 // Check across all outstanding requests 94 int total_outstanding = 0; 95 96 RequestTable::iterator read = m_readRequestTable.begin(); 97 RequestTable::iterator read_end = m_readRequestTable.end(); 98 for (; read != 
read_end; ++read) { 99 SequencerRequest* request = read->second; 100 if (current_time - request->issue_time < m_deadlock_threshold) 101 continue; 102 103 panic("Possible Deadlock detected. Aborting!\n" 104 "version: %d request.paddr: 0x%x m_readRequestTable: %d " 105 "current time: %u issue_time: %d difference: %d\n", m_version, 106 Address(request->pkt->getAddr()), m_readRequestTable.size(), 107 current_time, request->issue_time, 108 current_time - request->issue_time); 109 } 110 111 RequestTable::iterator write = m_writeRequestTable.begin(); 112 RequestTable::iterator write_end = m_writeRequestTable.end(); 113 for (; write != write_end; ++write) { 114 SequencerRequest* request = write->second; 115 if (current_time - request->issue_time < m_deadlock_threshold) 116 continue; 117 118 panic("Possible Deadlock detected. Aborting!\n" 119 "version: %d request.paddr: 0x%x m_writeRequestTable: %d " 120 "current time: %u issue_time: %d difference: %d\n", m_version, 121 Address(request->pkt->getAddr()), m_writeRequestTable.size(), 122 current_time, request->issue_time, 123 current_time - request->issue_time); 124 } 125 126 total_outstanding += m_writeRequestTable.size(); 127 total_outstanding += m_readRequestTable.size(); 128 129 assert(m_outstanding_count == total_outstanding); 130 131 if (m_outstanding_count > 0) { 132 // If there are still outstanding requests, keep checking 133 schedule(deadlockCheckEvent, 134 m_deadlock_threshold * g_eventQueue_ptr->getClock() + 135 curTick()); 136 } 137} 138 139void 140Sequencer::printStats(ostream & out) const 141{ 142 out << "Sequencer: " << m_name << endl 143 << " store_waiting_on_load_cycles: " 144 << m_store_waiting_on_load_cycles << endl 145 << " store_waiting_on_store_cycles: " 146 << m_store_waiting_on_store_cycles << endl 147 << " load_waiting_on_load_cycles: " 148 << m_load_waiting_on_load_cycles << endl 149 << " load_waiting_on_store_cycles: " 150 << m_load_waiting_on_store_cycles << endl; 151} 152 153void 
154Sequencer::printProgress(ostream& out) const 155{ 156#if 0 157 int total_demand = 0; 158 out << "Sequencer Stats Version " << m_version << endl; 159 out << "Current time = " << g_eventQueue_ptr->getTime() << endl; 160 out << "---------------" << endl; 161 out << "outstanding requests" << endl; 162 163 out << "proc " << m_Read 164 << " version Requests = " << m_readRequestTable.size() << endl; 165 166 // print the request table 167 RequestTable::iterator read = m_readRequestTable.begin(); 168 RequestTable::iterator read_end = m_readRequestTable.end(); 169 for (; read != read_end; ++read) { 170 SequencerRequest* request = read->second; 171 out << "\tRequest[ " << i << " ] = " << request->type 172 << " Address " << rkeys[i] 173 << " Posted " << request->issue_time 174 << " PF " << PrefetchBit_No << endl; 175 total_demand++; 176 } 177 178 out << "proc " << m_version 179 << " Write Requests = " << m_writeRequestTable.size << endl; 180 181 // print the request table 182 RequestTable::iterator write = m_writeRequestTable.begin(); 183 RequestTable::iterator write_end = m_writeRequestTable.end(); 184 for (; write != write_end; ++write) { 185 SequencerRequest* request = write->second; 186 out << "\tRequest[ " << i << " ] = " << request.getType() 187 << " Address " << wkeys[i] 188 << " Posted " << request.getTime() 189 << " PF " << request.getPrefetch() << endl; 190 if (request.getPrefetch() == PrefetchBit_No) { 191 total_demand++; 192 } 193 } 194 195 out << endl; 196 197 out << "Total Number Outstanding: " << m_outstanding_count << endl 198 << "Total Number Demand : " << total_demand << endl 199 << "Total Number Prefetches : " << m_outstanding_count - total_demand 200 << endl << endl << endl; 201#endif 202} 203 204void 205Sequencer::printConfig(ostream& out) const 206{ 207 out << "Seqeuncer config: " << m_name << endl 208 << " controller: " << m_controller->getName() << endl 209 << " version: " << m_version << endl 210 << " max_outstanding_requests: " << 
m_max_outstanding_requests << endl 211 << " deadlock_threshold: " << m_deadlock_threshold << endl; 212} 213 214// Insert the request on the correct request table. Return true if 215// the entry was already present. 216RequestStatus 217Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type) 218{ 219 assert(m_outstanding_count == 220 (m_writeRequestTable.size() + m_readRequestTable.size())); 221 222 // See if we should schedule a deadlock check 223 if (deadlockCheckEvent.scheduled() == false) { 224 schedule(deadlockCheckEvent, 225 m_deadlock_threshold * g_eventQueue_ptr->getClock() 226 + curTick()); 227 } 228 229 Address line_addr(pkt->getAddr()); 230 line_addr.makeLineAddress(); 231 if ((request_type == RubyRequestType_ST) || 232 (request_type == RubyRequestType_RMW_Read) || 233 (request_type == RubyRequestType_RMW_Write) || 234 (request_type == RubyRequestType_Load_Linked) || 235 (request_type == RubyRequestType_Store_Conditional) || 236 (request_type == RubyRequestType_Locked_RMW_Read) || 237 (request_type == RubyRequestType_Locked_RMW_Write) || 238 (request_type == RubyRequestType_FLUSH)) { 239 240 // Check if there is any outstanding read request for the same 241 // cache line. 242 if (m_readRequestTable.count(line_addr) > 0) { 243 m_store_waiting_on_load_cycles++; 244 return RequestStatus_Aliased; 245 } 246 247 pair<RequestTable::iterator, bool> r = 248 m_writeRequestTable.insert(RequestTable::value_type(line_addr, 0)); 249 if (r.second) { 250 RequestTable::iterator i = r.first; 251 i->second = new SequencerRequest(pkt, request_type, 252 g_eventQueue_ptr->getTime()); 253 m_outstanding_count++; 254 } else { 255 // There is an outstanding write request for the cache line 256 m_store_waiting_on_store_cycles++; 257 return RequestStatus_Aliased; 258 } 259 } else { 260 // Check if there is any outstanding write request for the same 261 // cache line. 
262 if (m_writeRequestTable.count(line_addr) > 0) { 263 m_load_waiting_on_store_cycles++; 264 return RequestStatus_Aliased; 265 } 266 267 pair<RequestTable::iterator, bool> r = 268 m_readRequestTable.insert(RequestTable::value_type(line_addr, 0)); 269 270 if (r.second) { 271 RequestTable::iterator i = r.first; 272 i->second = new SequencerRequest(pkt, request_type, 273 g_eventQueue_ptr->getTime()); 274 m_outstanding_count++; 275 } else { 276 // There is an outstanding read request for the cache line 277 m_load_waiting_on_load_cycles++; 278 return RequestStatus_Aliased; 279 } 280 } 281 282 g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count); 283 assert(m_outstanding_count == 284 (m_writeRequestTable.size() + m_readRequestTable.size())); 285 286 return RequestStatus_Ready; 287} 288 289void 290Sequencer::markRemoved() 291{ 292 m_outstanding_count--; 293 assert(m_outstanding_count == 294 m_writeRequestTable.size() + m_readRequestTable.size()); 295} 296 297void 298Sequencer::removeRequest(SequencerRequest* srequest) 299{ 300 assert(m_outstanding_count == 301 m_writeRequestTable.size() + m_readRequestTable.size()); 302 303 Address line_addr(srequest->pkt->getAddr()); 304 line_addr.makeLineAddress(); 305 if ((srequest->m_type == RubyRequestType_ST) || 306 (srequest->m_type == RubyRequestType_RMW_Read) || 307 (srequest->m_type == RubyRequestType_RMW_Write) || 308 (srequest->m_type == RubyRequestType_Load_Linked) || 309 (srequest->m_type == RubyRequestType_Store_Conditional) || 310 (srequest->m_type == RubyRequestType_Locked_RMW_Read) || 311 (srequest->m_type == RubyRequestType_Locked_RMW_Write)) { 312 m_writeRequestTable.erase(line_addr); 313 } else { 314 m_readRequestTable.erase(line_addr); 315 } 316 317 markRemoved(); 318} 319 320bool 321Sequencer::handleLlsc(const Address& address, SequencerRequest* request) 322{ 323 // 324 // The success flag indicates whether the LLSC operation was successful. 
325 // LL ops will always succeed, but SC may fail if the cache line is no 326 // longer locked. 327 // 328 bool success = true; 329 if (request->m_type == RubyRequestType_Store_Conditional) { 330 if (!m_dataCache_ptr->isLocked(address, m_version)) { 331 // 332 // For failed SC requests, indicate the failure to the cpu by 333 // setting the extra data to zero. 334 // 335 request->pkt->req->setExtraData(0); 336 success = false; 337 } else { 338 // 339 // For successful SC requests, indicate the success to the cpu by 340 // setting the extra data to one. 341 // 342 request->pkt->req->setExtraData(1); 343 } 344 // 345 // Independent of success, all SC operations must clear the lock 346 // 347 m_dataCache_ptr->clearLocked(address); 348 } else if (request->m_type == RubyRequestType_Load_Linked) { 349 // 350 // Note: To fully follow Alpha LLSC semantics, should the LL clear any 351 // previously locked cache lines? 352 // 353 m_dataCache_ptr->setLocked(address, m_version); 354 } else if ((m_dataCache_ptr->isTagPresent(address)) && 355 (m_dataCache_ptr->isLocked(address, m_version))) { 356 // 357 // Normal writes should clear the locked address 358 // 359 m_dataCache_ptr->clearLocked(address); 360 } 361 return success; 362} 363 364void 365Sequencer::writeCallback(const Address& address, DataBlock& data) 366{ 367 writeCallback(address, GenericMachineType_NULL, data); 368} 369 370void 371Sequencer::writeCallback(const Address& address, 372 GenericMachineType mach, 373 DataBlock& data) 374{ 375 writeCallback(address, mach, data, 0, 0, 0); 376} 377 378void 379Sequencer::writeCallback(const Address& address, 380 GenericMachineType mach, 381 DataBlock& data, 382 Time initialRequestTime, 383 Time forwardRequestTime, 384 Time firstResponseTime) 385{ 386 assert(address == line_address(address)); 387 assert(m_writeRequestTable.count(line_address(address))); 388 389 RequestTable::iterator i = m_writeRequestTable.find(address); 390 assert(i != m_writeRequestTable.end()); 391 
SequencerRequest* request = i->second; 392 393 m_writeRequestTable.erase(i); 394 markRemoved(); 395 396 assert((request->m_type == RubyRequestType_ST) || 397 (request->m_type == RubyRequestType_ATOMIC) || 398 (request->m_type == RubyRequestType_RMW_Read) || 399 (request->m_type == RubyRequestType_RMW_Write) || 400 (request->m_type == RubyRequestType_Load_Linked) || 401 (request->m_type == RubyRequestType_Store_Conditional) || 402 (request->m_type == RubyRequestType_Locked_RMW_Read) || 403 (request->m_type == RubyRequestType_Locked_RMW_Write) || 404 (request->m_type == RubyRequestType_FLUSH)); 405 406 407 // 408 // For Alpha, properly handle LL, SC, and write requests with respect to 409 // locked cache blocks. 410 // 411 // Not valid for Network_test protocl 412 // 413 bool success = true; 414 if(!m_usingNetworkTester) 415 success = handleLlsc(address, request); 416 417 if (request->m_type == RubyRequestType_Locked_RMW_Read) { 418 m_controller->blockOnQueue(address, m_mandatory_q_ptr); 419 } else if (request->m_type == RubyRequestType_Locked_RMW_Write) { 420 m_controller->unblock(address); 421 } 422 423 hitCallback(request, mach, data, success, 424 initialRequestTime, forwardRequestTime, firstResponseTime); 425} 426 427void 428Sequencer::readCallback(const Address& address, DataBlock& data) 429{ 430 readCallback(address, GenericMachineType_NULL, data); 431} 432 433void 434Sequencer::readCallback(const Address& address, 435 GenericMachineType mach, 436 DataBlock& data) 437{ 438 readCallback(address, mach, data, 0, 0, 0); 439} 440 441void 442Sequencer::readCallback(const Address& address, 443 GenericMachineType mach, 444 DataBlock& data, 445 Time initialRequestTime, 446 Time forwardRequestTime, 447 Time firstResponseTime) 448{ 449 assert(address == line_address(address)); 450 assert(m_readRequestTable.count(line_address(address))); 451 452 RequestTable::iterator i = m_readRequestTable.find(address); 453 assert(i != m_readRequestTable.end()); 454 SequencerRequest* 
request = i->second; 455 456 m_readRequestTable.erase(i); 457 markRemoved(); 458 459 assert((request->m_type == RubyRequestType_LD) || 460 (request->m_type == RubyRequestType_IFETCH)); 461 462 hitCallback(request, mach, data, true, 463 initialRequestTime, forwardRequestTime, firstResponseTime); 464} 465 466void 467Sequencer::hitCallback(SequencerRequest* srequest, 468 GenericMachineType mach, 469 DataBlock& data, 470 bool success, 471 Time initialRequestTime, 472 Time forwardRequestTime, 473 Time firstResponseTime) 474{ 475 PacketPtr pkt = srequest->pkt; 476 Address request_address(pkt->getAddr()); 477 Address request_line_address(pkt->getAddr()); 478 request_line_address.makeLineAddress(); 479 RubyRequestType type = srequest->m_type; 480 Time issued_time = srequest->issue_time; 481 482 // Set this cache entry to the most recently used 483 if (type == RubyRequestType_IFETCH) { 484 m_instCache_ptr->setMRU(request_line_address); 485 } else { 486 m_dataCache_ptr->setMRU(request_line_address); 487 } 488 489 assert(g_eventQueue_ptr->getTime() >= issued_time); 490 Time miss_latency = g_eventQueue_ptr->getTime() - issued_time; 491 492 // Profile the miss latency for all non-zero demand misses 493 if (miss_latency != 0) { 494 g_system_ptr->getProfiler()->missLatency(miss_latency, type, mach); 495 496 if (mach == GenericMachineType_L1Cache_wCC) { 497 g_system_ptr->getProfiler()->missLatencyWcc(issued_time, 498 initialRequestTime, 499 forwardRequestTime, 500 firstResponseTime, 501 g_eventQueue_ptr->getTime()); 502 } 503 504 if (mach == GenericMachineType_Directory) { 505 g_system_ptr->getProfiler()->missLatencyDir(issued_time, 506 initialRequestTime, 507 forwardRequestTime, 508 firstResponseTime, 509 g_eventQueue_ptr->getTime()); 510 } 511 512 DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n", 513 curTick(), m_version, "Seq", 514 success ? 
"Done" : "SC_Failed", "", "", 515 request_address, miss_latency); 516 } 517 518 // update the data 519 if (g_system_ptr->m_warmup_enabled) { 520 assert(pkt->getPtr<uint8_t>(false) != NULL); 521 data.setData(pkt->getPtr<uint8_t>(false), 522 request_address.getOffset(), pkt->getSize()); 523 } else if (pkt->getPtr<uint8_t>(true) != NULL) { 524 if ((type == RubyRequestType_LD) || 525 (type == RubyRequestType_IFETCH) || 526 (type == RubyRequestType_RMW_Read) || 527 (type == RubyRequestType_Locked_RMW_Read) || 528 (type == RubyRequestType_Load_Linked)) { 529 memcpy(pkt->getPtr<uint8_t>(true), 530 data.getData(request_address.getOffset(), pkt->getSize()), 531 pkt->getSize()); 532 } else { 533 data.setData(pkt->getPtr<uint8_t>(true), 534 request_address.getOffset(), pkt->getSize()); 535 } 536 } else { 537 DPRINTF(MemoryAccess, 538 "WARNING. Data not transfered from Ruby to M5 for type %s\n", 539 RubyRequestType_to_string(type)); 540 } 541 542 // If using the RubyTester, update the RubyTester sender state's 543 // subBlock with the recieved data. The tester will later access 544 // this state. 545 // Note: RubyPort will access it's sender state before the 546 // RubyTester. 
547 if (m_usingRubyTester) { 548 RubyPort::SenderState *requestSenderState = 549 safe_cast<RubyPort::SenderState*>(pkt->senderState); 550 RubyTester::SenderState* testerSenderState = 551 safe_cast<RubyTester::SenderState*>(requestSenderState->saved); 552 testerSenderState->subBlock->mergeFrom(data); 553 } 554 555 delete srequest; 556 557 if (g_system_ptr->m_warmup_enabled) { 558 delete pkt; 559 g_system_ptr->m_cache_recorder->enqueueNextFetchRequest(); 560 } else if (g_system_ptr->m_cooldown_enabled) { 561 delete pkt; 562 g_system_ptr->m_cache_recorder->enqueueNextFlushRequest(); 563 } else { 564 ruby_hit_callback(pkt); 565 } 566} 567 568bool 569Sequencer::empty() const 570{ 571 return m_writeRequestTable.empty() && m_readRequestTable.empty(); 572} 573 574RequestStatus 575Sequencer::makeRequest(PacketPtr pkt) 576{ 577 if (m_outstanding_count >= m_max_outstanding_requests) { 578 return RequestStatus_BufferFull; 579 } 580 581 RubyRequestType primary_type = RubyRequestType_NULL; 582 RubyRequestType secondary_type = RubyRequestType_NULL; 583 584 if (pkt->isLLSC()) { 585 // 586 // Alpha LL/SC instructions need to be handled carefully by the cache 587 // coherence protocol to ensure they follow the proper semantics. In 588 // particular, by identifying the operations as atomic, the protocol 589 // should understand that migratory sharing optimizations should not 590 // be performed (i.e. a load between the LL and SC should not steal 591 // away exclusive permission). 
592 // 593 if (pkt->isWrite()) { 594 DPRINTF(RubySequencer, "Issuing SC\n"); 595 primary_type = RubyRequestType_Store_Conditional; 596 } else { 597 DPRINTF(RubySequencer, "Issuing LL\n"); 598 assert(pkt->isRead()); 599 primary_type = RubyRequestType_Load_Linked; 600 } 601 secondary_type = RubyRequestType_ATOMIC; 602 } else if (pkt->req->isLocked()) { 603 // 604 // x86 locked instructions are translated to store cache coherence 605 // requests because these requests should always be treated as read 606 // exclusive operations and should leverage any migratory sharing 607 // optimization built into the protocol. 608 // 609 if (pkt->isWrite()) { 610 DPRINTF(RubySequencer, "Issuing Locked RMW Write\n"); 611 primary_type = RubyRequestType_Locked_RMW_Write; 612 } else { 613 DPRINTF(RubySequencer, "Issuing Locked RMW Read\n"); 614 assert(pkt->isRead()); 615 primary_type = RubyRequestType_Locked_RMW_Read; 616 } 617 secondary_type = RubyRequestType_ST; 618 } else { 619 if (pkt->isRead()) { 620 if (pkt->req->isInstFetch()) { 621 primary_type = secondary_type = RubyRequestType_IFETCH; 622 } else { 623#if THE_ISA == X86_ISA 624 uint32_t flags = pkt->req->getFlags(); 625 bool storeCheck = flags & 626 (TheISA::StoreCheck << TheISA::FlagShift); 627#else 628 bool storeCheck = false; 629#endif // X86_ISA 630 if (storeCheck) { 631 primary_type = RubyRequestType_RMW_Read; 632 secondary_type = RubyRequestType_ST; 633 } else { 634 primary_type = secondary_type = RubyRequestType_LD; 635 } 636 } 637 } else if (pkt->isWrite()) { 638 // 639 // Note: M5 packets do not differentiate ST from RMW_Write 640 // 641 primary_type = secondary_type = RubyRequestType_ST; 642 } else if (pkt->isFlush()) { 643 primary_type = secondary_type = RubyRequestType_FLUSH; 644 } else { 645 panic("Unsupported ruby packet type\n"); 646 } 647 } 648 649 RequestStatus status = insertRequest(pkt, primary_type); 650 if (status != RequestStatus_Ready) 651 return status; 652 653 issueRequest(pkt, secondary_type); 654 655 
// TODO: issue hardware prefetches here 656 return RequestStatus_Issued; 657} 658 659void 660Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type) 661{ 662 int proc_id = -1; 663 if (pkt != NULL && pkt->req->hasContextId()) { 664 proc_id = pkt->req->contextId(); 665 } 666 667 // If valid, copy the pc to the ruby request 668 Addr pc = 0; 669 if (pkt->req->hasPC()) { 670 pc = pkt->req->getPC(); 671 } 672 673 RubyRequest *msg = new RubyRequest(pkt->getAddr(), 674 pkt->getPtr<uint8_t>(true), 675 pkt->getSize(), pc, secondary_type, 676 RubyAccessMode_Supervisor, pkt, 677 PrefetchBit_No, proc_id); 678 679 DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %s\n", 680 curTick(), m_version, "Seq", "Begin", "", "", 681 msg->getPhysicalAddress(), 682 RubyRequestType_to_string(secondary_type)); 683 684 Time latency = 0; // initialzed to an null value 685 686 if (secondary_type == RubyRequestType_IFETCH) 687 latency = m_instCache_ptr->getLatency(); 688 else 689 latency = m_dataCache_ptr->getLatency(); 690 691 // Send the message to the cache controller 692 assert(latency > 0); 693 694 assert(m_mandatory_q_ptr != NULL); 695 m_mandatory_q_ptr->enqueue(msg, latency); 696} 697 698template <class KEY, class VALUE> 699std::ostream & 700operator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map) 701{ 702 typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin(); 703 typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end(); 704 705 out << "["; 706 for (; i != end; ++i) 707 out << " " << i->first << "=" << i->second; 708 out << " ]"; 709 710 return out; 711} 712 713void 714Sequencer::print(ostream& out) const 715{ 716 out << "[Sequencer: " << m_version 717 << ", outstanding requests: " << m_outstanding_count 718 << ", read request table: " << m_readRequestTable 719 << ", write request table: " << m_writeRequestTable 720 << "]"; 721} 722 723// this can be called from setState whenever coherence permissions are 724// upgraded when invoked, coherence 
violations will be checked for the 725// given block 726void 727Sequencer::checkCoherence(const Address& addr) 728{ 729#ifdef CHECK_COHERENCE 730 g_system_ptr->checkGlobalCoherenceInvariant(addr); 731#endif 732} 733 734void 735Sequencer::recordRequestType(SequencerRequestType requestType) { 736 DPRINTF(RubyStats, "Recorded statistic: %s\n", 737 SequencerRequestType_to_string(requestType)); 738} 739 740 741void 742Sequencer::evictionCallback(const Address& address) 743{ 744 ruby_eviction_callback(address); 745} 746