Sequencer.cc revision 8174
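The Sequencer is Ruby's CPU-side request interface: it tracks outstanding loads and stores per cache line, issues them to the attached cache controller, and completes them through the read/write callbacks below.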
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "base/str.hh"
#include "base/misc.hh"
#include "cpu/testers/rubytest/RubyTester.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/packet.hh"
#include "params/RubySequencer.hh"

using namespace std;

Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

Sequencer::Sequencer(const Params *p)
    : RubyPort(p), deadlockCheckEvent(this)
{
    m_store_waiting_on_load_cycles = 0;
    m_store_waiting_on_store_cycles = 0;
    m_load_waiting_on_store_cycles = 0;
    m_load_waiting_on_load_cycles = 0;

    m_outstanding_count = 0;

    m_max_outstanding_requests = 0;
    m_deadlock_threshold = 0;
    m_instCache_ptr = NULL;
    m_dataCache_ptr = NULL;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);

    m_usingNetworkTester = p->using_network_tester;
}

Sequencer::~Sequencer()
{
}
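// wakeup() is the handler for deadlockCheckEvent: it scans both request
// tables, panics if any request has been outstanding longer than
// m_deadlock_threshold, and reschedules itself while requests remain.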
void
Sequencer::wakeup()
{
    // Check for deadlock of any of the requests
    Time current_time = g_eventQueue_ptr->getTime();

    // Check across all outstanding requests
    int total_outstanding = 0;

    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_readRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->ruby_request.m_PhysicalAddress,
              m_readRequestTable.size(),
              current_time, request->issue_time,
              current_time - request->issue_time);
    }

    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->ruby_request.m_PhysicalAddress,
              m_writeRequestTable.size(),
              current_time, request->issue_time,
              current_time - request->issue_time);
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent,
                 m_deadlock_threshold * g_eventQueue_ptr->getClock() +
                 curTick());
    }
}

void
Sequencer::printStats(ostream & out) const
{
    out << "Sequencer: " << m_name << endl
        << "  store_waiting_on_load_cycles: "
        << m_store_waiting_on_load_cycles << endl
        << "  store_waiting_on_store_cycles: "
        << m_store_waiting_on_store_cycles << endl
        << "  load_waiting_on_load_cycles: "
        << m_load_waiting_on_load_cycles << endl
        << "  load_waiting_on_store_cycles: "
        << m_load_waiting_on_store_cycles << endl;
}

void
Sequencer::printProgress(ostream& out) const
{
#if 0
    int total_demand = 0;
    out << "Sequencer Stats Version " << m_version << endl;
    out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
    out << "---------------" << endl;
    out << "outstanding requests" << endl;

    out << "proc " << m_Read
        << " version Requests = " << m_readRequestTable.size() << endl;

    // print the request table
    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        out << "\tRequest[ " << i << " ] = " << request->type
            << " Address " << rkeys[i]
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    out << "proc " << m_version
        << " Write Requests = " << m_writeRequestTable.size << endl;

    // print the request table
    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        out << "\tRequest[ " << i << " ] = " << request.getType()
            << " Address " << wkeys[i]
            << " Posted " << request.getTime()
            << " PF " << request.getPrefetch() << endl;
        if (request.getPrefetch() == PrefetchBit_No) {
            total_demand++;
        }
    }

    out << endl;

    out << "Total Number Outstanding: " << m_outstanding_count << endl
        << "Total Number Demand     : " << total_demand << endl
        << "Total Number Prefetches : " << m_outstanding_count - total_demand
        << endl << endl << endl;
#endif
}

void
Sequencer::printConfig(ostream& out) const
{
    out << "Sequencer config: " << m_name << endl
        << "  controller: " << m_controller->getName() << endl
        << "  version: " << m_version << endl
        << "  max_outstanding_requests: " << m_max_outstanding_requests << endl
        << "  deadlock_threshold: " << m_deadlock_threshold << endl;
}
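// All request types that can modify the line (stores, RMWs, locked RMWs,
// and LL/SC) are tracked in m_writeRequestTable; loads and instruction
// fetches go in m_readRequestTable.  Both tables are keyed by cache-line
// address, which is what serializes aliased requests to the same line.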
// Insert the request on the correct request table.  Return true if
// the entry was already present.
bool
Sequencer::insertRequest(SequencerRequest* request)
{
    int total_outstanding =
        m_writeRequestTable.size() + m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    // See if we should schedule a deadlock check
    if (deadlockCheckEvent.scheduled() == false) {
        schedule(deadlockCheckEvent, m_deadlock_threshold + curTick());
    }

    Address line_addr(request->ruby_request.m_PhysicalAddress);
    line_addr.makeLineAddress();
    if ((request->ruby_request.m_Type == RubyRequestType_ST) ||
        (request->ruby_request.m_Type == RubyRequestType_RMW_Read) ||
        (request->ruby_request.m_Type == RubyRequestType_RMW_Write) ||
        (request->ruby_request.m_Type == RubyRequestType_Load_Linked) ||
        (request->ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
        (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
        (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write)) {
        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(RequestTable::value_type(line_addr, 0));
        bool success = r.second;
        RequestTable::iterator i = r.first;
        if (!success) {
            i->second = request;
            // return true;

            // drh5: isn't this an error?  do you lose the initial request?
            assert(0);
        }
        i->second = request;
        m_outstanding_count++;
    } else {
        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(RequestTable::value_type(line_addr, 0));
        bool success = r.second;
        RequestTable::iterator i = r.first;
        if (!success) {
            i->second = request;
            // return true;

            // drh5: isn't this an error?  do you lose the initial request?
            assert(0);
        }
        i->second = request;
        m_outstanding_count++;
    }

    g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);

    total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
    assert(m_outstanding_count == total_outstanding);

    return false;
}

void
Sequencer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}

void
Sequencer::removeRequest(SequencerRequest* srequest)
{
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());

    const RubyRequest & ruby_request = srequest->ruby_request;
    Address line_addr(ruby_request.m_PhysicalAddress);
    line_addr.makeLineAddress();
    if ((ruby_request.m_Type == RubyRequestType_ST) ||
        (ruby_request.m_Type == RubyRequestType_RMW_Read) ||
        (ruby_request.m_Type == RubyRequestType_RMW_Write) ||
        (ruby_request.m_Type == RubyRequestType_Load_Linked) ||
        (ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
        (ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
        (ruby_request.m_Type == RubyRequestType_Locked_RMW_Write)) {
        m_writeRequestTable.erase(line_addr);
    } else {
        m_readRequestTable.erase(line_addr);
    }

    markRemoved();
}
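// handleLlsc() implements the per-line lock bit that backs LL/SC: a
// Load_Linked locks the line for this version, a Store_Conditional succeeds
// only if the line is still locked (and always clears the lock), and an
// ordinary store to a locked line breaks the lock.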
bool
Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
{
    //
    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    //
    bool success = true;
    if (request->ruby_request.m_Type == RubyRequestType_Store_Conditional) {
        if (!m_dataCache_ptr->isLocked(address, m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->ruby_request.pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->ruby_request.pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        m_dataCache_ptr->clearLocked(address);
    } else if (request->ruby_request.m_Type == RubyRequestType_Load_Linked) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        m_dataCache_ptr->setLocked(address, m_version);
    } else if (m_dataCache_ptr->isLocked(address, m_version)) {
        //
        // Normal writes should clear the locked address
        //
        m_dataCache_ptr->clearLocked(address);
    }
    return success;
}

void
Sequencer::writeCallback(const Address& address, DataBlock& data)
{
    writeCallback(address, GenericMachineType_NULL, data);
}

void
Sequencer::writeCallback(const Address& address,
                         GenericMachineType mach,
                         DataBlock& data)
{
    writeCallback(address, mach, data, 0, 0, 0);
}

void
Sequencer::writeCallback(const Address& address,
                         GenericMachineType mach,
                         DataBlock& data,
                         Time initialRequestTime,
                         Time forwardRequestTime,
                         Time firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_writeRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    SequencerRequest* request = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->ruby_request.m_Type == RubyRequestType_ST) ||
           (request->ruby_request.m_Type == RubyRequestType_RMW_Read) ||
           (request->ruby_request.m_Type == RubyRequestType_RMW_Write) ||
           (request->ruby_request.m_Type == RubyRequestType_Load_Linked) ||
           (request->ruby_request.m_Type ==
            RubyRequestType_Store_Conditional) ||
           (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
           (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write));

    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    // Not valid for Network_test protocol
    //
    bool success = true;
    if (!m_usingNetworkTester)
        success = handleLlsc(address, request);

    if (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) {
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->ruby_request.m_Type ==
               RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, mach, data, success,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

void
Sequencer::readCallback(const Address& address, DataBlock& data)
{
    readCallback(address, GenericMachineType_NULL, data);
}

void
Sequencer::readCallback(const Address& address,
                        GenericMachineType mach,
                        DataBlock& data)
{
    readCallback(address, mach, data, 0, 0, 0);
}

void
Sequencer::readCallback(const Address& address,
                        GenericMachineType mach,
                        DataBlock& data,
                        Time initialRequestTime,
                        Time forwardRequestTime,
                        Time firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_readRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    SequencerRequest* request = i->second;

    m_readRequestTable.erase(i);
    markRemoved();

    assert((request->ruby_request.m_Type == RubyRequestType_LD) ||
           (request->ruby_request.m_Type == RubyRequestType_IFETCH));

    hitCallback(request, mach, data, true,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}
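// hitCallback() is the common completion path for reads and writes: it
// updates MRU state, profiles the miss latency, copies data between the
// DataBlock and the requester's buffer, and finally hands the packet back
// to the port via ruby_hit_callback().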
void
Sequencer::hitCallback(SequencerRequest* srequest,
                       GenericMachineType mach,
                       DataBlock& data,
                       bool success,
                       Time initialRequestTime,
                       Time forwardRequestTime,
                       Time firstResponseTime)
{
    const RubyRequest & ruby_request = srequest->ruby_request;
    Address request_address(ruby_request.m_PhysicalAddress);
    Address request_line_address(ruby_request.m_PhysicalAddress);
    request_line_address.makeLineAddress();
    RubyRequestType type = ruby_request.m_Type;
    Time issued_time = srequest->issue_time;

    // Set this cache entry to the most recently used
    if (type == RubyRequestType_IFETCH) {
        if (m_instCache_ptr->isTagPresent(request_line_address))
            m_instCache_ptr->setMRU(request_line_address);
    } else {
        if (m_dataCache_ptr->isTagPresent(request_line_address))
            m_dataCache_ptr->setMRU(request_line_address);
    }

    assert(g_eventQueue_ptr->getTime() >= issued_time);
    Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

    // Profile the miss latency for all non-zero demand misses
    if (miss_latency != 0) {
        g_system_ptr->getProfiler()->missLatency(miss_latency, type, mach);

        if (mach == GenericMachineType_L1Cache_wCC) {
            g_system_ptr->getProfiler()->missLatencyWcc(issued_time,
                                                initialRequestTime,
                                                forwardRequestTime,
                                                firstResponseTime,
                                                g_eventQueue_ptr->getTime());
        }

        if (mach == GenericMachineType_Directory) {
            g_system_ptr->getProfiler()->missLatencyDir(issued_time,
                                                initialRequestTime,
                                                forwardRequestTime,
                                                firstResponseTime,
                                                g_eventQueue_ptr->getTime());
        }

        DPRINTFR(ProtocolTrace, "%7s %3s %10s%20s %6s>%-6s %s %d cycles\n",
                 g_eventQueue_ptr->getTime(), m_version, "Seq",
                 success ? "Done" : "SC_Failed", "", "",
                 ruby_request.m_PhysicalAddress, miss_latency);
    }
#if 0
    if (request.getPrefetch() == PrefetchBit_Yes) {
        return; // Ignore the prefetch
    }
#endif

    // update the data
    if (ruby_request.data != NULL) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked)) {
            memcpy(ruby_request.data,
                   data.getData(request_address.getOffset(),
                                ruby_request.m_Size),
                   ruby_request.m_Size);
        } else {
            data.setData(ruby_request.data, request_address.getOffset(),
                         ruby_request.m_Size);
        }
    } else {
        DPRINTF(MemoryAccess,
                "WARNING.  Data not transferred from Ruby to M5 for type %s\n",
                RubyRequestType_to_string(type));
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data.  The tester will later access
    // this state.
    // Note: RubyPort will access its sender state before the
    // RubyTester.
    if (m_usingRubyTester) {
        RubyPort::SenderState *requestSenderState =
            safe_cast<RubyPort::SenderState*>(ruby_request.pkt->senderState);
        RubyTester::SenderState* testerSenderState =
            safe_cast<RubyTester::SenderState*>(requestSenderState->saved);
        testerSenderState->subBlock->mergeFrom(data);
    }

    ruby_hit_callback(ruby_request.pkt);
    delete srequest;
}
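// getRequestStatus() classifies an incoming request: RequestStatus_Aliased
// if a load or store to the same cache line is already outstanding (the
// *_waiting_on_* counters reported by printStats() are updated here),
// RequestStatus_BufferFull if the outstanding-request window is full, and
// RequestStatus_Ready otherwise.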
RequestStatus
Sequencer::getRequestStatus(const RubyRequest& request)
{
    bool is_outstanding_store =
        !!m_writeRequestTable.count(line_address(request.m_PhysicalAddress));
    bool is_outstanding_load =
        !!m_readRequestTable.count(line_address(request.m_PhysicalAddress));
    if (is_outstanding_store) {
        if ((request.m_Type == RubyRequestType_LD) ||
            (request.m_Type == RubyRequestType_IFETCH) ||
            (request.m_Type == RubyRequestType_RMW_Read)) {
            m_store_waiting_on_load_cycles++;
        } else {
            m_store_waiting_on_store_cycles++;
        }
        return RequestStatus_Aliased;
    } else if (is_outstanding_load) {
        if ((request.m_Type == RubyRequestType_ST) ||
            (request.m_Type == RubyRequestType_RMW_Write)) {
            m_load_waiting_on_store_cycles++;
        } else {
            m_load_waiting_on_load_cycles++;
        }
        return RequestStatus_Aliased;
    }

    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    return RequestStatus_Ready;
}

bool
Sequencer::empty() const
{
    return m_writeRequestTable.empty() && m_readRequestTable.empty();
}

RequestStatus
Sequencer::makeRequest(const RubyRequest &request)
{
    assert(request.m_PhysicalAddress.getOffset() + request.m_Size <=
           RubySystem::getBlockSizeBytes());
    RequestStatus status = getRequestStatus(request);
    if (status != RequestStatus_Ready)
        return status;

    SequencerRequest *srequest =
        new SequencerRequest(request, g_eventQueue_ptr->getTime());
    bool found = insertRequest(srequest);
    if (found) {
        panic("Sequencer::makeRequest should never be called if the "
              "request is already outstanding\n");
        return RequestStatus_NULL;
    }

    issueRequest(request);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}
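// Sketch of the intended caller flow (illustrative only; the actual retry
// handling lives in RubyPort, not here):
//
//     RequestStatus status = sequencer->makeRequest(ruby_request);
//     if (status == RequestStatus_Issued) {
//         // readCallback/writeCallback will eventually complete the
//         // packet through ruby_hit_callback().
//     } else {
//         // Aliased or BufferFull: the caller must stall and retry the
//         // packet later.
//     }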
void
Sequencer::issueRequest(const RubyRequest& request)
{
    // TODO: Eliminate RubyRequest being copied again.

    RubyRequestType ctype;
    switch (request.m_Type) {
      case RubyRequestType_IFETCH:
        ctype = RubyRequestType_IFETCH;
        break;
      case RubyRequestType_LD:
        ctype = RubyRequestType_LD;
        break;
      case RubyRequestType_ST:
      case RubyRequestType_RMW_Read:
      case RubyRequestType_RMW_Write:
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
        //
      case RubyRequestType_Locked_RMW_Read:
      case RubyRequestType_Locked_RMW_Write:
        ctype = RubyRequestType_ST;
        break;
        //
        // Alpha LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics. In
        // particular, by identifying the operations as atomic, the protocol
        // should understand that migratory sharing optimizations should not
        // be performed (i.e. a load between the LL and SC should not steal
        // away exclusive permission).
        //
      case RubyRequestType_Load_Linked:
      case RubyRequestType_Store_Conditional:
        ctype = RubyRequestType_ATOMIC;
        break;
      default:
        assert(0);
    }

    RubyAccessMode amtype;
    switch (request.m_AccessMode) {
      case RubyAccessMode_User:
        amtype = RubyAccessMode_User;
        break;
      case RubyAccessMode_Supervisor:
        amtype = RubyAccessMode_Supervisor;
        break;
      case RubyAccessMode_Device:
        amtype = RubyAccessMode_User;
        break;
      default:
        assert(0);
    }

    Address line_addr(request.m_PhysicalAddress);
    line_addr.makeLineAddress();
    RubyRequest *msg = new RubyRequest(request.m_PhysicalAddress.getAddress(),
                                       request.data, request.m_Size,
                                       request.m_ProgramCounter.getAddress(),
                                       ctype, amtype, request.pkt,
                                       PrefetchBit_No, request.proc_id);

    DPRINTFR(ProtocolTrace, "%7s %3s %10s%20s %6s>%-6s %s %s\n",
             g_eventQueue_ptr->getTime(), m_version, "Seq", "Begin", "", "",
             request.m_PhysicalAddress,
             RubyRequestType_to_string(request.m_Type));

    Time latency = 0;  // initialized to a null value

    if (request.m_Type == RubyRequestType_IFETCH)
        latency = m_instCache_ptr->getLatency();
    else
        latency = m_dataCache_ptr->getLatency();

    // Send the message to the cache controller
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, latency);
}

#if 0
bool
Sequencer::tryCacheAccess(const Address& addr, RubyRequestType type,
                          RubyAccessMode access_mode,
                          int size, DataBlock*& data_ptr)
{
    CacheMemory *cache =
        (type == RubyRequestType_IFETCH) ? m_instCache_ptr : m_dataCache_ptr;

    return cache->tryCacheAccess(line_address(addr), type, data_ptr);
}
#endif
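// Stream operator used by Sequencer::print() below to dump the read and
// write request tables, which are m5::hash_map instances.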
template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map)
{
    typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin();
    typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end();

    out << "[";
    for (; i != end; ++i)
        out << " " << i->first << "=" << i->second;
    out << " ]";

    return out;
}

void
Sequencer::print(ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", read request table: " << m_readRequestTable
        << ", write request table: " << m_writeRequestTable
        << "]";
}

// this can be called from setState whenever coherence permissions are
// upgraded; when invoked, coherence violations will be checked for the
// given block
void
Sequencer::checkCoherence(const Address& addr)
{
#ifdef CHECK_COHERENCE
    g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}