// Sequencer.cc, revision 8188
1/* 2 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 */ 28 29#include "base/str.hh" 30#include "base/misc.hh" 31#include "cpu/testers/rubytest/RubyTester.hh" 32#include "mem/protocol/Protocol.hh" 33#include "mem/protocol/Protocol.hh" 34#include "mem/ruby/buffers/MessageBuffer.hh" 35#include "mem/ruby/common/Global.hh" 36#include "mem/ruby/common/SubBlock.hh" 37#include "mem/ruby/slicc_interface/RubyRequest.hh" 38#include "mem/ruby/profiler/Profiler.hh" 39#include "mem/ruby/recorder/Tracer.hh" 40#include "mem/ruby/slicc_interface/AbstractController.hh" 41#include "mem/ruby/system/CacheMemory.hh" 42#include "mem/ruby/system/Sequencer.hh" 43#include "mem/ruby/system/System.hh" 44#include "mem/packet.hh" 45#include "params/RubySequencer.hh" 46 47using namespace std; 48 49Sequencer * 50RubySequencerParams::create() 51{ 52 return new Sequencer(this); 53} 54 55Sequencer::Sequencer(const Params *p) 56 : RubyPort(p), deadlockCheckEvent(this) 57{ 58 m_store_waiting_on_load_cycles = 0; 59 m_store_waiting_on_store_cycles = 0; 60 m_load_waiting_on_store_cycles = 0; 61 m_load_waiting_on_load_cycles = 0; 62 63 m_outstanding_count = 0; 64 65 m_max_outstanding_requests = 0; 66 m_deadlock_threshold = 0; 67 m_instCache_ptr = NULL; 68 m_dataCache_ptr = NULL; 69 70 m_instCache_ptr = p->icache; 71 m_dataCache_ptr = p->dcache; 72 m_max_outstanding_requests = p->max_outstanding_requests; 73 m_deadlock_threshold = p->deadlock_threshold; 74 75 assert(m_max_outstanding_requests > 0); 76 assert(m_deadlock_threshold > 0); 77 assert(m_instCache_ptr != NULL); 78 assert(m_dataCache_ptr != NULL); 79 80 m_usingNetworkTester = p->using_network_tester; 81} 82 83Sequencer::~Sequencer() 84{ 85} 86 87void 88Sequencer::wakeup() 89{ 90 // Check for deadlock of any of the requests 91 Time current_time = g_eventQueue_ptr->getTime(); 92 93 // Check across all outstanding requests 94 int total_outstanding = 0; 95 96 RequestTable::iterator read = m_readRequestTable.begin(); 97 RequestTable::iterator read_end = m_readRequestTable.end(); 98 for (; read != 
read_end; ++read) { 99 SequencerRequest* request = read->second; 100 if (current_time - request->issue_time < m_deadlock_threshold) 101 continue; 102 103 panic("Possible Deadlock detected. Aborting!\n" 104 "version: %d request.paddr: 0x%x m_readRequestTable: %d " 105 "current time: %u issue_time: %d difference: %d\n", m_version, 106 request->ruby_request.m_PhysicalAddress, m_readRequestTable.size(), 107 current_time, request->issue_time, 108 current_time - request->issue_time); 109 } 110 111 RequestTable::iterator write = m_writeRequestTable.begin(); 112 RequestTable::iterator write_end = m_writeRequestTable.end(); 113 for (; write != write_end; ++write) { 114 SequencerRequest* request = write->second; 115 if (current_time - request->issue_time < m_deadlock_threshold) 116 continue; 117 118 panic("Possible Deadlock detected. Aborting!\n" 119 "version: %d request.paddr: 0x%x m_writeRequestTable: %d " 120 "current time: %u issue_time: %d difference: %d\n", m_version, 121 request->ruby_request.m_PhysicalAddress, m_writeRequestTable.size(), 122 current_time, request->issue_time, 123 current_time - request->issue_time); 124 } 125 126 total_outstanding += m_writeRequestTable.size(); 127 total_outstanding += m_readRequestTable.size(); 128 129 assert(m_outstanding_count == total_outstanding); 130 131 if (m_outstanding_count > 0) { 132 // If there are still outstanding requests, keep checking 133 schedule(deadlockCheckEvent, 134 m_deadlock_threshold * g_eventQueue_ptr->getClock() + 135 curTick()); 136 } 137} 138 139void 140Sequencer::printStats(ostream & out) const 141{ 142 out << "Sequencer: " << m_name << endl 143 << " store_waiting_on_load_cycles: " 144 << m_store_waiting_on_load_cycles << endl 145 << " store_waiting_on_store_cycles: " 146 << m_store_waiting_on_store_cycles << endl 147 << " load_waiting_on_load_cycles: " 148 << m_load_waiting_on_load_cycles << endl 149 << " load_waiting_on_store_cycles: " 150 << m_load_waiting_on_store_cycles << endl; 151} 152 153void 
154Sequencer::printProgress(ostream& out) const 155{ 156#if 0 157 int total_demand = 0; 158 out << "Sequencer Stats Version " << m_version << endl; 159 out << "Current time = " << g_eventQueue_ptr->getTime() << endl; 160 out << "---------------" << endl; 161 out << "outstanding requests" << endl; 162 163 out << "proc " << m_Read 164 << " version Requests = " << m_readRequestTable.size() << endl; 165 166 // print the request table 167 RequestTable::iterator read = m_readRequestTable.begin(); 168 RequestTable::iterator read_end = m_readRequestTable.end(); 169 for (; read != read_end; ++read) { 170 SequencerRequest* request = read->second; 171 out << "\tRequest[ " << i << " ] = " << request->type 172 << " Address " << rkeys[i] 173 << " Posted " << request->issue_time 174 << " PF " << PrefetchBit_No << endl; 175 total_demand++; 176 } 177 178 out << "proc " << m_version 179 << " Write Requests = " << m_writeRequestTable.size << endl; 180 181 // print the request table 182 RequestTable::iterator write = m_writeRequestTable.begin(); 183 RequestTable::iterator write_end = m_writeRequestTable.end(); 184 for (; write != write_end; ++write) { 185 SequencerRequest* request = write->second; 186 out << "\tRequest[ " << i << " ] = " << request.getType() 187 << " Address " << wkeys[i] 188 << " Posted " << request.getTime() 189 << " PF " << request.getPrefetch() << endl; 190 if (request.getPrefetch() == PrefetchBit_No) { 191 total_demand++; 192 } 193 } 194 195 out << endl; 196 197 out << "Total Number Outstanding: " << m_outstanding_count << endl 198 << "Total Number Demand : " << total_demand << endl 199 << "Total Number Prefetches : " << m_outstanding_count - total_demand 200 << endl << endl << endl; 201#endif 202} 203 204void 205Sequencer::printConfig(ostream& out) const 206{ 207 out << "Seqeuncer config: " << m_name << endl 208 << " controller: " << m_controller->getName() << endl 209 << " version: " << m_version << endl 210 << " max_outstanding_requests: " << 
m_max_outstanding_requests << endl 211 << " deadlock_threshold: " << m_deadlock_threshold << endl; 212} 213 214// Insert the request on the correct request table. Return true if 215// the entry was already present. 216bool 217Sequencer::insertRequest(SequencerRequest* request) 218{ 219 int total_outstanding = 220 m_writeRequestTable.size() + m_readRequestTable.size(); 221 222 assert(m_outstanding_count == total_outstanding); 223 224 // See if we should schedule a deadlock check 225 if (deadlockCheckEvent.scheduled() == false) { 226 schedule(deadlockCheckEvent, m_deadlock_threshold + curTick()); 227 } 228 229 Address line_addr(request->ruby_request.m_PhysicalAddress); 230 line_addr.makeLineAddress(); 231 if ((request->ruby_request.m_Type == RubyRequestType_ST) || 232 (request->ruby_request.m_Type == RubyRequestType_RMW_Read) || 233 (request->ruby_request.m_Type == RubyRequestType_RMW_Write) || 234 (request->ruby_request.m_Type == RubyRequestType_Load_Linked) || 235 (request->ruby_request.m_Type == RubyRequestType_Store_Conditional) || 236 (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) || 237 (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write) || 238 (request->ruby_request.m_Type == RubyRequestType_FLUSH)) { 239 pair<RequestTable::iterator, bool> r = 240 m_writeRequestTable.insert(RequestTable::value_type(line_addr, 0)); 241 bool success = r.second; 242 RequestTable::iterator i = r.first; 243 if (!success) { 244 i->second = request; 245 // return true; 246 247 // drh5: isn't this an error? do you lose the initial request? 248 assert(0); 249 } 250 i->second = request; 251 m_outstanding_count++; 252 } else { 253 pair<RequestTable::iterator, bool> r = 254 m_readRequestTable.insert(RequestTable::value_type(line_addr, 0)); 255 bool success = r.second; 256 RequestTable::iterator i = r.first; 257 if (!success) { 258 i->second = request; 259 // return true; 260 261 // drh5: isn't this an error? do you lose the initial request? 
262 assert(0); 263 } 264 i->second = request; 265 m_outstanding_count++; 266 } 267 268 g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count); 269 270 total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size(); 271 assert(m_outstanding_count == total_outstanding); 272 273 return false; 274} 275 276void 277Sequencer::markRemoved() 278{ 279 m_outstanding_count--; 280 assert(m_outstanding_count == 281 m_writeRequestTable.size() + m_readRequestTable.size()); 282} 283 284void 285Sequencer::removeRequest(SequencerRequest* srequest) 286{ 287 assert(m_outstanding_count == 288 m_writeRequestTable.size() + m_readRequestTable.size()); 289 290 const RubyRequest & ruby_request = srequest->ruby_request; 291 Address line_addr(ruby_request.m_PhysicalAddress); 292 line_addr.makeLineAddress(); 293 if ((ruby_request.m_Type == RubyRequestType_ST) || 294 (ruby_request.m_Type == RubyRequestType_RMW_Read) || 295 (ruby_request.m_Type == RubyRequestType_RMW_Write) || 296 (ruby_request.m_Type == RubyRequestType_Load_Linked) || 297 (ruby_request.m_Type == RubyRequestType_Store_Conditional) || 298 (ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) || 299 (ruby_request.m_Type == RubyRequestType_Locked_RMW_Write)) { 300 m_writeRequestTable.erase(line_addr); 301 } else { 302 m_readRequestTable.erase(line_addr); 303 } 304 305 markRemoved(); 306} 307 308bool 309Sequencer::handleLlsc(const Address& address, SequencerRequest* request) 310{ 311 // 312 // The success flag indicates whether the LLSC operation was successful. 313 // LL ops will always succeed, but SC may fail if the cache line is no 314 // longer locked. 315 // 316 bool success = true; 317 if (request->ruby_request.m_Type == RubyRequestType_Store_Conditional) { 318 if (!m_dataCache_ptr->isLocked(address, m_version)) { 319 // 320 // For failed SC requests, indicate the failure to the cpu by 321 // setting the extra data to zero. 
322 // 323 request->ruby_request.pkt->req->setExtraData(0); 324 success = false; 325 } else { 326 // 327 // For successful SC requests, indicate the success to the cpu by 328 // setting the extra data to one. 329 // 330 request->ruby_request.pkt->req->setExtraData(1); 331 } 332 // 333 // Independent of success, all SC operations must clear the lock 334 // 335 m_dataCache_ptr->clearLocked(address); 336 } else if (request->ruby_request.m_Type == RubyRequestType_Load_Linked) { 337 // 338 // Note: To fully follow Alpha LLSC semantics, should the LL clear any 339 // previously locked cache lines? 340 // 341 m_dataCache_ptr->setLocked(address, m_version); 342 } else if ((m_dataCache_ptr->isTagPresent(address)) && (m_dataCache_ptr->isLocked(address, m_version))) { 343 // 344 // Normal writes should clear the locked address 345 // 346 m_dataCache_ptr->clearLocked(address); 347 } 348 return success; 349} 350 351void 352Sequencer::writeCallback(const Address& address, DataBlock& data) 353{ 354 writeCallback(address, GenericMachineType_NULL, data); 355} 356 357void 358Sequencer::writeCallback(const Address& address, 359 GenericMachineType mach, 360 DataBlock& data) 361{ 362 writeCallback(address, mach, data, 0, 0, 0); 363} 364 365void 366Sequencer::writeCallback(const Address& address, 367 GenericMachineType mach, 368 DataBlock& data, 369 Time initialRequestTime, 370 Time forwardRequestTime, 371 Time firstResponseTime) 372{ 373 assert(address == line_address(address)); 374 assert(m_writeRequestTable.count(line_address(address))); 375 376 RequestTable::iterator i = m_writeRequestTable.find(address); 377 assert(i != m_writeRequestTable.end()); 378 SequencerRequest* request = i->second; 379 380 m_writeRequestTable.erase(i); 381 markRemoved(); 382 383 assert((request->ruby_request.m_Type == RubyRequestType_ST) || 384 (request->ruby_request.m_Type == RubyRequestType_RMW_Read) || 385 (request->ruby_request.m_Type == RubyRequestType_RMW_Write) || 386 (request->ruby_request.m_Type == 
RubyRequestType_Load_Linked) || 387 (request->ruby_request.m_Type == RubyRequestType_Store_Conditional) || 388 (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) || 389 (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write) || 390 (request->ruby_request.m_Type == RubyRequestType_FLUSH)); 391 392 393 // 394 // For Alpha, properly handle LL, SC, and write requests with respect to 395 // locked cache blocks. 396 // 397 // Not valid for Network_test protocl 398 // 399 bool success = true; 400 if(!m_usingNetworkTester) 401 success = handleLlsc(address, request); 402 403 if (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) { 404 m_controller->blockOnQueue(address, m_mandatory_q_ptr); 405 } else if (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write) { 406 m_controller->unblock(address); 407 } 408 409 hitCallback(request, mach, data, success, 410 initialRequestTime, forwardRequestTime, firstResponseTime); 411} 412 413void 414Sequencer::readCallback(const Address& address, DataBlock& data) 415{ 416 readCallback(address, GenericMachineType_NULL, data); 417} 418 419void 420Sequencer::readCallback(const Address& address, 421 GenericMachineType mach, 422 DataBlock& data) 423{ 424 readCallback(address, mach, data, 0, 0, 0); 425} 426 427void 428Sequencer::readCallback(const Address& address, 429 GenericMachineType mach, 430 DataBlock& data, 431 Time initialRequestTime, 432 Time forwardRequestTime, 433 Time firstResponseTime) 434{ 435 assert(address == line_address(address)); 436 assert(m_readRequestTable.count(line_address(address))); 437 438 RequestTable::iterator i = m_readRequestTable.find(address); 439 assert(i != m_readRequestTable.end()); 440 SequencerRequest* request = i->second; 441 442 m_readRequestTable.erase(i); 443 markRemoved(); 444 445 assert((request->ruby_request.m_Type == RubyRequestType_LD) || 446 (request->ruby_request.m_Type == RubyRequestType_IFETCH)); 447 448 hitCallback(request, mach, data, true, 449 
initialRequestTime, forwardRequestTime, firstResponseTime); 450} 451 452void 453Sequencer::hitCallback(SequencerRequest* srequest, 454 GenericMachineType mach, 455 DataBlock& data, 456 bool success, 457 Time initialRequestTime, 458 Time forwardRequestTime, 459 Time firstResponseTime) 460{ 461 const RubyRequest & ruby_request = srequest->ruby_request; 462 Address request_address(ruby_request.m_PhysicalAddress); 463 Address request_line_address(ruby_request.m_PhysicalAddress); 464 request_line_address.makeLineAddress(); 465 RubyRequestType type = ruby_request.m_Type; 466 Time issued_time = srequest->issue_time; 467 468 // Set this cache entry to the most recently used 469 if (type == RubyRequestType_IFETCH) { 470 if (m_instCache_ptr->isTagPresent(request_line_address)) 471 m_instCache_ptr->setMRU(request_line_address); 472 } else { 473 if (m_dataCache_ptr->isTagPresent(request_line_address)) 474 m_dataCache_ptr->setMRU(request_line_address); 475 } 476 477 assert(g_eventQueue_ptr->getTime() >= issued_time); 478 Time miss_latency = g_eventQueue_ptr->getTime() - issued_time; 479 480 // Profile the miss latency for all non-zero demand misses 481 if (miss_latency != 0) { 482 g_system_ptr->getProfiler()->missLatency(miss_latency, type, mach); 483 484 if (mach == GenericMachineType_L1Cache_wCC) { 485 g_system_ptr->getProfiler()->missLatencyWcc(issued_time, 486 initialRequestTime, 487 forwardRequestTime, 488 firstResponseTime, 489 g_eventQueue_ptr->getTime()); 490 } 491 492 if (mach == GenericMachineType_Directory) { 493 g_system_ptr->getProfiler()->missLatencyDir(issued_time, 494 initialRequestTime, 495 forwardRequestTime, 496 firstResponseTime, 497 g_eventQueue_ptr->getTime()); 498 } 499 500 DPRINTFR(ProtocolTrace, "%7s %3s %10s%20s %6s>%-6s %s %d cycles\n", 501 g_eventQueue_ptr->getTime(), m_version, "Seq", 502 success ? 
"Done" : "SC_Failed", "", "", 503 ruby_request.m_PhysicalAddress, miss_latency); 504 } 505#if 0 506 if (request.getPrefetch() == PrefetchBit_Yes) { 507 return; // Ignore the prefetch 508 } 509#endif 510 511 // update the data 512 if (ruby_request.data != NULL) { 513 if ((type == RubyRequestType_LD) || 514 (type == RubyRequestType_IFETCH) || 515 (type == RubyRequestType_RMW_Read) || 516 (type == RubyRequestType_Locked_RMW_Read) || 517 (type == RubyRequestType_Load_Linked)) { 518 memcpy(ruby_request.data, 519 data.getData(request_address.getOffset(), ruby_request.m_Size), 520 ruby_request.m_Size); 521 } else { 522 data.setData(ruby_request.data, request_address.getOffset(), 523 ruby_request.m_Size); 524 } 525 } else { 526 DPRINTF(MemoryAccess, 527 "WARNING. Data not transfered from Ruby to M5 for type %s\n", 528 RubyRequestType_to_string(type)); 529 } 530 531 // If using the RubyTester, update the RubyTester sender state's 532 // subBlock with the recieved data. The tester will later access 533 // this state. 534 // Note: RubyPort will access it's sender state before the 535 // RubyTester. 
536 if (m_usingRubyTester) { 537 RubyPort::SenderState *requestSenderState = 538 safe_cast<RubyPort::SenderState*>(ruby_request.pkt->senderState); 539 RubyTester::SenderState* testerSenderState = 540 safe_cast<RubyTester::SenderState*>(requestSenderState->saved); 541 testerSenderState->subBlock->mergeFrom(data); 542 } 543 544 ruby_hit_callback(ruby_request.pkt); 545 delete srequest; 546} 547 548// Returns true if the sequencer already has a load or store outstanding 549RequestStatus 550Sequencer::getRequestStatus(const RubyRequest& request) 551{ 552 bool is_outstanding_store = 553 !!m_writeRequestTable.count(line_address(request.m_PhysicalAddress)); 554 bool is_outstanding_load = 555 !!m_readRequestTable.count(line_address(request.m_PhysicalAddress)); 556 if (is_outstanding_store) { 557 if ((request.m_Type == RubyRequestType_LD) || 558 (request.m_Type == RubyRequestType_IFETCH) || 559 (request.m_Type == RubyRequestType_RMW_Read)) { 560 m_store_waiting_on_load_cycles++; 561 } else { 562 m_store_waiting_on_store_cycles++; 563 } 564 return RequestStatus_Aliased; 565 } else if (is_outstanding_load) { 566 if ((request.m_Type == RubyRequestType_ST) || 567 (request.m_Type == RubyRequestType_RMW_Write)) { 568 m_load_waiting_on_store_cycles++; 569 } else { 570 m_load_waiting_on_load_cycles++; 571 } 572 return RequestStatus_Aliased; 573 } 574 575 if (m_outstanding_count >= m_max_outstanding_requests) { 576 return RequestStatus_BufferFull; 577 } 578 579 return RequestStatus_Ready; 580} 581 582bool 583Sequencer::empty() const 584{ 585 return m_writeRequestTable.empty() && m_readRequestTable.empty(); 586} 587 588RequestStatus 589Sequencer::makeRequest(const RubyRequest &request) 590{ 591 assert(request.m_PhysicalAddress.getOffset() + request.m_Size <= 592 RubySystem::getBlockSizeBytes()); 593 RequestStatus status = getRequestStatus(request); 594 if (status != RequestStatus_Ready) 595 return status; 596 597 SequencerRequest *srequest = 598 new SequencerRequest(request, 
g_eventQueue_ptr->getTime()); 599 bool found = insertRequest(srequest); 600 if (found) { 601 panic("Sequencer::makeRequest should never be called if the " 602 "request is already outstanding\n"); 603 return RequestStatus_NULL; 604 } 605 606 issueRequest(request); 607 608 // TODO: issue hardware prefetches here 609 return RequestStatus_Issued; 610} 611 612void 613Sequencer::issueRequest(const RubyRequest& request) 614{ 615 // TODO: Eliminate RubyRequest being copied again. 616 617 RubyRequestType ctype; 618 switch(request.m_Type) { 619 case RubyRequestType_IFETCH: 620 ctype = RubyRequestType_IFETCH; 621 break; 622 case RubyRequestType_LD: 623 ctype = RubyRequestType_LD; 624 break; 625 case RubyRequestType_FLUSH: 626 ctype = RubyRequestType_FLUSH; 627 break; 628 case RubyRequestType_ST: 629 case RubyRequestType_RMW_Read: 630 case RubyRequestType_RMW_Write: 631 // 632 // x86 locked instructions are translated to store cache coherence 633 // requests because these requests should always be treated as read 634 // exclusive operations and should leverage any migratory sharing 635 // optimization built into the protocol. 636 // 637 case RubyRequestType_Locked_RMW_Read: 638 case RubyRequestType_Locked_RMW_Write: 639 ctype = RubyRequestType_ST; 640 break; 641 // 642 // Alpha LL/SC instructions need to be handled carefully by the cache 643 // coherence protocol to ensure they follow the proper semantics. In 644 // particular, by identifying the operations as atomic, the protocol 645 // should understand that migratory sharing optimizations should not be 646 // performed (i.e. a load between the LL and SC should not steal away 647 // exclusive permission). 
648 // 649 case RubyRequestType_Load_Linked: 650 case RubyRequestType_Store_Conditional: 651 ctype = RubyRequestType_ATOMIC; 652 break; 653 default: 654 assert(0); 655 } 656 657 RubyAccessMode amtype; 658 switch(request.m_AccessMode){ 659 case RubyAccessMode_User: 660 amtype = RubyAccessMode_User; 661 break; 662 case RubyAccessMode_Supervisor: 663 amtype = RubyAccessMode_Supervisor; 664 break; 665 case RubyAccessMode_Device: 666 amtype = RubyAccessMode_User; 667 break; 668 default: 669 assert(0); 670 } 671 672 Address line_addr(request.m_PhysicalAddress); 673 line_addr.makeLineAddress(); 674 int proc_id = request.pkt->req->hasContextId() ? 675 request.pkt->req->contextId() : -1; 676 RubyRequest *msg = new RubyRequest(request.m_PhysicalAddress.getAddress(), 677 request.data, request.m_Size, 678 request.m_ProgramCounter.getAddress(), 679 ctype, amtype, request.pkt, 680 PrefetchBit_No, proc_id); 681 682 DPRINTFR(ProtocolTrace, "%7s %3s %10s%20s %6s>%-6s %s %s\n", 683 g_eventQueue_ptr->getTime(), m_version, "Seq", "Begin", "", "", 684 request.m_PhysicalAddress, RubyRequestType_to_string(request.m_Type)); 685 686 Time latency = 0; // initialzed to an null value 687 688 if (request.m_Type == RubyRequestType_IFETCH) 689 latency = m_instCache_ptr->getLatency(); 690 else 691 latency = m_dataCache_ptr->getLatency(); 692 693 // Send the message to the cache controller 694 assert(latency > 0); 695 696 assert(m_mandatory_q_ptr != NULL); 697 m_mandatory_q_ptr->enqueue(msg, latency); 698} 699 700#if 0 701bool 702Sequencer::tryCacheAccess(const Address& addr, RubyRequestType type, 703 RubyAccessMode access_mode, 704 int size, DataBlock*& data_ptr) 705{ 706 CacheMemory *cache = 707 (type == RubyRequestType_IFETCH) ? 
m_instCache_ptr : m_dataCache_ptr; 708 709 return cache->tryCacheAccess(line_address(addr), type, data_ptr); 710} 711#endif 712 713template <class KEY, class VALUE> 714std::ostream & 715operator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map) 716{ 717 typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin(); 718 typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end(); 719 720 out << "["; 721 for (; i != end; ++i) 722 out << " " << i->first << "=" << i->second; 723 out << " ]"; 724 725 return out; 726} 727 728void 729Sequencer::print(ostream& out) const 730{ 731 out << "[Sequencer: " << m_version 732 << ", outstanding requests: " << m_outstanding_count 733 << ", read request table: " << m_readRequestTable 734 << ", write request table: " << m_writeRequestTable 735 << "]"; 736} 737 738// this can be called from setState whenever coherence permissions are 739// upgraded when invoked, coherence violations will be checked for the 740// given block 741void 742Sequencer::checkCoherence(const Address& addr) 743{ 744#ifdef CHECK_COHERENCE 745 g_system_ptr->checkGlobalCoherenceInvariant(addr); 746#endif 747} 748