Sequencer.cc revision 8214:02cb69e5cfeb
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "base/misc.hh"
#include "base/str.hh"
#include "cpu/testers/rubytest/RubyTester.hh"
#include "mem/packet.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "params/RubySequencer.hh"

using namespace std;

Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

Sequencer::Sequencer(const Params *p)
    : RubyPort(p), deadlockCheckEvent(this)
{
    m_store_waiting_on_load_cycles = 0;
    m_store_waiting_on_store_cycles = 0;
    m_load_waiting_on_store_cycles = 0;
    m_load_waiting_on_load_cycles = 0;

    m_outstanding_count = 0;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);

    m_usingNetworkTester = p->using_network_tester;
}

Sequencer::~Sequencer()
{
}

void
Sequencer::wakeup()
{
    // Check for deadlock of any of the requests
    Time current_time = g_eventQueue_ptr->getTime();

    // Check across all outstanding requests
    int total_outstanding = 0;

    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_readRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->ruby_request.m_PhysicalAddress,
              m_readRequestTable.size(), current_time, request->issue_time,
              current_time - request->issue_time);
    }

    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->ruby_request.m_PhysicalAddress,
              m_writeRequestTable.size(), current_time, request->issue_time,
              current_time - request->issue_time);
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent,
                 m_deadlock_threshold * g_eventQueue_ptr->getClock() +
                 curTick());
    }
}
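
// For reference: in the reschedule above, m_deadlock_threshold is in Ruby
// cycles, g_eventQueue_ptr->getClock() gives the number of ticks per Ruby
// cycle, and curTick() is in ticks.  A sketch with assumed example values
// (not defined anywhere in this file):
#if 0
    // With 1 ps ticks and a 1 GHz Ruby clock, getClock() == 1000, so a
    // deadlock_threshold of 500000 cycles schedules the next check
    // 500000 * 1000 = 5e8 ticks (0.5 ms) past the current tick.
    Tick next_check = m_deadlock_threshold * g_eventQueue_ptr->getClock() +
        curTick();
#endif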

void
Sequencer::printStats(ostream & out) const
{
    out << "Sequencer: " << m_name << endl
        << " store_waiting_on_load_cycles: "
        << m_store_waiting_on_load_cycles << endl
        << " store_waiting_on_store_cycles: "
        << m_store_waiting_on_store_cycles << endl
        << " load_waiting_on_load_cycles: "
        << m_load_waiting_on_load_cycles << endl
        << " load_waiting_on_store_cycles: "
        << m_load_waiting_on_store_cycles << endl;
}

void
Sequencer::printProgress(ostream& out) const
{
#if 0
    int total_demand = 0;
    out << "Sequencer Stats Version " << m_version << endl;
    out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
    out << "---------------" << endl;
    out << "outstanding requests" << endl;

    out << "proc " << m_version
        << " Read Requests = " << m_readRequestTable.size() << endl;

    // print the read request table
    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        out << "\tRequest[ " << read->first << " ] = "
            << request->ruby_request.m_Type
            << " Address " << read->first
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    out << "proc " << m_version
        << " Write Requests = " << m_writeRequestTable.size() << endl;

    // print the write request table
    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        out << "\tRequest[ " << write->first << " ] = "
            << request->ruby_request.m_Type
            << " Address " << write->first
            << " Posted " << request->issue_time
            << " PF " << request->ruby_request.getPrefetch() << endl;
        if (request->ruby_request.getPrefetch() == PrefetchBit_No) {
            total_demand++;
        }
    }

    out << endl;

    out << "Total Number Outstanding: " << m_outstanding_count << endl
        << "Total Number Demand     : " << total_demand << endl
        << "Total Number Prefetches : " << m_outstanding_count - total_demand
        << endl << endl << endl;
#endif
}

void
Sequencer::printConfig(ostream& out) const
{
    out << "Sequencer config: " << m_name << endl
        << " controller: " << m_controller->getName() << endl
        << " version: " << m_version << endl
        << " max_outstanding_requests: " << m_max_outstanding_requests << endl
        << " deadlock_threshold: " << m_deadlock_threshold << endl;
}

// Insert the request on the correct request table.  Aliased requests are
// filtered out by getRequestStatus() before we get here, so an entry that
// is already present is treated as a fatal error; on success the function
// returns false.
bool
Sequencer::insertRequest(SequencerRequest* request)
{
    int total_outstanding =
        m_writeRequestTable.size() + m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    // See if we should schedule a deadlock check
    if (deadlockCheckEvent.scheduled() == false) {
        schedule(deadlockCheckEvent,
                 m_deadlock_threshold * g_eventQueue_ptr->getClock() +
                 curTick());
    }

    Address line_addr(request->ruby_request.m_PhysicalAddress);
    line_addr.makeLineAddress();
    if ((request->ruby_request.m_Type == RubyRequestType_ST) ||
        (request->ruby_request.m_Type == RubyRequestType_ATOMIC) ||
        (request->ruby_request.m_Type == RubyRequestType_RMW_Read) ||
        (request->ruby_request.m_Type == RubyRequestType_RMW_Write) ||
        (request->ruby_request.m_Type == RubyRequestType_Load_Linked) ||
        (request->ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
        (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
        (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write) ||
        (request->ruby_request.m_Type == RubyRequestType_FLUSH)) {
        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(RequestTable::value_type(line_addr, 0));
        bool success = r.second;
        RequestTable::iterator i = r.first;
        if (!success) {
            // drh5: isn't this an error? do you lose the initial request?
            assert(0);
        }
        i->second = request;
        m_outstanding_count++;
    } else {
        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(RequestTable::value_type(line_addr, 0));
        bool success = r.second;
        RequestTable::iterator i = r.first;
        if (!success) {
            // drh5: isn't this an error? do you lose the initial request?
            assert(0);
        }
        i->second = request;
        m_outstanding_count++;
    }

    g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);

    total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
    assert(m_outstanding_count == total_outstanding);

    return false;
}
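
// For reference, the write-table/read-table routing used above, restated
// as a standalone predicate.  This helper is illustrative only and is not
// defined elsewhere in this file:
#if 0
static bool
goesInWriteRequestTable(RubyRequestType type)
{
    // Everything with store or atomic semantics, plus FLUSH, is tracked
    // in m_writeRequestTable; LD and IFETCH land in m_readRequestTable.
    return (type == RubyRequestType_ST) ||
           (type == RubyRequestType_ATOMIC) ||
           (type == RubyRequestType_RMW_Read) ||
           (type == RubyRequestType_RMW_Write) ||
           (type == RubyRequestType_Load_Linked) ||
           (type == RubyRequestType_Store_Conditional) ||
           (type == RubyRequestType_Locked_RMW_Read) ||
           (type == RubyRequestType_Locked_RMW_Write) ||
           (type == RubyRequestType_FLUSH);
}
#endif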

void
Sequencer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}

void
Sequencer::removeRequest(SequencerRequest* srequest)
{
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());

    const RubyRequest & ruby_request = srequest->ruby_request;
    Address line_addr(ruby_request.m_PhysicalAddress);
    line_addr.makeLineAddress();
    // Mirror the routing used by insertRequest() so every type is erased
    // from the same table it was inserted into.
    if ((ruby_request.m_Type == RubyRequestType_ST) ||
        (ruby_request.m_Type == RubyRequestType_ATOMIC) ||
        (ruby_request.m_Type == RubyRequestType_RMW_Read) ||
        (ruby_request.m_Type == RubyRequestType_RMW_Write) ||
        (ruby_request.m_Type == RubyRequestType_Load_Linked) ||
        (ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
        (ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
        (ruby_request.m_Type == RubyRequestType_Locked_RMW_Write) ||
        (ruby_request.m_Type == RubyRequestType_FLUSH)) {
        m_writeRequestTable.erase(line_addr);
    } else {
        m_readRequestTable.erase(line_addr);
    }

    markRemoved();
}

bool
Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
{
    //
    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    //
    bool success = true;
    if (request->ruby_request.m_Type == RubyRequestType_Store_Conditional) {
        if (!m_dataCache_ptr->isLocked(address, m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->ruby_request.pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->ruby_request.pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        m_dataCache_ptr->clearLocked(address);
    } else if (request->ruby_request.m_Type == RubyRequestType_Load_Linked) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        m_dataCache_ptr->setLocked(address, m_version);
    } else if (m_dataCache_ptr->isTagPresent(address) &&
               m_dataCache_ptr->isLocked(address, m_version)) {
        //
        // Normal writes should clear the locked address
        //
        m_dataCache_ptr->clearLocked(address);
    }
    return success;
}

void
Sequencer::writeCallback(const Address& address, DataBlock& data)
{
    writeCallback(address, GenericMachineType_NULL, data);
}

void
Sequencer::writeCallback(const Address& address,
                         GenericMachineType mach,
                         DataBlock& data)
{
    writeCallback(address, mach, data, 0, 0, 0);
}

void
Sequencer::writeCallback(const Address& address,
                         GenericMachineType mach,
                         DataBlock& data,
                         Time initialRequestTime,
                         Time forwardRequestTime,
                         Time firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_writeRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    SequencerRequest* request = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->ruby_request.m_Type == RubyRequestType_ST) ||
           (request->ruby_request.m_Type == RubyRequestType_ATOMIC) ||
           (request->ruby_request.m_Type == RubyRequestType_RMW_Read) ||
           (request->ruby_request.m_Type == RubyRequestType_RMW_Write) ||
           (request->ruby_request.m_Type == RubyRequestType_Load_Linked) ||
           (request->ruby_request.m_Type ==
            RubyRequestType_Store_Conditional) ||
           (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
           (request->ruby_request.m_Type ==
            RubyRequestType_Locked_RMW_Write) ||
           (request->ruby_request.m_Type == RubyRequestType_FLUSH));

    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    // Not valid for Network_test protocol.
    //
    bool success = true;
    if (!m_usingNetworkTester)
        success = handleLlsc(address, request);

    if (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) {
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->ruby_request.m_Type ==
               RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, mach, data, success,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

void
Sequencer::readCallback(const Address& address, DataBlock& data)
{
    readCallback(address, GenericMachineType_NULL, data);
}

void
Sequencer::readCallback(const Address& address,
                        GenericMachineType mach,
                        DataBlock& data)
{
    readCallback(address, mach, data, 0, 0, 0);
}

void
Sequencer::readCallback(const Address& address,
                        GenericMachineType mach,
                        DataBlock& data,
                        Time initialRequestTime,
                        Time forwardRequestTime,
                        Time firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_readRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    SequencerRequest* request = i->second;

    m_readRequestTable.erase(i);
    markRemoved();

    assert((request->ruby_request.m_Type == RubyRequestType_LD) ||
           (request->ruby_request.m_Type == RubyRequestType_IFETCH));

    hitCallback(request, mach, data, true,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

void
Sequencer::hitCallback(SequencerRequest* srequest,
                       GenericMachineType mach,
                       DataBlock& data,
                       bool success,
                       Time initialRequestTime,
                       Time forwardRequestTime,
                       Time firstResponseTime)
{
    const RubyRequest & ruby_request = srequest->ruby_request;
    Address request_address(ruby_request.m_PhysicalAddress);
    Address request_line_address(ruby_request.m_PhysicalAddress);
    request_line_address.makeLineAddress();
    RubyRequestType type = ruby_request.m_Type;
    Time issued_time = srequest->issue_time;

    // Set this cache entry to the most recently used
    if (type == RubyRequestType_IFETCH) {
        if (m_instCache_ptr->isTagPresent(request_line_address))
            m_instCache_ptr->setMRU(request_line_address);
    } else {
        if (m_dataCache_ptr->isTagPresent(request_line_address))
            m_dataCache_ptr->setMRU(request_line_address);
    }

    assert(g_eventQueue_ptr->getTime() >= issued_time);
    Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

    // Profile the miss latency for all non-zero demand misses
    if (miss_latency != 0) {
        g_system_ptr->getProfiler()->missLatency(miss_latency, type, mach);

        if (mach == GenericMachineType_L1Cache_wCC) {
            g_system_ptr->getProfiler()->missLatencyWcc(issued_time,
                initialRequestTime, forwardRequestTime,
                firstResponseTime, g_eventQueue_ptr->getTime());
        }

        if (mach == GenericMachineType_Directory) {
            g_system_ptr->getProfiler()->missLatencyDir(issued_time,
                initialRequestTime, forwardRequestTime,
                firstResponseTime, g_eventQueue_ptr->getTime());
        }

        DPRINTFR(ProtocolTrace, "%7s %3s %10s%20s %6s>%-6s %s %d cycles\n",
                 g_eventQueue_ptr->getTime(), m_version, "Seq",
                 success ? "Done" : "SC_Failed", "", "",
                 ruby_request.m_PhysicalAddress, miss_latency);
    }
#if 0
    if (ruby_request.getPrefetch() == PrefetchBit_Yes) {
        return; // Ignore the prefetch
    }
#endif

    // update the data
    if (ruby_request.data != NULL) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked)) {
            memcpy(ruby_request.data,
                   data.getData(request_address.getOffset(),
                                ruby_request.m_Size),
                   ruby_request.m_Size);
        } else {
            data.setData(ruby_request.data, request_address.getOffset(),
                         ruby_request.m_Size);
        }
    } else {
        DPRINTF(MemoryAccess,
                "WARNING. Data not transferred from Ruby to M5 for type %s\n",
                RubyRequestType_to_string(type));
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data.  The tester will later access
    // this state.
    // Note: RubyPort will access its sender state before the
    // RubyTester.
    if (m_usingRubyTester) {
        RubyPort::SenderState *requestSenderState =
            safe_cast<RubyPort::SenderState*>(ruby_request.pkt->senderState);
        RubyTester::SenderState* testerSenderState =
            safe_cast<RubyTester::SenderState*>(requestSenderState->saved);
        testerSenderState->subBlock->mergeFrom(data);
    }

    ruby_hit_callback(ruby_request.pkt);
    delete srequest;
}

// Returns the status of an incoming request: Aliased if the sequencer
// already has a load or store outstanding to the same cache line,
// BufferFull if no more requests can be accepted, Ready otherwise.
RequestStatus
Sequencer::getRequestStatus(const RubyRequest& request)
{
    bool is_outstanding_store =
        !!m_writeRequestTable.count(line_address(request.m_PhysicalAddress));
    bool is_outstanding_load =
        !!m_readRequestTable.count(line_address(request.m_PhysicalAddress));
    if (is_outstanding_store) {
        if ((request.m_Type == RubyRequestType_LD) ||
            (request.m_Type == RubyRequestType_IFETCH) ||
            (request.m_Type == RubyRequestType_RMW_Read)) {
            m_store_waiting_on_load_cycles++;
        } else {
            m_store_waiting_on_store_cycles++;
        }
        return RequestStatus_Aliased;
    } else if (is_outstanding_load) {
        if ((request.m_Type == RubyRequestType_ST) ||
            (request.m_Type == RubyRequestType_RMW_Write)) {
            m_load_waiting_on_store_cycles++;
        } else {
            m_load_waiting_on_load_cycles++;
        }
        return RequestStatus_Aliased;
    }

    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    return RequestStatus_Ready;
}

bool
Sequencer::empty() const
{
    return m_writeRequestTable.empty() && m_readRequestTable.empty();
}

RequestStatus
Sequencer::makeRequest(const RubyRequest &request)
{
    assert(request.m_PhysicalAddress.getOffset() + request.m_Size <=
           RubySystem::getBlockSizeBytes());
    RequestStatus status = getRequestStatus(request);
    if (status != RequestStatus_Ready)
        return status;

    SequencerRequest *srequest =
        new SequencerRequest(request, g_eventQueue_ptr->getTime());
    bool found = insertRequest(srequest);
    if (found) {
        panic("Sequencer::makeRequest should never be called if the "
              "request is already outstanding\n");
        return RequestStatus_NULL;
    }

    issueRequest(request);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}

void
Sequencer::issueRequest(const RubyRequest& request)
{
    // TODO: Eliminate RubyRequest being copied again.

    RubyRequestType ctype;
    switch (request.m_Type) {
      case RubyRequestType_IFETCH:
        ctype = RubyRequestType_IFETCH;
        break;
      case RubyRequestType_LD:
        ctype = RubyRequestType_LD;
        break;
      case RubyRequestType_FLUSH:
        ctype = RubyRequestType_FLUSH;
        break;
      case RubyRequestType_ST:
      case RubyRequestType_RMW_Read:
      case RubyRequestType_RMW_Write:
      //
      // x86 locked instructions are translated to store cache coherence
      // requests because these requests should always be treated as read
      // exclusive operations and should leverage any migratory sharing
      // optimization built into the protocol.
      //
      case RubyRequestType_Locked_RMW_Read:
      case RubyRequestType_Locked_RMW_Write:
        ctype = RubyRequestType_ST;
        break;
      //
      // Alpha LL/SC instructions need to be handled carefully by the cache
      // coherence protocol to ensure they follow the proper semantics. In
      // particular, by identifying the operations as atomic, the protocol
      // should understand that migratory sharing optimizations should not be
      // performed (i.e. a load between the LL and SC should not steal away
      // exclusive permission).
      //
      case RubyRequestType_Load_Linked:
      case RubyRequestType_Store_Conditional:
      case RubyRequestType_ATOMIC:
        ctype = RubyRequestType_ATOMIC;
        break;
      default:
        assert(0);
    }

    RubyAccessMode amtype;
    switch (request.m_AccessMode) {
      case RubyAccessMode_User:
        amtype = RubyAccessMode_User;
        break;
      case RubyAccessMode_Supervisor:
        amtype = RubyAccessMode_Supervisor;
        break;
      case RubyAccessMode_Device:
        amtype = RubyAccessMode_User;
        break;
      default:
        assert(0);
    }

    Address line_addr(request.m_PhysicalAddress);
    line_addr.makeLineAddress();
    int proc_id = -1;
    if (request.pkt != NULL && request.pkt->req->hasContextId()) {
        proc_id = request.pkt->req->contextId();
    }
    RubyRequest *msg = new RubyRequest(request.m_PhysicalAddress.getAddress(),
                                       request.data, request.m_Size,
                                       request.m_ProgramCounter.getAddress(),
                                       ctype, amtype, request.pkt,
                                       PrefetchBit_No, proc_id);

    DPRINTFR(ProtocolTrace, "%7s %3s %10s%20s %6s>%-6s %s %s\n",
             g_eventQueue_ptr->getTime(), m_version, "Seq", "Begin", "", "",
             request.m_PhysicalAddress,
             RubyRequestType_to_string(request.m_Type));

    // Pick the latency of the cache that services the request; zero is
    // only a placeholder until one of the branches below sets it.
    Time latency = 0;

    if (request.m_Type == RubyRequestType_IFETCH)
        latency = m_instCache_ptr->getLatency();
    else
        latency = m_dataCache_ptr->getLatency();

    // Send the message to the cache controller
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, latency);
}
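
// For reference, the coherence-type coalescing performed by issueRequest()
// above, restated as a table (the switch statement is authoritative):
//
//   incoming m_Type                        ctype sent to the protocol
//   -------------------------------------  --------------------------
//   IFETCH                                 IFETCH
//   LD                                     LD
//   FLUSH                                  FLUSH
//   ST, RMW_Read, RMW_Write,
//     Locked_RMW_Read, Locked_RMW_Write    ST
//   Load_Linked, Store_Conditional,
//     ATOMIC                               ATOMIC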
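
// A sketch of the request lifecycle driven by makeRequest() and
// issueRequest() above, as seen from a hypothetical CPU-side caller
// ("seq" and "ruby_request" are assumed names, not defined here):
#if 0
    RequestStatus status = seq->makeRequest(ruby_request);
    if (status == RequestStatus_Aliased ||
        status == RequestStatus_BufferFull) {
        // Rejected at admission by getRequestStatus(); the caller must
        // retry the request later.
    } else {
        assert(status == RequestStatus_Issued);
        // makeRequest() has recorded the request (insertRequest) and
        // enqueued it to the cache controller (issueRequest).  Completion
        // arrives later through readCallback()/writeCallback(), which
        // unwind via hitCallback() to ruby_hit_callback(pkt).
    }
#endif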

#if 0
bool
Sequencer::tryCacheAccess(const Address& addr, RubyRequestType type,
                          RubyAccessMode access_mode,
                          int size, DataBlock*& data_ptr)
{
    CacheMemory *cache =
        (type == RubyRequestType_IFETCH) ? m_instCache_ptr : m_dataCache_ptr;

    return cache->tryCacheAccess(line_address(addr), type, data_ptr);
}
#endif

template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map)
{
    typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin();
    typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end();

    out << "[";
    for (; i != end; ++i)
        out << " " << i->first << "=" << i->second;
    out << " ]";

    return out;
}

void
Sequencer::print(ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", read request table: " << m_readRequestTable
        << ", write request table: " << m_writeRequestTable
        << "]";
}

// This can be called from setState whenever coherence permissions are
// upgraded; when invoked, coherence violations will be checked for the
// given block.
void
Sequencer::checkCoherence(const Address& addr)
{
#ifdef CHECK_COHERENCE
    g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}
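
// A minimal usage sketch for print() and the hash_map operator<< above.
// "seq" is an assumed pointer to an existing Sequencer, and the output
// shape in the comment is illustrative, not captured from a run:
#if 0
    std::ostringstream ss;
    seq->print(ss);
    // ss.str() now holds something like:
    //   "[Sequencer: 0, outstanding requests: 1,
    //     read request table: [ 0x5280=0x2aab... ],
    //     write request table: [ ]]"
#endif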