Sequencer.cc revision 8232:b28d06a175be
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "base/misc.hh"
#include "base/str.hh"
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/packet.hh"
#include "params/RubySequencer.hh"

using namespace std;

Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

Sequencer::Sequencer(const Params *p)
    : RubyPort(p), deadlockCheckEvent(this)
{
    m_store_waiting_on_load_cycles = 0;
    m_store_waiting_on_store_cycles = 0;
    m_load_waiting_on_store_cycles = 0;
    m_load_waiting_on_load_cycles = 0;

    m_outstanding_count = 0;

    m_max_outstanding_requests = 0;
    m_deadlock_threshold = 0;
    m_instCache_ptr = NULL;
    m_dataCache_ptr = NULL;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);

    m_usingNetworkTester = p->using_network_tester;
}

Sequencer::~Sequencer()
{
}

void
Sequencer::wakeup()
{
    // Check for deadlock of any of the requests
    Time current_time = g_eventQueue_ptr->getTime();

    // Check across all outstanding requests
    int total_outstanding = 0;

    RequestTable::iterator read =
        m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_readRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->ruby_request.m_PhysicalAddress,
              m_readRequestTable.size(),
              current_time, request->issue_time,
              current_time - request->issue_time);
    }

    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->ruby_request.m_PhysicalAddress,
              m_writeRequestTable.size(),
              current_time, request->issue_time,
              current_time - request->issue_time);
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent,
                 m_deadlock_threshold * g_eventQueue_ptr->getClock() +
                 curTick());
    }
}

void
Sequencer::printStats(ostream & out) const
{
    out << "Sequencer: " << m_name << endl
        << "  store_waiting_on_load_cycles: "
        << m_store_waiting_on_load_cycles << endl
        << "  store_waiting_on_store_cycles: "
        << m_store_waiting_on_store_cycles << endl
        << "  load_waiting_on_load_cycles: "
        << m_load_waiting_on_load_cycles << endl
        << "  load_waiting_on_store_cycles: "
        << m_load_waiting_on_store_cycles << endl;
}

void
Sequencer::printProgress(ostream& out) const
{
#if 0
    int total_demand = 0;
    out << "Sequencer Stats Version " << m_version << endl;
    out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
    out << "---------------" << endl;
    out << "outstanding requests" << endl;

    out << "proc " << m_Read
        << " version Requests = " << m_readRequestTable.size() << endl;

    // print the request table
    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        out << "\tRequest[ " << i << " ] = " << request->type
            << " Address " << rkeys[i]
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    out << "proc " << m_version
        << " Write Requests = " << m_writeRequestTable.size << endl;

    // print the request table
    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        out << "\tRequest[ " << i << " ] = " << request.getType()
            << " Address " << wkeys[i]
            << " Posted " << request.getTime()
            << " PF " << request.getPrefetch() << endl;
        if (request.getPrefetch() == PrefetchBit_No) {
            total_demand++;
        }
    }

    out << endl;

    out << "Total Number Outstanding: " << m_outstanding_count << endl
        << "Total Number Demand     : " << total_demand << endl
        << "Total Number Prefetches : " << m_outstanding_count - total_demand
        << endl << endl << endl;
#endif
}

void
Sequencer::printConfig(ostream& out) const
{
    out << "Sequencer config: " << m_name << endl
        << "  controller: " << m_controller->getName() << endl
        << "  version: " << m_version << endl
        << "  max_outstanding_requests: " << m_max_outstanding_requests << endl
        << "  deadlock_threshold: " << m_deadlock_threshold << endl;
}

// Insert the request on the correct request table.  Return true if
// the entry was already present.
bool
Sequencer::insertRequest(SequencerRequest* request)
{
    int total_outstanding =
        m_writeRequestTable.size() + m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    // See if we should schedule a deadlock check
    if (deadlockCheckEvent.scheduled() == false) {
        schedule(deadlockCheckEvent, m_deadlock_threshold + curTick());
    }

    Address line_addr(request->ruby_request.m_PhysicalAddress);
    line_addr.makeLineAddress();
    if ((request->ruby_request.m_Type == RubyRequestType_ST) ||
        (request->ruby_request.m_Type == RubyRequestType_ATOMIC) ||
        (request->ruby_request.m_Type == RubyRequestType_RMW_Read) ||
        (request->ruby_request.m_Type == RubyRequestType_RMW_Write) ||
        (request->ruby_request.m_Type == RubyRequestType_Load_Linked) ||
        (request->ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
        (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
        (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write) ||
        (request->ruby_request.m_Type == RubyRequestType_FLUSH)) {
        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(RequestTable::value_type(line_addr, 0));
        bool success = r.second;
        RequestTable::iterator i = r.first;
        if (!success) {
            i->second = request;
            // return true;

            // drh5: isn't this an error?  do you lose the initial request?
            assert(0);
        }
        i->second = request;
        m_outstanding_count++;
    } else {
        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(RequestTable::value_type(line_addr, 0));
        bool success = r.second;
        RequestTable::iterator i = r.first;
        if (!success) {
            i->second = request;
            // return true;

            // drh5: isn't this an error?  do you lose the initial request?
            assert(0);
        }
        i->second = request;
        m_outstanding_count++;
    }

    g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);

    total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
    assert(m_outstanding_count == total_outstanding);

    return false;
}

void
Sequencer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}

void
Sequencer::removeRequest(SequencerRequest* srequest)
{
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());

    const RubyRequest & ruby_request = srequest->ruby_request;
    Address line_addr(ruby_request.m_PhysicalAddress);
    line_addr.makeLineAddress();
    if ((ruby_request.m_Type == RubyRequestType_ST) ||
        (ruby_request.m_Type == RubyRequestType_RMW_Read) ||
        (ruby_request.m_Type == RubyRequestType_RMW_Write) ||
        (ruby_request.m_Type == RubyRequestType_Load_Linked) ||
        (ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
        (ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
        (ruby_request.m_Type == RubyRequestType_Locked_RMW_Write)) {
        m_writeRequestTable.erase(line_addr);
    } else {
        m_readRequestTable.erase(line_addr);
    }

    markRemoved();
}

bool
Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
{
    //
    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    //
    bool success = true;
    if (request->ruby_request.m_Type == RubyRequestType_Store_Conditional) {
        if (!m_dataCache_ptr->isLocked(address, m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->ruby_request.pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->ruby_request.pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        m_dataCache_ptr->clearLocked(address);
    } else if (request->ruby_request.m_Type == RubyRequestType_Load_Linked) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        m_dataCache_ptr->setLocked(address, m_version);
    } else if ((m_dataCache_ptr->isTagPresent(address)) &&
               (m_dataCache_ptr->isLocked(address, m_version))) {
        //
        // Normal writes should clear the locked address
        //
        m_dataCache_ptr->clearLocked(address);
    }
    return success;
}

void
Sequencer::writeCallback(const Address& address, DataBlock& data)
{
    writeCallback(address, GenericMachineType_NULL, data);
}

void
Sequencer::writeCallback(const Address& address,
                         GenericMachineType mach,
                         DataBlock& data)
{
    writeCallback(address, mach, data, 0, 0, 0);
}

void
Sequencer::writeCallback(const Address& address,
                         GenericMachineType mach,
                         DataBlock& data,
                         Time initialRequestTime,
                         Time forwardRequestTime,
                         Time firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_writeRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    SequencerRequest* request = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->ruby_request.m_Type == RubyRequestType_ST) ||
           (request->ruby_request.m_Type == RubyRequestType_ATOMIC) ||
           (request->ruby_request.m_Type == RubyRequestType_RMW_Read) ||
           (request->ruby_request.m_Type == RubyRequestType_RMW_Write) ||
           (request->ruby_request.m_Type == RubyRequestType_Load_Linked) ||
           (request->ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
           (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
           (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write) ||
           (request->ruby_request.m_Type == RubyRequestType_FLUSH));


    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    // Not valid for Network_test protocol
    //
    bool success = true;
    if (!m_usingNetworkTester)
        success = handleLlsc(address, request);

    if (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) {
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->ruby_request.m_Type ==
               RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, mach, data, success,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

void
Sequencer::readCallback(const Address& address, DataBlock& data)
{
    readCallback(address, GenericMachineType_NULL, data);
}

void
Sequencer::readCallback(const Address& address,
                        GenericMachineType mach,
                        DataBlock& data)
{
    readCallback(address, mach, data, 0, 0, 0);
}

void
Sequencer::readCallback(const Address& address,
                        GenericMachineType mach,
                        DataBlock& data,
                        Time initialRequestTime,
                        Time forwardRequestTime,
                        Time firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_readRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    SequencerRequest* request = i->second;

    m_readRequestTable.erase(i);
    markRemoved();

    assert((request->ruby_request.m_Type == RubyRequestType_LD) ||
           (request->ruby_request.m_Type == RubyRequestType_IFETCH));

    hitCallback(request, mach, data, true,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

void
Sequencer::hitCallback(SequencerRequest* srequest,
                       GenericMachineType mach,
                       DataBlock& data,
                       bool success,
                       Time initialRequestTime,
                       Time forwardRequestTime,
                       Time firstResponseTime)
{
    const RubyRequest & ruby_request = srequest->ruby_request;
    Address request_address(ruby_request.m_PhysicalAddress);
    Address request_line_address(ruby_request.m_PhysicalAddress);
    request_line_address.makeLineAddress();
    RubyRequestType type = ruby_request.m_Type;
    Time issued_time = srequest->issue_time;

    // Set this cache entry to the most recently used
    if (type == RubyRequestType_IFETCH) {
        if (m_instCache_ptr->isTagPresent(request_line_address))
            m_instCache_ptr->setMRU(request_line_address);
    } else {
        if (m_dataCache_ptr->isTagPresent(request_line_address))
            m_dataCache_ptr->setMRU(request_line_address);
    }

    assert(g_eventQueue_ptr->getTime() >= issued_time);
    Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

    // Profile the miss latency for all non-zero demand misses
    if (miss_latency != 0) {
        g_system_ptr->getProfiler()->missLatency(miss_latency, type, mach);

        if (mach == GenericMachineType_L1Cache_wCC) {
            g_system_ptr->getProfiler()->missLatencyWcc(issued_time,
                                                initialRequestTime,
                                                forwardRequestTime,
                                                firstResponseTime,
                                                g_eventQueue_ptr->getTime());
        }

        if (mach == GenericMachineType_Directory) {
            g_system_ptr->getProfiler()->missLatencyDir(issued_time,
                                                initialRequestTime,
                                                forwardRequestTime,
                                                firstResponseTime,
                                                g_eventQueue_ptr->getTime());
        }

        DPRINTFR(ProtocolTrace, "%7s %3s %10s%20s %6s>%-6s %s %d cycles\n",
                 g_eventQueue_ptr->getTime(), m_version, "Seq",
                 success ?
"Done" : "SC_Failed", "", "", 506 ruby_request.m_PhysicalAddress, miss_latency); 507 } 508#if 0 509 if (request.getPrefetch() == PrefetchBit_Yes) { 510 return; // Ignore the prefetch 511 } 512#endif 513 514 // update the data 515 if (ruby_request.data != NULL) { 516 if ((type == RubyRequestType_LD) || 517 (type == RubyRequestType_IFETCH) || 518 (type == RubyRequestType_RMW_Read) || 519 (type == RubyRequestType_Locked_RMW_Read) || 520 (type == RubyRequestType_Load_Linked)) { 521 memcpy(ruby_request.data, 522 data.getData(request_address.getOffset(), ruby_request.m_Size), 523 ruby_request.m_Size); 524 } else { 525 data.setData(ruby_request.data, request_address.getOffset(), 526 ruby_request.m_Size); 527 } 528 } else { 529 DPRINTF(MemoryAccess, 530 "WARNING. Data not transfered from Ruby to M5 for type %s\n", 531 RubyRequestType_to_string(type)); 532 } 533 534 // If using the RubyTester, update the RubyTester sender state's 535 // subBlock with the recieved data. The tester will later access 536 // this state. 537 // Note: RubyPort will access it's sender state before the 538 // RubyTester. 539 if (m_usingRubyTester) { 540 RubyPort::SenderState *requestSenderState = 541 safe_cast<RubyPort::SenderState*>(ruby_request.pkt->senderState); 542 RubyTester::SenderState* testerSenderState = 543 safe_cast<RubyTester::SenderState*>(requestSenderState->saved); 544 testerSenderState->subBlock->mergeFrom(data); 545 } 546 547 ruby_hit_callback(ruby_request.pkt); 548 delete srequest; 549} 550 551// Returns true if the sequencer already has a load or store outstanding 552RequestStatus 553Sequencer::getRequestStatus(const RubyRequest& request) 554{ 555 bool is_outstanding_store = 556 !!m_writeRequestTable.count(line_address(request.m_PhysicalAddress)); 557 bool is_outstanding_load = 558 !!m_readRequestTable.count(line_address(request.m_PhysicalAddress)); 559 if (is_outstanding_store) { 560 if ((request.m_Type == RubyRequestType_LD) || 561 (request.m_Type == RubyRequestType_IFETCH) || 562 (request.m_Type == RubyRequestType_RMW_Read)) { 563 m_store_waiting_on_load_cycles++; 564 } else { 565 m_store_waiting_on_store_cycles++; 566 } 567 return RequestStatus_Aliased; 568 } else if (is_outstanding_load) { 569 if ((request.m_Type == RubyRequestType_ST) || 570 (request.m_Type == RubyRequestType_RMW_Write)) { 571 m_load_waiting_on_store_cycles++; 572 } else { 573 m_load_waiting_on_load_cycles++; 574 } 575 return RequestStatus_Aliased; 576 } 577 578 if (m_outstanding_count >= m_max_outstanding_requests) { 579 return RequestStatus_BufferFull; 580 } 581 582 return RequestStatus_Ready; 583} 584 585bool 586Sequencer::empty() const 587{ 588 return m_writeRequestTable.empty() && m_readRequestTable.empty(); 589} 590 591RequestStatus 592Sequencer::makeRequest(const RubyRequest &request) 593{ 594 assert(request.m_PhysicalAddress.getOffset() + request.m_Size <= 595 RubySystem::getBlockSizeBytes()); 596 RequestStatus status = getRequestStatus(request); 597 if (status != RequestStatus_Ready) 598 return status; 599 600 SequencerRequest *srequest = 601 new SequencerRequest(request, g_eventQueue_ptr->getTime()); 602 bool found = insertRequest(srequest); 603 if (found) { 604 panic("Sequencer::makeRequest should never be called if the " 605 "request is already outstanding\n"); 606 return RequestStatus_NULL; 607 } 608 609 issueRequest(request); 610 611 // TODO: issue hardware prefetches here 612 return RequestStatus_Issued; 613} 614 615void 616Sequencer::issueRequest(const RubyRequest& request) 617{ 618 // TODO: Eliminate RubyRequest 
    // being copied again.

    RubyRequestType ctype;
    switch (request.m_Type) {
      case RubyRequestType_IFETCH:
        ctype = RubyRequestType_IFETCH;
        break;
      case RubyRequestType_LD:
        ctype = RubyRequestType_LD;
        break;
      case RubyRequestType_FLUSH:
        ctype = RubyRequestType_FLUSH;
        break;
      case RubyRequestType_ST:
      case RubyRequestType_RMW_Read:
      case RubyRequestType_RMW_Write:
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
        //
      case RubyRequestType_Locked_RMW_Read:
      case RubyRequestType_Locked_RMW_Write:
        ctype = RubyRequestType_ST;
        break;
        //
        // Alpha LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics.  In
        // particular, by identifying the operations as atomic, the protocol
        // should understand that migratory sharing optimizations should not
        // be performed (i.e. a load between the LL and SC should not steal
        // away exclusive permission).
        //
      case RubyRequestType_Load_Linked:
      case RubyRequestType_Store_Conditional:
      case RubyRequestType_ATOMIC:
        ctype = RubyRequestType_ATOMIC;
        break;
      default:
        assert(0);
    }

    RubyAccessMode amtype;
    switch (request.m_AccessMode) {
      case RubyAccessMode_User:
        amtype = RubyAccessMode_User;
        break;
      case RubyAccessMode_Supervisor:
        amtype = RubyAccessMode_Supervisor;
        break;
      case RubyAccessMode_Device:
        amtype = RubyAccessMode_User;
        break;
      default:
        assert(0);
    }

    Address line_addr(request.m_PhysicalAddress);
    line_addr.makeLineAddress();
    int proc_id = -1;
    if (request.pkt != NULL && request.pkt->req->hasContextId()) {
        proc_id = request.pkt->req->contextId();
    }
    RubyRequest *msg = new RubyRequest(request.m_PhysicalAddress.getAddress(),
                                       request.data, request.m_Size,
                                       request.m_ProgramCounter.getAddress(),
                                       ctype, amtype, request.pkt,
                                       PrefetchBit_No, proc_id);

    DPRINTFR(ProtocolTrace, "%7s %3s %10s%20s %6s>%-6s %s %s\n",
             g_eventQueue_ptr->getTime(), m_version, "Seq", "Begin", "", "",
             request.m_PhysicalAddress,
             RubyRequestType_to_string(request.m_Type));

    Time latency = 0;  // initialized to a null value

    if (request.m_Type == RubyRequestType_IFETCH)
        latency = m_instCache_ptr->getLatency();
    else
        latency = m_dataCache_ptr->getLatency();

    // Send the message to the cache controller
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, latency);
}

#if 0
bool
Sequencer::tryCacheAccess(const Address& addr, RubyRequestType type,
                          RubyAccessMode access_mode,
                          int size, DataBlock*& data_ptr)
{
    CacheMemory *cache =
        (type == RubyRequestType_IFETCH) ?
            m_instCache_ptr : m_dataCache_ptr;

    return cache->tryCacheAccess(line_address(addr), type, data_ptr);
}
#endif

template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map)
{
    typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin();
    typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end();

    out << "[";
    for (; i != end; ++i)
        out << " " << i->first << "=" << i->second;
    out << " ]";

    return out;
}

void
Sequencer::print(ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", read request table: " << m_readRequestTable
        << ", write request table: " << m_writeRequestTable
        << "]";
}

// This can be called from setState whenever coherence permissions are
// upgraded; when invoked, coherence violations will be checked for the
// given block.
void
Sequencer::checkCoherence(const Address& addr)
{
#ifdef CHECK_COHERENCE
    g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}