Sequencer.cc revision 7056:b66b558578bd
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "base/str.hh"
#include "cpu/rubytest/RubyTester.hh"
#include "mem/gems_common/Map.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/ruby/libruby.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "params/RubySequencer.hh"

using namespace std;

Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

Sequencer::Sequencer(const Params *p)
    : RubyPort(p), deadlockCheckEvent(this)
{
    m_store_waiting_on_load_cycles = 0;
    m_store_waiting_on_store_cycles = 0;
    m_load_waiting_on_store_cycles = 0;
    m_load_waiting_on_load_cycles = 0;

    m_outstanding_count = 0;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;
    m_usingRubyTester = p->using_ruby_tester;

    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);
}

Sequencer::~Sequencer()
{
}

void
Sequencer::wakeup()
{
    // Check for deadlock of any of the requests
    Time current_time = g_eventQueue_ptr->getTime();

    // Check across all outstanding requests
    int total_outstanding = 0;

    Vector<Address> keys = m_readRequestTable.keys();
    for (int i = 0; i < keys.size(); i++) {
        SequencerRequest* request = m_readRequestTable.lookup(keys[i]);
        if (current_time - request->issue_time >= m_deadlock_threshold) {
            WARN_MSG("Possible Deadlock detected");
            WARN_EXPR(request);
            WARN_EXPR(m_version);
            WARN_EXPR(request->ruby_request.paddr);
            WARN_EXPR(keys.size());
            WARN_EXPR(current_time);
            WARN_EXPR(request->issue_time);
            WARN_EXPR(current_time - request->issue_time);
            ERROR_MSG("Aborting");
        }
    }

    keys = m_writeRequestTable.keys();
    for (int i = 0; i < keys.size(); i++) {
        SequencerRequest* request = m_writeRequestTable.lookup(keys[i]);
        if (current_time - request->issue_time >= m_deadlock_threshold) {
            WARN_MSG("Possible Deadlock detected");
            WARN_EXPR(request);
            WARN_EXPR(m_version);
            WARN_EXPR(current_time);
            WARN_EXPR(request->issue_time);
            WARN_EXPR(current_time - request->issue_time);
            WARN_EXPR(keys.size());
            ERROR_MSG("Aborting");
        }
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent,
                 m_deadlock_threshold * g_eventQueue_ptr->getClock() +
                 curTick);
    }
}

void
Sequencer::printStats(ostream & out) const
{
    out << "Sequencer: " << m_name << endl
        << "  store_waiting_on_load_cycles: "
        << m_store_waiting_on_load_cycles << endl
        << "  store_waiting_on_store_cycles: "
        << m_store_waiting_on_store_cycles << endl
        << "  load_waiting_on_load_cycles: "
        << m_load_waiting_on_load_cycles << endl
        << "  load_waiting_on_store_cycles: "
        << m_load_waiting_on_store_cycles << endl;
}

void
Sequencer::printProgress(ostream& out) const
{
#if 0
    int total_demand = 0;
    out << "Sequencer Stats Version " << m_version << endl;
    out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
    out << "---------------" << endl;
    out << "outstanding requests" << endl;

    Vector<Address> rkeys = m_readRequestTable.keys();
    int read_size = rkeys.size();
    out << "proc " << m_version << " Read Requests = " << read_size << endl;

    // print the request table
    for (int i = 0; i < read_size; ++i) {
        SequencerRequest *request = m_readRequestTable.lookup(rkeys[i]);
        out << "\tRequest[ " << i << " ] = " << request->type
            << " Address " << rkeys[i]
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    Vector<Address> wkeys = m_writeRequestTable.keys();
    int write_size = wkeys.size();
    out << "proc " << m_version << " Write Requests = " << write_size << endl;

    // print the request table
    for (int i = 0; i < write_size; ++i) {
        CacheMsg &request = m_writeRequestTable.lookup(wkeys[i]);
        out << "\tRequest[ " << i << " ] = " << request.getType()
            << " Address " << wkeys[i]
            << " Posted " << request.getTime()
            << " PF " << request.getPrefetch() << endl;
        if (request.getPrefetch() == PrefetchBit_No) {
            total_demand++;
        }
    }

    out << endl;

    out << "Total Number Outstanding: " << m_outstanding_count << endl
        << "Total Number Demand     : " << total_demand << endl
        << "Total Number Prefetches : " << m_outstanding_count - total_demand
        << endl << endl << endl;
#endif
}

void
Sequencer::printConfig(ostream& out) const
{
config: " << m_name << endl 206 << " controller: " << m_controller->getName() << endl 207 << " version: " << m_version << endl 208 << " max_outstanding_requests: " << m_max_outstanding_requests << endl 209 << " deadlock_threshold: " << m_deadlock_threshold << endl; 210} 211 212// Insert the request on the correct request table. Return true if 213// the entry was already present. 214bool 215Sequencer::insertRequest(SequencerRequest* request) 216{ 217 int total_outstanding = 218 m_writeRequestTable.size() + m_readRequestTable.size(); 219 220 assert(m_outstanding_count == total_outstanding); 221 222 // See if we should schedule a deadlock check 223 if (deadlockCheckEvent.scheduled() == false) { 224 schedule(deadlockCheckEvent, m_deadlock_threshold + curTick); 225 } 226 227 Address line_addr(request->ruby_request.paddr); 228 line_addr.makeLineAddress(); 229 if ((request->ruby_request.type == RubyRequestType_ST) || 230 (request->ruby_request.type == RubyRequestType_RMW_Read) || 231 (request->ruby_request.type == RubyRequestType_RMW_Write) || 232 (request->ruby_request.type == RubyRequestType_Locked_Read) || 233 (request->ruby_request.type == RubyRequestType_Locked_Write)) { 234 if (m_writeRequestTable.exist(line_addr)) { 235 m_writeRequestTable.lookup(line_addr) = request; 236 // return true; 237 238 // drh5: isn't this an error? do you lose the initial request? 239 assert(0); 240 } 241 m_writeRequestTable.allocate(line_addr); 242 m_writeRequestTable.lookup(line_addr) = request; 243 m_outstanding_count++; 244 } else { 245 if (m_readRequestTable.exist(line_addr)) { 246 m_readRequestTable.lookup(line_addr) = request; 247 // return true; 248 249 // drh5: isn't this an error? do you lose the initial request? 250 assert(0); 251 } 252 m_readRequestTable.allocate(line_addr); 253 m_readRequestTable.lookup(line_addr) = request; 254 m_outstanding_count++; 255 } 256 257 g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count); 258 259 total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size(); 260 assert(m_outstanding_count == total_outstanding); 261 262 return false; 263} 264 265void 266Sequencer::removeRequest(SequencerRequest* srequest) 267{ 268 assert(m_outstanding_count == 269 m_writeRequestTable.size() + m_readRequestTable.size()); 270 271 const RubyRequest & ruby_request = srequest->ruby_request; 272 Address line_addr(ruby_request.paddr); 273 line_addr.makeLineAddress(); 274 if ((ruby_request.type == RubyRequestType_ST) || 275 (ruby_request.type == RubyRequestType_RMW_Read) || 276 (ruby_request.type == RubyRequestType_RMW_Write) || 277 (ruby_request.type == RubyRequestType_Locked_Read) || 278 (ruby_request.type == RubyRequestType_Locked_Write)) { 279 m_writeRequestTable.deallocate(line_addr); 280 } else { 281 m_readRequestTable.deallocate(line_addr); 282 } 283 m_outstanding_count--; 284 285 assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size()); 286} 287 288void 289Sequencer::writeCallback(const Address& address, DataBlock& data) 290{ 291 assert(address == line_address(address)); 292 assert(m_writeRequestTable.exist(line_address(address))); 293 294 SequencerRequest* request = m_writeRequestTable.lookup(address); 295 296 removeRequest(request); 297 298 assert((request->ruby_request.type == RubyRequestType_ST) || 299 (request->ruby_request.type == RubyRequestType_RMW_Read) || 300 (request->ruby_request.type == RubyRequestType_RMW_Write) || 301 (request->ruby_request.type == RubyRequestType_Locked_Read) || 302 (request->ruby_request.type == 
           (request->ruby_request.type == RubyRequestType_Locked_Write));

    if (request->ruby_request.type == RubyRequestType_Locked_Read) {
        m_dataCache_ptr->setLocked(address, m_version);
    } else if (request->ruby_request.type == RubyRequestType_RMW_Read) {
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->ruby_request.type == RubyRequestType_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, data);
}

void
Sequencer::readCallback(const Address& address, DataBlock& data)
{
    assert(address == line_address(address));
    assert(m_readRequestTable.exist(line_address(address)));

    SequencerRequest* request = m_readRequestTable.lookup(address);
    removeRequest(request);

    assert((request->ruby_request.type == RubyRequestType_LD) ||
           (request->ruby_request.type == RubyRequestType_RMW_Read) ||
           (request->ruby_request.type == RubyRequestType_IFETCH));

    hitCallback(request, data);
}

void
Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data)
{
    const RubyRequest & ruby_request = srequest->ruby_request;
    Address request_address(ruby_request.paddr);
    Address request_line_address(ruby_request.paddr);
    request_line_address.makeLineAddress();
    RubyRequestType type = ruby_request.type;
    Time issued_time = srequest->issue_time;

    // Set this cache entry to the most recently used
    if (type == RubyRequestType_IFETCH) {
        if (m_instCache_ptr->isTagPresent(request_line_address))
            m_instCache_ptr->setMRU(request_line_address);
    } else {
        if (m_dataCache_ptr->isTagPresent(request_line_address))
            m_dataCache_ptr->setMRU(request_line_address);
    }

    assert(g_eventQueue_ptr->getTime() >= issued_time);
    Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

    // Profile the miss latency for all non-zero demand misses
    if (miss_latency != 0) {
        g_system_ptr->getProfiler()->missLatency(miss_latency, type);

        if (Debug::getProtocolTrace()) {
            g_system_ptr->getProfiler()->
                profileTransition("Seq", m_version,
                                  Address(ruby_request.paddr), "", "Done", "",
                                  csprintf("%d cycles", miss_latency));
        }
    }
#if 0
    if (request.getPrefetch() == PrefetchBit_Yes) {
        return; // Ignore the prefetch
    }
#endif

    // update the data
    if (ruby_request.data != NULL) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_Read)) {

            memcpy(ruby_request.data,
                   data.getData(request_address.getOffset(), ruby_request.len),
                   ruby_request.len);
        } else {
            data.setData(ruby_request.data, request_address.getOffset(),
                         ruby_request.len);
        }
    } else {
        DPRINTF(MemoryAccess,
                "WARNING.  Data not transferred from Ruby to M5 for type %s\n",
                RubyRequestType_to_string(type));
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data.  The tester will later access
    // this state.
    // Note: RubyPort will access its sender state before the
    // RubyTester.
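    //
    // Illustrative sketch (comments only) of the sender state chain
    // assumed by the casts below:
    //
    //   pkt->senderState                  -> RubyPort::SenderState
    //   RubyPort::SenderState::saved      -> RubyTester::SenderState
    //   RubyTester::SenderState::subBlock -> sub-block the tester checks
    //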
    if (m_usingRubyTester) {
        RubyPort::SenderState *requestSenderState =
            safe_cast<RubyPort::SenderState*>(ruby_request.pkt->senderState);
        RubyTester::SenderState* testerSenderState =
            safe_cast<RubyTester::SenderState*>(requestSenderState->saved);
        testerSenderState->subBlock->mergeFrom(data);
    }

    ruby_hit_callback(ruby_request.pkt);
    delete srequest;
}

// Returns the status of the request: Aliased if the sequencer already
// has an outstanding load or store to the same cache line, BufferFull
// if too many requests are outstanding, and Ready otherwise.
RequestStatus
Sequencer::getRequestStatus(const RubyRequest& request)
{
    bool is_outstanding_store =
        m_writeRequestTable.exist(line_address(Address(request.paddr)));
    bool is_outstanding_load =
        m_readRequestTable.exist(line_address(Address(request.paddr)));
    if (is_outstanding_store) {
        if ((request.type == RubyRequestType_LD) ||
            (request.type == RubyRequestType_IFETCH) ||
            (request.type == RubyRequestType_RMW_Read)) {
            m_store_waiting_on_load_cycles++;
        } else {
            m_store_waiting_on_store_cycles++;
        }
        return RequestStatus_Aliased;
    } else if (is_outstanding_load) {
        if ((request.type == RubyRequestType_ST) ||
            (request.type == RubyRequestType_RMW_Write)) {
            m_load_waiting_on_store_cycles++;
        } else {
            m_load_waiting_on_load_cycles++;
        }
        return RequestStatus_Aliased;
    }

    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    return RequestStatus_Ready;
}

bool
Sequencer::empty() const
{
    return m_writeRequestTable.size() == 0 && m_readRequestTable.size() == 0;
}

RequestStatus
Sequencer::makeRequest(const RubyRequest &request)
{
    assert(Address(request.paddr).getOffset() + request.len <=
           RubySystem::getBlockSizeBytes());
    RequestStatus status = getRequestStatus(request);
    if (status != RequestStatus_Ready)
        return status;

    SequencerRequest *srequest =
        new SequencerRequest(request, g_eventQueue_ptr->getTime());
    bool found = insertRequest(srequest);
    if (found) {
        panic("Sequencer::makeRequest should never be called if the "
              "request is already outstanding\n");
        return RequestStatus_NULL;
    }

    if (request.type == RubyRequestType_Locked_Write) {
        // NOTE: it is OK to check the locked flag here as the
        // mandatory queue will be checked first, ensuring that nothing
        // comes between checking the flag and servicing the store.
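        //
        // Illustrative LL/SC sequence (a sketch, not executed here):
        //
        //   Locked_Read  (LL) -> writeCallback() sets the lock via
        //                        m_dataCache_ptr->setLocked(addr, m_version)
        //   Locked_Write (SC) -> the check below succeeds only if the
        //                        line is still locked by this context;
        //                        otherwise the request is removed and
        //                        RequestStatus_LlscFailed is returned
        //                        without issuing the store.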

        Address line_addr = line_address(Address(request.paddr));
        if (!m_dataCache_ptr->isLocked(line_addr, m_version)) {
            removeRequest(srequest);
            if (Debug::getProtocolTrace()) {
                g_system_ptr->getProfiler()->
                    profileTransition("Seq", m_version,
                                      Address(request.paddr),
                                      "", "SC Fail", "",
                                      RubyRequestType_to_string(request.type));
            }
            return RequestStatus_LlscFailed;
        } else {
            m_dataCache_ptr->clearLocked(line_addr);
        }
    }
    issueRequest(request);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}

void
Sequencer::issueRequest(const RubyRequest& request)
{
    // TODO: get rid of CacheMsg, CacheRequestType, and
    // AccessModeType, & have SLICC use RubyRequest and subtypes
    // natively
    CacheRequestType ctype;
    switch(request.type) {
      case RubyRequestType_IFETCH:
        ctype = CacheRequestType_IFETCH;
        break;
      case RubyRequestType_LD:
        ctype = CacheRequestType_LD;
        break;
      case RubyRequestType_ST:
        ctype = CacheRequestType_ST;
        break;
      case RubyRequestType_Locked_Read:
      case RubyRequestType_Locked_Write:
      case RubyRequestType_RMW_Read:
      case RubyRequestType_RMW_Write:
        ctype = CacheRequestType_ATOMIC;
        break;
      default:
        assert(0);
    }

    AccessModeType amtype;
    switch(request.access_mode){
      case RubyAccessMode_User:
        amtype = AccessModeType_UserMode;
        break;
      case RubyAccessMode_Supervisor:
        amtype = AccessModeType_SupervisorMode;
        break;
      case RubyAccessMode_Device:
        amtype = AccessModeType_UserMode;
        break;
      default:
        assert(0);
    }

    Address line_addr(request.paddr);
    line_addr.makeLineAddress();
    CacheMsg msg(line_addr, Address(request.paddr), ctype,
                 Address(request.pc), amtype, request.len, PrefetchBit_No,
                 request.proc_id);

    if (Debug::getProtocolTrace()) {
        g_system_ptr->getProfiler()->
            profileTransition("Seq", m_version, Address(request.paddr),
                              "", "Begin", "",
                              RubyRequestType_to_string(request.type));
    }

    if (g_system_ptr->getTracer()->traceEnabled()) {
        g_system_ptr->getTracer()->
            traceRequest(this, line_addr, Address(request.pc),
                         request.type, g_eventQueue_ptr->getTime());
    }

    Time latency = 0;  // initialized to an invalid value; set below

    if (request.type == RubyRequestType_IFETCH)
        latency = m_instCache_ptr->getLatency();
    else
        latency = m_dataCache_ptr->getLatency();

    // Send the message to the cache controller
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, latency);
}

#if 0
bool
Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
                          AccessModeType access_mode,
                          int size, DataBlock*& data_ptr)
{
    CacheMemory *cache =
        (type == CacheRequestType_IFETCH) ? m_instCache_ptr : m_dataCache_ptr;

    return cache->tryCacheAccess(line_address(addr), type, data_ptr);
}
#endif

void
Sequencer::print(ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", read request table: " << m_readRequestTable
        << ", write request table: " << m_writeRequestTable
        << "]";
}

// This can be called from setState whenever coherence permissions are
// upgraded; when invoked, coherence violations will be checked for the
// given block.
void
Sequencer::checkCoherence(const Address& addr)
{
#ifdef CHECK_COHERENCE
    g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}
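
// ----------------------------------------------------------------------
// Illustrative caller-side flow (a sketch only; the sequencer pointer
// and the exact RubyRequest constructor arguments are assumptions, not
// taken from this file):
//
//   RubyRequest req(paddr, data, len, pc, RubyRequestType_LD,
//                   RubyAccessMode_Supervisor, pkt);
//   RequestStatus status = sequencer->makeRequest(req);
//
//   RequestStatus_Issued     -> enqueued on the mandatory queue; the
//                               reply later arrives via readCallback()/
//                               writeCallback(), which end in
//                               ruby_hit_callback(pkt)
//   RequestStatus_Aliased    -> an outstanding request to the same
//                               cache line exists; the caller retries
//   RequestStatus_BufferFull -> m_max_outstanding_requests reached
//   RequestStatus_LlscFailed -> a store-conditional lost its lock
// ----------------------------------------------------------------------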