Sequencer.cc revision 7023
1 2/* 3 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are 8 * met: redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer; 10 * redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution; 13 * neither the name of the copyright holders nor the names of its 14 * contributors may be used to endorse or promote products derived from 15 * this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30#include "mem/ruby/libruby.hh" 31#include "mem/ruby/common/Global.hh" 32#include "mem/ruby/system/Sequencer.hh" 33#include "mem/ruby/system/System.hh" 34#include "mem/protocol/Protocol.hh" 35#include "mem/ruby/profiler/Profiler.hh" 36#include "mem/ruby/system/CacheMemory.hh" 37#include "mem/protocol/CacheMsg.hh" 38#include "mem/ruby/recorder/Tracer.hh" 39#include "mem/ruby/common/SubBlock.hh" 40#include "mem/protocol/Protocol.hh" 41#include "mem/gems_common/Map.hh" 42#include "mem/ruby/buffers/MessageBuffer.hh" 43#include "mem/ruby/slicc_interface/AbstractController.hh" 44#include "cpu/rubytest/RubyTester.hh" 45 46#include "params/RubySequencer.hh" 47 48Sequencer * 49RubySequencerParams::create() 50{ 51 return new Sequencer(this); 52} 53 54Sequencer::Sequencer(const Params *p) 55 : RubyPort(p), deadlockCheckEvent(this) 56{ 57 m_store_waiting_on_load_cycles = 0; 58 m_store_waiting_on_store_cycles = 0; 59 m_load_waiting_on_store_cycles = 0; 60 m_load_waiting_on_load_cycles = 0; 61 62 m_outstanding_count = 0; 63 64 m_max_outstanding_requests = 0; 65 m_deadlock_threshold = 0; 66 m_instCache_ptr = NULL; 67 m_dataCache_ptr = NULL; 68 69 m_instCache_ptr = p->icache; 70 m_dataCache_ptr = p->dcache; 71 m_max_outstanding_requests = p->max_outstanding_requests; 72 m_deadlock_threshold = p->deadlock_threshold; 73 m_usingRubyTester = p->using_ruby_tester; 74 75 assert(m_max_outstanding_requests > 0); 76 assert(m_deadlock_threshold > 0); 77 assert(m_instCache_ptr != NULL); 78 assert(m_dataCache_ptr != NULL); 79} 80 81Sequencer::~Sequencer() { 82 83} 84 85void Sequencer::wakeup() { 86 // Check for deadlock of any of the requests 87 Time current_time = g_eventQueue_ptr->getTime(); 88 89 // Check across all outstanding requests 90 int total_outstanding = 0; 91 92 Vector<Address> keys = m_readRequestTable.keys(); 93 for (int i=0; i<keys.size(); i++) { 94 SequencerRequest* request = m_readRequestTable.lookup(keys[i]); 95 if (current_time - request->issue_time >= m_deadlock_threshold) { 96 WARN_MSG("Possible Deadlock detected"); 97 WARN_EXPR(request); 98 WARN_EXPR(m_version); 99 WARN_EXPR(request->ruby_request.paddr); 100 WARN_EXPR(keys.size()); 101 WARN_EXPR(current_time); 102 WARN_EXPR(request->issue_time); 103 WARN_EXPR(current_time - request->issue_time); 104 ERROR_MSG("Aborting"); 105 } 106 } 107 108 keys = m_writeRequestTable.keys(); 109 for (int i=0; i<keys.size(); i++) { 110 SequencerRequest* request = m_writeRequestTable.lookup(keys[i]); 111 if (current_time - request->issue_time >= m_deadlock_threshold) { 112 WARN_MSG("Possible Deadlock detected"); 113 WARN_EXPR(request); 114 WARN_EXPR(m_version); 115 WARN_EXPR(current_time); 116 WARN_EXPR(request->issue_time); 117 WARN_EXPR(current_time - request->issue_time); 118 WARN_EXPR(keys.size()); 119 ERROR_MSG("Aborting"); 120 } 121 } 122 total_outstanding += m_writeRequestTable.size() + m_readRequestTable.size(); 123 124 assert(m_outstanding_count == total_outstanding); 125 126 if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking 127 schedule(deadlockCheckEvent, 128 (m_deadlock_threshold * g_eventQueue_ptr->getClock()) + curTick); 129 } 130} 131 132void Sequencer::printStats(ostream & out) const { 133 out << "Sequencer: " << m_name << endl; 134 out << " store_waiting_on_load_cycles: " << m_store_waiting_on_load_cycles << endl; 135 out << " store_waiting_on_store_cycles: " << m_store_waiting_on_store_cycles << endl; 136 out << " load_waiting_on_load_cycles: " << m_load_waiting_on_load_cycles << endl; 137 out << " load_waiting_on_store_cycles: " << m_load_waiting_on_store_cycles << endl; 138} 139 140void Sequencer::printProgress(ostream& out) const{ 141 /* 142 int total_demand = 0; 143 out << "Sequencer Stats Version " << m_version << endl; 144 out << "Current time = " << g_eventQueue_ptr->getTime() << endl; 145 out << "---------------" << endl; 146 out << "outstanding requests" << endl; 147 148 Vector<Address> rkeys = m_readRequestTable.keys(); 149 int read_size = rkeys.size(); 150 out << "proc " << m_version << " Read Requests = " << read_size << endl; 151 // print the request table 152 for(int i=0; i < read_size; ++i){ 153 SequencerRequest * request = m_readRequestTable.lookup(rkeys[i]); 154 out << "\tRequest[ " << i << " ] = " << request->type << " Address " << rkeys[i] << " Posted " << request->issue_time << " PF " << PrefetchBit_No << endl; 155 total_demand++; 156 } 157 158 Vector<Address> wkeys = m_writeRequestTable.keys(); 159 int write_size = wkeys.size(); 160 out << "proc " << m_version << " Write Requests = " << write_size << endl; 161 // print the request table 162 for(int i=0; i < write_size; ++i){ 163 CacheMsg & request = m_writeRequestTable.lookup(wkeys[i]); 164 out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl; 165 if( request.getPrefetch() == PrefetchBit_No ){ 166 total_demand++; 167 } 168 } 169 170 out << endl; 171 172 out << "Total Number Outstanding: " << m_outstanding_count << endl; 173 out << "Total Number Demand : " << total_demand << endl; 174 out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl; 175 out << endl; 176 out << endl; 177 */ 178} 179 180void Sequencer::printConfig(ostream& out) const { 181 out << "Seqeuncer config: " << m_name << endl; 182 out << " controller: " << m_controller->getName() << endl; 183 out << " version: " << m_version << endl; 184 out << " max_outstanding_requests: " << m_max_outstanding_requests << endl; 185 out << " deadlock_threshold: " << m_deadlock_threshold << endl; 186} 187 188// Insert the request on the correct request table. Return true if 189// the entry was already present. 190bool Sequencer::insertRequest(SequencerRequest* request) { 191 int total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size(); 192 193 assert(m_outstanding_count == total_outstanding); 194 195 // See if we should schedule a deadlock check 196 if (deadlockCheckEvent.scheduled() == false) { 197 schedule(deadlockCheckEvent, m_deadlock_threshold + curTick); 198 } 199 200 Address line_addr(request->ruby_request.paddr); 201 line_addr.makeLineAddress(); 202 if ((request->ruby_request.type == RubyRequestType_ST) || 203 (request->ruby_request.type == RubyRequestType_RMW_Read) || 204 (request->ruby_request.type == RubyRequestType_RMW_Write) || 205 (request->ruby_request.type == RubyRequestType_Locked_Read) || 206 (request->ruby_request.type == RubyRequestType_Locked_Write)) { 207 if (m_writeRequestTable.exist(line_addr)) { 208 m_writeRequestTable.lookup(line_addr) = request; 209 // return true; 210 assert(0); // drh5: isn't this an error? do you lose the initial request? 211 } 212 m_writeRequestTable.allocate(line_addr); 213 m_writeRequestTable.lookup(line_addr) = request; 214 m_outstanding_count++; 215 } else { 216 if (m_readRequestTable.exist(line_addr)) { 217 m_readRequestTable.lookup(line_addr) = request; 218 // return true; 219 assert(0); // drh5: isn't this an error? do you lose the initial request? 220 } 221 m_readRequestTable.allocate(line_addr); 222 m_readRequestTable.lookup(line_addr) = request; 223 m_outstanding_count++; 224 } 225 226 g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count); 227 228 total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size(); 229 assert(m_outstanding_count == total_outstanding); 230 231 return false; 232} 233 234void Sequencer::removeRequest(SequencerRequest* srequest) { 235 236 assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size()); 237 238 const RubyRequest & ruby_request = srequest->ruby_request; 239 Address line_addr(ruby_request.paddr); 240 line_addr.makeLineAddress(); 241 if ((ruby_request.type == RubyRequestType_ST) || 242 (ruby_request.type == RubyRequestType_RMW_Read) || 243 (ruby_request.type == RubyRequestType_RMW_Write) || 244 (ruby_request.type == RubyRequestType_Locked_Read) || 245 (ruby_request.type == RubyRequestType_Locked_Write)) { 246 m_writeRequestTable.deallocate(line_addr); 247 } else { 248 m_readRequestTable.deallocate(line_addr); 249 } 250 m_outstanding_count--; 251 252 assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size()); 253} 254 255void Sequencer::writeCallback(const Address& address, DataBlock& data) { 256 257 assert(address == line_address(address)); 258 assert(m_writeRequestTable.exist(line_address(address))); 259 260 SequencerRequest* request = m_writeRequestTable.lookup(address); 261 262 removeRequest(request); 263 264 assert((request->ruby_request.type == RubyRequestType_ST) || 265 (request->ruby_request.type == RubyRequestType_RMW_Read) || 266 (request->ruby_request.type == RubyRequestType_RMW_Write) || 267 (request->ruby_request.type == RubyRequestType_Locked_Read) || 268 (request->ruby_request.type == RubyRequestType_Locked_Write)); 269 270 if (request->ruby_request.type == RubyRequestType_Locked_Read) { 271 m_dataCache_ptr->setLocked(address, m_version); 272 } 273 else if (request->ruby_request.type == RubyRequestType_RMW_Read) { 274 m_controller->blockOnQueue(address, m_mandatory_q_ptr); 275 } 276 else if (request->ruby_request.type == RubyRequestType_RMW_Write) { 277 m_controller->unblock(address); 278 } 279 280 hitCallback(request, data); 281} 282 283void Sequencer::readCallback(const Address& address, DataBlock& data) { 284 285 assert(address == line_address(address)); 286 assert(m_readRequestTable.exist(line_address(address))); 287 288 SequencerRequest* request = m_readRequestTable.lookup(address); 289 removeRequest(request); 290 291 assert((request->ruby_request.type == RubyRequestType_LD) || 292 (request->ruby_request.type == RubyRequestType_RMW_Read) || 293 (request->ruby_request.type == RubyRequestType_IFETCH)); 294 295 hitCallback(request, data); 296} 297 298void Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data) { 299 const RubyRequest & ruby_request = srequest->ruby_request; 300 Address request_address(ruby_request.paddr); 301 Address request_line_address(ruby_request.paddr); 302 request_line_address.makeLineAddress(); 303 RubyRequestType type = ruby_request.type; 304 Time issued_time = srequest->issue_time; 305 306 // Set this cache entry to the most recently used 307 if (type == RubyRequestType_IFETCH) { 308 if (m_instCache_ptr->isTagPresent(request_line_address) ) 309 m_instCache_ptr->setMRU(request_line_address); 310 } else { 311 if (m_dataCache_ptr->isTagPresent(request_line_address) ) 312 m_dataCache_ptr->setMRU(request_line_address); 313 } 314 315 assert(g_eventQueue_ptr->getTime() >= issued_time); 316 Time miss_latency = g_eventQueue_ptr->getTime() - issued_time; 317 318 // Profile the miss latency for all non-zero demand misses 319 if (miss_latency != 0) { 320 g_system_ptr->getProfiler()->missLatency(miss_latency, type); 321 322 if (Debug::getProtocolTrace()) { 323 g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(ruby_request.paddr), 324 "", "Done", "", int_to_string(miss_latency)+" cycles"); 325 } 326 } 327 /* 328 if (request.getPrefetch() == PrefetchBit_Yes) { 329 return; // Ignore the prefetch 330 } 331 */ 332 333 // update the data 334 if (ruby_request.data != NULL) { 335 if ((type == RubyRequestType_LD) || 336 (type == RubyRequestType_IFETCH) || 337 (type == RubyRequestType_RMW_Read) || 338 (type == RubyRequestType_Locked_Read)) { 339 340 memcpy(ruby_request.data, 341 data.getData(request_address.getOffset(), ruby_request.len), 342 ruby_request.len); 343 344 } else { 345 346 data.setData(ruby_request.data, 347 request_address.getOffset(), 348 ruby_request.len); 349 350 } 351 } else { 352 DPRINTF(MemoryAccess, 353 "WARNING. Data not transfered from Ruby to M5 for type %s\n", 354 RubyRequestType_to_string(type)); 355 } 356 357 // 358 // If using the RubyTester, update the RubyTester sender state's subBlock 359 // with the recieved data. The tester will later access this state. 360 // Note: RubyPort will access it's sender state before the RubyTester. 361 // 362 if (m_usingRubyTester) { 363 RubyTester::SenderState* testerSenderState; 364 testerSenderState = safe_cast<RubyTester::SenderState*>( \ 365 safe_cast<RubyPort::SenderState*>(ruby_request.pkt->senderState)->saved); 366 testerSenderState->subBlock->mergeFrom(data); 367 } 368 369 ruby_hit_callback(ruby_request.pkt); 370 delete srequest; 371} 372 373// Returns true if the sequencer already has a load or store outstanding 374RequestStatus Sequencer::getRequestStatus(const RubyRequest& request) { 375 bool is_outstanding_store = m_writeRequestTable.exist(line_address(Address(request.paddr))); 376 bool is_outstanding_load = m_readRequestTable.exist(line_address(Address(request.paddr))); 377 if ( is_outstanding_store ) { 378 if ((request.type == RubyRequestType_LD) || 379 (request.type == RubyRequestType_IFETCH) || 380 (request.type == RubyRequestType_RMW_Read)) { 381 m_store_waiting_on_load_cycles++; 382 } else { 383 m_store_waiting_on_store_cycles++; 384 } 385 return RequestStatus_Aliased; 386 } else if ( is_outstanding_load ) { 387 if ((request.type == RubyRequestType_ST) || 388 (request.type == RubyRequestType_RMW_Write) ) { 389 m_load_waiting_on_store_cycles++; 390 } else { 391 m_load_waiting_on_load_cycles++; 392 } 393 return RequestStatus_Aliased; 394 } 395 396 if (m_outstanding_count >= m_max_outstanding_requests) { 397 return RequestStatus_BufferFull; 398 } 399 400 return RequestStatus_Ready; 401} 402 403bool Sequencer::empty() const { 404 return (m_writeRequestTable.size() == 0) && (m_readRequestTable.size() == 0); 405} 406 407 408RequestStatus Sequencer::makeRequest(const RubyRequest & request) 409{ 410 assert(Address(request.paddr).getOffset() + request.len <= 411 RubySystem::getBlockSizeBytes()); 412 RequestStatus status = getRequestStatus(request); 413 if (status == RequestStatus_Ready) { 414 SequencerRequest *srequest = new SequencerRequest(request, 415 g_eventQueue_ptr->getTime()); 416 bool found = insertRequest(srequest); 417 if (!found) { 418 if (request.type == RubyRequestType_Locked_Write) { 419 // 420 // NOTE: it is OK to check the locked flag here as the mandatory queue 421 // will be checked first ensuring that nothing comes between checking 422 // the flag and servicing the store. 423 // 424 if (!m_dataCache_ptr->isLocked(line_address(Address(request.paddr)), 425 m_version)) { 426 removeRequest(srequest); 427 if (Debug::getProtocolTrace()) { 428 429 g_system_ptr->getProfiler()->profileTransition("Seq", 430 m_version, 431 Address(request.paddr), 432 "", 433 "SC Fail", 434 "", 435 RubyRequestType_to_string(request.type)); 436 437 } 438 return RequestStatus_LlscFailed; 439 } 440 else { 441 m_dataCache_ptr->clearLocked(line_address(Address(request.paddr))); 442 } 443 } 444 issueRequest(request); 445 446 // TODO: issue hardware prefetches here 447 return RequestStatus_Issued; 448 } 449 else { 450 panic("Sequencer::makeRequest should never be called if the request"\ 451 "is already outstanding\n"); 452 return RequestStatus_NULL; 453 } 454 } else { 455 return status; 456 } 457} 458 459void Sequencer::issueRequest(const RubyRequest& request) { 460 461 // TODO: get rid of CacheMsg, CacheRequestType, and AccessModeTYpe, & have SLICC use RubyRequest and subtypes natively 462 CacheRequestType ctype; 463 switch(request.type) { 464 case RubyRequestType_IFETCH: 465 ctype = CacheRequestType_IFETCH; 466 break; 467 case RubyRequestType_LD: 468 ctype = CacheRequestType_LD; 469 break; 470 case RubyRequestType_ST: 471 ctype = CacheRequestType_ST; 472 break; 473 case RubyRequestType_Locked_Read: 474 case RubyRequestType_Locked_Write: 475 ctype = CacheRequestType_ATOMIC; 476 break; 477 case RubyRequestType_RMW_Read: 478 ctype = CacheRequestType_ATOMIC; 479 break; 480 case RubyRequestType_RMW_Write: 481 ctype = CacheRequestType_ATOMIC; 482 break; 483 default: 484 assert(0); 485 } 486 AccessModeType amtype; 487 switch(request.access_mode){ 488 case RubyAccessMode_User: 489 amtype = AccessModeType_UserMode; 490 break; 491 case RubyAccessMode_Supervisor: 492 amtype = AccessModeType_SupervisorMode; 493 break; 494 case RubyAccessMode_Device: 495 amtype = AccessModeType_UserMode; 496 break; 497 default: 498 assert(0); 499 } 500 Address line_addr(request.paddr); 501 line_addr.makeLineAddress(); 502 CacheMsg msg(line_addr, Address(request.paddr), ctype, Address(request.pc), amtype, request.len, PrefetchBit_No, request.proc_id); 503 504 if (Debug::getProtocolTrace()) { 505 g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(request.paddr), 506 "", "Begin", "", RubyRequestType_to_string(request.type)); 507 } 508 509 if (g_system_ptr->getTracer()->traceEnabled()) { 510 g_system_ptr->getTracer()->traceRequest(this, line_addr, Address(request.pc), 511 request.type, g_eventQueue_ptr->getTime()); 512 } 513 514 Time latency = 0; // initialzed to an null value 515 516 if (request.type == RubyRequestType_IFETCH) 517 latency = m_instCache_ptr->getLatency(); 518 else 519 latency = m_dataCache_ptr->getLatency(); 520 521 // Send the message to the cache controller 522 assert(latency > 0); 523 524 assert(m_mandatory_q_ptr != NULL); 525 m_mandatory_q_ptr->enqueue(msg, latency); 526} 527/* 528bool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type, 529 AccessModeType access_mode, 530 int size, DataBlock*& data_ptr) { 531 if (type == CacheRequestType_IFETCH) { 532 return m_instCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr); 533 } else { 534 return m_dataCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr); 535 } 536} 537*/ 538 539void Sequencer::print(ostream& out) const { 540 out << "[Sequencer: " << m_version 541 << ", outstanding requests: " << m_outstanding_count; 542 543 out << ", read request table: " << m_readRequestTable 544 << ", write request table: " << m_writeRequestTable; 545 out << "]"; 546} 547 548// this can be called from setState whenever coherence permissions are upgraded 549// when invoked, coherence violations will be checked for the given block 550void Sequencer::checkCoherence(const Address& addr) { 551#ifdef CHECK_COHERENCE 552 g_system_ptr->checkGlobalCoherenceInvariant(addr); 553#endif 554} 555 556