/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"

//Sequencer::Sequencer(int core_id, MessageBuffer* mandatory_q)

// Returned by makeRequest() when a store-conditional finds its line no
// longer locked (see the Locked_Write handling below).
#define LLSC_FAIL -2
      WARN_EXPR(m_version);
      WARN_EXPR(current_time);
      WARN_EXPR(request->issue_time);
      WARN_EXPR(current_time - request->issue_time);
      WARN_EXPR(keys.size());
      ERROR_MSG("Aborting");
    }
  }
  total_outstanding += m_writeRequestTable.size() + m_readRequestTable.size();

  assert(m_outstanding_count == total_outstanding);

  if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking
    g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
  } else {
    m_deadlock_check_scheduled = false;
  }
}

void Sequencer::printProgress(ostream& out) const {
  /*
  int total_demand = 0;
  out << "Sequencer Stats Version " << m_version << endl;
  out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
  out << "---------------" << endl;
  out << "outstanding requests" << endl;

  Vector<Address> rkeys = m_readRequestTable.keys();
  int read_size = rkeys.size();
  out << "proc " << m_version << " Read Requests = " << read_size << endl;
  // print the request table
  for (int i=0; i < read_size; ++i) {
    SequencerRequest* request = m_readRequestTable.lookup(rkeys[i]);
    out << "\tRequest[ " << i << " ] = " << request->type << " Address " << rkeys[i]
        << " Posted " << request->issue_time << " PF " << PrefetchBit_No << endl;
    total_demand++;
  }

  Vector<Address> wkeys = m_writeRequestTable.keys();
  int write_size = wkeys.size();
  out << "proc " << m_version << " Write Requests = " << write_size << endl;
  // print the request table
  for (int i=0; i < write_size; ++i) {
    CacheMsg& request = m_writeRequestTable.lookup(wkeys[i]);
    out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i]
        << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
    if (request.getPrefetch() == PrefetchBit_No) {
      total_demand++;
    }
  }

  out << endl;

  out << "Total Number Outstanding: " << m_outstanding_count << endl;
  out << "Total Number Demand     : " << total_demand << endl;
  out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl;
  out << endl;
  out << endl;
  */
}

void Sequencer::printConfig(ostream& out) const {
  out << "Sequencer config: " << m_name << endl;
  out << "  controller: " << m_controller->getName() << endl;
  out << "  version: " << m_version << endl;
  out << "  max_outstanding_requests: " << m_max_outstanding_requests << endl;
  out << "  deadlock_threshold: " << m_deadlock_threshold << endl;
}

// Insert the request on the correct request table.  Return true if
// the entry was already present.
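// (As written, finding an existing entry trips the assert below instead of
// returning true; the early-return path is commented out.)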
bool Sequencer::insertRequest(SequencerRequest* request) {
  int total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();

  assert(m_outstanding_count == total_outstanding);

  // See if we should schedule a deadlock check
  if (m_deadlock_check_scheduled == false) {
    g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
    m_deadlock_check_scheduled = true;
  }

  Address line_addr(request->ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((request->ruby_request.type == RubyRequestType_ST) ||
      (request->ruby_request.type == RubyRequestType_RMW_Read) ||
      (request->ruby_request.type == RubyRequestType_RMW_Write) ||
      (request->ruby_request.type == RubyRequestType_Locked_Read) ||
      (request->ruby_request.type == RubyRequestType_Locked_Write)) {
    if (m_writeRequestTable.exist(line_addr)) {
      m_writeRequestTable.lookup(line_addr) = request;
      // return true;
      assert(0); // drh5: isn't this an error? do you lose the initial request?
    }
    m_writeRequestTable.allocate(line_addr);
    m_writeRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  } else {
    if (m_readRequestTable.exist(line_addr)) {
      m_readRequestTable.lookup(line_addr) = request;
      // return true;
      assert(0); // drh5: isn't this an error? do you lose the initial request?
    }
    m_readRequestTable.allocate(line_addr);
    m_readRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  }

  g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);

  total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
  assert(m_outstanding_count == total_outstanding);

  return false;
}

void Sequencer::removeRequest(SequencerRequest* srequest) {

  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());

  const RubyRequest& ruby_request = srequest->ruby_request;
  Address line_addr(ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((ruby_request.type == RubyRequestType_ST) ||
      (ruby_request.type == RubyRequestType_RMW_Read) ||
      (ruby_request.type == RubyRequestType_RMW_Write) ||
      (ruby_request.type == RubyRequestType_Locked_Read) ||
      (ruby_request.type == RubyRequestType_Locked_Write)) {
    m_writeRequestTable.deallocate(line_addr);
  } else {
    m_readRequestTable.deallocate(line_addr);
  }
  m_outstanding_count--;

  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
}
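// Called by the cache controller when a store or atomic completes for this
// line: the outstanding entry is removed, LL/SC and RMW bookkeeping is
// updated (locking the line, setting/clearing the controller's atomic
// state), and the data is handed to hitCallback().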
void Sequencer::writeCallback(const Address& address, DataBlock& data) {

  assert(address == line_address(address));
  assert(m_writeRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_writeRequestTable.lookup(address);
  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_ST) ||
         (request->ruby_request.type == RubyRequestType_RMW_Read) ||
         (request->ruby_request.type == RubyRequestType_RMW_Write) ||
         (request->ruby_request.type == RubyRequestType_Locked_Read) ||
         (request->ruby_request.type == RubyRequestType_Locked_Write));
  // POLINA: the assumption is that atomics are only on data cache and not instruction cache
  if (request->ruby_request.type == RubyRequestType_Locked_Read) {
    m_dataCache_ptr->setLocked(address, m_version);
  }
  else if (request->ruby_request.type == RubyRequestType_RMW_Read) {
    m_controller->set_atomic(address);
  }
  else if (request->ruby_request.type == RubyRequestType_RMW_Write) {
    m_controller->clear_atomic();
  }

  hitCallback(request, data);
}

void Sequencer::readCallback(const Address& address, DataBlock& data) {

  assert(address == line_address(address));
  assert(m_readRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_readRequestTable.lookup(address);
  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_LD) ||
         (request->ruby_request.type == RubyRequestType_RMW_Read) ||
         (request->ruby_request.type == RubyRequestType_IFETCH));

  hitCallback(request, data);
}
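// Common completion path for reads and writes: touch the cache entry as
// most-recently-used, profile the miss latency, copy data between the
// DataBlock and the request buffer, and signal the requester through
// m_hit_callback.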
void Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data) {
  const RubyRequest& ruby_request = srequest->ruby_request;
  Address request_address(ruby_request.paddr);
  Address request_line_address(ruby_request.paddr);
  request_line_address.makeLineAddress();
  RubyRequestType type = ruby_request.type;
  Time issued_time = srequest->issue_time;

  // Set this cache entry to the most recently used
  if (type == RubyRequestType_IFETCH) {
    if (m_instCache_ptr->isTagPresent(request_line_address))
      m_instCache_ptr->setMRU(request_line_address);
  } else {
    if (m_dataCache_ptr->isTagPresent(request_line_address))
      m_dataCache_ptr->setMRU(request_line_address);
  }

  assert(g_eventQueue_ptr->getTime() >= issued_time);
  Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

  // Profile the miss latency for all non-zero demand misses
  if (miss_latency != 0) {
    g_system_ptr->getProfiler()->missLatency(miss_latency, type);

    if (Debug::getProtocolTrace()) {
      g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(ruby_request.paddr),
                                                     "", "Done", "", int_to_string(miss_latency)+" cycles");
    }
  }
  /*
  if (request.getPrefetch() == PrefetchBit_Yes) {
    return; // Ignore the prefetch
  }
  */

  // update the data
  if (ruby_request.data != NULL) {
    if ((type == RubyRequestType_LD) ||
        (type == RubyRequestType_IFETCH) ||
        (type == RubyRequestType_RMW_Read)) {
      memcpy(ruby_request.data, data.getData(request_address.getOffset(), ruby_request.len), ruby_request.len);
    } else {
      data.setData(ruby_request.data, request_address.getOffset(), ruby_request.len);
    }
  }

  m_hit_callback(srequest->id);
  delete srequest;
}

// Returns true if the sequencer can accept the request; false if it is at
// its outstanding-request limit, already has a load or store outstanding to
// the same line, or another processor's atomic sequence is in flight.
// Note: this is not a pure query -- for RMW requests it also updates
// m_servicing_atomic and m_atomics_counter.
bool Sequencer::isReady(const RubyRequest& request) {
  // POLINA: check if we are currently flushing the write buffer; if so,
  // Ruby is returned as not ready to simulate stalling of the front-end.
  // Do we stall all the sequencers? If it is an atomic instruction - yes!
  if (m_outstanding_count >= m_max_outstanding_requests) {
    return false;
  }

  if (m_writeRequestTable.exist(line_address(Address(request.paddr))) ||
      m_readRequestTable.exist(line_address(Address(request.paddr)))) {
    //cout << "OUTSTANDING REQUEST EXISTS " << p << " VER " << m_version << endl;
    //printProgress(cout);
    return false;
  }

  if (m_servicing_atomic != -1 && m_servicing_atomic != (int)request.proc_id) {
    assert(m_atomics_counter > 0);
    return false;
  }
  else {
    if (request.type == RubyRequestType_RMW_Read) {
      if (m_servicing_atomic == -1) {
        assert(m_atomics_counter == 0);
        m_servicing_atomic = (int)request.proc_id;
      }
      else {
        assert(m_servicing_atomic == (int)request.proc_id);
      }
      m_atomics_counter++;
    }
    else if (request.type == RubyRequestType_RMW_Write) {
      assert(m_servicing_atomic == (int)request.proc_id);
      assert(m_atomics_counter > 0);
      m_atomics_counter--;
      if (m_atomics_counter == 0) {
        m_servicing_atomic = -1;
      }
    }
  }

  return true;
}

bool Sequencer::empty() const {
  return (m_writeRequestTable.size() == 0) && (m_readRequestTable.size() == 0);
}

// Issue a new request if the sequencer can accept it.  Returns the unique
// request id on success, -1 if the sequencer is not ready, and LLSC_FAIL
// if a store-conditional finds its line no longer locked.
int64_t Sequencer::makeRequest(const RubyRequest& request)
{
  assert(Address(request.paddr).getOffset() + request.len <= RubySystem::getBlockSizeBytes());
  if (isReady(request)) {
    int64_t id = makeUniqueRequestID();
    SequencerRequest* srequest = new SequencerRequest(request, id, g_eventQueue_ptr->getTime());
    bool found = insertRequest(srequest);
    if (!found) {
      if (request.type == RubyRequestType_Locked_Write) {
        // NOTE: it is OK to check the locked flag here as the mandatory
        // queue will be checked first, ensuring that nothing comes between
        // checking the flag and servicing the store.
        if (!m_dataCache_ptr->isLocked(line_address(Address(request.paddr)), m_version)) {
          return LLSC_FAIL;
        }
        else {
          m_dataCache_ptr->clearLocked(line_address(Address(request.paddr)));
        }
      }
    }
    if (request.type == RubyRequestType_RMW_Write) {
      m_controller->started_writes();
    }
    issueRequest(request);

    // TODO: issue hardware prefetches here
    return id;
  }
  else {
    return -1;
  }
}
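// Translate the RubyRequest into a legacy CacheMsg and enqueue it on the
// mandatory queue with the issuing cache's lookup latency.  Locked reads
// and writes map to ST, and both RMW halves map to ATOMIC, since CacheMsg
// does not distinguish them further.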
void Sequencer::issueRequest(const RubyRequest& request) {

  // TODO: get rid of CacheMsg, CacheRequestType, and AccessModeType, and
  // have SLICC use RubyRequest and subtypes natively
  CacheRequestType ctype;
  switch (request.type) {
  case RubyRequestType_IFETCH:
    ctype = CacheRequestType_IFETCH;
    break;
  case RubyRequestType_LD:
    ctype = CacheRequestType_LD;
    break;
  case RubyRequestType_ST:
  case RubyRequestType_Locked_Read:
  case RubyRequestType_Locked_Write:
    ctype = CacheRequestType_ST;
    break;
  case RubyRequestType_RMW_Read:
  case RubyRequestType_RMW_Write:
    ctype = CacheRequestType_ATOMIC;
    break;
  default:
    assert(0);
  }
  AccessModeType amtype;
  switch (request.access_mode) {
  case RubyAccessMode_User:
    amtype = AccessModeType_UserMode;
    break;
  case RubyAccessMode_Supervisor:
    amtype = AccessModeType_SupervisorMode;
    break;
  case RubyAccessMode_Device:
    amtype = AccessModeType_UserMode;
    break;
  default:
    assert(0);
  }
  Address line_addr(request.paddr);
  line_addr.makeLineAddress();
  CacheMsg msg(line_addr, Address(request.paddr), ctype, Address(request.pc),
               amtype, request.len, PrefetchBit_No, request.proc_id);

  if (Debug::getProtocolTrace()) {
    g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(request.paddr),
                                                   "", "Begin", "", RubyRequestType_to_string(request.type));
  }

  if (g_system_ptr->getTracer()->traceEnabled()) {
    g_system_ptr->getTracer()->traceRequest(m_name, line_addr, Address(request.pc),
                                            request.type, g_eventQueue_ptr->getTime());
  }

  Time latency = 0; // initialized to a null value

  if (request.type == RubyRequestType_IFETCH)
    latency = m_instCache_ptr->getLatency();
  else
    latency = m_dataCache_ptr->getLatency();

  // Send the message to the cache controller
  assert(latency > 0);

  m_mandatory_q_ptr->enqueue(msg, latency);
}

/*
bool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
                               AccessModeType access_mode,
                               int size, DataBlock*& data_ptr) {
  if (type == CacheRequestType_IFETCH) {
    return m_instCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
  } else {
    return m_dataCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
  }
}
*/

void Sequencer::print(ostream& out) const {
  out << "[Sequencer: " << m_version
      << ", outstanding requests: " << m_outstanding_count;

  out << ", read request table: " << m_readRequestTable
      << ", write request table: " << m_writeRequestTable;
  out << "]";
}

// This can be called from setState whenever coherence permissions are
// upgraded; when invoked, coherence violations will be checked for the
// given block.
void Sequencer::checkCoherence(const Address& addr) {
#ifdef CHECK_COHERENCE
  g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}