Sequencer.cc: diff of revision 6845:9740ade45962 (old) against 6846:60e0df8086f0 (new)
 /*
  * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
  * met: redistributions of source code must retain the above copyright

--- 48 unchanged lines hidden (view full) ---

   m_outstanding_count = 0;

   m_max_outstanding_requests = 0;
   m_deadlock_threshold = 0;
   m_version = -1;
   m_instCache_ptr = NULL;
   m_dataCache_ptr = NULL;
   m_controller = NULL;
-  m_servicing_atomic = 200;
-  m_atomics_counter = 0;
+  m_atomic_reads = 0;
+  m_atomic_writes = 0;
   for (size_t i=0; i<argv.size(); i+=2) {
     if ( argv[i] == "controller") {
       m_controller = RubySystem::getController(argv[i+1]); // args[i] = "L1Cache"
       m_mandatory_q_ptr = m_controller->getMandatoryQueue();
     } else if ( argv[i] == "icache")
       m_instCache_ptr = RubySystem::getCache(argv[i+1]);
     else if ( argv[i] == "dcache")
       m_dataCache_ptr = RubySystem::getCache(argv[i+1]);

--- 185 unchanged lines hidden (view full) ---
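The constructor hunk above swaps the sequencer's atomic-tracking state: the old fields were m_servicing_atomic (the id of the processor currently servicing an atomic sequence, with 200 as the "no owner" sentinel) and m_atomics_counter; the new fields, m_atomic_reads and m_atomic_writes, simply count the read and write halves of outstanding read-modify-write operations. Below is a minimal standalone sketch of the pairing invariant the new counters enforce; only the member names come from the diff, the struct and driver are invented for illustration.

// Minimal standalone sketch (not Ruby's code) of the new RMW bookkeeping:
// every RMW_Write must pair with an earlier RMW_Read, and both counters
// reset once the pairs balance.
#include <cassert>
#include <cstdio>

struct AtomicBookkeeping {
    int m_atomic_reads  = 0;
    int m_atomic_writes = 0;

    void onRMWRead() {
        assert(m_atomic_writes == 0);             // no write half may be pending
        ++m_atomic_reads;
    }
    void onRMWWrite() {
        assert(m_atomic_reads > 0);               // a read half must exist
        assert(m_atomic_writes < m_atomic_reads); // writes never outrun reads
        if (++m_atomic_writes == m_atomic_reads) {
            m_atomic_reads = m_atomic_writes = 0; // sequence complete
        }
    }
};

int main() {
    AtomicBookkeeping b;
    b.onRMWRead();   // e.g. the load half of a SWAP
    b.onRMWRead();   // a second outstanding read half
    b.onRMWWrite();
    b.onRMWWrite();  // balances: both counters reset to zero
    std::printf("reads=%d writes=%d\n", b.m_atomic_reads, b.m_atomic_writes);
    return 0;
}

Counting both halves lets the sequencer detect a half-finished atomic sequence (reads ahead of writes) without remembering which processor started it.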

 }

 void Sequencer::writeCallback(const Address& address, DataBlock& data) {

   assert(address == line_address(address));
   assert(m_writeRequestTable.exist(line_address(address)));

   SequencerRequest* request = m_writeRequestTable.lookup(address);
+
   removeRequest(request);

   assert((request->ruby_request.type == RubyRequestType_ST) ||
          (request->ruby_request.type == RubyRequestType_RMW_Read) ||
          (request->ruby_request.type == RubyRequestType_RMW_Write) ||
          (request->ruby_request.type == RubyRequestType_Locked_Read) ||
          (request->ruby_request.type == RubyRequestType_Locked_Write));
   // POLINA: the assumption is that atomics go only to the data cache, never the instruction cache
   if (request->ruby_request.type == RubyRequestType_Locked_Read) {
     m_dataCache_ptr->setLocked(address, m_version);
   }
   else if (request->ruby_request.type == RubyRequestType_RMW_Read) {
     m_controller->set_atomic(address);
   }
   else if (request->ruby_request.type == RubyRequestType_RMW_Write) {
-    m_controller->clear_atomic();
+    m_controller->clear_atomic(address);
   }

   hitCallback(request, data);
 }

 void Sequencer::readCallback(const Address& address, DataBlock& data) {

   assert(address == line_address(address));

--- 49 unchanged lines hidden (view full) ---
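In writeCallback above, a completed Locked_Read marks the line locked in the data cache (the load-linked half of LL/SC), a completed RMW_Read pins the line at the controller via set_atomic(address), and a completed RMW_Write releases it; the change in this hunk is that clear_atomic now takes the address, making it symmetric with set_atomic. Below is a hypothetical stand-in for the controller hooks, showing the bracketing this enables; only the two method names appear in the diff, the rest is invented.

// Hypothetical stand-in for the controller hooks named in the diff, showing
// how writeCallback brackets an RMW: set_atomic on the read half, and (after
// this change) clear_atomic on the *same address* for the write half.
#include <cassert>
#include <cstdint>
#include <set>

struct MockController {
    std::set<uint64_t> atomic_lines;            // lines pinned for an RMW
    void set_atomic(uint64_t line)   { atomic_lines.insert(line); }
    void clear_atomic(uint64_t line) {          // address-taking form (new)
        assert(atomic_lines.count(line));       // must match a prior set_atomic
        atomic_lines.erase(line);
    }
};

int main() {
    MockController c;
    c.set_atomic(0x1000);    // RMW_Read completed for line 0x1000
    c.set_atomic(0x2000);    // a second line can now be tracked distinctly
    c.clear_atomic(0x1000);  // RMW_Write completes the first pair
    c.clear_atomic(0x2000);
    assert(c.atomic_lines.empty());
    return 0;
}

Passing the address lets the controller track more than one pinned line at a time, where the old zero-argument clear_atomic implied a single implicit one.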

     if ((type == RubyRequestType_LD) ||
         (type == RubyRequestType_IFETCH) ||
         (type == RubyRequestType_RMW_Read)) {
       memcpy(ruby_request.data, data.getData(request_address.getOffset(), ruby_request.len), ruby_request.len);
     } else {
       data.setData(ruby_request.data, request_address.getOffset(), ruby_request.len);
     }
   }
-    if (type == RubyRequestType_RMW_Write) {
-      if (m_servicing_atomic != ruby_request.proc_id) {
-        assert(0);
-      }
-      assert(m_atomics_counter > 0);
-      m_atomics_counter--;
-      if (m_atomics_counter == 0) {
-        m_servicing_atomic = 200;
-      }
-    }
+
   m_hit_callback(srequest->id);
   delete srequest;
 }

 // Returns whether the sequencer is ready to accept the request; a load or store
 // already outstanding to the same line is reported as aliased
 int Sequencer::isReady(const RubyRequest& request) {
   // POLINA: check whether we are currently flushing the write buffer; if so, Ruby
   // reports itself not ready, to simulate stalling of the front-end

--- 4 unchanged lines hidden (view full) ---
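The block deleted above was the completion half of the old ownership protocol: when an RMW_Write finished, hitCallback asserted that it came from the processor recorded in m_servicing_atomic, decremented m_atomics_counter, and restored the 200 sentinel once the counter reached zero. Below is a standalone model of that retired protocol, reconstructed from the deleted lines here and in isReady further down; the acquire/complete names and the driver are invented.

// Standalone model (field names from the old revision) of the bookkeeping
// this diff deletes: m_servicing_atomic holds the proc id that owns the
// atomic sequence, with 200 as the "no owner" sentinel; m_atomics_counter
// counts outstanding RMW halves.
#include <cassert>

constexpr unsigned NO_OWNER = 200;   // sentinel used by the old code

struct OldBookkeeping {
    unsigned m_servicing_atomic = NO_OWNER;
    int m_atomics_counter = 0;

    void acquire(unsigned proc_id) {         // old isReady path, on RMW_Read
        if (m_servicing_atomic == NO_OWNER) {
            assert(m_atomics_counter == 0);
            m_servicing_atomic = proc_id;
        } else {
            assert(m_servicing_atomic == proc_id);  // single-owner rule
        }
        ++m_atomics_counter;
    }
    void complete(unsigned proc_id) {        // old hitCallback path, on RMW_Write
        assert(m_servicing_atomic == proc_id);
        assert(m_atomics_counter > 0);
        if (--m_atomics_counter == 0)
            m_servicing_atomic = NO_OWNER;   // release ownership
    }
};

int main() {
    OldBookkeeping b;
    b.acquire(3);
    b.acquire(3);    // the same owner may stack requests
    b.complete(3);
    b.complete(3);   // counter hits zero; ownership released
    assert(b.m_servicing_atomic == NO_OWNER);
    return 0;
}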


   if( m_writeRequestTable.exist(line_address(Address(request.paddr))) ||
       m_readRequestTable.exist(line_address(Address(request.paddr))) ){
     //cout << "OUTSTANDING REQUEST EXISTS " << p << " VER " << m_version << endl;
     //printProgress(cout);
     return LIBRUBY_ALIASED_REQUEST;
   }

-  if (request.type == RubyRequestType_RMW_Read) {
-    if (m_servicing_atomic == 200) {
-      assert(m_atomics_counter == 0);
-      m_servicing_atomic = request.proc_id;
-    }
-    else {
-      assert(m_servicing_atomic == request.proc_id);
-    }
-    m_atomics_counter++;
-  }
-  else {
-    if (m_servicing_atomic == request.proc_id) {
-      if (request.type != RubyRequestType_RMW_Write) {
-        m_servicing_atomic = 200;
-        m_atomics_counter = 0;
-      }
-    }
-  }

   return 1;
 }

 bool Sequencer::empty() const {
   return (m_writeRequestTable.size() == 0) && (m_readRequestTable.size() == 0);
 }


--- 11 unchanged lines hidden (view full) ---
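isReady above refuses any request whose cache line already has an outstanding load or store, since both request tables are keyed by line address; two accesses to different bytes of the same line therefore alias. Below is a minimal sketch of that check, assuming a 64-byte line size (the real size comes from the Ruby configuration) and a plain set as a stand-in for the two request tables.

// Minimal sketch of the aliasing check: two requests that fall in the same
// cache line collide, because the tables are keyed by line address.
#include <cstdint>
#include <cstdio>
#include <unordered_set>

constexpr uint64_t kLineBytes = 64;                       // assumed line size
uint64_t line_address(uint64_t paddr) { return paddr & ~(kLineBytes - 1); }

int main() {
    std::unordered_set<uint64_t> outstanding;             // read+write tables
    outstanding.insert(line_address(0x1008));             // pending store

    uint64_t incoming = 0x1030;                           // same 64 B line
    if (outstanding.count(line_address(incoming)))
        std::puts("LIBRUBY_ALIASED_REQUEST: line already outstanding");
    return 0;
}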

   // ensuring that nothing comes between checking the flag and servicing the store
   if (!m_dataCache_ptr->isLocked(line_address(Address(request.paddr)), m_version)) {
     return LLSC_FAIL;
   }
   else {
     m_dataCache_ptr->clearLocked(line_address(Address(request.paddr)));
   }
 }
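The block above implements the store-conditional half of LL/SC: the earlier Locked_Read (see writeCallback) set a per-line lock for this hardware context, and the Locked_Write succeeds only while that lock is still held, returning LLSC_FAIL otherwise. Below is a self-contained mock of the handshake; isLocked, clearLocked, setLocked, and LLSC_FAIL are named in the diff, while the table type and the LLSC_OK value are invented for the demo.

// Sketch of the LL/SC handshake: a Locked_Read (load-linked) records a
// per-line lock keyed by "version" (hardware context); a Locked_Write
// (store-conditional) consumes the lock or fails.
#include <cstdint>
#include <cstdio>
#include <map>

constexpr int LLSC_FAIL = 0;   // failure code, as in the diff
constexpr int LLSC_OK   = 1;   // invented success code for the demo

struct MockLockTable {
    std::map<uint64_t, int> locked;   // line -> version holding the link
    void setLocked(uint64_t line, int ver) { locked[line] = ver; }
    bool isLocked(uint64_t line, int ver) {
        auto it = locked.find(line);
        return it != locked.end() && it->second == ver;
    }
    void clearLocked(uint64_t line) { locked.erase(line); }
};

int storeConditional(MockLockTable& t, uint64_t line, int ver) {
    if (!t.isLocked(line, ver)) return LLSC_FAIL;  // the link was lost
    t.clearLocked(line);                           // consume the link
    return LLSC_OK;
}

int main() {
    MockLockTable t;
    t.setLocked(0x40, /*version=*/1);              // load-linked
    t.clearLocked(0x40);                           // e.g. an intervening write
    std::printf("sc -> %d (0 means LLSC_FAIL)\n", storeConditional(t, 0x40, 1));
    return 0;
}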
-    if (request.type == RubyRequestType_RMW_Write) {
-      m_controller->started_writes();
-    }
     issueRequest(request);

     // TODO: issue hardware prefetches here
     return id;
   }
   else {
     assert(0);
   }

--- 4 unchanged lines hidden (view full) ---

 }

 void Sequencer::issueRequest(const RubyRequest& request) {

   // TODO: get rid of CacheMsg, CacheRequestType, and AccessModeType, & have SLICC use RubyRequest and subtypes natively
   CacheRequestType ctype;
   switch(request.type) {
     case RubyRequestType_IFETCH:
+      if (m_atomic_reads > 0 && m_atomic_writes == 0) {
+        m_controller->reset_atomics();
+      }
+      else if (m_atomic_writes > 0) {
+        assert(m_atomic_reads > m_atomic_writes);
+        cerr << "WARNING: Expected: " << m_atomic_reads << " RMW_Writes, but only received: " << m_atomic_writes << endl;
+        assert(false);
+      }
       ctype = CacheRequestType_IFETCH;
       break;
     case RubyRequestType_LD:
+      if (m_atomic_reads > 0 && m_atomic_writes == 0) {
+        m_controller->reset_atomics();
+      }
+      else if (m_atomic_writes > 0) {
+        assert(m_atomic_reads > m_atomic_writes);
+        cerr << "WARNING: Expected: " << m_atomic_reads << " RMW_Writes, but only received: " << m_atomic_writes << endl;
+        assert(false);
+      }
       ctype = CacheRequestType_LD;
       break;
     case RubyRequestType_ST:
+      if (m_atomic_reads > 0 && m_atomic_writes == 0) {
+        m_controller->reset_atomics();
+      }
+      else if (m_atomic_writes > 0) {
+        assert(m_atomic_reads > m_atomic_writes);
+        cerr << "WARNING: Expected: " << m_atomic_reads << " RMW_Writes, but only received: " << m_atomic_writes << endl;
+        assert(false);
+      }
       ctype = CacheRequestType_ST;
       break;
     case RubyRequestType_Locked_Read:
     case RubyRequestType_Locked_Write:
+      ctype = CacheRequestType_ATOMIC;
+      break;
     case RubyRequestType_RMW_Read:
+      assert(m_atomic_writes == 0);
+      m_atomic_reads++;
+      ctype = CacheRequestType_ATOMIC;
+      break;
     case RubyRequestType_RMW_Write:
+      assert(m_atomic_reads > 0);
+      assert(m_atomic_writes < m_atomic_reads);
+      m_atomic_writes++;
+      if (m_atomic_reads == m_atomic_writes) {
+        m_atomic_reads = 0;
+        m_atomic_writes = 0;
+      }
       ctype = CacheRequestType_ATOMIC;
       break;
     default:
       assert(0);
   }
   AccessModeType amtype;
   switch(request.access_mode){
     case RubyAccessMode_User:

--- 67 unchanged lines hidden ---
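The reworked switch above moves the atomic bookkeeping that the old revision spread across isReady and hitCallback into issueRequest: RMW_Read and RMW_Write maintain the paired counters, and any ordinary LD, ST, or IFETCH arriving mid-sequence either resets abandoned read halves (reset_atomics) or trips an assertion if write halves are already in flight. Below is a condensed model of that policy; the request names and counter logic follow the diff, the scaffolding around them is invented.

// Condensed model of the new issueRequest policy: an ordinary request that
// arrives after unmatched RMW_Reads (no writes yet) silently resets the
// sequence; arriving after a *partially written* sequence is treated as a
// protocol error.
#include <cassert>
#include <cstdio>

enum class Req { LD, ST, IFETCH, RMW_Read, RMW_Write };

struct Seq {
    int reads = 0, writes = 0;
    void issue(Req r) {
        switch (r) {
        case Req::LD: case Req::ST: case Req::IFETCH:
            if (reads > 0 && writes == 0) {
                reads = 0;                  // reset_atomics(): abandoned reads
            } else if (writes > 0) {
                std::fprintf(stderr,
                    "WARNING: expected %d RMW_Writes, received %d\n",
                    reads, writes);
                assert(false);              // half-written RMW sequence
            }
            break;
        case Req::RMW_Read:
            assert(writes == 0);
            ++reads;
            break;
        case Req::RMW_Write:
            assert(reads > 0 && writes < reads);
            if (++writes == reads) reads = writes = 0;
            break;
        }
    }
};

int main() {
    Seq s;
    s.issue(Req::RMW_Read);
    s.issue(Req::LD);        // resets the abandoned read half
    std::printf("reads=%d writes=%d\n", s.reads, s.writes);
    return 0;
}

Note that the warning path fires only when a non-atomic request interleaves a partially written RMW sequence, which this revision treats as unrecoverable.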