old ( 6151:bc6b84108443 ) new ( 6152:705b277e1141 )
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright

--- 33 unchanged lines hidden ---

//#include "Tracer.hh"
#include "AbstractChip.hh"
#include "Chip.hh"
#include "Tester.hh"
#include "SubBlock.hh"
#include "Protocol.hh"
#include "Map.hh"
#include "interface.hh"

Sequencer::Sequencer(AbstractChip* chip_ptr, int version) {
  m_chip_ptr = chip_ptr;
  m_version = version;

  m_deadlock_check_scheduled = false;
  m_outstanding_count = 0;

  int smt_threads = RubyConfig::numberofSMTThreads();
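  // Note: the elided lines below presumably use smt_threads to size the
  // per-thread read and write request tables that the rest of this file
  // indexes as m_readRequestTable_ptr[p] / m_writeRequestTable_ptr[p].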

--- 88 unchanged lines hidden ---

// returns the total number of demand requests
int Sequencer::getNumberOutstandingDemand(){
  int smt_threads = RubyConfig::numberofSMTThreads();
  int total_demand = 0;
  for(int p=0; p < smt_threads; ++p){
    Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
    for (int i=0; i< keys.size(); i++) {
      CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
      if(request.getPrefetch() == PrefetchBit_No){
        total_demand++;
      }
    }

    keys = m_writeRequestTable_ptr[p]->keys();
    for (int i=0; i< keys.size(); i++) {
      CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
      if(request.getPrefetch() == PrefetchBit_No){
        total_demand++;

--- 215 unchanged lines hidden ---

  // See if we should schedule a deadlock check
  if (m_deadlock_check_scheduled == false) {
    g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
    m_deadlock_check_scheduled = true;
  }

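  // Stores and atomics are tracked in the per-thread write request table; a
  // repeated request to the same cache line just overwrites the existing entry
  // rather than allocating a second one. (Loads presumably get the symmetric
  // treatment in the read request table in the elided lines below.)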
  if ((request.getType() == CacheRequestType_ST) ||
      (request.getType() == CacheRequestType_ATOMIC)) {
    if (m_writeRequestTable_ptr[thread]->exist(line_address(request.getAddress()))) {
      m_writeRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
      return true;
    }
    m_writeRequestTable_ptr[thread]->allocate(line_address(request.getAddress()));
    m_writeRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
    m_outstanding_count++;

--- 24 unchanged lines hidden ---

  int total_outstanding = 0;
  int smt_threads = RubyConfig::numberofSMTThreads();
  for(int p=0; p < smt_threads; ++p){
    total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
  }
  assert(m_outstanding_count == total_outstanding);

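  // Remove the entry from whichever table matches the request type (write table
  // for ST/ATOMIC, read table otherwise). m_outstanding_count is sanity-checked
  // against the table sizes both before and, presumably, after the removal.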
  if ((request.getType() == CacheRequestType_ST) ||
      (request.getType() == CacheRequestType_ATOMIC)) {
    m_writeRequestTable_ptr[thread]->deallocate(line_address(request.getAddress()));
  } else {
    m_readRequestTable_ptr[thread]->deallocate(line_address(request.getAddress()));
  }
  m_outstanding_count--;

  total_outstanding = 0;

--- 43 unchanged lines hidden ---

void Sequencer::writeCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, int thread) {
  assert(address == line_address(address));
  assert(m_writeRequestTable_ptr[thread]->exist(line_address(address)));
  CacheMsg request = m_writeRequestTable_ptr[thread]->lookup(address);
  assert( request.getThreadID() == thread);
  removeRequest(request);

  assert((request.getType() == CacheRequestType_ST) ||
         (request.getType() == CacheRequestType_ATOMIC));

  hitCallback(request, data, respondingMach, thread);
}

void Sequencer::readCallback(const Address& address) {
  DataBlock data;

--- 34 unchanged lines hidden ---

  assert(address == line_address(address));
  assert(m_readRequestTable_ptr[thread]->exist(line_address(address)));

  CacheMsg request = m_readRequestTable_ptr[thread]->lookup(address);
  assert( request.getThreadID() == thread );
  removeRequest(request);

  assert((request.getType() == CacheRequestType_LD) ||
         (request.getType() == CacheRequestType_IFETCH));

  hitCallback(request, data, respondingMach, thread);
}

void Sequencer::hitCallback(const CacheMsg& request, DataBlock& data, GenericMachineType respondingMach, int thread) {
  int size = request.getSize();

--- 59 unchanged lines hidden ---

    uinteger_t stick_cmpr = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "stick_cmpr"));
    cout << "END PROC " << m_version << hex << " tick = " << tick << " tick_cmpr = " << tick_cmpr << " stick = " << stick << " stick_cmpr = " << stick_cmpr << " cycle = "<< g_eventQueue_ptr->getTime() << dec << endl;
#endif

  }

  bool write =
    (type == CacheRequestType_ST) ||
    (type == CacheRequestType_ATOMIC);

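  // Under TSO, a completed store is handed back to this processor's store
  // buffer; in all other cases the returned cache line is trimmed down to the
  // bytes the original request asked for by copying through a SubBlock.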
  if (TSO && write) {
    m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->callBack(line_address(request.getAddress()), data);
  } else {

    // Copy the correct bytes out of the cache line into the subblock
    SubBlock subblock(request_address, request_logical_address, size);

--- 11 unchanged lines hidden ---

    // (This is only triggered for the non-TSO case)
    if (write) {
      assert(!TSO);
      subblock.mergeTo(data);  // copy the correct bytes from SubBlock into the DataBlock
    }
  }
}

void Sequencer::printDebug(){
  // notify driver of debug
  g_system_ptr->getDriver()->printDebug();
}

// Returns true only if the sequencer can accept this request, i.e. it is not
// over its outstanding-request limit and no load or store is already
// outstanding for the target line
bool
Sequencer::isReady(const Packet* pkt) const

--- 20 unchanged lines hidden ---

                   Address( physical_addr ),
                   type_of_request,
                   Address(0),
                   AccessModeType_UserMode,   // User/supervisor mode
                   0,                         // Size in bytes of request
                   PrefetchBit_No,            // Not a prefetch
                   0,                         // Version number
                   Address(logical_addr),     // Virtual Address
                   thread                     // SMT thread
                   );
  return isReady(request);  // delegate to the CacheMsg-based overload
}

bool
Sequencer::isReady(const CacheMsg& request) const
{
  if (m_outstanding_count >= g_SEQUENCER_OUTSTANDING_REQUESTS) {
    //cout << "TOO MANY OUTSTANDING: " << m_outstanding_count << " " << g_SEQUENCER_OUTSTANDING_REQUESTS << " VER " << m_version << endl;
    //printProgress(cout);
    return false;
  }

  // This code allows reads to be performed even when we have a write
  // request outstanding for the line
  bool write =
    (request.getType() == CacheRequestType_ST) ||
    (request.getType() == CacheRequestType_ATOMIC);

  // LUKE - disallow more than one request type per address
  // INVARIANT: at most one outstanding request per line address, across all
  // SMT threads and both request tables of this sequencer
  int smt_threads = RubyConfig::numberofSMTThreads();
  for(int p=0; p < smt_threads; ++p){
    if( m_writeRequestTable_ptr[p]->exist(line_address(request.getAddress())) ||
        m_readRequestTable_ptr[p]->exist(line_address(request.getAddress())) ){

--- 39 unchanged lines hidden ---

                   Address( physical_addr ),
                   type_of_request,
                   Address(virtual_pc),
                   access_mode,           // User/supervisor mode
                   request_size,          // Size in bytes of request
                   PrefetchBit_No,        // Not a prefetch
                   0,                     // Version number
                   Address(logical_addr), // Virtual Address
                   thread                 // SMT thread
                   );
  makeRequest(request);
}

void
Sequencer::makeRequest(const CacheMsg& request)
{
  bool write = (request.getType() == CacheRequestType_ST) ||
               (request.getType() == CacheRequestType_ATOMIC);

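  // Under TSO, demand (non-prefetch) stores are not issued directly: they are
  // deposited in this processor's store buffer, which must already have
  // reported itself ready (hence the assert) and which is presumably
  // responsible for issuing them to the memory system later.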
  if (TSO && (request.getPrefetch() == PrefetchBit_No) && write) {
    assert(m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->isReady());
    m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->insertStore(request);
    return;
  }


--- 326 unchanged lines hidden ---