30,34d29
< /*
< * $Id: Sequencer.C 1.131 2006/11/06 17:41:01-06:00 bobba@gratiano.cs.wisc.edu $
< *
< */
<
41,45c36,37
< #include "mem/ruby/config/RubyConfig.hh"
< //#include "mem/ruby/recorder/Tracer.hh"
< #include "mem/ruby/slicc_interface/AbstractChip.hh"
< #include "mem/protocol/Chip.hh"
< #include "mem/ruby/tester/Tester.hh"
---
> #include "mem/protocol/CacheMsg.hh"
> #include "mem/ruby/recorder/Tracer.hh"
49c41,42
< #include "mem/packet.hh"
---
> #include "mem/ruby/buffers/MessageBuffer.hh"
> #include "mem/ruby/slicc_interface/AbstractController.hh"
51,53c44
< Sequencer::Sequencer(AbstractChip* chip_ptr, int version) {
< m_chip_ptr = chip_ptr;
< m_version = version;
---
> //Sequencer::Sequencer(int core_id, MessageBuffer* mandatory_q)
54a46,52
> Sequencer::Sequencer(const string & name)
> :RubyPort(name)
> {
> }
>
> void Sequencer::init(const vector<string> & argv)
> {
58,66c56,79
< int smt_threads = RubyConfig::numberofSMTThreads();
< m_writeRequestTable_ptr = new Map<Address, CacheMsg>*[smt_threads];
< m_readRequestTable_ptr = new Map<Address, CacheMsg>*[smt_threads];
<
< m_packetTable_ptr = new Map<Address, Packet*>;
<
< for(int p=0; p < smt_threads; ++p){
< m_writeRequestTable_ptr[p] = new Map<Address, CacheMsg>;
< m_readRequestTable_ptr[p] = new Map<Address, CacheMsg>;
---
> m_max_outstanding_requests = 0;
> m_deadlock_threshold = 0;
> m_version = -1;
> m_instCache_ptr = NULL;
> m_dataCache_ptr = NULL;
> m_controller = NULL;
> for (size_t i=0; i<argv.size(); i+=2) {
> if ( argv[i] == "controller") {
> m_controller = RubySystem::getController(argv[i+1]); // argv[i+1] names the controller, e.g. "L1Cache"
> m_mandatory_q_ptr = m_controller->getMandatoryQueue();
> } else if ( argv[i] == "icache")
> m_instCache_ptr = RubySystem::getCache(argv[i+1]);
> else if ( argv[i] == "dcache")
> m_dataCache_ptr = RubySystem::getCache(argv[i+1]);
> else if ( argv[i] == "version")
> m_version = atoi(argv[i+1].c_str());
> else if ( argv[i] == "max_outstanding_requests")
> m_max_outstanding_requests = atoi(argv[i+1].c_str());
> else if ( argv[i] == "deadlock_threshold")
> m_deadlock_threshold = atoi(argv[i+1].c_str());
> else {
> cerr << "WARNING: Sequencer: Unkown configuration parameter: " << argv[i] << endl;
> assert(false);
> }
68c81,86
<
---
> assert(m_max_outstanding_requests > 0);
> assert(m_deadlock_threshold > 0);
> assert(m_version > -1);
> assert(m_instCache_ptr != NULL);
> assert(m_dataCache_ptr != NULL);
> assert(m_controller != NULL);
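
For orientation, a minimal sketch of how the new flat key/value argument list might be assembled by setup code; the instance names ("L1Cache-0" and friends) are illustrative assumptions, not names taken from this change.

    // Hypothetical setup sketch -- component names are assumptions.
    #include <string>
    #include <vector>
    using std::string; using std::vector;

    void configureSequencer(Sequencer & seq)
    {
        vector<string> argv;
        argv.push_back("controller");               argv.push_back("L1Cache-0");
        argv.push_back("icache");                   argv.push_back("L1I-0");
        argv.push_back("dcache");                   argv.push_back("L1D-0");
        argv.push_back("version");                  argv.push_back("0");
        argv.push_back("max_outstanding_requests"); argv.push_back("16");
        argv.push_back("deadlock_threshold");       argv.push_back("500000");
        seq.init(argv);   // the asserts in init() fire if anything is missing
    }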
72,86c90
< int smt_threads = RubyConfig::numberofSMTThreads();
< for(int i=0; i < smt_threads; ++i){
< if(m_writeRequestTable_ptr[i]){
< delete m_writeRequestTable_ptr[i];
< }
< if(m_readRequestTable_ptr[i]){
< delete m_readRequestTable_ptr[i];
< }
< }
< if(m_writeRequestTable_ptr){
< delete [] m_writeRequestTable_ptr;
< }
< if(m_readRequestTable_ptr){
< delete [] m_readRequestTable_ptr;
< }
---
>
92d95
< bool deadlock = false;
95d97
< int smt_threads = RubyConfig::numberofSMTThreads();
97,114d98
< for(int p=0; p < smt_threads; ++p){
< Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
< for (int i=0; i<keys.size(); i++) {
< CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
< if (current_time - request.getTime() >= g_DEADLOCK_THRESHOLD) {
< WARN_MSG("Possible Deadlock detected");
< WARN_EXPR(request);
< WARN_EXPR(m_chip_ptr->getID());
< WARN_EXPR(m_version);
< WARN_EXPR(keys.size());
< WARN_EXPR(current_time);
< WARN_EXPR(request.getTime());
< WARN_EXPR(current_time - request.getTime());
< WARN_EXPR(*m_readRequestTable_ptr[p]);
< ERROR_MSG("Aborting");
< deadlock = true;
< }
< }
116,131c100,111
< keys = m_writeRequestTable_ptr[p]->keys();
< for (int i=0; i<keys.size(); i++) {
< CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
< if (current_time - request.getTime() >= g_DEADLOCK_THRESHOLD) {
< WARN_MSG("Possible Deadlock detected");
< WARN_EXPR(request);
< WARN_EXPR(m_chip_ptr->getID());
< WARN_EXPR(m_version);
< WARN_EXPR(current_time);
< WARN_EXPR(request.getTime());
< WARN_EXPR(current_time - request.getTime());
< WARN_EXPR(keys.size());
< WARN_EXPR(*m_writeRequestTable_ptr[p]);
< ERROR_MSG("Aborting");
< deadlock = true;
< }
---
> Vector<Address> keys = m_readRequestTable.keys();
> for (int i=0; i<keys.size(); i++) {
> SequencerRequest* request = m_readRequestTable.lookup(keys[i]);
> if (current_time - request->issue_time >= m_deadlock_threshold) {
> WARN_MSG("Possible Deadlock detected");
> WARN_EXPR(request);
> WARN_EXPR(m_version);
> WARN_EXPR(keys.size());
> WARN_EXPR(current_time);
> WARN_EXPR(request->issue_time);
> WARN_EXPR(current_time - request->issue_time);
> ERROR_MSG("Aborting");
133,140d112
< total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
< } // across all request tables
< assert(m_outstanding_count == total_outstanding);
<
< if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking
< g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
< } else {
< m_deadlock_check_scheduled = false;
142d113
< }
144,159c115,126
< //returns the total number of requests
< int Sequencer::getNumberOutstanding(){
< return m_outstanding_count;
< }
<
< // returns the total number of demand requests
< int Sequencer::getNumberOutstandingDemand(){
< int smt_threads = RubyConfig::numberofSMTThreads();
< int total_demand = 0;
< for(int p=0; p < smt_threads; ++p){
< Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
< for (int i=0; i< keys.size(); i++) {
< CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
< if(request.getPrefetch() == PrefetchBit_No){
< total_demand++;
< }
---
> keys = m_writeRequestTable.keys();
> for (int i=0; i<keys.size(); i++) {
> SequencerRequest* request = m_writeRequestTable.lookup(keys[i]);
> if (current_time - request->issue_time >= m_deadlock_threshold) {
> WARN_MSG("Possible Deadlock detected");
> WARN_EXPR(request);
> WARN_EXPR(m_version);
> WARN_EXPR(current_time);
> WARN_EXPR(request->issue_time);
> WARN_EXPR(current_time - request->issue_time);
> WARN_EXPR(keys.size());
> ERROR_MSG("Aborting");
161,168d127
<
< keys = m_writeRequestTable_ptr[p]->keys();
< for (int i=0; i< keys.size(); i++) {
< CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
< if(request.getPrefetch() == PrefetchBit_No){
< total_demand++;
< }
< }
169a129
> total_outstanding += m_writeRequestTable.size() + m_readRequestTable.size();
171,172c131
< return total_demand;
< }
---
> assert(m_outstanding_count == total_outstanding);
174,241c133,134
< int Sequencer::getNumberOutstandingPrefetch(){
< int smt_threads = RubyConfig::numberofSMTThreads();
< int total_prefetch = 0;
< for(int p=0; p < smt_threads; ++p){
< Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
< for (int i=0; i< keys.size(); i++) {
< CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
< if(request.getPrefetch() == PrefetchBit_Yes){
< total_prefetch++;
< }
< }
<
< keys = m_writeRequestTable_ptr[p]->keys();
< for (int i=0; i< keys.size(); i++) {
< CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
< if(request.getPrefetch() == PrefetchBit_Yes){
< total_prefetch++;
< }
< }
< }
<
< return total_prefetch;
< }
<
< bool Sequencer::isPrefetchRequest(const Address & lineaddr){
< int smt_threads = RubyConfig::numberofSMTThreads();
< for(int p=0; p < smt_threads; ++p){
< // check load requests
< Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
< for (int i=0; i< keys.size(); i++) {
< CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
< if(line_address(request.getAddress()) == lineaddr){
< if(request.getPrefetch() == PrefetchBit_Yes){
< return true;
< }
< else{
< return false;
< }
< }
< }
<
< // check store requests
< keys = m_writeRequestTable_ptr[p]->keys();
< for (int i=0; i< keys.size(); i++) {
< CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
< if(line_address(request.getAddress()) == lineaddr){
< if(request.getPrefetch() == PrefetchBit_Yes){
< return true;
< }
< else{
< return false;
< }
< }
< }
< }
< // we should've found a matching request
< cout << "isRequestPrefetch() ERROR request NOT FOUND : " << lineaddr << endl;
< printProgress(cout);
< assert(0);
< }
<
< AccessModeType Sequencer::getAccessModeOfRequest(Address addr, int thread){
< if(m_readRequestTable_ptr[thread]->exist(line_address(addr))){
< CacheMsg& request = m_readRequestTable_ptr[thread]->lookup(addr);
< return request.getAccessMode();
< } else if(m_writeRequestTable_ptr[thread]->exist(line_address(addr))){
< CacheMsg& request = m_writeRequestTable_ptr[thread]->lookup(addr);
< return request.getAccessMode();
---
> if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking
> g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
243,244c136
< printProgress(cout);
< ERROR_MSG("Request not found in RequestTables");
---
> m_deadlock_check_scheduled = false;
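
The rewritten wakeup() is a self-rescheduling watchdog: it only re-arms while requests remain outstanding. A condensed sketch of the pattern, with scanTable() as a hypothetical stand-in for the per-table key scans above:

    // Condensed form of the watchdog above; scanTable() is assumed.
    void Sequencer::wakeup()
    {
        Time now = g_eventQueue_ptr->getTime();
        scanTable(m_readRequestTable, now);     // warn/abort on stale entries
        scanTable(m_writeRequestTable, now);
        assert(m_outstanding_count ==
               m_writeRequestTable.size() + m_readRequestTable.size());
        if (m_outstanding_count > 0)            // still waiting: check again
            g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
        else
            m_deadlock_check_scheduled = false; // re-armed by insertRequest()
    }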
248,313d139
< Address Sequencer::getLogicalAddressOfRequest(Address addr, int thread){
< assert(thread >= 0);
< if(m_readRequestTable_ptr[thread]->exist(line_address(addr))){
< CacheMsg& request = m_readRequestTable_ptr[thread]->lookup(addr);
< return request.getLogicalAddress();
< } else if(m_writeRequestTable_ptr[thread]->exist(line_address(addr))){
< CacheMsg& request = m_writeRequestTable_ptr[thread]->lookup(addr);
< return request.getLogicalAddress();
< } else {
< printProgress(cout);
< WARN_MSG("Request not found in RequestTables");
< WARN_MSG(addr);
< WARN_MSG(thread);
< ASSERT(0);
< }
< }
<
< // returns the ThreadID of the request
< int Sequencer::getRequestThreadID(const Address & addr){
< int smt_threads = RubyConfig::numberofSMTThreads();
< int thread = -1;
< int num_found = 0;
< for(int p=0; p < smt_threads; ++p){
< if(m_readRequestTable_ptr[p]->exist(addr)){
< num_found++;
< thread = p;
< }
< if(m_writeRequestTable_ptr[p]->exist(addr)){
< num_found++;
< thread = p;
< }
< }
< if(num_found != 1){
< cout << "getRequestThreadID ERROR too many matching requests addr = " << addr << endl;
< printProgress(cout);
< }
< ASSERT(num_found == 1);
< ASSERT(thread != -1);
<
< return thread;
< }
<
< // given a line address, return the request's physical address
< Address Sequencer::getRequestPhysicalAddress(const Address & lineaddr){
< int smt_threads = RubyConfig::numberofSMTThreads();
< Address physaddr;
< int num_found = 0;
< for(int p=0; p < smt_threads; ++p){
< if(m_readRequestTable_ptr[p]->exist(lineaddr)){
< num_found++;
< physaddr = (m_readRequestTable_ptr[p]->lookup(lineaddr)).getAddress();
< }
< if(m_writeRequestTable_ptr[p]->exist(lineaddr)){
< num_found++;
< physaddr = (m_writeRequestTable_ptr[p]->lookup(lineaddr)).getAddress();
< }
< }
< if(num_found != 1){
< cout << "getRequestPhysicalAddress ERROR too many matching requests addr = " << lineaddr << endl;
< printProgress(cout);
< }
< ASSERT(num_found == 1);
<
< return physaddr;
< }
<
315c141
<
---
> /*
322,334c148,156
< int smt_threads = RubyConfig::numberofSMTThreads();
< for(int p=0; p < smt_threads; ++p){
< Vector<Address> rkeys = m_readRequestTable_ptr[p]->keys();
< int read_size = rkeys.size();
< out << "proc " << m_chip_ptr->getID() << " thread " << p << " Read Requests = " << read_size << endl;
< // print the request table
< for(int i=0; i < read_size; ++i){
< CacheMsg & request = m_readRequestTable_ptr[p]->lookup(rkeys[i]);
< out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << rkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
< if( request.getPrefetch() == PrefetchBit_No ){
< total_demand++;
< }
< }
---
> Vector<Address> rkeys = m_readRequestTable.keys();
> int read_size = rkeys.size();
> out << "proc " << m_version << " Read Requests = " << read_size << endl;
> // print the request table
> for(int i=0; i < read_size; ++i){
> SequencerRequest * request = m_readRequestTable.lookup(rkeys[i]);
> out << "\tRequest[ " << i << " ] = " << request->type << " Address " << rkeys[i] << " Posted " << request->issue_time << " PF " << PrefetchBit_No << endl;
> total_demand++;
> }
336,341c158,163
< Vector<Address> wkeys = m_writeRequestTable_ptr[p]->keys();
< int write_size = wkeys.size();
< out << "proc " << m_chip_ptr->getID() << " thread " << p << " Write Requests = " << write_size << endl;
< // print the request table
< for(int i=0; i < write_size; ++i){
< CacheMsg & request = m_writeRequestTable_ptr[p]->lookup(wkeys[i]);
---
> Vector<Address> wkeys = m_writeRequestTable.keys();
> int write_size = wkeys.size();
> out << "proc " << m_version << " Write Requests = " << write_size << endl;
> // print the request table
> for(int i=0; i < write_size; ++i){
> CacheMsg & request = m_writeRequestTable.lookup(wkeys[i]);
346,348d167
< }
<
< out << endl;
349a169,171
>
> out << endl;
>
355c177
<
---
> */
358,364c180,185
< void Sequencer::printConfig(ostream& out) {
< if (TSO) {
< out << "sequencer: Sequencer - TSO" << endl;
< } else {
< out << "sequencer: Sequencer - SC" << endl;
< }
< out << " max_outstanding_requests: " << g_SEQUENCER_OUTSTANDING_REQUESTS << endl;
---
> void Sequencer::printConfig(ostream& out) const {
> out << "Seqeuncer config: " << m_name << endl;
> out << " controller: " << m_controller->getName() << endl;
> out << " version: " << m_version << endl;
> out << " max_outstanding_requests: " << m_max_outstanding_requests << endl;
> out << " deadlock_threshold: " << m_deadlock_threshold << endl;
367,370d187
< bool Sequencer::empty() const {
< return m_outstanding_count == 0;
< }
<
373,380c190,192
< bool Sequencer::insertRequest(const CacheMsg& request) {
< int thread = request.getThreadID();
< assert(thread >= 0);
< int total_outstanding = 0;
< int smt_threads = RubyConfig::numberofSMTThreads();
< for(int p=0; p < smt_threads; ++p){
< total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
< }
---
> bool Sequencer::insertRequest(SequencerRequest* request) {
> int total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
>
385c197
< g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
---
> g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
389,393c201,208
< if ((request.getType() == CacheRequestType_ST) ||
< (request.getType() == CacheRequestType_ATOMIC)) {
< if (m_writeRequestTable_ptr[thread]->exist(line_address(request.getAddress()))) {
< m_writeRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
< return true;
---
> Address line_addr(request->ruby_request.paddr);
> line_addr.makeLineAddress();
> if ((request->ruby_request.type == RubyRequestType_ST) ||
> (request->ruby_request.type == RubyRequestType_RMW)) {
> if (m_writeRequestTable.exist(line_addr)) {
> m_writeRequestTable.lookup(line_addr) = request;
> // return true;
> assert(0); // drh5: isn't this an error? do you lose the initial request?
395,396c210,211
< m_writeRequestTable_ptr[thread]->allocate(line_address(request.getAddress()));
< m_writeRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
---
> m_writeRequestTable.allocate(line_addr);
> m_writeRequestTable.lookup(line_addr) = request;
399,401c214,217
< if (m_readRequestTable_ptr[thread]->exist(line_address(request.getAddress()))) {
< m_readRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
< return true;
---
> if (m_readRequestTable.exist(line_addr)) {
> m_readRequestTable.lookup(line_addr) = request;
> // return true;
> assert(0); // drh5: isn't this an error? do you lose the initial request?
403,404c219,220
< m_readRequestTable_ptr[thread]->allocate(line_address(request.getAddress()));
< m_readRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
---
> m_readRequestTable.allocate(line_addr);
> m_readRequestTable.lookup(line_addr) = request;
410,414c226
< total_outstanding = 0;
< for(int p=0; p < smt_threads; ++p){
< total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
< }
<
---
> total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
415a228
>
419,427c232
< void Sequencer::removeRequest(const CacheMsg& request) {
< int thread = request.getThreadID();
< assert(thread >= 0);
< int total_outstanding = 0;
< int smt_threads = RubyConfig::numberofSMTThreads();
< for(int p=0; p < smt_threads; ++p){
< total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
< }
< assert(m_outstanding_count == total_outstanding);
---
> void Sequencer::removeRequest(SequencerRequest* srequest) {
429,431c234,241
< if ((request.getType() == CacheRequestType_ST) ||
< (request.getType() == CacheRequestType_ATOMIC)) {
< m_writeRequestTable_ptr[thread]->deallocate(line_address(request.getAddress()));
---
> assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
>
> const RubyRequest & ruby_request = srequest->ruby_request;
> Address line_addr(ruby_request.paddr);
> line_addr.makeLineAddress();
> if ((ruby_request.type == RubyRequestType_ST) ||
> (ruby_request.type == RubyRequestType_RMW)) {
> m_writeRequestTable.deallocate(line_addr);
433c243
< m_readRequestTable_ptr[thread]->deallocate(line_address(request.getAddress()));
---
> m_readRequestTable.deallocate(line_addr);
437,441c247
< total_outstanding = 0;
< for(int p=0; p < smt_threads; ++p){
< total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
< }
< assert(m_outstanding_count == total_outstanding);
---
> assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
444,448d249
< void Sequencer::writeCallback(const Address& address) {
< DataBlock data;
< writeCallback(address, data);
< }
<
450,464d250
< // process oldest thread first
< int thread = -1;
< Time oldest_time = 0;
< int smt_threads = RubyConfig::numberofSMTThreads();
< for(int t=0; t < smt_threads; ++t){
< if(m_writeRequestTable_ptr[t]->exist(address)){
< CacheMsg & request = m_writeRequestTable_ptr[t]->lookup(address);
< if(thread == -1 || (request.getTime() < oldest_time) ){
< thread = t;
< oldest_time = request.getTime();
< }
< }
< }
< // make sure we found an oldest thread
< ASSERT(thread != -1);
466,472d251
< CacheMsg & request = m_writeRequestTable_ptr[thread]->lookup(address);
<
< writeCallback(address, data, GenericMachineType_NULL, PrefetchBit_No, thread);
< }
<
< void Sequencer::writeCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, PrefetchBit pf, int thread) {
<
474,475c253
< assert(thread >= 0);
< assert(m_writeRequestTable_ptr[thread]->exist(line_address(address)));
---
> assert(m_writeRequestTable.exist(line_address(address)));
477,485c255
< writeCallback(address, data, respondingMach, thread);
<
< }
<
< void Sequencer::writeCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, int thread) {
< assert(address == line_address(address));
< assert(m_writeRequestTable_ptr[thread]->exist(line_address(address)));
< CacheMsg request = m_writeRequestTable_ptr[thread]->lookup(address);
< assert( request.getThreadID() == thread);
---
> SequencerRequest* request = m_writeRequestTable.lookup(address);
488,489c258,259
< assert((request.getType() == CacheRequestType_ST) ||
< (request.getType() == CacheRequestType_ATOMIC));
---
> assert((request->ruby_request.type == RubyRequestType_ST) ||
> (request->ruby_request.type == RubyRequestType_RMW));
491,492c261
< hitCallback(request, data, respondingMach, thread);
<
---
> hitCallback(request, data);
495,499d263
< void Sequencer::readCallback(const Address& address) {
< DataBlock data;
< readCallback(address, data);
< }
<
501,515d264
< // process oldest thread first
< int thread = -1;
< Time oldest_time = 0;
< int smt_threads = RubyConfig::numberofSMTThreads();
< for(int t=0; t < smt_threads; ++t){
< if(m_readRequestTable_ptr[t]->exist(address)){
< CacheMsg & request = m_readRequestTable_ptr[t]->lookup(address);
< if(thread == -1 || (request.getTime() < oldest_time) ){
< thread = t;
< oldest_time = request.getTime();
< }
< }
< }
< // make sure we found an oldest thread
< ASSERT(thread != -1);
517,523d265
< CacheMsg & request = m_readRequestTable_ptr[thread]->lookup(address);
<
< readCallback(address, data, GenericMachineType_NULL, PrefetchBit_No, thread);
< }
<
< void Sequencer::readCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, PrefetchBit pf, int thread) {
<
525c267
< assert(m_readRequestTable_ptr[thread]->exist(line_address(address)));
---
> assert(m_readRequestTable.exist(line_address(address)));
527,535c269
< readCallback(address, data, respondingMach, thread);
< }
<
< void Sequencer::readCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, int thread) {
< assert(address == line_address(address));
< assert(m_readRequestTable_ptr[thread]->exist(line_address(address)));
<
< CacheMsg request = m_readRequestTable_ptr[thread]->lookup(address);
< assert( request.getThreadID() == thread );
---
> SequencerRequest* request = m_readRequestTable.lookup(address);
538,540c272,273
< assert((request.getType() == CacheRequestType_LD) ||
< (request.getType() == CacheRequestType_IFETCH)
< );
---
> assert((request->ruby_request.type == RubyRequestType_LD) ||
> (request->ruby_request.type == RubyRequestType_IFETCH));
542c275
< hitCallback(request, data, respondingMach, thread);
---
> hitCallback(request, data);
545,553c278,285
< void Sequencer::hitCallback(const CacheMsg& request, DataBlock& data, GenericMachineType respondingMach, int thread) {
< int size = request.getSize();
< Address request_address = request.getAddress();
< Address request_logical_address = request.getLogicalAddress();
< Address request_line_address = line_address(request_address);
< CacheRequestType type = request.getType();
< int threadID = request.getThreadID();
< Time issued_time = request.getTime();
< int logical_proc_no = ((m_chip_ptr->getID() * RubyConfig::numberOfProcsPerChip()) + m_version) * RubyConfig::numberofSMTThreads() + threadID;
---
> void Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data) {
> const RubyRequest & ruby_request = srequest->ruby_request;
> int size = ruby_request.len;
> Address request_address(ruby_request.paddr);
> Address request_line_address(ruby_request.paddr);
> request_line_address.makeLineAddress();
> RubyRequestType type = ruby_request.type;
> Time issued_time = srequest->issue_time;
555,556d286
< DEBUG_MSG(SEQUENCER_COMP, MedPrio, size);
<
558,568c288,290
< if (type == CacheRequestType_IFETCH) {
< if (Protocol::m_TwoLevelCache) {
< if (m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
< m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[m_version]->setMRU(request_line_address);
< }
< }
< else {
< if (m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
< m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->setMRU(request_line_address);
< }
< }
---
> if (type == RubyRequestType_IFETCH) {
> if (m_instCache_ptr->isTagPresent(request_line_address) )
> m_instCache_ptr->setMRU(request_line_address);
570,579c292,293
< if (Protocol::m_TwoLevelCache) {
< if (m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
< m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[m_version]->setMRU(request_line_address);
< }
< }
< else {
< if (m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
< m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->setMRU(request_line_address);
< }
< }
---
> if (m_dataCache_ptr->isTagPresent(request_line_address) )
> m_dataCache_ptr->setMRU(request_line_address);
585,597d298
< if (PROTOCOL_DEBUG_TRACE) {
< g_system_ptr->getProfiler()->profileTransition("Seq", (m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version), -1, request.getAddress(), "", "Done", "",
< int_to_string(miss_latency)+" cycles "+GenericMachineType_to_string(respondingMach)+" "+CacheRequestType_to_string(request.getType())+" "+PrefetchBit_to_string(request.getPrefetch()));
< }
<
< DEBUG_MSG(SEQUENCER_COMP, MedPrio, request_address);
< DEBUG_MSG(SEQUENCER_COMP, MedPrio, request.getPrefetch());
< if (request.getPrefetch() == PrefetchBit_Yes) {
< DEBUG_MSG(SEQUENCER_COMP, MedPrio, "return");
< g_system_ptr->getProfiler()->swPrefetchLatency(miss_latency, type, respondingMach);
< return; // Ignore the software prefetch, don't callback the driver
< }
<
600c301
< g_system_ptr->getProfiler()->missLatency(miss_latency, type, respondingMach);
---
> g_system_ptr->getProfiler()->missLatency(miss_latency, type);
601a303,306
> if (Debug::getProtocolTrace()) {
> g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(ruby_request.paddr),
> "", "Done", "", int_to_string(miss_latency)+" cycles");
> }
602a308,312
> /*
> if (request.getPrefetch() == PrefetchBit_Yes) {
> return; // Ignore the prefetch
> }
> */
604,619c314,320
< bool write =
< (type == CacheRequestType_ST) ||
< (type == CacheRequestType_ATOMIC);
<
< if (TSO && write) {
< m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->callBack(line_address(request.getAddress()), data,
< m_packetTable_ptr->lookup(request.getAddress()));
< } else {
<
< // Copy the correct bytes out of the cache line into the subblock
< SubBlock subblock(request_address, request_logical_address, size);
< subblock.mergeFrom(data); // copy the correct bytes from DataBlock in the SubBlock
<
< // Scan the store buffer to see if there are any outstanding stores we need to collect
< if (TSO) {
< m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->updateSubBlock(subblock);
---
> // update the data
> if (ruby_request.data != NULL) {
> if ((type == RubyRequestType_LD) ||
> (type == RubyRequestType_IFETCH)) {
> memcpy(ruby_request.data, data.getData(request_address.getOffset(), ruby_request.len), ruby_request.len);
> } else {
> data.setData(ruby_request.data, request_address.getOffset(), ruby_request.len);
621,646d321
<
< // Call into the Driver and let it read and/or modify the sub-block
< Packet* pkt = m_packetTable_ptr->lookup(request.getAddress());
<
< // update data if this is a store/atomic
<
< /*
< if (pkt->req->isCondSwap()) {
< L1Cache_Entry entry = m_L1Cache_vec[m_version]->lookup(Address(pkt->req->physAddr()));
< DataBlk datablk = entry->getDataBlk();
< uint8_t *orig_data = datablk.getArray();
< if ( datablk.equal(pkt->req->getExtraData()) )
< datablk->setArray(pkt->getData());
< pkt->setData(orig_data);
< }
< */
<
< g_system_ptr->getDriver()->hitCallback(pkt);
< m_packetTable_ptr->remove(request.getAddress());
<
< // If the request was a Store or Atomic, apply the changes in the SubBlock to the DataBlock
< // (This is only triggered for the non-TSO case)
< if (write) {
< assert(!TSO);
< subblock.mergeTo(data); // copy the correct bytes from SubBlock into the DataBlock
< }
648d322
< }
650,652c324,325
< void Sequencer::printDebug(){
< //notify driver of debug
< g_system_ptr->getDriver()->printDebug();
---
> m_hit_callback(srequest->id);
> delete srequest;
655d327
< //dsm: breaks build, delayed
657,674c329,334
< bool
< Sequencer::isReady(const Packet* pkt) const
< {
<
< int cpu_number = pkt->req->contextId();
< la_t logical_addr = pkt->req->getVaddr();
< pa_t physical_addr = pkt->req->getPaddr();
< CacheRequestType type_of_request;
< if ( pkt->req->isInstFetch() ) {
< type_of_request = CacheRequestType_IFETCH;
< } else if ( pkt->req->isLocked() || pkt->req->isSwap() ) {
< type_of_request = CacheRequestType_ATOMIC;
< } else if ( pkt->isRead() ) {
< type_of_request = CacheRequestType_LD;
< } else if ( pkt->isWrite() ) {
< type_of_request = CacheRequestType_ST;
< } else {
< assert(false);
---
> bool Sequencer::isReady(const RubyRequest& request) const {
> // POLINA: check if we are currently flushing the write buffer; if so, Ruby reports not ready
> // to simulate stalling of the front-end.
> // Do we stall all the sequencers? If it is an atomic instruction - yes!
> if (m_outstanding_count >= m_max_outstanding_requests) {
> return false;
676d335
< int thread = pkt->req->threadId();
678,696c337,339
< CacheMsg request(Address( physical_addr ),
< Address( physical_addr ),
< type_of_request,
< Address(0),
< AccessModeType_UserMode, // User/supervisor mode
< 0, // Size in bytes of request
< PrefetchBit_No, // Not a prefetch
< 0, // Version number
< Address(logical_addr), // Virtual Address
< thread // SMT thread
< );
< return isReady(request);
< }
<
< bool
< Sequencer::isReady(const CacheMsg& request) const
< {
< if (m_outstanding_count >= g_SEQUENCER_OUTSTANDING_REQUESTS) {
< //cout << "TOO MANY OUTSTANDING: " << m_outstanding_count << " " << g_SEQUENCER_OUTSTANDING_REQUESTS << " VER " << m_version << endl;
---
> if( m_writeRequestTable.exist(line_address(Address(request.paddr))) ||
> m_readRequestTable.exist(line_address(Address(request.paddr))) ){
> //cout << "OUTSTANDING REQUEST EXISTS " << p << " VER " << m_version << endl;
701,721d343
< // This code allows reads to be performed even when we have a write
< // request outstanding for the line
< bool write =
< (request.getType() == CacheRequestType_ST) ||
< (request.getType() == CacheRequestType_ATOMIC);
<
< // LUKE - disallow more than one request type per address
< // INVARIANT: at most one request type per address, per processor
< int smt_threads = RubyConfig::numberofSMTThreads();
< for(int p=0; p < smt_threads; ++p){
< if( m_writeRequestTable_ptr[p]->exist(line_address(request.getAddress())) ||
< m_readRequestTable_ptr[p]->exist(line_address(request.getAddress())) ){
< //cout << "OUTSTANDING REQUEST EXISTS " << p << " VER " << m_version << endl;
< //printProgress(cout);
< return false;
< }
< }
<
< if (TSO) {
< return m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->isReady();
< }
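
Condensed, the new isReady() applies two admission gates: a global cap on outstanding requests, and at most one outstanding request per line address across both tables (which is why insertRequest() treats a table hit as an error). Restated under the same names:

    // Restatement of the two admission checks in the new isReady().
    bool Sequencer::isReady(const RubyRequest & request) const
    {
        if (m_outstanding_count >= m_max_outstanding_requests)
            return false;                                // global cap
        Address line = line_address(Address(request.paddr));
        if (m_writeRequestTable.exist(line) ||
            m_readRequestTable.exist(line))
            return false;                                // one request per line
        return true;
    }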
725,728c347,351
< //dsm: breaks build, delayed
< // Called by Driver (Simics or Tester).
< void
< Sequencer::makeRequest(Packet* pkt)
---
> bool Sequencer::empty() const {
> return (m_writeRequestTable.size() == 0) && (m_readRequestTable.size() == 0);
> }
>
> int64_t Sequencer::makeRequest(const RubyRequest & request)
730,748c353,362
< int cpu_number = pkt->req->contextId();
< la_t logical_addr = pkt->req->getVaddr();
< pa_t physical_addr = pkt->req->getPaddr();
< int request_size = pkt->getSize();
< CacheRequestType type_of_request;
< PrefetchBit prefetch;
< bool write = false;
< if ( pkt->req->isInstFetch() ) {
< type_of_request = CacheRequestType_IFETCH;
< } else if ( pkt->req->isLocked() || pkt->req->isSwap() ) {
< type_of_request = CacheRequestType_ATOMIC;
< write = true;
< } else if ( pkt->isRead() ) {
< type_of_request = CacheRequestType_LD;
< } else if ( pkt->isWrite() ) {
< type_of_request = CacheRequestType_ST;
< write = true;
< } else {
< assert(false);
---
> assert(Address(request.paddr).getOffset() + request.len <= RubySystem::getBlockSizeBytes());
> if (isReady(request)) {
> int64_t id = makeUniqueRequestID();
> SequencerRequest *srequest = new SequencerRequest(request, id, g_eventQueue_ptr->getTime());
> bool found = insertRequest(srequest);
> if (!found)
> issueRequest(request);
>
> // TODO: issue hardware prefetches here
> return id;
750,753c364,365
< if (pkt->req->isPrefetch()) {
< prefetch = PrefetchBit_Yes;
< } else {
< prefetch = PrefetchBit_No;
---
> else {
> return -1;
755,757c367
< la_t virtual_pc = pkt->req->getPC();
< int isPriv = false; // TODO: get permission data
< int thread = pkt->req->threadId();
---
> }
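
makeRequest() now returns a request id, with -1 as the not-ready sentinel, and completion is delivered asynchronously through the registered hit callback. A hedged caller-side sketch (tryIssue() is hypothetical):

    // Hypothetical caller -- not part of this change.
    void tryIssue(Sequencer & seq, const RubyRequest & req)
    {
        int64_t id = seq.makeRequest(req);
        if (id == -1) {
            // Sequencer is full, or the line already has an outstanding
            // request; the front-end must stall and retry later.
            return;
        }
        // The answer arrives later via the hit callback, which is handed
        // this same id (see m_hit_callback(srequest->id) above).
    }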
759c369
< AccessModeType access_mode = AccessModeType_UserMode; // TODO: get actual permission
---
> void Sequencer::issueRequest(const RubyRequest& request) {
761,776c371,387
< CacheMsg request(Address( physical_addr ),
< Address( physical_addr ),
< type_of_request,
< Address(virtual_pc),
< access_mode, // User/supervisor mode
< request_size, // Size in bytes of request
< prefetch,
< 0, // Version number
< Address(logical_addr), // Virtual Address
< thread // SMT thread
< );
<
< if ( TSO && write && !pkt->req->isPrefetch() ) {
< assert(m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->isReady());
< m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->insertStore(pkt, request);
< return;
---
> // TODO: get rid of CacheMsg, CacheRequestType, and AccessModeType, & have SLICC use RubyRequest and subtypes natively
> CacheRequestType ctype;
> switch(request.type) {
> case RubyRequestType_IFETCH:
> ctype = CacheRequestType_IFETCH;
> break;
> case RubyRequestType_LD:
> ctype = CacheRequestType_LD;
> break;
> case RubyRequestType_ST:
> ctype = CacheRequestType_ST;
> break;
> case RubyRequestType_RMW:
> ctype = CacheRequestType_ATOMIC;
> break;
> default:
> assert(0);
777a389,405
> AccessModeType amtype;
> switch(request.access_mode){
> case RubyAccessMode_User:
> amtype = AccessModeType_UserMode;
> break;
> case RubyAccessMode_Supervisor:
> amtype = AccessModeType_SupervisorMode;
> break;
> case RubyAccessMode_Device:
> amtype = AccessModeType_UserMode;
> break;
> default:
> assert(0);
> }
> Address line_addr(request.paddr);
> line_addr.makeLineAddress();
> CacheMsg msg(line_addr, Address(request.paddr), ctype, Address(request.pc), amtype, request.len, PrefetchBit_No);
779,801c407,409
< m_packetTable_ptr->insert(Address( physical_addr ), pkt);
<
< doRequest(request);
< }
<
< bool Sequencer::doRequest(const CacheMsg& request) {
< bool hit = false;
< // Check the fast path
< DataBlock* data_ptr;
<
< int thread = request.getThreadID();
<
< hit = tryCacheAccess(line_address(request.getAddress()),
< request.getType(),
< request.getProgramCounter(),
< request.getAccessMode(),
< request.getSize(),
< data_ptr);
<
< if (hit && (request.getType() == CacheRequestType_IFETCH || !REMOVE_SINGLE_CYCLE_DCACHE_FAST_PATH) ) {
< DEBUG_MSG(SEQUENCER_COMP, MedPrio, "Fast path hit");
< hitCallback(request, *data_ptr, GenericMachineType_L1Cache, thread);
< return true;
---
> if (Debug::getProtocolTrace()) {
> g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(request.paddr),
> "", "Begin", "", RubyRequestType_to_string(request.type));
804,812c412,414
< if (TSO && (request.getType() == CacheRequestType_LD || request.getType() == CacheRequestType_IFETCH)) {
<
< // See if we can satisfy the load entirely from the store buffer
< SubBlock subblock(line_address(request.getAddress()), request.getSize());
< if (m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->trySubBlock(subblock)) {
< DataBlock dummy;
< hitCallback(request, dummy, GenericMachineType_NULL, thread); // Call with an 'empty' datablock, since the data is in the store buffer
< return true;
< }
---
> if (g_system_ptr->getTracer()->traceEnabled()) {
> g_system_ptr->getTracer()->traceRequest(m_name, line_addr, Address(request.pc),
> request.type, g_eventQueue_ptr->getTime());
815,818c417
< DEBUG_MSG(SEQUENCER_COMP, MedPrio, "Fast path miss");
< issueRequest(request);
< return hit;
< }
---
> Time latency = 0; // initialized to a null value
820,821c419,422
< void Sequencer::issueRequest(const CacheMsg& request) {
< bool found = insertRequest(request);
---
> if (request.type == RubyRequestType_IFETCH)
> latency = m_instCache_ptr->getLatency();
> else
> latency = m_dataCache_ptr->getLatency();
823,825c424,425
< if (!found) {
< CacheMsg msg = request;
< msg.getAddress() = line_address(request.getAddress()); // Make line address
---
> // Send the message to the cache controller
> assert(latency > 0);
827,830d426
< // Fast Path L1 misses are profiled here - all non-fast path misses are profiled within the generated protocol code
< if (!REMOVE_SINGLE_CYCLE_DCACHE_FAST_PATH) {
< g_system_ptr->getProfiler()->addPrimaryStatSample(msg, m_chip_ptr->getID());
< }
832,852c428
< if (PROTOCOL_DEBUG_TRACE) {
< g_system_ptr->getProfiler()->profileTransition("Seq", (m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip() + m_version), -1, msg.getAddress(),"", "Begin", "", CacheRequestType_to_string(request.getType()));
< }
<
< #if 0
< // Commented out by nate binkert because I removed the trace stuff
< if (g_system_ptr->getTracer()->traceEnabled()) {
< g_system_ptr->getTracer()->traceRequest((m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version), msg.getAddress(), msg.getProgramCounter(),
< msg.getType(), g_eventQueue_ptr->getTime());
< }
< #endif
<
< Time latency = 0; // initialzed to an null value
<
< latency = SEQUENCER_TO_CONTROLLER_LATENCY;
<
< // Send the message to the cache controller
< assert(latency > 0);
< m_chip_ptr->m_L1Cache_mandatoryQueue_vec[m_version]->enqueue(msg, latency);
<
< } // !found
---
> m_mandatory_q_ptr->enqueue(msg, latency);
854c430
<
---
> /*
856c432
< const Address& pc, AccessModeType access_mode,
---
> AccessModeType access_mode,
859,864c435
< if (Protocol::m_TwoLevelCache) {
< return m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
< }
< else {
< return m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
< }
---
> return m_instCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
866,871c437
< if (Protocol::m_TwoLevelCache) {
< return m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
< }
< else {
< return m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
< }
---
> return m_dataCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
873a440
> */
875,923d441
< void Sequencer::resetRequestTime(const Address& addr, int thread){
< assert(thread >= 0);
< //reset both load and store requests, if they exist
< if(m_readRequestTable_ptr[thread]->exist(line_address(addr))){
< CacheMsg& request = m_readRequestTable_ptr[thread]->lookup(addr);
< if( request.m_AccessMode != AccessModeType_UserMode){
< cout << "resetRequestType ERROR read request addr = " << addr << " thread = "<< thread << " is SUPERVISOR MODE" << endl;
< printProgress(cout);
< }
< //ASSERT(request.m_AccessMode == AccessModeType_UserMode);
< request.setTime(g_eventQueue_ptr->getTime());
< }
< if(m_writeRequestTable_ptr[thread]->exist(line_address(addr))){
< CacheMsg& request = m_writeRequestTable_ptr[thread]->lookup(addr);
< if( request.m_AccessMode != AccessModeType_UserMode){
< cout << "resetRequestType ERROR write request addr = " << addr << " thread = "<< thread << " is SUPERVISOR MODE" << endl;
< printProgress(cout);
< }
< //ASSERT(request.m_AccessMode == AccessModeType_UserMode);
< request.setTime(g_eventQueue_ptr->getTime());
< }
< }
<
< // removes load request from queue
< void Sequencer::removeLoadRequest(const Address & addr, int thread){
< removeRequest(getReadRequest(addr, thread));
< }
<
< void Sequencer::removeStoreRequest(const Address & addr, int thread){
< removeRequest(getWriteRequest(addr, thread));
< }
<
< // returns the read CacheMsg
< CacheMsg & Sequencer::getReadRequest( const Address & addr, int thread ){
< Address temp = addr;
< assert(thread >= 0);
< assert(temp == line_address(temp));
< assert(m_readRequestTable_ptr[thread]->exist(addr));
< return m_readRequestTable_ptr[thread]->lookup(addr);
< }
<
< CacheMsg & Sequencer::getWriteRequest( const Address & addr, int thread){
< Address temp = addr;
< assert(thread >= 0);
< assert(temp == line_address(temp));
< assert(m_writeRequestTable_ptr[thread]->exist(addr));
< return m_writeRequestTable_ptr[thread]->lookup(addr);
< }
<
925c443
< out << "[Sequencer: " << m_chip_ptr->getID()
---
> out << "[Sequencer: " << m_version
928,932c446,447
< int smt_threads = RubyConfig::numberofSMTThreads();
< for(int p=0; p < smt_threads; ++p){
< out << ", read request table[ " << p << " ]: " << *m_readRequestTable_ptr[p]
< << ", write request table[ " << p << " ]: " << *m_writeRequestTable_ptr[p];
< }
---
> out << ", read request table: " << m_readRequestTable
> << ", write request table: " << m_writeRequestTable;
943a459
> /*
945,951c461,514
< unsigned int size_in_bytes ) {
< for(unsigned int i=0; i < size_in_bytes; i++) {
< std::cerr << __FILE__ << "(" << __LINE__ << "): Not implemented. " << std::endl;
< value[i] = 0; // _read_physical_memory( m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version,
< // addr.getAddress() + i, 1 );
< }
< return false; // Do nothing?
---
> unsigned int size_in_bytes )
> {
> bool found = false;
> const Address lineAddr = line_address(addr);
> DataBlock data;
> PhysAddress paddr(addr);
> DataBlock* dataPtr = &data;
>
> MachineID l2_mach = map_L2ChipId_to_L2Cache(addr, m_chip_ptr->getID() );
> int l2_ver = l2_mach.num%RubyConfig::numberOfL2CachePerChip();
>
> if (Protocol::m_TwoLevelCache) {
> if(Protocol::m_CMP){
> assert(n->m_L2Cache_L2cacheMemory_vec[l2_ver] != NULL);
> }
> else{
> assert(n->m_L1Cache_cacheMemory_vec[m_version] != NULL);
> }
> }
>
> if (n->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_IFETCH, dataPtr)){
> n->m_L1Cache_L1IcacheMemory_vec[m_version]->getMemoryValue(addr, value, size_in_bytes);
> found = true;
> } else if (n->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
> n->m_L1Cache_L1DcacheMemory_vec[m_version]->getMemoryValue(addr, value, size_in_bytes);
> found = true;
> } else if (Protocol::m_CMP && n->m_L2Cache_L2cacheMemory_vec[l2_ver]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
> n->m_L2Cache_L2cacheMemory_vec[l2_ver]->getMemoryValue(addr, value, size_in_bytes);
> found = true;
> // } else if (n->TBE_TABLE_MEMBER_VARIABLE->isPresent(lineAddr)){
> // ASSERT(n->TBE_TABLE_MEMBER_VARIABLE->isPresent(lineAddr));
> // L1Cache_TBE tbeEntry = n->TBE_TABLE_MEMBER_VARIABLE->lookup(lineAddr);
>
> // int offset = addr.getOffset();
> // for(int i=0; i<size_in_bytes; ++i){
> // value[i] = tbeEntry.getDataBlk().getByte(offset + i);
> // }
>
> // found = true;
> } else {
> // Address not found
> //cout << " " << m_chip_ptr->getID() << " NOT IN CACHE, Value at Directory is: " << (int) value[0] << endl;
> n = dynamic_cast<Chip*>(g_system_ptr->getChip(map_Address_to_DirectoryNode(addr)/RubyConfig::numberOfDirectoryPerChip()));
> int dir_version = map_Address_to_DirectoryNode(addr)%RubyConfig::numberOfDirectoryPerChip();
> for(unsigned int i=0; i<size_in_bytes; ++i){
> int offset = addr.getOffset();
> value[i] = n->m_Directory_directory_vec[dir_version]->lookup(lineAddr).m_DataBlk.getByte(offset + i);
> }
> // Address not found
> //WARN_MSG("Couldn't find address");
> //WARN_EXPR(addr);
> found = false;
> }
> return true;
958c521,574
< return false; // Do nothing?
---
> // idea here is that the coherent cache should find the
> // latest data, then update it
> bool found = false;
> const Address lineAddr = line_address(addr);
> PhysAddress paddr(addr);
> DataBlock data;
> DataBlock* dataPtr = &data;
> Chip* n = dynamic_cast<Chip*>(m_chip_ptr);
>
> MachineID l2_mach = map_L2ChipId_to_L2Cache(addr, m_chip_ptr->getID() );
> int l2_ver = l2_mach.num%RubyConfig::numberOfL2CachePerChip();
>
> assert(n->m_L1Cache_L1IcacheMemory_vec[m_version] != NULL);
> assert(n->m_L1Cache_L1DcacheMemory_vec[m_version] != NULL);
> if (Protocol::m_TwoLevelCache) {
> if(Protocol::m_CMP){
> assert(n->m_L2Cache_L2cacheMemory_vec[l2_ver] != NULL);
> }
> else{
> assert(n->m_L1Cache_cacheMemory_vec[m_version] != NULL);
> }
> }
>
> if (n->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_IFETCH, dataPtr)){
> n->m_L1Cache_L1IcacheMemory_vec[m_version]->setMemoryValue(addr, value, size_in_bytes);
> found = true;
> } else if (n->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
> n->m_L1Cache_L1DcacheMemory_vec[m_version]->setMemoryValue(addr, value, size_in_bytes);
> found = true;
> } else if (Protocol::m_CMP && n->m_L2Cache_L2cacheMemory_vec[l2_ver]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
> n->m_L2Cache_L2cacheMemory_vec[l2_ver]->setMemoryValue(addr, value, size_in_bytes);
> found = true;
> } else {
> // Address not found
> n = dynamic_cast<Chip*>(g_system_ptr->getChip(map_Address_to_DirectoryNode(addr)/RubyConfig::numberOfDirectoryPerChip()));
> int dir_version = map_Address_to_DirectoryNode(addr)%RubyConfig::numberOfDirectoryPerChip();
> for(unsigned int i=0; i<size_in_bytes; ++i){
> int offset = addr.getOffset();
> n->m_Directory_directory_vec[dir_version]->lookup(lineAddr).m_DataBlk.setByte(offset + i, value[i]);
> }
> found = false;
> }
>
> if (found){
> found = getRubyMemoryValue(addr, test_buffer, size_in_bytes);
> assert(found);
> if(value[0] != test_buffer[0]){
> WARN_EXPR((int) value[0]);
> WARN_EXPR((int) test_buffer[0]);
> ERROR_MSG("setRubyMemoryValue failed to set value.");
> }
> }
>
> return true;
959a576,577
> */
> /*
960a579,638
> void
> Sequencer::rubyMemAccess(const uint64 paddr, char* data, const int len, const AccessType type)
> {
> if ( type == AccessType_Read || type == AccessType_Write ) {
> // need to break up the packet data
> uint64 guest_ptr = paddr;
> Vector<DataBlock*> datablocks;
> while (paddr + len != guest_ptr) {
> Address addr(guest_ptr);
> Address line_addr = line_address(addr);
>
> int bytes_copied;
> if (addr.getOffset() == 0) {
> bytes_copied = (guest_ptr + RubyConfig::dataBlockBytes() > paddr + len)?
> (paddr + len - guest_ptr):
> RubyConfig::dataBlockBytes();
> } else {
> bytes_copied = RubyConfig::dataBlockBytes() - addr.getOffset();
> if (guest_ptr + bytes_copied > paddr + len)
> bytes_copied = paddr + len - guest_ptr;
> }
>
> // first we need to find all data blocks that have to be updated for a write
> // and the highest block for a read
> for(int i=0;i<RubyConfig::numberOfProcessors();i++) {
> if (Protocol::m_TwoLevelCache){
> if(m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[i]->isTagPresent(line_address(addr)))
> datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[i]->lookup(line_addr).getDataBlk());
> if(m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[i]->isTagPresent(line_address(addr)))
> datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[i]->lookup(line_addr).getDataBlk());
> } else {
> if(m_chip_ptr->m_L1Cache_cacheMemory_vec[i]->isTagPresent(line_address(addr)))
> datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_cacheMemory_vec[i]->lookup(line_addr).getDataBlk());
> }
> }
> if (Protocol::m_TwoLevelCache){
> int l2_bank = map_L2ChipId_to_L2Cache(addr, 0).num; // TODO: ONLY WORKS WITH CMP!!!
> if (m_chip_ptr->m_L2Cache_L2cacheMemory_vec[l2_bank]->isTagPresent(line_address(Address(paddr)))) {
> datablocks.insertAtBottom(&m_chip_ptr->m_L2Cache_L2cacheMemory_vec[l2_bank]->lookup(addr).getDataBlk());
> }
> }
> assert(dynamic_cast<Chip*>(m_chip_ptr)->m_Directory_directory_vec.size() > map_Address_to_DirectoryNode(addr));
> DirectoryMemory* dir = dynamic_cast<Chip*>(m_chip_ptr)->m_Directory_directory_vec[map_Address_to_DirectoryNode(addr)];
> Directory_Entry& entry = dir->lookup(line_addr);
> datablocks.insertAtBottom(&entry.getDataBlk());
>
> if (pkt->isRead()){
> datablocks[0]->copyData(pkt_data, addr.getOffset(), bytes_copied);
> } else {// pkt->isWrite() {
> for (int i=0;i<datablocks.size();i++)
> datablocks[i]->setData(pkt_data, addr.getOffset(), bytes_copied);
> }
>
> guest_ptr += bytes_copied;
> pkt_data += bytes_copied;
> datablocks.clear();
> }
> }
>
> */
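
The bytes_copied computation in the commented-out rubyMemAccess() splits an arbitrary [paddr, paddr+len) range on cache-line boundaries. The same arithmetic, isolated; with 64-byte blocks, a 100-byte access starting at line offset 0x38 splits into 8 + 64 + 28 bytes:

    // Isolated form of the chunking arithmetic sketched above.
    int bytesInChunk(uint64 guest_ptr, uint64 paddr, int len, int block_bytes)
    {
        int offset = guest_ptr % block_bytes;
        int bytes = (offset == 0) ? block_bytes : block_bytes - offset;
        if (guest_ptr + bytes > paddr + (uint64) len)
            bytes = paddr + len - guest_ptr;
        return bytes;
    }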