Diff of mem/ruby/system/Sequencer.cc: revision 6165:2d26c346f1be (old) vs. revision 6285:ce086eca1ede (new). The line numbers embedded in the listing below are each revision's own file line numbers.
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright

--- 13 unchanged lines hidden ---

 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * $Id: Sequencer.C 1.131 2006/11/06 17:41:01-06:00 bobba@gratiano.cs.wisc.edu $
 * (this RCS id block appears only in the old revision; it is dropped in 6285:ce086eca1ede)
 */
35#include "mem/ruby/common/Global.hh"
36#include "mem/ruby/system/Sequencer.hh"
37#include "mem/ruby/system/System.hh"
38#include "mem/protocol/Protocol.hh"
39#include "mem/ruby/profiler/Profiler.hh"
40#include "mem/ruby/system/CacheMemory.hh"
30#include "mem/ruby/common/Global.hh"
31#include "mem/ruby/system/Sequencer.hh"
32#include "mem/ruby/system/System.hh"
33#include "mem/protocol/Protocol.hh"
34#include "mem/ruby/profiler/Profiler.hh"
35#include "mem/ruby/system/CacheMemory.hh"
41#include "mem/ruby/config/RubyConfig.hh"
42//#include "mem/ruby/recorder/Tracer.hh"
43#include "mem/ruby/slicc_interface/AbstractChip.hh"
44#include "mem/protocol/Chip.hh"
45#include "mem/ruby/tester/Tester.hh"
36#include "mem/protocol/CacheMsg.hh"
37#include "mem/ruby/recorder/Tracer.hh"
46#include "mem/ruby/common/SubBlock.hh"
47#include "mem/protocol/Protocol.hh"
48#include "mem/gems_common/Map.hh"
38#include "mem/ruby/common/SubBlock.hh"
39#include "mem/protocol/Protocol.hh"
40#include "mem/gems_common/Map.hh"
49#include "mem/packet.hh"
41#include "mem/ruby/buffers/MessageBuffer.hh"
42#include "mem/ruby/slicc_interface/AbstractController.hh"
50
43
51Sequencer::Sequencer(AbstractChip* chip_ptr, int version) {
52 m_chip_ptr = chip_ptr;
53 m_version = version;
44//Sequencer::Sequencer(int core_id, MessageBuffer* mandatory_q)
54
45
46Sequencer::Sequencer(const string & name)
47 :RubyPort(name)
48{
49}
50
51void Sequencer::init(const vector<string> & argv)
52{
55 m_deadlock_check_scheduled = false;
56 m_outstanding_count = 0;
57
53 m_deadlock_check_scheduled = false;
54 m_outstanding_count = 0;
55
58 int smt_threads = RubyConfig::numberofSMTThreads();
59 m_writeRequestTable_ptr = new Map<Address, CacheMsg>*[smt_threads];
60 m_readRequestTable_ptr = new Map<Address, CacheMsg>*[smt_threads];
61
62 m_packetTable_ptr = new Map<Address, Packet*>;
63
64 for(int p=0; p < smt_threads; ++p){
65 m_writeRequestTable_ptr[p] = new Map<Address, CacheMsg>;
66 m_readRequestTable_ptr[p] = new Map<Address, CacheMsg>;
56 m_max_outstanding_requests = 0;
57 m_deadlock_threshold = 0;
58 m_version = -1;
59 m_instCache_ptr = NULL;
60 m_dataCache_ptr = NULL;
61 m_controller = NULL;
62 for (size_t i=0; i<argv.size(); i+=2) {
63 if ( argv[i] == "controller") {
64 m_controller = RubySystem::getController(argv[i+1]); // args[i] = "L1Cache"
65 m_mandatory_q_ptr = m_controller->getMandatoryQueue();
66 } else if ( argv[i] == "icache")
67 m_instCache_ptr = RubySystem::getCache(argv[i+1]);
68 else if ( argv[i] == "dcache")
69 m_dataCache_ptr = RubySystem::getCache(argv[i+1]);
70 else if ( argv[i] == "version")
71 m_version = atoi(argv[i+1].c_str());
72 else if ( argv[i] == "max_outstanding_requests")
73 m_max_outstanding_requests = atoi(argv[i+1].c_str());
74 else if ( argv[i] == "deadlock_threshold")
75 m_deadlock_threshold = atoi(argv[i+1].c_str());
76 else {
77 cerr << "WARNING: Sequencer: Unkown configuration parameter: " << argv[i] << endl;
78 assert(false);
79 }
67 }
80 }
68
81 assert(m_max_outstanding_requests > 0);
82 assert(m_deadlock_threshold > 0);
83 assert(m_version > -1);
84 assert(m_instCache_ptr != NULL);
85 assert(m_dataCache_ptr != NULL);
86 assert(m_controller != NULL);
69}
70
71Sequencer::~Sequencer() {
87}
88
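// [Illustrative sketch, not part of either revision.] The new init(argv) path is
// configured with flat key/value string pairs. The keys below are exactly the ones
// the parser above accepts; the values ("L1Cache", "L1Cache_I", ...) are
// hypothetical names used only for illustration. Every parameter must be supplied,
// otherwise the asserts at the end of init() fire.
static void example_configure_sequencer(Sequencer* sequencer)
{
    vector<string> argv;
    const char* args[] = {
        "controller",               "L1Cache",    // resolved via RubySystem::getController()
        "icache",                   "L1Cache_I",  // resolved via RubySystem::getCache()
        "dcache",                   "L1Cache_D",
        "version",                  "0",
        "max_outstanding_requests", "16",
        "deadlock_threshold",       "500000"
    };
    for (size_t i = 0; i < sizeof(args)/sizeof(args[0]); ++i)
        argv.push_back(args[i]);
    sequencer->init(argv);
}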
89Sequencer::~Sequencer() {
72 int smt_threads = RubyConfig::numberofSMTThreads();
73 for(int i=0; i < smt_threads; ++i){
74 if(m_writeRequestTable_ptr[i]){
75 delete m_writeRequestTable_ptr[i];
76 }
77 if(m_readRequestTable_ptr[i]){
78 delete m_readRequestTable_ptr[i];
79 }
80 }
81 if(m_writeRequestTable_ptr){
82 delete [] m_writeRequestTable_ptr;
83 }
84 if(m_readRequestTable_ptr){
85 delete [] m_readRequestTable_ptr;
86 }
90
87}
88
89void Sequencer::wakeup() {
90 // Check for deadlock of any of the requests
91 Time current_time = g_eventQueue_ptr->getTime();
91}
92
93void Sequencer::wakeup() {
94 // Check for deadlock of any of the requests
95 Time current_time = g_eventQueue_ptr->getTime();
92 bool deadlock = false;
93
94 // Check across all outstanding requests
96
97 // Check across all outstanding requests
95 int smt_threads = RubyConfig::numberofSMTThreads();
96 int total_outstanding = 0;
98 int total_outstanding = 0;
97 for(int p=0; p < smt_threads; ++p){
98 Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
99 for (int i=0; i<keys.size(); i++) {
100 CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
101 if (current_time - request.getTime() >= g_DEADLOCK_THRESHOLD) {
102 WARN_MSG("Possible Deadlock detected");
103 WARN_EXPR(request);
104 WARN_EXPR(m_chip_ptr->getID());
105 WARN_EXPR(m_version);
106 WARN_EXPR(keys.size());
107 WARN_EXPR(current_time);
108 WARN_EXPR(request.getTime());
109 WARN_EXPR(current_time - request.getTime());
110 WARN_EXPR(*m_readRequestTable_ptr[p]);
111 ERROR_MSG("Aborting");
112 deadlock = true;
113 }
114 }
115
99
116 keys = m_writeRequestTable_ptr[p]->keys();
117 for (int i=0; i<keys.size(); i++) {
118 CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
119 if (current_time - request.getTime() >= g_DEADLOCK_THRESHOLD) {
120 WARN_MSG("Possible Deadlock detected");
121 WARN_EXPR(request);
122 WARN_EXPR(m_chip_ptr->getID());
123 WARN_EXPR(m_version);
124 WARN_EXPR(current_time);
125 WARN_EXPR(request.getTime());
126 WARN_EXPR(current_time - request.getTime());
127 WARN_EXPR(keys.size());
128 WARN_EXPR(*m_writeRequestTable_ptr[p]);
129 ERROR_MSG("Aborting");
130 deadlock = true;
131 }
100 Vector<Address> keys = m_readRequestTable.keys();
101    for (int i=0; i<keys.size(); i++) {
102      SequencerRequest* request = m_readRequestTable.lookup(keys[i]);
103 if (current_time - request->issue_time >= m_deadlock_threshold) {
104 WARN_MSG("Possible Deadlock detected");
105 WARN_EXPR(request);
106 WARN_EXPR(m_version);
107 WARN_EXPR(keys.size());
108 WARN_EXPR(current_time);
109 WARN_EXPR(request->issue_time);
110 WARN_EXPR(current_time - request->issue_time);
111 ERROR_MSG("Aborting");
132 }
112 }
133 total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
134 } // across all request tables
135 assert(m_outstanding_count == total_outstanding);
136
137 if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking
138 g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
139 } else {
140 m_deadlock_check_scheduled = false;
141 }
113 }
142}
143
114
144//returns the total number of requests
145int Sequencer::getNumberOutstanding(){
146 return m_outstanding_count;
147}
148
149// returns the total number of demand requests
150int Sequencer::getNumberOutstandingDemand(){
151 int smt_threads = RubyConfig::numberofSMTThreads();
152 int total_demand = 0;
153 for(int p=0; p < smt_threads; ++p){
154 Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
155 for (int i=0; i< keys.size(); i++) {
156 CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
157 if(request.getPrefetch() == PrefetchBit_No){
158 total_demand++;
159 }
115 keys = m_writeRequestTable.keys();
116 for (int i=0; i<keys.size(); i++) {
117 SequencerRequest* request = m_writeRequestTable.lookup(keys[i]);
118 if (current_time - request->issue_time >= m_deadlock_threshold) {
119 WARN_MSG("Possible Deadlock detected");
120 WARN_EXPR(request);
121 WARN_EXPR(m_version);
122 WARN_EXPR(current_time);
123 WARN_EXPR(request->issue_time);
124 WARN_EXPR(current_time - request->issue_time);
125 WARN_EXPR(keys.size());
126 ERROR_MSG("Aborting");
160 }
127 }
161
162 keys = m_writeRequestTable_ptr[p]->keys();
163 for (int i=0; i< keys.size(); i++) {
164 CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
165 if(request.getPrefetch() == PrefetchBit_No){
166 total_demand++;
167 }
168 }
169 }
128 }
129 total_outstanding += m_writeRequestTable.size() + m_readRequestTable.size();
170
130
171 return total_demand;
172}
131 assert(m_outstanding_count == total_outstanding);
173
132
174int Sequencer::getNumberOutstandingPrefetch(){
175 int smt_threads = RubyConfig::numberofSMTThreads();
176 int total_prefetch = 0;
177 for(int p=0; p < smt_threads; ++p){
178 Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
179 for (int i=0; i< keys.size(); i++) {
180 CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
181 if(request.getPrefetch() == PrefetchBit_Yes){
182 total_prefetch++;
183 }
184 }
185
186 keys = m_writeRequestTable_ptr[p]->keys();
187 for (int i=0; i< keys.size(); i++) {
188 CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
189 if(request.getPrefetch() == PrefetchBit_Yes){
190 total_prefetch++;
191 }
192 }
193 }
194
195 return total_prefetch;
196}
197
198bool Sequencer::isPrefetchRequest(const Address & lineaddr){
199 int smt_threads = RubyConfig::numberofSMTThreads();
200 for(int p=0; p < smt_threads; ++p){
201 // check load requests
202 Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
203 for (int i=0; i< keys.size(); i++) {
204 CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
205 if(line_address(request.getAddress()) == lineaddr){
206 if(request.getPrefetch() == PrefetchBit_Yes){
207 return true;
208 }
209 else{
210 return false;
211 }
212 }
213 }
214
215 // check store requests
216 keys = m_writeRequestTable_ptr[p]->keys();
217 for (int i=0; i< keys.size(); i++) {
218 CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
219 if(line_address(request.getAddress()) == lineaddr){
220 if(request.getPrefetch() == PrefetchBit_Yes){
221 return true;
222 }
223 else{
224 return false;
225 }
226 }
227 }
228 }
229 // we should've found a matching request
230 cout << "isRequestPrefetch() ERROR request NOT FOUND : " << lineaddr << endl;
231 printProgress(cout);
232 assert(0);
233}
234
235AccessModeType Sequencer::getAccessModeOfRequest(Address addr, int thread){
236 if(m_readRequestTable_ptr[thread]->exist(line_address(addr))){
237 CacheMsg& request = m_readRequestTable_ptr[thread]->lookup(addr);
238 return request.getAccessMode();
239 } else if(m_writeRequestTable_ptr[thread]->exist(line_address(addr))){
240 CacheMsg& request = m_writeRequestTable_ptr[thread]->lookup(addr);
241 return request.getAccessMode();
133 if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking
134 g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
242 } else {
135 } else {
243 printProgress(cout);
244 ERROR_MSG("Request not found in RequestTables");
136 m_deadlock_check_scheduled = false;
245 }
246}
247
137 }
138}
139
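// [Not in either revision] Deadlock-check behaviour shared by both versions: the
// check is armed lazily by insertRequest() when the first request becomes
// outstanding; wakeup() then scans every entry in the read and write request
// tables, reports any request older than the threshold (g_DEADLOCK_THRESHOLD in
// the old revision, the per-sequencer m_deadlock_threshold parameter in the new
// one), and re-schedules itself one threshold later for as long as any request
// remains outstanding.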
248Address Sequencer::getLogicalAddressOfRequest(Address addr, int thread){
249 assert(thread >= 0);
250 if(m_readRequestTable_ptr[thread]->exist(line_address(addr))){
251 CacheMsg& request = m_readRequestTable_ptr[thread]->lookup(addr);
252 return request.getLogicalAddress();
253 } else if(m_writeRequestTable_ptr[thread]->exist(line_address(addr))){
254 CacheMsg& request = m_writeRequestTable_ptr[thread]->lookup(addr);
255 return request.getLogicalAddress();
256 } else {
257 printProgress(cout);
258 WARN_MSG("Request not found in RequestTables");
259 WARN_MSG(addr);
260 WARN_MSG(thread);
261 ASSERT(0);
262 }
263}
264
265// returns the ThreadID of the request
266int Sequencer::getRequestThreadID(const Address & addr){
267 int smt_threads = RubyConfig::numberofSMTThreads();
268 int thread = -1;
269 int num_found = 0;
270 for(int p=0; p < smt_threads; ++p){
271 if(m_readRequestTable_ptr[p]->exist(addr)){
272 num_found++;
273 thread = p;
274 }
275 if(m_writeRequestTable_ptr[p]->exist(addr)){
276 num_found++;
277 thread = p;
278 }
279 }
280 if(num_found != 1){
281 cout << "getRequestThreadID ERROR too many matching requests addr = " << addr << endl;
282 printProgress(cout);
283 }
284 ASSERT(num_found == 1);
285 ASSERT(thread != -1);
286
287 return thread;
288}
289
290// given a line address, return the request's physical address
291Address Sequencer::getRequestPhysicalAddress(const Address & lineaddr){
292 int smt_threads = RubyConfig::numberofSMTThreads();
293 Address physaddr;
294 int num_found = 0;
295 for(int p=0; p < smt_threads; ++p){
296 if(m_readRequestTable_ptr[p]->exist(lineaddr)){
297 num_found++;
298 physaddr = (m_readRequestTable_ptr[p]->lookup(lineaddr)).getAddress();
299 }
300 if(m_writeRequestTable_ptr[p]->exist(lineaddr)){
301 num_found++;
302 physaddr = (m_writeRequestTable_ptr[p]->lookup(lineaddr)).getAddress();
303 }
304 }
305 if(num_found != 1){
306 cout << "getRequestPhysicalAddress ERROR too many matching requests addr = " << lineaddr << endl;
307 printProgress(cout);
308 }
309 ASSERT(num_found == 1);
310
311 return physaddr;
312}
313
314void Sequencer::printProgress(ostream& out) const{
140void Sequencer::printProgress(ostream& out) const{
315
141 /*
316 int total_demand = 0;
317 out << "Sequencer Stats Version " << m_version << endl;
318 out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
319 out << "---------------" << endl;
320 out << "outstanding requests" << endl;
321
142 int total_demand = 0;
143 out << "Sequencer Stats Version " << m_version << endl;
144 out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
145 out << "---------------" << endl;
146 out << "outstanding requests" << endl;
147
322 int smt_threads = RubyConfig::numberofSMTThreads();
323 for(int p=0; p < smt_threads; ++p){
324 Vector<Address> rkeys = m_readRequestTable_ptr[p]->keys();
325 int read_size = rkeys.size();
326 out << "proc " << m_chip_ptr->getID() << " thread " << p << " Read Requests = " << read_size << endl;
327 // print the request table
328 for(int i=0; i < read_size; ++i){
329 CacheMsg & request = m_readRequestTable_ptr[p]->lookup(rkeys[i]);
330 out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << rkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
331 if( request.getPrefetch() == PrefetchBit_No ){
332 total_demand++;
333 }
334 }
148 Vector<Address> rkeys = m_readRequestTable.keys();
149 int read_size = rkeys.size();
150 out << "proc " << m_version << " Read Requests = " << read_size << endl;
151 // print the request table
152 for(int i=0; i < read_size; ++i){
153 SequencerRequest * request = m_readRequestTable.lookup(rkeys[i]);
154 out << "\tRequest[ " << i << " ] = " << request->type << " Address " << rkeys[i] << " Posted " << request->issue_time << " PF " << PrefetchBit_No << endl;
155 total_demand++;
156 }
335
157
336 Vector<Address> wkeys = m_writeRequestTable_ptr[p]->keys();
337 int write_size = wkeys.size();
338 out << "proc " << m_chip_ptr->getID() << " thread " << p << " Write Requests = " << write_size << endl;
339 // print the request table
340 for(int i=0; i < write_size; ++i){
341 CacheMsg & request = m_writeRequestTable_ptr[p]->lookup(wkeys[i]);
158 Vector<Address> wkeys = m_writeRequestTable.keys();
159 int write_size = wkeys.size();
160 out << "proc " << m_version << " Write Requests = " << write_size << endl;
161 // print the request table
162 for(int i=0; i < write_size; ++i){
163 CacheMsg & request = m_writeRequestTable.lookup(wkeys[i]);
342 out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
343 if( request.getPrefetch() == PrefetchBit_No ){
344 total_demand++;
345 }
164 out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
165 if( request.getPrefetch() == PrefetchBit_No ){
166 total_demand++;
167 }
346 }
347
348 out << endl;
349 }
168 }
169
170 out << endl;
171
350 out << "Total Number Outstanding: " << m_outstanding_count << endl;
351 out << "Total Number Demand : " << total_demand << endl;
352 out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl;
353 out << endl;
354 out << endl;
172 out << "Total Number Outstanding: " << m_outstanding_count << endl;
173 out << "Total Number Demand : " << total_demand << endl;
174 out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl;
175 out << endl;
176 out << endl;
355
177 */
356}
357
178}
179
358void Sequencer::printConfig(ostream& out) {
359 if (TSO) {
360 out << "sequencer: Sequencer - TSO" << endl;
361 } else {
362 out << "sequencer: Sequencer - SC" << endl;
363 }
364 out << " max_outstanding_requests: " << g_SEQUENCER_OUTSTANDING_REQUESTS << endl;
180void Sequencer::printConfig(ostream& out) const {
181 out << "Seqeuncer config: " << m_name << endl;
182 out << " controller: " << m_controller->getName() << endl;
183 out << " version: " << m_version << endl;
184 out << " max_outstanding_requests: " << m_max_outstanding_requests << endl;
185 out << " deadlock_threshold: " << m_deadlock_threshold << endl;
365}
366
186}
187
367bool Sequencer::empty() const {
368 return m_outstanding_count == 0;
369}
370
371// Insert the request on the correct request table. Return true if
372// the entry was already present.
188// Insert the request on the correct request table. Return true if
189// the entry was already present.
373bool Sequencer::insertRequest(const CacheMsg& request) {
374 int thread = request.getThreadID();
375 assert(thread >= 0);
376 int total_outstanding = 0;
377 int smt_threads = RubyConfig::numberofSMTThreads();
378 for(int p=0; p < smt_threads; ++p){
379 total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
380 }
190bool Sequencer::insertRequest(SequencerRequest* request) {
191 int total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
192
381 assert(m_outstanding_count == total_outstanding);
382
383 // See if we should schedule a deadlock check
384 if (m_deadlock_check_scheduled == false) {
193 assert(m_outstanding_count == total_outstanding);
194
195 // See if we should schedule a deadlock check
196 if (m_deadlock_check_scheduled == false) {
385 g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
197 g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
386 m_deadlock_check_scheduled = true;
387 }
388
198 m_deadlock_check_scheduled = true;
199 }
200
389 if ((request.getType() == CacheRequestType_ST) ||
390 (request.getType() == CacheRequestType_ATOMIC)) {
391 if (m_writeRequestTable_ptr[thread]->exist(line_address(request.getAddress()))) {
392 m_writeRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
393 return true;
201 Address line_addr(request->ruby_request.paddr);
202 line_addr.makeLineAddress();
203 if ((request->ruby_request.type == RubyRequestType_ST) ||
204 (request->ruby_request.type == RubyRequestType_RMW)) {
205 if (m_writeRequestTable.exist(line_addr)) {
206 m_writeRequestTable.lookup(line_addr) = request;
207 // return true;
208 assert(0); // drh5: isn't this an error? do you lose the initial request?
394 }
209 }
395 m_writeRequestTable_ptr[thread]->allocate(line_address(request.getAddress()));
396 m_writeRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
210 m_writeRequestTable.allocate(line_addr);
211 m_writeRequestTable.lookup(line_addr) = request;
397 m_outstanding_count++;
398 } else {
212 m_outstanding_count++;
213 } else {
399 if (m_readRequestTable_ptr[thread]->exist(line_address(request.getAddress()))) {
400 m_readRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
401 return true;
214 if (m_readRequestTable.exist(line_addr)) {
215 m_readRequestTable.lookup(line_addr) = request;
216 // return true;
217 assert(0); // drh5: isn't this an error? do you lose the initial request?
402 }
218 }
403 m_readRequestTable_ptr[thread]->allocate(line_address(request.getAddress()));
404 m_readRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
219 m_readRequestTable.allocate(line_addr);
220 m_readRequestTable.lookup(line_addr) = request;
405 m_outstanding_count++;
406 }
407
408 g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);
409
221 m_outstanding_count++;
222 }
223
224 g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);
225
410 total_outstanding = 0;
411 for(int p=0; p < smt_threads; ++p){
412 total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
413 }
414
226 total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
415 assert(m_outstanding_count == total_outstanding);
227 assert(m_outstanding_count == total_outstanding);
228
416 return false;
417}
418
229 return false;
230}
231
419void Sequencer::removeRequest(const CacheMsg& request) {
420 int thread = request.getThreadID();
421 assert(thread >= 0);
422 int total_outstanding = 0;
423 int smt_threads = RubyConfig::numberofSMTThreads();
424 for(int p=0; p < smt_threads; ++p){
425 total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
426 }
427 assert(m_outstanding_count == total_outstanding);
232void Sequencer::removeRequest(SequencerRequest* srequest) {
428
233
429 if ((request.getType() == CacheRequestType_ST) ||
430 (request.getType() == CacheRequestType_ATOMIC)) {
431 m_writeRequestTable_ptr[thread]->deallocate(line_address(request.getAddress()));
234 assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
235
236 const RubyRequest & ruby_request = srequest->ruby_request;
237 Address line_addr(ruby_request.paddr);
238 line_addr.makeLineAddress();
239 if ((ruby_request.type == RubyRequestType_ST) ||
240 (ruby_request.type == RubyRequestType_RMW)) {
241 m_writeRequestTable.deallocate(line_addr);
432 } else {
242 } else {
433 m_readRequestTable_ptr[thread]->deallocate(line_address(request.getAddress()));
243 m_readRequestTable.deallocate(line_addr);
434 }
435 m_outstanding_count--;
436
244 }
245 m_outstanding_count--;
246
437 total_outstanding = 0;
438 for(int p=0; p < smt_threads; ++p){
439 total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
440 }
441 assert(m_outstanding_count == total_outstanding);
247 assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
442}
443
248}
249
444void Sequencer::writeCallback(const Address& address) {
445 DataBlock data;
446 writeCallback(address, data);
447}
448
449void Sequencer::writeCallback(const Address& address, DataBlock& data) {
250void Sequencer::writeCallback(const Address& address, DataBlock& data) {
450 // process oldest thread first
451 int thread = -1;
452 Time oldest_time = 0;
453 int smt_threads = RubyConfig::numberofSMTThreads();
454 for(int t=0; t < smt_threads; ++t){
455 if(m_writeRequestTable_ptr[t]->exist(address)){
456 CacheMsg & request = m_writeRequestTable_ptr[t]->lookup(address);
457 if(thread == -1 || (request.getTime() < oldest_time) ){
458 thread = t;
459 oldest_time = request.getTime();
460 }
461 }
462 }
463 // make sure we found an oldest thread
464 ASSERT(thread != -1);
465
251
466 CacheMsg & request = m_writeRequestTable_ptr[thread]->lookup(address);
467
468 writeCallback(address, data, GenericMachineType_NULL, PrefetchBit_No, thread);
469}
470
471void Sequencer::writeCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, PrefetchBit pf, int thread) {
472
473 assert(address == line_address(address));
252 assert(address == line_address(address));
474 assert(thread >= 0);
475 assert(m_writeRequestTable_ptr[thread]->exist(line_address(address)));
253 assert(m_writeRequestTable.exist(line_address(address)));
476
254
477 writeCallback(address, data, respondingMach, thread);
478
479}
480
481void Sequencer::writeCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, int thread) {
482 assert(address == line_address(address));
483 assert(m_writeRequestTable_ptr[thread]->exist(line_address(address)));
484 CacheMsg request = m_writeRequestTable_ptr[thread]->lookup(address);
485 assert( request.getThreadID() == thread);
255 SequencerRequest* request = m_writeRequestTable.lookup(address);
486 removeRequest(request);
487
256 removeRequest(request);
257
488 assert((request.getType() == CacheRequestType_ST) ||
489 (request.getType() == CacheRequestType_ATOMIC));
258 assert((request->ruby_request.type == RubyRequestType_ST) ||
259 (request->ruby_request.type == RubyRequestType_RMW));
490
260
491 hitCallback(request, data, respondingMach, thread);
492
261 hitCallback(request, data);
493}
494
262}
263
495void Sequencer::readCallback(const Address& address) {
496 DataBlock data;
497 readCallback(address, data);
498}
499
500void Sequencer::readCallback(const Address& address, DataBlock& data) {
264void Sequencer::readCallback(const Address& address, DataBlock& data) {
501 // process oldest thread first
502 int thread = -1;
503 Time oldest_time = 0;
504 int smt_threads = RubyConfig::numberofSMTThreads();
505 for(int t=0; t < smt_threads; ++t){
506 if(m_readRequestTable_ptr[t]->exist(address)){
507 CacheMsg & request = m_readRequestTable_ptr[t]->lookup(address);
508 if(thread == -1 || (request.getTime() < oldest_time) ){
509 thread = t;
510 oldest_time = request.getTime();
511 }
512 }
513 }
514 // make sure we found an oldest thread
515 ASSERT(thread != -1);
516
265
517 CacheMsg & request = m_readRequestTable_ptr[thread]->lookup(address);
518
519 readCallback(address, data, GenericMachineType_NULL, PrefetchBit_No, thread);
520}
521
522void Sequencer::readCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, PrefetchBit pf, int thread) {
523
524 assert(address == line_address(address));
266 assert(address == line_address(address));
525 assert(m_readRequestTable_ptr[thread]->exist(line_address(address)));
267 assert(m_readRequestTable.exist(line_address(address)));
526
268
527 readCallback(address, data, respondingMach, thread);
528}
529
530void Sequencer::readCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, int thread) {
531 assert(address == line_address(address));
532 assert(m_readRequestTable_ptr[thread]->exist(line_address(address)));
533
534 CacheMsg request = m_readRequestTable_ptr[thread]->lookup(address);
535 assert( request.getThreadID() == thread );
269 SequencerRequest* request = m_readRequestTable.lookup(address);
536 removeRequest(request);
537
270 removeRequest(request);
271
538 assert((request.getType() == CacheRequestType_LD) ||
539 (request.getType() == CacheRequestType_IFETCH)
540 );
272 assert((request->ruby_request.type == RubyRequestType_LD) ||
273 (request->ruby_request.type == RubyRequestType_IFETCH));
541
274
542 hitCallback(request, data, respondingMach, thread);
275 hitCallback(request, data);
543}
544
276}
277
545void Sequencer::hitCallback(const CacheMsg& request, DataBlock& data, GenericMachineType respondingMach, int thread) {
546 int size = request.getSize();
547 Address request_address = request.getAddress();
548 Address request_logical_address = request.getLogicalAddress();
549 Address request_line_address = line_address(request_address);
550 CacheRequestType type = request.getType();
551 int threadID = request.getThreadID();
552 Time issued_time = request.getTime();
553 int logical_proc_no = ((m_chip_ptr->getID() * RubyConfig::numberOfProcsPerChip()) + m_version) * RubyConfig::numberofSMTThreads() + threadID;
278void Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data) {
279 const RubyRequest & ruby_request = srequest->ruby_request;
280 int size = ruby_request.len;
281 Address request_address(ruby_request.paddr);
282 Address request_line_address(ruby_request.paddr);
283 request_line_address.makeLineAddress();
284 RubyRequestType type = ruby_request.type;
285 Time issued_time = srequest->issue_time;
554
286
555 DEBUG_MSG(SEQUENCER_COMP, MedPrio, size);
556
557 // Set this cache entry to the most recently used
287 // Set this cache entry to the most recently used
558 if (type == CacheRequestType_IFETCH) {
559 if (Protocol::m_TwoLevelCache) {
560 if (m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
561 m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[m_version]->setMRU(request_line_address);
562 }
563 }
564 else {
565 if (m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
566 m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->setMRU(request_line_address);
567 }
568 }
288 if (type == RubyRequestType_IFETCH) {
289 if (m_instCache_ptr->isTagPresent(request_line_address) )
290 m_instCache_ptr->setMRU(request_line_address);
569 } else {
291 } else {
570 if (Protocol::m_TwoLevelCache) {
571 if (m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
572 m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[m_version]->setMRU(request_line_address);
573 }
574 }
575 else {
576 if (m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
577 m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->setMRU(request_line_address);
578 }
579 }
292 if (m_dataCache_ptr->isTagPresent(request_line_address) )
293 m_dataCache_ptr->setMRU(request_line_address);
580 }
581
582 assert(g_eventQueue_ptr->getTime() >= issued_time);
583 Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;
584
294 }
295
296 assert(g_eventQueue_ptr->getTime() >= issued_time);
297 Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;
298
585 if (PROTOCOL_DEBUG_TRACE) {
586 g_system_ptr->getProfiler()->profileTransition("Seq", (m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version), -1, request.getAddress(), "", "Done", "",
587 int_to_string(miss_latency)+" cycles "+GenericMachineType_to_string(respondingMach)+" "+CacheRequestType_to_string(request.getType())+" "+PrefetchBit_to_string(request.getPrefetch()));
588 }
589
590 DEBUG_MSG(SEQUENCER_COMP, MedPrio, request_address);
591 DEBUG_MSG(SEQUENCER_COMP, MedPrio, request.getPrefetch());
592 if (request.getPrefetch() == PrefetchBit_Yes) {
593 DEBUG_MSG(SEQUENCER_COMP, MedPrio, "return");
594 g_system_ptr->getProfiler()->swPrefetchLatency(miss_latency, type, respondingMach);
595 return; // Ignore the software prefetch, don't callback the driver
596 }
597
598 // Profile the miss latency for all non-zero demand misses
599 if (miss_latency != 0) {
299 // Profile the miss latency for all non-zero demand misses
300 if (miss_latency != 0) {
600 g_system_ptr->getProfiler()->missLatency(miss_latency, type, respondingMach);
301 g_system_ptr->getProfiler()->missLatency(miss_latency, type);
601
302
303 if (Debug::getProtocolTrace()) {
304 g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(ruby_request.paddr),
305 "", "Done", "", int_to_string(miss_latency)+" cycles");
306 }
602 }
307 }
308 /*
309 if (request.getPrefetch() == PrefetchBit_Yes) {
310 return; // Ignore the prefetch
311 }
312 */
603
313
604 bool write =
605 (type == CacheRequestType_ST) ||
606 (type == CacheRequestType_ATOMIC);
607
608 if (TSO && write) {
609 m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->callBack(line_address(request.getAddress()), data,
610 m_packetTable_ptr->lookup(request.getAddress()));
611 } else {
612
613 // Copy the correct bytes out of the cache line into the subblock
614 SubBlock subblock(request_address, request_logical_address, size);
615 subblock.mergeFrom(data); // copy the correct bytes from DataBlock in the SubBlock
616
617 // Scan the store buffer to see if there are any outstanding stores we need to collect
618 if (TSO) {
619 m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->updateSubBlock(subblock);
314 // update the data
315 if (ruby_request.data != NULL) {
316 if ((type == RubyRequestType_LD) ||
317 (type == RubyRequestType_IFETCH)) {
318 memcpy(ruby_request.data, data.getData(request_address.getOffset(), ruby_request.len), ruby_request.len);
319 } else {
320 data.setData(ruby_request.data, request_address.getOffset(), ruby_request.len);
620 }
321 }
621
622 // Call into the Driver and let it read and/or modify the sub-block
623 Packet* pkt = m_packetTable_ptr->lookup(request.getAddress());
624
625 // update data if this is a store/atomic
626
627 /*
628 if (pkt->req->isCondSwap()) {
629 L1Cache_Entry entry = m_L1Cache_vec[m_version]->lookup(Address(pkt->req->physAddr()));
630 DataBlk datablk = entry->getDataBlk();
631 uint8_t *orig_data = datablk.getArray();
632 if ( datablk.equal(pkt->req->getExtraData()) )
633 datablk->setArray(pkt->getData());
634 pkt->setData(orig_data);
635 }
636 */
637
638 g_system_ptr->getDriver()->hitCallback(pkt);
639 m_packetTable_ptr->remove(request.getAddress());
640
641 // If the request was a Store or Atomic, apply the changes in the SubBlock to the DataBlock
642 // (This is only triggered for the non-TSO case)
643 if (write) {
644 assert(!TSO);
645 subblock.mergeTo(data); // copy the correct bytes from SubBlock into the DataBlock
646 }
647 }
322 }
648}
649
323
650void Sequencer::printDebug(){
651 //notify driver of debug
652 g_system_ptr->getDriver()->printDebug();
324 m_hit_callback(srequest->id);
325 delete srequest;
653}
654
326}
327
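// [Not in either revision] Note on the copy direction in the new hitCallback():
// for LD and IFETCH requests the freshly supplied DataBlock is copied out into
// ruby_request.data, while for ST and RMW requests the bytes carried in
// ruby_request.data are written into the DataBlock, before completion is signalled
// through m_hit_callback(srequest->id).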
655//dsm: breaks build, delayed
656// Returns true if the sequencer already has a load or store outstanding
328// Returns true if the sequencer already has a load or store outstanding
657bool
658Sequencer::isReady(const Packet* pkt) const
659{
660
661 int cpu_number = pkt->req->contextId();
662 la_t logical_addr = pkt->req->getVaddr();
663 pa_t physical_addr = pkt->req->getPaddr();
664 CacheRequestType type_of_request;
665 if ( pkt->req->isInstFetch() ) {
666 type_of_request = CacheRequestType_IFETCH;
667 } else if ( pkt->req->isLocked() || pkt->req->isSwap() ) {
668 type_of_request = CacheRequestType_ATOMIC;
669 } else if ( pkt->isRead() ) {
670 type_of_request = CacheRequestType_LD;
671 } else if ( pkt->isWrite() ) {
672 type_of_request = CacheRequestType_ST;
673 } else {
674 assert(false);
329bool Sequencer::isReady(const RubyRequest& request) const {
330 // POLINA: check if we are currently flushing the write buffer, if so Ruby is returned as not ready
331 // to simulate stalling of the front-end
332 // Do we stall all the sequencers? If it is atomic instruction - yes!
333 if (m_outstanding_count >= m_max_outstanding_requests) {
334 return false;
675 }
335 }
676 int thread = pkt->req->threadId();
677
336
678 CacheMsg request(Address( physical_addr ),
679 Address( physical_addr ),
680 type_of_request,
681 Address(0),
682 AccessModeType_UserMode, // User/supervisor mode
683 0, // Size in bytes of request
684 PrefetchBit_No, // Not a prefetch
685 0, // Version number
686 Address(logical_addr), // Virtual Address
687 thread // SMT thread
688 );
689 return isReady(request);
690}
691
692bool
693Sequencer::isReady(const CacheMsg& request) const
694{
695 if (m_outstanding_count >= g_SEQUENCER_OUTSTANDING_REQUESTS) {
696 //cout << "TOO MANY OUTSTANDING: " << m_outstanding_count << " " << g_SEQUENCER_OUTSTANDING_REQUESTS << " VER " << m_version << endl;
337 if( m_writeRequestTable.exist(line_address(Address(request.paddr))) ||
338 m_readRequestTable.exist(line_address(Address(request.paddr))) ){
339 //cout << "OUTSTANDING REQUEST EXISTS " << p << " VER " << m_version << endl;
697 //printProgress(cout);
698 return false;
699 }
700
340 //printProgress(cout);
341 return false;
342 }
343
701 // This code allows reads to be performed even when we have a write
702 // request outstanding for the line
703 bool write =
704 (request.getType() == CacheRequestType_ST) ||
705 (request.getType() == CacheRequestType_ATOMIC);
706
707 // LUKE - disallow more than one request type per address
708 // INVARIANT: at most one request type per address, per processor
709 int smt_threads = RubyConfig::numberofSMTThreads();
710 for(int p=0; p < smt_threads; ++p){
711 if( m_writeRequestTable_ptr[p]->exist(line_address(request.getAddress())) ||
712 m_readRequestTable_ptr[p]->exist(line_address(request.getAddress())) ){
713 //cout << "OUTSTANDING REQUEST EXISTS " << p << " VER " << m_version << endl;
714 //printProgress(cout);
715 return false;
716 }
717 }
718
719 if (TSO) {
720 return m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->isReady();
721 }
722 return true;
723}
724
344 return true;
345}
346
725//dsm: breaks build, delayed
726// Called by Driver (Simics or Tester).
727void
728Sequencer::makeRequest(Packet* pkt)
347bool Sequencer::empty() const {
348 return (m_writeRequestTable.size() == 0) && (m_readRequestTable.size() == 0);
349}
350
351int64_t Sequencer::makeRequest(const RubyRequest & request)
729{
352{
730 int cpu_number = pkt->req->contextId();
731 la_t logical_addr = pkt->req->getVaddr();
732 pa_t physical_addr = pkt->req->getPaddr();
733 int request_size = pkt->getSize();
734 CacheRequestType type_of_request;
735 PrefetchBit prefetch;
736 bool write = false;
737 if ( pkt->req->isInstFetch() ) {
738 type_of_request = CacheRequestType_IFETCH;
739 } else if ( pkt->req->isLocked() || pkt->req->isSwap() ) {
740 type_of_request = CacheRequestType_ATOMIC;
741 write = true;
742 } else if ( pkt->isRead() ) {
743 type_of_request = CacheRequestType_LD;
744 } else if ( pkt->isWrite() ) {
745 type_of_request = CacheRequestType_ST;
746 write = true;
747 } else {
748 assert(false);
353 assert(Address(request.paddr).getOffset() + request.len <= RubySystem::getBlockSizeBytes());
354 if (isReady(request)) {
355 int64_t id = makeUniqueRequestID();
356 SequencerRequest *srequest = new SequencerRequest(request, id, g_eventQueue_ptr->getTime());
357 bool found = insertRequest(srequest);
358 if (!found)
359 issueRequest(request);
360
361 // TODO: issue hardware prefetches here
362 return id;
749 }
363 }
750 if (pkt->req->isPrefetch()) {
751 prefetch = PrefetchBit_Yes;
752 } else {
753 prefetch = PrefetchBit_No;
364 else {
365 return -1;
754 }
366 }
755 la_t virtual_pc = pkt->req->getPC();
756 int isPriv = false; // TODO: get permission data
757 int thread = pkt->req->threadId();
367}
758
368
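// [Illustrative sketch, not part of either revision.] How a CPU-side port might
// drive the new request interface: fill in a RubyRequest (the field names are taken
// from the code in this diff; default construction and direct member assignment are
// assumptions), submit it with makeRequest(), and remember the returned id. The
// sequencer later completes the access by invoking the registered hit callback with
// that same id from hitCallback().
static int64_t example_issue_load(Sequencer* sequencer, uint64_t paddr, char* buffer)
{
    RubyRequest req;                        // assumed default-constructible
    req.paddr = paddr;                      // physical address of the access
    req.len = 8;                            // must stay within a single cache line
    req.type = RubyRequestType_LD;          // LD / ST / IFETCH / RMW
    req.data = buffer;                      // hitCallback() copies the loaded bytes here
    req.access_mode = RubyAccessMode_User;

    if (!sequencer->isReady(req))
        return -1;                          // too many outstanding requests, or the line is busy
    return sequencer->makeRequest(req);     // unique request id, or -1 on failure
}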
759 AccessModeType access_mode = AccessModeType_UserMode; // TODO: get actual permission
369void Sequencer::issueRequest(const RubyRequest& request) {
760
370
761 CacheMsg request(Address( physical_addr ),
762 Address( physical_addr ),
763 type_of_request,
764 Address(virtual_pc),
765 access_mode, // User/supervisor mode
766 request_size, // Size in bytes of request
767 prefetch,
768 0, // Version number
769 Address(logical_addr), // Virtual Address
770 thread // SMT thread
771 );
772
773 if ( TSO && write && !pkt->req->isPrefetch() ) {
774 assert(m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->isReady());
775 m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->insertStore(pkt, request);
776 return;
371 // TODO: get rid of CacheMsg, CacheRequestType, and AccessModeTYpe, & have SLICC use RubyRequest and subtypes natively
372 CacheRequestType ctype;
373 switch(request.type) {
374 case RubyRequestType_IFETCH:
375 ctype = CacheRequestType_IFETCH;
376 break;
377 case RubyRequestType_LD:
378 ctype = CacheRequestType_LD;
379 break;
380 case RubyRequestType_ST:
381 ctype = CacheRequestType_ST;
382 break;
383 case RubyRequestType_RMW:
384 ctype = CacheRequestType_ATOMIC;
385 break;
386 default:
387 assert(0);
777 }
388 }
389 AccessModeType amtype;
390 switch(request.access_mode){
391 case RubyAccessMode_User:
392 amtype = AccessModeType_UserMode;
393 break;
394 case RubyAccessMode_Supervisor:
395 amtype = AccessModeType_SupervisorMode;
396 break;
397 case RubyAccessMode_Device:
398 amtype = AccessModeType_UserMode;
399 break;
400 default:
401 assert(0);
402 }
403 Address line_addr(request.paddr);
404 line_addr.makeLineAddress();
405 CacheMsg msg(line_addr, Address(request.paddr), ctype, Address(request.pc), amtype, request.len, PrefetchBit_No);
778
406
779 m_packetTable_ptr->insert(Address( physical_addr ), pkt);
780
781 doRequest(request);
782}
783
784bool Sequencer::doRequest(const CacheMsg& request) {
785 bool hit = false;
786 // Check the fast path
787 DataBlock* data_ptr;
788
789 int thread = request.getThreadID();
790
791 hit = tryCacheAccess(line_address(request.getAddress()),
792 request.getType(),
793 request.getProgramCounter(),
794 request.getAccessMode(),
795 request.getSize(),
796 data_ptr);
797
798 if (hit && (request.getType() == CacheRequestType_IFETCH || !REMOVE_SINGLE_CYCLE_DCACHE_FAST_PATH) ) {
799 DEBUG_MSG(SEQUENCER_COMP, MedPrio, "Fast path hit");
800 hitCallback(request, *data_ptr, GenericMachineType_L1Cache, thread);
801 return true;
407 if (Debug::getProtocolTrace()) {
408 g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(request.paddr),
409 "", "Begin", "", RubyRequestType_to_string(request.type));
802 }
803
410 }
411
804 if (TSO && (request.getType() == CacheRequestType_LD || request.getType() == CacheRequestType_IFETCH)) {
805
806 // See if we can satisfy the load entirely from the store buffer
807 SubBlock subblock(line_address(request.getAddress()), request.getSize());
808 if (m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->trySubBlock(subblock)) {
809 DataBlock dummy;
810 hitCallback(request, dummy, GenericMachineType_NULL, thread); // Call with an 'empty' datablock, since the data is in the store buffer
811 return true;
812 }
412 if (g_system_ptr->getTracer()->traceEnabled()) {
413 g_system_ptr->getTracer()->traceRequest(m_name, line_addr, Address(request.pc),
414 request.type, g_eventQueue_ptr->getTime());
813 }
814
415 }
416
815 DEBUG_MSG(SEQUENCER_COMP, MedPrio, "Fast path miss");
816 issueRequest(request);
817 return hit;
818}
417  Time latency = 0;  // initialized to a null value
819
418
820void Sequencer::issueRequest(const CacheMsg& request) {
821 bool found = insertRequest(request);
419 if (request.type == RubyRequestType_IFETCH)
420 latency = m_instCache_ptr->getLatency();
421 else
422 latency = m_dataCache_ptr->getLatency();
822
423
823 if (!found) {
824 CacheMsg msg = request;
825 msg.getAddress() = line_address(request.getAddress()); // Make line address
424 // Send the message to the cache controller
425 assert(latency > 0);
826
426
827 // Fast Path L1 misses are profiled here - all non-fast path misses are profiled within the generated protocol code
828 if (!REMOVE_SINGLE_CYCLE_DCACHE_FAST_PATH) {
829 g_system_ptr->getProfiler()->addPrimaryStatSample(msg, m_chip_ptr->getID());
830 }
831
427
832 if (PROTOCOL_DEBUG_TRACE) {
833 g_system_ptr->getProfiler()->profileTransition("Seq", (m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip() + m_version), -1, msg.getAddress(),"", "Begin", "", CacheRequestType_to_string(request.getType()));
834 }
835
836#if 0
837 // Commented out by nate binkert because I removed the trace stuff
838 if (g_system_ptr->getTracer()->traceEnabled()) {
839 g_system_ptr->getTracer()->traceRequest((m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version), msg.getAddress(), msg.getProgramCounter(),
840 msg.getType(), g_eventQueue_ptr->getTime());
841 }
842#endif
843
844    Time latency = 0;  // initialized to a null value
845
846 latency = SEQUENCER_TO_CONTROLLER_LATENCY;
847
848 // Send the message to the cache controller
849 assert(latency > 0);
850 m_chip_ptr->m_L1Cache_mandatoryQueue_vec[m_version]->enqueue(msg, latency);
851
852 } // !found
428 m_mandatory_q_ptr->enqueue(msg, latency);
853}
429}
854
430/*
855bool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
431bool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
856 const Address& pc, AccessModeType access_mode,
432 AccessModeType access_mode,
857 int size, DataBlock*& data_ptr) {
858 if (type == CacheRequestType_IFETCH) {
433 int size, DataBlock*& data_ptr) {
434 if (type == CacheRequestType_IFETCH) {
859 if (Protocol::m_TwoLevelCache) {
860 return m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
861 }
862 else {
863 return m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
864 }
435 return m_instCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
865 } else {
436 } else {
866 if (Protocol::m_TwoLevelCache) {
867 return m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
868 }
869 else {
870 return m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
871 }
437 return m_dataCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
872 }
873}
438 }
439}
440*/
874
441
875void Sequencer::resetRequestTime(const Address& addr, int thread){
876 assert(thread >= 0);
877 //reset both load and store requests, if they exist
878 if(m_readRequestTable_ptr[thread]->exist(line_address(addr))){
879 CacheMsg& request = m_readRequestTable_ptr[thread]->lookup(addr);
880 if( request.m_AccessMode != AccessModeType_UserMode){
881 cout << "resetRequestType ERROR read request addr = " << addr << " thread = "<< thread << " is SUPERVISOR MODE" << endl;
882 printProgress(cout);
883 }
884 //ASSERT(request.m_AccessMode == AccessModeType_UserMode);
885 request.setTime(g_eventQueue_ptr->getTime());
886 }
887 if(m_writeRequestTable_ptr[thread]->exist(line_address(addr))){
888 CacheMsg& request = m_writeRequestTable_ptr[thread]->lookup(addr);
889 if( request.m_AccessMode != AccessModeType_UserMode){
890 cout << "resetRequestType ERROR write request addr = " << addr << " thread = "<< thread << " is SUPERVISOR MODE" << endl;
891 printProgress(cout);
892 }
893 //ASSERT(request.m_AccessMode == AccessModeType_UserMode);
894 request.setTime(g_eventQueue_ptr->getTime());
895 }
896}
897
898// removes load request from queue
899void Sequencer::removeLoadRequest(const Address & addr, int thread){
900 removeRequest(getReadRequest(addr, thread));
901}
902
903void Sequencer::removeStoreRequest(const Address & addr, int thread){
904 removeRequest(getWriteRequest(addr, thread));
905}
906
907// returns the read CacheMsg
908CacheMsg & Sequencer::getReadRequest( const Address & addr, int thread ){
909 Address temp = addr;
910 assert(thread >= 0);
911 assert(temp == line_address(temp));
912 assert(m_readRequestTable_ptr[thread]->exist(addr));
913 return m_readRequestTable_ptr[thread]->lookup(addr);
914}
915
916CacheMsg & Sequencer::getWriteRequest( const Address & addr, int thread){
917 Address temp = addr;
918 assert(thread >= 0);
919 assert(temp == line_address(temp));
920 assert(m_writeRequestTable_ptr[thread]->exist(addr));
921 return m_writeRequestTable_ptr[thread]->lookup(addr);
922}
923
924void Sequencer::print(ostream& out) const {
442void Sequencer::print(ostream& out) const {
925 out << "[Sequencer: " << m_chip_ptr->getID()
443 out << "[Sequencer: " << m_version
926 << ", outstanding requests: " << m_outstanding_count;
927
444 << ", outstanding requests: " << m_outstanding_count;
445
928 int smt_threads = RubyConfig::numberofSMTThreads();
929 for(int p=0; p < smt_threads; ++p){
930 out << ", read request table[ " << p << " ]: " << *m_readRequestTable_ptr[p]
931 << ", write request table[ " << p << " ]: " << *m_writeRequestTable_ptr[p];
932 }
446 out << ", read request table: " << m_readRequestTable
447 << ", write request table: " << m_writeRequestTable;
933 out << "]";
934}
935
936// this can be called from setState whenever coherence permissions are upgraded
937// when invoked, coherence violations will be checked for the given block
938void Sequencer::checkCoherence(const Address& addr) {
939#ifdef CHECK_COHERENCE
940 g_system_ptr->checkGlobalCoherenceInvariant(addr);
941#endif
942}
943
448 out << "]";
449}
450
451// this can be called from setState whenever coherence permissions are upgraded
452// when invoked, coherence violations will be checked for the given block
453void Sequencer::checkCoherence(const Address& addr) {
454#ifdef CHECK_COHERENCE
455 g_system_ptr->checkGlobalCoherenceInvariant(addr);
456#endif
457}
458
459/*
944bool Sequencer::getRubyMemoryValue(const Address& addr, char* value,
460bool Sequencer::getRubyMemoryValue(const Address& addr, char* value,
945 unsigned int size_in_bytes ) {
946 for(unsigned int i=0; i < size_in_bytes; i++) {
947 std::cerr << __FILE__ << "(" << __LINE__ << "): Not implemented. " << std::endl;
948 value[i] = 0; // _read_physical_memory( m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version,
949 // addr.getAddress() + i, 1 );
950 }
951 return false; // Do nothing?
461 unsigned int size_in_bytes )
462{
463 bool found = false;
464 const Address lineAddr = line_address(addr);
465 DataBlock data;
466 PhysAddress paddr(addr);
467 DataBlock* dataPtr = &data;
468
469 MachineID l2_mach = map_L2ChipId_to_L2Cache(addr, m_chip_ptr->getID() );
470 int l2_ver = l2_mach.num%RubyConfig::numberOfL2CachePerChip();
471
472 if (Protocol::m_TwoLevelCache) {
473 if(Protocol::m_CMP){
474 assert(n->m_L2Cache_L2cacheMemory_vec[l2_ver] != NULL);
475 }
476 else{
477 assert(n->m_L1Cache_cacheMemory_vec[m_version] != NULL);
478 }
479 }
480
481 if (n->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_IFETCH, dataPtr)){
482 n->m_L1Cache_L1IcacheMemory_vec[m_version]->getMemoryValue(addr, value, size_in_bytes);
483 found = true;
484 } else if (n->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
485 n->m_L1Cache_L1DcacheMemory_vec[m_version]->getMemoryValue(addr, value, size_in_bytes);
486 found = true;
487 } else if (Protocol::m_CMP && n->m_L2Cache_L2cacheMemory_vec[l2_ver]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
488 n->m_L2Cache_L2cacheMemory_vec[l2_ver]->getMemoryValue(addr, value, size_in_bytes);
489 found = true;
490 // } else if (n->TBE_TABLE_MEMBER_VARIABLE->isPresent(lineAddr)){
491// ASSERT(n->TBE_TABLE_MEMBER_VARIABLE->isPresent(lineAddr));
492// L1Cache_TBE tbeEntry = n->TBE_TABLE_MEMBER_VARIABLE->lookup(lineAddr);
493
494// int offset = addr.getOffset();
495// for(int i=0; i<size_in_bytes; ++i){
496// value[i] = tbeEntry.getDataBlk().getByte(offset + i);
497// }
498
499// found = true;
500 } else {
501 // Address not found
502 //cout << " " << m_chip_ptr->getID() << " NOT IN CACHE, Value at Directory is: " << (int) value[0] << endl;
503 n = dynamic_cast<Chip*>(g_system_ptr->getChip(map_Address_to_DirectoryNode(addr)/RubyConfig::numberOfDirectoryPerChip()));
504 int dir_version = map_Address_to_DirectoryNode(addr)%RubyConfig::numberOfDirectoryPerChip();
505 for(unsigned int i=0; i<size_in_bytes; ++i){
506 int offset = addr.getOffset();
507 value[i] = n->m_Directory_directory_vec[dir_version]->lookup(lineAddr).m_DataBlk.getByte(offset + i);
508 }
509 // Address not found
510 //WARN_MSG("Couldn't find address");
511 //WARN_EXPR(addr);
512 found = false;
513 }
514 return true;
952}
953
954bool Sequencer::setRubyMemoryValue(const Address& addr, char *value,
955 unsigned int size_in_bytes) {
956 char test_buffer[64];
957
515}
516
517bool Sequencer::setRubyMemoryValue(const Address& addr, char *value,
518 unsigned int size_in_bytes) {
519 char test_buffer[64];
520
958 return false; // Do nothing?
521 // idea here is that coherent cache should find the
522 // latest data, the update it
523 bool found = false;
524 const Address lineAddr = line_address(addr);
525 PhysAddress paddr(addr);
526 DataBlock data;
527 DataBlock* dataPtr = &data;
528 Chip* n = dynamic_cast<Chip*>(m_chip_ptr);
529
530 MachineID l2_mach = map_L2ChipId_to_L2Cache(addr, m_chip_ptr->getID() );
531 int l2_ver = l2_mach.num%RubyConfig::numberOfL2CachePerChip();
532
533 assert(n->m_L1Cache_L1IcacheMemory_vec[m_version] != NULL);
534 assert(n->m_L1Cache_L1DcacheMemory_vec[m_version] != NULL);
535 if (Protocol::m_TwoLevelCache) {
536 if(Protocol::m_CMP){
537 assert(n->m_L2Cache_L2cacheMemory_vec[l2_ver] != NULL);
538 }
539 else{
540 assert(n->m_L1Cache_cacheMemory_vec[m_version] != NULL);
541 }
542 }
543
544 if (n->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_IFETCH, dataPtr)){
545 n->m_L1Cache_L1IcacheMemory_vec[m_version]->setMemoryValue(addr, value, size_in_bytes);
546 found = true;
547 } else if (n->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
548 n->m_L1Cache_L1DcacheMemory_vec[m_version]->setMemoryValue(addr, value, size_in_bytes);
549 found = true;
550 } else if (Protocol::m_CMP && n->m_L2Cache_L2cacheMemory_vec[l2_ver]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
551 n->m_L2Cache_L2cacheMemory_vec[l2_ver]->setMemoryValue(addr, value, size_in_bytes);
552 found = true;
553 } else {
554 // Address not found
555 n = dynamic_cast<Chip*>(g_system_ptr->getChip(map_Address_to_DirectoryNode(addr)/RubyConfig::numberOfDirectoryPerChip()));
556 int dir_version = map_Address_to_DirectoryNode(addr)%RubyConfig::numberOfDirectoryPerChip();
557 for(unsigned int i=0; i<size_in_bytes; ++i){
558 int offset = addr.getOffset();
559 n->m_Directory_directory_vec[dir_version]->lookup(lineAddr).m_DataBlk.setByte(offset + i, value[i]);
560 }
561 found = false;
562 }
563
564 if (found){
565 found = getRubyMemoryValue(addr, test_buffer, size_in_bytes);
566 assert(found);
567 if(value[0] != test_buffer[0]){
568 WARN_EXPR((int) value[0]);
569 WARN_EXPR((int) test_buffer[0]);
570 ERROR_MSG("setRubyMemoryValue failed to set value.");
571 }
572 }
573
574 return true;
959}
575}
576*/
577/*
960
578
579void
580Sequencer::rubyMemAccess(const uint64 paddr, char* data, const int len, const AccessType type)
581{
582 if ( type == AccessType_Read || type == AccessType_Write ) {
583 // need to break up the packet data
584 uint64 guest_ptr = paddr;
585 Vector<DataBlock*> datablocks;
586 while (paddr + len != guest_ptr) {
587 Address addr(guest_ptr);
588 Address line_addr = line_address(addr);
589
590 int bytes_copied;
591 if (addr.getOffset() == 0) {
592 bytes_copied = (guest_ptr + RubyConfig::dataBlockBytes() > paddr + len)?
593 (paddr + len - guest_ptr):
594 RubyConfig::dataBlockBytes();
595 } else {
596 bytes_copied = RubyConfig::dataBlockBytes() - addr.getOffset();
597 if (guest_ptr + bytes_copied > paddr + len)
598 bytes_copied = paddr + len - guest_ptr;
599 }
600
601 // first we need to find all data blocks that have to be updated for a write
602 // and the highest block for a read
603 for(int i=0;i<RubyConfig::numberOfProcessors();i++) {
604 if (Protocol::m_TwoLevelCache){
605 if(m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[i]->isTagPresent(line_address(addr)))
606 datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[i]->lookup(line_addr).getDataBlk());
607 if(m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[i]->isTagPresent(line_address(addr)))
608 datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[i]->lookup(line_addr).getDataBlk());
609 } else {
610 if(m_chip_ptr->m_L1Cache_cacheMemory_vec[i]->isTagPresent(line_address(addr)))
611 datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_cacheMemory_vec[i]->lookup(line_addr).getDataBlk());
612 }
613 }
614 if (Protocol::m_TwoLevelCache){
615 int l2_bank = map_L2ChipId_to_L2Cache(addr, 0).num; // TODO: ONLY WORKS WITH CMP!!!
616 if (m_chip_ptr->m_L2Cache_L2cacheMemory_vec[l2_bank]->isTagPresent(line_address(Address(paddr)))) {
617 datablocks.insertAtBottom(&m_chip_ptr->m_L2Cache_L2cacheMemory_vec[l2_bank]->lookup(addr).getDataBlk());
618 }
619 }
620 assert(dynamic_cast<Chip*>(m_chip_ptr)->m_Directory_directory_vec.size() > map_Address_to_DirectoryNode(addr));
621 DirectoryMemory* dir = dynamic_cast<Chip*>(m_chip_ptr)->m_Directory_directory_vec[map_Address_to_DirectoryNode(addr)];
622 Directory_Entry& entry = dir->lookup(line_addr);
623 datablocks.insertAtBottom(&entry.getDataBlk());
624
625 if (pkt->isRead()){
626 datablocks[0]->copyData(pkt_data, addr.getOffset(), bytes_copied);
627 } else {// pkt->isWrite() {
628 for (int i=0;i<datablocks.size();i++)
629 datablocks[i]->setData(pkt_data, addr.getOffset(), bytes_copied);
630 }
631
632 guest_ptr += bytes_copied;
633 pkt_data += bytes_copied;
634 datablocks.clear();
635 }
636}
637
638*/