
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/libruby.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"

//Sequencer::Sequencer(int core_id, MessageBuffer* mandatory_q)

#define LLSC_FAIL -2
long int already = 0;
Sequencer::Sequencer(const string & name)
  :RubyPort(name)
{
}

void Sequencer::init(const vector<string> & argv)
{
  m_deadlock_check_scheduled = false;
  m_outstanding_count = 0;

  m_max_outstanding_requests = 0;
  m_deadlock_threshold = 0;
  m_version = -1;
  m_instCache_ptr = NULL;
  m_dataCache_ptr = NULL;
  m_controller = NULL;
  m_atomic_reads = 0;
  m_atomic_writes = 0;
  for (size_t i=0; i<argv.size(); i+=2) {
    if ( argv[i] == "controller") {
      m_controller = RubySystem::getController(argv[i+1]); // e.g. argv[i+1] = "L1Cache"
      m_mandatory_q_ptr = m_controller->getMandatoryQueue();
    } else if ( argv[i] == "icache")
      m_instCache_ptr = RubySystem::getCache(argv[i+1]);
    else if ( argv[i] == "dcache")
      m_dataCache_ptr = RubySystem::getCache(argv[i+1]);
    else if ( argv[i] == "version")
      m_version = atoi(argv[i+1].c_str());
    else if ( argv[i] == "max_outstanding_requests")
      m_max_outstanding_requests = atoi(argv[i+1].c_str());
    else if ( argv[i] == "deadlock_threshold")
      m_deadlock_threshold = atoi(argv[i+1].c_str());
    else {
      cerr << "WARNING: Sequencer: Unknown configuration parameter: " << argv[i] << endl;
      assert(false);
    }
  }
  assert(m_max_outstanding_requests > 0);
  assert(m_deadlock_threshold > 0);
  assert(m_version > -1);
  assert(m_instCache_ptr != NULL);
  assert(m_dataCache_ptr != NULL);
  assert(m_controller != NULL);
}
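
// Hypothetical configuration sketch (the concrete strings below are
// illustrative, not taken from this file): init() consumes argv as
// flattened key/value pairs, so a caller might build the vector like so:
//
//   vector<string> args;
//   args.push_back("controller");               args.push_back("L1Cache-0");
//   args.push_back("icache");                   args.push_back("L1Cache-0_I");
//   args.push_back("dcache");                   args.push_back("L1Cache-0_D");
//   args.push_back("version");                  args.push_back("0");
//   args.push_back("max_outstanding_requests"); args.push_back("16");
//   args.push_back("deadlock_threshold");       args.push_back("500000");
//   sequencer->init(args);
//
// Any key outside this set trips the assert(false) above.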

Sequencer::~Sequencer() {

}

void Sequencer::wakeup() {
  // Check for deadlock of any of the requests
  Time current_time = g_eventQueue_ptr->getTime();

  // Check across all outstanding requests
  int total_outstanding = 0;

  Vector<Address> keys = m_readRequestTable.keys();
  for (int i=0; i<keys.size(); i++) {
    SequencerRequest* request = m_readRequestTable.lookup(keys[i]);
    if (current_time - request->issue_time >= m_deadlock_threshold) {
      WARN_MSG("Possible Deadlock detected");
      WARN_EXPR(request);
      WARN_EXPR(m_version);
      WARN_EXPR(request->ruby_request.paddr);
      WARN_EXPR(keys.size());
      WARN_EXPR(current_time);
      WARN_EXPR(request->issue_time);
      WARN_EXPR(current_time - request->issue_time);
      ERROR_MSG("Aborting");
    }
  }

  keys = m_writeRequestTable.keys();
  for (int i=0; i<keys.size(); i++) {
    SequencerRequest* request = m_writeRequestTable.lookup(keys[i]);
    if (current_time - request->issue_time >= m_deadlock_threshold) {
      WARN_MSG("Possible Deadlock detected");
      WARN_EXPR(request);
      WARN_EXPR(m_version);
      WARN_EXPR(current_time);
      WARN_EXPR(request->issue_time);
      WARN_EXPR(current_time - request->issue_time);
      WARN_EXPR(keys.size());
      ERROR_MSG("Aborting");
    }
  }
  total_outstanding += m_writeRequestTable.size() + m_readRequestTable.size();

  assert(m_outstanding_count == total_outstanding);

  if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking
    g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
  } else {
    m_deadlock_check_scheduled = false;
  }
}
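
// Timing sketch (derived from the code above): with deadlock_threshold T,
// a request issued at cycle t is flagged once wakeup() observes
// (current_time - t) >= T. Because the event re-arms itself every T
// cycles while requests are outstanding, a stuck request is reported
// between T and 2*T cycles after it was issued.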

void Sequencer::printProgress(ostream& out) const{
  /*
  int total_demand = 0;
  out << "Sequencer Stats Version " << m_version << endl;
  out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
  out << "---------------" << endl;
  out << "outstanding requests" << endl;

  Vector<Address> rkeys = m_readRequestTable.keys();
  int read_size = rkeys.size();
  out << "proc " << m_version << " Read Requests = " << read_size << endl;
  // print the request table
  for(int i=0; i < read_size; ++i){
    SequencerRequest * request = m_readRequestTable.lookup(rkeys[i]);
    out << "\tRequest[ " << i << " ] = " << request->type << " Address " << rkeys[i] << " Posted " << request->issue_time << " PF " << PrefetchBit_No << endl;
    total_demand++;
  }

  Vector<Address> wkeys = m_writeRequestTable.keys();
  int write_size = wkeys.size();
  out << "proc " << m_version << " Write Requests = " << write_size << endl;
  // print the request table
  for(int i=0; i < write_size; ++i){
    CacheMsg & request = m_writeRequestTable.lookup(wkeys[i]);
    out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
    if( request.getPrefetch() == PrefetchBit_No ){
      total_demand++;
    }
  }

  out << endl;

  out << "Total Number Outstanding: " << m_outstanding_count << endl;
  out << "Total Number Demand     : " << total_demand << endl;
  out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl;
  out << endl;
  out << endl;
  */
}

void Sequencer::printConfig(ostream& out) const {
  out << "Sequencer config: " << m_name << endl;
  out << "  controller: " << m_controller->getName() << endl;
  out << "  version: " << m_version << endl;
  out << "  max_outstanding_requests: " << m_max_outstanding_requests << endl;
  out << "  deadlock_threshold: " << m_deadlock_threshold << endl;
}

// Insert the request into the correct request table. Returns false on
// success; a request that aliases an existing entry for the same cache
// line currently trips an assertion (see the drh5 comments below) rather
// than returning true as the original contract suggested.
bool Sequencer::insertRequest(SequencerRequest* request) {
  int total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();

  assert(m_outstanding_count == total_outstanding);

  // See if we should schedule a deadlock check
  if (m_deadlock_check_scheduled == false) {
    g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
    m_deadlock_check_scheduled = true;
  }

  Address line_addr(request->ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((request->ruby_request.type == RubyRequestType_ST) ||
      (request->ruby_request.type == RubyRequestType_RMW_Read) ||
      (request->ruby_request.type == RubyRequestType_RMW_Write) ||
      (request->ruby_request.type == RubyRequestType_Locked_Read) ||
      (request->ruby_request.type == RubyRequestType_Locked_Write)) {
    if (m_writeRequestTable.exist(line_addr)) {
      m_writeRequestTable.lookup(line_addr) = request;
      //      return true;
      assert(0); // drh5: isn't this an error? do you lose the initial request?
    }
    m_writeRequestTable.allocate(line_addr);
    m_writeRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  } else {
    if (m_readRequestTable.exist(line_addr)) {
      m_readRequestTable.lookup(line_addr) = request;
      //      return true;
      assert(0); // drh5: isn't this an error? do you lose the initial request?
    }
    m_readRequestTable.allocate(line_addr);
    m_readRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  }

  g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);

  total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
  assert(m_outstanding_count == total_outstanding);

  return false;
}
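
// Aliasing sketch (hypothetical addresses, assuming a 64-byte block):
// stores to paddr 0x1000 and 0x1020 both map to line address 0x1000, so
// the second insertRequest() would find an existing write-table entry and
// hit the assert(0) above. isReady() is expected to reject such aliased
// requests before they ever reach this point.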

void Sequencer::removeRequest(SequencerRequest* srequest) {

  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());

  const RubyRequest & ruby_request = srequest->ruby_request;
  Address line_addr(ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((ruby_request.type == RubyRequestType_ST) ||
      (ruby_request.type == RubyRequestType_RMW_Read) ||
      (ruby_request.type == RubyRequestType_RMW_Write) ||
      (ruby_request.type == RubyRequestType_Locked_Read) ||
      (ruby_request.type == RubyRequestType_Locked_Write)) {
    m_writeRequestTable.deallocate(line_addr);
  } else {
    m_readRequestTable.deallocate(line_addr);
  }
  m_outstanding_count--;

  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
}

void Sequencer::writeCallback(const Address& address, DataBlock& data) {

  assert(address == line_address(address));
  assert(m_writeRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_writeRequestTable.lookup(address);

  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_ST) ||
         (request->ruby_request.type == RubyRequestType_RMW_Read) ||
         (request->ruby_request.type == RubyRequestType_RMW_Write) ||
         (request->ruby_request.type == RubyRequestType_Locked_Read) ||
         (request->ruby_request.type == RubyRequestType_Locked_Write));
  // POLINA: the assumption is that atomics go only to the data cache, never the instruction cache
  if (request->ruby_request.type == RubyRequestType_Locked_Read) {
    m_dataCache_ptr->setLocked(address, m_version);
  }
  else if (request->ruby_request.type == RubyRequestType_RMW_Read) {
    m_controller->set_atomic(address);
  }
  else if (request->ruby_request.type == RubyRequestType_RMW_Write) {
    m_controller->clear_atomic(address);
  }

  hitCallback(request, data);
}

void Sequencer::readCallback(const Address& address, DataBlock& data) {

  assert(address == line_address(address));
  assert(m_readRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_readRequestTable.lookup(address);
  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_LD) ||
         (request->ruby_request.type == RubyRequestType_RMW_Read) ||
         (request->ruby_request.type == RubyRequestType_IFETCH));

  hitCallback(request, data);
}

void Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data) {
  const RubyRequest & ruby_request = srequest->ruby_request;
  Address request_address(ruby_request.paddr);
  Address request_line_address(ruby_request.paddr);
  request_line_address.makeLineAddress();
  RubyRequestType type = ruby_request.type;
  Time issued_time = srequest->issue_time;

  // Set this cache entry to the most recently used
  if (type == RubyRequestType_IFETCH) {
    if (m_instCache_ptr->isTagPresent(request_line_address) )
      m_instCache_ptr->setMRU(request_line_address);
  } else {
    if (m_dataCache_ptr->isTagPresent(request_line_address) )
      m_dataCache_ptr->setMRU(request_line_address);
  }

  assert(g_eventQueue_ptr->getTime() >= issued_time);
  Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

  // Profile the miss latency for all non-zero demand misses
  if (miss_latency != 0) {
    g_system_ptr->getProfiler()->missLatency(miss_latency, type);

    if (Debug::getProtocolTrace()) {
      g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(ruby_request.paddr),
                                                     "", "Done", "", int_to_string(miss_latency)+" cycles");
    }
  }
  /*
  if (request.getPrefetch() == PrefetchBit_Yes) {
    return; // Ignore the prefetch
  }
  */

  // update the data
  if (ruby_request.data != NULL) {
    if ((type == RubyRequestType_LD) ||
        (type == RubyRequestType_IFETCH) ||
        (type == RubyRequestType_RMW_Read)) {
      memcpy(ruby_request.data, data.getData(request_address.getOffset(), ruby_request.len), ruby_request.len);
    } else {
      data.setData(ruby_request.data, request_address.getOffset(), ruby_request.len);
    }
  }

  m_hit_callback(srequest->id);
  delete srequest;
}
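
// Worked example for the data update above (hypothetical values, assuming
// a 64-byte block): a 4-byte LD at paddr 0x1004 has offset 4, so bytes
// [4..7] of the line's DataBlock are memcpy'd out to ruby_request.data;
// a 4-byte ST at the same address copies ruby_request.data into those
// same four bytes of the block instead.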

// Returns 1 if the sequencer can accept this request; otherwise returns a
// LIBRUBY_* status code: LIBRUBY_ALIASED_REQUEST if a load or store to the
// same cache line is already outstanding, or LIBRUBY_BUFFER_FULL if the
// maximum number of outstanding requests has been reached.
int Sequencer::isReady(const RubyRequest& request) {
  if( m_writeRequestTable.exist(line_address(Address(request.paddr))) ||
      m_readRequestTable.exist(line_address(Address(request.paddr))) ){
    return LIBRUBY_ALIASED_REQUEST;
  }

  if (m_outstanding_count >= m_max_outstanding_requests) {
    return LIBRUBY_BUFFER_FULL;
  }

  return 1;
}
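
// Hypothetical caller sketch showing how the status codes are meant to be
// consumed (mirrors what makeRequest() does below):
//
//   int status = sequencer->isReady(req);
//   if (status == LIBRUBY_ALIASED_REQUEST) {
//     // a request to the same line is in flight; retry later
//   } else if (status == LIBRUBY_BUFFER_FULL) {
//     // back-pressure the core until a callback retires a request
//   } else {
//     int64_t id = sequencer->makeRequest(req);  // id identifies the hit callback
//   }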

bool Sequencer::empty() const {
  return (m_writeRequestTable.size() == 0) && (m_readRequestTable.size() == 0);
}


int64_t Sequencer::makeRequest(const RubyRequest & request)
{
  assert(Address(request.paddr).getOffset() + request.len <= RubySystem::getBlockSizeBytes());
  int ready = isReady(request);
  if (ready > 0) {
    int64_t id = makeUniqueRequestID();
    SequencerRequest *srequest = new SequencerRequest(request, id, g_eventQueue_ptr->getTime());
    bool found = insertRequest(srequest);
    if (!found) {
      if (request.type == RubyRequestType_Locked_Write) {
        // NOTE: it is OK to check the locked flag here as the mandatory queue will be checked first
        // ensuring that nothing comes between checking the flag and servicing the store
        if (!m_dataCache_ptr->isLocked(line_address(Address(request.paddr)), m_version)) {
          return LLSC_FAIL;
        }
        else {
          m_dataCache_ptr->clearLocked(line_address(Address(request.paddr)));
        }
      }
      issueRequest(request);

      // TODO: issue hardware prefetches here
      return id;
    }
    else {
      assert(0);
    }
  }
  else {
    return ready;
  }
}
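
// LL/SC sketch (hypothetical driver code): a Locked_Read sets the line's
// lock for this sequencer's m_version in writeCallback(); the paired
// Locked_Write only proceeds if that lock is still held, otherwise
// makeRequest() returns LLSC_FAIL (-2) and the core must restart the
// whole LL/SC pair:
//
//   seq->makeRequest(ll_req);   // RubyRequestType_Locked_Read: locks the line on completion
//   ...                         // an intervening conflicting access can clear the lock
//   if (seq->makeRequest(sc_req) == LLSC_FAIL) {  // RubyRequestType_Locked_Write
//     // store-conditional failed; retry from the load-linked
//   }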

void Sequencer::issueRequest(const RubyRequest& request) {

  // TODO: get rid of CacheMsg, CacheRequestType, and AccessModeType, & have SLICC use RubyRequest and subtypes natively
  CacheRequestType ctype;
  switch(request.type) {
  case RubyRequestType_IFETCH:
    if (m_atomic_reads > 0 && m_atomic_writes == 0) {
      m_controller->reset_atomics();
      m_atomic_writes = 0;
      m_atomic_reads = 0;
    }
    else if (m_atomic_writes > 0) {
      assert(m_atomic_reads > m_atomic_writes);
      cerr << "WARNING: Expected: " << m_atomic_reads << " RMW_Writes, but only received: " << m_atomic_writes << endl;
      assert(false);
    }
    ctype = CacheRequestType_IFETCH;
    break;
  case RubyRequestType_LD:
    if (m_atomic_reads > 0 && m_atomic_writes == 0) {
      m_controller->reset_atomics();
      m_atomic_writes = 0;
      m_atomic_reads = 0;
    }
    else if (m_atomic_writes > 0) {
      assert(m_atomic_reads > m_atomic_writes);
      cerr << "WARNING: Expected: " << m_atomic_reads << " RMW_Writes, but only received: " << m_atomic_writes << endl;
      assert(false);
    }
    ctype = CacheRequestType_LD;
    break;
  case RubyRequestType_ST:
    if (m_atomic_reads > 0 && m_atomic_writes == 0) {
      m_controller->reset_atomics();
      m_atomic_writes = 0;
      m_atomic_reads = 0;
    }
    else if (m_atomic_writes > 0) {
      assert(m_atomic_reads > m_atomic_writes);
      cerr << "WARNING: Expected: " << m_atomic_reads << " RMW_Writes, but only received: " << m_atomic_writes << endl;
      assert(false);
    }
    ctype = CacheRequestType_ST;
    break;
  case RubyRequestType_Locked_Read:
  case RubyRequestType_Locked_Write:
    ctype = CacheRequestType_ATOMIC;
    break;
  case RubyRequestType_RMW_Read:
    assert(m_atomic_writes == 0);
    m_atomic_reads++;
    ctype = CacheRequestType_ATOMIC;
    break;
  case RubyRequestType_RMW_Write:
    assert(m_atomic_reads > 0);
    assert(m_atomic_writes < m_atomic_reads);
    m_atomic_writes++;
    if (m_atomic_reads == m_atomic_writes) {
      m_atomic_reads = 0;
      m_atomic_writes = 0;
    }
    ctype = CacheRequestType_ATOMIC;
    break;
  default:
    assert(0);
  }
  AccessModeType amtype;
  switch(request.access_mode){
  case RubyAccessMode_User:
    amtype = AccessModeType_UserMode;
    break;
  case RubyAccessMode_Supervisor:
    amtype = AccessModeType_SupervisorMode;
    break;
  case RubyAccessMode_Device:
    amtype = AccessModeType_UserMode;
    break;
  default:
    assert(0);
  }
  Address line_addr(request.paddr);
  line_addr.makeLineAddress();
  CacheMsg msg(line_addr, Address(request.paddr), ctype, Address(request.pc), amtype, request.len, PrefetchBit_No, request.proc_id);

  if (Debug::getProtocolTrace()) {
    g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(request.paddr),
                                                   "", "Begin", "", RubyRequestType_to_string(request.type));
  }

  if (g_system_ptr->getTracer()->traceEnabled()) {
    g_system_ptr->getTracer()->traceRequest(m_name, line_addr, Address(request.pc),
                                            request.type, g_eventQueue_ptr->getTime());
  }

  Time latency = 0;  // initialized to a null value

  if (request.type == RubyRequestType_IFETCH)
    latency = m_instCache_ptr->getLatency();
  else
    latency = m_dataCache_ptr->getLatency();

  // Send the message to the cache controller
  assert(latency > 0);


  m_mandatory_q_ptr->enqueue(msg, latency);
}
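
// RMW pairing sketch (derived from the counters above): every RMW_Read
// increments m_atomic_reads and must eventually be matched by an
// RMW_Write before an ordinary LD/ST/IFETCH is issued:
//
//   issueRequest(rmw_read);    // m_atomic_reads == 1, line marked atomic
//   issueRequest(rmw_write);   // m_atomic_writes == 1 -> both counters reset
//   issueRequest(load);        // legal: no atomics outstanding
//
// An ordinary request arriving with reads outstanding but no writes calls
// reset_atomics() (an abandoned RMW); reads outstanding with only some of
// their writes received is treated as a protocol error and asserts.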
/*
bool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
                               AccessModeType access_mode,
                               int size, DataBlock*& data_ptr) {
  if (type == CacheRequestType_IFETCH) {
    return m_instCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
  } else {
    return m_dataCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
  }
}
*/

void Sequencer::print(ostream& out) const {
  out << "[Sequencer: " << m_version
      << ", outstanding requests: " << m_outstanding_count;

  out << ", read request table: " << m_readRequestTable
      << ", write request table: " << m_writeRequestTable;
  out << "]";
}

// this can be called from setState whenever coherence permissions are upgraded
// when invoked, coherence violations will be checked for the given block
void Sequencer::checkCoherence(const Address& addr) {
#ifdef CHECK_COHERENCE
  g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}
