/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/libruby.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"

#include "params/RubySequencer.hh"

//Sequencer::Sequencer(int core_id, MessageBuffer* mandatory_q)

#define LLSC_FAIL -2
long int already = 0;

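// Return-code convention for makeRequest() below: a positive value is a
// unique request id, LLSC_FAIL (above) marks a store-conditional that
// lost its reservation, and the non-positive LIBRUBY_ALIASED_REQUEST /
// LIBRUBY_BUFFER_FULL codes (from the libruby interface) mean the
// request was not accepted this cycle and should be retried.
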
Sequencer *
RubySequencerParams::create()
{
  return new Sequencer(this);
}

Sequencer::Sequencer(const Params *p)
    : RubyPort(p)
{
  m_store_waiting_on_load_cycles = 0;
  m_store_waiting_on_store_cycles = 0;
  m_load_waiting_on_store_cycles = 0;
  m_load_waiting_on_load_cycles = 0;

  m_deadlock_check_scheduled = false;
  m_outstanding_count = 0;

  m_instCache_ptr = p->icache;
  m_dataCache_ptr = p->dcache;
  m_max_outstanding_requests = p->max_outstanding_requests;
  m_deadlock_threshold = p->deadlock_threshold;

  assert(m_max_outstanding_requests > 0);
  assert(m_deadlock_threshold > 0);
  assert(m_instCache_ptr != NULL);
  assert(m_dataCache_ptr != NULL);
}

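// All four configuration values above come from the generated
// params/RubySequencer.hh header included at the top of this file
// (icache, dcache, max_outstanding_requests, deadlock_threshold); they
// are set when the Python-side RubySequencer SimObject is configured.
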
Sequencer::~Sequencer() {
}

void Sequencer::wakeup() {
  // Check for deadlock of any of the requests
  Time current_time = g_eventQueue_ptr->getTime();

  // Check across all outstanding requests
  int total_outstanding = 0;

  Vector<Address> keys = m_readRequestTable.keys();
  for (int i=0; i<keys.size(); i++) {
    SequencerRequest* request = m_readRequestTable.lookup(keys[i]);
    if (current_time - request->issue_time >= m_deadlock_threshold) {
      WARN_MSG("Possible Deadlock detected");
      WARN_EXPR(request);
      WARN_EXPR(m_version);
      WARN_EXPR(request->ruby_request.paddr);
      WARN_EXPR(keys.size());
      WARN_EXPR(current_time);
      WARN_EXPR(request->issue_time);
      WARN_EXPR(current_time - request->issue_time);
      ERROR_MSG("Aborting");
    }
  }

  keys = m_writeRequestTable.keys();
  for (int i=0; i<keys.size(); i++) {
    SequencerRequest* request = m_writeRequestTable.lookup(keys[i]);
    if (current_time - request->issue_time >= m_deadlock_threshold) {
      WARN_MSG("Possible Deadlock detected");
      WARN_EXPR(request);
      WARN_EXPR(m_version);
      WARN_EXPR(current_time);
      WARN_EXPR(request->issue_time);
      WARN_EXPR(current_time - request->issue_time);
      WARN_EXPR(keys.size());
      ERROR_MSG("Aborting");
    }
  }
  total_outstanding += m_writeRequestTable.size() + m_readRequestTable.size();

  assert(m_outstanding_count == total_outstanding);

  if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking
    g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
  } else {
    m_deadlock_check_scheduled = false;
  }
}

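// The deadlock check above is a watchdog, not part of the protocol: it
// re-arms itself through the event queue every m_deadlock_threshold
// cycles for as long as requests are outstanding. As an illustration
// (the threshold is a configuration parameter, not fixed here), with a
// deadlock_threshold of 500000 a request still outstanding half a
// million cycles after issue triggers the warnings and abort above.
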
void Sequencer::printStats(ostream & out) const {
  out << "Sequencer: " << m_name << endl;
  out << "  store_waiting_on_load_cycles: " << m_store_waiting_on_load_cycles << endl;
  out << "  store_waiting_on_store_cycles: " << m_store_waiting_on_store_cycles << endl;
  out << "  load_waiting_on_load_cycles: " << m_load_waiting_on_load_cycles << endl;
  out << "  load_waiting_on_store_cycles: " << m_load_waiting_on_store_cycles << endl;
}

void Sequencer::printProgress(ostream& out) const {
  /*
  int total_demand = 0;
  out << "Sequencer Stats Version " << m_version << endl;
  out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
  out << "---------------" << endl;
  out << "outstanding requests" << endl;

  Vector<Address> rkeys = m_readRequestTable.keys();
  int read_size = rkeys.size();
  out << "proc " << m_version << " Read Requests = " << read_size << endl;
  // print the request table
  for(int i=0; i < read_size; ++i){
    SequencerRequest * request = m_readRequestTable.lookup(rkeys[i]);
    out << "\tRequest[ " << i << " ] = " << request->type << " Address " << rkeys[i] << " Posted " << request->issue_time << " PF " << PrefetchBit_No << endl;
    total_demand++;
  }

  Vector<Address> wkeys = m_writeRequestTable.keys();
  int write_size = wkeys.size();
  out << "proc " << m_version << " Write Requests = " << write_size << endl;
  // print the request table
  for(int i=0; i < write_size; ++i){
    CacheMsg & request = m_writeRequestTable.lookup(wkeys[i]);
    out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
    if( request.getPrefetch() == PrefetchBit_No ){
      total_demand++;
    }
  }

  out << endl;

  out << "Total Number Outstanding: " << m_outstanding_count << endl;
  out << "Total Number Demand     : " << total_demand << endl;
  out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl;
  out << endl;
  out << endl;
  */
}

void Sequencer::printConfig(ostream& out) const {
  out << "Sequencer config: " << m_name << endl;
  out << "  controller: " << m_controller->getName() << endl;
  out << "  version: " << m_version << endl;
  out << "  max_outstanding_requests: " << m_max_outstanding_requests << endl;
  out << "  deadlock_threshold: " << m_deadlock_threshold << endl;
}

// Insert the request into the correct request table. Aliased requests
// should already have been filtered out by isReady(), so finding an
// existing entry for the same cache line here is fatal; on success this
// always returns false (the entry was not already present).
bool Sequencer::insertRequest(SequencerRequest* request) {
  int total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();

  assert(m_outstanding_count == total_outstanding);

  // See if we should schedule a deadlock check
  if (m_deadlock_check_scheduled == false) {
    g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
    m_deadlock_check_scheduled = true;
  }

  Address line_addr(request->ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((request->ruby_request.type == RubyRequestType_ST) ||
      (request->ruby_request.type == RubyRequestType_RMW_Read) ||
      (request->ruby_request.type == RubyRequestType_RMW_Write) ||
      (request->ruby_request.type == RubyRequestType_Locked_Read) ||
      (request->ruby_request.type == RubyRequestType_Locked_Write)) {
    if (m_writeRequestTable.exist(line_addr)) {
      m_writeRequestTable.lookup(line_addr) = request;
      // return true;
      assert(0); // drh5: isn't this an error? do you lose the initial request?
    }
    m_writeRequestTable.allocate(line_addr);
    m_writeRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  } else {
    if (m_readRequestTable.exist(line_addr)) {
      m_readRequestTable.lookup(line_addr) = request;
      // return true;
      assert(0); // drh5: isn't this an error? do you lose the initial request?
    }
    m_readRequestTable.allocate(line_addr);
    m_readRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  }

  g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);

  total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
  assert(m_outstanding_count == total_outstanding);

  return false;
}

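// Note that both request tables are indexed by *line* address, so two
// requests to different words of the same cache line alias to a single
// entry. For example (assuming 64-byte lines for illustration),
// requests to paddr 0x1000 and paddr 0x1038 both map to line 0x1000;
// the second is rejected by isReady() with LIBRUBY_ALIASED_REQUEST
// until the first completes.
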
void Sequencer::removeRequest(SequencerRequest* srequest) {
  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());

  const RubyRequest & ruby_request = srequest->ruby_request;
  Address line_addr(ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((ruby_request.type == RubyRequestType_ST) ||
      (ruby_request.type == RubyRequestType_RMW_Read) ||
      (ruby_request.type == RubyRequestType_RMW_Write) ||
      (ruby_request.type == RubyRequestType_Locked_Read) ||
      (ruby_request.type == RubyRequestType_Locked_Write)) {
    m_writeRequestTable.deallocate(line_addr);
  } else {
    m_readRequestTable.deallocate(line_addr);
  }
  m_outstanding_count--;

  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
}

void Sequencer::writeCallback(const Address& address, DataBlock& data) {
  assert(address == line_address(address));
  assert(m_writeRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_writeRequestTable.lookup(address);

  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_ST) ||
         (request->ruby_request.type == RubyRequestType_RMW_Read) ||
         (request->ruby_request.type == RubyRequestType_RMW_Write) ||
         (request->ruby_request.type == RubyRequestType_Locked_Read) ||
         (request->ruby_request.type == RubyRequestType_Locked_Write));

  if (request->ruby_request.type == RubyRequestType_Locked_Read) {
    m_dataCache_ptr->setLocked(address, m_version);
  }
  else if (request->ruby_request.type == RubyRequestType_RMW_Read) {
    m_controller->blockOnQueue(address, m_mandatory_q_ptr);
  }
  else if (request->ruby_request.type == RubyRequestType_RMW_Write) {
    m_controller->unblock(address);
  }

  hitCallback(request, data);
}

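// LL/SC flow: a Locked_Read (load-linked) marks the line locked for this
// sequencer's version here; makeRequest() later checks that mark before
// issuing the matching Locked_Write (store-conditional) and returns
// LLSC_FAIL if an intervening write cleared it. The RMW pair instead
// blocks and unblocks the controller's mandatory queue so that the
// read-modify-write completes atomically.
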
void Sequencer::readCallback(const Address& address, DataBlock& data) {
  assert(address == line_address(address));
  assert(m_readRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_readRequestTable.lookup(address);
  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_LD) ||
         (request->ruby_request.type == RubyRequestType_RMW_Read) ||
         (request->ruby_request.type == RubyRequestType_IFETCH));

  hitCallback(request, data);
}

void Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data) {
  const RubyRequest & ruby_request = srequest->ruby_request;
  Address request_address(ruby_request.paddr);
  Address request_line_address(ruby_request.paddr);
  request_line_address.makeLineAddress();
  RubyRequestType type = ruby_request.type;
  Time issued_time = srequest->issue_time;

  // Set this cache entry to the most recently used
  if (type == RubyRequestType_IFETCH) {
    if (m_instCache_ptr->isTagPresent(request_line_address))
      m_instCache_ptr->setMRU(request_line_address);
  } else {
    if (m_dataCache_ptr->isTagPresent(request_line_address))
      m_dataCache_ptr->setMRU(request_line_address);
  }

  assert(g_eventQueue_ptr->getTime() >= issued_time);
  Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

  // Profile the miss latency for all non-zero demand misses
  if (miss_latency != 0) {
    g_system_ptr->getProfiler()->missLatency(miss_latency, type);

    if (Debug::getProtocolTrace()) {
      g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(ruby_request.paddr),
                                                     "", "Done", "", int_to_string(miss_latency)+" cycles");
    }
  }
  /*
  if (request.getPrefetch() == PrefetchBit_Yes) {
    return; // Ignore the prefetch
  }
  */

  // update the data
  if (ruby_request.data != NULL) {
    if ((type == RubyRequestType_LD) ||
        (type == RubyRequestType_IFETCH) ||
        (type == RubyRequestType_RMW_Read)) {
      memcpy(ruby_request.data, data.getData(request_address.getOffset(), ruby_request.len), ruby_request.len);
    } else {
      data.setData(ruby_request.data, request_address.getOffset(), ruby_request.len);
    }
  }

  m_hit_callback(srequest->id);
  delete srequest;
}

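// The copy direction in hitCallback() above depends on the request
// type: loads, ifetches, and RMW reads copy from the DataBlock out to
// ruby_request.data at the request's offset within the line, while
// stores copy in. A worked example (line size comes from RubySystem;
// 64 bytes assumed here for illustration): a 4-byte load at paddr
// 0x1004 has getOffset() == 0x4, so bytes [4,8) of the block are
// copied out.
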
// Returns 1 if the request can be issued, or one of the non-positive
// LIBRUBY status codes: LIBRUBY_ALIASED_REQUEST if the sequencer already
// has a load or store outstanding to the same cache line, or
// LIBRUBY_BUFFER_FULL if the maximum number of requests is outstanding.
int Sequencer::isReady(const RubyRequest& request) {
  bool is_outstanding_store = m_writeRequestTable.exist(line_address(Address(request.paddr)));
  bool is_outstanding_load = m_readRequestTable.exist(line_address(Address(request.paddr)));
  if (is_outstanding_store) {
    if ((request.type == RubyRequestType_LD) ||
        (request.type == RubyRequestType_IFETCH) ||
        (request.type == RubyRequestType_RMW_Read)) {
      m_store_waiting_on_load_cycles++;
    } else {
      m_store_waiting_on_store_cycles++;
    }
    return LIBRUBY_ALIASED_REQUEST;
  } else if (is_outstanding_load) {
    if ((request.type == RubyRequestType_ST) ||
        (request.type == RubyRequestType_RMW_Write)) {
      m_load_waiting_on_store_cycles++;
    } else {
      m_load_waiting_on_load_cycles++;
    }
    return LIBRUBY_ALIASED_REQUEST;
  }

  if (m_outstanding_count >= m_max_outstanding_requests) {
    return LIBRUBY_BUFFER_FULL;
  }

  return 1;
}

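// Note on the aliasing counters bumped in isReady() above: they are
// incremented once per rejected call (which approximates stalled cycles
// when the caller retries every cycle) and are keyed first by the type
// of the outstanding request, then by the newcomer's: an incoming load
// aliasing an outstanding store bumps m_store_waiting_on_load_cycles,
// an incoming store aliasing an outstanding load bumps
// m_load_waiting_on_store_cycles, and so on.
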
bool Sequencer::empty() const {
  return (m_writeRequestTable.size() == 0) && (m_readRequestTable.size() == 0);
}

int64_t Sequencer::makeRequest(const RubyRequest & request)
{
  assert(Address(request.paddr).getOffset() + request.len <= RubySystem::getBlockSizeBytes());
  int ready = isReady(request);
  if (ready > 0) {
    int64_t id = makeUniqueRequestID();
    SequencerRequest *srequest = new SequencerRequest(request, id, g_eventQueue_ptr->getTime());
    bool found = insertRequest(srequest);
    if (!found) {
      if (request.type == RubyRequestType_Locked_Write) {
        // NOTE: it is OK to check the locked flag here as the mandatory queue will be checked first
        // ensuring that nothing comes between checking the flag and servicing the store
        if (!m_dataCache_ptr->isLocked(line_address(Address(request.paddr)), m_version)) {
          return LLSC_FAIL;
        }
        else {
          m_dataCache_ptr->clearLocked(line_address(Address(request.paddr)));
        }
      }
      issueRequest(request);

      // TODO: issue hardware prefetches here
      return id;
    }
    else {
      assert(0);
      return 0;
    }
  } else {
    return ready;
  }
}

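// A minimal caller-side sketch (hypothetical client code, not part of
// this file), showing how the return value of makeRequest() is meant to
// be consumed:
//
//   int64_t id = seq->makeRequest(req);
//   if (id > 0) {
//     // accepted: completion is delivered later through the hit
//     // callback registered with this sequencer
//   } else {
//     // LLSC_FAIL, LIBRUBY_ALIASED_REQUEST, or LIBRUBY_BUFFER_FULL:
//     // the request was not serviced; the caller retries, or treats
//     // the store-conditional as failed
//   }
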
void Sequencer::issueRequest(const RubyRequest& request) {

  // TODO: get rid of CacheMsg, CacheRequestType, and AccessModeType, &
  // have SLICC use RubyRequest and subtypes natively
  CacheRequestType ctype;
  switch(request.type) {
  case RubyRequestType_IFETCH:
    ctype = CacheRequestType_IFETCH;
    break;
  case RubyRequestType_LD:
    ctype = CacheRequestType_LD;
    break;
  case RubyRequestType_ST:
    ctype = CacheRequestType_ST;
    break;
  case RubyRequestType_Locked_Read:
  case RubyRequestType_Locked_Write:
  case RubyRequestType_RMW_Read:
  case RubyRequestType_RMW_Write:
    ctype = CacheRequestType_ATOMIC;
    break;
  default:
    assert(0);
  }
  AccessModeType amtype;
  switch(request.access_mode){
  case RubyAccessMode_User:
    amtype = AccessModeType_UserMode;
    break;
  case RubyAccessMode_Supervisor:
    amtype = AccessModeType_SupervisorMode;
    break;
  case RubyAccessMode_Device:
    amtype = AccessModeType_UserMode;
    break;
  default:
    assert(0);
  }
  Address line_addr(request.paddr);
  line_addr.makeLineAddress();
  CacheMsg msg(line_addr, Address(request.paddr), ctype, Address(request.pc), amtype, request.len, PrefetchBit_No, request.proc_id);

  if (Debug::getProtocolTrace()) {
    g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(request.paddr),
                                                   "", "Begin", "", RubyRequestType_to_string(request.type));
  }

  if (g_system_ptr->getTracer()->traceEnabled()) {
    g_system_ptr->getTracer()->traceRequest(m_name, line_addr, Address(request.pc),
                                            request.type, g_eventQueue_ptr->getTime());
  }

  Time latency = 0;  // initialized below from the issuing cache's latency

  if (request.type == RubyRequestType_IFETCH)
    latency = m_instCache_ptr->getLatency();
  else
    latency = m_dataCache_ptr->getLatency();

  // Send the message to the cache controller
  assert(latency > 0);

  assert(m_mandatory_q_ptr != NULL);
  m_mandatory_q_ptr->enqueue(msg, latency);
}
/*
bool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
                               AccessModeType access_mode,
                               int size, DataBlock*& data_ptr) {
  if (type == CacheRequestType_IFETCH) {
    return m_instCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
  } else {
    return m_dataCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
  }
}
*/

void Sequencer::print(ostream& out) const {
  out << "[Sequencer: " << m_version
      << ", outstanding requests: " << m_outstanding_count;

  out << ", read request table: " << m_readRequestTable
      << ", write request table: " << m_writeRequestTable;
  out << "]";
}

// this can be called from setState whenever coherence permissions are upgraded
// when invoked, coherence violations will be checked for the given block
void Sequencer::checkCoherence(const Address& addr) {
#ifdef CHECK_COHERENCE
  g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}