Sequencer.cc (8092:6782b51ae8a8 vs. 8164:b043c0efa024)
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "base/str.hh"
#include "base/misc.hh"
#include "cpu/testers/rubytest/RubyTester.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/packet.hh"
#include "params/RubySequencer.hh"

using namespace std;

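// Factory method invoked when the Python configuration instantiates a
// RubySequencer SimObject; it simply constructs the C++ Sequencer from its
// parameter struct.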
Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

Sequencer::Sequencer(const Params *p)
    : RubyPort(p), deadlockCheckEvent(this)
{
    m_store_waiting_on_load_cycles = 0;
    m_store_waiting_on_store_cycles = 0;
    m_load_waiting_on_store_cycles = 0;
    m_load_waiting_on_load_cycles = 0;

    m_outstanding_count = 0;

    m_max_outstanding_requests = 0;
    m_deadlock_threshold = 0;
    m_instCache_ptr = NULL;
    m_dataCache_ptr = NULL;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);
}

Sequencer::~Sequencer()
{
}

void
Sequencer::wakeup()
{
    // Check for deadlock of any of the requests
    Time current_time = g_eventQueue_ptr->getTime();

    // Check across all outstanding requests
    int total_outstanding = 0;

    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_readRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->ruby_request.paddr, m_readRequestTable.size(),
              current_time, request->issue_time,
              current_time - request->issue_time);
    }

    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->ruby_request.paddr, m_writeRequestTable.size(),
              current_time, request->issue_time,
              current_time - request->issue_time);
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent,
                 m_deadlock_threshold * g_eventQueue_ptr->getClock() +
                 curTick());
    }
}

void
Sequencer::printStats(ostream & out) const
{
    out << "Sequencer: " << m_name << endl
        << " store_waiting_on_load_cycles: "
        << m_store_waiting_on_load_cycles << endl
        << " store_waiting_on_store_cycles: "
        << m_store_waiting_on_store_cycles << endl
        << " load_waiting_on_load_cycles: "
        << m_load_waiting_on_load_cycles << endl
        << " load_waiting_on_store_cycles: "
        << m_load_waiting_on_store_cycles << endl;
}

void
Sequencer::printProgress(ostream& out) const
{
#if 0
    int total_demand = 0;
    out << "Sequencer Stats Version " << m_version << endl;
    out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
    out << "---------------" << endl;
    out << "outstanding requests" << endl;

    out << "proc " << m_version
        << " Read Requests = " << m_readRequestTable.size() << endl;

    // print the read request table
    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        out << "\tRequest[ " << read->first << " ] = "
            << request->ruby_request.type
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    out << "proc " << m_version
        << " Write Requests = " << m_writeRequestTable.size() << endl;

    // print the write request table
    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        out << "\tRequest[ " << write->first << " ] = "
            << request->ruby_request.type
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    out << endl;

    out << "Total Number Outstanding: " << m_outstanding_count << endl
        << "Total Number Demand     : " << total_demand << endl
        << "Total Number Prefetches : " << m_outstanding_count - total_demand
        << endl << endl << endl;
#endif
}

void
Sequencer::printConfig(ostream& out) const
{
    out << "Sequencer config: " << m_name << endl
        << " controller: " << m_controller->getName() << endl
        << " version: " << m_version << endl
        << " max_outstanding_requests: " << m_max_outstanding_requests << endl
        << " deadlock_threshold: " << m_deadlock_threshold << endl;
}

// Insert the request on the correct request table. Return true if
// the entry was already present.
bool
Sequencer::insertRequest(SequencerRequest* request)
{
    int total_outstanding =
        m_writeRequestTable.size() + m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    // See if we should schedule a deadlock check
    if (deadlockCheckEvent.scheduled() == false) {
        schedule(deadlockCheckEvent, m_deadlock_threshold + curTick());
    }

    Address line_addr(request->ruby_request.paddr);
    line_addr.makeLineAddress();
    if ((request->ruby_request.type == RubyRequestType_ST) ||
        (request->ruby_request.type == RubyRequestType_RMW_Read) ||
        (request->ruby_request.type == RubyRequestType_RMW_Write) ||
        (request->ruby_request.type == RubyRequestType_Load_Linked) ||
        (request->ruby_request.type == RubyRequestType_Store_Conditional) ||
        (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
        (request->ruby_request.type == RubyRequestType_Locked_RMW_Write)) {
        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(RequestTable::value_type(line_addr, 0));
        bool success = r.second;
        RequestTable::iterator i = r.first;
        if (!success) {
            i->second = request;
            // return true;

            // drh5: isn't this an error? do you lose the initial request?
            assert(0);
        }
        i->second = request;
        m_outstanding_count++;
    } else {
        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(RequestTable::value_type(line_addr, 0));
        bool success = r.second;
        RequestTable::iterator i = r.first;
        if (!success) {
            i->second = request;
            // return true;

            // drh5: isn't this an error? do you lose the initial request?
            assert(0);
        }
        i->second = request;
        m_outstanding_count++;
    }

    g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);

    total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
    assert(m_outstanding_count == total_outstanding);

    return false;
}

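// Bookkeeping helper: called after a request has been erased from one of the
// request tables so that m_outstanding_count stays consistent with the
// combined table sizes.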
void
Sequencer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}

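// Drop an outstanding request (keyed by its line address) from whichever
// table holds it, without invoking any completion callback.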
void
Sequencer::removeRequest(SequencerRequest* srequest)
{
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());

    const RubyRequest & ruby_request = srequest->ruby_request;
    Address line_addr(ruby_request.paddr);
    line_addr.makeLineAddress();
    if ((ruby_request.type == RubyRequestType_ST) ||
        (ruby_request.type == RubyRequestType_RMW_Read) ||
        (ruby_request.type == RubyRequestType_RMW_Write) ||
        (ruby_request.type == RubyRequestType_Load_Linked) ||
        (ruby_request.type == RubyRequestType_Store_Conditional) ||
        (ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
        (ruby_request.type == RubyRequestType_Locked_RMW_Write)) {
        m_writeRequestTable.erase(line_addr);
    } else {
        m_readRequestTable.erase(line_addr);
    }

    markRemoved();
}

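// Update the per-line lock state kept in the L1 data cache for a completed
// write-class request. Only a Store_Conditional that finds its line no
// longer locked returns false; everything else reports success.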
bool
Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
{
    //
    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    //
    bool success = true;
    if (request->ruby_request.type == RubyRequestType_Store_Conditional) {
        if (!m_dataCache_ptr->isLocked(address, m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->ruby_request.pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->ruby_request.pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        m_dataCache_ptr->clearLocked(address);
    } else if (request->ruby_request.type == RubyRequestType_Load_Linked) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        m_dataCache_ptr->setLocked(address, m_version);
    } else if (m_dataCache_ptr->isLocked(address, m_version)) {
        //
        // Normal writes should clear the locked address
        //
        m_dataCache_ptr->clearLocked(address);
    }
    return success;
}

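// Completion path for write-class requests: retire the entry from the write
// table, apply LL/SC bookkeeping, manage the mandatory-queue blocking used
// for locked RMW sequences, and then invoke hitCallback().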
void
Sequencer::writeCallback(const Address& address, DataBlock& data)
{
    writeCallback(address, GenericMachineType_NULL, data);
}

void
Sequencer::writeCallback(const Address& address,
                         GenericMachineType mach,
                         DataBlock& data)
{
    writeCallback(address, mach, data, 0, 0, 0);
}

void
Sequencer::writeCallback(const Address& address,
                         GenericMachineType mach,
                         DataBlock& data,
                         Time initialRequestTime,
                         Time forwardRequestTime,
                         Time firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_writeRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    SequencerRequest* request = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->ruby_request.type == RubyRequestType_ST) ||
           (request->ruby_request.type == RubyRequestType_RMW_Read) ||
           (request->ruby_request.type == RubyRequestType_RMW_Write) ||
           (request->ruby_request.type == RubyRequestType_Load_Linked) ||
           (request->ruby_request.type == RubyRequestType_Store_Conditional) ||
           (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
           (request->ruby_request.type == RubyRequestType_Locked_RMW_Write));

    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    bool success = handleLlsc(address, request);

    if (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) {
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->ruby_request.type == RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, mach, data, success,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

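// Completion path for loads and instruction fetches; mirrors writeCallback()
// but retires the entry from the read table and never fails.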
void
Sequencer::readCallback(const Address& address, DataBlock& data)
{
    readCallback(address, GenericMachineType_NULL, data);
}

void
Sequencer::readCallback(const Address& address,
                        GenericMachineType mach,
                        DataBlock& data)
{
    readCallback(address, mach, data, 0, 0, 0);
}

void
Sequencer::readCallback(const Address& address,
                        GenericMachineType mach,
                        DataBlock& data,
                        Time initialRequestTime,
                        Time forwardRequestTime,
                        Time firstResponseTime)
{
    assert(address == line_address(address));
    assert(m_readRequestTable.count(line_address(address)));

    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    SequencerRequest* request = i->second;

    m_readRequestTable.erase(i);
    markRemoved();

    assert((request->ruby_request.type == RubyRequestType_LD) ||
           (request->ruby_request.type == RubyRequestType_IFETCH));

    hitCallback(request, mach, data, true,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

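// Common tail of every completed request: touch the MRU state, profile the
// miss latency, copy data between the Ruby DataBlock and the original packet,
// and hand the packet back to the RubyPort.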
void
Sequencer::hitCallback(SequencerRequest* srequest,
                       GenericMachineType mach,
                       DataBlock& data,
                       bool success,
                       Time initialRequestTime,
                       Time forwardRequestTime,
                       Time firstResponseTime)
{
    const RubyRequest & ruby_request = srequest->ruby_request;
    Address request_address(ruby_request.paddr);
    Address request_line_address(ruby_request.paddr);
    request_line_address.makeLineAddress();
    RubyRequestType type = ruby_request.type;
    Time issued_time = srequest->issue_time;

    // Set this cache entry to the most recently used
    if (type == RubyRequestType_IFETCH) {
        if (m_instCache_ptr->isTagPresent(request_line_address))
            m_instCache_ptr->setMRU(request_line_address);
    } else {
        if (m_dataCache_ptr->isTagPresent(request_line_address))
            m_dataCache_ptr->setMRU(request_line_address);
    }

    assert(g_eventQueue_ptr->getTime() >= issued_time);
    Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

    // Profile the miss latency for all non-zero demand misses
    if (miss_latency != 0) {
        g_system_ptr->getProfiler()->missLatency(miss_latency, type, mach);

        if (mach == GenericMachineType_L1Cache_wCC) {
            g_system_ptr->getProfiler()->missLatencyWcc(issued_time,
                initialRequestTime,
                forwardRequestTime,
                firstResponseTime,
                g_eventQueue_ptr->getTime());
        }

        if (mach == GenericMachineType_Directory) {
            g_system_ptr->getProfiler()->missLatencyDir(issued_time,
                initialRequestTime,
                forwardRequestTime,
                firstResponseTime,
                g_eventQueue_ptr->getTime());
        }

        DPRINTFR(ProtocolTrace, "%7s %3s %10s%20s %6s>%-6s %s %d cycles\n",
                 g_eventQueue_ptr->getTime(), m_version, "Seq",
                 success ? "Done" : "SC_Failed", "", "",
                 Address(ruby_request.paddr), miss_latency);
    }
#if 0
    if (request.getPrefetch() == PrefetchBit_Yes) {
        return; // Ignore the prefetch
    }
#endif

    // update the data
    if (ruby_request.data != NULL) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked)) {
            memcpy(ruby_request.data,
                   data.getData(request_address.getOffset(), ruby_request.len),
                   ruby_request.len);
        } else {
            data.setData(ruby_request.data, request_address.getOffset(),
                         ruby_request.len);
        }
    } else {
        DPRINTF(MemoryAccess,
                "WARNING. Data not transferred from Ruby to M5 for type %s\n",
                RubyRequestType_to_string(type));
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data. The tester will later access
    // this state.
    // Note: RubyPort will access its sender state before the
    // RubyTester.
    if (m_usingRubyTester) {
        RubyPort::SenderState *requestSenderState =
            safe_cast<RubyPort::SenderState*>(ruby_request.pkt->senderState);
        RubyTester::SenderState* testerSenderState =
            safe_cast<RubyTester::SenderState*>(requestSenderState->saved);
        testerSenderState->subBlock->mergeFrom(data);
    }

    ruby_hit_callback(ruby_request.pkt);
    delete srequest;
}

// Check whether a new request can be accepted: returns Aliased if the
// sequencer already has a load or store outstanding to the same line,
// BufferFull if too many requests are outstanding, and Ready otherwise.
RequestStatus
Sequencer::getRequestStatus(const RubyRequest& request)
{
    bool is_outstanding_store =
        !!m_writeRequestTable.count(line_address(Address(request.paddr)));
    bool is_outstanding_load =
        !!m_readRequestTable.count(line_address(Address(request.paddr)));
    if (is_outstanding_store) {
        if ((request.type == RubyRequestType_LD) ||
            (request.type == RubyRequestType_IFETCH) ||
            (request.type == RubyRequestType_RMW_Read)) {
            m_store_waiting_on_load_cycles++;
        } else {
            m_store_waiting_on_store_cycles++;
        }
        return RequestStatus_Aliased;
    } else if (is_outstanding_load) {
        if ((request.type == RubyRequestType_ST) ||
            (request.type == RubyRequestType_RMW_Write)) {
            m_load_waiting_on_store_cycles++;
        } else {
            m_load_waiting_on_load_cycles++;
        }
        return RequestStatus_Aliased;
    }

    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    return RequestStatus_Ready;
}

bool
Sequencer::empty() const
{
    return m_writeRequestTable.empty() && m_readRequestTable.empty();
}

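// Entry point used by the RubyPort: validate that the access fits within one
// cache line, record it in the appropriate request table, and issue it to
// the cache controller.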
RequestStatus
Sequencer::makeRequest(const RubyRequest &request)
{
    assert(Address(request.paddr).getOffset() + request.len <=
           RubySystem::getBlockSizeBytes());
    RequestStatus status = getRequestStatus(request);
    if (status != RequestStatus_Ready)
        return status;

    SequencerRequest *srequest =
        new SequencerRequest(request, g_eventQueue_ptr->getTime());
    bool found = insertRequest(srequest);
    if (found) {
        panic("Sequencer::makeRequest should never be called if the "
              "request is already outstanding\n");
        return RequestStatus_NULL;
    }

    issueRequest(request);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}

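// Translate the RubyRequest into a CacheMsg and enqueue it on the mandatory
// queue after the appropriate L1 (instruction or data) cache latency.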
void
Sequencer::issueRequest(const RubyRequest& request)
{
    // TODO: get rid of CacheMsg, CacheRequestType, and
    // AccessModeType, & have SLICC use RubyRequest and subtypes
    // natively
    CacheRequestType ctype;
    switch(request.type) {
      case RubyRequestType_IFETCH:
        ctype = CacheRequestType_IFETCH;
        break;
      case RubyRequestType_LD:
        ctype = CacheRequestType_LD;
        break;
      case RubyRequestType_ST:
      case RubyRequestType_RMW_Read:
      case RubyRequestType_RMW_Write:
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
        //
      case RubyRequestType_Locked_RMW_Read:
      case RubyRequestType_Locked_RMW_Write:
        ctype = CacheRequestType_ST;
        break;
        //
        // Alpha LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics. In
        // particular, by identifying the operations as atomic, the protocol
        // should understand that migratory sharing optimizations should not
        // be performed (i.e. a load between the LL and SC should not steal
        // away exclusive permission).
        //
      case RubyRequestType_Load_Linked:
      case RubyRequestType_Store_Conditional:
        ctype = CacheRequestType_ATOMIC;
        break;
      default:
        assert(0);
    }

    RubyAccessMode amtype;                  // 8092: AccessModeType amtype;
    switch(request.access_mode){
      case RubyAccessMode_User:
        amtype = RubyAccessMode_User;       // 8092: AccessModeType_UserMode
        break;
      case RubyAccessMode_Supervisor:
        amtype = RubyAccessMode_Supervisor; // 8092: AccessModeType_SupervisorMode
        break;
      case RubyAccessMode_Device:
        amtype = RubyAccessMode_User;       // 8092: AccessModeType_UserMode
        break;
      default:
        assert(0);
    }

    Address line_addr(request.paddr);
    line_addr.makeLineAddress();
    CacheMsg *msg = new CacheMsg(line_addr, Address(request.paddr), ctype,
        Address(request.pc), amtype, request.len, PrefetchBit_No,
        request.proc_id);

    DPRINTFR(ProtocolTrace, "%7s %3s %10s%20s %6s>%-6s %s %s\n",
             g_eventQueue_ptr->getTime(), m_version, "Seq", "Begin", "", "",
             Address(request.paddr), RubyRequestType_to_string(request.type));

    Time latency = 0;  // initialized to a null value

    if (request.type == RubyRequestType_IFETCH)
        latency = m_instCache_ptr->getLatency();
    else
        latency = m_dataCache_ptr->getLatency();

    // Send the message to the cache controller
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, latency);
}

#if 0
bool
Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
                          RubyAccessMode access_mode, // 8092: AccessModeType
                          int size, DataBlock*& data_ptr)
{
    CacheMemory *cache =
        (type == CacheRequestType_IFETCH) ? m_instCache_ptr : m_dataCache_ptr;

    return cache->tryCacheAccess(line_address(addr), type, data_ptr);
}
#endif

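// Stream operator so the read/write request tables (m5::hash_map keyed by
// line address) can be printed by Sequencer::print() below.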
template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map)
{
    typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin();
    typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end();

    out << "[";
    for (; i != end; ++i)
        out << " " << i->first << "=" << i->second;
    out << " ]";

    return out;
}

void
Sequencer::print(ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", read request table: " << m_readRequestTable
        << ", write request table: " << m_writeRequestTable
        << "]";
}

// This can be called from setState whenever coherence permissions are
// upgraded; when invoked, coherence violations will be checked for the
// given block.
void
Sequencer::checkCoherence(const Address& addr)
{
#ifdef CHECK_COHERENCE
    g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}