old ( 8530:3aaa99208a84 ) new ( 8615:e66a566f2cfa )
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;

--- 14 unchanged lines hidden ---

 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "base/misc.hh"
#include "base/str.hh"
#include "config/the_isa.hh"
#if THE_ISA == X86_ISA
#include "arch/x86/insts/microldstop.hh"
#endif // X86_ISA
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubySequencer.hh"
#include "mem/protocol/PrefetchBit.hh"
#include "mem/protocol/RubyAccessMode.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/packet.hh"
#include "params/RubySequencer.hh"

using namespace std;

--- 9 unchanged lines hidden ---

{
    m_store_waiting_on_load_cycles = 0;
    m_store_waiting_on_store_cycles = 0;
    m_load_waiting_on_store_cycles = 0;
    m_load_waiting_on_load_cycles = 0;

    m_outstanding_count = 0;

    m_deadlock_threshold = 0;
    m_instCache_ptr = NULL;
    m_dataCache_ptr = NULL;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

--- 24 unchanged lines hidden ---

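    // Scan all outstanding read requests; any request that has been
    // waiting longer than the deadlock threshold indicates a likely
    // protocol deadlock.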
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_readRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              Address(request->pkt->getAddr()), m_readRequestTable.size(),
              current_time, request->issue_time,
              current_time - request->issue_time);
    }

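    // Repeat the same age check for all outstanding write requests.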
    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              Address(request->pkt->getAddr()), m_writeRequestTable.size(),
              current_time, request->issue_time,
              current_time - request->issue_time);
    }

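    // Sanity check: the bookkeeping counter must match the combined
    // size of the two request tables.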
    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

--- 78 unchanged lines hidden ---

        << " controller: " << m_controller->getName() << endl
        << " version: " << m_version << endl
        << " max_outstanding_requests: " << m_max_outstanding_requests << endl
        << " deadlock_threshold: " << m_deadlock_threshold << endl;
}

// Insert the request into the correct request table. Returns
// RequestStatus_Aliased if a request to the same cache line is already
// outstanding, RequestStatus_Ready otherwise.
RequestStatus
Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
{
    int total_outstanding =
        m_writeRequestTable.size() + m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    // See if we should schedule a deadlock check
    if (deadlockCheckEvent.scheduled() == false) {
        schedule(deadlockCheckEvent, m_deadlock_threshold + curTick());
    }

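    // Requests are tracked at cache-line granularity, so mask the packet
    // address down to its line address before probing the tables.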
    Address line_addr(pkt->getAddr());
    line_addr.makeLineAddress();
    if ((request_type == RubyRequestType_ST) ||
        (request_type == RubyRequestType_RMW_Read) ||
        (request_type == RubyRequestType_RMW_Write) ||
        (request_type == RubyRequestType_Load_Linked) ||
        (request_type == RubyRequestType_Store_Conditional) ||
        (request_type == RubyRequestType_Locked_RMW_Read) ||
        (request_type == RubyRequestType_Locked_RMW_Write) ||
        (request_type == RubyRequestType_FLUSH)) {

        // Check if there is any outstanding read request for the same
        // cache line.
        if (m_readRequestTable.count(line_addr) > 0) {
            m_store_waiting_on_load_cycles++;
            return RequestStatus_Aliased;
        }

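        // Insert a placeholder entry first; if the insert succeeds, this
        // is the first outstanding write to the line and the placeholder
        // is filled in with a real request.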
        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(RequestTable::value_type(line_addr, 0));
        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type,
                                             g_eventQueue_ptr->getTime());
            m_outstanding_count++;
        } else {
            // There is an outstanding write request for the cache line
            m_store_waiting_on_store_cycles++;
            return RequestStatus_Aliased;
        }
    } else {
        // Check if there is any outstanding write request for the same
        // cache line.
        if (m_writeRequestTable.count(line_addr) > 0) {
            m_load_waiting_on_store_cycles++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(RequestTable::value_type(line_addr, 0));

        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type,
                                             g_eventQueue_ptr->getTime());
            m_outstanding_count++;
        } else {
            // There is an outstanding read request for the cache line
            m_load_waiting_on_load_cycles++;
            return RequestStatus_Aliased;
        }
    }

    g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);
    total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
    assert(m_outstanding_count == total_outstanding);

    return RequestStatus_Ready;
}

void
Sequencer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}

void
Sequencer::removeRequest(SequencerRequest* srequest)
{
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());

    Address line_addr(srequest->pkt->getAddr());
    line_addr.makeLineAddress();
    if ((srequest->m_type == RubyRequestType_ST) ||
        (srequest->m_type == RubyRequestType_RMW_Read) ||
        (srequest->m_type == RubyRequestType_RMW_Write) ||
        (srequest->m_type == RubyRequestType_Load_Linked) ||
        (srequest->m_type == RubyRequestType_Store_Conditional) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Read) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Write)) {
        m_writeRequestTable.erase(line_addr);
    } else {
        m_readRequestTable.erase(line_addr);
    }

    markRemoved();
}

bool
Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
{
    //
    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    //
    bool success = true;
    if (request->m_type == RubyRequestType_Store_Conditional) {
        if (!m_dataCache_ptr->isLocked(address, m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        m_dataCache_ptr->clearLocked(address);
    } else if (request->m_type == RubyRequestType_Load_Linked) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        m_dataCache_ptr->setLocked(address, m_version);
    } else if ((m_dataCache_ptr->isTagPresent(address)) &&
               (m_dataCache_ptr->isLocked(address, m_version))) {
        //
        // Normal writes should clear the locked address
        //
        m_dataCache_ptr->clearLocked(address);
    }
    return success;
}


--- 24 unchanged lines hidden ---

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    SequencerRequest* request = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_ST) ||
           (request->m_type == RubyRequestType_ATOMIC) ||
           (request->m_type == RubyRequestType_RMW_Read) ||
           (request->m_type == RubyRequestType_RMW_Write) ||
           (request->m_type == RubyRequestType_Load_Linked) ||
           (request->m_type == RubyRequestType_Store_Conditional) ||
           (request->m_type == RubyRequestType_Locked_RMW_Read) ||
           (request->m_type == RubyRequestType_Locked_RMW_Write) ||
           (request->m_type == RubyRequestType_FLUSH));

    //
    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    // Not valid for the Network_test protocol.
    //
    bool success = true;
    if (!m_usingNetworkTester)
        success = handleLlsc(address, request);

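    // For locked RMW sequences, the mandatory queue is blocked on the RMW
    // read so no other request can reach the line until the matching RMW
    // write unblocks it.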
    if (request->m_type == RubyRequestType_Locked_RMW_Read) {
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->m_type == RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, mach, data, success,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

void

--- 23 unchanged lines hidden ---

    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    SequencerRequest* request = i->second;

    m_readRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_LD) ||
           (request->m_type == RubyRequestType_IFETCH));

    hitCallback(request, mach, data, true,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}

void
Sequencer::hitCallback(SequencerRequest* srequest,
                       GenericMachineType mach,
                       DataBlock& data,
                       bool success,
                       Time initialRequestTime,
                       Time forwardRequestTime,
                       Time firstResponseTime)
{
    PacketPtr pkt = srequest->pkt;
    Address request_address(pkt->getAddr());
    Address request_line_address(pkt->getAddr());
    request_line_address.makeLineAddress();
    RubyRequestType type = srequest->m_type;
    Time issued_time = srequest->issue_time;

    // Set this cache entry to the most recently used
    if (type == RubyRequestType_IFETCH) {
        if (m_instCache_ptr->isTagPresent(request_line_address))
            m_instCache_ptr->setMRU(request_line_address);
    } else {
        if (m_dataCache_ptr->isTagPresent(request_line_address))
--- 21 unchanged lines hidden ---

                                       forwardRequestTime,
                                       firstResponseTime,
                                       g_eventQueue_ptr->getTime());
        }

        DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
                 curTick(), m_version, "Seq",
                 success ? "Done" : "SC_Failed", "", "",
                 request_address, miss_latency);
    }

    // update the data
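    // Loads copy the line data from Ruby into the M5 packet; stores copy
    // the packet data into Ruby's DataBlock.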
    if (pkt->getPtr<uint8_t>(true) != NULL) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked)) {
            memcpy(pkt->getPtr<uint8_t>(true),
                   data.getData(request_address.getOffset(), pkt->getSize()),
                   pkt->getSize());
        } else {
            data.setData(pkt->getPtr<uint8_t>(true),
                         request_address.getOffset(), pkt->getSize());
        }
    } else {
        DPRINTF(MemoryAccess,
                "WARNING. Data not transferred from Ruby to M5 for type %s\n",
                RubyRequestType_to_string(type));
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data. The tester will later access
    // this state.
    // Note: RubyPort will access its sender state before the
    // RubyTester.
    if (m_usingRubyTester) {
        RubyPort::SenderState *requestSenderState =
            safe_cast<RubyPort::SenderState*>(pkt->senderState);
        RubyTester::SenderState* testerSenderState =
            safe_cast<RubyTester::SenderState*>(requestSenderState->saved);
        testerSenderState->subBlock->mergeFrom(data);
    }

    ruby_hit_callback(pkt);
    delete srequest;
}

bool
Sequencer::empty() const
{
    return m_writeRequestTable.empty() && m_readRequestTable.empty();
}

RequestStatus
Sequencer::makeRequest(PacketPtr pkt)
{
    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    RubyRequestType primary_type = RubyRequestType_NULL;
    RubyRequestType secondary_type = RubyRequestType_NULL;

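    // The primary type tracks the request in the sequencer's own tables;
    // the secondary type is the request type actually issued to the cache
    // coherence protocol.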
    if (pkt->isLLSC()) {
        //
        // Alpha LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics. In
        // particular, by identifying the operations as atomic, the protocol
        // should understand that migratory sharing optimizations should not
        // be performed (i.e. a load between the LL and SC should not steal
        // away exclusive permission).
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing SC\n");
            primary_type = RubyRequestType_Store_Conditional;
        } else {
            DPRINTF(RubySequencer, "Issuing LL\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Load_Linked;
        }
        secondary_type = RubyRequestType_ATOMIC;
    } else if (pkt->req->isLocked()) {
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
            primary_type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Locked_RMW_Read;
        }
        secondary_type = RubyRequestType_ST;
    } else {
        if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                primary_type = secondary_type = RubyRequestType_IFETCH;
            } else {
#if THE_ISA == X86_ISA
                uint32_t flags = pkt->req->getFlags();
                bool storeCheck = flags &
                    (TheISA::StoreCheck << TheISA::FlagShift);
#else
                bool storeCheck = false;
#endif // X86_ISA
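                // StoreCheck loads must also obtain write permission, so
                // they are issued to the protocol as stores (RMW reads).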
                if (storeCheck) {
                    primary_type = RubyRequestType_RMW_Read;
                    secondary_type = RubyRequestType_ST;
                } else {
                    primary_type = secondary_type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isWrite()) {
            //
            // Note: M5 packets do not differentiate ST from RMW_Write
            //
            primary_type = secondary_type = RubyRequestType_ST;
        } else if (pkt->isFlush()) {
            primary_type = secondary_type = RubyRequestType_FLUSH;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    RequestStatus status = insertRequest(pkt, primary_type);
    if (status != RequestStatus_Ready)
        return status;

    issueRequest(pkt, secondary_type);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}

void
Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
{
    int proc_id = -1;
    if (pkt != NULL && pkt->req->hasContextId()) {
        proc_id = pkt->req->contextId();
    }

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

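    // Package everything into a RubyRequest message for the cache
    // controller.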
    RubyRequest *msg = new RubyRequest(pkt->getAddr(),
                                       pkt->getPtr<uint8_t>(true),
                                       pkt->getSize(), pc, secondary_type,
                                       RubyAccessMode_Supervisor, pkt,
                                       PrefetchBit_No, proc_id);

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %s\n",
             curTick(), m_version, "Seq", "Begin", "", "",
             msg->getPhysicalAddress(),
             RubyRequestType_to_string(secondary_type));

    Time latency = 0;  // initialized to a null value

    if (secondary_type == RubyRequestType_IFETCH)
        latency = m_instCache_ptr->getLatency();
    else
        latency = m_dataCache_ptr->getLatency();

    // Send the message to the cache controller
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);

--- 38 unchanged lines hidden ---