30a31,34
> #include "config/the_isa.hh"
> #if THE_ISA == X86_ISA
> #include "arch/x86/insts/microldstop.hh"
> #endif // X86_ISA
33a38,40
> #include "debug/RubySequencer.hh"
> #include "mem/protocol/PrefetchBit.hh"
> #include "mem/protocol/RubyAccessMode.hh"
39d45
< #include "mem/ruby/slicc_interface/AbstractController.hh"
65d70
< m_max_outstanding_requests = 0;
106c111
< request->ruby_request.m_PhysicalAddress, m_readRequestTable.size(),
---
> Address(request->pkt->getAddr()), m_readRequestTable.size(),
121c126
< request->ruby_request.m_PhysicalAddress, m_writeRequestTable.size(),
---
> Address(request->pkt->getAddr()), m_writeRequestTable.size(),
216,217c221,222
< bool
< Sequencer::insertRequest(SequencerRequest* request)
---
> RequestStatus
> Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
229c234
< Address line_addr(request->ruby_request.m_PhysicalAddress);
---
> Address line_addr(pkt->getAddr());
231,239c236,251
< if ((request->ruby_request.m_Type == RubyRequestType_ST) ||
< (request->ruby_request.m_Type == RubyRequestType_ATOMIC) ||
< (request->ruby_request.m_Type == RubyRequestType_RMW_Read) ||
< (request->ruby_request.m_Type == RubyRequestType_RMW_Write) ||
< (request->ruby_request.m_Type == RubyRequestType_Load_Linked) ||
< (request->ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
< (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
< (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write) ||
< (request->ruby_request.m_Type == RubyRequestType_FLUSH)) {
---
> if ((request_type == RubyRequestType_ST) ||
> (request_type == RubyRequestType_RMW_Read) ||
> (request_type == RubyRequestType_RMW_Write) ||
> (request_type == RubyRequestType_Load_Linked) ||
> (request_type == RubyRequestType_Store_Conditional) ||
> (request_type == RubyRequestType_Locked_RMW_Read) ||
> (request_type == RubyRequestType_Locked_RMW_Write) ||
> (request_type == RubyRequestType_FLUSH)) {
>
> // Check if there is any outstanding read request for the same
> // cache line.
> if (m_readRequestTable.count(line_addr) > 0) {
> m_store_waiting_on_load_cycles++;
> return RequestStatus_Aliased;
> }
>
242,249c254,262
< bool success = r.second;
< RequestTable::iterator i = r.first;
< if (!success) {
< i->second = request;
< // return true;
<
< // drh5: isn't this an error? do you lose the initial request?
< assert(0);
---
> if (r.second) {
> RequestTable::iterator i = r.first;
> i->second = new SequencerRequest(pkt, request_type,
> g_eventQueue_ptr->getTime());
> m_outstanding_count++;
> } else {
> // There is an outstanding write request for the cache line
> m_store_waiting_on_store_cycles++;
> return RequestStatus_Aliased;
251,252d263
< i->second = request;
< m_outstanding_count++;
253a265,271
> // Check if there is any outstanding write request for the same
> // cache line.
> if (m_writeRequestTable.count(line_addr) > 0) {
> m_load_waiting_on_store_cycles++;
> return RequestStatus_Aliased;
> }
>
256,260d273
< bool success = r.second;
< RequestTable::iterator i = r.first;
< if (!success) {
< i->second = request;
< // return true;
262,263c275,283
< // drh5: isn't this an error? do you lose the initial request?
< assert(0);
---
> if (r.second) {
> RequestTable::iterator i = r.first;
> i->second = new SequencerRequest(pkt, request_type,
> g_eventQueue_ptr->getTime());
> m_outstanding_count++;
> } else {
> // There is an outstanding read request for the cache line
> m_load_waiting_on_load_cycles++;
> return RequestStatus_Aliased;
265,266d284
< i->second = request;
< m_outstanding_count++;
270d287
<
274c291
< return false;
---
> return RequestStatus_Ready;
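
The two symmetric halves of the reworked insertRequest() fold the old getRequestStatus() aliasing checks into the insertion itself: a store-class request is rejected if a read to the same line is outstanding, a load is rejected on an outstanding write, and the map insert doubles as the same-class check. A minimal standalone sketch of that pattern, with std::map standing in for Ruby's RequestTable and simplified illustrative types:

    #include <cstdint>
    #include <map>

    enum class Status { Ready, Aliased };
    struct Req { /* payload elided */ };
    using LineAddr = uint64_t;

    std::map<LineAddr, Req*> readTable, writeTable;

    // Mirrors the store-class path of the new insertRequest(): reject on
    // any outstanding read to the line, then let the insert itself detect
    // an outstanding write to the same line.
    Status insertStore(LineAddr line, Req* req)
    {
        if (readTable.count(line) > 0)
            return Status::Aliased;          // store waiting on load
        auto r = writeTable.insert({line, nullptr});
        if (!r.second)
            return Status::Aliased;          // store waiting on store
        r.first->second = req;               // record only on success
        return Status::Ready;
    }

The load path is symmetric: check writeTable first, then insert into readTable.
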
291,292c308
< const RubyRequest & ruby_request = srequest->ruby_request;
< Address line_addr(ruby_request.m_PhysicalAddress);
---
> Address line_addr(srequest->pkt->getAddr());
294,300c310,316
< if ((ruby_request.m_Type == RubyRequestType_ST) ||
< (ruby_request.m_Type == RubyRequestType_RMW_Read) ||
< (ruby_request.m_Type == RubyRequestType_RMW_Write) ||
< (ruby_request.m_Type == RubyRequestType_Load_Linked) ||
< (ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
< (ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
< (ruby_request.m_Type == RubyRequestType_Locked_RMW_Write)) {
---
> if ((srequest->m_type == RubyRequestType_ST) ||
> (srequest->m_type == RubyRequestType_RMW_Read) ||
> (srequest->m_type == RubyRequestType_RMW_Write) ||
> (srequest->m_type == RubyRequestType_Load_Linked) ||
> (srequest->m_type == RubyRequestType_Store_Conditional) ||
> (srequest->m_type == RubyRequestType_Locked_RMW_Read) ||
> (srequest->m_type == RubyRequestType_Locked_RMW_Write)) {
318c334
< if (request->ruby_request.m_Type == RubyRequestType_Store_Conditional) {
---
> if (request->m_type == RubyRequestType_Store_Conditional) {
324c340
< request->ruby_request.pkt->req->setExtraData(0);
---
> request->pkt->req->setExtraData(0);
331c347
< request->ruby_request.pkt->req->setExtraData(1);
---
> request->pkt->req->setExtraData(1);
337c353
< } else if (request->ruby_request.m_Type == RubyRequestType_Load_Linked) {
---
> } else if (request->m_type == RubyRequestType_Load_Linked) {
343c359,360
< } else if ((m_dataCache_ptr->isTagPresent(address)) && (m_dataCache_ptr->isLocked(address, m_version))) {
---
> } else if ((m_dataCache_ptr->isTagPresent(address)) &&
> (m_dataCache_ptr->isLocked(address, m_version))) {
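
The llsc handling around these hunks follows the usual link-register discipline: a Load_Linked locks the line for this sequencer's m_version, and a Store_Conditional succeeds only if that lock is still held, reporting the outcome through pkt->req->setExtraData() as shown above. A toy model of the assumed semantics, with a std::map in place of the cache's per-line lock state:

    #include <cstdint>
    #include <map>

    std::map<uint64_t, int> lockedBy;   // line address -> locking version

    void loadLinked(uint64_t line, int version)
    {
        lockedBy[line] = version;       // corresponds to setLocked()
    }

    // True iff the line is still locked by the same version; the lock is
    // consumed on both the success and failure paths, matching the
    // clearLocked() calls in the surrounding code.
    bool storeConditional(uint64_t line, int version)
    {
        auto it = lockedBy.find(line);
        bool ok = (it != lockedBy.end() && it->second == version);
        lockedBy.erase(line);
        return ok;                      // becomes setExtraData(1) or (0)
    }
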
384,392c401,409
< assert((request->ruby_request.m_Type == RubyRequestType_ST) ||
< (request->ruby_request.m_Type == RubyRequestType_ATOMIC) ||
< (request->ruby_request.m_Type == RubyRequestType_RMW_Read) ||
< (request->ruby_request.m_Type == RubyRequestType_RMW_Write) ||
< (request->ruby_request.m_Type == RubyRequestType_Load_Linked) ||
< (request->ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
< (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
< (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write) ||
< (request->ruby_request.m_Type == RubyRequestType_FLUSH));
---
> assert((request->m_type == RubyRequestType_ST) ||
> (request->m_type == RubyRequestType_ATOMIC) ||
> (request->m_type == RubyRequestType_RMW_Read) ||
> (request->m_type == RubyRequestType_RMW_Write) ||
> (request->m_type == RubyRequestType_Load_Linked) ||
> (request->m_type == RubyRequestType_Store_Conditional) ||
> (request->m_type == RubyRequestType_Locked_RMW_Read) ||
> (request->m_type == RubyRequestType_Locked_RMW_Write) ||
> (request->m_type == RubyRequestType_FLUSH));
405c422
< if (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) {
---
> if (request->m_type == RubyRequestType_Locked_RMW_Read) {
407c424
< } else if (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write) {
---
> } else if (request->m_type == RubyRequestType_Locked_RMW_Write) {
447,448c464,465
< assert((request->ruby_request.m_Type == RubyRequestType_LD) ||
< (request->ruby_request.m_Type == RubyRequestType_IFETCH));
---
> assert((request->m_type == RubyRequestType_LD) ||
> (request->m_type == RubyRequestType_IFETCH));
463,465c480,482
< const RubyRequest & ruby_request = srequest->ruby_request;
< Address request_address(ruby_request.m_PhysicalAddress);
< Address request_line_address(ruby_request.m_PhysicalAddress);
---
> PacketPtr pkt = srequest->pkt;
> Address request_address(pkt->getAddr());
> Address request_line_address(pkt->getAddr());
467c484
< RubyRequestType type = ruby_request.m_Type;
---
> RubyRequestType type = srequest->m_type;
505c522
< ruby_request.m_PhysicalAddress, miss_latency);
---
> request_address, miss_latency);
509c526
< if (ruby_request.data != NULL) {
---
> if (pkt->getPtr<uint8_t>(true) != NULL) {
515,517c532,534
< memcpy(ruby_request.data,
< data.getData(request_address.getOffset(), ruby_request.m_Size),
< ruby_request.m_Size);
---
> memcpy(pkt->getPtr<uint8_t>(true),
> data.getData(request_address.getOffset(), pkt->getSize()),
> pkt->getSize());
519,520c536,537
< data.setData(ruby_request.data, request_address.getOffset(),
< ruby_request.m_Size);
---
> data.setData(pkt->getPtr<uint8_t>(true),
> request_address.getOffset(), pkt->getSize());
535c552
< safe_cast<RubyPort::SenderState*>(ruby_request.pkt->senderState);
---
> safe_cast<RubyPort::SenderState*>(pkt->senderState);
541c558
< ruby_hit_callback(ruby_request.pkt);
---
> ruby_hit_callback(pkt);
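
With the RubyRequest wrapper gone, hitCallback() now moves data directly between the packet buffer and the cache's DataBlock, indexed by the request address's offset within the line and sized by pkt->getSize(). A condensed sketch of that copy with simplified stand-in types (a 64-byte line is assumed):

    #include <cstdint>
    #include <cstring>

    struct Block { uint8_t bytes[64]; };   // stand-in for Ruby's DataBlock

    void copyOnHit(bool isRead, Block& blk, unsigned offset,
                   uint8_t* pktData, unsigned pktSize)
    {
        if (pktData == nullptr)
            return;                       // e.g. a flush carries no payload
        if (isRead)
            std::memcpy(pktData, blk.bytes + offset, pktSize);  // cache -> pkt
        else
            std::memcpy(blk.bytes + offset, pktData, pktSize);  // pkt -> cache
    }
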
545,578d561
< // Returns true if the sequencer already has a load or store outstanding
< RequestStatus
< Sequencer::getRequestStatus(const RubyRequest& request)
< {
< bool is_outstanding_store =
< !!m_writeRequestTable.count(line_address(request.m_PhysicalAddress));
< bool is_outstanding_load =
< !!m_readRequestTable.count(line_address(request.m_PhysicalAddress));
< if (is_outstanding_store) {
< if ((request.m_Type == RubyRequestType_LD) ||
< (request.m_Type == RubyRequestType_IFETCH) ||
< (request.m_Type == RubyRequestType_RMW_Read)) {
< m_store_waiting_on_load_cycles++;
< } else {
< m_store_waiting_on_store_cycles++;
< }
< return RequestStatus_Aliased;
< } else if (is_outstanding_load) {
< if ((request.m_Type == RubyRequestType_ST) ||
< (request.m_Type == RubyRequestType_RMW_Write)) {
< m_load_waiting_on_store_cycles++;
< } else {
< m_load_waiting_on_load_cycles++;
< }
< return RequestStatus_Aliased;
< }
<
< if (m_outstanding_count >= m_max_outstanding_requests) {
< return RequestStatus_BufferFull;
< }
<
< return RequestStatus_Ready;
< }
<
586c569
< Sequencer::makeRequest(const RubyRequest &request)
---
> Sequencer::makeRequest(PacketPtr pkt)
588,592c571,573
< assert(request.m_PhysicalAddress.getOffset() + request.m_Size <=
< RubySystem::getBlockSizeBytes());
< RequestStatus status = getRequestStatus(request);
< if (status != RequestStatus_Ready)
< return status;
---
> if (m_outstanding_count >= m_max_outstanding_requests) {
> return RequestStatus_BufferFull;
> }
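
The up-front size assert and getRequestStatus() call are gone: makeRequest() now performs only this buffer-full check before classifying the packet, and aliasing is detected later, inside insertRequest(). Callers should treat every non-Ready status as transient; an illustrative fragment (the names sequencer and scheduleRetry are assumptions, not code from this patch):

    RequestStatus status = sequencer->makeRequest(pkt);
    if (status != RequestStatus_Ready) {
        // BufferFull and Aliased are both transient: keep the packet
        // and retry once an outstanding request completes.
        scheduleRetry(pkt);
    }
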
594,600c575,640
< SequencerRequest *srequest =
< new SequencerRequest(request, g_eventQueue_ptr->getTime());
< bool found = insertRequest(srequest);
< if (found) {
< panic("Sequencer::makeRequest should never be called if the "
< "request is already outstanding\n");
< return RequestStatus_NULL;
---
> RubyRequestType primary_type = RubyRequestType_NULL;
> RubyRequestType secondary_type = RubyRequestType_NULL;
>
> if (pkt->isLLSC()) {
> //
> // Alpha LL/SC instructions need to be handled carefully by the cache
> // coherence protocol to ensure they follow the proper semantics. In
> // particular, by identifying the operations as atomic, the protocol
> // should understand that migratory sharing optimizations should not
> // be performed (i.e. a load between the LL and SC should not steal
> // away exclusive permission).
> //
> if (pkt->isWrite()) {
> DPRINTF(RubySequencer, "Issuing SC\n");
> primary_type = RubyRequestType_Store_Conditional;
> } else {
> DPRINTF(RubySequencer, "Issuing LL\n");
> assert(pkt->isRead());
> primary_type = RubyRequestType_Load_Linked;
> }
> secondary_type = RubyRequestType_ATOMIC;
> } else if (pkt->req->isLocked()) {
> //
> // x86 locked instructions are translated to store cache coherence
> // requests because these requests should always be treated as read
> // exclusive operations and should leverage any migratory sharing
> // optimization built into the protocol.
> //
> if (pkt->isWrite()) {
> DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
> primary_type = RubyRequestType_Locked_RMW_Write;
> } else {
> DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
> assert(pkt->isRead());
> primary_type = RubyRequestType_Locked_RMW_Read;
> }
> secondary_type = RubyRequestType_ST;
> } else {
> if (pkt->isRead()) {
> if (pkt->req->isInstFetch()) {
> primary_type = secondary_type = RubyRequestType_IFETCH;
> } else {
> #if THE_ISA == X86_ISA
> uint32_t flags = pkt->req->getFlags();
> bool storeCheck = flags &
> (TheISA::StoreCheck << TheISA::FlagShift);
> #else
> bool storeCheck = false;
> #endif // X86_ISA
> if (storeCheck) {
> primary_type = RubyRequestType_RMW_Read;
> secondary_type = RubyRequestType_ST;
> } else {
> primary_type = secondary_type = RubyRequestType_LD;
> }
> }
> } else if (pkt->isWrite()) {
> //
> // Note: M5 packets do not differentiate ST from RMW_Write
> //
> primary_type = secondary_type = RubyRequestType_ST;
> } else if (pkt->isFlush()) {
> primary_type = secondary_type = RubyRequestType_FLUSH;
> } else {
> panic("Unsupported ruby packet type\n");
> }
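
The block above is the heart of the change: both request types are now derived directly from packet attributes instead of being pre-computed by the CPU side. A compact standalone restatement of the mapping, using a stripped-down stand-in for M5's Packet (the x86 storeCheck case, which maps a checked load to RMW_Read over ST, is omitted for brevity):

    #include <cassert>
    #include <utility>

    enum class RType { LD, ST, IFETCH, FLUSH, ATOMIC,
                       LL, SC, LockedRMWRead, LockedRMWWrite };

    struct Pkt { bool llsc, locked, read, write, ifetch, flush; };

    // Returns {primary, secondary}: the primary type drives the
    // sequencer's own bookkeeping; the secondary type is what the
    // coherence protocol ultimately sees.
    std::pair<RType, RType> classify(const Pkt& p)
    {
        if (p.llsc)                   // LL/SC is ATOMIC to the protocol
            return {p.write ? RType::SC : RType::LL, RType::ATOMIC};
        if (p.locked)                 // x86 locked RMW is ST to the protocol
            return {p.write ? RType::LockedRMWWrite : RType::LockedRMWRead,
                    RType::ST};
        if (p.read)
            return p.ifetch ? std::make_pair(RType::IFETCH, RType::IFETCH)
                            : std::make_pair(RType::LD, RType::LD);
        if (p.write)                  // M5 packets do not distinguish
            return {RType::ST, RType::ST};  // ST from RMW_Write
        assert(p.flush);
        return {RType::FLUSH, RType::FLUSH};
    }
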
603c643,645
< issueRequest(request);
---
> RequestStatus status = insertRequest(pkt, primary_type);
> if (status != RequestStatus_Ready)
> return status;
604a647,648
> issueRequest(pkt, secondary_type);
>
610c654
< Sequencer::issueRequest(const RubyRequest& request)
---
> Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
612,652c656,658
< // TODO: Eliminate RubyRequest being copied again.
<
< RubyRequestType ctype = RubyRequestType_NUM;
< switch(request.m_Type) {
< case RubyRequestType_IFETCH:
< ctype = RubyRequestType_IFETCH;
< break;
< case RubyRequestType_LD:
< ctype = RubyRequestType_LD;
< break;
< case RubyRequestType_FLUSH:
< ctype = RubyRequestType_FLUSH;
< break;
< case RubyRequestType_ST:
< case RubyRequestType_RMW_Read:
< case RubyRequestType_RMW_Write:
< //
< // x86 locked instructions are translated to store cache coherence
< // requests because these requests should always be treated as read
< // exclusive operations and should leverage any migratory sharing
< // optimization built into the protocol.
< //
< case RubyRequestType_Locked_RMW_Read:
< case RubyRequestType_Locked_RMW_Write:
< ctype = RubyRequestType_ST;
< break;
< //
< // Alpha LL/SC instructions need to be handled carefully by the cache
< // coherence protocol to ensure they follow the proper semantics. In
< // particular, by identifying the operations as atomic, the protocol
< // should understand that migratory sharing optimizations should not be
< // performed (i.e. a load between the LL and SC should not steal away
< // exclusive permission).
< //
< case RubyRequestType_Load_Linked:
< case RubyRequestType_Store_Conditional:
< case RubyRequestType_ATOMIC:
< ctype = RubyRequestType_ATOMIC;
< break;
< default:
< assert(0);
---
> int proc_id = -1;
> if (pkt != NULL && pkt->req->hasContextId()) {
> proc_id = pkt->req->contextId();
655,667c661,664
< RubyAccessMode amtype = RubyAccessMode_NUM;
< switch(request.m_AccessMode){
< case RubyAccessMode_User:
< amtype = RubyAccessMode_User;
< break;
< case RubyAccessMode_Supervisor:
< amtype = RubyAccessMode_Supervisor;
< break;
< case RubyAccessMode_Device:
< amtype = RubyAccessMode_User;
< break;
< default:
< assert(0);
---
> // If valid, copy the PC into the Ruby request
> Addr pc = 0;
> if (pkt->req->hasPC()) {
> pc = pkt->req->getPC();
670,679c667,670
< Address line_addr(request.m_PhysicalAddress);
< line_addr.makeLineAddress();
< int proc_id = -1;
< if (request.pkt != NULL && request.pkt->req->hasContextId()) {
< proc_id = request.pkt->req->contextId();
< }
< RubyRequest *msg = new RubyRequest(request.m_PhysicalAddress.getAddress(),
< request.data, request.m_Size,
< request.m_ProgramCounter.getAddress(),
< ctype, amtype, request.pkt,
---
> RubyRequest *msg = new RubyRequest(pkt->getAddr(),
> pkt->getPtr<uint8_t>(true),
> pkt->getSize(), pc, secondary_type,
> RubyAccessMode_Supervisor, pkt,
684c675,676
< request.m_PhysicalAddress, RubyRequestType_to_string(request.m_Type));
---
> msg->getPhysicalAddress(),
> RubyRequestType_to_string(secondary_type));
688c680
< if (request.m_Type == RubyRequestType_IFETCH)
---
> if (secondary_type == RubyRequestType_IFETCH)
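
Just past this hunk (context the diff does not show), the message is enqueued on the mandatory queue with a latency taken from the instruction cache for fetches and from the data cache otherwise; roughly, assuming the surrounding members keep their existing names:

    // Sketch of the enqueue that follows the IFETCH check; m_instCache_ptr,
    // m_dataCache_ptr, and m_mandatory_q_ptr are assumed from surrounding
    // Sequencer code not shown in these hunks.
    Time latency = (secondary_type == RubyRequestType_IFETCH)
                       ? m_instCache_ptr->getLatency()
                       : m_dataCache_ptr->getLatency();
    assert(latency > 0);
    m_mandatory_q_ptr->enqueue(msg, latency);
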