/*
 * Copyright (c) 2017 Jason Lowe-Power
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * This file contains the directory controller of a simple example MSI protocol
 *
 * In Ruby the directory controller both contains the directory coherence state
 * but also functions as the memory controller in many ways. There are states
 * in the directory that are both memory-centric and cache-centric. Be careful!
 *
 * The protocol in this file is based off of the MSI protocol found in
 * A Primer on Memory Consistency and Cache Coherence
 * Daniel J. Sorin, Mark D. Hill, and David A. Wood
 * Synthesis Lectures on Computer Architecture 2011 6:3, 141-149
 *
 * Table 8.2 contains the transitions and actions found in this file and
 * section 8.2.4 explains the protocol in detail.
 *
 * See Learning gem5 Part 3: Ruby for more details.
4512608Sjason@lowepower.com * 4612608Sjason@lowepower.com * Authors: Jason Lowe-Power 4712608Sjason@lowepower.com */ 4812608Sjason@lowepower.com 4912608Sjason@lowepower.commachine(MachineType:Directory, "Directory protocol") 5012608Sjason@lowepower.com : 5112608Sjason@lowepower.com // This "DirectoryMemory" is a little weird. It is initially allocated 5212608Sjason@lowepower.com // so that it *can* cover all of memory (i.e., there are pointers for 5312608Sjason@lowepower.com // every 64-byte block in memory). However, the entries are lazily 5412608Sjason@lowepower.com // created in getDirEntry() 5512608Sjason@lowepower.com DirectoryMemory * directory; 5612608Sjason@lowepower.com // You can put any parameters you want here. They will be exported as 5712608Sjason@lowepower.com // normal SimObject parameters (like in the SimObject description file) 5812608Sjason@lowepower.com // and you can set these parameters at runtime via the python config 5912608Sjason@lowepower.com // file. If there is no default here (like directory), it is mandatory 6012608Sjason@lowepower.com // to set the parameter in the python config. Otherwise, it uses the 6112608Sjason@lowepower.com // default value set here. 6212608Sjason@lowepower.com Cycles toMemLatency := 1; 6312608Sjason@lowepower.com 6412608Sjason@lowepower.com // Forwarding requests from the directory *to* the caches. 6512608Sjason@lowepower.com MessageBuffer *forwardToCache, network="To", virtual_network="1", 6612608Sjason@lowepower.com vnet_type="forward"; 6712608Sjason@lowepower.com // Response from the directory *to* the cache. 
6812608Sjason@lowepower.com MessageBuffer *responseToCache, network="To", virtual_network="2", 6912608Sjason@lowepower.com vnet_type="response"; 7012608Sjason@lowepower.com 7112608Sjason@lowepower.com // Requests *from* the cache to the directory 7212608Sjason@lowepower.com MessageBuffer *requestFromCache, network="From", virtual_network="0", 7312608Sjason@lowepower.com vnet_type="request"; 7412608Sjason@lowepower.com 7512608Sjason@lowepower.com // Responses *from* the cache to the directory 7612608Sjason@lowepower.com MessageBuffer *responseFromCache, network="From", virtual_network="2", 7712608Sjason@lowepower.com vnet_type="response"; 7812608Sjason@lowepower.com 7912608Sjason@lowepower.com // Special buffer for memory responses. Kind of like the mandatory queue 8012608Sjason@lowepower.com MessageBuffer *responseFromMemory; 8112608Sjason@lowepower.com 8212608Sjason@lowepower.com{ 8312608Sjason@lowepower.com // For many things in SLICC you can specify a default. However, this 8412608Sjason@lowepower.com // default must use the C++ name (mangled SLICC name). For the state below 8512608Sjason@lowepower.com // you have to use the controller name and the name we use for states. 8612608Sjason@lowepower.com state_declaration(State, desc="Directory states", 8712608Sjason@lowepower.com default="Directory_State_I") { 8812608Sjason@lowepower.com // Stable states. 8912608Sjason@lowepower.com // NOTE: Thise are "cache-centric" states like in Sorin et al. 9012608Sjason@lowepower.com // However, The access permissions are memory-centric. 
9112608Sjason@lowepower.com I, AccessPermission:Read_Write, desc="Invalid in the caches."; 9212608Sjason@lowepower.com S, AccessPermission:Read_Only, desc="At least one cache has the blk"; 9312608Sjason@lowepower.com M, AccessPermission:Invalid, desc="A cache has the block in M"; 9412608Sjason@lowepower.com 9512608Sjason@lowepower.com // Transient states 9612608Sjason@lowepower.com S_D, AccessPermission:Busy, desc="Moving to S, but need data"; 9712608Sjason@lowepower.com 9812608Sjason@lowepower.com // Waiting for data from memory 9912608Sjason@lowepower.com S_m, AccessPermission:Read_Write, desc="In S waiting for mem"; 10012608Sjason@lowepower.com M_m, AccessPermission:Read_Write, desc="Moving to M waiting for mem"; 10112608Sjason@lowepower.com 10212608Sjason@lowepower.com // Waiting for write-ack from memory 10312608Sjason@lowepower.com MI_m, AccessPermission:Busy, desc="Moving to I waiting for ack"; 10412608Sjason@lowepower.com SS_m, AccessPermission:Busy, desc="Moving to S waiting for ack"; 10512608Sjason@lowepower.com } 10612608Sjason@lowepower.com 10712608Sjason@lowepower.com enumeration(Event, desc="Directory events") { 10812608Sjason@lowepower.com // Data requests from the cache 10912608Sjason@lowepower.com GetS, desc="Request for read-only data from cache"; 11012608Sjason@lowepower.com GetM, desc="Request for read-write data from cache"; 11112608Sjason@lowepower.com 11212608Sjason@lowepower.com // Writeback requests from the cache 11312608Sjason@lowepower.com PutSNotLast, desc="PutS and the block has other sharers"; 11412608Sjason@lowepower.com PutSLast, desc="PutS and the block has no other sharers"; 11512608Sjason@lowepower.com PutMOwner, desc="Dirty data writeback from the owner"; 11612608Sjason@lowepower.com PutMNonOwner, desc="Dirty data writeback from non-owner"; 11712608Sjason@lowepower.com 11812608Sjason@lowepower.com // Cache responses 11912608Sjason@lowepower.com Data, desc="Response to fwd request with data"; 12012608Sjason@lowepower.com 
12112608Sjason@lowepower.com // From Memory 12212608Sjason@lowepower.com MemData, desc="Data from memory"; 12312608Sjason@lowepower.com MemAck, desc="Ack from memory that write is complete"; 12412608Sjason@lowepower.com } 12512608Sjason@lowepower.com 12612608Sjason@lowepower.com // NOTE: We use a netdest for the sharers and the owner so we can simply 12712608Sjason@lowepower.com // copy the structure into the message we send as a response. 12812608Sjason@lowepower.com structure(Entry, desc="...", interface="AbstractEntry") { 12912608Sjason@lowepower.com State DirState, desc="Directory state"; 13012608Sjason@lowepower.com NetDest Sharers, desc="Sharers for this block"; 13112608Sjason@lowepower.com NetDest Owner, desc="Owner of this block"; 13212608Sjason@lowepower.com } 13312608Sjason@lowepower.com 13412608Sjason@lowepower.com Tick clockEdge(); 13512608Sjason@lowepower.com 13612608Sjason@lowepower.com // This either returns the valid directory entry, or, if it hasn't been 13712608Sjason@lowepower.com // allocated yet, this allocates the entry. This may save some host memory 13812608Sjason@lowepower.com // since this is lazily populated. 13912608Sjason@lowepower.com Entry getDirectoryEntry(Addr addr), return_by_pointer = "yes" { 14012608Sjason@lowepower.com Entry dir_entry := static_cast(Entry, "pointer", directory[addr]); 14112608Sjason@lowepower.com if (is_invalid(dir_entry)) { 14212608Sjason@lowepower.com // This first time we see this address allocate an entry for it. 
14312608Sjason@lowepower.com dir_entry := static_cast(Entry, "pointer", 14412608Sjason@lowepower.com directory.allocate(addr, new Entry)); 14512608Sjason@lowepower.com } 14612608Sjason@lowepower.com return dir_entry; 14712608Sjason@lowepower.com } 14812608Sjason@lowepower.com 14912608Sjason@lowepower.com /*************************************************************************/ 15012608Sjason@lowepower.com // Functions that we need to define/override to use our specific structures 15112608Sjason@lowepower.com // in this implementation. 15212608Sjason@lowepower.com // NOTE: we don't have TBE in this machine, so we don't need to pass it 15312608Sjason@lowepower.com // to these overridden functions. 15412608Sjason@lowepower.com 15512608Sjason@lowepower.com State getState(Addr addr) { 15612608Sjason@lowepower.com if (directory.isPresent(addr)) { 15712608Sjason@lowepower.com return getDirectoryEntry(addr).DirState; 15812608Sjason@lowepower.com } else { 15912608Sjason@lowepower.com return State:I; 16012608Sjason@lowepower.com } 16112608Sjason@lowepower.com } 16212608Sjason@lowepower.com 16312608Sjason@lowepower.com void setState(Addr addr, State state) { 16412608Sjason@lowepower.com if (directory.isPresent(addr)) { 16512608Sjason@lowepower.com if (state == State:M) { 16612608Sjason@lowepower.com DPRINTF(RubySlicc, "Owner %s\n", getDirectoryEntry(addr).Owner); 16712608Sjason@lowepower.com assert(getDirectoryEntry(addr).Owner.count() == 1); 16812608Sjason@lowepower.com assert(getDirectoryEntry(addr).Sharers.count() == 0); 16912608Sjason@lowepower.com } 17012608Sjason@lowepower.com getDirectoryEntry(addr).DirState := state; 17112608Sjason@lowepower.com if (state == State:I) { 17212608Sjason@lowepower.com assert(getDirectoryEntry(addr).Owner.count() == 0); 17312608Sjason@lowepower.com assert(getDirectoryEntry(addr).Sharers.count() == 0); 17412608Sjason@lowepower.com } 17512608Sjason@lowepower.com } 17612608Sjason@lowepower.com } 17712608Sjason@lowepower.com 
17812608Sjason@lowepower.com // This is really the access permissions of memory. 17912608Sjason@lowepower.com // TODO: I don't understand this at the directory. 18012608Sjason@lowepower.com AccessPermission getAccessPermission(Addr addr) { 18112608Sjason@lowepower.com if (directory.isPresent(addr)) { 18212608Sjason@lowepower.com Entry e := getDirectoryEntry(addr); 18312608Sjason@lowepower.com return Directory_State_to_permission(e.DirState); 18412608Sjason@lowepower.com } else { 18512608Sjason@lowepower.com return AccessPermission:NotPresent; 18612608Sjason@lowepower.com } 18712608Sjason@lowepower.com } 18812608Sjason@lowepower.com void setAccessPermission(Addr addr, State state) { 18912608Sjason@lowepower.com if (directory.isPresent(addr)) { 19012608Sjason@lowepower.com Entry e := getDirectoryEntry(addr); 19112608Sjason@lowepower.com e.changePermission(Directory_State_to_permission(state)); 19212608Sjason@lowepower.com } 19312608Sjason@lowepower.com } 19412608Sjason@lowepower.com 19512608Sjason@lowepower.com void functionalRead(Addr addr, Packet *pkt) { 19612608Sjason@lowepower.com functionalMemoryRead(pkt); 19712608Sjason@lowepower.com } 19812608Sjason@lowepower.com 19912608Sjason@lowepower.com // This returns the number of writes. 
So, if we write then return 1 20012608Sjason@lowepower.com int functionalWrite(Addr addr, Packet *pkt) { 20112608Sjason@lowepower.com if (functionalMemoryWrite(pkt)) { 20212608Sjason@lowepower.com return 1; 20312608Sjason@lowepower.com } else { 20412608Sjason@lowepower.com return 0; 20512608Sjason@lowepower.com } 20612608Sjason@lowepower.com } 20712608Sjason@lowepower.com 20812608Sjason@lowepower.com 20912608Sjason@lowepower.com /*************************************************************************/ 21012608Sjason@lowepower.com // Network ports 21112608Sjason@lowepower.com 21212608Sjason@lowepower.com out_port(forward_out, RequestMsg, forwardToCache); 21312608Sjason@lowepower.com out_port(response_out, ResponseMsg, responseToCache); 21412608Sjason@lowepower.com 21512608Sjason@lowepower.com in_port(memQueue_in, MemoryMsg, responseFromMemory) { 21612608Sjason@lowepower.com if (memQueue_in.isReady(clockEdge())) { 21712608Sjason@lowepower.com peek(memQueue_in, MemoryMsg) { 21812608Sjason@lowepower.com if (in_msg.Type == MemoryRequestType:MEMORY_READ) { 21912608Sjason@lowepower.com trigger(Event:MemData, in_msg.addr); 22012608Sjason@lowepower.com } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) { 22112608Sjason@lowepower.com trigger(Event:MemAck, in_msg.addr); 22212608Sjason@lowepower.com } else { 22312608Sjason@lowepower.com error("Invalid message"); 22412608Sjason@lowepower.com } 22512608Sjason@lowepower.com } 22612608Sjason@lowepower.com } 22712608Sjason@lowepower.com } 22812608Sjason@lowepower.com 22912608Sjason@lowepower.com in_port(response_in, ResponseMsg, responseFromCache) { 23012608Sjason@lowepower.com if (response_in.isReady(clockEdge())) { 23112608Sjason@lowepower.com peek(response_in, ResponseMsg) { 23212608Sjason@lowepower.com if (in_msg.Type == CoherenceResponseType:Data) { 23312608Sjason@lowepower.com trigger(Event:Data, in_msg.addr); 23412608Sjason@lowepower.com } else { 23512608Sjason@lowepower.com error("Unexpected message type."); 
23612608Sjason@lowepower.com } 23712608Sjason@lowepower.com } 23812608Sjason@lowepower.com } 23912608Sjason@lowepower.com } 24012608Sjason@lowepower.com 24112608Sjason@lowepower.com in_port(request_in, RequestMsg, requestFromCache) { 24212608Sjason@lowepower.com if (request_in.isReady(clockEdge())) { 24312608Sjason@lowepower.com peek(request_in, RequestMsg) { 24412608Sjason@lowepower.com Entry entry := getDirectoryEntry(in_msg.addr); 24512608Sjason@lowepower.com if (in_msg.Type == CoherenceRequestType:GetS) { 24612608Sjason@lowepower.com // NOTE: Since we don't have a TBE in this machine, there 24712608Sjason@lowepower.com // is no need to pass a TBE into trigger. Also, for the 24812608Sjason@lowepower.com // directory there is no cache entry. 24912608Sjason@lowepower.com trigger(Event:GetS, in_msg.addr); 25012608Sjason@lowepower.com } else if (in_msg.Type == CoherenceRequestType:GetM) { 25112608Sjason@lowepower.com trigger(Event:GetM, in_msg.addr); 25212608Sjason@lowepower.com } else if (in_msg.Type == CoherenceRequestType:PutS) { 25312608Sjason@lowepower.com assert(is_valid(entry)); 25412608Sjason@lowepower.com // If there is only a single sharer (i.e., the requestor) 25512608Sjason@lowepower.com if (entry.Sharers.count() == 1) { 25612608Sjason@lowepower.com assert(entry.Sharers.isElement(in_msg.Requestor)); 25712608Sjason@lowepower.com trigger(Event:PutSLast, in_msg.addr); 25812608Sjason@lowepower.com } else { 25912608Sjason@lowepower.com trigger(Event:PutSNotLast, in_msg.addr); 26012608Sjason@lowepower.com } 26112608Sjason@lowepower.com } else if (in_msg.Type == CoherenceRequestType:PutM) { 26212608Sjason@lowepower.com assert(is_valid(entry)); 26312608Sjason@lowepower.com if (entry.Owner.isElement(in_msg.Requestor)) { 26412608Sjason@lowepower.com trigger(Event:PutMOwner, in_msg.addr); 26512608Sjason@lowepower.com } else { 26612608Sjason@lowepower.com trigger(Event:PutMNonOwner, in_msg.addr); 26712608Sjason@lowepower.com } 26812608Sjason@lowepower.com } else { 
26912608Sjason@lowepower.com error("Unexpected message type."); 27012608Sjason@lowepower.com } 27112608Sjason@lowepower.com } 27212608Sjason@lowepower.com } 27312608Sjason@lowepower.com } 27412608Sjason@lowepower.com 27512608Sjason@lowepower.com 27612608Sjason@lowepower.com 27712608Sjason@lowepower.com /*************************************************************************/ 27812608Sjason@lowepower.com // Actions 27912608Sjason@lowepower.com 28012608Sjason@lowepower.com // Memory actions. 28112608Sjason@lowepower.com 28212608Sjason@lowepower.com action(sendMemRead, "r", desc="Send a memory read request") { 28312608Sjason@lowepower.com peek(request_in, RequestMsg) { 28412608Sjason@lowepower.com // Special function from AbstractController that will send a new 28512608Sjason@lowepower.com // packet out of the "Ruby" black box to the memory side. At some 28612608Sjason@lowepower.com // point the response will be on the memory queue. 28712608Sjason@lowepower.com // Like enqeue, this takes a latency for the request. 
28812608Sjason@lowepower.com queueMemoryRead(in_msg.Requestor, address, toMemLatency); 28912608Sjason@lowepower.com } 29012608Sjason@lowepower.com } 29112608Sjason@lowepower.com 29212608Sjason@lowepower.com action(sendDataToMem, "w", desc="Write data to memory") { 29312608Sjason@lowepower.com peek(request_in, RequestMsg) { 29412608Sjason@lowepower.com DPRINTF(RubySlicc, "Writing memory for %#x\n", address); 29512608Sjason@lowepower.com DPRINTF(RubySlicc, "Writing %s\n", in_msg.DataBlk); 29612608Sjason@lowepower.com queueMemoryWrite(in_msg.Requestor, address, toMemLatency, 29712608Sjason@lowepower.com in_msg.DataBlk); 29812608Sjason@lowepower.com } 29912608Sjason@lowepower.com } 30012608Sjason@lowepower.com 30112608Sjason@lowepower.com action(sendRespDataToMem, "rw", desc="Write data to memory from resp") { 30212608Sjason@lowepower.com peek(response_in, ResponseMsg) { 30312608Sjason@lowepower.com DPRINTF(RubySlicc, "Writing memory for %#x\n", address); 30412608Sjason@lowepower.com DPRINTF(RubySlicc, "Writing %s\n", in_msg.DataBlk); 30512608Sjason@lowepower.com queueMemoryWrite(in_msg.Sender, address, toMemLatency, 30612608Sjason@lowepower.com in_msg.DataBlk); 30712608Sjason@lowepower.com } 30812608Sjason@lowepower.com } 30912608Sjason@lowepower.com 31012608Sjason@lowepower.com // Sharer/owner actions 31112608Sjason@lowepower.com 31212608Sjason@lowepower.com action(addReqToSharers, "aS", desc="Add requestor to sharer list") { 31312608Sjason@lowepower.com peek(request_in, RequestMsg) { 31412608Sjason@lowepower.com getDirectoryEntry(address).Sharers.add(in_msg.Requestor); 31512608Sjason@lowepower.com } 31612608Sjason@lowepower.com } 31712608Sjason@lowepower.com 31812608Sjason@lowepower.com action(setOwner, "sO", desc="Set the owner") { 31912608Sjason@lowepower.com peek(request_in, RequestMsg) { 32012608Sjason@lowepower.com getDirectoryEntry(address).Owner.add(in_msg.Requestor); 32112608Sjason@lowepower.com } 32212608Sjason@lowepower.com } 32312608Sjason@lowepower.com 
32412608Sjason@lowepower.com action(addOwnerToSharers, "oS", desc="Add the owner to sharers") { 32512608Sjason@lowepower.com Entry e := getDirectoryEntry(address); 32612608Sjason@lowepower.com assert(e.Owner.count() == 1); 32712608Sjason@lowepower.com e.Sharers.addNetDest(e.Owner); 32812608Sjason@lowepower.com } 32912608Sjason@lowepower.com 33012608Sjason@lowepower.com action(removeReqFromSharers, "rS", desc="Remove requestor from sharers") { 33112608Sjason@lowepower.com peek(request_in, RequestMsg) { 33212608Sjason@lowepower.com getDirectoryEntry(address).Sharers.remove(in_msg.Requestor); 33312608Sjason@lowepower.com } 33412608Sjason@lowepower.com } 33512608Sjason@lowepower.com 33612608Sjason@lowepower.com action(clearSharers, "cS", desc="Clear the sharer list") { 33712608Sjason@lowepower.com getDirectoryEntry(address).Sharers.clear(); 33812608Sjason@lowepower.com } 33912608Sjason@lowepower.com 34012608Sjason@lowepower.com action(clearOwner, "cO", desc="Clear the owner") { 34112608Sjason@lowepower.com getDirectoryEntry(address).Owner.clear(); 34212608Sjason@lowepower.com } 34312608Sjason@lowepower.com 34412608Sjason@lowepower.com // Invalidates and forwards 34512608Sjason@lowepower.com 34612608Sjason@lowepower.com action(sendInvToSharers, "i", desc="Send invalidate to all sharers") { 34712608Sjason@lowepower.com peek(request_in, RequestMsg) { 34812608Sjason@lowepower.com enqueue(forward_out, RequestMsg, 1) { 34912608Sjason@lowepower.com out_msg.addr := address; 35012608Sjason@lowepower.com out_msg.Type := CoherenceRequestType:Inv; 35112608Sjason@lowepower.com out_msg.Requestor := in_msg.Requestor; 35212608Sjason@lowepower.com out_msg.Destination := getDirectoryEntry(address).Sharers; 35312608Sjason@lowepower.com out_msg.MessageSize := MessageSizeType:Control; 35412608Sjason@lowepower.com } 35512608Sjason@lowepower.com } 35612608Sjason@lowepower.com } 35712608Sjason@lowepower.com 35812608Sjason@lowepower.com action(sendFwdGetS, "fS", desc="Send forward getS to 
owner") { 35912608Sjason@lowepower.com assert(getDirectoryEntry(address).Owner.count() == 1); 36012608Sjason@lowepower.com peek(request_in, RequestMsg) { 36112608Sjason@lowepower.com enqueue(forward_out, RequestMsg, 1) { 36212608Sjason@lowepower.com out_msg.addr := address; 36312608Sjason@lowepower.com out_msg.Type := CoherenceRequestType:GetS; 36412608Sjason@lowepower.com out_msg.Requestor := in_msg.Requestor; 36512608Sjason@lowepower.com out_msg.Destination := getDirectoryEntry(address).Owner; 36612608Sjason@lowepower.com out_msg.MessageSize := MessageSizeType:Control; 36712608Sjason@lowepower.com } 36812608Sjason@lowepower.com } 36912608Sjason@lowepower.com } 37012608Sjason@lowepower.com 37112608Sjason@lowepower.com action(sendFwdGetM, "fM", desc="Send forward getM to owner") { 37212608Sjason@lowepower.com assert(getDirectoryEntry(address).Owner.count() == 1); 37312608Sjason@lowepower.com peek(request_in, RequestMsg) { 37412608Sjason@lowepower.com enqueue(forward_out, RequestMsg, 1) { 37512608Sjason@lowepower.com out_msg.addr := address; 37612608Sjason@lowepower.com out_msg.Type := CoherenceRequestType:GetM; 37712608Sjason@lowepower.com out_msg.Requestor := in_msg.Requestor; 37812608Sjason@lowepower.com out_msg.Destination := getDirectoryEntry(address).Owner; 37912608Sjason@lowepower.com out_msg.MessageSize := MessageSizeType:Control; 38012608Sjason@lowepower.com } 38112608Sjason@lowepower.com } 38212608Sjason@lowepower.com } 38312608Sjason@lowepower.com 38412608Sjason@lowepower.com // Responses to requests 38512608Sjason@lowepower.com 38612608Sjason@lowepower.com // This also needs to send along the number of sharers!!!! 38712608Sjason@lowepower.com action(sendDataToReq, "d", desc="Send data from memory to requestor. 
") { 38812608Sjason@lowepower.com //"May need to send sharer number, too") { 38912608Sjason@lowepower.com peek(memQueue_in, MemoryMsg) { 39012608Sjason@lowepower.com enqueue(response_out, ResponseMsg, 1) { 39112608Sjason@lowepower.com out_msg.addr := address; 39212608Sjason@lowepower.com out_msg.Type := CoherenceResponseType:Data; 39312608Sjason@lowepower.com out_msg.Sender := machineID; 39412608Sjason@lowepower.com out_msg.Destination.add(in_msg.OriginalRequestorMachId); 39512608Sjason@lowepower.com out_msg.DataBlk := in_msg.DataBlk; 39612608Sjason@lowepower.com out_msg.MessageSize := MessageSizeType:Data; 39712608Sjason@lowepower.com Entry e := getDirectoryEntry(address); 39812608Sjason@lowepower.com // Only need to include acks if we are the owner. 39912608Sjason@lowepower.com if (e.Owner.isElement(in_msg.OriginalRequestorMachId)) { 40012608Sjason@lowepower.com out_msg.Acks := e.Sharers.count(); 40112608Sjason@lowepower.com } else { 40212608Sjason@lowepower.com out_msg.Acks := 0; 40312608Sjason@lowepower.com } 40412608Sjason@lowepower.com assert(out_msg.Acks >= 0); 40512608Sjason@lowepower.com } 40612608Sjason@lowepower.com } 40712608Sjason@lowepower.com } 40812608Sjason@lowepower.com 40912608Sjason@lowepower.com action(sendPutAck, "a", desc="Send the put ack") { 41012608Sjason@lowepower.com peek(request_in, RequestMsg) { 41112608Sjason@lowepower.com enqueue(forward_out, RequestMsg, 1) { 41212608Sjason@lowepower.com out_msg.addr := address; 41312608Sjason@lowepower.com out_msg.Type := CoherenceRequestType:PutAck; 41412608Sjason@lowepower.com out_msg.Requestor := machineID; 41512608Sjason@lowepower.com out_msg.Destination.add(in_msg.Requestor); 41612608Sjason@lowepower.com out_msg.MessageSize := MessageSizeType:Control; 41712608Sjason@lowepower.com } 41812608Sjason@lowepower.com } 41912608Sjason@lowepower.com } 42012608Sjason@lowepower.com 42112608Sjason@lowepower.com // Queue management 42212608Sjason@lowepower.com 42312608Sjason@lowepower.com 
action(popResponseQueue, "pR", desc="Pop the response queue") { 42412608Sjason@lowepower.com response_in.dequeue(clockEdge()); 42512608Sjason@lowepower.com } 42612608Sjason@lowepower.com 42712608Sjason@lowepower.com action(popRequestQueue, "pQ", desc="Pop the request queue") { 42812608Sjason@lowepower.com request_in.dequeue(clockEdge()); 42912608Sjason@lowepower.com } 43012608Sjason@lowepower.com 43112608Sjason@lowepower.com action(popMemQueue, "pM", desc="Pop the memory queue") { 43212608Sjason@lowepower.com memQueue_in.dequeue(clockEdge()); 43312608Sjason@lowepower.com } 43412608Sjason@lowepower.com 43512608Sjason@lowepower.com // Stalling actions 43612608Sjason@lowepower.com action(stall, "z", desc="Stall the incoming request") { 43712608Sjason@lowepower.com // Do nothing. 43812608Sjason@lowepower.com } 43912608Sjason@lowepower.com 44012608Sjason@lowepower.com 44112608Sjason@lowepower.com /*************************************************************************/ 44212608Sjason@lowepower.com // transitions 44312608Sjason@lowepower.com 44412608Sjason@lowepower.com transition({I, S}, GetS, S_m) { 44512608Sjason@lowepower.com sendMemRead; 44612608Sjason@lowepower.com addReqToSharers; 44712608Sjason@lowepower.com popRequestQueue; 44812608Sjason@lowepower.com } 44912608Sjason@lowepower.com 45012608Sjason@lowepower.com transition(I, {PutSNotLast, PutSLast, PutMNonOwner}) { 45112608Sjason@lowepower.com sendPutAck; 45212608Sjason@lowepower.com popRequestQueue; 45312608Sjason@lowepower.com } 45412608Sjason@lowepower.com 45512608Sjason@lowepower.com transition(S_m, MemData, S) { 45612608Sjason@lowepower.com sendDataToReq; 45712608Sjason@lowepower.com popMemQueue; 45812608Sjason@lowepower.com } 45912608Sjason@lowepower.com 46012608Sjason@lowepower.com transition(I, GetM, M_m) { 46112608Sjason@lowepower.com sendMemRead; 46212608Sjason@lowepower.com setOwner; 46312608Sjason@lowepower.com popRequestQueue; 46412608Sjason@lowepower.com } 46512608Sjason@lowepower.com 
46612608Sjason@lowepower.com transition(M_m, MemData, M) { 46712608Sjason@lowepower.com sendDataToReq; 46812608Sjason@lowepower.com clearSharers; // NOTE: This isn't *required* in some cases. 46912608Sjason@lowepower.com popMemQueue; 47012608Sjason@lowepower.com } 47112608Sjason@lowepower.com 47212608Sjason@lowepower.com transition(S, GetM, M_m) { 47312608Sjason@lowepower.com sendMemRead; 47412608Sjason@lowepower.com removeReqFromSharers; 47512608Sjason@lowepower.com sendInvToSharers; 47612608Sjason@lowepower.com setOwner; 47712608Sjason@lowepower.com popRequestQueue; 47812608Sjason@lowepower.com } 47912608Sjason@lowepower.com 48012608Sjason@lowepower.com transition({S, S_D, SS_m, S_m}, {PutSNotLast, PutMNonOwner}) { 48112608Sjason@lowepower.com removeReqFromSharers; 48212608Sjason@lowepower.com sendPutAck; 48312608Sjason@lowepower.com popRequestQueue; 48412608Sjason@lowepower.com } 48512608Sjason@lowepower.com 48612608Sjason@lowepower.com transition(S, PutSLast, I) { 48712608Sjason@lowepower.com removeReqFromSharers; 48812608Sjason@lowepower.com sendPutAck; 48912608Sjason@lowepower.com popRequestQueue; 49012608Sjason@lowepower.com } 49112608Sjason@lowepower.com 49212608Sjason@lowepower.com transition(M, GetS, S_D) { 49312608Sjason@lowepower.com sendFwdGetS; 49412608Sjason@lowepower.com addReqToSharers; 49512608Sjason@lowepower.com addOwnerToSharers; 49612608Sjason@lowepower.com clearOwner; 49712608Sjason@lowepower.com popRequestQueue; 49812608Sjason@lowepower.com } 49912608Sjason@lowepower.com 50012608Sjason@lowepower.com transition(M, GetM) { 50112608Sjason@lowepower.com sendFwdGetM; 50212608Sjason@lowepower.com clearOwner; 50312608Sjason@lowepower.com setOwner; 50412608Sjason@lowepower.com popRequestQueue; 50512608Sjason@lowepower.com } 50612608Sjason@lowepower.com 50712608Sjason@lowepower.com transition({M, M_m, MI_m}, {PutSNotLast, PutSLast, PutMNonOwner}) { 50812608Sjason@lowepower.com sendPutAck; 50912608Sjason@lowepower.com popRequestQueue; 
51012608Sjason@lowepower.com } 51112608Sjason@lowepower.com 51212608Sjason@lowepower.com transition(M, PutMOwner, MI_m) { 51312608Sjason@lowepower.com sendDataToMem; 51412608Sjason@lowepower.com clearOwner; 51512608Sjason@lowepower.com sendPutAck; 51612608Sjason@lowepower.com popRequestQueue; 51712608Sjason@lowepower.com } 51812608Sjason@lowepower.com 51912608Sjason@lowepower.com transition(MI_m, MemAck, I) { 52012608Sjason@lowepower.com popMemQueue; 52112608Sjason@lowepower.com } 52212608Sjason@lowepower.com 52312608Sjason@lowepower.com transition(S_D, {GetS, GetM}) { 52412608Sjason@lowepower.com stall; 52512608Sjason@lowepower.com } 52612608Sjason@lowepower.com 52712608Sjason@lowepower.com transition(S_D, PutSLast) { 52812608Sjason@lowepower.com removeReqFromSharers; 52912608Sjason@lowepower.com sendPutAck; 53012608Sjason@lowepower.com popRequestQueue; 53112608Sjason@lowepower.com } 53212608Sjason@lowepower.com 53312608Sjason@lowepower.com transition(S_D, Data, SS_m) { 53412608Sjason@lowepower.com sendRespDataToMem; 53512608Sjason@lowepower.com popResponseQueue; 53612608Sjason@lowepower.com } 53712608Sjason@lowepower.com 53812608Sjason@lowepower.com transition(SS_m, MemAck, S) { 53912608Sjason@lowepower.com popMemQueue; 54012608Sjason@lowepower.com } 54112608Sjason@lowepower.com 54212608Sjason@lowepower.com // If we get another request for a block that's waiting on memory, 54312608Sjason@lowepower.com // stall that request. 54412608Sjason@lowepower.com transition({MI_m, SS_m, S_m, M_m}, {GetS, GetM}) { 54512608Sjason@lowepower.com stall; 54612608Sjason@lowepower.com } 54712608Sjason@lowepower.com 54812608Sjason@lowepower.com} 549