/*
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Authors: Brad Beckmann
 *          Tushar Krishna
 */


// Synthetic-traffic "cache" controller for the Garnet standalone network
// model.  It holds no real cache state (the only state is I); its sole job
// is to translate incoming CPU requests into network messages on three
// virtual networks so that Garnet can be exercised in isolation.
machine(MachineType:L1Cache, "Garnet_standalone L1 Cache")
    : Sequencer * sequencer;        // CPU-side port that delivers requests and
                                    // receives read/write completion callbacks
      Cycles issue_latency := 2;    // enqueue latency applied to every
                                    // outgoing network message

      // NETWORK BUFFERS
      // One outgoing buffer per virtual network; each traffic class is
      // injected on its own vnet (0 = request, 1 = forward, 2 = response).
      MessageBuffer * requestFromCache, network="To", virtual_network="0",
            vnet_type = "request";
      MessageBuffer * forwardFromCache, network="To", virtual_network="1",
            vnet_type = "forward";
      MessageBuffer * responseFromCache, network="To", virtual_network="2",
            vnet_type = "response";

      // Queue of CPU requests delivered by the sequencer.
      MessageBuffer * mandatoryQueue;
{
    // STATES
    // A single state: this controller never caches anything, so every
    // address is permanently Invalid/Not Present.
    state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
        I, AccessPermission:Invalid, desc="Not Present/Invalid";
    }

    // EVENTS
    // Each event corresponds to one synthetic traffic class (see the
    // mapping from RubyRequestType below).
    enumeration(Event, desc="Cache events") {
        // From processor
        Request, desc="Request from Garnet_standalone";
        Forward, desc="Forward from Garnet_standalone";
        Response, desc="Response from Garnet_standalone";
    }

    // STRUCTURE DEFINITIONS
    // Placeholder data block passed to the sequencer callbacks; no real
    // data is ever stored or returned.
    DataBlock dummyData;

    // CacheEntry (required by the SLICC interface; never actually allocated)
    structure(Entry, desc="...", interface="AbstractCacheEntry") {
        State CacheState, desc="cache state";
        DataBlock DataBlk, desc="Data in the block";
    }

    // FUNCTIONS
    // Provided externally by the generated controller / AbstractController.
    Tick clockEdge();
    MachineID mapAddressToMachine(Addr addr, MachineType mtype);

    // cpu/testers/networktest/networktest.cc generates packets of the type
    // ReadReq, INST_FETCH, and WriteReq.
    // These are converted to LD, IFETCH and ST by mem/ruby/system/RubyPort.cc.
    // These are then sent to the sequencer, which sends them here.
    // Garnet_standalone-cache.sm tags LD, IFETCH and ST as Request, Forward,
    // and Response Events respectively, which are then injected into
    // virtual networks 0, 1 and 2 respectively.
    // This models traffic of different types within the network.
    //
    // Note that requests and forwards are MessageSizeType:Control,
    // while responses are MessageSizeType:Data.
    //
    // Map the incoming CPU request type onto the synthetic traffic class it
    // models: LD -> Request (vnet 0), IFETCH -> Forward (vnet 1),
    // ST -> Response (vnet 2).  Any other request type is a caller error.
    Event mandatory_request_type_to_event(RubyRequestType type) {
        if (type == RubyRequestType:LD) {
            return Event:Request;
        } else if (type == RubyRequestType:IFETCH) {
            return Event:Forward;
        } else if (type == RubyRequestType:ST) {
            return Event:Response;
        } else {
            error("Invalid RubyRequestType");
        }
    }


    // Only one state exists, so every address is always I.
    State getState(Entry cache_entry, Addr addr) {
        return State:I;
    }

    // No state is tracked, so setting it is a no-op.
    void setState(Entry cache_entry, Addr addr, State state) {

    }

    // Nothing is ever cached here, so no address is present.
    AccessPermission getAccessPermission(Addr addr) {
        return AccessPermission:NotPresent;
    }

    // No permission tracking is needed; intentionally empty.
    void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    }

    // No entries are ever allocated; always return the null pointer (OOD).
    Entry getCacheEntry(Addr address), return_by_pointer="yes" {
        return OOD;
    }

    // Functional accesses are unsupported in this traffic-only protocol.
    void functionalRead(Addr addr, Packet *pkt) {
        error("Garnet_standalone does not support functional read.");
    }

    int functionalWrite(Addr addr, Packet *pkt) {
        error("Garnet_standalone does not support functional write.");
    }

    // NETWORK PORTS

    // All three traffic classes reuse the RequestMsg type; they differ only
    // in the virtual network and MessageSizeType used at injection.
    out_port(requestNetwork_out, RequestMsg, requestFromCache);
    out_port(forwardNetwork_out, RequestMsg, forwardFromCache);
    out_port(responseNetwork_out, RequestMsg, responseFromCache);

    // Mandatory Queue: trigger one event per CPU request, classified by
    // request type (see mandatory_request_type_to_event above).
    in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
        if (mandatoryQueue_in.isReady(clockEdge())) {
            peek(mandatoryQueue_in, RubyRequest) {
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, getCacheEntry(in_msg.LineAddress));
            }
        }
    }

    // ACTIONS

    // The destination directory of each packet is derived from its address;
    // mapAddressToMachine(addr, MachineType:Directory) retrieves it.

    // Inject a Control-sized message on vnet 0 (Request class), addressed to
    // the directory that owns this address.
    action(a_issueRequest, "a", desc="Issue a request") {
        enqueue(requestNetwork_out, RequestMsg, issue_latency) {
            out_msg.addr := address;
            out_msg.Type := CoherenceRequestType:MSG;
            out_msg.Requestor := machineID;
            out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));

            // To send broadcasts in vnet0 (to emulate broadcast-based protocols),
            // replace the above line by the following:
            // out_msg.Destination := broadcast(MachineType:Directory);

            out_msg.MessageSize := MessageSizeType:Control;
        }
    }

    // Inject a Control-sized message on vnet 1 (Forward class).
    action(b_issueForward, "b", desc="Issue a forward") {
        enqueue(forwardNetwork_out, RequestMsg, issue_latency) {
            out_msg.addr := address;
            out_msg.Type := CoherenceRequestType:MSG;
            out_msg.Requestor := machineID;
            out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
            out_msg.MessageSize := MessageSizeType:Control;
        }
    }

    // Inject a Data-sized message on vnet 2 (Response class) — the only
    // class that models payload-carrying traffic.
    action(c_issueResponse, "c", desc="Issue a response") {
        enqueue(responseNetwork_out, RequestMsg, issue_latency) {
            out_msg.addr := address;
            out_msg.Type := CoherenceRequestType:MSG;
            out_msg.Requestor := machineID;
            out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
            out_msg.MessageSize := MessageSizeType:Data;
        }
    }

    action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
        mandatoryQueue_in.dequeue(clockEdge());
    }

    // Complete the CPU request immediately with dummy data: the injected
    // packet is fire-and-forget, so the sequencer never waits on the network.
    action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
        sequencer.readCallback(address, dummyData);
    }

    action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
        sequencer.writeCallback(address, dummyData);
    }


    // TRANSITIONS

    // The sequencer hit callback is performed after injecting the packet.
    // The goal of the Garnet_standalone protocol is only to inject packets
    // into the network, not to keep track of them via TBEs.

    // ST: complete the store locally, then inject a Data message on vnet 2.
    transition(I, Response) {
        s_store_hit;
        c_issueResponse;
        m_popMandatoryQueue;
    }

    // LD: complete the load locally, then inject a Control message on vnet 0.
    transition(I, Request) {
        r_load_hit;
        a_issueRequest;
        m_popMandatoryQueue;
    }

    // IFETCH: complete like a load, then inject a Control message on vnet 1.
    transition(I, Forward) {
        r_load_hit;
        b_issueForward;
        m_popMandatoryQueue;
    }

}