// MESI_Three_Level-L1cache.sm (14300:22183ae13998)
/*
 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

machine(MachineType:L1Cache, "MESI Directory L1 Cache CMP")
 : CacheMemory * cache;
   int l2_select_num_bits;
   Cycles l1_request_latency := 2;
   Cycles l1_response_latency := 2;
   Cycles to_l2_latency := 1;

   // Message Buffers between the L1 and the L0 Cache
   // From the L1 cache to the L0 cache
   MessageBuffer * bufferToL0, network="To";

   // From the L0 cache to the L1 cache
   MessageBuffer * bufferFromL0, network="From";

   // Message queue from this L1 cache TO the network / L2
   MessageBuffer * requestToL2, network="To", virtual_network="0",
        vnet_type="request";

   MessageBuffer * responseToL2, network="To", virtual_network="1",
        vnet_type="response";
   MessageBuffer * unblockToL2, network="To", virtual_network="2",
        vnet_type="unblock";

   // To this L1 cache FROM the network / L2
   MessageBuffer * requestFromL2, network="From", virtual_network="2",
        vnet_type="request";
   MessageBuffer * responseFromL2, network="From", virtual_network="1",
        vnet_type="response";
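
   // A note on the declarations above: requests this L1 issues travel on
   // virtual network 0, data/ack responses travel on vnet 1 in both
   // directions, and vnet 2 carries unblocks toward the L2 as well as
   // requests the L2 forwards down to this L1. Separating request,
   // response, and unblock traffic onto distinct vnets is the usual Ruby
   // way of breaking message-dependence cycles that could deadlock.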
57
58{
59 // STATES
60 state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
61 // Base states
62 I, AccessPermission:Invalid, desc="a L1 cache entry Idle";
63 S, AccessPermission:Read_Only, desc="a L1 cache entry Shared";
64 SS, AccessPermission:Read_Only, desc="a L1 cache entry Shared";
65 E, AccessPermission:Read_Only, desc="a L1 cache entry Exclusive";
66 EE, AccessPermission:Read_Write, desc="a L1 cache entry Exclusive";
67 M, AccessPermission:Maybe_Stale, desc="a L1 cache entry Modified", format="!b";
68 MM, AccessPermission:Read_Write, desc="a L1 cache entry Modified", format="!b";
69
70 // Transient States
71 IS, AccessPermission:Busy, desc="L1 idle, issued GETS, have not seen response yet";
72 IM, AccessPermission:Busy, desc="L1 idle, issued GETX, have not seen response yet";
73 SM, AccessPermission:Read_Only, desc="L1 idle, issued GETX, have not seen response yet";
74 IS_I, AccessPermission:Busy, desc="L1 idle, issued GETS, saw Inv before data because directory doesn't block on GETS hit";
75 M_I, AccessPermission:Busy, desc="L1 replacing, waiting for ACK";
76 SINK_WB_ACK, AccessPermission:Busy, desc="This is to sink WB_Acks from L2";
77
78 // For all of the following states, invalidate
79 // message has been sent to L0 cache. The response
80 // from the L0 cache has not been seen yet.
81 S_IL0, AccessPermission:Busy;
82 E_IL0, AccessPermission:Busy;
83 M_IL0, AccessPermission:Busy;
84 MM_IL0, AccessPermission:Read_Write;
85 SM_IL0, AccessPermission:Busy;
86 }
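
  // A note on the doubled state names above: the single-letter base states
  // (S, E, M) mean the L0 cache above may also hold the line, while the
  // doubled states (SS, EE, MM) mean this L1 holds the only copy below the
  // L2. This is an inference from inL0Cache() and the transitions below
  // (e.g. E_IL0 + L0_Ack -> EE), not an authoritative statement of the
  // protocol's intent.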

  // EVENTS
  enumeration(Event, desc="Cache events") {
    // Requests from the L0 cache
    Load, desc="Load request";
    Store, desc="Store request";
    WriteBack, desc="Writeback request";

    // Responses from the L0 Cache
    // L0 cache received the invalidation message
    // and has sent the data.
    L0_DataAck;

    Inv, desc="Invalidate request from L2 bank";

    // Internally generated requests
    // Invalidate the line in L0 due to this cache's own requirements
    L0_Invalidate_Own;
    // Invalidate the line in L0 due to some other cache's requirements
    L0_Invalidate_Else;
    // Invalidate the line in this cache due to someone else's request
    // or because space is needed.
    L1_Replacement;

    // other requests
    Fwd_GETX, desc="GETX from other processor";
    Fwd_GETS, desc="GETS from other processor";

    Data, desc="Data for processor";
    Data_Exclusive, desc="Exclusive data for processor";
    DataS_fromL1, desc="data for GETS request, need to unblock directory";
    Data_all_Acks, desc="Data for processor, all acks";

    L0_Ack, desc="Ack for processor";
    Ack, desc="Ack for processor";
    Ack_all, desc="Last ack for processor";

    WB_Ack, desc="Ack for replacement";
  }

  // TYPES

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
    State CacheState, desc="cache state";
    DataBlock DataBlk, desc="data for the block";
    bool Dirty, default="false", desc="data is dirty";
  }

  // TBE fields
  structure(TBE, desc="...") {
    Addr addr, desc="Physical address for this TBE";
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="Buffer for the data block";
    bool Dirty, default="false", desc="data is dirty";
    int pendingAcks, default="0", desc="number of pending acks";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }

  TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";

  int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
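
  // How the l2_select_* parameters pick an L2 bank (a worked example, not
  // taken from this file): mapAddressToRange() below uses address bits
  // [l2_select_low_bit, l2_select_low_bit + l2_select_num_bits) to choose
  // the L2 bank. With 64-byte blocks, getBlockSizeBits() = 6, so with
  // l2_select_num_bits = 2 (four banks) address bits 7:6 select the bank;
  // e.g. address 0x1C0 has bits 7:6 = 0b11 and maps to bank 3.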

  Tick clockEdge();
  Cycles ticksToCycles(Tick t);
  void set_cache_entry(AbstractCacheEntry a);
  void unset_cache_entry();
  void set_tbe(TBE a);
  void unset_tbe();
  void wakeUpBuffers(Addr a);
  void wakeUpAllBuffers(Addr a);
  void profileMsgDelay(int virtualNetworkType, Cycles c);

  // inclusive cache returns L1 entries only
  Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
    Entry cache_entry := static_cast(Entry, "pointer", cache[addr]);
    return cache_entry;
  }

  State getState(TBE tbe, Entry cache_entry, Addr addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }

  void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
    // MUST CHANGE
    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
      return L1Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if (is_valid(cache_entry)) {
      DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(cache_entry.CacheState));
      return L1Cache_State_to_permission(cache_entry.CacheState);
    }

    DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
    return AccessPermission:NotPresent;
  }

  void functionalRead(Addr addr, Packet *pkt) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      testAndRead(addr, tbe.DataBlk, pkt);
    } else {
      testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
    }
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      num_functional_writes := num_functional_writes +
        testAndWrite(addr, tbe.DataBlk, pkt);
      return num_functional_writes;
    }

    num_functional_writes := num_functional_writes +
      testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
    return num_functional_writes;
  }

  void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L1Cache_State_to_permission(state));
    }
  }

  Event mandatory_request_type_to_event(CoherenceClass type) {
    if (type == CoherenceClass:GETS) {
      return Event:Load;
    } else if ((type == CoherenceClass:GETX) ||
               (type == CoherenceClass:UPGRADE)) {
      return Event:Store;
    } else if (type == CoherenceClass:PUTX) {
      return Event:WriteBack;
    } else {
      error("Invalid RequestType");
    }
  }

  int getPendingAcks(TBE tbe) {
    return tbe.pendingAcks;
  }

  bool inL0Cache(State state) {
    if (state == State:S || state == State:E || state == State:M ||
        state == State:S_IL0 || state == State:E_IL0 ||
        state == State:M_IL0 || state == State:SM_IL0) {
      return true;
    }

    return false;
  }
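
  // inL0Cache() is how this L1 tracks the L0 inclusively without separate
  // presence bits: any line whose L1 state is one of the single-letter
  // base states (or their *_IL0 transients) may still be cached in the L0,
  // so external requests for such lines must first invalidate the L0
  // (L0_Invalidate_Else) before being serviced here.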

  out_port(requestNetwork_out, RequestMsg, requestToL2);
  out_port(responseNetwork_out, ResponseMsg, responseToL2);
  out_port(unblockNetwork_out, ResponseMsg, unblockToL2);
  out_port(bufferToL0_out, CoherenceMsg, bufferToL0);

  // Response From the L2 Cache to this L1 cache
  in_port(responseNetwork_in, ResponseMsg, responseFromL2, rank = 3) {
    if (responseNetwork_in.isReady(clockEdge())) {
      peek(responseNetwork_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          if ((getState(tbe, cache_entry, in_msg.addr) == State:IS ||
               getState(tbe, cache_entry, in_msg.addr) == State:IS_I) &&
              machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {

            trigger(Event:DataS_fromL1, in_msg.addr, cache_entry, tbe);

          } else if ((getPendingAcks(tbe) - in_msg.AckCount) == 0) {
            trigger(Event:Data_all_Acks, in_msg.addr, cache_entry, tbe);
          } else {
            trigger(Event:Data, in_msg.addr, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceResponseType:ACK) {
          if ((getPendingAcks(tbe) - in_msg.AckCount) == 0) {
            trigger(Event:Ack_all, in_msg.addr, cache_entry, tbe);
          } else {
            trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
          trigger(Event:WB_Ack, in_msg.addr, cache_entry, tbe);
        } else {
          error("Invalid L1 response type");
        }
      }
    }
  }

  // Request to this L1 cache from the shared L2
  in_port(requestNetwork_in, RequestMsg, requestFromL2, rank = 2) {
    if (requestNetwork_in.isReady(clockEdge())) {
      peek(requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if (in_msg.Type == CoherenceRequestType:INV) {
          if (is_valid(cache_entry) && inL0Cache(cache_entry.CacheState)) {
            trigger(Event:L0_Invalidate_Else, in_msg.addr,
                    cache_entry, tbe);
          } else {
            trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceRequestType:GETX ||
                   in_msg.Type == CoherenceRequestType:UPGRADE) {
          if (is_valid(cache_entry) && inL0Cache(cache_entry.CacheState)) {
            trigger(Event:L0_Invalidate_Else, in_msg.addr,
                    cache_entry, tbe);
          } else {
            trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (is_valid(cache_entry) && inL0Cache(cache_entry.CacheState)) {
            trigger(Event:L0_Invalidate_Else, in_msg.addr,
                    cache_entry, tbe);
          } else {
            trigger(Event:Fwd_GETS, in_msg.addr, cache_entry, tbe);
          }
        } else {
          error("Invalid forwarded request type");
        }
      }
    }
  }
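
  // The in_port ranks order the controller's polling: as the numbering
  // suggests, responses (rank 3) are considered before forwarded requests
  // (rank 2), which in turn beat L0 requests (rank 0). Draining responses
  // first is the usual way to guarantee forward progress, since a response
  // can complete a pending transaction that a newly arriving request would
  // otherwise stall behind.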

  // Requests to this L1 cache from the L0 cache.
  in_port(messageBufferFromL0_in, CoherenceMsg, bufferFromL0, rank = 0) {
    if (messageBufferFromL0_in.isReady(clockEdge())) {
      peek(messageBufferFromL0_in, CoherenceMsg) {
        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if (in_msg.Class == CoherenceClass:INV_DATA) {
          trigger(Event:L0_DataAck, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Class == CoherenceClass:INV_ACK) {
          trigger(Event:L0_Ack, in_msg.addr, cache_entry, tbe);
        } else {
          if (is_valid(cache_entry)) {
            trigger(mandatory_request_type_to_event(in_msg.Class),
                    in_msg.addr, cache_entry, tbe);
          } else {
            if (cache.cacheAvail(in_msg.addr)) {
              // The L1 doesn't have the line, but we have space for it
              // in the L1. Let's see if the L2 has it.
              trigger(mandatory_request_type_to_event(in_msg.Class),
                      in_msg.addr, cache_entry, tbe);
            } else {
              // No room in the L1, so we need to make room for it.
              Addr victim := cache.cacheProbe(in_msg.addr);
              Entry victim_entry := getCacheEntry(victim);
              TBE victim_tbe := TBEs[victim];

              if (is_valid(victim_entry) && inL0Cache(victim_entry.CacheState)) {
                trigger(Event:L0_Invalidate_Own,
                        victim, victim_entry, victim_tbe);
              } else {
                trigger(Event:L1_Replacement,
                        victim, victim_entry, victim_tbe);
              }
            }
          }
        }
      }
    }
  }
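
  // Replacement flow in the L0 in_port above, in words: a request that
  // misses with no free way first probes for a victim line. If the victim
  // may still be cached in the L0 above (inL0Cache), it must be recalled
  // first (L0_Invalidate_Own); otherwise it is replaced directly
  // (L1_Replacement). The original L0 request stays in its buffer and is
  // re-examined once the victim's transition completes.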

  // ACTIONS
  action(a_issueGETS, "a", desc="Issue GETS") {
    peek(messageBufferFromL0_in, CoherenceMsg) {
      enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETS;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                l2_select_low_bit, l2_select_num_bits, clusterID));
        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    peek(messageBufferFromL0_in, CoherenceMsg) {
      enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;
        DPRINTF(RubySlicc, "%s\n", machineID);
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                l2_select_low_bit, l2_select_num_bits, clusterID));
        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(c_issueUPGRADE, "c", desc="Issue UPGRADE") {
    peek(messageBufferFromL0_in, CoherenceMsg) {
      enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:UPGRADE;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                l2_select_low_bit, l2_select_num_bits, clusterID));
        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }
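
  // Request-type cheat sheet for the three issue actions above: GETS asks
  // the L2 for a readable copy, GETX for an exclusive (writable) copy, and
  // UPGRADE for write permission on a line already held in S. Since the
  // requester already has the data, the L2 can typically answer an UPGRADE
  // with acks alone and skip the data transfer.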

  action(d_sendDataToRequestor, "d", desc="send data to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(d2_sendDataToL2, "d2", desc="send data to the L2 cache because of M downgrade") {
    enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
      assert(is_valid(cache_entry));
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
              l2_select_low_bit, l2_select_num_bits, clusterID));
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(dt_sendDataToRequestor_fromTBE, "dt", desc="send data to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(d2t_sendDataToL2_fromTBE, "d2t", desc="send data to the L2 cache") {
    enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := tbe.Dirty;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
              l2_select_low_bit, l2_select_num_bits, clusterID));
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(e_sendAckToRequestor, "e", desc="send invalidate ack to requestor (could be L2 or L1)") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(f_sendDataToL2, "f", desc="send data to the L2 cache") {
    enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
      assert(is_valid(cache_entry));
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
              l2_select_low_bit, l2_select_num_bits, clusterID));
      out_msg.MessageSize := MessageSizeType:Writeback_Data;
    }
  }

  action(ft_sendDataToL2_fromTBE, "ft", desc="send data to the L2 cache") {
    enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := tbe.Dirty;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
              l2_select_low_bit, l2_select_num_bits, clusterID));
      out_msg.MessageSize := MessageSizeType:Writeback_Data;
    }
  }

  action(fi_sendInvAck, "fi", desc="send invalidation ack to the requestor") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.AckCount := 1;
      }
    }
  }

  action(forward_eviction_to_L0, "\cc", desc="send eviction/invalidation to the L0 cache") {
    enqueue(bufferToL0_out, CoherenceMsg, l1_request_latency) {
      out_msg.addr := address;
      out_msg.Class := CoherenceClass:INV;
      out_msg.Sender := machineID;
      out_msg.Dest := createMachineID(MachineType:L0Cache, version);
      out_msg.MessageSize := MessageSizeType:Control;
    }
  }

  action(g_issuePUTX, "g", desc="issue PUTX (writeback) to the L2 cache") {
    enqueue(requestNetwork_out, RequestMsg, l1_response_latency) {
      assert(is_valid(cache_entry));
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:PUTX;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
              l2_select_low_bit, l2_select_num_bits, clusterID));
      if (cache_entry.Dirty) {
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
        out_msg.DataBlk := cache_entry.DataBlk;
      } else {
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(j_sendUnblock, "j", desc="send unblock to the L2 cache") {
    enqueue(unblockNetwork_out, ResponseMsg, to_l2_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
              l2_select_low_bit, l2_select_num_bits, clusterID));
      out_msg.MessageSize := MessageSizeType:Response_Control;
      DPRINTF(RubySlicc, "%#x\n", address);
    }
  }

  action(jj_sendExclusiveUnblock, "\j", desc="send exclusive unblock to the L2 cache") {
    enqueue(unblockNetwork_out, ResponseMsg, to_l2_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:EXCLUSIVE_UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
              l2_select_low_bit, l2_select_num_bits, clusterID));
      out_msg.MessageSize := MessageSizeType:Response_Control;
      DPRINTF(RubySlicc, "%#x\n", address);
    }
  }
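
  // Why unblocks exist: the L2/directory holds an address "blocked" from
  // the moment it starts servicing a request until the requester confirms
  // receipt, which serializes racing requests for the same line. UNBLOCK
  // ends that window; EXCLUSIVE_UNBLOCK additionally tells the L2 that
  // this L1 now holds the line exclusively, so the L2 can record it as the
  // owner. (This describes the usual Ruby MESI convention; the
  // authoritative behavior lives in the L2 controller file.)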

  action(h_data_to_l0, "h", desc="send data to the L0 cache") {
    enqueue(bufferToL0_out, CoherenceMsg, l1_response_latency) {
      assert(is_valid(cache_entry));

      out_msg.addr := address;
      out_msg.Class := CoherenceClass:DATA;
      out_msg.Sender := machineID;
      out_msg.Dest := createMachineID(MachineType:L0Cache, version);
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(hh_xdata_to_l0, "\h", desc="send exclusive data to the L0 cache") {
    enqueue(bufferToL0_out, CoherenceMsg, l1_response_latency) {
      assert(is_valid(cache_entry));

      out_msg.addr := address;
      out_msg.Class := CoherenceClass:DATA_EXCLUSIVE;
      out_msg.Sender := machineID;
      out_msg.Dest := createMachineID(MachineType:L0Cache, version);
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;

      //cache_entry.Dirty := true;
    }
  }

  action(h_stale_data_to_l0, "hs", desc="send stale data to the L0 cache") {
    enqueue(bufferToL0_out, CoherenceMsg, l1_response_latency) {
      assert(is_valid(cache_entry));

      out_msg.addr := address;
      out_msg.Class := CoherenceClass:STALE_DATA;
      out_msg.Sender := machineID;
      out_msg.Dest := createMachineID(MachineType:L0Cache, version);
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(i_allocateTBE, "i", desc="Allocate TBE (number of invalidates=0)") {
    check_allocate(TBEs);
    assert(is_valid(cache_entry));
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.Dirty := cache_entry.Dirty;
    tbe.DataBlk := cache_entry.DataBlk;
  }

  action(k_popL0RequestQueue, "k", desc="Pop the L0 request queue.") {
    messageBufferFromL0_in.dequeue(clockEdge());
  }

  action(l_popL2RequestQueue, "l",
         desc="Pop incoming request queue and profile the delay within this virtual network") {
    Tick delay := requestNetwork_in.dequeue(clockEdge());
    profileMsgDelay(2, ticksToCycles(delay));
  }

  action(o_popL2ResponseQueue, "o",
         desc="Pop incoming response queue and profile the delay within this virtual network") {
    Tick delay := responseNetwork_in.dequeue(clockEdge());
    profileMsgDelay(1, ticksToCycles(delay));
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(u_writeDataFromL0Request, "ureql0", desc="Write data to cache") {
    peek(messageBufferFromL0_in, CoherenceMsg) {
      assert(is_valid(cache_entry));
      if (in_msg.Dirty) {
        cache_entry.DataBlk := in_msg.DataBlk;
        cache_entry.Dirty := in_msg.Dirty;
      }
    }
  }

  action(u_writeDataFromL2Response, "uresl2", desc="Write data to cache") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
    }
  }

  action(u_writeDataFromL0Response, "uresl0", desc="Write data to cache") {
    peek(messageBufferFromL0_in, CoherenceMsg) {
      assert(is_valid(cache_entry));
      if (in_msg.Dirty) {
        cache_entry.DataBlk := in_msg.DataBlk;
        cache_entry.Dirty := in_msg.Dirty;
      }
    }
  }

  action(q_updateAckCount, "q", desc="Update ack count") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.pendingAcks := tbe.pendingAcks - in_msg.AckCount;
      APPEND_TRANSITION_COMMENT(in_msg.AckCount);
      APPEND_TRANSITION_COMMENT(" p: ");
      APPEND_TRANSITION_COMMENT(tbe.pendingAcks);
    }
  }
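
  // Worked ack-count example (illustrative; the AckCount sign convention
  // on data responses is set by the L2 controller): suppose a GETX must
  // invalidate two sharers. The data response carries AckCount = -2, so
  // after q_updateAckCount, pendingAcks = 0 - (-2) = 2. Each sharer's inv
  // ack carries AckCount = 1 (see fi_sendInvAck), decrementing pendingAcks
  // to 1 and then 0; the in_port's "pendingAcks - AckCount == 0" test
  // fires Ack_all on that final ack.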

  action(ff_deallocateCacheBlock, "\f",
         desc="Deallocate L1 cache block.") {
    if (cache.isTagPresent(address)) {
      cache.deallocate(address);
    }
    unset_cache_entry();
  }

  action(oo_allocateCacheBlock, "\o", desc="Allocate L1 cache block.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(cache.allocate(address, new Entry));
    }
  }

  action(z0_stallAndWaitL0Queue, "\z0", desc="stall and wait on the L0 request queue") {
    stall_and_wait(messageBufferFromL0_in, address);
  }

  action(z2_stallAndWaitL2Queue, "\z2", desc="stall and wait on the L2 request queue") {
    stall_and_wait(requestNetwork_in, address);
  }

  action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
    wakeUpAllBuffers(address);
  }
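
  // stall_and_wait() parks the head message on a per-address wait list
  // instead of re-polling it every cycle; wakeUpAllBuffers(address) puts
  // those parked messages back into their queues. That is why every
  // transition that completes a transient state below ends with
  // kd_wakeUpDependents; without it, a stalled request could sleep
  // forever.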

  action(uu_profileMiss, "\um", desc="Profile the demand miss") {
    ++cache.demand_misses;
  }

  action(uu_profileHit, "\uh", desc="Profile the demand hit") {
    ++cache.demand_hits;
  }


  //*****************************************************
  // TRANSITIONS
  //*****************************************************

  // Transitions for Load/Store/Replacement/WriteBack from transient states
  transition({IS, IM, IS_I, M_I, SM, SINK_WB_ACK, S_IL0, M_IL0, E_IL0, MM_IL0},
             {Load, Store, L1_Replacement}) {
    z0_stallAndWaitL0Queue;
  }
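
  // New L0 requests and replacements are stalled while a line is in any
  // transient state: the pending transaction owns the TBE and the block,
  // so a second request for the same address must wait until
  // kd_wakeUpDependents releases it.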

  transition(I, Load, IS) {
    oo_allocateCacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileMiss;
    k_popL0RequestQueue;
  }

  transition(I, Store, IM) {
    oo_allocateCacheBlock;
    i_allocateTBE;
    b_issueGETX;
    uu_profileMiss;
    k_popL0RequestQueue;
  }

  transition(I, Inv) {
    fi_sendInvAck;
    l_popL2RequestQueue;
  }

  // Transitions from Shared
  transition({S,SS}, Load, S) {
    h_data_to_l0;
    uu_profileHit;
    k_popL0RequestQueue;
  }

  transition(EE, Load, E) {
    hh_xdata_to_l0;
    uu_profileHit;
    k_popL0RequestQueue;
  }

  transition(MM, Load, M) {
    hh_xdata_to_l0;
    uu_profileHit;
    k_popL0RequestQueue;
  }

  transition({S,SS}, Store, SM) {
    i_allocateTBE;
    c_issueUPGRADE;
    uu_profileMiss;
    k_popL0RequestQueue;
  }

  transition(SS, L1_Replacement, I) {
    ff_deallocateCacheBlock;
  }

  transition(S, {L0_Invalidate_Own, L0_Invalidate_Else}, S_IL0) {
    forward_eviction_to_L0;
  }

  transition(SS, Inv, I) {
    fi_sendInvAck;
    ff_deallocateCacheBlock;
    l_popL2RequestQueue;
  }

  // Transitions from Exclusive

  transition({EE,MM}, Store, M) {
    hh_xdata_to_l0;
    uu_profileHit;
    k_popL0RequestQueue;
  }

  transition(EE, L1_Replacement, M_I) {
    // silent E replacement??
    i_allocateTBE;
    g_issuePUTX; // send data, but hold in case forwarded request
    ff_deallocateCacheBlock;
  }

  transition(EE, Inv, I) {
    // don't send data
    fi_sendInvAck;
    ff_deallocateCacheBlock;
    l_popL2RequestQueue;
  }

  transition(EE, Fwd_GETX, I) {
    d_sendDataToRequestor;
    ff_deallocateCacheBlock;
    l_popL2RequestQueue;
  }

  transition(EE, Fwd_GETS, SS) {
    d_sendDataToRequestor;
    d2_sendDataToL2;
    l_popL2RequestQueue;
  }

  transition(E, {L0_Invalidate_Own, L0_Invalidate_Else}, E_IL0) {
    forward_eviction_to_L0;
  }

  // Transitions from Modified
  transition(MM, L1_Replacement, M_I) {
    i_allocateTBE;
    g_issuePUTX; // send data, but hold in case forwarded request
    ff_deallocateCacheBlock;
  }

  transition({M,E}, WriteBack, MM) {
    u_writeDataFromL0Request;
    k_popL0RequestQueue;
  }

  transition(M_I, WB_Ack, I) {
    s_deallocateTBE;
    o_popL2ResponseQueue;
    ff_deallocateCacheBlock;
    kd_wakeUpDependents;
  }

  transition(MM, Inv, I) {
    f_sendDataToL2;
    ff_deallocateCacheBlock;
    l_popL2RequestQueue;
  }

  transition(M_I, Inv, SINK_WB_ACK) {
    ft_sendDataToL2_fromTBE;
    l_popL2RequestQueue;
  }

  transition(MM, Fwd_GETX, I) {
    d_sendDataToRequestor;
    ff_deallocateCacheBlock;
    l_popL2RequestQueue;
  }

  transition(MM, Fwd_GETS, SS) {
    d_sendDataToRequestor;
    d2_sendDataToL2;
    l_popL2RequestQueue;
  }

  transition(M, {L0_Invalidate_Own, L0_Invalidate_Else}, M_IL0) {
    forward_eviction_to_L0;
  }

  transition(M_I, Fwd_GETX, SINK_WB_ACK) {
    dt_sendDataToRequestor_fromTBE;
    l_popL2RequestQueue;
  }

  transition(M_I, Fwd_GETS, SINK_WB_ACK) {
    dt_sendDataToRequestor_fromTBE;
    d2t_sendDataToL2_fromTBE;
    l_popL2RequestQueue;
  }

  // Transitions from IS
  transition({IS,IS_I}, Inv, IS_I) {
    fi_sendInvAck;
    l_popL2RequestQueue;
  }

  transition(IS, Data_all_Acks, S) {
    u_writeDataFromL2Response;
    h_data_to_l0;
    s_deallocateTBE;
    o_popL2ResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS_I, Data_all_Acks, I) {
    u_writeDataFromL2Response;
    h_stale_data_to_l0;
    s_deallocateTBE;
    ff_deallocateCacheBlock;
    o_popL2ResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, DataS_fromL1, S) {
    u_writeDataFromL2Response;
    j_sendUnblock;
    h_data_to_l0;
    s_deallocateTBE;
    o_popL2ResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS_I, DataS_fromL1, I) {
    u_writeDataFromL2Response;
    j_sendUnblock;
    h_stale_data_to_l0;
    s_deallocateTBE;
    ff_deallocateCacheBlock;
    o_popL2ResponseQueue;
    kd_wakeUpDependents;
  }

  // directory is blocked when sending exclusive data
  transition({IS,IS_I}, Data_Exclusive, E) {
    u_writeDataFromL2Response;
    hh_xdata_to_l0;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    o_popL2ResponseQueue;
    kd_wakeUpDependents;
  }
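
  // End-to-end load-miss sketch (one common path, not the only one): an
  // L0 GETS arrives in I -> allocate block and TBE, issue GETS, go to IS.
  // If no other core caches the line, the L2 answers DATA_EXCLUSIVE:
  // write the data, hand exclusive data to L0, send EXCLUSIVE_UNBLOCK,
  // free the TBE, and land in E. If the line was shared, a DATA response
  // takes the IS -> S path via Data_all_Acks instead.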

  // Transitions from IM
  transition({IM,SM}, Inv, IM) {
    fi_sendInvAck;
    l_popL2RequestQueue;
  }

  transition(IM, Data, SM) {
    u_writeDataFromL2Response;
    q_updateAckCount;
    o_popL2ResponseQueue;
  }

  transition(IM, Data_all_Acks, M) {
    u_writeDataFromL2Response;
    hh_xdata_to_l0;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    o_popL2ResponseQueue;
    kd_wakeUpDependents;
  }

  transition({SM, IM}, Ack) {
    q_updateAckCount;
    o_popL2ResponseQueue;
  }

  transition(SM, Ack_all, M) {
    jj_sendExclusiveUnblock;
    hh_xdata_to_l0;
    s_deallocateTBE;
    o_popL2ResponseQueue;
    kd_wakeUpDependents;
  }
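
  // Store-miss sketch with contention (illustrative): a GETX from I goes
  // to IM. If the data response still owes acks, IM takes Data -> SM and
  // then counts Ack events (q_updateAckCount) until the last one triggers
  // Ack_all, delivering exclusive data to L0 and settling in M. With no
  // outstanding acks, the short path IM + Data_all_Acks -> M is taken
  // directly.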

  transition(SM, L0_Invalidate_Else, SM_IL0) {
    forward_eviction_to_L0;
  }

  transition(SINK_WB_ACK, Inv) {
    fi_sendInvAck;
    l_popL2RequestQueue;
  }

  transition(SINK_WB_ACK, WB_Ack, I) {
    s_deallocateTBE;
    o_popL2ResponseQueue;
    ff_deallocateCacheBlock;
    kd_wakeUpDependents;
  }

  transition({M_IL0, E_IL0}, WriteBack, MM_IL0) {
    u_writeDataFromL0Request;
    k_popL0RequestQueue;
    kd_wakeUpDependents;
  }

  transition({M_IL0, E_IL0}, L0_DataAck, MM) {
    u_writeDataFromL0Response;
    k_popL0RequestQueue;
    kd_wakeUpDependents;
  }

  transition({M_IL0, MM_IL0}, L0_Ack, MM) {
    k_popL0RequestQueue;
    kd_wakeUpDependents;
  }

  transition(E_IL0, L0_Ack, EE) {
    k_popL0RequestQueue;
    kd_wakeUpDependents;
  }

  transition(S_IL0, L0_Ack, SS) {
    k_popL0RequestQueue;
    kd_wakeUpDependents;
  }

  transition(SM_IL0, L0_Ack, IM) {
    k_popL0RequestQueue;
    kd_wakeUpDependents;
  }

  transition({S_IL0, M_IL0, E_IL0, SM_IL0, SM}, L0_Invalidate_Own) {
    z0_stallAndWaitL0Queue;
  }

  transition({S_IL0, M_IL0, E_IL0, SM_IL0}, L0_Invalidate_Else) {
    z2_stallAndWaitL2Queue;
  }

  transition({S_IL0, M_IL0, E_IL0, MM_IL0}, {Inv, Fwd_GETX, Fwd_GETS}) {
    z2_stallAndWaitL2Queue;
  }
}
378 }
379 }
380 }
381 }
382 }
383 }
384 }
385
386 // ACTIONS
387 action(a_issueGETS, "a", desc="Issue GETS") {
388 peek(messageBufferFromL0_in, CoherenceMsg) {
389 enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
390 out_msg.addr := address;
391 out_msg.Type := CoherenceRequestType:GETS;
392 out_msg.Requestor := machineID;
393 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
394 l2_select_low_bit, l2_select_num_bits, clusterID));
395 DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
396 address, out_msg.Destination);
397 out_msg.MessageSize := MessageSizeType:Control;
398 out_msg.AccessMode := in_msg.AccessMode;
399 }
400 }
401 }
402
403 action(b_issueGETX, "b", desc="Issue GETX") {
404 peek(messageBufferFromL0_in, CoherenceMsg) {
405 enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
406 out_msg.addr := address;
407 out_msg.Type := CoherenceRequestType:GETX;
408 out_msg.Requestor := machineID;
409 DPRINTF(RubySlicc, "%s\n", machineID);
410 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
411 l2_select_low_bit, l2_select_num_bits, clusterID));
412 DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
413 address, out_msg.Destination);
414 out_msg.MessageSize := MessageSizeType:Control;
415 out_msg.AccessMode := in_msg.AccessMode;
416 }
417 }
418 }
419
420 action(c_issueUPGRADE, "c", desc="Issue GETX") {
421 peek(messageBufferFromL0_in, CoherenceMsg) {
422 enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
423 out_msg.addr := address;
424 out_msg.Type := CoherenceRequestType:UPGRADE;
425 out_msg.Requestor := machineID;
426 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
427 l2_select_low_bit, l2_select_num_bits, clusterID));
428 DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
429 address, out_msg.Destination);
430 out_msg.MessageSize := MessageSizeType:Control;
431 out_msg.AccessMode := in_msg.AccessMode;
432 }
433 }
434 }
435
436 action(d_sendDataToRequestor, "d", desc="send data to requestor") {
437 peek(requestNetwork_in, RequestMsg) {
438 enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
439 assert(is_valid(cache_entry));
440 out_msg.addr := address;
441 out_msg.Type := CoherenceResponseType:DATA;
442 out_msg.DataBlk := cache_entry.DataBlk;
443 out_msg.Dirty := cache_entry.Dirty;
444 out_msg.Sender := machineID;
445 out_msg.Destination.add(in_msg.Requestor);
446 out_msg.MessageSize := MessageSizeType:Response_Data;
447 }
448 }
449 }
450
451 action(d2_sendDataToL2, "d2", desc="send data to the L2 cache because of M downgrade") {
452 enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
453 assert(is_valid(cache_entry));
454 out_msg.addr := address;
455 out_msg.Type := CoherenceResponseType:DATA;
456 out_msg.DataBlk := cache_entry.DataBlk;
457 out_msg.Dirty := cache_entry.Dirty;
458 out_msg.Sender := machineID;
459 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
460 l2_select_low_bit, l2_select_num_bits, clusterID));
461 out_msg.MessageSize := MessageSizeType:Response_Data;
462 }
463 }
464
465 action(dt_sendDataToRequestor_fromTBE, "dt", desc="send data to requestor") {
466 peek(requestNetwork_in, RequestMsg) {
467 enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
468 assert(is_valid(tbe));
469 out_msg.addr := address;
470 out_msg.Type := CoherenceResponseType:DATA;
471 out_msg.DataBlk := tbe.DataBlk;
472 out_msg.Dirty := tbe.Dirty;
473 out_msg.Sender := machineID;
474 out_msg.Destination.add(in_msg.Requestor);
475 out_msg.MessageSize := MessageSizeType:Response_Data;
476 }
477 }
478 }
479
480 action(d2t_sendDataToL2_fromTBE, "d2t", desc="send data to the L2 cache") {
481 enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
482 assert(is_valid(tbe));
483 out_msg.addr := address;
484 out_msg.Type := CoherenceResponseType:DATA;
485 out_msg.DataBlk := tbe.DataBlk;
486 out_msg.Dirty := tbe.Dirty;
487 out_msg.Sender := machineID;
488 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
489 l2_select_low_bit, l2_select_num_bits, clusterID));
490 out_msg.MessageSize := MessageSizeType:Response_Data;
491 }
492 }
493
494 action(e_sendAckToRequestor, "e", desc="send invalidate ack to requestor (could be L2 or L1)") {
495 peek(requestNetwork_in, RequestMsg) {
496 enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
497 out_msg.addr := address;
498 out_msg.Type := CoherenceResponseType:ACK;
499 out_msg.Sender := machineID;
500 out_msg.Destination.add(in_msg.Requestor);
501 out_msg.MessageSize := MessageSizeType:Response_Control;
502 }
503 }
504 }
505
506 action(f_sendDataToL2, "f", desc="send data to the L2 cache") {
507 enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
508 assert(is_valid(cache_entry));
509 out_msg.addr := address;
510 out_msg.Type := CoherenceResponseType:DATA;
511 out_msg.DataBlk := cache_entry.DataBlk;
512 out_msg.Dirty := cache_entry.Dirty;
513 out_msg.Sender := machineID;
514 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
515 l2_select_low_bit, l2_select_num_bits, clusterID));
516 out_msg.MessageSize := MessageSizeType:Writeback_Data;
517 }
518 }
519
520 action(ft_sendDataToL2_fromTBE, "ft", desc="send data to the L2 cache") {
521 enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
522 assert(is_valid(tbe));
523 out_msg.addr := address;
524 out_msg.Type := CoherenceResponseType:DATA;
525 out_msg.DataBlk := tbe.DataBlk;
526 out_msg.Dirty := tbe.Dirty;
527 out_msg.Sender := machineID;
528 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
529 l2_select_low_bit, l2_select_num_bits, clusterID));
530 out_msg.MessageSize := MessageSizeType:Writeback_Data;
531 }
532 }
533
534 action(fi_sendInvAck, "fi", desc="send data to the L2 cache") {
535 peek(requestNetwork_in, RequestMsg) {
536 enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
537 out_msg.addr := address;
538 out_msg.Type := CoherenceResponseType:ACK;
539 out_msg.Sender := machineID;
540 out_msg.Destination.add(in_msg.Requestor);
541 out_msg.MessageSize := MessageSizeType:Response_Control;
542 out_msg.AckCount := 1;
543 }
544 }
545 }
546
547 action(forward_eviction_to_L0, "\cc", desc="sends eviction information to the processor") {
548 enqueue(bufferToL0_out, CoherenceMsg, l1_request_latency) {
549 out_msg.addr := address;
550 out_msg.Class := CoherenceClass:INV;
551 out_msg.Sender := machineID;
552 out_msg.Dest := createMachineID(MachineType:L0Cache, version);
553 out_msg.MessageSize := MessageSizeType:Control;
554 }
555 }
556
557 action(g_issuePUTX, "g", desc="send data to the L2 cache") {
558 enqueue(requestNetwork_out, RequestMsg, l1_response_latency) {
559 assert(is_valid(cache_entry));
560 out_msg.addr := address;
561 out_msg.Type := CoherenceRequestType:PUTX;
562 out_msg.Dirty := cache_entry.Dirty;
563 out_msg.Requestor:= machineID;
564 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
565 l2_select_low_bit, l2_select_num_bits, clusterID));
566 if (cache_entry.Dirty) {
567 out_msg.MessageSize := MessageSizeType:Writeback_Data;
568 out_msg.DataBlk := cache_entry.DataBlk;
569 } else {
570 out_msg.MessageSize := MessageSizeType:Writeback_Control;
571 }
572 }
573 }
574
575 action(j_sendUnblock, "j", desc="send unblock to the L2 cache") {
576 enqueue(unblockNetwork_out, ResponseMsg, to_l2_latency) {
577 out_msg.addr := address;
578 out_msg.Type := CoherenceResponseType:UNBLOCK;
579 out_msg.Sender := machineID;
580 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
581 l2_select_low_bit, l2_select_num_bits, clusterID));
582 out_msg.MessageSize := MessageSizeType:Response_Control;
583 DPRINTF(RubySlicc, "%#x\n", address);
584 }
585 }
586
587 action(jj_sendExclusiveUnblock, "\j", desc="send unblock to the L2 cache") {
588 enqueue(unblockNetwork_out, ResponseMsg, to_l2_latency) {
589 out_msg.addr := address;
590 out_msg.Type := CoherenceResponseType:EXCLUSIVE_UNBLOCK;
591 out_msg.Sender := machineID;
592 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
593 l2_select_low_bit, l2_select_num_bits, clusterID));
594 out_msg.MessageSize := MessageSizeType:Response_Control;
595 DPRINTF(RubySlicc, "%#x\n", address);
596
597 }
598 }
599
600 action(h_data_to_l0, "h", desc="If not prefetch, send data to the L0 cache.") {
601 enqueue(bufferToL0_out, CoherenceMsg, l1_response_latency) {
602 assert(is_valid(cache_entry));
603
604 out_msg.addr := address;
605 out_msg.Class := CoherenceClass:DATA;
606 out_msg.Sender := machineID;
607 out_msg.Dest := createMachineID(MachineType:L0Cache, version);
608 out_msg.DataBlk := cache_entry.DataBlk;
609 out_msg.MessageSize := MessageSizeType:Response_Data;
610 }
611 }
612
  action(hh_xdata_to_l0, "\h", desc="send exclusive data to the L0 cache") {
    enqueue(bufferToL0_out, CoherenceMsg, l1_response_latency) {
      assert(is_valid(cache_entry));

      out_msg.addr := address;
      out_msg.Class := CoherenceClass:DATA_EXCLUSIVE;
      out_msg.Sender := machineID;
      out_msg.Dest := createMachineID(MachineType:L0Cache, version);
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;

      //cache_entry.Dirty := true;
    }
  }

  action(h_stale_data_to_l0, "hs", desc="send stale data to the L0 cache") {
    enqueue(bufferToL0_out, CoherenceMsg, l1_response_latency) {
      assert(is_valid(cache_entry));

      out_msg.addr := address;
      out_msg.Class := CoherenceClass:STALE_DATA;
      out_msg.Sender := machineID;
      out_msg.Dest := createMachineID(MachineType:L0Cache, version);
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

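  // A TBE (transaction buffer entry) tracks the outstanding miss or
  // writeback for this address; snapshotting Dirty and DataBlk here lets
  // the cache block be freed while the data can still be supplied from
  // the TBE (the *_fromTBE actions used in the M_I transitions).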
  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(TBEs);
    assert(is_valid(cache_entry));
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.Dirty := cache_entry.Dirty;
    tbe.DataBlk := cache_entry.DataBlk;
  }

  action(k_popL0RequestQueue, "k", desc="Pop the request queue from the L0 cache") {
    messageBufferFromL0_in.dequeue(clockEdge());
  }

  action(l_popL2RequestQueue, "l",
         desc="Pop incoming request queue and profile the delay within this virtual network") {
    Tick delay := requestNetwork_in.dequeue(clockEdge());
    profileMsgDelay(2, ticksToCycles(delay));
  }

  action(o_popL2ResponseQueue, "o",
         desc="Pop incoming response queue and profile the delay within this virtual network") {
    Tick delay := responseNetwork_in.dequeue(clockEdge());
    profileMsgDelay(1, ticksToCycles(delay));
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(u_writeDataFromL0Request, "ureql0", desc="Write data to cache") {
    peek(messageBufferFromL0_in, CoherenceMsg) {
      assert(is_valid(cache_entry));
      if (in_msg.Dirty) {
        cache_entry.DataBlk := in_msg.DataBlk;
        cache_entry.Dirty := in_msg.Dirty;
      }
    }
  }

  action(u_writeDataFromL2Response, "uresl2", desc="Write data to cache") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
    }
  }

  action(u_writeDataFromL0Response, "uresl0", desc="Write data to cache") {
    peek(messageBufferFromL0_in, CoherenceMsg) {
      assert(is_valid(cache_entry));
      if (in_msg.Dirty) {
        cache_entry.DataBlk := in_msg.DataBlk;
        cache_entry.Dirty := in_msg.Dirty;
      }
    }
  }

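  // Both plain ACKs and the AckCount piggybacked on data responses are
  // debited against tbe.pendingAcks; Ack_all / Data_all_Acks fire once no
  // acks remain outstanding (that trigger lives in the in_port logic
  // earlier in this file).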
  action(q_updateAckCount, "q", desc="Update ack count") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.pendingAcks := tbe.pendingAcks - in_msg.AckCount;
      APPEND_TRANSITION_COMMENT(in_msg.AckCount);
      APPEND_TRANSITION_COMMENT(" p: ");
      APPEND_TRANSITION_COMMENT(tbe.pendingAcks);
    }
  }

  action(ff_deallocateCacheBlock, "\f",
         desc="Deallocate L1 cache block.") {
    if (cache.isTagPresent(address)) {
      cache.deallocate(address);
    }
    unset_cache_entry();
  }

  action(oo_allocateCacheBlock, "\o", desc="Allocate an L1 cache block") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(cache.allocate(address, new Entry));
    }
  }

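  // stall_and_wait parks the head message on a per-address stall list
  // rather than recycling it; kd_wakeUpDependents (wakeUpAllBuffers)
  // later returns every message parked on this address to its queue once
  // the blocking transaction completes.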
  action(z0_stallAndWaitL0Queue, "\z0", desc="stall and wait on the L0 request queue") {
    stall_and_wait(messageBufferFromL0_in, address);
  }

  action(z2_stallAndWaitL2Queue, "\z2", desc="stall and wait on the L2 request queue") {
    stall_and_wait(requestNetwork_in, address);
  }

  action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
    wakeUpAllBuffers(address);
  }

  action(uu_profileMiss, "\um", desc="Profile the demand miss") {
    ++cache.demand_misses;
  }

  action(uu_profileHit, "\uh", desc="Profile the demand hit") {
    ++cache.demand_hits;
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************

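  // Each transition names the current state (or set of states), the
  // triggering event, and optionally the next state; the listed actions
  // execute atomically, in order.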
  // Transitions for Load/Store/Replacement/WriteBack from transient states
  transition({IS, IM, IS_I, M_I, SM, SINK_WB_ACK, S_IL0, M_IL0, E_IL0, MM_IL0},
             {Load, Store, L1_Replacement}) {
    z0_stallAndWaitL0Queue;
  }

  transition(I, Load, IS) {
    oo_allocateCacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileMiss;
    k_popL0RequestQueue;
  }

  transition(I, Store, IM) {
    oo_allocateCacheBlock;
    i_allocateTBE;
    b_issueGETX;
    uu_profileMiss;
    k_popL0RequestQueue;
  }

  transition(I, Inv) {
    fi_sendInvAck;
    l_popL2RequestQueue;
  }

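  // Naming convention for the stable states: the single-letter states
  // (S, E, M) mean the L0 cache also holds the block, while the doubled
  // states (SS, EE, MM) mean only this L1 holds it. A load from L0 thus
  // moves EE/MM to E/M as the data is passed down.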
  // Transitions from Shared
  transition({S,SS}, Load, S) {
    h_data_to_l0;
    uu_profileHit;
    k_popL0RequestQueue;
  }

  transition(EE, Load, E) {
    hh_xdata_to_l0;
    uu_profileHit;
    k_popL0RequestQueue;
  }

  transition(MM, Load, M) {
    hh_xdata_to_l0;
    uu_profileHit;
    k_popL0RequestQueue;
  }

  transition({S,SS}, Store, SM) {
    i_allocateTBE;
    c_issueUPGRADE;
    uu_profileMiss;
    k_popL0RequestQueue;
  }

  transition(SS, L1_Replacement, I) {
    ff_deallocateCacheBlock;
  }

  transition(S, {L0_Invalidate_Own, L0_Invalidate_Else}, S_IL0) {
    forward_eviction_to_L0;
  }

  transition(SS, Inv, I) {
    fi_sendInvAck;
    ff_deallocateCacheBlock;
    l_popL2RequestQueue;
  }

  // Transitions from Exclusive

  transition({EE,MM}, Store, M) {
    hh_xdata_to_l0;
    uu_profileHit;
    k_popL0RequestQueue;
  }

  transition(EE, L1_Replacement, M_I) {
    // A clean exclusive block could in principle be replaced silently, but
    // this protocol conservatively issues a PUTX and waits for the ack.
    i_allocateTBE;
    g_issuePUTX;   // send data, but hold the TBE in case a request is forwarded
    ff_deallocateCacheBlock;
  }

  transition(EE, Inv, I) {
    // don't send data
    fi_sendInvAck;
    ff_deallocateCacheBlock;
    l_popL2RequestQueue;
  }

  transition(EE, Fwd_GETX, I) {
    d_sendDataToRequestor;
    ff_deallocateCacheBlock;
    l_popL2RequestQueue;
  }

  transition(EE, Fwd_GETS, SS) {
    d_sendDataToRequestor;
    d2_sendDataToL2;
    l_popL2RequestQueue;
  }

  transition(E, {L0_Invalidate_Own, L0_Invalidate_Else}, E_IL0) {
    forward_eviction_to_L0;
  }

  // Transitions from Modified
  transition(MM, L1_Replacement, M_I) {
    i_allocateTBE;
    g_issuePUTX;   // send data, but hold the TBE in case a request is forwarded
    ff_deallocateCacheBlock;
  }

  transition({M,E}, WriteBack, MM) {
    u_writeDataFromL0Request;
    k_popL0RequestQueue;
  }

  transition(M_I, WB_Ack, I) {
    s_deallocateTBE;
    o_popL2ResponseQueue;
    ff_deallocateCacheBlock;
    kd_wakeUpDependents;
  }

  transition(MM, Inv, I) {
    f_sendDataToL2;
    ff_deallocateCacheBlock;
    l_popL2RequestQueue;
  }

  transition(M_I, Inv, SINK_WB_ACK) {
    ft_sendDataToL2_fromTBE;
    l_popL2RequestQueue;
  }

  transition(MM, Fwd_GETX, I) {
    d_sendDataToRequestor;
    ff_deallocateCacheBlock;
    l_popL2RequestQueue;
  }

  transition(MM, Fwd_GETS, SS) {
    d_sendDataToRequestor;
    d2_sendDataToL2;
    l_popL2RequestQueue;
  }

  transition(M, {L0_Invalidate_Own, L0_Invalidate_Else}, M_IL0) {
    forward_eviction_to_L0;
  }

  transition(M_I, Fwd_GETX, SINK_WB_ACK) {
    dt_sendDataToRequestor_fromTBE;
    l_popL2RequestQueue;
  }

  transition(M_I, Fwd_GETS, SINK_WB_ACK) {
    dt_sendDataToRequestor_fromTBE;
    d2t_sendDataToL2_fromTBE;
    l_popL2RequestQueue;
  }

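  // IS_I covers the window where this L1's GETS is still outstanding but
  // the line has already been invalidated; the data that eventually
  // arrives is passed to L0 as STALE_DATA and the block is dropped.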
  // Transitions from IS
  transition({IS,IS_I}, Inv, IS_I) {
    fi_sendInvAck;
    l_popL2RequestQueue;
  }

  transition(IS, Data_all_Acks, S) {
    u_writeDataFromL2Response;
    h_data_to_l0;
    s_deallocateTBE;
    o_popL2ResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS_I, Data_all_Acks, I) {
    u_writeDataFromL2Response;
    h_stale_data_to_l0;
    s_deallocateTBE;
    ff_deallocateCacheBlock;
    o_popL2ResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, DataS_fromL1, S) {
    u_writeDataFromL2Response;
    j_sendUnblock;
    h_data_to_l0;
    s_deallocateTBE;
    o_popL2ResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS_I, DataS_fromL1, I) {
    u_writeDataFromL2Response;
    j_sendUnblock;
    h_stale_data_to_l0;
    s_deallocateTBE;
    ff_deallocateCacheBlock;
    o_popL2ResponseQueue;
    kd_wakeUpDependents;
  }

  // directory is blocked when sending exclusive data
  transition({IS,IS_I}, Data_Exclusive, E) {
    u_writeDataFromL2Response;
    hh_xdata_to_l0;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    o_popL2ResponseQueue;
    kd_wakeUpDependents;
  }

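  // On a store miss the line waits in IM until data arrives, then in SM
  // while invalidation acks from other sharers are still outstanding;
  // only on Data_all_Acks / Ack_all is exclusive data handed to L0.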
  // Transitions from IM
  transition({IM,SM}, Inv, IM) {
    fi_sendInvAck;
    l_popL2RequestQueue;
  }

  transition(IM, Data, SM) {
    u_writeDataFromL2Response;
    q_updateAckCount;
    o_popL2ResponseQueue;
  }

  transition(IM, Data_all_Acks, M) {
    u_writeDataFromL2Response;
    hh_xdata_to_l0;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    o_popL2ResponseQueue;
    kd_wakeUpDependents;
  }

  transition({SM, IM}, Ack) {
    q_updateAckCount;
    o_popL2ResponseQueue;
  }

  transition(SM, Ack_all, M) {
    jj_sendExclusiveUnblock;
    hh_xdata_to_l0;
    s_deallocateTBE;
    o_popL2ResponseQueue;
    kd_wakeUpDependents;
  }

  transition(SM, L0_Invalidate_Else, SM_IL0) {
    forward_eviction_to_L0;
  }

  transition(SINK_WB_ACK, Inv) {
    fi_sendInvAck;
    l_popL2RequestQueue;
  }

  transition(SINK_WB_ACK, WB_Ack, I) {
    s_deallocateTBE;
    o_popL2ResponseQueue;
    ff_deallocateCacheBlock;
    kd_wakeUpDependents;
  }

  transition({M_IL0, E_IL0}, WriteBack, MM_IL0) {
    u_writeDataFromL0Request;
    k_popL0RequestQueue;
    kd_wakeUpDependents;
  }

  transition({M_IL0, E_IL0}, L0_DataAck, MM) {
    u_writeDataFromL0Response;
    k_popL0RequestQueue;
    kd_wakeUpDependents;
  }

  transition({M_IL0, MM_IL0}, L0_Ack, MM) {
    k_popL0RequestQueue;
    kd_wakeUpDependents;
  }

  transition(E_IL0, L0_Ack, EE) {
    k_popL0RequestQueue;
    kd_wakeUpDependents;
  }

  transition(S_IL0, L0_Ack, SS) {
    k_popL0RequestQueue;
    kd_wakeUpDependents;
  }

  transition(SM_IL0, L0_Ack, IM) {
    k_popL0RequestQueue;
    kd_wakeUpDependents;
  }

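  // Races against an in-flight recall of L0: new invalidations that
  // arrive while an *_IL0 state is pending are stalled until the recall
  // completes and the stalled queues are woken.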
  transition({S_IL0, M_IL0, E_IL0, SM_IL0, SM}, L0_Invalidate_Own) {
    z0_stallAndWaitL0Queue;
  }

  transition({S_IL0, M_IL0, E_IL0, SM_IL0}, L0_Invalidate_Else) {
    z2_stallAndWaitL2Queue;
  }

  transition({S_IL0, M_IL0, E_IL0, MM_IL0}, {Inv, Fwd_GETX, Fwd_GETS}) {
    z2_stallAndWaitL2Queue;
  }
}