1/*
2 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
3 * Copyright (c) 2009 Advanced Micro Devices, Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * AMD's contributions to the MOESI hammer protocol do not constitute an
30 * endorsement of its similarity to any AMD products.
31 *
32 * Authors: Milo Martin
33 *          Brad Beckmann
34 */
35
36machine(MachineType:L1Cache, "AMD Hammer-like protocol")
37    : Sequencer * sequencer;
38      CacheMemory * L1Icache;
39      CacheMemory * L1Dcache;
40      CacheMemory * L2cache;
41      Cycles cache_response_latency := 10;
42      Cycles issue_latency := 2;
43      Cycles l2_cache_hit_latency := 10;
44      bool no_mig_atomic := "True";
45      bool send_evictions;
46
47      // NETWORK BUFFERS
48      MessageBuffer * requestFromCache, network="To", virtual_network="2",
49            vnet_type="request";
50      MessageBuffer * responseFromCache, network="To", virtual_network="4",
51            vnet_type="response";
52      MessageBuffer * unblockFromCache, network="To", virtual_network="5",
53            vnet_type="unblock";
54
55      MessageBuffer * forwardToCache, network="From", virtual_network="3",
56            vnet_type="forward";
57      MessageBuffer * responseToCache, network="From", virtual_network="4",
58            vnet_type="response";
59
60      MessageBuffer * mandatoryQueue;
61
62      MessageBuffer * triggerQueue;
63{
64  // STATES
65  state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
66    // Base states
67    I, AccessPermission:Invalid, desc="Idle";
68    S, AccessPermission:Read_Only, desc="Shared";
69    O, AccessPermission:Read_Only, desc="Owned";
70    M, AccessPermission:Read_Only, desc="Modified (dirty)";
71    MM, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";
72
73    // Base states, locked and ready to service the mandatory queue
74    IR, AccessPermission:Invalid, desc="Idle";
75    SR, AccessPermission:Read_Only, desc="Shared";
76    OR, AccessPermission:Read_Only, desc="Owned";
77    MR, AccessPermission:Read_Only, desc="Modified (dirty)";
78    MMR, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";
79
80    // Transient States
81    IM, AccessPermission:Busy, "IM", desc="Issued GetX";
82    SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have a valid copy of the line";
83    OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
84    ISM, AccessPermission:Read_Only, "ISM", desc="Issued GetX, received valid data, waiting for all acks";
85    M_W, AccessPermission:Read_Only, "M^W", desc="Issued GetS, received exclusive data";
86    MM_W, AccessPermission:Read_Write, "MM^W", desc="Issued GetX, received exclusive data";
87    IS, AccessPermission:Busy, "IS", desc="Issued GetS";
88    SS, AccessPermission:Read_Only, "SS", desc="Issued GetS, received data, waiting for all acks";
89    OI, AccessPermission:Busy, "OI", desc="Issued PutO, waiting for ack";
90    MI, AccessPermission:Busy, "MI", desc="Issued PutX, waiting for ack";
91    II, AccessPermission:Busy, "II", desc="Issued PutX/O, saw Other_GETS or Other_GETX, waiting for ack";
92    ST, AccessPermission:Busy, "ST", desc="S block transferring to L1";
93    OT, AccessPermission:Busy, "OT", desc="O block transferring to L1";
94    MT, AccessPermission:Busy, "MT", desc="M block transferring to L1";
95    MMT, AccessPermission:Busy, "MMT", desc="MM block transferring to L0";
96
97    //Transition States Related to Flushing
98    MI_F, AccessPermission:Busy, "MI_F", desc="Issued PutX due to a Flush, waiting for ack";
99    MM_F, AccessPermission:Busy, "MM_F", desc="Issued GETF due to a Flush, waiting for ack";
100    IM_F, AccessPermission:Busy, "IM_F", desc="Issued GetX due to a Flush";
101    ISM_F, AccessPermission:Read_Only, "ISM_F", desc="Issued GetX, received data, waiting for all acks";
102    SM_F, AccessPermission:Read_Only, "SM_F", desc="Issued GetX, we still have an old copy of the line";
103    OM_F, AccessPermission:Read_Only, "OM_F", desc="Issued GetX, received data";
104    MM_WF, AccessPermission:Busy, "MM_WF", desc="Issued GetX, received exclusive data";
105  }
106
107  // EVENTS
108  enumeration(Event, desc="Cache events") {
109    Load,            desc="Load request from the processor";
110    Ifetch,          desc="I-fetch request from the processor";
111    Store,           desc="Store request from the processor";
112    L2_Replacement,  desc="L2 Replacement";
113    L1_to_L2,        desc="L1 to L2 transfer";
114    Trigger_L2_to_L1D,  desc="Trigger L2 to L1-Data transfer";
115    Trigger_L2_to_L1I,  desc="Trigger L2 to L1-Instruction transfer";
116    Complete_L2_to_L1, desc="L2 to L1 transfer completed";
117
118    // Requests
119    Other_GETX,      desc="A GetX from another processor";
120    Other_GETS,      desc="A GetS from another processor";
121    Merged_GETS,     desc="A Merged GetS from another processor";
122    Other_GETS_No_Mig, desc="A GetS from another processor";
123    NC_DMA_GETS,     desc="special GetS when only DMA exists";
124    Invalidate,      desc="Invalidate block";
125
126    // Responses
127    Ack,             desc="Received an ack message";
128    Shared_Ack,      desc="Received an ack message, responder has a shared copy";
129    Data,            desc="Received a data message";
130    Shared_Data,     desc="Received a data message, responder has a shared copy";
131    Exclusive_Data,  desc="Received a data message, responder had an exclusive copy, they gave it to us";
132
133    Writeback_Ack,   desc="Writeback O.K. from directory";
134    Writeback_Nack,  desc="Writeback not O.K. from directory";
135
136    // Triggers
137    All_acks,                  desc="Received all required data and message acks";
138    All_acks_no_sharers,        desc="Received all acks and no other processor has a shared copy";
139
140    // For Flush
141    Flush_line,                  desc="flush the cache line from all caches";
142    Block_Ack,                   desc="the directory is blocked and ready for the flush";
143  }
144
145  // STRUCTURE DEFINITIONS
146  // CacheEntry
147  structure(Entry, desc="...", interface="AbstractCacheEntry") {
148    State CacheState,        desc="cache state";
149    bool Dirty,              desc="Is the data dirty (different than memory)?";
150    DataBlock DataBlk,       desc="data for the block";
151    bool FromL2, default="false", desc="block just moved from L2";
152    bool AtomicAccessed, default="false", desc="block just moved from L2";
153  }
154
  // TBE fields
  // Transaction Buffer Entry: per-address bookkeeping for an in-flight
  // request or writeback, including the data copy needed for concurrent
  // writebacks and the ack count still outstanding.
  structure(TBE, desc="...") {
    State TBEState,          desc="Transient state";
    DataBlock DataBlk,       desc="data for the block, required for concurrent writebacks";
    bool Dirty,              desc="Is the data dirty (different than memory)?";
    int NumPendingMsgs,      desc="Number of acks/data messages that this processor is waiting for";
    bool Sharers,            desc="On a GetS, did we find any other sharers in the system";
    bool AppliedSilentAcks, default="false", desc="for full-bit dir, does the pending msg count reflect the silent acks";
    MachineID LastResponder, desc="last machine to send a response for this request";
    MachineID CurOwner,      desc="current owner of the block, used for UnblockS responses";

    // Timestamps carried through the protocol for latency profiling.
    Cycles InitialRequestTime, default="Cycles(0)",
            desc="time the initial requests was sent from the L1Cache";
    Cycles ForwardRequestTime, default="Cycles(0)",
            desc="time the dir forwarded the request";
    Cycles FirstResponseTime, default="Cycles(0)",
            desc="the time the first response was received";
  }
173
  // Externally implemented (C++) table of TBEs keyed by block address.
  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }
180
  // The controller's TBE table, sized by the m_number_of_TBEs parameter.
  TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";

  // Prototypes for functions provided by the generated/base controller.
  Tick clockEdge();
  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();
  void wakeUpAllBuffers();
  void wakeUpBuffers(Addr a);
  Cycles curCycle();
  MachineID mapAddressToMachine(Addr addr, MachineType mtype);
192
  // Find the entry for 'address' in any of the three caches, checking
  // L2, then L1D, then L1I. Exclusion (asserted in setState) means at
  // most one cache holds the block; returns an invalid pointer if none.
  Entry getCacheEntry(Addr address), return_by_pointer="yes" {
    Entry L2cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
    if(is_valid(L2cache_entry)) {
      return L2cache_entry;
    }

    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(address));
    if(is_valid(L1Dcache_entry)) {
      return L1Dcache_entry;
    }

    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(address));
    return L1Icache_entry;
  }
207
  // Functional (non-timing) read: satisfy the packet from the cache
  // entry if one exists, otherwise from the TBE's data copy; error out
  // if this controller holds no copy at all.
  void functionalRead(Addr addr, Packet *pkt) {
    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      testAndRead(addr, cache_entry.DataBlk, pkt);
    } else {
      TBE tbe := TBEs[addr];
      if(is_valid(tbe)) {
        testAndRead(addr, tbe.DataBlk, pkt);
      } else {
        error("Missing data block");
      }
    }
  }
221
  // Functional (non-timing) write: update whichever copy of the block
  // this controller holds (cache entry first, else the TBE copy) and
  // return the number of successful writes.
  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      num_functional_writes := num_functional_writes +
        testAndWrite(addr, cache_entry.DataBlk, pkt);
      return num_functional_writes;
    }

    // NOTE(review): unlike functionalRead, the TBE is used without an
    // is_valid check -- presumably the functional-access path only calls
    // this when a cache entry or TBE exists; confirm against the caller.
    TBE tbe := TBEs[addr];
    num_functional_writes := num_functional_writes +
      testAndWrite(addr, tbe.DataBlk, pkt);
    return num_functional_writes;
  }
237
238  Entry getL2CacheEntry(Addr address), return_by_pointer="yes" {
239    Entry L2cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
240    return L2cache_entry;
241  }
242
243  Entry getL1DCacheEntry(Addr address), return_by_pointer="yes" {
244    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(address));
245    return L1Dcache_entry;
246  }
247
248  Entry getL1ICacheEntry(Addr address), return_by_pointer="yes" {
249    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(address));
250    return L1Icache_entry;
251  }
252
253  State getState(TBE tbe, Entry cache_entry, Addr addr) {
254    if(is_valid(tbe)) {
255      return tbe.TBEState;
256    } else if (is_valid(cache_entry)) {
257      return cache_entry.CacheState;
258    }
259    return State:I;
260  }
261
  // Record the new state in the TBE and/or cache entry, whichever exist.
  void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
    // Exclusion invariant: a block may live in at most one of the three
    // caches at any time.
    assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);
    assert((L1Icache.isTagPresent(addr) && L2cache.isTagPresent(addr)) == false);
    assert((L1Dcache.isTagPresent(addr) && L2cache.isTagPresent(addr)) == false);

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }
275
  // Map the block's current state to an access permission, with the
  // TBE's transient state taking precedence over the cache entry's;
  // a block held nowhere is NotPresent.
  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      return L1Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      return L1Cache_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }
289
  // Push the permission implied by 'state' into the cache entry, if
  // one exists (no-op otherwise).
  void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L1Cache_State_to_permission(state));
    }
  }
295
  // Translate a processor request type into the corresponding machine
  // event. Stores and atomics both map to Event:Store; any other type
  // is a protocol error.
  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else if ((type == RubyRequestType:FLUSH)) {
      return Event:Flush_line;
    } else {
      error("Invalid RubyRequestType");
    }
  }
309
  // If the entry was just moved up from the L2 (FromL2 set), clear the
  // flag and report L2Cache as the responding machine -- presumably so
  // the hit is credited to the L2 for stats/latency; otherwise L1Cache.
  MachineType testAndClearLocalHit(Entry cache_entry) {
    if (is_valid(cache_entry) && cache_entry.FromL2) {
      cache_entry.FromL2 := false;
      return MachineType:L2Cache;
    }
    return MachineType:L1Cache;
  }
317
  // True if the block was touched by an atomic operation; used by the
  // forward-network port to suppress migration when no_mig_atomic is set.
  bool IsAtomicAccessed(Entry cache_entry) {
    assert(is_valid(cache_entry));
    return cache_entry.AtomicAccessed;
  }
322
  // ** OUT_PORTS **
  // Outgoing virtual networks: requests (vnet 2), responses (vnet 4),
  // unblocks (vnet 5), plus the internal self-trigger queue.
  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);
  out_port(unblockNetwork_out, ResponseMsg, unblockFromCache);
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);
328
329  // ** IN_PORTS **
330
  // Trigger Queue
  // Internal self-triggers (rank sets relative in_port scheduling
  // priority): L2-to-L1 transfer completion and all-acks notifications.
  in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=3) {
    if (triggerQueue_in.isReady(clockEdge())) {
      peek(triggerQueue_in, TriggerMsg) {

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if (in_msg.Type == TriggerType:L2_to_L1) {
          trigger(Event:Complete_L2_to_L1, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_acks, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
          trigger(Event:All_acks_no_sharers, in_msg.addr, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }
351
352  // Nothing from the unblock network
353
  // Response Network
  // Acks and data from other caches/memory; block_on="addr" serializes
  // handling per block address.
  in_port(responseToCache_in, ResponseMsg, responseToCache, rank=2) {
    if (responseToCache_in.isReady(clockEdge())) {
      peek(responseToCache_in, ResponseMsg, block_on="addr") {

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
          trigger(Event:Shared_Ack, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
          trigger(Event:Shared_Data, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Exclusive_Data, in_msg.addr, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }
378
  // Forward Network
  // Requests forwarded/probed by the directory: other processors' GETX /
  // GETS (plus merged and no-migration variants), invalidates, and
  // writeback/flush control messages.
  in_port(forwardToCache_in, RequestMsg, forwardToCache, rank=1) {
    if (forwardToCache_in.isReady(clockEdge())) {
      peek(forwardToCache_in, RequestMsg, block_on="addr") {

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        // GETF (flush) is handled like a GETX probe.
        if ((in_msg.Type == CoherenceRequestType:GETX) ||
            (in_msg.Type == CoherenceRequestType:GETF)) {
          trigger(Event:Other_GETX, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:MERGED_GETS) {
          trigger(Event:Merged_GETS, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (machineCount(MachineType:L1Cache) > 1) {
            if (is_valid(cache_entry)) {
              // Suppress ownership migration for atomically accessed
              // blocks when no_mig_atomic is configured.
              if (IsAtomicAccessed(cache_entry) && no_mig_atomic) {
                trigger(Event:Other_GETS_No_Mig, in_msg.addr, cache_entry, tbe);
              } else {
                trigger(Event:Other_GETS, in_msg.addr, cache_entry, tbe);
              }
            } else {
              trigger(Event:Other_GETS, in_msg.addr, cache_entry, tbe);
            }
          } else {
            // Single-cache system: a GETS can only come from DMA.
            trigger(Event:NC_DMA_GETS, in_msg.addr, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Invalidate, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:BLOCK_ACK) {
          trigger(Event:Block_Ack, in_msg.addr, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }
420
421  // Nothing from the request network
422
  // Mandatory Queue
  // Processor-side requests (lowest rank). For each request this port
  // decides whether the block hits in the "right" L1, must be shuffled
  // out of the "wrong" L1, pulled up from the L2, or fetched from the
  // system -- making L1/L2 room first when necessary. The two halves
  // (instruction vs. data) are mirror images of each other.
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
    if (mandatoryQueue_in.isReady(clockEdge())) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        // Check for data access to blocks in I-cache and ifetches to blocks in D-cache
        TBE tbe := TBEs[in_msg.LineAddress];

        if (in_msg.Type == RubyRequestType:IFETCH) {
          // ** INSTRUCTION ACCESS ***

          Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
          if (is_valid(L1Icache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line.
            // We know it can't be in the L2 due to exclusion
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Icache_entry, tbe);
          } else {
            // Check to see if it is in the OTHER L1
            Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
            if (is_valid(L1Dcache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              if (L2cache.cacheAvail(in_msg.LineAddress)) {
                trigger(Event:L1_to_L2, in_msg.LineAddress, L1Dcache_entry, tbe);
              } else {
                // L2 set is full: first evict an L2 victim to make room.
                Addr l2_victim_addr := L2cache.cacheProbe(in_msg.LineAddress);
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }

            if (L1Icache.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1

              Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
              if (is_valid(L2cache_entry)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1I, in_msg.LineAddress,
                        L2cache_entry, tbe);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, L1Icache_entry, tbe);
              }
            } else {
              // No room in the L1, so we need to make room
              // Check if the line we want to evict is not locked
              Addr l1i_victim_addr := L1Icache.cacheProbe(in_msg.LineAddress);
              check_on_cache_probe(mandatoryQueue_in, l1i_victim_addr);
              if (L2cache.cacheAvail(l1i_victim_addr)) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2,
                        l1i_victim_addr,
                        getL1ICacheEntry(l1i_victim_addr),
                        TBEs[l1i_victim_addr]);
              } else {
                Addr l2_victim_addr := L2cache.cacheProbe(l1i_victim_addr);
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }
          }
        } else {
          // *** DATA ACCESS ***

          Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
          if (is_valid(L1Dcache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line.
            // We know it can't be in the L2 due to exclusion
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Dcache_entry, tbe);
          } else {

            // Check to see if it is in the OTHER L1
            Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
            if (is_valid(L1Icache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              if (L2cache.cacheAvail(in_msg.LineAddress)) {
                trigger(Event:L1_to_L2, in_msg.LineAddress, L1Icache_entry, tbe);
              } else {
                // L2 set is full: first evict an L2 victim to make room.
                Addr l2_victim_addr := L2cache.cacheProbe(in_msg.LineAddress);
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }

            if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1
              Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
              if (is_valid(L2cache_entry)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1D, in_msg.LineAddress,
                        L2cache_entry, tbe);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, L1Dcache_entry, tbe);
              }
            } else {
              // No room in the L1, so we need to make room
              // Check if the line we want to evict is not locked
              Addr l1d_victim_addr := L1Dcache.cacheProbe(in_msg.LineAddress);
              check_on_cache_probe(mandatoryQueue_in, l1d_victim_addr);
              if (L2cache.cacheAvail(l1d_victim_addr)) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2,
                        l1d_victim_addr,
                        getL1DCacheEntry(l1d_victim_addr),
                        TBEs[l1d_victim_addr]);
              } else {
                Addr l2_victim_addr := L2cache.cacheProbe(l1d_victim_addr);
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }
          }
        }
      }
    }
  }
553
554  // ACTIONS
555
  // Issue a GETS to the directory and record how many responses to
  // expect before the request completes.
  action(a_issueGETS, "a", desc="Issue GETS") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:GETS;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      // Timestamp for latency profiling; carried through the protocol.
      out_msg.InitialRequestTime := curCycle();

      // One from each other cache (n-1) plus the memory (+1)
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
    }
  }
570
  // Issue a GETX to the directory and record how many responses to
  // expect before exclusive ownership is obtained.
  action(b_issueGETX, "b", desc="Issue GETX") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := curCycle();

      // One from each other cache (n-1) plus the memory (+1)
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
    }
  }
585
  // Issue a GETX only when there are other L1 caches to invalidate;
  // with a single cache no network request is needed.
  action(b_issueGETXIfMoreThanOne, "bo", desc="Issue GETX") {
    if (machineCount(MachineType:L1Cache) > 1) {
      enqueue(requestNetwork_out, RequestMsg, issue_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.InitialRequestTime := curCycle();
      }
    }

    // One from each other cache (n-1) plus the memory (+1)
    // NOTE(review): unlike the sibling actions, NumPendingMsgs is set
    // outside the enqueue (and outside the machineCount guard), so tbe
    // is written even in the single-cache case -- presumably the
    // transitions using this action always allocate a TBE first; confirm.
    tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
  }
602
  // Issue a GETF (flush-initiated exclusive request) to the directory
  // and record the expected response count.
  action(bf_issueGETF, "bf", desc="Issue GETF") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:GETF;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := curCycle();

      // One from each other cache (n-1) plus the memory (+1)
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
    }
  }
617
  // Respond to a forwarded request with exclusive data from the cache
  // entry, giving up our exclusive copy to the requestor.
  action(c_sendExclusiveData, "c", desc="Send exclusive data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        // Directed probe: we are the only responder, so this message
        // carries the full ack count; otherwise data counts as 2 acks.
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        // Propagate timestamps for latency profiling.
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }
640
  // Same as c_sendExclusiveData, but the block now lives only in the
  // TBE (e.g. a writeback in flight), so respond from the TBE copy.
  action(ct_sendExclusiveDataFromTBE, "ct", desc="Send exclusive data from tbe to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        // Directed probe: sole responder carries the full ack count.
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }
663
  // Start a writeback by sending a PUT request to the directory.
  action(d_issuePUT, "d", desc="Issue PUT") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:PUT;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }
673
  // Start a flush-initiated writeback by sending a PUTF to the directory.
  action(df_issuePUTF, "df", desc="Issue PUTF") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:PUTF;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }
683
  // Respond to a forwarded request with (non-exclusive) data from the
  // cache entry.
  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        // Directed probe: sole responder carries the full ack count;
        // otherwise data counts as 2 acks.
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }
706
  // Respond with DATA_SHARED from the cache entry while this cache
  // remains the owner of the block.
  action(ee_sendDataShared, "\e", desc="Send data from cache to requestor, remaining the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        // Directed probe: sole responder carries the full ack count.
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }
730
  // Same as ee_sendDataShared, but the data comes from the TBE rather than
  // the cache entry (used while the block lives only in a transient buffer).
  action(et_sendDataSharedFromTBE, "\et", desc="Send data from TBE to requestor, keep a shared copy") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }
754
  // Multicast DATA_SHARED from the cache entry to every requestor that the
  // directory merged into this forwarded request; this node stays the owner.
  action(em_sendDataSharedMultiple, "em", desc="Send data from cache to all requestors, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        // Destination set was pre-merged by the directory.
        out_msg.Destination := in_msg.MergedRequestors;
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }
774
  // Same as em_sendDataSharedMultiple, but sourcing the block from the TBE.
  action(emt_sendDataSharedMultipleFromTBE, "emt", desc="Send data from tbe to all requestors") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }
794
  // Acknowledge a (broadcast) probe without supplying data: this cache holds
  // no copy the requestor needs.  Directed probes never take this path.
  action(f_sendAck, "f", desc="Send ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        out_msg.SilentAcks := in_msg.SilentAcks;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }
811
  // Like f_sendAck, but ACK_SHARED: tells the requestor this cache keeps a
  // shared copy of the block.
  action(ff_sendAckShared, "\f", desc="Send shared ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:ACK_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        out_msg.SilentAcks := in_msg.SilentAcks;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }
828
  // Unblock the directory after a transaction completes (plain UNBLOCK,
  // no final-state hint).
  action(g_sendUnblock, "g", desc="Send unblock to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  // Unblock variant indicating the cache ended in an owned/exclusive
  // (M/O/E) state.
  action(gm_sendUnblockM, "gm", desc="Send unblock to memory and indicate M/O/E state") {
    enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKM;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  // Unblock variant indicating the cache ended in Shared; also reports the
  // current owner (recorded earlier in the TBE by uo_updateCurrentOwner).
  action(gs_sendUnblockS, "gs", desc="Send unblock to memory and indicate S state") {
    enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKS;
      out_msg.Sender := machineID;
      out_msg.CurOwner := tbe.CurOwner;
      out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }
860
  // Complete a load hit: touch D-cache MRU state and hand the data to the
  // sequencer (externalHit=false; testAndClearLocalHit reports/clears the
  // local-hit latency bookkeeping).
  action(h_load_hit, "hd", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Dcache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk, false,
                           testAndClearLocalHit(cache_entry));
  }

  // Complete an instruction-fetch hit; identical to h_load_hit but touches
  // the I-cache replacement state instead.
  action(h_ifetch_hit, "hi", desc="Notify sequencer the ifetch completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Icache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk, false,
                           testAndClearLocalHit(cache_entry));
  }
876
  // Complete a load that required external messages: report the data with
  // externalHit=true plus the responder's machine type and the timestamps
  // accumulated in the TBE for latency profiling.
  action(hx_external_load_hit, "hx", desc="load required external msgs") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(responseToCache_in, ResponseMsg) {
      // MRU update by address in both L1s; presumably a no-op for the cache
      // that doesn't hold the tag — TODO confirm CacheMemory::setMRU semantics.
      L1Icache.setMRU(address);
      L1Dcache.setMRU(address);
      sequencer.readCallback(address, cache_entry.DataBlk, true,
                 machineIDToMachineType(in_msg.Sender), tbe.InitialRequestTime,
                 tbe.ForwardRequestTime, tbe.FirstResponseTime);
    }
  }
889
  // Complete a store hit: notify the sequencer (which applies the write to
  // the block), then mark the line dirty.  Atomic RMW accesses are flagged
  // on the entry for migratory-sharing decisions elsewhere.
  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(mandatoryQueue_in, RubyRequest) {
      L1Dcache.setMRU(cache_entry);
      sequencer.writeCallback(address, cache_entry.DataBlk, false,
                              testAndClearLocalHit(cache_entry));

      cache_entry.Dirty := true;
      if (in_msg.Type == RubyRequestType:ATOMIC) {
        cache_entry.AtomicAccessed := true;
      }
    }
  }
904
  // Complete a flush request: report the (TBE-held) data back to the
  // sequencer as a local write completion.
  action(hh_flush_hit, "\hf", desc="Notify sequencer that flush completed.") {
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", tbe.DataBlk);
    sequencer.writeCallback(address, tbe.DataBlk, false, MachineType:L1Cache);
  }
910
  // Complete a store that required external messages (responder taken from
  // the response message currently at the head of responseToCache_in).
  action(sx_external_store_hit, "sx", desc="store required external msgs.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(responseToCache_in, ResponseMsg) {
      L1Icache.setMRU(address);
      L1Dcache.setMRU(address);
      sequencer.writeCallback(address, cache_entry.DataBlk, true,
              machineIDToMachineType(in_msg.Sender), tbe.InitialRequestTime,
              tbe.ForwardRequestTime, tbe.FirstResponseTime);
    }
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    // The sequencer callback applied the store; mark the block dirty.
    cache_entry.Dirty := true;
  }
925
  // Trigger-driven variant of sx_external_store_hit: invoked from the
  // trigger queue, so the responder is taken from tbe.LastResponder instead
  // of an in-flight response message.
  action(sxt_trig_ext_store_hit, "sxt", desc="store required external msgs.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Icache.setMRU(address);
    L1Dcache.setMRU(address);
    sequencer.writeCallback(address, cache_entry.DataBlk, true,
            machineIDToMachineType(tbe.LastResponder), tbe.InitialRequestTime,
            tbe.ForwardRequestTime, tbe.FirstResponseTime);

    cache_entry.Dirty := true;
  }
938
  // Allocate a TBE for this address, seeding it with a copy of the cache
  // entry's data/dirty state (the data copy is only consumed by writebacks).
  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(TBEs);
    assert(is_valid(cache_entry));
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
    tbe.Dirty := cache_entry.Dirty;
    tbe.Sharers := false;
  }

  // Allocate a TBE without a backing cache entry (e.g. for flushes from I);
  // data fields start clean/empty.
  action(it_allocateTBE, "it", desc="Allocate TBE") {
    check_allocate(TBEs);
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.Dirty := false;
    tbe.Sharers := false;
  }
956
  // Consume the head of the internal trigger queue.
  action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
    triggerQueue_in.dequeue(clockEdge());
  }

  // Consume the head of the CPU-facing mandatory request queue.
  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue(clockEdge());
  }
964
965  action(l_popForwardQueue, "l", desc="Pop forwareded request queue.") {
966    forwardToCache_in.dequeue(clockEdge());
967  }
968
  // Copy the block (data + dirty bit) from the TBE into a freshly allocated
  // L2 entry during an L1-to-L2 transfer.
  action(hp_copyFromTBEToL2, "li", desc="Copy data from TBE to L2 cache entry.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    cache_entry.Dirty   := tbe.Dirty;
    cache_entry.DataBlk := tbe.DataBlk;
  }

  // Copy the block from the TBE into a freshly allocated L1 entry during an
  // L2-to-L1 transfer; FromL2 marks the entry as having migrated up.
  action(nb_copyFromTBEToL1, "fu", desc="Copy data from TBE to L1 cache entry.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    cache_entry.Dirty   := tbe.Dirty;
    cache_entry.DataBlk := tbe.DataBlk;
    cache_entry.FromL2 := true;
  }
983
  // Account for an arriving response: subtract its Acks (and, once per
  // transaction, the SilentAcks it reports) from the pending-message count,
  // and record responder identity plus request/response timestamps in the
  // TBE for latency profiling.
  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
    peek(responseToCache_in, ResponseMsg) {
      assert(in_msg.Acks >= 0);
      assert(is_valid(tbe));
      DPRINTF(RubySlicc, "Sender = %s\n", in_msg.Sender);
      DPRINTF(RubySlicc, "SilentAcks = %d\n", in_msg.SilentAcks);
      // Silent acks are applied at most once per transaction.
      if (tbe.AppliedSilentAcks == false) {
        tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.SilentAcks;
        tbe.AppliedSilentAcks := true;
      }
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      APPEND_TRANSITION_COMMENT(tbe.NumPendingMsgs);
      APPEND_TRANSITION_COMMENT(in_msg.Sender);
      tbe.LastResponder := in_msg.Sender;
      // All responses to one request must agree on the original timestamps.
      if (tbe.InitialRequestTime != zero_time() && in_msg.InitialRequestTime != zero_time()) {
        assert(tbe.InitialRequestTime == in_msg.InitialRequestTime);
      }
      if (in_msg.InitialRequestTime != zero_time()) {
        tbe.InitialRequestTime := in_msg.InitialRequestTime;
      }
      if (tbe.ForwardRequestTime != zero_time() && in_msg.ForwardRequestTime != zero_time()) {
        assert(tbe.ForwardRequestTime == in_msg.ForwardRequestTime);
      }
      if (in_msg.ForwardRequestTime != zero_time()) {
        tbe.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
      // Stamp the arrival of the first response only.
      if (tbe.FirstResponseTime == zero_time()) {
        tbe.FirstResponseTime := curCycle();
      }
    }
  }
  // Remember who sent the response as the block's current owner; later
  // reported to the directory by gs_sendUnblockS.
  action(uo_updateCurrentOwner, "uo", desc="When moving SS state, update current owner.") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.CurOwner := in_msg.Sender;
    }
  }
1023
  // Consume the head of the response queue.
  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseToCache_in.dequeue(clockEdge());
  }
1027
1028  action(ll_L2toL1Transfer, "ll", desc="") {
1029    enqueue(triggerQueue_out, TriggerMsg, l2_cache_hit_latency) {
1030      out_msg.addr := address;
1031      out_msg.Type := TriggerType:L2_to_L1;
1032    }
1033  }
1034
  // If no more responses are outstanding, fire an ALL_ACKS trigger; the
  // variant chosen records whether any sharers were observed.
  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    assert(is_valid(tbe));
    if (tbe.NumPendingMsgs == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.addr := address;
        if (tbe.Sharers) {
          out_msg.Type := TriggerType:ALL_ACKS;
        } else {
          out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
        }
      }
    }
  }
1048
  // Account for one expected response that will not arrive (or arrived
  // implicitly).
  action(p_decrementNumberOfMessagesByOne, "p", desc="Decrement the number of messages for which we're waiting by one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
  }

  // Expect one additional response before the transaction can complete.
  action(pp_incrementNumberOfMessagesByOne, "\p", desc="Increment the number of messages for which we're waiting by one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.NumPendingMsgs + 1;
  }
1058
  // Respond to a forwarded request with exclusive DATA sourced from the TBE
  // (block is mid-writeback/transient).  Mirrors e_sendData's ack logic.
  action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
    peek(forwardToCache_in, RequestMsg) {
        // A forwarded request should never originate from this cache.
        assert(in_msg.Requestor != machineID);
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }
1083
  // Respond with DATA_SHARED sourced from the TBE while retaining
  // ownership.  Shared-data counterpart of q_sendDataFromTBEToCache.
  action(sq_sendSharedDataFromTBEToCache, "sq", desc="Send shared data from TBE to cache, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
        // A forwarded request should never originate from this cache.
        assert(in_msg.Requestor != machineID);
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }
1108
  // Multicast DATA_SHARED from the TBE to all merged requestors while
  // retaining ownership; TBE counterpart of em_sendDataSharedMultiple.
  action(qm_sendDataFromTBEToCache, "qm", desc="Send data from TBE to cache, multiple sharers, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }
1128
  // Write the TBE's block back to the directory/memory as a non-exclusive
  // writeback (WB_DIRTY with data, or WB_CLEAN control-sized).
  action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WB_DIRTY;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_CLEAN;
        // NOTE: in a real system this would not send data.  We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }
1149
  // Record that at least one other sharer responded; consulted by
  // o_checkForCompletion to pick the ALL_ACKS trigger variant.
  action(r_setSharerBit, "r", desc="We saw other sharers") {
    assert(is_valid(tbe));
    tbe.Sharers := true;
  }

  // Release this address's TBE and clear the local tbe handle.
  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }
1159
  // Write the TBE's block back to the directory as an *exclusive* writeback
  // (WB_EXCLUSIVE_DIRTY/CLEAN).  DataBlk is assigned unconditionally and
  // again in each branch; the branch assignments are redundant but harmless.
  action(t_sendExclusiveDataFromTBEToMemory, "t", desc="Send exclusive data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_DIRTY;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_CLEAN;
        // NOTE: in a real system this would not send data.  We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }
1181
  // Install the data (and dirty bit) from the incoming response into the
  // cache entry.
  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;
    }
  }

  // Install the data (and dirty bit) from the incoming response into the
  // TBE instead of the cache entry.
  action(uf_writeDataToCacheTBE, "uf", desc="Write data to TBE") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.DataBlk := in_msg.DataBlk;
      tbe.Dirty := in_msg.Dirty;
    }
  }
1197
  // Install response data into the cache entry, asserting it matches the
  // copy already held (sanity check for responses that should be echoes).
  // Dirty is sticky: OR'd with the existing bit.
  action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
              cache_entry.DataBlk, in_msg.DataBlk);
      assert(cache_entry.DataBlk == in_msg.DataBlk);
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty || cache_entry.Dirty;
    }
  }
1208
1209  action(vt_writeDataToTBEVerify, "vt", desc="Write data to TBE, assert it was same as before") {
1210    peek(responseToCache_in, ResponseMsg) {
1211      assert(is_valid(tbe));
1212      DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
1213              tbe.DataBlk, in_msg.DataBlk);
1214      assert(tbe.DataBlk == in_msg.DataBlk);
1215      tbe.DataBlk := in_msg.DataBlk;
1216      tbe.Dirty := in_msg.Dirty || tbe.Dirty;
1217    }
1218  }
1219
  // Free the block's L1 frame (D-cache if the tag is there, else I-cache),
  // leaving the controller free to refill the frame in parallel.
  action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block.  Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
    if (L1Dcache.isTagPresent(address)) {
      L1Dcache.deallocate(address);
    } else {
      L1Icache.deallocate(address);
    }
    unset_cache_entry();
  }

  // Allocate an L1 D-cache frame for the address unless an entry is
  // already bound.
  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1Dcache.allocate(address, new Entry));
    }
  }

  // Allocate an L1 I-cache frame for the address unless an entry is
  // already bound.
  action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1Icache.allocate(address, new Entry));
    }
  }

  // Allocate an L2 frame for the address (used when demoting L1 blocks).
  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    set_cache_entry(L2cache.allocate(address, new Entry));
  }

  // Free the block's L2 frame.
  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block.  Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cache.deallocate(address);
    unset_cache_entry();
  }
1249
  // Free the block from whichever cache currently holds it: L1D, else L1I,
  // else (asserted) L2.
  action(gr_deallocateCacheBlock, "\gr", desc="Deallocate an L1 or L2 cache block.") {
    if (L1Dcache.isTagPresent(address)) {
      L1Dcache.deallocate(address);
    }
    else if (L1Icache.isTagPresent(address)){
      L1Icache.deallocate(address);
    }
    else {
      assert(L2cache.isTagPresent(address));
      L2cache.deallocate(address);
    }
    unset_cache_entry();
  }
1263
  // Optionally notify the CPU that this block is being invalidated (gated
  // by the send_evictions parameter; used e.g. for LL/SC monitors).
  action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
    if (send_evictions) {
      DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
      sequencer.evictionCallback(address);
    }
  }
1270
  // --- Statistics actions: bump demand hit/miss counters per cache. ---

  action(uu_profileL1DataMiss, "\udm", desc="Profile the demand miss") {
      ++L1Dcache.demand_misses;
  }

  action(uu_profileL1DataHit, "\udh", desc="Profile the demand hits") {
      ++L1Dcache.demand_hits;
  }

  action(uu_profileL1InstMiss, "\uim", desc="Profile the demand miss") {
      ++L1Icache.demand_misses;
  }

  action(uu_profileL1InstHit, "\uih", desc="Profile the demand hits") {
      ++L1Icache.demand_hits;
  }

  action(uu_profileL2Miss, "\um", desc="Profile the demand miss") {
      ++L2cache.demand_misses;
  }

  action(uu_profileL2Hit, "\uh", desc="Profile the demand hits ") {
      ++L2cache.demand_hits;
  }
1294
  // Park the head mandatory request in the per-address wait buffer; it is
  // re-injected by a later kd_/ka_ wake-up.
  action(zz_stallAndWaitMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
    stall_and_wait(mandatoryQueue_in, address);
  }

  action(z_stall, "z", desc="stall") {
    // do nothing and the special z_stall action will return a protocol stall
    // so that the next port is checked
  }

  // Wake requests parked (via stall_and_wait) on this address.
  action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
    wakeUpBuffers(address);
  }

  // Wake every parked request, regardless of address.
  action(ka_wakeUpAllDependents, "ka", desc="wake-up all dependents") {
    wakeUpAllBuffers();
  }
1311
1312  //*****************************************************
1313  // TRANSITIONS
1314  //*****************************************************
1315
  // Transitions for Load/Store/L2_Replacement from transient states:
  // requests that cannot be serviced mid-transaction are parked in the
  // wait buffer (zz_) or protocol-stalled (z_) until the state resolves.
  transition({IM, IM_F, MM_WF, SM, SM_F, ISM, ISM_F, OM, OM_F, IS, SS, OI, MI, II, ST, OT, MT, MMT}, {Store, L2_Replacement}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, IM_F, MM_WF, SM, SM_F, ISM, ISM_F, OM, OM_F, IS, SS, OI, MI, II}, {Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({M_W, MM_W}, {L2_Replacement, Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, IS, OI, MI, II, ST, OT, MT, MMT, MI_F, MM_F, OM_F, IM_F, ISM_F, SM_F, MM_WF}, {Load, Ifetch}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, SM, ISM, OM, IS, SS, MM_W, M_W, OI, MI, II, ST, OT, MT, MMT, IM_F, SM_F, ISM_F, OM_F, MM_WF, MI_F, MM_F, IR, SR, OR, MR, MMR}, L1_to_L2) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({MI_F, MM_F}, {Store}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({MM_F, MI_F}, {Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  // External probes arriving while a block is mid L2-to-L1 transfer (xT) or
  // just transferred (xR): protocol-stall so other ports can make progress.
  transition({ST, OT, MT, MMT}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate, Flush_line}) {
    z_stall;
  }

  transition({IR, SR, OR, MR, MMR}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate}) {
    z_stall;
  }
1352
  // Transitions moving data between the L1 and L2 caches.
  // L1 -> L2: stage the block in a TBE, swap the frame, copy it back down.
  transition({S, O, M, MM}, L1_to_L2) {
    i_allocateTBE;
    gg_deallocateL1CacheBlock;
    vv_allocateL2CacheBlock;
    hp_copyFromTBEToL2;
    s_deallocateTBE;
  }

  // L2 -> L1D transfers: stage through a TBE, move the frame up, park the
  // triggering request, and schedule the completion trigger (xT -> xR).
  transition(S, Trigger_L2_to_L1D, ST) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(O, Trigger_L2_to_L1D, OT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(M, Trigger_L2_to_L1D, MT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(MM, Trigger_L2_to_L1D, MMT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  // L2 -> L1I transfers: same sequence, but the frame goes to the I-cache.
  transition(S, Trigger_L2_to_L1I, ST) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(O, Trigger_L2_to_L1I, OT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(M, Trigger_L2_to_L1I, MT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(MM, Trigger_L2_to_L1I, MMT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  // Completion triggers move xT -> xR and wake requests parked on this
  // address.
  transition(ST, Complete_L2_to_L1, SR) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(OT, Complete_L2_to_L1, OR) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(MT, Complete_L2_to_L1, MR) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(MMT, Complete_L2_to_L1, MMR) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }
1461
  // Transitions from Idle: allocate a frame + TBE, issue the coherence
  // request, and count the miss.
  transition({I,IR}, Load, IS) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileL1DataMiss;
    uu_profileL2Miss;
    k_popMandatoryQueue;
  }

  transition({I,IR}, Ifetch, IS) {
    jj_allocateL1ICacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileL1InstMiss;
    uu_profileL2Miss;
    k_popMandatoryQueue;
  }

  transition({I,IR}, Store, IM) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    b_issueGETX;
    uu_profileL1DataMiss;
    uu_profileL2Miss;
    k_popMandatoryQueue;
  }

  // Flush from Idle: no cache frame is needed, so use the entry-less TBE
  // allocator.
  transition({I, IR}, Flush_line, IM_F) {
    it_allocateTBE;
    bf_issueGETF;
    k_popMandatoryQueue;
  }

  // Probes against a block we don't hold: just ack.
  transition(I, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }
1500
  // Transitions from Shared.
  transition({S, SM, ISM}, Load) {
    h_load_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition({S, SM, ISM}, Ifetch) {
    h_ifetch_hit;
    uu_profileL1InstHit;
    k_popMandatoryQueue;
  }

  // SR: block just arrived from L2 — an L1 miss that hit in L2.
  transition(SR, Load, S) {
    h_load_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(SR, Ifetch, S) {
    h_ifetch_hit;
    uu_profileL1InstMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  // Store to a shared block: upgrade via GETX.
  transition({S,SR}, Store, SM) {
    i_allocateTBE;
    b_issueGETX;
    uu_profileL1DataMiss;
    uu_profileL2Miss;
    k_popMandatoryQueue;
  }

  transition({S, SR}, Flush_line, SM_F) {
    i_allocateTBE;
    bf_issueGETF;
    forward_eviction_to_cpu;
    gg_deallocateL1CacheBlock;
    k_popMandatoryQueue;
  }

  // Shared blocks are dropped silently on replacement (no PUT).
  transition(S, L2_Replacement, I) {
    forward_eviction_to_cpu;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(S, {Other_GETX, Invalidate}, I) {
    f_sendAck;
    forward_eviction_to_cpu;
    gr_deallocateCacheBlock;
    l_popForwardQueue;
  }

  transition(S, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ff_sendAckShared;
    l_popForwardQueue;
  }
1563
  // Transitions from Owned.
  transition({O, OM, SS, MM_W, M_W}, {Load}) {
    h_load_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition({O, OM, SS, MM_W, M_W}, {Ifetch}) {
    h_ifetch_hit;
    uu_profileL1InstHit;
    k_popMandatoryQueue;
  }

  transition(OR, Load, O) {
    h_load_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(OR, Ifetch, O) {
    h_ifetch_hit;
    uu_profileL1InstMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  // Store from Owned: upgrade via GETX; as owner we already hold the data,
  // so expect one fewer response (p_decrement...).
  transition({O,OR}, Store, OM) {
    i_allocateTBE;
    b_issueGETX;
    p_decrementNumberOfMessagesByOne;
    uu_profileL1DataMiss;
    uu_profileL2Miss;
    k_popMandatoryQueue;
  }

  transition({O, OR}, Flush_line, OM_F) {
    i_allocateTBE;
    bf_issueGETF;
    p_decrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    gg_deallocateL1CacheBlock;
    k_popMandatoryQueue;
  }

  // Owned blocks must be written back: go through OI via PUT.
  transition(O, L2_Replacement, OI) {
    i_allocateTBE;
    d_issuePUT;
    forward_eviction_to_cpu;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(O, {Other_GETX, Invalidate}, I) {
    e_sendData;
    forward_eviction_to_cpu;
    gr_deallocateCacheBlock;
    l_popForwardQueue;
  }

  // Read probes: supply data but remain owner.
  transition(O, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(O, Merged_GETS) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }
1635
1636  // Transitions from Modified
      // MM: modified copy with full read/write permission; loads, ifetches
      // and stores all hit locally.
1637  transition({MM, M}, {Ifetch}) {
1638    h_ifetch_hit;
1639    uu_profileL1InstHit;
1640    k_popMandatoryQueue;
1641  }
1642
1643  transition({MM, M}, {Load}) {
1644    h_load_hit;
1645    uu_profileL1DataHit;
1646    k_popMandatoryQueue;
1647  }
1648
1649  transition(MM, Store) {
1650    hh_store_hit;
1651    uu_profileL1DataHit;
1652    k_popMandatoryQueue;
1653  }
1654
      // MMR: MM copy resident only below L1 (presumably) — the access is an
      // L1 miss / L2 hit; return to MM and wake dependents.
1655  transition(MMR, Load, MM) {
1656    h_load_hit;
1657    uu_profileL1DataMiss;
1658    uu_profileL2Hit;
1659    k_popMandatoryQueue;
1660    ka_wakeUpAllDependents;
1661  }
1662
1663  transition(MMR, Ifetch, MM) {
1664    h_ifetch_hit;
1665    uu_profileL1InstMiss;
1666    uu_profileL2Hit;
1667    k_popMandatoryQueue;
1668    ka_wakeUpAllDependents;
1669  }
1670
1671  transition(MMR, Store, MM) {
1672    hh_store_hit;
1673    uu_profileL1DataMiss;
1674    uu_profileL2Hit;
1675    k_popMandatoryQueue;
1676    ka_wakeUpAllDependents;
1677  }
1678
      // Flush from any modified/exclusive resident state: GETF plus
      // pre-decremented message count (owner keeps the data in its TBE).
1679  transition({MM, M, MMR, MR}, Flush_line, MM_F) {
1680    i_allocateTBE;
1681    bf_issueGETF;
1682    p_decrementNumberOfMessagesByOne;
1683    forward_eviction_to_cpu;
1684    gg_deallocateL1CacheBlock;
1685    k_popMandatoryQueue;
1686  }
1687
      // Directory blocked the line for the flush: now issue the PUTF and
      // wait for the writeback handshake in MI_F.
1688  transition(MM_F, Block_Ack, MI_F) {
1689    df_issuePUTF;
1690    l_popForwardQueue;
1691    kd_wakeUpDependents;
1692  }
1693
      // Dirty data: replacement requires a PUT / writeback (state MI).
1694  transition(MM, L2_Replacement, MI) {
1695    i_allocateTBE;
1696    d_issuePUT;
1697    forward_eviction_to_cpu;
1698    rr_deallocateL2CacheBlock;
1699    ka_wakeUpAllDependents;
1700  }
1701
1702  transition(MM, {Other_GETX, Invalidate}, I) {
1703    c_sendExclusiveData;
1704    forward_eviction_to_cpu;
1705    gr_deallocateCacheBlock;
1706    l_popForwardQueue;
1707  }
1708
      // Plain Other_GETS from MM hands over the whole block exclusively
      // (-> I), while the _No_Mig / DMA variants below keep ownership
      // (-> O) — consistent with a migratory-sharing optimization.
1709  transition(MM, Other_GETS, I) {
1710    c_sendExclusiveData;
1711    forward_eviction_to_cpu;
1712    gr_deallocateCacheBlock;
1713    l_popForwardQueue;
1714  }
1715
1716  transition(MM, NC_DMA_GETS, O) {
1717    ee_sendDataShared;
1718    l_popForwardQueue;
1719  }
1720
1721  transition(MM, Other_GETS_No_Mig, O) {
1722    ee_sendDataShared;
1723    l_popForwardQueue;
1724  }
1725
1726  transition(MM, Merged_GETS, O) {
1727    em_sendDataSharedMultiple;
1728    l_popForwardQueue;
1729  }
1730
1731  // Transitions from Dirty Exclusive
      // M: exclusive/dirty; the first store promotes the block to MM.
1732  transition(M, Store, MM) {
1733    hh_store_hit;
1734    uu_profileL1DataHit;
1735    k_popMandatoryQueue;
1736  }
1737
      // MR: M copy resident only below L1 (presumably) — L1 miss / L2 hit.
1738  transition(MR, Load, M) {
1739    h_load_hit;
1740    uu_profileL1DataMiss;
1741    uu_profileL2Hit;
1742    k_popMandatoryQueue;
1743    ka_wakeUpAllDependents;
1744  }
1745
1746  transition(MR, Ifetch, M) {
1747    h_ifetch_hit;
1748    uu_profileL1InstMiss;
1749    uu_profileL2Hit;
1750    k_popMandatoryQueue;
1751    ka_wakeUpAllDependents;
1752  }
1753
1754  transition(MR, Store, MM) {
1755    hh_store_hit;
1756    uu_profileL1DataMiss;
1757    uu_profileL2Hit;
1758    k_popMandatoryQueue;
1759    ka_wakeUpAllDependents;
1760  }
1761
      // Replacement of a dirty-exclusive block: PUT / writeback via MI.
1762  transition(M, L2_Replacement, MI) {
1763    i_allocateTBE;
1764    d_issuePUT;
1765    forward_eviction_to_cpu;
1766    rr_deallocateL2CacheBlock;
1767    ka_wakeUpAllDependents;
1768  }
1769
1770  transition(M, {Other_GETX, Invalidate}, I) {
1771    c_sendExclusiveData;
1772    forward_eviction_to_cpu;
1773    gr_deallocateCacheBlock;
1774    l_popForwardQueue;
1775  }
1776
      // Unlike MM, reads from M never migrate the block: all GETS variants
      // downgrade to Owned while supplying shared data.
1777  transition(M, {Other_GETS, Other_GETS_No_Mig}, O) {
1778    ee_sendDataShared;
1779    l_popForwardQueue;
1780  }
1781
1782  transition(M, NC_DMA_GETS, O) {
1783    ee_sendDataShared;
1784    l_popForwardQueue;
1785  }
1786
1787  transition(M, Merged_GETS, O) {
1788    em_sendDataSharedMultiple;
1789    l_popForwardQueue;
1790  }
1791
1792  // Transitions from IM
      // IM: GETX issued from Invalid; waiting for data and acks. Requests
      // from others are simply acked — we hold nothing yet.
1793
1794  transition({IM, IM_F}, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
1795    f_sendAck;
1796    l_popForwardQueue;
1797  }
1798
      // Count each invalidation ack and check whether the request completed.
1799  transition({IM, IM_F, MM_F}, Ack) {
1800    m_decrementNumberOfMessages;
1801    o_checkForCompletion;
1802    n_popResponseQueue;
1803  }
1804
      // Data arrived but sharers may still need to ack: move to ISM.
1805  transition(IM, Data, ISM) {
1806    u_writeDataToCache;
1807    m_decrementNumberOfMessages;
1808    o_checkForCompletion;
1809    n_popResponseQueue;
1810  }
1811
      // Flush variant: data is staged in the TBE, not the cache.
1812  transition(IM_F, Data, ISM_F) {
1813      uf_writeDataToCacheTBE;
1814      m_decrementNumberOfMessages;
1815      o_checkForCompletion;
1816      n_popResponseQueue;
1817  }
1818
      // Exclusive data: no sharers held the block, so the store can be
      // completed (sx_external_store_hit) and we wait out MM_W.
1819  transition(IM, Exclusive_Data, MM_W) {
1820    u_writeDataToCache;
1821    m_decrementNumberOfMessages;
1822    o_checkForCompletion;
1823    sx_external_store_hit;
1824    n_popResponseQueue;
1825    kd_wakeUpDependents;
1826  }
1827
1828  transition(IM_F, Exclusive_Data, MM_WF) {
1829      uf_writeDataToCacheTBE;
1830      m_decrementNumberOfMessages;
1831      o_checkForCompletion;
1832      n_popResponseQueue;
1833  }
1834
1835  // Transitions from SM
      // SM: upgrade (GETX/GETF) issued while holding a Shared copy. Reads
      // from others are acked as a sharer since our copy is still valid.
1836  transition({SM, SM_F}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
1837    ff_sendAckShared;
1838    l_popForwardQueue;
1839  }
1840
      // A competing GETX invalidates our shared copy: ack it and fall back
      // to IM — we must now also receive the data.
1841  transition(SM, {Other_GETX, Invalidate}, IM) {
1842    f_sendAck;
1843    forward_eviction_to_cpu;
1844    l_popForwardQueue;
1845  }
1846
1847  transition(SM_F, {Other_GETX, Invalidate}, IM_F) {
1848    f_sendAck;
1849    forward_eviction_to_cpu;
1850    l_popForwardQueue;
1851  }
1852
1853  transition({SM, SM_F}, Ack) {
1854    m_decrementNumberOfMessages;
1855    o_checkForCompletion;
1856    n_popResponseQueue;
1857  }
1858
      // Data during an upgrade: the "Verify" write presumably checks the
      // arriving data against the copy we already hold (TODO confirm).
1859  transition(SM, {Data, Exclusive_Data}, ISM) {
1860    v_writeDataToCacheVerify;
1861    m_decrementNumberOfMessages;
1862    o_checkForCompletion;
1863    n_popResponseQueue;
1864  }
1865
1866  transition(SM_F, {Data, Exclusive_Data}, ISM_F) {
1867    vt_writeDataToTBEVerify;
1868    m_decrementNumberOfMessages;
1869    o_checkForCompletion;
1870    n_popResponseQueue;
1871  }
1872
1873  // Transitions from ISM
      // ISM: data already received, still collecting invalidation acks.
1874  transition({ISM, ISM_F}, Ack) {
1875    m_decrementNumberOfMessages;
1876    o_checkForCompletion;
1877    n_popResponseQueue;
1878  }
1879
      // All acks in: complete the store, unblock the directory with an
      // exclusive unblock, and retire the TBE.
1880  transition(ISM, All_acks_no_sharers, MM) {
1881    sxt_trig_ext_store_hit;
1882    gm_sendUnblockM;
1883    s_deallocateTBE;
1884    j_popTriggerQueue;
1885    kd_wakeUpDependents;
1886  }
1887
      // Flush path: instead of keeping the block, issue the PUTF to write
      // it back (TBE stays allocated for the MI_F handshake).
1888  transition(ISM_F, All_acks_no_sharers, MI_F) {
1889    df_issuePUTF;
1890    j_popTriggerQueue;
1891    kd_wakeUpDependents;
1892  }
1893
1894  // Transitions from OM
      // OM: upgrade issued while Owner; we can still supply data to others.
1895
      // A competing GETX wins: hand the data over and fall back to IM.
      // The message count is incremented by one — presumably to offset the
      // decrement done when the upgrade was issued, since we now need a
      // data response after all (TODO confirm).
1896  transition(OM, {Other_GETX, Invalidate}, IM) {
1897    e_sendData;
1898    pp_incrementNumberOfMessagesByOne;
1899    forward_eviction_to_cpu;
1900    l_popForwardQueue;
1901  }
1902
      // Flush variant: the data lives in the TBE, not the cache.
1903  transition(OM_F, {Other_GETX, Invalidate}, IM_F) {
1904    q_sendDataFromTBEToCache;
1905    pp_incrementNumberOfMessagesByOne;
1906    forward_eviction_to_cpu;
1907    l_popForwardQueue;
1908  }
1909
1910  transition(OM, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
1911    ee_sendDataShared;
1912    l_popForwardQueue;
1913  }
1914
1915  transition(OM, Merged_GETS) {
1916    em_sendDataSharedMultiple;
1917    l_popForwardQueue;
1918  }
1919
1920  transition(OM_F, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
1921    et_sendDataSharedFromTBE;
1922    l_popForwardQueue;
1923  }
1924
1925  transition(OM_F, Merged_GETS) {
1926    emt_sendDataSharedMultipleFromTBE;
1927    l_popForwardQueue;
1928  }
1929
1930  transition({OM, OM_F}, Ack) {
1931    m_decrementNumberOfMessages;
1932    o_checkForCompletion;
1933    n_popResponseQueue;
1934  }
1935
      // Upgrade complete: perform the store and unblock the directory.
1936  transition(OM, {All_acks, All_acks_no_sharers}, MM) {
1937    sxt_trig_ext_store_hit;
1938    gm_sendUnblockM;
1939    s_deallocateTBE;
1940    j_popTriggerQueue;
1941    kd_wakeUpDependents;
1942  }
1943
      // Flush paths converge on MI_F by issuing the PUTF writeback.
1944  transition({MM_F, OM_F}, {All_acks, All_acks_no_sharers}, MI_F) {
1945    df_issuePUTF;
1946    j_popTriggerQueue;
1947    kd_wakeUpDependents;
1948  }
1949  // Transitions from IS
      // IS: GETS issued from Invalid; waiting for a data response. Requests
      // from others are acked — we hold nothing yet.
1950
1951  transition(IS, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
1952    f_sendAck;
1953    l_popForwardQueue;
1954  }
1955
1956  transition(IS, Ack) {
1957    m_decrementNumberOfMessages;
1958    o_checkForCompletion;
1959    n_popResponseQueue;
1960  }
1961
      // Shared_Ack additionally records that the responder is a sharer.
1962  transition(IS, Shared_Ack) {
1963    m_decrementNumberOfMessages;
1964    r_setSharerBit;
1965    o_checkForCompletion;
1966    n_popResponseQueue;
1967  }
1968
      // Plain data: complete the load and collect remaining acks in SS;
      // uo_updateCurrentOwner tracks who supplied the block.
1969  transition(IS, Data, SS) {
1970    u_writeDataToCache;
1971    m_decrementNumberOfMessages;
1972    o_checkForCompletion;
1973    hx_external_load_hit;
1974    uo_updateCurrentOwner;
1975    n_popResponseQueue;
1976    kd_wakeUpDependents;
1977  }
1978
      // Exclusive data: nobody else holds the block, so the read miss
      // yields an exclusive (M) copy after the M_W wait state.
1979  transition(IS, Exclusive_Data, M_W) {
1980    u_writeDataToCache;
1981    m_decrementNumberOfMessages;
1982    o_checkForCompletion;
1983    hx_external_load_hit;
1984    n_popResponseQueue;
1985    kd_wakeUpDependents;
1986  }
1987
1988  transition(IS, Shared_Data, SS) {
1989    u_writeDataToCache;
1990    r_setSharerBit;
1991    m_decrementNumberOfMessages;
1992    o_checkForCompletion;
1993    hx_external_load_hit;
1994    uo_updateCurrentOwner;
1995    n_popResponseQueue;
1996    kd_wakeUpDependents;
1997  }
1998
1999  // Transitions from SS
      // SS: data received for a GETS, still counting acks from peers.
2000
2001  transition(SS, Ack) {
2002    m_decrementNumberOfMessages;
2003    o_checkForCompletion;
2004    n_popResponseQueue;
2005  }
2006
2007  transition(SS, Shared_Ack) {
2008    m_decrementNumberOfMessages;
2009    r_setSharerBit;
2010    o_checkForCompletion;
2011    n_popResponseQueue;
2012  }
2013
      // All responses in: unblock the directory (shared) and settle in S.
2014  transition(SS, All_acks, S) {
2015    gs_sendUnblockS;
2016    s_deallocateTBE;
2017    j_popTriggerQueue;
2018    kd_wakeUpDependents;
2019  }
2020
2021  transition(SS, All_acks_no_sharers, S) {
2022    // Note: The directory might still be the owner, so that is why we go to S
2023    gs_sendUnblockS;
2024    s_deallocateTBE;
2025    j_popTriggerQueue;
2026    kd_wakeUpDependents;
2027  }
2028
2029  // Transitions from MM_W
      // MM_W: store already performed, waiting for the final acks; further
      // stores hit locally in the meantime.
2030
2031  transition(MM_W, Store) {
2032    hh_store_hit;
2033    uu_profileL1DataHit;
2034    k_popMandatoryQueue;
2035  }
2036
2037  transition({MM_W, MM_WF}, Ack) {
2038    m_decrementNumberOfMessages;
2039    o_checkForCompletion;
2040    n_popResponseQueue;
2041  }
2042
2043  transition(MM_W, All_acks_no_sharers, MM) {
2044    gm_sendUnblockM;
2045    s_deallocateTBE;
2046    j_popTriggerQueue;
2047    kd_wakeUpDependents;
2048  }
2049
      // Flush variant: once acks are in, issue the PUTF writeback (MI_F).
2050  transition(MM_WF, All_acks_no_sharers, MI_F) {
2051    df_issuePUTF;
2052    j_popTriggerQueue;
2053    kd_wakeUpDependents;
2054  }
2055  // Transitions from M_W
      // M_W: exclusive data received for a read miss; still waiting for
      // completion. A store during the wait promotes the line to MM_W.
2056
2057  transition(M_W, Store, MM_W) {
2058    hh_store_hit;
2059    uu_profileL1DataHit;
2060    k_popMandatoryQueue;
2061  }
2062
2063  transition(M_W, Ack) {
2064    m_decrementNumberOfMessages;
2065    o_checkForCompletion;
2066    n_popResponseQueue;
2067  }
2068
2069  transition(M_W, All_acks_no_sharers, M) {
2070    gm_sendUnblockM;
2071    s_deallocateTBE;
2072    j_popTriggerQueue;
2073    kd_wakeUpDependents;
2074  }
2075
2076  // Transitions from OI/MI
      // OI/MI: writeback (PUT) in flight; the data now lives in the TBE,
      // so all forwarded requests are served from the TBE.
2077
      // Exclusive requester takes the block entirely: after supplying it
      // we only await the writeback handshake as a non-owner (II).
2078  transition({OI, MI}, {Other_GETX, Invalidate}, II) {
2079    q_sendDataFromTBEToCache;
2080    l_popForwardQueue;
2081  }
2082
      // Readers get shared data; note MI also downgrades to OI here, since
      // the block is no longer exclusive once it has been shared out.
2083  transition({OI, MI}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}, OI) {
2084    sq_sendSharedDataFromTBEToCache;
2085    l_popForwardQueue;
2086  }
2087
2088  transition({OI, MI}, Merged_GETS, OI) {
2089    qm_sendDataFromTBEToCache;
2090    l_popForwardQueue;
2091  }
2092
      // Writeback accepted: MI held the only copy, so exclusive data goes
      // to memory; then the TBE is retired.
2093  transition(MI, Writeback_Ack, I) {
2094    t_sendExclusiveDataFromTBEToMemory;
2095    s_deallocateTBE;
2096    l_popForwardQueue;
2097    kd_wakeUpDependents;
2098  }
2099
      // Flush completion additionally notifies the requester via
      // hh_flush_hit (presumably signals the sequencer — TODO confirm).
2100  transition(MI_F, Writeback_Ack, I) {
2101      hh_flush_hit;
2102      t_sendExclusiveDataFromTBEToMemory;
2103      s_deallocateTBE;
2104      l_popForwardQueue;
2105      kd_wakeUpDependents;
2106  }
2107
2108  transition(OI, Writeback_Ack, I) {
2109    qq_sendDataFromTBEToMemory;
2110    s_deallocateTBE;
2111    l_popForwardQueue;
2112    kd_wakeUpDependents;
2113  }
2114
2115  // Transitions from II
      // II: the block was handed to another requester while our writeback
      // was outstanding; we only ack requests and wait for the handshake.
2116  transition(II, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Other_GETX, Invalidate}, II) {
2117    f_sendAck;
2118    l_popForwardQueue;
2119  }
2120
      // Writeback accepted: nothing left to send but the unblock.
2121  transition(II, Writeback_Ack, I) {
2122    g_sendUnblock;
2123    s_deallocateTBE;
2124    l_popForwardQueue;
2125    kd_wakeUpDependents;
2126  }
2127
      // Writeback rejected (someone else owns the block now): just drop
      // the TBE — the data already went to the new owner.
2128  transition(II, Writeback_Nack, I) {
2129    s_deallocateTBE;
2130    l_popForwardQueue;
2131    kd_wakeUpDependents;
2132  }
2133
      // MM_F: flush in progress while we were modified; the block's data is
      // held in the TBE, so all forwarded requests are served from the TBE.
2134  transition(MM_F, {Other_GETX, Invalidate}, IM_F) {
2135    ct_sendExclusiveDataFromTBE;
2136    pp_incrementNumberOfMessagesByOne;
2137    l_popForwardQueue;
2138  }
2139
2140  transition(MM_F, Other_GETS, IM_F) {
2141    ct_sendExclusiveDataFromTBE;
2142    pp_incrementNumberOfMessagesByOne;
2143    l_popForwardQueue;
2144  }
2145
      // NOTE(review): NC_DMA_GETS uses sq_sendSharedDataFromTBEToCache while
      // Other_GETS_No_Mig below uses et_sendDataSharedFromTBE, though both
      // land in OM_F. Verify the two actions are intentionally different
      // (destination of the response?) rather than an inconsistency.
2146  transition(MM_F, NC_DMA_GETS, OM_F) {
2147    sq_sendSharedDataFromTBEToCache;
2148    l_popForwardQueue;
2149  }
2150
2151  transition(MM_F, Other_GETS_No_Mig, OM_F) {
2152    et_sendDataSharedFromTBE;
2153    l_popForwardQueue;
2154  }
2155
2156  transition(MM_F, Merged_GETS, OM_F) {
2157    emt_sendDataSharedMultipleFromTBE;
2158    l_popForwardQueue;
2159  }
2160}
2161