/*
 * Copyright (c) 2019 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

machine(MachineType:L1Cache, "L1 cache protocol")
 : Sequencer * sequencer;
   CacheMemory * L1Icache;
   CacheMemory * L1Dcache;
   Cycles request_latency := 1;
   Cycles response_latency := 1;
   Cycles use_timeout_latency := 50;
   bool send_evictions;

   // Message Queues
   // From this node's L1 cache TO the network
   // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
   MessageBuffer * requestFromL1Cache, network="To", virtual_network="0",
        vnet_type="request";
   // a local L1 -> this L2 bank
   MessageBuffer * responseFromL1Cache, network="To", virtual_network="2",
        vnet_type="response";

   // To this node's L1 cache FROM the network
   // an L2 bank -> this L1
   MessageBuffer * requestToL1Cache, network="From", virtual_network="0",
        vnet_type="request";
   // an L2 bank -> this L1
   MessageBuffer * responseToL1Cache, network="From", virtual_network="2",
        vnet_type="response";

   MessageBuffer * triggerQueue;

   MessageBuffer * mandatoryQueue;
{
  // STATES
  state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    I, AccessPermission:Invalid, desc="Idle";
    S, AccessPermission:Read_Only, desc="Shared";
    O, AccessPermission:Read_Only, desc="Owned";
    M, AccessPermission:Read_Only, desc="Modified (dirty)";
    M_W, AccessPermission:Read_Only, desc="Modified (dirty)";
    MM, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";
    MM_W, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";

    // Transient States
    IM, AccessPermission:Busy, "IM", desc="Issued GetX";
    SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have an old copy of the line";
    OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
    IS, AccessPermission:Busy, "IS", desc="Issued GetS";
    SI, AccessPermission:Busy, "SI", desc="Issued PutS, waiting for ack";
    OI, AccessPermission:Busy, "OI", desc="Issued PutO, waiting for ack";
    MI, AccessPermission:Busy, "MI", desc="Issued PutX, waiting for ack";
    II, AccessPermission:Busy, "II", desc="Issued PutX/O, saw Fwd_GETS or Fwd_GETX, waiting for ack";
  }
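
  // The *_W states (M_W, MM_W) cover the "use timeout" window that opens
  // when exclusive data arrives: forwarded requests are recycled while a
  // line sits in one of these states (see the {M_W, MM_W} transitions
  // below), so the local processor is guaranteed to complete at least one
  // access before the line can be taken away.  The window closes when
  // Use_Timeout fires, use_timeout_latency cycles after
  // o_scheduleUseTimeout armed the timer.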

  // EVENTS
  enumeration(Event, desc="Cache events") {
    Load,            desc="Load request from the processor";
    Ifetch,          desc="I-fetch request from the processor";
    Store,           desc="Store request from the processor";
    L1_Replacement,  desc="Replacement";

    // Requests
    Own_GETX,      desc="We observe our own GetX forwarded back to us";
    Fwd_GETX,      desc="A GetX from another processor";
    Fwd_GETS,      desc="A GetS from another processor";
    Fwd_DMA,       desc="A forwarded DMA read request";
    Inv,           desc="Invalidations from the directory";

    // Responses
    Ack,             desc="Received an ack message";
    Data,            desc="Received a data message, responder has a shared copy";
    Exclusive_Data,  desc="Received an exclusive data message";

    Writeback_Ack,   desc="Writeback O.K. from directory";
    Writeback_Ack_Data,   desc="Writeback O.K. from directory, data must be sent";
    Writeback_Nack,  desc="Writeback not O.K. from directory";

    // Triggers
    All_acks,                  desc="Received all required data and message acks";

    // Timeouts
    Use_Timeout, desc="Lockout period ended";
  }

  // TYPES

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState,        desc="cache state";
    bool Dirty,              desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk,       desc="data for the block";
  }

  // TBE fields
  structure(TBE, desc="...") {
    Addr addr,         desc="Physical address for this TBE";
    State TBEState,          desc="Transient state";
    DataBlock DataBlk,       desc="data for the block, required for concurrent writebacks";
    bool Dirty,              desc="Is the data dirty (different than memory)?";
    int NumPendingMsgs, default="0",     desc="Number of acks/data messages that this processor is waiting for";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }

  Tick clockEdge();
  Tick cyclesToTicks(Cycles c);
  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();
  MachineID mapAddressToMachine(Addr addr, MachineType mtype);

  TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
  TimerTable useTimerTable;
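
  // The TBE table acts like a set of MSHRs: one entry per outstanding
  // transaction, buffering the writeback data and counting outstanding
  // acks/data in NumPendingMsgs.  The useTimerTable generates the
  // Use_Timeout events for lines parked in the *_W states.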

  Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(addr));
    if(is_valid(L1Dcache_entry)) {
      return L1Dcache_entry;
    }

    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
    return L1Icache_entry;
  }

  Entry getL1DCacheEntry(Addr addr), return_by_pointer="yes" {
    return static_cast(Entry, "pointer", L1Dcache.lookup(addr));
  }

  Entry getL1ICacheEntry(Addr addr), return_by_pointer="yes" {
    return static_cast(Entry, "pointer", L1Icache.lookup(addr));
  }

  State getState(TBE tbe, Entry cache_entry, Addr addr) {
    if(is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }

  // L1 hit latency
  Cycles mandatoryQueueLatency(RubyRequestType type) {
    if (type == RubyRequestType:IFETCH) {
      return L1Icache.getTagLatency();
    } else {
      return L1Dcache.getTagLatency();
    }
  }

  // Latency for responses that fetch data from cache
  Cycles cacheResponseLatency() {
    if (L1Dcache.getTagLatency() > response_latency) {
      return L1Dcache.getTagLatency();
    } else {
      return response_latency;
    }
  }
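
  // Example: with a 4-cycle D-cache tag latency and response_latency := 1,
  // responses that carry cache data are enqueued with a 4-cycle latency,
  // i.e. the larger of the two values.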

  void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
    assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      if ( ((cache_entry.CacheState != State:M) && (state == State:M)) ||
         ((cache_entry.CacheState != State:MM) && (state == State:MM)) ||
         ((cache_entry.CacheState != State:S) && (state == State:S)) ||
         ((cache_entry.CacheState != State:O) && (state == State:O)) ) {

        cache_entry.CacheState := state;
        sequencer.checkCoherence(addr);
      }
      else {
        cache_entry.CacheState := state;
      }
    }
  }

  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
      return L1Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(cache_entry.CacheState));
      return L1Cache_State_to_permission(cache_entry.CacheState);
    }

    DPRINTF(RubySlicc, "AccessPermission_NotPresent\n");
    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L1Cache_State_to_permission(state));
    }
  }

  void functionalRead(Addr addr, Packet *pkt) {
    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      testAndRead(addr, cache_entry.DataBlk, pkt);
    } else {
      TBE tbe := TBEs[addr];
      if(is_valid(tbe)) {
        testAndRead(addr, tbe.DataBlk, pkt);
      } else {
        error("Data block missing!");
      }
    }
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      num_functional_writes := num_functional_writes +
        testAndWrite(addr, cache_entry.DataBlk, pkt);
      return num_functional_writes;
    }

    TBE tbe := TBEs[addr];
    num_functional_writes := num_functional_writes +
        testAndWrite(addr, tbe.DataBlk, pkt);
    return num_functional_writes;
  }

  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid RubyRequestType");
    }
  }
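
  // Atomic (RMW) requests map to Event:Store above, since an atomic needs
  // the same exclusive (GETX) permission as an ordinary store.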

  // ** OUT_PORTS **

  out_port(requestNetwork_out, RequestMsg, requestFromL1Cache);
  out_port(responseNetwork_out, ResponseMsg, responseFromL1Cache);
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);

  // ** IN_PORTS **

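  // The in_port ranks order servicing so that pending timeouts and internal
  // triggers take priority over network responses, then forwarded requests,
  // and finally new processor requests from the mandatory queue.
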
  // Use Timer
  in_port(useTimerTable_in, Addr, useTimerTable, rank=4) {
    if (useTimerTable_in.isReady(clockEdge())) {
        Addr readyAddress := useTimerTable.nextAddress();
        trigger(Event:Use_Timeout, readyAddress, getCacheEntry(readyAddress),
                TBEs.lookup(readyAddress));
    }
  }

  // Trigger Queue
  in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=3) {
    if (triggerQueue_in.isReady(clockEdge())) {
      peek(triggerQueue_in, TriggerMsg) {
        if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_acks, in_msg.addr,
                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Response Network
  in_port(responseToL1Cache_in, ResponseMsg, responseToL1Cache, rank=2) {
    if (responseToL1Cache_in.isReady(clockEdge())) {
      peek(responseToL1Cache_in, ResponseMsg, block_on="addr") {
        if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Ack, in_msg.addr,
                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.addr,
                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Exclusive_Data, in_msg.addr,
                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.addr,
                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceResponseType:WB_ACK_DATA) {
          trigger(Event:Writeback_Ack_Data, in_msg.addr,
                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceResponseType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.addr,
                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
        } else {
          error("Unexpected message");
        }
      }
    }
  }


  // Request Network
  in_port(requestNetwork_in, RequestMsg, requestToL1Cache, rank=1) {
    if (requestNetwork_in.isReady(clockEdge())) {
      peek(requestNetwork_in, RequestMsg, block_on="addr") {
        assert(in_msg.Destination.isElement(machineID));
        DPRINTF(RubySlicc, "L1 received: %s\n", in_msg.Type);

        if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
          if (in_msg.Requestor == machineID && in_msg.RequestorMachine == MachineType:L1Cache) {
            trigger(Event:Own_GETX, in_msg.addr,
                    getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
          } else {
            trigger(Event:Fwd_GETX, in_msg.addr,
                    getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
          }
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:Fwd_GETS, in_msg.addr,
                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
          trigger(Event:Fwd_DMA, in_msg.addr,
                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Inv, in_msg.addr,
                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Mandatory queue between the node's CPU and its L1 caches
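  //
  // If a fetch hits in the D-cache (or a data access hits in the I-cache),
  // the block is first evicted from the "wrong" cache via L1_Replacement
  // and the request is retried once it can be brought into the correct
  // cache.  If the target set is full, a victim is chosen with cacheProbe()
  // and replaced before the miss is issued.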
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, rank=0) {
    if (mandatoryQueue_in.isReady(clockEdge())) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        // Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache

        if (in_msg.Type == RubyRequestType:IFETCH) {
          // *** INSTRUCTION ACCESS ***

          Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
          if (is_valid(L1Icache_entry)) {
            // The tag matches in the L1 I-cache, so trigger the request on this entry.
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Icache_entry,
                    TBEs[in_msg.LineAddress]);
          } else {

            Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
            // Check to see if it is in the OTHER L1
            if (is_valid(L1Dcache_entry)) {
              // The block is in the wrong L1; evict it so the request can be handled in the right cache
              trigger(Event:L1_Replacement, in_msg.LineAddress, L1Dcache_entry,
                      TBEs[in_msg.LineAddress]);
            }
            if (L1Icache.cacheAvail(in_msg.LineAddress)) {
              // The L1 doesn't have the line, but we have space for it, so let's see if the L2 has it
              trigger(mandatory_request_type_to_event(in_msg.Type),
                      in_msg.LineAddress, L1Icache_entry,
                      TBEs[in_msg.LineAddress]);
            } else {
              // No room in the L1, so we need to make room in the L1
              // Check that the line we want to evict is not locked
              Addr addr := L1Icache.cacheProbe(in_msg.LineAddress);
              check_on_cache_probe(mandatoryQueue_in, addr);
              trigger(Event:L1_Replacement,
                      addr,
                      getL1ICacheEntry(addr),
                      TBEs[addr]);
            }
          }
        } else {
          // *** DATA ACCESS ***

          Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
          if (is_valid(L1Dcache_entry)) {
            // The tag matches in the L1 D-cache, so trigger the request on this entry.
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Dcache_entry,
                    TBEs[in_msg.LineAddress]);
          } else {

            Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
            // Check to see if it is in the OTHER L1
            if (is_valid(L1Icache_entry)) {
              // The block is in the wrong L1; evict it so the request can be handled in the right cache
              trigger(Event:L1_Replacement, in_msg.LineAddress,
                      L1Icache_entry, TBEs[in_msg.LineAddress]);
            }
            if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
              // The L1 doesn't have the line, but we have space for it, so let's see if the L2 has it
              trigger(mandatory_request_type_to_event(in_msg.Type),
                      in_msg.LineAddress, L1Dcache_entry,
                      TBEs[in_msg.LineAddress]);
            } else {
              // No room in the L1, so we need to make room in the L1
              // Check that the line we want to evict is not locked
              Addr addr := L1Dcache.cacheProbe(in_msg.LineAddress);
              check_on_cache_probe(mandatoryQueue_in, addr);
              trigger(Event:L1_Replacement,
                      addr,
                      getL1DCacheEntry(addr),
                      TBEs[addr]);
            }
          }
        }
      }
    }
  }


  // ACTIONS

  action(a_issueGETS, "a", desc="Issue GETS") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestNetwork_out, RequestMsg, request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETS;
        out_msg.Requestor := machineID;
        out_msg.RequestorMachine := MachineType:L1Cache;
        out_msg.Destination.add(mapAddressToMachine(address,
                                                    MachineType:L2Cache));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.AccessMode := in_msg.AccessMode;
        out_msg.Prefetch := in_msg.Prefetch;
      }
    }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestNetwork_out, RequestMsg, request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;
        out_msg.RequestorMachine := MachineType:L1Cache;
        out_msg.Destination.add(mapAddressToMachine(address,
                                                    MachineType:L2Cache));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.AccessMode := in_msg.AccessMode;
        out_msg.Prefetch := in_msg.Prefetch;
      }
    }
  }

  action(d_issuePUTX, "d", desc="Issue PUTX") {
    enqueue(requestNetwork_out, RequestMsg, request_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:PUTX;
      out_msg.Requestor := machineID;
      out_msg.RequestorMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToMachine(address,
                                                  MachineType:L2Cache));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(dd_issuePUTO, "\d", desc="Issue PUTO") {
    enqueue(requestNetwork_out, RequestMsg, request_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:PUTO;
      out_msg.Requestor := machineID;
      out_msg.RequestorMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToMachine(address,
                                                  MachineType:L2Cache));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(dd_issuePUTS, "\ds", desc="Issue PUTS") {
    enqueue(requestNetwork_out, RequestMsg, request_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:PUTS;
      out_msg.Requestor := machineID;
      out_msg.RequestorMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToMachine(address,
                                                  MachineType:L2Cache));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      if (in_msg.RequestorMachine == MachineType:L2Cache) {
        enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:DATA;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(mapAddressToMachine(address,
                                                      MachineType:L2Cache));
          out_msg.DataBlk := cache_entry.DataBlk;
          // out_msg.Dirty := cache_entry.Dirty;
          out_msg.Dirty := false;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        DPRINTF(RubySlicc, "Sending data to L2: %#x\n", in_msg.addr);
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:DATA;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.DataBlk := cache_entry.DataBlk;
          // out_msg.Dirty := cache_entry.Dirty;
          out_msg.Dirty := false;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
        }
        DPRINTF(RubySlicc, "Sending data to L1\n");
      }
    }
  }

  action(ee_sendDataExclusive, "\e", desc="Send data from cache to requestor, don't keep a shared copy") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      if (in_msg.RequestorMachine == MachineType:L2Cache) {
        enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(mapAddressToMachine(address,
                                                      MachineType:L2Cache));
          out_msg.DataBlk := cache_entry.DataBlk;
          out_msg.Dirty := cache_entry.Dirty;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        DPRINTF(RubySlicc, "Sending exclusive data to L2\n");
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.DataBlk := cache_entry.DataBlk;
          out_msg.Dirty := cache_entry.Dirty;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
        }
        DPRINTF(RubySlicc, "Sending exclusive data to L1\n");
      }
    }
  }

  action(f_sendAck, "f", desc="Send ack from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      if (in_msg.RequestorMachine == MachineType:L1Cache) {
        enqueue(responseNetwork_out, ResponseMsg, response_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Acks := 0 - 1; // -1
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, response_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(mapAddressToMachine(address,
                                                      MachineType:L2Cache));
          out_msg.Acks := 0 - 1; // -1
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
  }
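
  // Ack counting convention: invalidation acks carry Acks := -1, while the
  // data response (or the requestor's own reflected GETX) carries the
  // positive number of acks to collect.  Because the decrement actions
  // subtract in_msg.Acks from NumPendingMsgs, the count reaches zero exactly
  // when the data and all expected acks have arrived, regardless of order.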

  action(g_sendUnblock, "g", desc="Send unblock to memory") {
    enqueue(responseNetwork_out, ResponseMsg, response_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToMachine(address,
                                                  MachineType:L2Cache));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gg_sendUnblockExclusive, "\g", desc="Send unblock exclusive to memory") {
    enqueue(responseNetwork_out, ResponseMsg, response_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToMachine(address,
                                                  MachineType:L2Cache));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(h_load_hit, "hd", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Dcache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk);
  }

  action(h_ifetch_hit, "hi", desc="Notify the sequencer about ifetch completion.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Icache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk);
  }

  action(hx_load_hit, "hx", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Icache.setMRU(address);
    L1Dcache.setMRU(address);
    sequencer.readCallback(address, cache_entry.DataBlk, true);
  }

  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Dcache.setMRU(cache_entry);
    sequencer.writeCallback(address, cache_entry.DataBlk);
    cache_entry.Dirty := true;
  }

  action(xx_store_hit, "\xx", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Icache.setMRU(address);
    L1Dcache.setMRU(address);
    sequencer.writeCallback(address, cache_entry.DataBlk, true);
    cache_entry.Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(TBEs);
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    assert(is_valid(cache_entry));
    tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
    tbe.Dirty := cache_entry.Dirty;
  }

  action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
    triggerQueue_in.dequeue(clockEdge());
  }

  action(jj_unsetUseTimer, "\jj", desc="Unset use timer.") {
    useTimerTable.unset(address);
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue(clockEdge());
  }

  action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
    requestNetwork_in.dequeue(clockEdge());
  }

  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
    peek(responseToL1Cache_in, ResponseMsg) {
      assert(is_valid(tbe));
      DPRINTF(RubySlicc, "L1 decrementNumberOfMessages: %d\n", in_msg.Acks);
      tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
    }
  }

  action(mm_decrementNumberOfMessages, "\m", desc="Decrement the number of messages for which we're waiting") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(tbe));
      tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
    }
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseToL1Cache_in.dequeue(clockEdge());
  }

  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    assert(is_valid(tbe));
    if (tbe.NumPendingMsgs == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.addr := address;
        out_msg.Type := TriggerType:ALL_ACKS;
      }
    }
  }

  action(o_scheduleUseTimeout, "oo", desc="Schedule a use timeout.") {
    useTimerTable.set(address,
                      clockEdge() + cyclesToTicks(use_timeout_latency));
  }

  action(ub_dmaUnblockL2Cache, "ub", desc="Send dma ack to l2 cache") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DMA_ACK;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L1Cache;
        out_msg.Destination.add(mapAddressToMachine(address,
                                                    MachineType:L2Cache));
        out_msg.Dirty := false;
        out_msg.Acks := 1;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(tbe));
      if (in_msg.RequestorMachine == MachineType:L1Cache ||
          in_msg.RequestorMachine == MachineType:DMA) {
        enqueue(responseNetwork_out, ResponseMsg, response_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:DATA;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.DataBlk := tbe.DataBlk;
          // out_msg.Dirty := tbe.Dirty;
          out_msg.Dirty := false;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
        }
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, response_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:DATA;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(mapAddressToMachine(address,
                                                      MachineType:L2Cache));
          out_msg.DataBlk := tbe.DataBlk;
          // out_msg.Dirty := tbe.Dirty;
          out_msg.Dirty := false;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
      }
    }
  }

  action(q_sendExclusiveDataFromTBEToCache, "qq", desc="Send exclusive data from TBE to cache") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(tbe));
      if (in_msg.RequestorMachine == MachineType:L1Cache) {
        enqueue(responseNetwork_out, ResponseMsg, response_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.DataBlk := tbe.DataBlk;
          out_msg.Dirty := tbe.Dirty;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
        }
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, response_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(mapAddressToMachine(address,
                                                      MachineType:L2Cache));
          out_msg.DataBlk := tbe.DataBlk;
          out_msg.Dirty := tbe.Dirty;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
      }
    }
  }

  // L2 will usually request data for a writeback
  action(qq_sendWBDataFromTBEToL2, "\q", desc="Send data from TBE to L2") {
    enqueue(requestNetwork_out, RequestMsg, request_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Requestor := machineID;
      out_msg.RequestorMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToMachine(address,
                                                  MachineType:L2Cache));
      if (tbe.Dirty) {
        out_msg.Type := CoherenceRequestType:WRITEBACK_DIRTY_DATA;
      } else {
        out_msg.Type := CoherenceRequestType:WRITEBACK_CLEAN_DATA;
      }
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.MessageSize := MessageSizeType:Writeback_Data;
    }
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseToL1Cache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;

      if (in_msg.Type == CoherenceResponseType:DATA) {
        //assert(in_msg.Dirty == false);
      }
    }
  }

  action(kk_deallocateL1CacheBlock, "\k", desc="Deallocate cache block.  Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
    if (L1Dcache.isTagPresent(address)) {
      L1Dcache.deallocate(address);
    } else {
      L1Icache.deallocate(address);
    }
    unset_cache_entry();
  }

  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
    if ((is_invalid(cache_entry))) {
      set_cache_entry(L1Dcache.allocate(address, new Entry));
    }
  }

  action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
    if ((is_invalid(cache_entry))) {
      set_cache_entry(L1Icache.allocate(address, new Entry));
    }
  }

  action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
    if (send_evictions) {
      DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
      sequencer.evictionCallback(address);
    }
  }
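
  // send_evictions is typically enabled for CPU models that need to observe
  // L1 evictions (e.g. to squash speculative loads or clear LL/SC monitors);
  // the eviction callback lets the core react before the line disappears.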

  action(uu_profileInstMiss, "\uim", desc="Profile the demand miss") {
      ++L1Icache.demand_misses;
  }

  action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
      ++L1Icache.demand_hits;
  }

  action(uu_profileDataMiss, "\udm", desc="Profile the demand miss") {
      ++L1Dcache.demand_misses;
  }

  action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
      ++L1Dcache.demand_hits;
  }

  action(z_recycleRequestQueue, "z", desc="Send the head of the forwarded request queue to the back of the queue.") {
    requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

  action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
    mandatoryQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************

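  // Each transition names the current state(s), the triggering event, an
  // optional next state, and the actions to execute, in order.
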
  // Transitions for Load/Store/L1_Replacement from transient states
  transition({IM, SM, OM, IS, OI, SI, MI, II}, {Store, L1_Replacement}) {
    zz_recycleMandatoryQueue;
  }

  transition({M_W, MM_W}, L1_Replacement) {
    zz_recycleMandatoryQueue;
  }

  transition({M_W, MM_W}, {Fwd_GETS, Fwd_DMA, Fwd_GETX, Own_GETX, Inv}) {
    z_recycleRequestQueue;
  }

  transition({IM, IS, OI, MI, SI, II}, {Load, Ifetch}) {
    zz_recycleMandatoryQueue;
  }
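
  // Recycling moves the stalled message to the back of its queue and retries
  // it later, so requests that arrive while a miss, writeback, or use-timeout
  // window is still outstanding are delayed rather than dropped.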

  // Transitions from Idle
  transition(I, Load, IS) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileDataMiss;
    k_popMandatoryQueue;
  }

  transition(I, Ifetch, IS) {
    jj_allocateL1ICacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileInstMiss;
    k_popMandatoryQueue;
  }

  transition(I, Store, IM) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    b_issueGETX;
    uu_profileDataMiss;
    k_popMandatoryQueue;
  }

  transition(I, L1_Replacement) {
    kk_deallocateL1CacheBlock;
  }

  transition(I, Inv) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition({S, SM, O, OM, MM, MM_W, M, M_W}, Load) {
    h_load_hit;
    uu_profileDataHit;
    k_popMandatoryQueue;
  }

  transition({S, SM, O, OM, MM, MM_W, M, M_W}, Ifetch) {
    h_ifetch_hit;
    uu_profileInstHit;
    k_popMandatoryQueue;
  }

  // Transitions from Shared
  transition(S, Store, SM) {
    i_allocateTBE;
    b_issueGETX;
    uu_profileDataMiss;
    k_popMandatoryQueue;
  }

  transition(S, L1_Replacement, SI) {
    i_allocateTBE;
    dd_issuePUTS;
    forward_eviction_to_cpu;
    kk_deallocateL1CacheBlock;
  }

  transition(S, Inv, I) {
    f_sendAck;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(S, Fwd_GETS) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(S, Fwd_DMA) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // Transitions from Owned
  transition(O, Store, OM) {
    i_allocateTBE;
    b_issueGETX;
    uu_profileDataMiss;
    k_popMandatoryQueue;
  }

  transition(O, L1_Replacement, OI) {
    i_allocateTBE;
    dd_issuePUTO;
    forward_eviction_to_cpu;
    kk_deallocateL1CacheBlock;
  }

  transition(O, Fwd_GETX, I) {
    ee_sendDataExclusive;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(O, Fwd_GETS) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(O, Fwd_DMA) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // Transitions from MM
  transition({MM, MM_W}, Store) {
    hh_store_hit;
    uu_profileDataHit;
    k_popMandatoryQueue;
  }

  transition(MM, L1_Replacement, MI) {
    i_allocateTBE;
    d_issuePUTX;
    forward_eviction_to_cpu;
    kk_deallocateL1CacheBlock;
  }

  transition(MM, Fwd_GETX, I) {
    ee_sendDataExclusive;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(MM, Fwd_GETS, I) {
    ee_sendDataExclusive;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(MM, Fwd_DMA, MM) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // Transitions from M
  transition(M, Store, MM) {
    hh_store_hit;
    uu_profileDataHit;
    k_popMandatoryQueue;
  }

  transition(M_W, Store, MM_W) {
    hh_store_hit;
    uu_profileDataHit;
    k_popMandatoryQueue;
  }

  transition(M, L1_Replacement, MI) {
    i_allocateTBE;
    d_issuePUTX;
    forward_eviction_to_cpu;
    kk_deallocateL1CacheBlock;
  }

  transition(M, Fwd_GETX, I) {
    // e_sendData;
    ee_sendDataExclusive;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(M, Fwd_GETS, O) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(M, Fwd_DMA) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // Transitions from IM

  transition(IM, Inv) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, {Exclusive_Data, Data}, OM) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  // Transitions from SM
  transition(SM, Inv, IM) {
    f_sendAck;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(SM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM, {Data, Exclusive_Data}, OM) {
    // v_writeDataToCacheVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM, Fwd_GETS) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(SM, Fwd_DMA) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // Transitions from OM
  transition(OM, Own_GETX) {
    mm_decrementNumberOfMessages;
    o_checkForCompletion;
    l_popForwardQueue;
  }


  // transition(OM, Fwd_GETX, OMF) {
  transition(OM, Fwd_GETX, IM) {
    ee_sendDataExclusive;
    l_popForwardQueue;
  }

  transition(OM, Fwd_GETS) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(OM, Fwd_DMA) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  //transition({OM, OMF}, Ack) {
  transition(OM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(OM, All_acks, MM_W) {
    xx_store_hit;
    gg_sendUnblockExclusive;
    s_deallocateTBE;
    o_scheduleUseTimeout;
    j_popTriggerQueue;
  }

  transition(MM_W, Use_Timeout, MM) {
    jj_unsetUseTimer;
  }

  // Transitions from IS

  transition(IS, Inv) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IS, Data, S) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    hx_load_hit;
    g_sendUnblock;
    s_deallocateTBE;
    n_popResponseQueue;
  }

  transition(IS, Exclusive_Data, M_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    hx_load_hit;
    gg_sendUnblockExclusive;
    o_scheduleUseTimeout;
    s_deallocateTBE;
    n_popResponseQueue;
  }

  transition(M_W, Use_Timeout, M) {
    jj_unsetUseTimer;
  }

  // Transitions from OI/MI

  transition(MI, Fwd_GETS, OI) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MI, Fwd_DMA) {
    q_sendDataFromTBEToCache;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  transition(MI, Fwd_GETX, II) {
    q_sendExclusiveDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({SI, OI}, Fwd_GETS) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({SI, OI}, Fwd_DMA) {
    q_sendDataFromTBEToCache;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  transition(OI, Fwd_GETX, II) {
    q_sendExclusiveDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({SI, OI, MI}, Writeback_Ack_Data, I) {
    qq_sendWBDataFromTBEToL2;  // always send data
    s_deallocateTBE;
    n_popResponseQueue;
  }

  transition({SI, OI, MI}, Writeback_Ack, I) {
    g_sendUnblock;
    s_deallocateTBE;
    n_popResponseQueue;
  }

  transition({MI, OI}, Writeback_Nack, OI) {
    // FIXME: This might cause deadlock by re-using the writeback
    // channel, we should handle this case differently.
    dd_issuePUTO;
    n_popResponseQueue;
  }

  // Transitions from II
  transition(II, {Writeback_Ack, Writeback_Ack_Data}, I) {
    g_sendUnblock;
    s_deallocateTBE;
    n_popResponseQueue;
  }

  // transition({II, SI}, Writeback_Nack, I) {
  transition(II, Writeback_Nack, I) {
    s_deallocateTBE;
    n_popResponseQueue;
  }

  transition(SI, Writeback_Nack) {
    dd_issuePUTS;
    n_popResponseQueue;
  }

  transition(II, Inv) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(SI, Inv, II) {
    f_sendAck;
    l_popForwardQueue;
  }
}