MESI_Two_Level-L1cache.sm revision 14184
/*
 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

machine(MachineType:L1Cache, "MESI Directory L1 Cache CMP")
 : Sequencer * sequencer;
   CacheMemory * L1Icache;
   CacheMemory * L1Dcache;
   Prefetcher * prefetcher;
   int l2_select_num_bits;
   Cycles l1_request_latency := 2;
   Cycles l1_response_latency := 2;
   Cycles to_l2_latency := 1;
   bool send_evictions;
   bool enable_prefetch := "False";

   // Message Queues
   // From this node's L1 cache TO the network

   // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
   MessageBuffer * requestFromL1Cache, network="To", virtual_network="0",
        vnet_type="request";

   // a local L1 -> this L2 bank
   MessageBuffer * responseFromL1Cache, network="To", virtual_network="1",
        vnet_type="response";

   MessageBuffer * unblockFromL1Cache, network="To", virtual_network="2",
        vnet_type="unblock";


   // To this node's L1 cache FROM the network
   // an L2 bank -> this L1
   MessageBuffer * requestToL1Cache, network="From", virtual_network="2",
        vnet_type="request";

   // an L2 bank -> this L1
   MessageBuffer * responseToL1Cache, network="From", virtual_network="1",
        vnet_type="response";

  // Request buffer for prefetches
  MessageBuffer * optionalQueue;

  // Buffer for requests generated by the processor core.
  MessageBuffer * mandatoryQueue;
{
  // STATES
  state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    NP, AccessPermission:Invalid, desc="Not present in either cache";
    I, AccessPermission:Invalid, desc="a L1 cache entry Idle";
    S, AccessPermission:Read_Only, desc="a L1 cache entry Shared";
    E, AccessPermission:Read_Only, desc="a L1 cache entry Exclusive";
    M, AccessPermission:Read_Write, desc="a L1 cache entry Modified", format="!b";

    // Transient States
    IS, AccessPermission:Busy, desc="L1 idle, issued GETS, have not seen response yet";
    IM, AccessPermission:Busy, desc="L1 idle, issued GETX, have not seen response yet";
    SM, AccessPermission:Read_Only, desc="L1 shared, issued GETX, have not seen response yet";
    IS_I, AccessPermission:Busy, desc="L1 idle, issued GETS, saw Inv before data because directory doesn't block on GETS hit";

    M_I, AccessPermission:Busy, desc="L1 replacing, waiting for ACK";
    SINK_WB_ACK, AccessPermission:Busy, desc="This is to sink WB_Acks from L2";

    // Transient States in which block is being prefetched
    PF_IS, AccessPermission:Busy, desc="Issued GETS, have not seen response yet";
    PF_IM, AccessPermission:Busy, desc="Issued GETX, have not seen response yet";
    PF_SM, AccessPermission:Busy, desc="Issued GETX, received data, waiting for acks";
    PF_IS_I, AccessPermission:Busy, desc="Issued GETS, saw Inv before data";
  }
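
  // Naming convention for the transient states: the first letter is the base
  // state the block left and the second the state it is heading to
  // (IS = Idle->Shared, IM = Idle->Modified, SM = Shared->Modified, M_I =
  // Modified->Invalid during a writeback). IS_I is an IS request that saw an
  // Inv before its data arrived, and the PF_ states mirror the demand
  // transient states for blocks being fetched on behalf of a prefetch.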

  // EVENTS
  enumeration(Event, desc="Cache events") {
    // L1 events
    Load,            desc="Load request from the home processor";
    Ifetch,          desc="I-fetch request from the home processor";
    Store,           desc="Store request from the home processor";

    Inv,           desc="Invalidate request from L2 bank";

    // internal generated request
    L1_Replacement,  desc="L1 Replacement", format="!r";
    PF_L1_Replacement,  desc="Prefetch L1 Replacement", format="!pr";

    // other requests
    Fwd_GETX,   desc="GETX from other processor";
    Fwd_GETS,   desc="GETS from other processor";
    Fwd_GET_INSTR,   desc="GET_INSTR from other processor";

    Data,       desc="Data for processor";
    Data_Exclusive,       desc="Exclusive data for processor";
    DataS_fromL1,       desc="data for GETS request, need to unblock directory";
    Data_all_Acks,       desc="Data for processor, all acks";

    Ack,        desc="Ack for processor";
    Ack_all,      desc="Last ack for processor";

    WB_Ack,        desc="Ack for replacement";

    PF_Load,    desc="load request from prefetcher";
    PF_Ifetch,  desc="instruction fetch request from prefetcher";
    PF_Store,   desc="exclusive load request from prefetcher";
  }
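
  // The demand Load/Ifetch/Store events come from the mandatoryQueue, while
  // the PF_* variants are generated from the optionalQueue filled by the
  // prefetcher. Whether an incoming DATA/ACK message fires Data vs.
  // Data_all_Acks (or Ack vs. Ack_all) is decided in the response in_port
  // below, based on the number of acknowledgements still pending in the TBE.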

  // TYPES

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
    State CacheState,        desc="cache state";
    DataBlock DataBlk,       desc="data for the block";
    bool Dirty, default="false",   desc="data is dirty";
    bool isPrefetch, desc="Set if this block was prefetched and not yet accessed";
  }

  // TBE fields
  structure(TBE, desc="...") {
    Addr addr,              desc="Physical address for this TBE";
    State TBEState,        desc="Transient state";
    DataBlock DataBlk,                desc="Buffer for the data block";
    bool Dirty, default="false",   desc="data is dirty";
    bool isPrefetch,       desc="Set if this was caused by a prefetch";
    int pendingAcks, default="0", desc="number of pending acks";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }

  TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";

  int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";

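  // The shared L2 is bank-interleaved on the physical address:
  // mapAddressToRange() uses bits
  // [l2_select_low_bit, l2_select_low_bit + l2_select_num_bits) to pick the
  // destination L2 bank. As an illustrative example (not a required
  // configuration): with 64-byte blocks l2_select_low_bit is 6, and with
  // l2_select_num_bits = 1 address bit 6 chooses between two L2 banks.
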
  Tick clockEdge();
  Cycles ticksToCycles(Tick t);
  void set_cache_entry(AbstractCacheEntry a);
  void unset_cache_entry();
  void set_tbe(TBE a);
  void unset_tbe();
  void wakeUpBuffers(Addr a);
  void profileMsgDelay(int virtualNetworkType, Cycles c);

  // inclusive cache returns L1 entries only
  Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
    if(is_valid(L1Dcache_entry)) {
      return L1Dcache_entry;
    }

    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
    return L1Icache_entry;
  }

  Entry getL1DCacheEntry(Addr addr), return_by_pointer="yes" {
    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
    return L1Dcache_entry;
  }

  Entry getL1ICacheEntry(Addr addr), return_by_pointer="yes" {
    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
    return L1Icache_entry;
  }

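  // getCacheEntry() prefers the D-cache entry when looking up a block; the
  // asserts in getState()/setState() below rely on a block never being
  // resident in the L1I and the L1D at the same time, which the mandatory
  // queue handler enforces by replacing a block found in the "wrong" L1.
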
  State getState(TBE tbe, Entry cache_entry, Addr addr) {
    assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);

    if(is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:NP;
  }

  void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
    assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);

    // MUST CHANGE
    if(is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
      return L1Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(cache_entry.CacheState));
      return L1Cache_State_to_permission(cache_entry.CacheState);
    }

    DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
    return AccessPermission:NotPresent;
  }

  void functionalRead(Addr addr, Packet *pkt) {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      testAndRead(addr, tbe.DataBlk, pkt);
    } else {
      testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
    }
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      num_functional_writes := num_functional_writes +
        testAndWrite(addr, tbe.DataBlk, pkt);
      return num_functional_writes;
    }

    num_functional_writes := num_functional_writes +
        testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
    return num_functional_writes;
  }

  void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L1Cache_State_to_permission(state));
    }
  }

  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid RubyRequestType");
    }
  }

  Event prefetch_request_type_to_event(RubyRequestType type) {
      if (type == RubyRequestType:LD) {
          return Event:PF_Load;
      } else if (type == RubyRequestType:IFETCH) {
          return Event:PF_Ifetch;
      } else if ((type == RubyRequestType:ST) ||
                 (type == RubyRequestType:ATOMIC)) {
          return Event:PF_Store;
      } else {
          error("Invalid RubyRequestType");
      }
  }

  int getPendingAcks(TBE tbe) {
    return tbe.pendingAcks;
  }

  out_port(requestL1Network_out, RequestMsg, requestFromL1Cache);
  out_port(responseL1Network_out, ResponseMsg, responseFromL1Cache);
  out_port(unblockNetwork_out, ResponseMsg, unblockFromL1Cache);
  out_port(optionalQueue_out, RubyRequest, optionalQueue);


  // Prefetch queue between the controller and the prefetcher
  // As per Spracklen et al. (HPCA 2005), the prefetch queue should be
  // implemented as a LIFO structure.  The structure would allow for fast
  // searches of all entries in the queue, not just the head msg. All
  // msgs in the structure can be invalidated if a demand miss matches.
  in_port(optionalQueue_in, RubyRequest, optionalQueue, desc="...", rank = 3) {
      if (optionalQueue_in.isReady(clockEdge())) {
          peek(optionalQueue_in, RubyRequest) {
              // Instruction Prefetch
              if (in_msg.Type == RubyRequestType:IFETCH) {
                  Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
                  if (is_valid(L1Icache_entry)) {
                      // The block to be prefetched is already present in the
                      // cache. We should drop this request.
                      trigger(prefetch_request_type_to_event(in_msg.Type),
                              in_msg.LineAddress,
                              L1Icache_entry, TBEs[in_msg.LineAddress]);
                  }

                  // Check to see if it is in the OTHER L1
                  Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
                  if (is_valid(L1Dcache_entry)) {
                      // The block is in the wrong L1 cache. We should drop
                      // this request.
                      trigger(prefetch_request_type_to_event(in_msg.Type),
                              in_msg.LineAddress,
                              L1Dcache_entry, TBEs[in_msg.LineAddress]);
                  }

                  if (L1Icache.cacheAvail(in_msg.LineAddress)) {
                      // L1 doesn't have the line, but we have space for it
                      // in the L1 so let's see if the L2 has it
                      trigger(prefetch_request_type_to_event(in_msg.Type),
                              in_msg.LineAddress,
                              L1Icache_entry, TBEs[in_msg.LineAddress]);
                  } else {
                      // No room in the L1, so we need to make room in the L1
                      trigger(Event:PF_L1_Replacement,
                              L1Icache.cacheProbe(in_msg.LineAddress),
                              getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
                              TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
                  }
              } else {
                  // Data prefetch
                  Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
                  if (is_valid(L1Dcache_entry)) {
                      // The block to be prefetched is already present in the
                      // cache. We should drop this request.
                      trigger(prefetch_request_type_to_event(in_msg.Type),
                              in_msg.LineAddress,
                              L1Dcache_entry, TBEs[in_msg.LineAddress]);
                  }

                  // Check to see if it is in the OTHER L1
                  Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
                  if (is_valid(L1Icache_entry)) {
                      // The block is in the wrong L1. Just drop the prefetch
                      // request.
                      trigger(prefetch_request_type_to_event(in_msg.Type),
                              in_msg.LineAddress,
                              L1Icache_entry, TBEs[in_msg.LineAddress]);
                  }

                  if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
                      // L1 doesn't have the line, but we have space for it in
                      // the L1 so let's see if the L2 has it
                      trigger(prefetch_request_type_to_event(in_msg.Type),
                              in_msg.LineAddress,
                              L1Dcache_entry, TBEs[in_msg.LineAddress]);
                  } else {
                      // No room in the L1, so we need to make room in the L1
                      trigger(Event:PF_L1_Replacement,
                              L1Dcache.cacheProbe(in_msg.LineAddress),
                              getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
                              TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
                  }
              }
          }
      }
  }
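
  // Summary of the prefetch port above: a prefetch is dropped (its PF_* event
  // fires on a valid entry and the transitions below simply pop the queue) if
  // the line already sits in either L1; it is issued to the L2 if the target
  // L1 has a free way; otherwise a PF_L1_Replacement is triggered first to
  // make room for it.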

  // Response L1 Network - response msg to this L1 cache
  in_port(responseL1Network_in, ResponseMsg, responseToL1Cache, rank = 2) {
    if (responseL1Network_in.isReady(clockEdge())) {
      peek(responseL1Network_in, ResponseMsg, block_on="addr") {
        assert(in_msg.Destination.isElement(machineID));

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
        } else if(in_msg.Type == CoherenceResponseType:DATA) {
          if ((getState(tbe, cache_entry, in_msg.addr) == State:IS ||
               getState(tbe, cache_entry, in_msg.addr) == State:IS_I ||
               getState(tbe, cache_entry, in_msg.addr) == State:PF_IS ||
               getState(tbe, cache_entry, in_msg.addr) == State:PF_IS_I) &&
              machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {

              trigger(Event:DataS_fromL1, in_msg.addr, cache_entry, tbe);

          } else if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
            trigger(Event:Data_all_Acks, in_msg.addr, cache_entry, tbe);
          } else {
            trigger(Event:Data, in_msg.addr, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceResponseType:ACK) {
          if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
            trigger(Event:Ack_all, in_msg.addr, cache_entry, tbe);
          } else {
            trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
          trigger(Event:WB_Ack, in_msg.addr, cache_entry, tbe);
        } else {
          error("Invalid L1 response type");
        }
      }
    }
  }
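
  // A DATA response that arrives from another L1 while this cache is still in
  // a GETS transient state (IS, IS_I, PF_IS, PF_IS_I) is treated as
  // DataS_fromL1, which additionally requires unblocking the directory. For
  // all other DATA/ACK responses, the AckCount carried in the message is
  // compared against the acks still pending in the TBE to decide between the
  // Data/Ack events and their Data_all_Acks/Ack_all counterparts.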

  // Request network - invalidations and forwarded requests from the shared L2 to this L1 cache
  in_port(requestL1Network_in, RequestMsg, requestToL1Cache, rank = 1) {
    if(requestL1Network_in.isReady(clockEdge())) {
      peek(requestL1Network_in, RequestMsg, block_on="addr") {
        assert(in_msg.Destination.isElement(machineID));

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GETX ||
                   in_msg.Type == CoherenceRequestType:UPGRADE) {
          // an UPGRADE is treated as a GETX here due to a race
          trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:Fwd_GETS, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
          trigger(Event:Fwd_GET_INSTR, in_msg.addr, cache_entry, tbe);
        } else {
          error("Invalid forwarded request type");
        }
      }
    }
  }
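
  // A forwarded GETX invalidates this copy after the data is supplied to the
  // requestor, while a forwarded GETS/GET_INSTR downgrades an E/M copy to S
  // and also writes the data back to the L2 (see the E and M transitions
  // below).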

  // Mandatory queue between the node's CPU and its L1 caches
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank = 0) {
    if (mandatoryQueue_in.isReady(clockEdge())) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        // Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache

        if (in_msg.Type == RubyRequestType:IFETCH) {
          // ** INSTRUCTION ACCESS ***

          Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
          if (is_valid(L1Icache_entry)) {
            // The tag matches in the L1, so trigger the demand event for it.
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                    L1Icache_entry, TBEs[in_msg.LineAddress]);
          } else {

            // Check to see if it is in the OTHER L1
            Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
            if (is_valid(L1Dcache_entry)) {
              // The block is in the wrong L1; replace it there before re-issuing the request
              trigger(Event:L1_Replacement, in_msg.LineAddress,
                      L1Dcache_entry, TBEs[in_msg.LineAddress]);
            }

            if (L1Icache.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it
              // in the L1 so let's see if the L2 has it.
              trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                      L1Icache_entry, TBEs[in_msg.LineAddress]);
            } else {
              // No room in the L1, so we need to make room in the L1

              // Check if the line we want to evict is not locked
              Addr addr := L1Icache.cacheProbe(in_msg.LineAddress);
              check_on_cache_probe(mandatoryQueue_in, addr);

              trigger(Event:L1_Replacement, addr,
                      getL1ICacheEntry(addr),
                      TBEs[addr]);
            }
          }
        } else {

          // *** DATA ACCESS ***
          Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
          if (is_valid(L1Dcache_entry)) {
            // The tag matches in the L1, so trigger the demand event for it.
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                    L1Dcache_entry, TBEs[in_msg.LineAddress]);
          } else {

            // Check to see if it is in the OTHER L1
            Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
            if (is_valid(L1Icache_entry)) {
              // The block is in the wrong L1; replace it there before re-issuing the request
              trigger(Event:L1_Replacement, in_msg.LineAddress,
                      L1Icache_entry, TBEs[in_msg.LineAddress]);
            }

            if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it
              // in the L1 so let's see if the L2 has it.
              trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                      L1Dcache_entry, TBEs[in_msg.LineAddress]);
            } else {
              // No room in the L1, so we need to make room in the L1

              // Check if the line we want to evict is not locked
              Addr addr := L1Dcache.cacheProbe(in_msg.LineAddress);
              check_on_cache_probe(mandatoryQueue_in, addr);

              trigger(Event:L1_Replacement, addr,
                      getL1DCacheEntry(addr),
                      TBEs[addr]);
            }
          }
        }
      }
    }
  }
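
  // If the victim line chosen by cacheProbe() is locked (see the "not locked"
  // checks above), check_on_cache_probe() stalls the mandatory request
  // instead of triggering the replacement; stalled requests are replayed
  // later through the wakeUpBuffers() machinery once the block is available.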

  void enqueuePrefetch(Addr address, RubyRequestType type) {
      enqueue(optionalQueue_out, RubyRequest, 1) {
          out_msg.LineAddress := address;
          out_msg.Type := type;
          out_msg.AccessMode := RubyAccessMode:Supervisor;
      }
  }
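
  // This helper is how prefetches generated by the prefetcher enter the
  // protocol: the request is placed on the optionalQueue with a one-cycle
  // enqueue latency and later picked up by optionalQueue_in, which maps it to
  // one of the PF_Load/PF_Ifetch/PF_Store events.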

  // ACTIONS
  action(a_issueGETS, "a", desc="Issue GETS") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETS;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));
        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(pa_issuePfGETS, "pa", desc="Issue prefetch GETS") {
    peek(optionalQueue_in, RubyRequest) {
      enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETS;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));
        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(ai_issueGETINSTR, "ai", desc="Issue GETINSTR") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GET_INSTR;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));
        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(pai_issuePfGETINSTR, "pai",
         desc="Issue GETINSTR for prefetch request") {
      peek(optionalQueue_in, RubyRequest) {
          enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
              out_msg.addr := address;
              out_msg.Type := CoherenceRequestType:GET_INSTR;
              out_msg.Requestor := machineID;
              out_msg.Destination.add(
                  mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));
              out_msg.MessageSize := MessageSizeType:Control;
              out_msg.Prefetch := in_msg.Prefetch;
              out_msg.AccessMode := in_msg.AccessMode;

              DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                      address, out_msg.Destination);
          }
      }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;
        DPRINTF(RubySlicc, "%s\n", machineID);
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));
        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(pb_issuePfGETX, "pb", desc="Issue prefetch GETX") {
      peek(optionalQueue_in, RubyRequest) {
          enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
              out_msg.addr := address;
              out_msg.Type := CoherenceRequestType:GETX;
              out_msg.Requestor := machineID;
              DPRINTF(RubySlicc, "%s\n", machineID);

              out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));

              DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                      address, out_msg.Destination);
              out_msg.MessageSize := MessageSizeType:Control;
              out_msg.Prefetch := in_msg.Prefetch;
              out_msg.AccessMode := in_msg.AccessMode;
          }
      }
  }

  action(c_issueUPGRADE, "c", desc="Issue UPGRADE") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestL1Network_out, RequestMsg,  l1_request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:UPGRADE;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));
        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(d_sendDataToRequestor, "d", desc="send data to requestor") {
    peek(requestL1Network_in, RequestMsg) {
      enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(d2_sendDataToL2, "d2", desc="send data to the L2 cache because of M downgrade") {
    enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
      assert(is_valid(cache_entry));
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(dt_sendDataToRequestor_fromTBE, "dt", desc="send data to requestor") {
    peek(requestL1Network_in, RequestMsg) {
      enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(d2t_sendDataToL2_fromTBE, "d2t", desc="send data to the L2 cache") {
    enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := tbe.Dirty;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(e_sendAckToRequestor, "e", desc="send invalidate ack to requestor (could be L2 or L1)") {
    peek(requestL1Network_in, RequestMsg) {
      enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(f_sendDataToL2, "f", desc="send data to the L2 cache") {
    enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
      assert(is_valid(cache_entry));
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));
      out_msg.MessageSize := MessageSizeType:Writeback_Data;
    }
  }

  action(ft_sendDataToL2_fromTBE, "ft", desc="send data to the L2 cache") {
    enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := tbe.Dirty;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));
      out_msg.MessageSize := MessageSizeType:Writeback_Data;
    }
  }

  action(fi_sendInvAck, "fi", desc="send invalidation ack to the requestor") {
    peek(requestL1Network_in, RequestMsg) {
      enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.AckCount := 1;
      }
    }
  }

  action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
    if (send_evictions) {
      DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
      sequencer.evictionCallback(address);
    }
  }

  action(g_issuePUTX, "g", desc="issue PUTX writeback to the L2 cache") {
    enqueue(requestL1Network_out, RequestMsg, l1_response_latency) {
      assert(is_valid(cache_entry));
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:PUTX;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));
      if (cache_entry.Dirty) {
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(j_sendUnblock, "j", desc="send unblock to the L2 cache") {
    enqueue(unblockNetwork_out, ResponseMsg, to_l2_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));
      out_msg.MessageSize := MessageSizeType:Response_Control;
      DPRINTF(RubySlicc, "%#x\n", address);
    }
  }

  action(jj_sendExclusiveUnblock, "\j", desc="send exclusive unblock to the L2 cache") {
    enqueue(unblockNetwork_out, ResponseMsg, to_l2_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:EXCLUSIVE_UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));
      out_msg.MessageSize := MessageSizeType:Response_Control;
      DPRINTF(RubySlicc, "%#x\n", address);
    }
  }

  action(dg_invalidate_sc, "dg",
         desc="Invalidate store conditional as the cache lost permissions") {
    sequencer.invalidateSC(address);
  }

  action(h_load_hit, "hd",
         desc="Notify sequencer the load completed.")
  {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Dcache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk);
  }

  action(h_ifetch_hit, "hi", desc="Notify sequencer the instruction fetch completed.")
  {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Icache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk);
  }

  action(hx_load_hit, "hx", desc="Notify sequencer the load completed.")
  {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Icache.setMRU(address);
    L1Dcache.setMRU(address);
    sequencer.readCallback(address, cache_entry.DataBlk, true);
  }

  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.")
  {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Dcache.setMRU(cache_entry);
    sequencer.writeCallback(address, cache_entry.DataBlk);
    cache_entry.Dirty := true;
  }

  action(hhx_store_hit, "\hx", desc="Notify sequencer that store completed.")
  {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Icache.setMRU(address);
    L1Dcache.setMRU(address);
    sequencer.writeCallback(address, cache_entry.DataBlk, true);
    cache_entry.Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
    check_allocate(TBEs);
    assert(is_valid(cache_entry));
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.isPrefetch := false;
    tbe.Dirty := cache_entry.Dirty;
    tbe.DataBlk := cache_entry.DataBlk;
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue(clockEdge());
  }

  action(l_popRequestQueue, "l",
    desc="Pop incoming request queue and profile the delay within this virtual network") {
    Tick delay := requestL1Network_in.dequeue(clockEdge());
    profileMsgDelay(2, ticksToCycles(delay));
  }

  action(o_popIncomingResponseQueue, "o",
    desc="Pop Incoming Response queue and profile the delay within this virtual network") {
    Tick delay := responseL1Network_in.dequeue(clockEdge());
    profileMsgDelay(1, ticksToCycles(delay));
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(u_writeDataToL1Cache, "u", desc="Write data to cache") {
    peek(responseL1Network_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;
    }
  }

  action(q_updateAckCount, "q", desc="Update ack count") {
    peek(responseL1Network_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.pendingAcks := tbe.pendingAcks - in_msg.AckCount;
      APPEND_TRANSITION_COMMENT(in_msg.AckCount);
      APPEND_TRANSITION_COMMENT(" p: ");
      APPEND_TRANSITION_COMMENT(tbe.pendingAcks);
    }
  }

  action(ff_deallocateL1CacheBlock, "\f", desc="Deallocate L1 cache block.  Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    if (L1Dcache.isTagPresent(address)) {
      L1Dcache.deallocate(address);
    } else {
      L1Icache.deallocate(address);
    }
    unset_cache_entry();
  }

  action(oo_allocateL1DCacheBlock, "\o", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1Dcache.allocate(address, new Entry));
    }
  }

  action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1Icache.allocate(address, new Entry));
    }
  }

  action(z_stallAndWaitMandatoryQueue, "\z", desc="Stall and wait the L1 mandatory request queue") {
    stall_and_wait(mandatoryQueue_in, address);
  }

  action(z_stallAndWaitOptionalQueue, "\pz", desc="Stall and wait the L1 prefetch request queue") {
    stall_and_wait(optionalQueue_in, address);
  }

  action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
    wakeUpBuffers(address);
  }

  action(uu_profileInstMiss, "\uim", desc="Profile the demand miss") {
      ++L1Icache.demand_misses;
  }

  action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
      ++L1Icache.demand_hits;
  }

  action(uu_profileDataMiss, "\udm", desc="Profile the demand miss") {
      ++L1Dcache.demand_misses;
  }

  action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
      ++L1Dcache.demand_hits;
  }

  action(po_observeHit, "\ph", desc="Inform the prefetcher about the hit") {
      peek(mandatoryQueue_in, RubyRequest) {
          if (cache_entry.isPrefetch) {
              prefetcher.observePfHit(in_msg.LineAddress);
              cache_entry.isPrefetch := false;
          }
      }
  }

  action(po_observeMiss, "\po", desc="Inform the prefetcher about the miss") {
      peek(mandatoryQueue_in, RubyRequest) {
          if (enable_prefetch) {
              prefetcher.observeMiss(in_msg.LineAddress, in_msg.Type);
          }
      }
  }

  action(ppm_observePfMiss, "\ppm",
         desc="Inform the prefetcher about the partial miss") {
      peek(mandatoryQueue_in, RubyRequest) {
          prefetcher.observePfMiss(in_msg.LineAddress);
      }
  }

  action(pq_popPrefetchQueue, "\pq", desc="Pop the prefetch request queue") {
      optionalQueue_in.dequeue(clockEdge());
  }

  action(mp_markPrefetched, "mp", desc="Set the isPrefetch flag") {
      assert(is_valid(cache_entry));
      cache_entry.isPrefetch := true;
  }


  //*****************************************************
  // TRANSITIONS
  //*****************************************************

  // Transitions for Load/Store/Replacement/WriteBack from transient states
  transition({IS, IM, IS_I, M_I, SM, SINK_WB_ACK}, {Load, Ifetch, Store, L1_Replacement}) {
    z_stallAndWaitMandatoryQueue;
  }

  transition({PF_IS, PF_IS_I}, {Store, L1_Replacement}) {
    z_stallAndWaitMandatoryQueue;
  }

  transition({PF_IM, PF_SM}, {Load, Ifetch, L1_Replacement}) {
    z_stallAndWaitMandatoryQueue;
  }

  transition({IS, IM, IS_I, M_I, SM, SINK_WB_ACK, PF_IS, PF_IS_I, PF_IM, PF_SM}, PF_L1_Replacement) {
    z_stallAndWaitOptionalQueue;
  }
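
  // Demand requests and replacements that arrive while the block is in a
  // transient state are parked with stall_and_wait() by the transitions
  // above; kd_wakeUpDependents (wakeUpBuffers) replays them once the
  // outstanding transaction for that address completes.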

  // Transitions from Idle
  transition({NP,I}, {L1_Replacement, PF_L1_Replacement}) {
    ff_deallocateL1CacheBlock;
  }

  transition({S,E,M,IS,IM,SM,IS_I,PF_IS_I,M_I,SINK_WB_ACK,PF_IS,PF_IM},
             {PF_Load, PF_Store, PF_Ifetch}) {
      pq_popPrefetchQueue;
  }

  transition({NP,I}, Load, IS) {
    oo_allocateL1DCacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileDataMiss;
    po_observeMiss;
    k_popMandatoryQueue;
  }

  transition({NP,I}, PF_Load, PF_IS) {
    oo_allocateL1DCacheBlock;
    i_allocateTBE;
    pa_issuePfGETS;
    pq_popPrefetchQueue;
  }

  transition(PF_IS, Load, IS) {
    uu_profileDataMiss;
    ppm_observePfMiss;
    k_popMandatoryQueue;
  }

  transition(PF_IS_I, Load, IS_I) {
    uu_profileDataMiss;
    ppm_observePfMiss;
    k_popMandatoryQueue;
  }

  transition(PF_IS_I, Ifetch, IS_I) {
    uu_profileInstMiss;
    ppm_observePfMiss;
    k_popMandatoryQueue;
  }

  transition({NP,I}, Ifetch, IS) {
    pp_allocateL1ICacheBlock;
    i_allocateTBE;
    ai_issueGETINSTR;
    uu_profileInstMiss;
    po_observeMiss;
    k_popMandatoryQueue;
  }

  transition({NP,I}, PF_Ifetch, PF_IS) {
    pp_allocateL1ICacheBlock;
    i_allocateTBE;
    pai_issuePfGETINSTR;
    pq_popPrefetchQueue;
  }

  // We proactively assume that the prefetch is into
  // the instruction cache
  transition(PF_IS, Ifetch, IS) {
    uu_profileDataMiss;
    ppm_observePfMiss;
    k_popMandatoryQueue;
  }

  transition({NP,I}, Store, IM) {
    oo_allocateL1DCacheBlock;
    i_allocateTBE;
    b_issueGETX;
    uu_profileDataMiss;
    po_observeMiss;
    k_popMandatoryQueue;
  }

  transition({NP,I}, PF_Store, PF_IM) {
    oo_allocateL1DCacheBlock;
    i_allocateTBE;
    pb_issuePfGETX;
    pq_popPrefetchQueue;
  }

  transition(PF_IM, Store, IM) {
    uu_profileDataMiss;
    ppm_observePfMiss;
    k_popMandatoryQueue;
  }

  transition(PF_SM, Store, SM) {
    uu_profileDataMiss;
    ppm_observePfMiss;
    k_popMandatoryQueue;
  }

  transition({NP, I}, Inv) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  // Transitions from Shared
  transition({S,E,M}, Load) {
    h_load_hit;
    uu_profileDataHit;
    po_observeHit;
    k_popMandatoryQueue;
  }

  transition({S,E,M}, Ifetch) {
    h_ifetch_hit;
    uu_profileInstHit;
    po_observeHit;
    k_popMandatoryQueue;
  }

  transition(S, Store, SM) {
    i_allocateTBE;
    c_issueUPGRADE;
    uu_profileDataMiss;
    k_popMandatoryQueue;
  }

  transition(S, {L1_Replacement, PF_L1_Replacement}, I) {
    forward_eviction_to_cpu;
    ff_deallocateL1CacheBlock;
  }

  transition(S, Inv, I) {
    forward_eviction_to_cpu;
    fi_sendInvAck;
    l_popRequestQueue;
  }

  // Transitions from Exclusive

  transition({E,M}, Store, M) {
    hh_store_hit;
    uu_profileDataHit;
    po_observeHit;
    k_popMandatoryQueue;
  }

  transition(E, {L1_Replacement, PF_L1_Replacement}, M_I) {
    // silent E replacement??
    forward_eviction_to_cpu;
    i_allocateTBE;
    g_issuePUTX;   // send data, but hold in case forwarded request
    ff_deallocateL1CacheBlock;
  }

  transition(E, Inv, I) {
    // don't send data
    forward_eviction_to_cpu;
    fi_sendInvAck;
    l_popRequestQueue;
  }

  transition(E, Fwd_GETX, I) {
    forward_eviction_to_cpu;
    d_sendDataToRequestor;
    l_popRequestQueue;
  }

  transition(E, {Fwd_GETS, Fwd_GET_INSTR}, S) {
    d_sendDataToRequestor;
    d2_sendDataToL2;
    l_popRequestQueue;
  }

  // Transitions from Modified

  transition(M, {L1_Replacement, PF_L1_Replacement}, M_I) {
    forward_eviction_to_cpu;
    i_allocateTBE;
    g_issuePUTX;   // send data, but hold in case forwarded request
    ff_deallocateL1CacheBlock;
  }

  transition(M_I, WB_Ack, I) {
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(M, Inv, I) {
    forward_eviction_to_cpu;
    f_sendDataToL2;
    l_popRequestQueue;
  }

  transition(M_I, Inv, SINK_WB_ACK) {
    ft_sendDataToL2_fromTBE;
    l_popRequestQueue;
  }

  transition(M, Fwd_GETX, I) {
    forward_eviction_to_cpu;
    d_sendDataToRequestor;
    l_popRequestQueue;
  }

  transition(M, {Fwd_GETS, Fwd_GET_INSTR}, S) {
    d_sendDataToRequestor;
    d2_sendDataToL2;
    l_popRequestQueue;
  }

  transition(M_I, Fwd_GETX, SINK_WB_ACK) {
    dt_sendDataToRequestor_fromTBE;
    l_popRequestQueue;
  }

  transition(M_I, {Fwd_GETS, Fwd_GET_INSTR}, SINK_WB_ACK) {
    dt_sendDataToRequestor_fromTBE;
    d2t_sendDataToL2_fromTBE;
    l_popRequestQueue;
  }

  // Transitions from IS
  transition({IS, IS_I}, Inv, IS_I) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  transition({PF_IS, PF_IS_I}, Inv, PF_IS_I) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  transition(IS, Data_all_Acks, S) {
    u_writeDataToL1Cache;
    hx_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(PF_IS, Data_all_Acks, S) {
    u_writeDataToL1Cache;
    s_deallocateTBE;
    mp_markPrefetched;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS_I, Data_all_Acks, I) {
    u_writeDataToL1Cache;
    hx_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(PF_IS_I, Data_all_Acks, I) {
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, DataS_fromL1, S) {
    u_writeDataToL1Cache;
    j_sendUnblock;
    hx_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(PF_IS, DataS_fromL1, S) {
    u_writeDataToL1Cache;
    j_sendUnblock;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS_I, DataS_fromL1, I) {
    u_writeDataToL1Cache;
    j_sendUnblock;
    hx_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(PF_IS_I, DataS_fromL1, I) {
    j_sendUnblock;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  // directory is blocked when sending exclusive data
  transition(IS_I, Data_Exclusive, E) {
    u_writeDataToL1Cache;
    hx_load_hit;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  // directory is blocked when sending exclusive data
  transition(PF_IS_I, Data_Exclusive, E) {
    u_writeDataToL1Cache;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Data_Exclusive, E) {
    u_writeDataToL1Cache;
    hx_load_hit;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(PF_IS, Data_Exclusive, E) {
    u_writeDataToL1Cache;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    mp_markPrefetched;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  // Transitions from IM
  transition(IM, Inv, IM) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  transition({PF_IM, PF_SM}, Inv, PF_IM) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  transition(IM, Data, SM) {
    u_writeDataToL1Cache;
    q_updateAckCount;
    o_popIncomingResponseQueue;
  }

  transition(PF_IM, Data, PF_SM) {
    u_writeDataToL1Cache;
    q_updateAckCount;
    o_popIncomingResponseQueue;
  }

  transition(IM, Data_all_Acks, M) {
    u_writeDataToL1Cache;
    hhx_store_hit;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(PF_IM, Data_all_Acks, M) {
    u_writeDataToL1Cache;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    mp_markPrefetched;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  // transitions from SM
  transition(SM, Inv, IM) {
    forward_eviction_to_cpu;
    fi_sendInvAck;
    dg_invalidate_sc;
    l_popRequestQueue;
  }

  transition({SM, IM, PF_SM, PF_IM}, Ack) {
    q_updateAckCount;
    o_popIncomingResponseQueue;
  }

  transition(SM, Ack_all, M) {
    jj_sendExclusiveUnblock;
    hhx_store_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(PF_SM, Ack_all, M) {
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    mp_markPrefetched;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(SINK_WB_ACK, Inv) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  transition(SINK_WB_ACK, WB_Ack, I) {
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }
}