/*
 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

machine(MachineType:L1Cache, "MESI Directory L1 Cache CMP")
 : Sequencer * sequencer;
   CacheMemory * L1Icache;
   CacheMemory * L1Dcache;
   Prefetcher * prefetcher;
   int l2_select_num_bits;
   Cycles l1_request_latency := 2;
   Cycles l1_response_latency := 2;
   Cycles to_l2_latency := 1;
   bool send_evictions;
   bool enable_prefetch := "False";

   // Message Queues
   // From this node's L1 cache TO the network

   // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
   MessageBuffer * requestFromL1Cache, network="To", virtual_network="0",
        vnet_type="request";

   // a local L1 -> this L2 bank
   MessageBuffer * responseFromL1Cache, network="To", virtual_network="1",
        vnet_type="response";

   MessageBuffer * unblockFromL1Cache, network="To", virtual_network="2",
        vnet_type="unblock";


   // To this node's L1 cache FROM the network
   // an L2 bank -> this L1
   MessageBuffer * requestToL1Cache, network="From", virtual_network="2",
        vnet_type="request";

   // an L2 bank -> this L1
   MessageBuffer * responseToL1Cache, network="From", virtual_network="1",
        vnet_type="response";

  // Request Buffer for prefetches
  MessageBuffer * optionalQueue;

  // Buffer for requests generated by the processor core.
  MessageBuffer * mandatoryQueue;
{
  // STATES
  state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    NP, AccessPermission:Invalid, desc="Not present in either cache";
    I, AccessPermission:Invalid, desc="an L1 cache entry Idle";
    S, AccessPermission:Read_Only, desc="an L1 cache entry Shared";
    E, AccessPermission:Read_Only, desc="an L1 cache entry Exclusive";
    M, AccessPermission:Read_Write, desc="an L1 cache entry Modified", format="!b";

    // Transient States
    IS, AccessPermission:Busy, desc="L1 idle, issued GETS, have not seen response yet";
    IM, AccessPermission:Busy, desc="L1 idle, issued GETX, have not seen response yet";
    SM, AccessPermission:Read_Only, desc="L1 shared, issued GETX, have not seen response yet";
    IS_I, AccessPermission:Busy, desc="L1 idle, issued GETS, saw Inv before data because directory doesn't block on GETS hit";

    M_I, AccessPermission:Busy, desc="L1 replacing, waiting for ACK";
    SINK_WB_ACK, AccessPermission:Busy, desc="This is to sink WB_Acks from L2";

    // Transient States in which block is being prefetched
    PF_IS, AccessPermission:Busy, desc="Issued GETS, have not seen response yet";
    PF_IM, AccessPermission:Busy, desc="Issued GETX, have not seen response yet";
    PF_SM, AccessPermission:Busy, desc="Issued GETX, received data, waiting for acks";
    PF_IS_I, AccessPermission:Busy, desc="Issued GETS, saw Inv before data";
  }

  // EVENTS
  enumeration(Event, desc="Cache events") {
    // L1 events
    Load,            desc="Load request from the home processor";
    Ifetch,          desc="I-fetch request from the home processor";
    Store,           desc="Store request from the home processor";

    Inv,           desc="Invalidate request from L2 bank";

    // internal generated request
    L1_Replacement,  desc="L1 Replacement", format="!r";
    PF_L1_Replacement,  desc="Prefetch L1 Replacement", format="!pr";

    // other requests
    Fwd_GETX,   desc="GETX from other processor";
    Fwd_GETS,   desc="GETS from other processor";
    Fwd_GET_INSTR,   desc="GET_INSTR from other processor";

    Data,       desc="Data for processor";
    Data_Exclusive,       desc="Exclusive data for processor";
    DataS_fromL1,       desc="data for GETS request, need to unblock directory";
    Data_all_Acks,       desc="Data for processor, all acks";

    Ack,        desc="Ack for processor";
    Ack_all,      desc="Last ack for processor";

    WB_Ack,        desc="Ack for replacement";

    PF_Load,    desc="load request from prefetcher";
    PF_Ifetch,  desc="instruction fetch request from prefetcher";
    PF_Store,   desc="exclusive load request from prefetcher";
  }

  // TYPES

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
    State CacheState,        desc="cache state";
    DataBlock DataBlk,       desc="data for the block";
    bool Dirty, default="false",   desc="data is dirty";
    bool isPrefetch, desc="Set if this block was prefetched and not yet accessed";
  }

  // TBE fields
  structure(TBE, desc="...") {
    Addr addr,              desc="Physical address for this TBE";
    State TBEState,        desc="Transient state";
    DataBlock DataBlk,                desc="Buffer for the data block";
    bool Dirty, default="false",   desc="data is dirty";
    bool isPrefetch,       desc="Set if this was caused by a prefetch";
    int pendingAcks, default="0", desc="number of pending acks";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }

  TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";

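  // Low-order bit where the L2 bank-select field begins; defaults to the
  // block offset width, and is used together with l2_select_num_bits when
  // mapping an address to its home L2 bank.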
  int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";

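  // Prototypes for helper functions supplied by the generated controller
  // infrastructure.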
  Tick clockEdge();
  Cycles ticksToCycles(Tick t);
  void set_cache_entry(AbstractCacheEntry a);
  void unset_cache_entry();
  void set_tbe(TBE a);
  void unset_tbe();
  void wakeUpBuffers(Addr a);
  void profileMsgDelay(int virtualNetworkType, Cycles c);

  // inclusive cache returns L1 entries only
  Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
    if(is_valid(L1Dcache_entry)) {
      return L1Dcache_entry;
    }

    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
    return L1Icache_entry;
  }

  Entry getL1DCacheEntry(Addr addr), return_by_pointer="yes" {
    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
    return L1Dcache_entry;
  }

  Entry getL1ICacheEntry(Addr addr), return_by_pointer="yes" {
    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
    return L1Icache_entry;
  }

  State getState(TBE tbe, Entry cache_entry, Addr addr) {
    assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);

    if(is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:NP;
  }

  void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
    assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);

    // MUST CHANGE
    if(is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
      return L1Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(cache_entry.CacheState));
      return L1Cache_State_to_permission(cache_entry.CacheState);
    }

    DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
    return AccessPermission:NotPresent;
  }

  void functionalRead(Addr addr, Packet *pkt) {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      testAndRead(addr, tbe.DataBlk, pkt);
    } else {
      testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
    }
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      num_functional_writes := num_functional_writes +
        testAndWrite(addr, tbe.DataBlk, pkt);
      return num_functional_writes;
    }

    num_functional_writes := num_functional_writes +
        testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
    return num_functional_writes;
  }

  void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L1Cache_State_to_permission(state));
    }
  }

  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid RubyRequestType");
    }
  }

  Event prefetch_request_type_to_event(RubyRequestType type) {
      if (type == RubyRequestType:LD) {
          return Event:PF_Load;
      } else if (type == RubyRequestType:IFETCH) {
          return Event:PF_Ifetch;
      } else if ((type == RubyRequestType:ST) ||
                 (type == RubyRequestType:ATOMIC)) {
          return Event:PF_Store;
      } else {
          error("Invalid RubyRequestType");
      }
  }

  int getPendingAcks(TBE tbe) {
    return tbe.pendingAcks;
  }

  out_port(requestL1Network_out, RequestMsg, requestFromL1Cache);
  out_port(responseL1Network_out, ResponseMsg, responseFromL1Cache);
  out_port(unblockNetwork_out, ResponseMsg, unblockFromL1Cache);
  out_port(optionalQueue_out, RubyRequest, optionalQueue);


  // Prefetch queue between the controller and the prefetcher
  // As per Spracklen et al. (HPCA 2005), the prefetch queue should be
  // implemented as a LIFO structure.  The structure would allow for fast
  // searches of all entries in the queue, not just the head msg. All
  // msgs in the structure can be invalidated if a demand miss matches.
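  // Note that the plain MessageBuffer backing optionalQueue does not by
  // itself provide that LIFO/search behaviour; it is a simple queue.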
  in_port(optionalQueue_in, RubyRequest, optionalQueue, desc="...", rank = 3) {
      if (optionalQueue_in.isReady(clockEdge())) {
          peek(optionalQueue_in, RubyRequest) {
              // Instruction Prefetch
              if (in_msg.Type == RubyRequestType:IFETCH) {
                  Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
                  if (is_valid(L1Icache_entry)) {
                      // The block to be prefetched is already present in the
                      // cache. We should drop this request.
                      trigger(prefetch_request_type_to_event(in_msg.Type),
                              in_msg.LineAddress,
                              L1Icache_entry, TBEs[in_msg.LineAddress]);
                  }

                  // Check to see if it is in the OTHER L1
                  Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
                  if (is_valid(L1Dcache_entry)) {
                      // The block is in the wrong L1 cache. We should drop
                      // this request.
                      trigger(prefetch_request_type_to_event(in_msg.Type),
                              in_msg.LineAddress,
                              L1Dcache_entry, TBEs[in_msg.LineAddress]);
                  }

                  if (L1Icache.cacheAvail(in_msg.LineAddress)) {
                      // L1 doesn't have the line, but we have space for it
                      // in the L1 so let's see if the L2 has it
                      trigger(prefetch_request_type_to_event(in_msg.Type),
                              in_msg.LineAddress,
                              L1Icache_entry, TBEs[in_msg.LineAddress]);
                  } else {
                      // No room in the L1, so we need to make room in the L1
                      Addr victim := L1Icache.cacheProbe(in_msg.LineAddress);
                      trigger(Event:PF_L1_Replacement,
                              victim, getL1ICacheEntry(victim), TBEs[victim]);
                  }
              } else {
                  // Data prefetch
                  Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
                  if (is_valid(L1Dcache_entry)) {
                      // The block to be prefetched is already present in the
                      // cache. We should drop this request.
                      trigger(prefetch_request_type_to_event(in_msg.Type),
                              in_msg.LineAddress,
                              L1Dcache_entry, TBEs[in_msg.LineAddress]);
                  }

                  // Check to see if it is in the OTHER L1
                  Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
                  if (is_valid(L1Icache_entry)) {
                      // The block is in the wrong L1. Just drop the prefetch
                      // request.
                      trigger(prefetch_request_type_to_event(in_msg.Type),
                              in_msg.LineAddress,
                              L1Icache_entry, TBEs[in_msg.LineAddress]);
                  }

                  if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
                      // L1 doesn't have the line, but we have space for it in
                      // the L1, so let's see if the L2 has it
                      trigger(prefetch_request_type_to_event(in_msg.Type),
                              in_msg.LineAddress,
                              L1Dcache_entry, TBEs[in_msg.LineAddress]);
                  } else {
                      // No room in the L1, so we need to make room in the L1
                      Addr victim := L1Dcache.cacheProbe(in_msg.LineAddress);
                      trigger(Event:PF_L1_Replacement,
                              victim, getL1DCacheEntry(victim), TBEs[victim]);
                  }
              }
          }
      }
  }

  // Response L1 Network - response msg to this L1 cache
  in_port(responseL1Network_in, ResponseMsg, responseToL1Cache, rank = 2) {
    if (responseL1Network_in.isReady(clockEdge())) {
      peek(responseL1Network_in, ResponseMsg, block_on="addr") {
        assert(in_msg.Destination.isElement(machineID));

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
        } else if(in_msg.Type == CoherenceResponseType:DATA) {
          if ((getState(tbe, cache_entry, in_msg.addr) == State:IS ||
               getState(tbe, cache_entry, in_msg.addr) == State:IS_I ||
               getState(tbe, cache_entry, in_msg.addr) == State:PF_IS ||
               getState(tbe, cache_entry, in_msg.addr) == State:PF_IS_I) &&
              machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {

              trigger(Event:DataS_fromL1, in_msg.addr, cache_entry, tbe);

          } else if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
            trigger(Event:Data_all_Acks, in_msg.addr, cache_entry, tbe);
          } else {
            trigger(Event:Data, in_msg.addr, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceResponseType:ACK) {
          if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
            trigger(Event:Ack_all, in_msg.addr, cache_entry, tbe);
          } else {
            trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
          trigger(Event:WB_Ack, in_msg.addr, cache_entry, tbe);
        } else {
          error("Invalid L1 response type");
        }
      }
    }
  }

  // Request network - invalidations and forwarded requests to this L1 cache from the shared L2
  in_port(requestL1Network_in, RequestMsg, requestToL1Cache, rank = 1) {
    if(requestL1Network_in.isReady(clockEdge())) {
      peek(requestL1Network_in, RequestMsg, block_on="addr") {
        assert(in_msg.Destination.isElement(machineID));

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GETX ||
                   in_msg.Type == CoherenceRequestType:UPGRADE) {
          // upgrade transforms to GETX due to race
          trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:Fwd_GETS, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
          trigger(Event:Fwd_GET_INSTR, in_msg.addr, cache_entry, tbe);
        } else {
          error("Invalid forwarded request type");
        }
      }
    }
  }

  // Mandatory Queue between the node's CPU and its L1 caches
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank = 0) {
    if (mandatoryQueue_in.isReady(clockEdge())) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        // Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache

        if (in_msg.Type == RubyRequestType:IFETCH) {
          // ** INSTRUCTION ACCESS ***

          Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
          if (is_valid(L1Icache_entry)) {
            // The tag matches in the L1 I-cache, so trigger the request against that entry.
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                    L1Icache_entry, TBEs[in_msg.LineAddress]);
          } else {

            // Check to see if it is in the OTHER L1
            Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
            if (is_valid(L1Dcache_entry)) {
              // The block is in the wrong L1; replace it so it can be fetched into the right cache
              trigger(Event:L1_Replacement, in_msg.LineAddress,
                      L1Dcache_entry, TBEs[in_msg.LineAddress]);
            }

            if (L1Icache.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it
              // in the L1 so let's see if the L2 has it.
              trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                      L1Icache_entry, TBEs[in_msg.LineAddress]);
            } else {
              // No room in the L1, so we need to make room in the L1

              // Check if the line we want to evict is not locked
              Addr addr := L1Icache.cacheProbe(in_msg.LineAddress);
              check_on_cache_probe(mandatoryQueue_in, addr);

              trigger(Event:L1_Replacement, addr,
                      getL1ICacheEntry(addr),
                      TBEs[addr]);
            }
          }
        } else {

          // *** DATA ACCESS ***
          Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
          if (is_valid(L1Dcache_entry)) {
            // The tag matches in the L1 D-cache, so trigger the request against that entry.
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                    L1Dcache_entry, TBEs[in_msg.LineAddress]);
          } else {

            // Check to see if it is in the OTHER L1
            Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
            if (is_valid(L1Icache_entry)) {
              // The block is in the wrong L1; replace it so it can be fetched into the right cache
              trigger(Event:L1_Replacement, in_msg.LineAddress,
                      L1Icache_entry, TBEs[in_msg.LineAddress]);
            }

            if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it
              // in the L1 so let's see if the L2 has it.
              trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                      L1Dcache_entry, TBEs[in_msg.LineAddress]);
            } else {
              // No room in the L1, so we need to make room in the L1

              // Check if the line we want to evict is not locked
              Addr addr := L1Dcache.cacheProbe(in_msg.LineAddress);
              check_on_cache_probe(mandatoryQueue_in, addr);

              trigger(Event:L1_Replacement, addr,
                      getL1DCacheEntry(addr),
                      TBEs[addr]);
            }
          }
        }
      }
    }
  }

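  // Entry point used by the prefetcher (through the controller) to place a
  // prefetch request on the optional queue.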
  void enqueuePrefetch(Addr address, RubyRequestType type) {
      enqueue(optionalQueue_out, RubyRequest, 1) {
          out_msg.LineAddress := address;
          out_msg.Type := type;
          out_msg.AccessMode := RubyAccessMode:Supervisor;
      }
  }

  // ACTIONS
  action(a_issueGETS, "a", desc="Issue GETS") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETS;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));
        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(pa_issuePfGETS, "pa", desc="Issue prefetch GETS") {
    peek(optionalQueue_in, RubyRequest) {
      enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETS;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));
        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(ai_issueGETINSTR, "ai", desc="Issue GETINSTR") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GET_INSTR;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));
        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(pai_issuePfGETINSTR, "pai",
         desc="Issue GETINSTR for prefetch request") {
      peek(optionalQueue_in, RubyRequest) {
          enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
              out_msg.addr := address;
              out_msg.Type := CoherenceRequestType:GET_INSTR;
              out_msg.Requestor := machineID;
              out_msg.Destination.add(
                  mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));
              out_msg.MessageSize := MessageSizeType:Control;
              out_msg.Prefetch := in_msg.Prefetch;
              out_msg.AccessMode := in_msg.AccessMode;

              DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                      address, out_msg.Destination);
          }
      }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;
        DPRINTF(RubySlicc, "%s\n", machineID);
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));
        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(pb_issuePfGETX, "pb", desc="Issue prefetch GETX") {
      peek(optionalQueue_in, RubyRequest) {
          enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
              out_msg.addr := address;
              out_msg.Type := CoherenceRequestType:GETX;
              out_msg.Requestor := machineID;
              DPRINTF(RubySlicc, "%s\n", machineID);

              out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));

              DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                      address, out_msg.Destination);
              out_msg.MessageSize := MessageSizeType:Control;
              out_msg.Prefetch := in_msg.Prefetch;
              out_msg.AccessMode := in_msg.AccessMode;
          }
      }
  }

  action(c_issueUPGRADE, "c", desc="Issue UPGRADE") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestL1Network_out, RequestMsg,  l1_request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:UPGRADE;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));
        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(d_sendDataToRequestor, "d", desc="send data to requestor") {
    peek(requestL1Network_in, RequestMsg) {
      enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(d2_sendDataToL2, "d2", desc="send data to the L2 cache because of M downgrade") {
    enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
      assert(is_valid(cache_entry));
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(dt_sendDataToRequestor_fromTBE, "dt", desc="send data to requestor") {
    peek(requestL1Network_in, RequestMsg) {
      enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(d2t_sendDataToL2_fromTBE, "d2t", desc="send data to the L2 cache") {
    enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := tbe.Dirty;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(e_sendAckToRequestor, "e", desc="send invalidate ack to requestor (could be L2 or L1)") {
    peek(requestL1Network_in, RequestMsg) {
      enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(f_sendDataToL2, "f", desc="send data to the L2 cache") {
    enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
      assert(is_valid(cache_entry));
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));
      out_msg.MessageSize := MessageSizeType:Writeback_Data;
    }
  }

  action(ft_sendDataToL2_fromTBE, "ft", desc="send data to the L2 cache") {
    enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := tbe.Dirty;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));
      out_msg.MessageSize := MessageSizeType:Writeback_Data;
    }
  }

  action(fi_sendInvAck, "fi", desc="send invalidate ack to the requestor") {
    peek(requestL1Network_in, RequestMsg) {
      enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.AckCount := 1;
      }
    }
  }

  action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
    if (send_evictions) {
      DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
      sequencer.evictionCallback(address);
    }
  }

  action(g_issuePUTX, "g", desc="issue PUTX and send writeback data to the L2 cache") {
    enqueue(requestL1Network_out, RequestMsg, l1_response_latency) {
      assert(is_valid(cache_entry));
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:PUTX;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.Requestor:= machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));
      if (cache_entry.Dirty) {
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(j_sendUnblock, "j", desc="send unblock to the L2 cache") {
    enqueue(unblockNetwork_out, ResponseMsg, to_l2_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));
      out_msg.MessageSize := MessageSizeType:Response_Control;
      DPRINTF(RubySlicc, "%#x\n", address);
    }
  }

  action(jj_sendExclusiveUnblock, "\j", desc="send unblock to the L2 cache") {
    enqueue(unblockNetwork_out, ResponseMsg, to_l2_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:EXCLUSIVE_UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                          l2_select_low_bit, l2_select_num_bits, intToID(0)));
      out_msg.MessageSize := MessageSizeType:Response_Control;
      DPRINTF(RubySlicc, "%#x\n", address);

    }
  }

  action(dg_invalidate_sc, "dg",
         desc="Invalidate store conditional as the cache lost permissions") {
    sequencer.invalidateSC(address);
  }

  action(h_load_hit, "hd",
         desc="Notify sequencer the load completed.")
  {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Dcache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk);
  }

  action(h_ifetch_hit, "hi", desc="Notify sequencer the instruction fetch completed.")
  {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Icache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk);
  }

  action(hx_load_hit, "hx", desc="Notify sequencer the load completed.")
  {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Icache.setMRU(address);
    L1Dcache.setMRU(address);
    sequencer.readCallback(address, cache_entry.DataBlk, true);
  }

  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.")
  {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Dcache.setMRU(cache_entry);
    sequencer.writeCallback(address, cache_entry.DataBlk);
    cache_entry.Dirty := true;
  }

  action(hhx_store_hit, "\hx", desc="Notify sequencer that store completed.")
  {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Icache.setMRU(address);
    L1Dcache.setMRU(address);
    sequencer.writeCallback(address, cache_entry.DataBlk, true);
    cache_entry.Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
    check_allocate(TBEs);
    assert(is_valid(cache_entry));
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.isPrefetch := false;
    tbe.Dirty := cache_entry.Dirty;
    tbe.DataBlk := cache_entry.DataBlk;
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue(clockEdge());
  }

  action(l_popRequestQueue, "l",
    desc="Pop incoming request queue and profile the delay within this virtual network") {
    Tick delay := requestL1Network_in.dequeue(clockEdge());
    profileMsgDelay(2, ticksToCycles(delay));
  }

  action(o_popIncomingResponseQueue, "o",
    desc="Pop Incoming Response queue and profile the delay within this virtual network") {
    Tick delay := responseL1Network_in.dequeue(clockEdge());
    profileMsgDelay(1, ticksToCycles(delay));
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(u_writeDataToL1Cache, "u", desc="Write data to cache") {
    peek(responseL1Network_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;
    }
  }

  action(q_updateAckCount, "q", desc="Update ack count") {
    peek(responseL1Network_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.pendingAcks := tbe.pendingAcks - in_msg.AckCount;
      APPEND_TRANSITION_COMMENT(in_msg.AckCount);
      APPEND_TRANSITION_COMMENT(" p: ");
      APPEND_TRANSITION_COMMENT(tbe.pendingAcks);
    }
  }

  action(ff_deallocateL1CacheBlock, "\f", desc="Deallocate L1 cache block.  Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    if (L1Dcache.isTagPresent(address)) {
      L1Dcache.deallocate(address);
    } else {
      L1Icache.deallocate(address);
    }
    unset_cache_entry();
  }

  action(oo_allocateL1DCacheBlock, "\o", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1Dcache.allocate(address, new Entry));
    }
  }

  action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1Icache.allocate(address, new Entry));
    }
  }

  action(z_stallAndWaitMandatoryQueue, "\z", desc="Stall and wait the L1 mandatory request queue") {
    stall_and_wait(mandatoryQueue_in, address);
  }

  action(z_stallAndWaitOptionalQueue, "\pz", desc="Stall and wait the L1 prefetch request queue") {
    stall_and_wait(optionalQueue_in, address);
  }

  action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
    wakeUpBuffers(address);
  }

  action(uu_profileInstMiss, "\uim", desc="Profile the demand miss") {
      ++L1Icache.demand_misses;
  }

  action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
      ++L1Icache.demand_hits;
  }

  action(uu_profileDataMiss, "\udm", desc="Profile the demand miss") {
      ++L1Dcache.demand_misses;
  }

  action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
      ++L1Dcache.demand_hits;
  }

  action(po_observeHit, "\ph", desc="Inform the prefetcher about the hit") {
      peek(mandatoryQueue_in, RubyRequest) {
          if (cache_entry.isPrefetch) {
              prefetcher.observePfHit(in_msg.LineAddress);
              cache_entry.isPrefetch := false;
          }
      }
  }

  action(po_observeMiss, "\po", desc="Inform the prefetcher about the miss") {
      peek(mandatoryQueue_in, RubyRequest) {
          if (enable_prefetch) {
              prefetcher.observeMiss(in_msg.LineAddress, in_msg.Type);
          }
      }
  }

  action(ppm_observePfMiss, "\ppm",
         desc="Inform the prefetcher about the partial miss") {
      peek(mandatoryQueue_in, RubyRequest) {
          prefetcher.observePfMiss(in_msg.LineAddress);
      }
  }

  action(pq_popPrefetchQueue, "\pq", desc="Pop the prefetch request queue") {
      optionalQueue_in.dequeue(clockEdge());
  }

  action(mp_markPrefetched, "mp", desc="Set the isPrefetch flag") {
      assert(is_valid(cache_entry));
      cache_entry.isPrefetch := true;
  }


  //*****************************************************
  // TRANSITIONS
  //*****************************************************

  // Transitions for Load/Store/Replacement/WriteBack from transient states
  transition({IS, IM, IS_I, M_I, SM, SINK_WB_ACK}, {Load, Ifetch, Store, L1_Replacement}) {
    z_stallAndWaitMandatoryQueue;
  }

  transition({PF_IS, PF_IS_I}, {Store, L1_Replacement}) {
    z_stallAndWaitMandatoryQueue;
  }

  transition({PF_IM, PF_SM}, {Load, Ifetch, L1_Replacement}) {
    z_stallAndWaitMandatoryQueue;
  }

  transition({IS, IM, IS_I, M_I, SM, SINK_WB_ACK, PF_IS, PF_IS_I, PF_IM, PF_SM}, PF_L1_Replacement) {
    z_stallAndWaitOptionalQueue;
  }

  // Transitions from Idle
  transition({NP,I}, {L1_Replacement, PF_L1_Replacement}) {
    ff_deallocateL1CacheBlock;
  }

  transition({S,E,M,IS,IM,SM,IS_I,PF_IS_I,M_I,SINK_WB_ACK,PF_IS,PF_IM},
             {PF_Load, PF_Store, PF_Ifetch}) {
      pq_popPrefetchQueue;
  }

  transition({NP,I}, Load, IS) {
    oo_allocateL1DCacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileDataMiss;
    po_observeMiss;
    k_popMandatoryQueue;
  }

  transition({NP,I}, PF_Load, PF_IS) {
    oo_allocateL1DCacheBlock;
    i_allocateTBE;
    pa_issuePfGETS;
    pq_popPrefetchQueue;
  }

  transition(PF_IS, Load, IS) {
    uu_profileDataMiss;
    ppm_observePfMiss;
    k_popMandatoryQueue;
  }

  transition(PF_IS_I, Load, IS_I) {
    uu_profileDataMiss;
    ppm_observePfMiss;
    k_popMandatoryQueue;
  }

  transition(PF_IS_I, Ifetch, IS_I) {
    uu_profileInstMiss;
    ppm_observePfMiss;
    k_popMandatoryQueue;
  }

  transition({NP,I}, Ifetch, IS) {
    pp_allocateL1ICacheBlock;
    i_allocateTBE;
    ai_issueGETINSTR;
    uu_profileInstMiss;
    po_observeMiss;
    k_popMandatoryQueue;
  }

  transition({NP,I}, PF_Ifetch, PF_IS) {
    pp_allocateL1ICacheBlock;
    i_allocateTBE;
    pai_issuePfGETINSTR;
    pq_popPrefetchQueue;
  }

  // We proactively assume that the prefetch went into
  // the instruction cache
  transition(PF_IS, Ifetch, IS) {
    uu_profileDataMiss;
    ppm_observePfMiss;
    k_popMandatoryQueue;
  }

  transition({NP,I}, Store, IM) {
    oo_allocateL1DCacheBlock;
    i_allocateTBE;
    b_issueGETX;
    uu_profileDataMiss;
    po_observeMiss;
    k_popMandatoryQueue;
  }

  transition({NP,I}, PF_Store, PF_IM) {
    oo_allocateL1DCacheBlock;
    i_allocateTBE;
    pb_issuePfGETX;
    pq_popPrefetchQueue;
  }

  transition(PF_IM, Store, IM) {
    uu_profileDataMiss;
    ppm_observePfMiss;
    k_popMandatoryQueue;
  }

  transition(PF_SM, Store, SM) {
    uu_profileDataMiss;
    ppm_observePfMiss;
    k_popMandatoryQueue;
  }

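  // Invalidations for blocks we do not hold are simply acknowledged.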
  transition({NP, I}, Inv) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  // Transitions from Shared
  transition({S,E,M}, Load) {
    h_load_hit;
    uu_profileDataHit;
    po_observeHit;
    k_popMandatoryQueue;
  }

  transition({S,E,M}, Ifetch) {
    h_ifetch_hit;
    uu_profileInstHit;
    po_observeHit;
    k_popMandatoryQueue;
  }

  transition(S, Store, SM) {
    i_allocateTBE;
    c_issueUPGRADE;
    uu_profileDataMiss;
    k_popMandatoryQueue;
  }

  transition(S, {L1_Replacement, PF_L1_Replacement}, I) {
    forward_eviction_to_cpu;
    ff_deallocateL1CacheBlock;
  }

  transition(S, Inv, I) {
    forward_eviction_to_cpu;
    fi_sendInvAck;
    l_popRequestQueue;
  }

  // Transitions from Exclusive

  transition({E,M}, Store, M) {
    hh_store_hit;
    uu_profileDataHit;
    po_observeHit;
    k_popMandatoryQueue;
  }

  transition(E, {L1_Replacement, PF_L1_Replacement}, M_I) {
    // silent E replacement??
    forward_eviction_to_cpu;
    i_allocateTBE;
    g_issuePUTX;   // send data, but hold in case forwarded request
    ff_deallocateL1CacheBlock;
  }

  transition(E, Inv, I) {
    // don't send data
    forward_eviction_to_cpu;
    fi_sendInvAck;
    l_popRequestQueue;
  }

  transition(E, Fwd_GETX, I) {
    forward_eviction_to_cpu;
    d_sendDataToRequestor;
    l_popRequestQueue;
  }

  transition(E, {Fwd_GETS, Fwd_GET_INSTR}, S) {
    d_sendDataToRequestor;
    d2_sendDataToL2;
    l_popRequestQueue;
  }

  // Transitions from Modified

  transition(M, {L1_Replacement, PF_L1_Replacement}, M_I) {
    forward_eviction_to_cpu;
    i_allocateTBE;
    g_issuePUTX;   // send data, but hold in case forwarded request
    ff_deallocateL1CacheBlock;
  }

  transition(M_I, WB_Ack, I) {
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(M, Inv, I) {
    forward_eviction_to_cpu;
    f_sendDataToL2;
    l_popRequestQueue;
  }

  transition(M_I, Inv, SINK_WB_ACK) {
    ft_sendDataToL2_fromTBE;
    l_popRequestQueue;
  }

  transition(M, Fwd_GETX, I) {
    forward_eviction_to_cpu;
    d_sendDataToRequestor;
    l_popRequestQueue;
  }

  transition(M, {Fwd_GETS, Fwd_GET_INSTR}, S) {
    d_sendDataToRequestor;
    d2_sendDataToL2;
    l_popRequestQueue;
  }

  transition(M_I, Fwd_GETX, SINK_WB_ACK) {
    dt_sendDataToRequestor_fromTBE;
    l_popRequestQueue;
  }

  transition(M_I, {Fwd_GETS, Fwd_GET_INSTR}, SINK_WB_ACK) {
    dt_sendDataToRequestor_fromTBE;
    d2t_sendDataToL2_fromTBE;
    l_popRequestQueue;
  }

  // Transitions from IS
  transition({IS, IS_I}, Inv, IS_I) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  transition({PF_IS, PF_IS_I}, Inv, PF_IS_I) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  transition(IS, Data_all_Acks, S) {
    u_writeDataToL1Cache;
    hx_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(PF_IS, Data_all_Acks, S) {
    u_writeDataToL1Cache;
    s_deallocateTBE;
    mp_markPrefetched;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS_I, Data_all_Acks, I) {
    u_writeDataToL1Cache;
    hx_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(PF_IS_I, Data_all_Acks, I) {
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, DataS_fromL1, S) {
    u_writeDataToL1Cache;
    j_sendUnblock;
    hx_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(PF_IS, DataS_fromL1, S) {
    u_writeDataToL1Cache;
    j_sendUnblock;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS_I, DataS_fromL1, I) {
    u_writeDataToL1Cache;
    j_sendUnblock;
    hx_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(PF_IS_I, DataS_fromL1, I) {
    j_sendUnblock;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  // directory is blocked when sending exclusive data
  transition(IS_I, Data_Exclusive, E) {
    u_writeDataToL1Cache;
    hx_load_hit;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  // directory is blocked when sending exclusive data
  transition(PF_IS_I, Data_Exclusive, E) {
    u_writeDataToL1Cache;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Data_Exclusive, E) {
    u_writeDataToL1Cache;
    hx_load_hit;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(PF_IS, Data_Exclusive, E) {
    u_writeDataToL1Cache;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    mp_markPrefetched;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  // Transitions from IM
  transition(IM, Inv, IM) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  transition({PF_IM, PF_SM}, Inv, PF_IM) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  transition(IM, Data, SM) {
    u_writeDataToL1Cache;
    q_updateAckCount;
    o_popIncomingResponseQueue;
  }

  transition(PF_IM, Data, PF_SM) {
    u_writeDataToL1Cache;
    q_updateAckCount;
    o_popIncomingResponseQueue;
  }

  transition(IM, Data_all_Acks, M) {
    u_writeDataToL1Cache;
    hhx_store_hit;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(PF_IM, Data_all_Acks, M) {
    u_writeDataToL1Cache;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    mp_markPrefetched;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  // Transitions from SM
  transition(SM, Inv, IM) {
    forward_eviction_to_cpu;
    fi_sendInvAck;
    dg_invalidate_sc;
    l_popRequestQueue;
  }

  transition({SM, IM, PF_SM, PF_IM}, Ack) {
    q_updateAckCount;
    o_popIncomingResponseQueue;
  }

  transition(SM, Ack_all, M) {
    jj_sendExclusiveUnblock;
    hhx_store_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(PF_SM, Ack_all, M) {
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    mp_markPrefetched;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

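  // Transitions from SINK_WB_ACK: the dirty data has already been forwarded
  // from the TBE, so the pending WB_Ack from the L2 is simply absorbed.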
  transition(SINK_WB_ACK, Inv) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  transition(SINK_WB_ACK, WB_Ack, I) {
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }
}