MOESI_CMP_token-L2cache.sm revision 14300:22183ae13998
1/*
2 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
machine(MachineType:L2Cache, "Token protocol")
 : CacheMemory * L2cache;                // tag/data store for this L2 bank
   int N_tokens;                         // number of tokens handed out per shared-data response
   Cycles l2_request_latency := 5;       // enqueue latency for requests this bank issues
   Cycles l2_response_latency := 5;      // enqueue latency for responses this bank issues
   bool filtering_enabled := "True";     // if true, drop first-try external requests with no local sharers

   // L2 BANK QUEUES
   // From local bank of L2 cache TO the network

   // this L2 bank -> a local L1 || mod-directory
   MessageBuffer * responseFromL2Cache, network="To", virtual_network="4",
        vnet_type="response";
   // this L2 bank -> mod-directory
   MessageBuffer * GlobalRequestFromL2Cache, network="To", virtual_network="2",
        vnet_type="request";
   // this L2 bank -> a local L1
   MessageBuffer * L1RequestFromL2Cache, network="To", virtual_network="1",
        vnet_type="request";


   // FROM the network to this local bank of L2 cache

   // a local L1 || mod-directory -> this L2 bank
   MessageBuffer * responseToL2Cache, network="From", virtual_network="4",
        vnet_type="response";
   // persistent (starvation-avoidance) requests -> this L2 bank
   MessageBuffer * persistentToL2Cache, network="From", virtual_network="3",
        vnet_type="persistent";
   // mod-directory -> this L2 bank
   MessageBuffer * GlobalRequestToL2Cache, network="From", virtual_network="2",
        vnet_type="request";
   // a local L1 -> this L2 bank
   MessageBuffer * L1RequestToL2Cache, network="From", virtual_network="1",
        vnet_type="request";

{
  // STATES
  state_declaration(State, desc="L2 Cache states", default="L2Cache_State_I") {
    // Base (stable) states
    NP, AccessPermission:Invalid, desc="Not Present";
    I, AccessPermission:Invalid, desc="Idle";
    S, AccessPermission:Read_Only, desc="Shared, not present in any local L1s";
    O, AccessPermission:Read_Only, desc="Owned, not present in any L1s";
    M, AccessPermission:Read_Write, desc="Modified, not present in any L1s";

    // Locked states: a persistent request from another node holds the
    // block locked, so local accesses must stall (AccessPermission:Busy).
    I_L, AccessPermission:Busy, "I^L", desc="Invalid, Locked";
    S_L, AccessPermission:Busy, "S^L", desc="Shared, Locked";
  }
78
  // EVENTS
  enumeration(Event, desc="Cache events") {

    // Requests
    // "Last_Token" variants fire when satisfying the request would give
    // away this bank's final token (or final owner-half token).
    L1_GETS,             desc="local L1 GETS request";
    L1_GETS_Last_Token,    desc="local L1 GETS request";
    L1_GETX,             desc="local L1 GETX request";
    L1_INV,              desc="L1 no longer has tokens";
    Transient_GETX,      desc="A GetX from another processor";
    Transient_GETS,      desc="A GetS from another processor";
    Transient_GETS_Last_Token,   desc="A GetS from another processor";

    // events initiated by this L2
    L2_Replacement,     desc="L2 Replacement", format="!r";

    // events of external L2 responses

    // Responses
    Writeback_Tokens,               desc="Received a writeback from L1 with only tokens (no data)";
    Writeback_Shared_Data,               desc="Received a writeback from L1 that includes clean data";
    Writeback_All_Tokens,    desc="Received a writeback from L1";
    Writeback_Owned,                desc="Received a writeback from L1";


    Data_Shared,             desc="Received a data message, we are now a sharer";
    Data_Owner,              desc="Received a data message, we are now the owner";
    Data_All_Tokens,   desc="Received a data message, we are now the owner, we now have all the tokens";
    Ack,                     desc="Received an ack message";
    Ack_All_Tokens,          desc="Received an ack message, we now have all the tokens";

    // Lock/Unlock (persistent-request table activity)
    Persistent_GETX,     desc="Another processor has priority to read/write";
    Persistent_GETS,     desc="Another processor has priority to read";
    Persistent_GETS_Last_Token, desc="Another processor has priority to read";
    Own_Lock_or_Unlock,  desc="This processor now has priority";
  }
115
  // TYPES

  // CacheEntry: per-line state kept in the L2 tag array.
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState,        desc="cache state";
    bool Dirty,              desc="Is the data dirty (different than memory)?";
    int Tokens,              desc="The number of tokens we're holding for the line";
    DataBlock DataBlk,       desc="data for the block";
  }
125
  // Per-line filter entry tracking which local L1s may hold the block.
  structure(DirEntry, desc="...", interface="AbstractEntry") {
    Set Sharers,            desc="Set of the internal processors that want the block in shared state";
    bool exclusive, default="false", desc="if local exclusive is likely";
  }
130
  // External (C++-implemented) unbounded map used as the local L1 filter.
  structure(PerfectCacheMemory, external="yes") {
    void allocate(Addr);
    void deallocate(Addr);
    DirEntry lookup(Addr);
    bool isTagPresent(Addr);
  }
137
  // External table of outstanding persistent (starvation-avoidance)
  // requests; the "smallest" requestor is the current lock holder.
  structure(PersistentTable, external="yes") {
    void persistentRequestLock(Addr, MachineID, AccessType);
    void persistentRequestUnlock(Addr, MachineID);
    MachineID findSmallest(Addr);
    AccessType typeOfSmallest(Addr);
    void markEntries(Addr);
    bool isLocked(Addr);
    int countStarvingForAddress(Addr);
    int countReadStarvingForAddress(Addr);
  }
148
  // Active persistent requests seen by this bank.
  PersistentTable persistentTable;
  // Filter over local L1s: which L1s are likely to hold each block.
  PerfectCacheMemory localDirectory, template="<L2Cache_DirEntry>";

  // Helpers provided by the generated controller base class.
  Tick clockEdge();
  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  MachineID mapAddressToMachine(Addr addr, MachineType mtype);
156
157  Entry getCacheEntry(Addr address), return_by_pointer="yes" {
158    Entry cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
159    return cache_entry;
160  }
161
  // Fetch the local-L1 filter entry for this address (must be present).
  DirEntry getDirEntry(Addr address), return_by_pointer="yes" {
    return localDirectory.lookup(address);
  }
165
  // Functional (debugger-style) read of the cached data block.
  // NOTE(review): assumes the block is present in this bank — getCacheEntry
  // returning an invalid entry is not checked here; confirm callers guarantee it.
  void functionalRead(Addr addr, Packet *pkt) {
    testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
  }
169
170  int functionalWrite(Addr addr, Packet *pkt) {
171    int num_functional_writes := 0;
172    num_functional_writes := num_functional_writes +
173        testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
174    return num_functional_writes;
175  }
176
177  int getTokens(Entry cache_entry) {
178    if (is_valid(cache_entry)) {
179      return cache_entry.Tokens;
180    } else {
181      return 0;
182    }
183  }
184
  // Current protocol state: the cached state wins; an uncached but
  // persistently-locked address reports I_L; everything else is NP.
  State getState(Entry cache_entry, Addr addr) {
    if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    } else if (persistentTable.isLocked(addr)) {
      return State:I_L;
    } else {
      return State:NP;
    }
  }
194
  // Transition the cached line to a new state, sanity-checking the token
  // invariants of the token-coherence protocol along the way.
  void setState(Entry cache_entry, Addr addr, State state) {

    if (is_valid(cache_entry)) {
      // Make sure the token count is in range
      assert(cache_entry.Tokens >= 0);
      assert(cache_entry.Tokens <= max_tokens());
      // exactly half the tokens would make ownership ambiguous
      assert(cache_entry.Tokens != (max_tokens() / 2));

      // Make sure we have no tokens in L
      if ((state == State:I_L) ) {
        assert(cache_entry.Tokens == 0);
      }

      // in M and E you have all the tokens
      if (state == State:M ) {
        assert(cache_entry.Tokens == max_tokens());
      }

      // in NP you have no tokens
      if (state == State:NP) {
        assert(cache_entry.Tokens == 0);
      }

      // You have at least one token in S-like states
      if (state == State:S ) {
        assert(cache_entry.Tokens > 0);
      }

      // You have at least half the token in O-like states
      if (state == State:O ) {
        assert(cache_entry.Tokens > (max_tokens() / 2));
      }

      cache_entry.CacheState := state;
    }
  }
231
  // Map the cached state to an access permission; blocks not in this
  // bank are NotPresent.
  AccessPermission getAccessPermission(Addr addr) {
    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      return L2Cache_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }
240
  // Keep the entry's permission in sync with its (new) protocol state.
  void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L2Cache_State_to_permission(state));
    }
  }
246
  // Drop one local L1 from the sharer filter; deallocate the filter
  // entry once the last sharer is gone.
  void removeSharer(Addr addr, NodeID id) {

    if (localDirectory.isTagPresent(addr)) {
      DirEntry dir_entry := getDirEntry(addr);
      dir_entry.Sharers.remove(id);
      if (dir_entry.Sharers.count() == 0) {
        localDirectory.deallocate(addr);
      }
    }
  }
257
258  bool sharersExist(Addr addr) {
259    if (localDirectory.isTagPresent(addr)) {
260      DirEntry dir_entry := getDirEntry(addr);
261      if (dir_entry.Sharers.count() > 0) {
262        return true;
263      }
264      else {
265        return false;
266      }
267    }
268    else {
269      return false;
270    }
271  }
272
273  bool exclusiveExists(Addr addr) {
274    if (localDirectory.isTagPresent(addr)) {
275      DirEntry dir_entry := getDirEntry(addr);
276      if (dir_entry.exclusive) {
277        return true;
278      }
279      else {
280        return false;
281      }
282    }
283    else {
284      return false;
285    }
286  }
287
  // assumes that caller will check to make sure tag is present
  Set getSharers(Addr addr) {
    DirEntry dir_entry := getDirEntry(addr);
    return dir_entry.Sharers;
  }
293
  // Record `id` as the sole (exclusive) local holder, replacing any
  // previously-tracked sharers.
  void setNewWriter(Addr addr, NodeID id) {
    if (localDirectory.isTagPresent(addr) == false) {
      localDirectory.allocate(addr);
    }
    DirEntry dir_entry := getDirEntry(addr);
    dir_entry.Sharers.clear();
    dir_entry.Sharers.add(id);
    dir_entry.exclusive := true;
  }
303
  // Add `id` to the sharer filter without disturbing the exclusive bit.
  void addNewSharer(Addr addr, NodeID id) {
    if (localDirectory.isTagPresent(addr) == false) {
      localDirectory.allocate(addr);
    }
    DirEntry dir_entry := getDirEntry(addr);
    dir_entry.Sharers.add(id);
    // dir_entry.exclusive := false;
  }
312
  // Clear the likely-exclusive hint for the address, if it is tracked.
  void clearExclusiveBitIfExists(Addr addr) {
    if (localDirectory.isTagPresent(addr)) {
      DirEntry dir_entry := getDirEntry(addr);
      dir_entry.exclusive := false;
    }
  }
319
  // ** OUT_PORTS **
  out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache); // requests -> directory
  out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);      // forwarded requests -> local L1s
  out_port(responseNetwork_out, ResponseMsg, responseFromL2Cache);          // responses -> L1s / directory
324
325
326
  // ** IN_PORTS **

  // Persistent Network
  // Applies lock/unlock requests to the persistent table, then triggers
  // an event reflecting the table's resulting state for the address.
  in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
    if (persistentNetwork_in.isReady(clockEdge())) {
      peek(persistentNetwork_in, PersistentMsg) {
        assert(in_msg.Destination.isElement(machineID));

        // First, update the table itself.
        if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
          persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Write);
        } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
          persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Read);
        } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
          persistentTable.persistentRequestUnlock(in_msg.addr, in_msg.Requestor);
        } else {
          error("Unexpected message");
        }

        Entry cache_entry := getCacheEntry(in_msg.addr);
        // React to the message based on the current state of the table
        if (persistentTable.isLocked(in_msg.addr)) {

          if (persistentTable.typeOfSmallest(in_msg.addr) == AccessType:Read) {
            // A read lock while we hold our last token (or last token
            // beyond the owner half) must give that token away.
            if (getTokens(cache_entry) == 1 ||
                getTokens(cache_entry) == (max_tokens() / 2) + 1) {
              trigger(Event:Persistent_GETS_Last_Token, in_msg.addr,
                      cache_entry);
            } else {
              trigger(Event:Persistent_GETS, in_msg.addr, cache_entry);
            }
          } else {
            trigger(Event:Persistent_GETX, in_msg.addr, cache_entry);
          }
        }
        else {
            trigger(Event:Own_Lock_or_Unlock, in_msg.addr, cache_entry);
        }
      }
    }
  }
367
368
  // Request Network
  // Transient (non-persistent) requests arriving from other chips via
  // the directory.
  in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
    if (requestNetwork_in.isReady(clockEdge())) {
      peek(requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));

        Entry cache_entry := getCacheEntry(in_msg.addr);
        if (in_msg.Type == CoherenceRequestType:GETX) {
            trigger(Event:Transient_GETX, in_msg.addr, cache_entry);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          // Giving away our only token deserves its own event.
          if (getTokens(cache_entry) == 1) {
            trigger(Event:Transient_GETS_Last_Token, in_msg.addr,
                    cache_entry);
          }
          else {
            trigger(Event:Transient_GETS, in_msg.addr, cache_entry);
          }
        } else {
          error("Unexpected message");
        }
      }
    }
  }
392
  // Requests from the local L1 caches attached to this bank.
  in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
    if (L1requestNetwork_in.isReady(clockEdge())) {
      peek(L1requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        Entry cache_entry := getCacheEntry(in_msg.addr);
        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:L1_GETX, in_msg.addr, cache_entry);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          // Either our last token outright, or the last one beyond the
          // owner half — both mean this GETS takes our final spare token.
          if (getTokens(cache_entry) == 1 ||
              getTokens(cache_entry) == (max_tokens() / 2) + 1) {
            trigger(Event:L1_GETS_Last_Token, in_msg.addr, cache_entry);
          }
          else {
            trigger(Event:L1_GETS, in_msg.addr, cache_entry);
          }
        } else {
          error("Unexpected message");
        }
      }
    }
  }
414
415
  // Response Network
  // The outer branch asks: would accepting this message give us ALL of
  // the tokens for the line?  If so, the *_All_Tokens event variants
  // fire; otherwise the ordinary ones do.  Writebacks additionally need
  // either a free cache frame or a pre-existing entry, else the bank
  // first triggers a replacement of the probe victim.
  in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
    if (responseNetwork_in.isReady(clockEdge())) {
      peek(responseNetwork_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));
        Entry cache_entry := getCacheEntry(in_msg.addr);

        if (getTokens(cache_entry) + in_msg.Tokens != max_tokens()) {
          if (in_msg.Type == CoherenceResponseType:ACK) {
            // plain acks never carry the owner token
            assert(in_msg.Tokens < (max_tokens() / 2));
            trigger(Event:Ack, in_msg.addr, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
            trigger(Event:Data_Owner, in_msg.addr, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
            trigger(Event:Data_Shared, in_msg.addr, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
                     in_msg.Type == CoherenceResponseType:WB_OWNED ||
                     in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {

            if (L2cache.cacheAvail(in_msg.addr) || is_valid(cache_entry)) {

              // either room is available or the block is already present

              if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Tokens, in_msg.addr, cache_entry);
              } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Shared_Data, in_msg.addr, cache_entry);
              }
              else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
                //assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Owned, in_msg.addr, cache_entry);
              }
            }
            else {
                // no room: evict a victim before the writeback can land
                Addr victim := L2cache.cacheProbe(in_msg.addr);
                trigger(Event:L2_Replacement, victim, getCacheEntry(victim));
            }
          } else if (in_msg.Type == CoherenceResponseType:INV) {
            trigger(Event:L1_INV, in_msg.addr, cache_entry);
          } else {
            error("Unexpected message");
          }
        } else {
          // Accepting this message completes the full token set.
          if (in_msg.Type == CoherenceResponseType:ACK) {
            assert(in_msg.Tokens < (max_tokens() / 2));
            trigger(Event:Ack_All_Tokens, in_msg.addr, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER ||
                     in_msg.Type == CoherenceResponseType:DATA_SHARED) {
            trigger(Event:Data_All_Tokens, in_msg.addr, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
                     in_msg.Type == CoherenceResponseType:WB_OWNED ||
                     in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
            if (L2cache.cacheAvail(in_msg.addr) || is_valid(cache_entry)) {

              // either room is available or the block is already present

              if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
                assert(in_msg.Dirty == false);
                assert(  (getState(cache_entry, in_msg.addr) != State:NP)
                      && (getState(cache_entry, in_msg.addr) != State:I) );
                trigger(Event:Writeback_All_Tokens, in_msg.addr, cache_entry);
              } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_All_Tokens, in_msg.addr, cache_entry);
              }
              else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
                trigger(Event:Writeback_All_Tokens, in_msg.addr, cache_entry);
              }
            }
            else {
                // no room: evict a victim before the writeback can land
                Addr victim := L2cache.cacheProbe(in_msg.addr);
                trigger(Event:L2_Replacement, victim, getCacheEntry(victim));
            }
          } else if (in_msg.Type == CoherenceResponseType:INV) {
            trigger(Event:L1_INV, in_msg.addr, cache_entry);
          } else {
            DPRINTF(RubySlicc, "%s\n", in_msg.Type);
            error("Unexpected message");
          }
        }
      }
    }
  }
501
502
  // ACTIONS

  // Re-issue a local L1 request globally by sending it to the directory.
  action(a_broadcastLocalRequest, "a", desc="broadcast local request globally") {

    peek(L1requestNetwork_in, RequestMsg) {

     // if this is a retry or no local sharers, broadcast normally
        enqueue(globalRequestNetwork_out, RequestMsg, l2_request_latency) {
           out_msg.addr := in_msg.addr;
           out_msg.Type := in_msg.Type;
           out_msg.Requestor := in_msg.Requestor;
           out_msg.RetryNum := in_msg.RetryNum;

           //
           // If a statically shared L2 cache, then no other L2 caches can
           // store the block
           //
           //out_msg.Destination.broadcast(MachineType:L2Cache);
           //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
           //out_msg.Destination.remove(map_L1CacheMachId_to_L2Cache(address, in_msg.Requestor));

           out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
           out_msg.MessageSize := MessageSizeType:Request_Control;
           out_msg.AccessMode := in_msg.AccessMode;
           out_msg.Prefetch := in_msg.Prefetch;
        } //enqueue
      // } // if

         //profile_filter_action(0);
    } // peek
  } //action
534
535
  // Forward a response we cannot keep (tokens and/or data) on to memory
  // unchanged except for the new sender/destination.
  action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
      }
    }
  }
551
  // On replacing a clean line, return any held tokens to the directory
  // as a control-only ack (no data needed).
  action(c_cleanReplacement, "c", desc="Issue clean writeback") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
        out_msg.Tokens := cache_entry.Tokens;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
      cache_entry.Tokens := 0;
    }
  }
566
  // On replacing an owned line, send tokens (and data iff dirty) back to
  // the directory; message type and size depend on dirtiness.
  action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
    assert(is_valid(cache_entry));
    enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
      out_msg.addr := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
      out_msg.Tokens := cache_entry.Tokens;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;

      if (cache_entry.Dirty) {
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
      } else {
        // clean owner: memory already has the data, an ack suffices
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
        out_msg.Type := CoherenceResponseType:ACK_OWNER;
      }
    }
    cache_entry.Tokens := 0;
  }
587
  // Answer a transient GETS with shared data.  If we hold plenty of
  // tokens beyond the owner half, give away N_tokens; otherwise just one.
  action(d_sendDataWithTokens, "d", desc="Send data and a token from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
        enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := N_tokens;
          out_msg.DataBlk := cache_entry.DataBlk;
          out_msg.Dirty := false;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        cache_entry.Tokens := cache_entry.Tokens - N_tokens;
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := 1;
          out_msg.DataBlk := cache_entry.DataBlk;
          out_msg.Dirty := false;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        cache_entry.Tokens := cache_entry.Tokens - 1;
      }
    }
  }
619
  // Answer a transient GETX: hand over data plus every token we hold,
  // transferring ownership to the requestor.
  action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
    assert(is_valid(cache_entry));
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        assert(cache_entry.Tokens >= 1);
        out_msg.Tokens := cache_entry.Tokens;
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    cache_entry.Tokens := 0;
  }
637
  // Surrender all collected tokens (no data) to the current persistent
  // lock holder; no-op when we hold none.
  action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(cache_entry.Tokens >= 1);
        out_msg.Tokens := cache_entry.Tokens;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
    cache_entry.Tokens := 0;
  }
653
  // Surrender data and every held token to the current persistent lock
  // holder (the "starver").
  action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
    assert(is_valid(cache_entry));
    enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      assert(cache_entry.Tokens >= 1);
      out_msg.Tokens := cache_entry.Tokens;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    cache_entry.Tokens := 0;
  }
669
  // Keep a single token (so we remain a sharer) and ack the remainder
  // over to the persistent lock holder.
  action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
    assert(is_valid(cache_entry));
    assert(cache_entry.Tokens > 0);
    if (cache_entry.Tokens > 1) {
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(cache_entry.Tokens >= 1);
        out_msg.Tokens := cache_entry.Tokens - 1;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
    cache_entry.Tokens := 1;
  }
687
688  action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and out tokens but one to starver") {
689    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
690    assert(is_valid(cache_entry));
691    assert(cache_entry.Tokens > (max_tokens() / 2) + 1);
692    enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
693        out_msg.addr := address;
694        out_msg.Type := CoherenceResponseType:DATA_OWNER;
695        out_msg.Sender := machineID;
696        out_msg.Destination.add(persistentTable.findSmallest(address));
697        out_msg.Tokens := cache_entry.Tokens - 1;
698        out_msg.DataBlk := cache_entry.DataBlk;
699        out_msg.Dirty := cache_entry.Dirty;
700        out_msg.MessageSize := MessageSizeType:Response_Data;
701    }
702    cache_entry.Tokens := 1;
703  }
704
705  action(fa_sendDataWithAllTokens, "fa", desc="Send data and out tokens but one to starver") {
706    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
707    assert(is_valid(cache_entry));
708    assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
709    enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
710        out_msg.addr := address;
711        out_msg.Type := CoherenceResponseType:DATA_OWNER;
712        out_msg.Sender := machineID;
713        out_msg.Destination.add(persistentTable.findSmallest(address));
714        out_msg.Tokens := cache_entry.Tokens;
715        out_msg.DataBlk := cache_entry.DataBlk;
716        out_msg.Dirty := cache_entry.Dirty;
717        out_msg.MessageSize := MessageSizeType:Response_Data;
718    }
719    cache_entry.Tokens := 0;
720  }
721
722
723
  // Redirect an incoming response, unchanged, to the persistent lock
  // holder instead of consuming it ourselves.
  action(gg_bounceResponseToStarver, "\g", desc="Redirect response to starving processor") {
    // assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }
740
  // Redirect an L1 writeback to the persistent lock holder, translating
  // the writeback type into the matching response type: shared-data
  // writebacks become DATA_SHARED, token-only writebacks become ACK.
  action(gg_bounceWBSharedToStarver, "\gg", desc="Redirect response to starving processor") {
    //assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
        } else {
          // token-only writeback cannot carry the owner half
          assert(in_msg.Tokens < (max_tokens() / 2));
          out_msg.Type := CoherenceResponseType:ACK;
        }
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }
762
  // Redirect an owned writeback to the persistent lock holder as a
  // DATA_OWNER response (ownership travels with it).
  action(gg_bounceWBOwnedToStarver, "\ggg", desc="Redirect response to starving processor") {
    // assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }
779
780
  // An L1 writeback/hint means that L1 no longer holds the block: drop
  // it from the local sharer filter.
  action(h_updateFilterFromL1HintOrWB, "h", desc="update filter from received writeback") {
    peek(responseNetwork_in, ResponseMsg) {
      removeSharer(in_msg.addr, machineIDToNodeID(in_msg.Sender));
    }
  }
786
  // Forward an external transient request down to the local L1 caches.
  // When filtering is enabled, a first-attempt request (RetryNum == 0) for
  // a line with no recorded local sharers is suppressed entirely; retried
  // requests are always forwarded so forward progress is not filtered away.
  action(j_forwardTransientRequestToLocalSharers, "j", desc="Forward external transient request to local sharers") {
    peek(requestNetwork_in, RequestMsg) {
      if (filtering_enabled && in_msg.RetryNum == 0 && sharersExist(in_msg.addr) == false) {
        //profile_filter_action(1);
        DPRINTF(RubySlicc, "filtered message, Retry Num: %d\n",
                in_msg.RetryNum);
      }
      else {
        // NOTE(review): enqueues with l2_response_latency rather than
        // l2_request_latency for this request-forward path — presumably
        // intentional; confirm against the protocol's timing model.
        enqueue(localRequestNetwork_out, RequestMsg, l2_response_latency ) {
           out_msg.addr := in_msg.addr;
           out_msg.Requestor := in_msg.Requestor;

           //
           // Currently assuming only one chip so all L1s are local
           //
           //out_msg.Destination := getLocalL1IDs(machineID);
           out_msg.Destination.broadcast(MachineType:L1Cache);
           // Do not echo the request back at its originator.
           out_msg.Destination.remove(in_msg.Requestor);

           out_msg.Type := in_msg.Type;
           out_msg.isLocal := false;
           out_msg.MessageSize := MessageSizeType:Broadcast_Control;
           out_msg.AccessMode := in_msg.AccessMode;
           out_msg.Prefetch := in_msg.Prefetch;
        }
        //profile_filter_action(0);
      }
    }
  }
816
  // L2 hit for a local L1 GETS: send the cached data plus exactly one
  // token to the requesting L1, keeping the rest (and any ownership) here.
  action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data and a token from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      assert(cache_entry.Tokens > 0);       // must have a token to give away
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        // Shared data is sent clean; dirty state (if any) stays with the owner.
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        out_msg.Tokens := 1;
      }
      // Account for the single token handed to the L1.
      cache_entry.Tokens := cache_entry.Tokens - 1;
    }
  }
834
  // Transfer ownership to a local L1: send the data and every token this
  // bank holds (including the owner token) as DATA_OWNER, leaving the
  // entry token-less.
  action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and a token from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      // We hold exactly a bare owner majority: half the tokens plus one.
      assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;   // ownership carries dirty state
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        out_msg.Tokens := cache_entry.Tokens;
      }
      // All tokens have left this bank.
      cache_entry.Tokens := 0;
    }
  }
852
  // L1 GETX hit: hand the requesting L1 the data plus every token this
  // bank holds (as DATA_OWNER), emptying the entry's token count.
  action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and a token from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
//      assert(cache_entry.Tokens == max_tokens());
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;   // ownership carries dirty state
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        //out_msg.Tokens := max_tokens();
        // Send however many tokens we actually hold (may be < max_tokens()).
        out_msg.Tokens := cache_entry.Tokens;
      }
      cache_entry.Tokens := 0;
    }
  }
871
  // Consume the head message from the persistent-request network.
  action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
    persistentNetwork_in.dequeue(clockEdge());
  }

  // Consume the head message from the global (transient) request network.
  action(m_popRequestQueue, "m", desc="Pop request queue.") {
    requestNetwork_in.dequeue(clockEdge());
  }

  // Consume the head message from the response network.
  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseNetwork_in.dequeue(clockEdge());
  }

  // Consume the head message from the local-L1 request network.
  action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
    L1requestNetwork_in.dequeue(clockEdge());
  }
887
888
  // Accumulate the tokens carried by an incoming response into this
  // entry's count, and pick up dirty status from owner-carrying messages.
  action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      assert(in_msg.Tokens != 0);   // a response reaching here must carry tokens
      cache_entry.Tokens := cache_entry.Tokens + in_msg.Tokens;

      // this should ideally be in u_writeDataToCache, but Writeback_All_Tokens
      //  may not trigger this action.
      if ( (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:WB_OWNED) && in_msg.Dirty) {
        cache_entry.Dirty := true;
      }
    }
  }
902
  // Record the local L1 requester in the sharer filter: a GETX makes it
  // the (sole) new writer, a GETS adds it as a sharer. Non-L1 requesters
  // are ignored.
  action(r_markNewSharer, "r", desc="Mark the new local sharer from local request message") {
    peek(L1requestNetwork_in, RequestMsg) {
      if (machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) {
        if (in_msg.Type == CoherenceRequestType:GETX) {
          setNewWriter(in_msg.addr, machineIDToNodeID(in_msg.Requestor));
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          addNewSharer(in_msg.addr, machineIDToNodeID(in_msg.Requestor));
        }
      }
    }
  }
914
  // Clear the exclusive bit for this address in the local filter, if set.
  action(r_clearExclusive, "\rrr", desc="clear exclusive bit") {
    clearExclusiveBitIfExists(address);
  }

  // Touch the cache line's replacement state on a local L1 access so it
  // counts as most-recently-used. Only applies to valid entries accessed
  // by an L1 (not external requesters).
  action(r_setMRU, "\rr", desc="manually set the MRU bit for cache line" ) {
    peek(L1requestNetwork_in, RequestMsg) {
      if ((machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) &&
          (is_valid(cache_entry))) {
        L2cache.setMRU(address);
      }
    }
  }
927
  // Give up all tokens collected so far to an external transient
  // requester: send them in an ACK (no data), then zero the local count.
  // If we hold no tokens, nothing is sent but the count is still zeroed.
  action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      peek(requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          assert(cache_entry.Tokens >= 1);
          out_msg.Tokens := cache_entry.Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
    cache_entry.Tokens := 0;
  }
945
  // Same as t_sendAckWithCollectedTokens, but for a request from a local
  // L1 (peeks the L1 request queue instead of the global request queue):
  // send all collected tokens in an ACK and zero the local count.
  action(tt_sendLocalAckWithCollectedTokens, "tt", desc="Send ack with the tokens we've collected thus far.") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      peek(L1requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          assert(cache_entry.Tokens >= 1);
          out_msg.Tokens := cache_entry.Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
    cache_entry.Tokens := 0;
  }
963
  // Install the data block from an incoming response into the cache
  // entry. The dirty bit is only ever raised here, never cleared: once
  // dirty, the entry stays dirty until written back.
  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      if ((cache_entry.Dirty == false) && in_msg.Dirty) {
        cache_entry.Dirty := in_msg.Dirty;
      }
    }
  }
973
  // Allocate a fresh L2 entry for this address and make it the current
  // cache_entry for the remainder of the transition.
  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    set_cache_entry(L2cache.allocate(address, new Entry));
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block.  Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cache.deallocate(address);
    unset_cache_entry();
  }

  // Bump the demand-miss statistic for this bank.
  action(uu_profileMiss, "\um", desc="Profile the demand miss") {
      ++L2cache.demand_misses;
  }

  // Bump the demand-hit statistic for this bank.
  action(uu_profileHit, "\uh", desc="Profile the demand hit") {
      ++L2cache.demand_hits;
  }
990
  // Sanity check: any data-bearing response must agree with the data we
  // already cache. Token-only messages (ACK, WB_TOKENS) carry no valid
  // data block and are exempt.
  action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
    peek(responseNetwork_in, ResponseMsg) {
      if (in_msg.Type != CoherenceResponseType:ACK &&
          in_msg.Type != CoherenceResponseType:WB_TOKENS) {
        assert(is_valid(cache_entry));
        assert(cache_entry.DataBlk == in_msg.DataBlk);
      }
    }
  }
1000
1001
1002  //*****************************************************
1003  // TRANSITIONS
1004  //*****************************************************
1005
  // An L1 invalidation hint in any state: just prune the sharer filter.
  transition({NP, I, S, O, M, I_L, S_L}, L1_INV) {

    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  // Our own persistent lock/unlock observed while in a stable, unlocked
  // state: nothing to do beyond consuming the message.
  transition({NP, I, S, O, M}, Own_Lock_or_Unlock) {
    l_popPersistentQueue;
  }
1015
1016
  // Transitions from NP (block not present in this bank, no tokens held)

  // External transient request: nothing to contribute, but local L1s may
  // still hold the line, so forward to them.
  transition(NP, {Transient_GETX, Transient_GETS}) {
    // forward message to local sharers
    r_clearExclusive;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }


  // Local L1 miss with nothing cached here: broadcast on the L1's behalf.
  transition(NP,  {L1_GETS, L1_GETX}) {
    a_broadcastLocalRequest;
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  // Responses arriving for an uncached block are bounced onward rather
  // than absorbed.
  transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
    bb_bounceResponse;
    n_popResponseQueue;
  }

  // L1 writebacks allocate a new entry; the resulting state reflects the
  // writeback's contents: shared data -> S.
  transition(NP, Writeback_Shared_Data, S) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  // Tokens only, no data -> I.
  transition(NP, Writeback_Tokens, I) {
    vv_allocateL2CacheBlock;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  // All tokens with data -> M.
  transition(NP, Writeback_All_Tokens, M) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  // Owned data (owner token, not all tokens) -> O.
  transition(NP, Writeback_Owned, O) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }


  // Persistent request for an uncached block: remember the lock (I_L),
  // nothing to contribute.
  transition(NP,
             {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
             I_L) {
    l_popPersistentQueue;
  }
1076
  // Transitions from Idle (entry allocated, may hold tokens but no data)

  // Local L1 read miss: broadcast, and pass along any tokens we hold.
  transition(I, {L1_GETS, L1_GETS_Last_Token}) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens;  // send any tokens we have collected
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  // Local L1 write miss: same, tokens go to the requesting L1.
  transition(I, L1_GETX) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(I, L2_Replacement) {
    c_cleanReplacement; // Only needed in some cases
    rr_deallocateL2CacheBlock;
  }

  // External transient request: surrender our tokens and also forward to
  // local sharers.
  transition(I, {Transient_GETX, Transient_GETS, Transient_GETS_Last_Token}) {
    r_clearExclusive;
    t_sendAckWithCollectedTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  // Persistent request: surrender tokens and enter the locked state.
  transition(I,
             {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
             I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }


  // Incoming responses accumulate tokens; data-bearing ones also install
  // data and upgrade the state (S/O/M by token+ownership content).
  transition(I, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Data_Shared, S) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Writeback_Shared_Data, S) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Writeback_Tokens) {
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Data_Owner, O) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Writeback_Owned, O) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Data_All_Tokens, M) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }


  transition(I, Writeback_All_Tokens, M) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }
1165
  // Transitions from Shared (valid clean data, one or more tokens, no owner)

  transition(S, L2_Replacement, I) {
    c_cleanReplacement;
    rr_deallocateL2CacheBlock;
  }

  // External write request: give up all our tokens and drop to I.
  transition(S, Transient_GETX, I) {
    r_clearExclusive;
    t_sendAckWithCollectedTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  // External read request: we have no owner token to give; just forward
  // to local sharers.
  transition(S, {Transient_GETS, Transient_GETS_Last_Token}) {
    j_forwardTransientRequestToLocalSharers;
    r_clearExclusive;
    m_popRequestQueue;
  }

  // Persistent write: surrender everything and lock (I_L).
  transition(S, Persistent_GETX, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }


  // Persistent read: keep a single token to stay a sharer (S_L).
  transition(S, {Persistent_GETS, Persistent_GETS_Last_Token}, S_L) {
    f_sendAckWithAllButOneTokens;
    l_popPersistentQueue;
  }


  // Incoming responses: accumulate tokens; data payloads must match what
  // we already cache. Ownership/all-tokens upgrades to O/M.
  transition(S, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_Tokens) {
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, Writeback_Shared_Data) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }


  transition(S, Data_Owner, O) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_Owned, O) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, Data_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_All_Tokens,  M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  // Local L1 write: our tokens alone can't satisfy it — broadcast, hand
  // our tokens to the L1, and drop to I.
  transition(S, L1_GETX, I) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens;
    r_markNewSharer;
    r_setMRU;
    uu_profileMiss;
    o_popL1RequestQueue;
  }


  // Local L1 read hit: supply data plus one token.
  transition(S, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }

  // Giving away our last token leaves us in I.
  transition(S, L1_GETS_Last_Token, I) {

    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }
1275
  // Transitions from Owned (valid data, owner token, possibly dirty)

  transition(O, L2_Replacement, I) {
    cc_dirtyReplacement;
    rr_deallocateL2CacheBlock;
  }

  // External write: surrender data and all tokens, forward to local
  // sharers, drop to I.
  transition(O, Transient_GETX, I) {
    r_clearExclusive;
    dd_sendDataWithAllTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  // Persistent write: give everything to the starver and lock (I_L).
  transition(O, Persistent_GETX, I_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }

  // Persistent read: keep one token to remain a sharer (S_L).
  transition(O, Persistent_GETS, S_L) {
    ff_sendDataWithAllButOneTokens;
    l_popPersistentQueue;
  }

  // Persistent read wanting the last token: give everything, lock (I_L).
  transition(O, Persistent_GETS_Last_Token, I_L) {
    fa_sendDataWithAllTokens;
    l_popPersistentQueue;
  }

  transition(O, Transient_GETS) {
    // send multiple tokens
    r_clearExclusive;
    d_sendDataWithTokens;
    m_popRequestQueue;
  }

  transition(O, Transient_GETS_Last_Token) {
    // WAIT FOR IT TO GO PERSISTENT
    r_clearExclusive;
    m_popRequestQueue;
  }

  // Token accumulation; collecting every token promotes O -> M.
  transition(O, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Ack_All_Tokens, M) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }


  transition(O, {Writeback_Tokens, Writeback_Shared_Data}) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(O, Data_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Writeback_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  // Local L1 read hit: data plus one token; we keep ownership.
  transition(O, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }

  // Read taking our last token: ownership (and all tokens) move to the L1.
  transition(O, L1_GETS_Last_Token, I) {
    k_dataOwnerFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }

  // Local L1 write: give the L1 our data and all tokens, and broadcast to
  // gather the remaining tokens for it.
  transition(O, L1_GETX, I) {
    a_broadcastLocalRequest;
    k_dataAndAllTokensFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileMiss;
    o_popL1RequestQueue;
  }
1379
  // Transitions from M (valid data, all tokens held)

  transition(M, L2_Replacement, I) {
    cc_dirtyReplacement;
    rr_deallocateL2CacheBlock;
  }

  // MRM_DEBUG:  Give up all tokens even for GETS? ???
  transition(M, {Transient_GETX, Transient_GETS}, I) {
    r_clearExclusive;
    dd_sendDataWithAllTokens;
    m_popRequestQueue;
  }

  // Persistent request: everything goes to the starver; lock (I_L).
  transition(M, {Persistent_GETS, Persistent_GETX}, I_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }


  // Local L1 read hit: one token leaves, so we become Owned.
  transition(M, L1_GETS, O) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }

  // Local L1 write hit: data and all tokens move to the L1.
  transition(M, L1_GETX, I) {
    k_dataAndAllTokensFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }
1415
1416
1417  //Transitions from locked states
1418
1419  transition({I_L, S_L}, Ack) {
1420    gg_bounceResponseToStarver;
1421    n_popResponseQueue;
1422  }
1423
1424  transition({I_L, S_L}, {Data_Shared, Data_Owner, Data_All_Tokens}) {
1425    gg_bounceResponseToStarver;
1426    n_popResponseQueue;
1427  }
1428
1429  transition({I_L, S_L}, {Writeback_Tokens, Writeback_Shared_Data}) {
1430    gg_bounceWBSharedToStarver;
1431    h_updateFilterFromL1HintOrWB;
1432    n_popResponseQueue;
1433  }
1434
1435  transition({I_L, S_L}, {Writeback_Owned, Writeback_All_Tokens}) {
1436    gg_bounceWBOwnedToStarver;
1437    h_updateFilterFromL1HintOrWB;
1438    n_popResponseQueue;
1439  }
1440
1441  transition(S_L, L2_Replacement, I) {
1442    c_cleanReplacement;
1443    rr_deallocateL2CacheBlock;
1444  }
1445
1446  transition(I_L, L2_Replacement, I) {
1447    rr_deallocateL2CacheBlock;
1448  }
1449
1450  transition(I_L, Own_Lock_or_Unlock, I) {
1451    l_popPersistentQueue;
1452  }
1453
1454  transition(S_L, Own_Lock_or_Unlock, S) {
1455    l_popPersistentQueue;
1456  }
1457
1458  transition({I_L, S_L}, {Transient_GETS_Last_Token, Transient_GETS, Transient_GETX}) {
1459    r_clearExclusive;
1460    m_popRequestQueue;
1461  }
1462
1463  transition(I_L, {L1_GETX, L1_GETS}) {
1464    a_broadcastLocalRequest;
1465    r_markNewSharer;
1466    uu_profileMiss;
1467    o_popL1RequestQueue;
1468  }
1469
1470  transition(S_L, L1_GETX, I_L) {
1471    a_broadcastLocalRequest;
1472    tt_sendLocalAckWithCollectedTokens;
1473    r_markNewSharer;
1474    r_setMRU;
1475    uu_profileMiss;
1476    o_popL1RequestQueue;
1477  }
1478
1479  transition(S_L, L1_GETS) {
1480    k_dataFromL2CacheToL1Requestor;
1481    r_markNewSharer;
1482    r_setMRU;
1483    uu_profileHit;
1484    o_popL1RequestQueue;
1485  }
1486
1487  transition(S_L, L1_GETS_Last_Token, I_L) {
1488    k_dataFromL2CacheToL1Requestor;
1489    r_markNewSharer;
1490    r_setMRU;
1491    uu_profileHit;
1492    o_popL1RequestQueue;
1493  }
1494
1495  transition(S_L, Persistent_GETX, I_L) {
1496    e_sendAckWithCollectedTokens;
1497    l_popPersistentQueue;
1498  }
1499
1500  transition(S_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
1501    l_popPersistentQueue;
1502  }
1503
1504  transition(I_L, {Persistent_GETX, Persistent_GETS}) {
1505    l_popPersistentQueue;
1506  }
1507}
1508