/*
 * Copyright (c) 2013-2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * For use for simulation and test purposes only
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Sooraj Puthoor, Blake Hechtman
 */

/*
 * This file is inherited from GPU_VIPER-TCC.sm and retains its structure.
 * It contains very few modifications relative to the original VIPER TCC.
 */
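
/*
 * Informal overview (editorial note): the TCC is the GPU's shared L2. It
 * services RdBlk, WriteThrough, and Atomic requests from the TCPs/SQCs, and
 * reaches the NB/directory indirectly through a per-TCC RegionBuffer (see
 * getPeer below). The WB parameter selects write-back vs. write-through
 * operation; the dirty states M and W appear to be reachable only when WB
 * is true.
 */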

machine(MachineType:TCC, "TCC Cache")
 : CacheMemory * L2cache;
   bool WB; /* is this cache write-back? */
   int regionBufferNum;
   Cycles l2_request_latency := 50;
   Cycles l2_response_latency := 20;

  // From the TCPs or SQCs
  MessageBuffer * requestFromTCP, network="From", virtual_network="1", ordered="true", vnet_type="request";
  // To the Cores. TCC deals only with TCPs/SQCs. CP cores do not communicate directly with TCC.
  MessageBuffer * responseToCore, network="To", virtual_network="3", ordered="true", vnet_type="response";
  // From the NB
  MessageBuffer * probeFromNB, network="From", virtual_network="0", ordered="false", vnet_type="request";
  MessageBuffer * responseFromNB, network="From", virtual_network="2", ordered="false", vnet_type="response";
  // To the NB
  MessageBuffer * requestToNB, network="To", virtual_network="0", ordered="false", vnet_type="request";
  MessageBuffer * responseToNB, network="To", virtual_network="2", ordered="false", vnet_type="response";
  MessageBuffer * unblockToNB, network="To", virtual_network="4", ordered="false", vnet_type="unblock";

  MessageBuffer * triggerQueue, ordered="true", random="false";
{
  // EVENTS
  enumeration(Event, desc="TCC Events") {
    // Requests coming from the Cores
    RdBlk,                  desc="RdBlk event";
    WrVicBlk,               desc="L1 Write Through";
    WrVicBlkBack,           desc="L1 Write Back (dirty cache)";
    Atomic,                 desc="Atomic Op";
    AtomicDone,             desc="AtomicOps Complete";
    AtomicNotDone,          desc="AtomicOps not Complete";
    Data,                   desc="data message";
    // Coming from this TCC
    L2_Repl,                desc="L2 Replacement";
    // Probes
    PrbInv,                 desc="Invalidating probe";
    // Coming from Memory Controller
    WBAck,                  desc="writethrough ack from memory";
  }

  // STATES
  state_declaration(State, desc="TCC State", default="TCC_State_I") {
    M, AccessPermission:Read_Write, desc="Modified (dirty cache only)";
    W, AccessPermission:Read_Write, desc="Written (dirty cache only)";
    V, AccessPermission:Read_Only,  desc="Valid";
    I, AccessPermission:Invalid,    desc="Invalid";
    IV, AccessPermission:Busy,      desc="Waiting for Data";
    WI, AccessPermission:Busy,      desc="Waiting on Writethrough Ack";
    A, AccessPermission:Busy,       desc="Invalid, waiting on atomic Data";
  }

  enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
    DataArrayRead,    desc="Read the data array";
    DataArrayWrite,   desc="Write the data array";
    TagArrayRead,     desc="Read the tag array";
    TagArrayWrite,    desc="Write the tag array";
  }

  // STRUCTURES

  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState,           desc="cache state";
    bool Dirty,                 desc="Is the data dirty (diff from memory)?";
    DataBlock DataBlk,          desc="Data for the block";
    WriteMask writeMask,        desc="Dirty byte mask";
  }

  structure(TBE, desc="...") {
    State TBEState,     desc="Transient state";
    DataBlock DataBlk,  desc="data for the block";
    bool Dirty,         desc="Is the data dirty?";
    bool Shared,        desc="Victim hit by shared probe";
    MachineID From,     desc="Waiting for writeback from...";
    NetDest Destination, desc="Data destination";
    int numAtomics,     desc="number of remaining atomics";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }

  TBETable TBEs, template="<TCC_TBE>", constructor="m_number_of_TBEs";

  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();
  void wakeUpAllBuffers();
  void wakeUpBuffers(Addr a);

  MachineID mapAddressToMachine(Addr addr, MachineType mtype);

  // FUNCTION DEFINITIONS

  Tick clockEdge();
  Tick cyclesToTicks(Cycles c);

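  // Note (editorial): getPeer ignores its argument and always returns the
  // RegionBuffer identified by regionBufferNum. All downward traffic from
  // this TCC (requests, write-throughs, done-acks) is therefore routed
  // through that region buffer rather than sent directly to the directory;
  // only probe responses and unblocks go to the directory itself, via
  // mapAddressToMachine.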
  MachineID getPeer(MachineID mach) {
    return createMachineID(MachineType:RegionBuffer, intToID(regionBufferNum));
  }

  Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
    return static_cast(Entry, "pointer", L2cache.lookup(addr));
  }

  DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
    return getCacheEntry(addr).DataBlk;
  }

  bool presentOrAvail(Addr addr) {
    return L2cache.isTagPresent(addr) || L2cache.cacheAvail(addr);
  }

  State getState(TBE tbe, Entry cache_entry, Addr addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }

  void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  void functionalRead(Addr addr, Packet *pkt) {
    TBE tbe := TBEs.lookup(addr);
    if (is_valid(tbe)) {
      testAndRead(addr, tbe.DataBlk, pkt);
    } else {
      functionalMemoryRead(pkt);
    }
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    TBE tbe := TBEs.lookup(addr);
    if (is_valid(tbe)) {
      num_functional_writes := num_functional_writes +
          testAndWrite(addr, tbe.DataBlk, pkt);
    }

    num_functional_writes := num_functional_writes +
        functionalMemoryWrite(pkt);
    return num_functional_writes;
  }

  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs.lookup(addr);
    if (is_valid(tbe)) {
      return TCC_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if (is_valid(cache_entry)) {
      return TCC_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(TCC_State_to_permission(state));
    }
  }

  void recordRequestType(RequestType request_type, Addr addr) {
    if (request_type == RequestType:DataArrayRead) {
      L2cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
    } else if (request_type == RequestType:DataArrayWrite) {
      L2cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
    } else if (request_type == RequestType:TagArrayRead) {
      L2cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
    } else if (request_type == RequestType:TagArrayWrite) {
      L2cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
    }
  }

  bool checkResourceAvailable(RequestType request_type, Addr addr) {
    if (request_type == RequestType:DataArrayRead) {
      return L2cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
    } else if (request_type == RequestType:DataArrayWrite) {
      return L2cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
    } else if (request_type == RequestType:TagArrayRead) {
      return L2cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
    } else if (request_type == RequestType:TagArrayWrite) {
      return L2cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
    } else {
      error("Invalid RequestType type in checkResourceAvailable");
      return true;
    }
  }

  // ** OUT_PORTS **

  // Three classes of ports:
  // Class 1: downward facing network links to the NB
  out_port(requestToNB_out, CPURequestMsg, requestToNB);
  out_port(responseToNB_out, ResponseMsg, responseToNB);
  out_port(unblockToNB_out, UnblockMsg, unblockToNB);

  // Class 2: upward facing ports to the GPU cores
  out_port(responseToCore_out, ResponseMsg, responseToCore);

  // Class 3: local trigger queue
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);

  // ** IN_PORTS **
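
  // Note (editorial): the trigger queue is local to this TCC. Outstanding
  // atomics are counted in tbe.numAtomics; when the count reaches zero,
  // dna_decrementNumAtomics enqueues a TriggerType:AtomicDone message that
  // this port converts into an AtomicDone event (AtomicNotDone otherwise).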
  in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
    if (triggerQueue_in.isReady(clockEdge())) {
      peek(triggerQueue_in, TriggerMsg) {
        TBE tbe := TBEs.lookup(in_msg.addr);
        Entry cache_entry := getCacheEntry(in_msg.addr);
        if (tbe.numAtomics == 0) {
          trigger(Event:AtomicDone, in_msg.addr, cache_entry, tbe);
        } else {
          trigger(Event:AtomicNotDone, in_msg.addr, cache_entry, tbe);
        }
      }
    }
  }
  in_port(responseFromNB_in, ResponseMsg, responseFromNB) {
    if (responseFromNB_in.isReady(clockEdge())) {
      peek(responseFromNB_in, ResponseMsg, block_on="addr") {
        TBE tbe := TBEs.lookup(in_msg.addr);
        Entry cache_entry := getCacheEntry(in_msg.addr);
        if (in_msg.Type == CoherenceResponseType:NBSysResp) {
          if (presentOrAvail(in_msg.addr)) {
            trigger(Event:Data, in_msg.addr, cache_entry, tbe);
          } else {
            Addr victim := L2cache.cacheProbe(in_msg.addr);
            trigger(Event:L2_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
          }
        } else if (in_msg.Type == CoherenceResponseType:NBSysWBAck) {
          trigger(Event:WBAck, in_msg.addr, cache_entry, tbe);
        } else {
          error("Unexpected Response Message to Core");
        }
      }
    }
  }

  // Finally, handle incoming requests (from TCP) and probes (from NB).

  in_port(probeNetwork_in, NBProbeRequestMsg, probeFromNB) {
    if (probeNetwork_in.isReady(clockEdge())) {
      peek(probeNetwork_in, NBProbeRequestMsg) {
        DPRINTF(RubySlicc, "%s\n", in_msg);
        DPRINTF(RubySlicc, "machineID: %s\n", machineID);
        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs.lookup(in_msg.addr);
        trigger(Event:PrbInv, in_msg.addr, cache_entry, tbe);
      }
    }
  }

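  // Note (editorial): for WriteThrough requests the WB parameter picks the
  // path below. With WB set, the write is absorbed locally as a dirty block
  // (WrVicBlkBack, evicting a victim if needed); otherwise it is passed
  // straight to the NB (WrVicBlk), leaving the block uncached here.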
  in_port(coreRequestNetwork_in, CPURequestMsg, requestFromTCP, rank=0) {
    if (coreRequestNetwork_in.isReady(clockEdge())) {
      peek(coreRequestNetwork_in, CPURequestMsg) {
        TBE tbe := TBEs.lookup(in_msg.addr);
        Entry cache_entry := getCacheEntry(in_msg.addr);
        if (in_msg.Type == CoherenceRequestType:WriteThrough) {
          if (WB) {
            if (presentOrAvail(in_msg.addr)) {
              trigger(Event:WrVicBlkBack, in_msg.addr, cache_entry, tbe);
            } else {
              Addr victim := L2cache.cacheProbe(in_msg.addr);
              trigger(Event:L2_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
            }
          } else {
            trigger(Event:WrVicBlk, in_msg.addr, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceRequestType:Atomic) {
          trigger(Event:Atomic, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:RdBlk) {
          trigger(Event:RdBlk, in_msg.addr, cache_entry, tbe);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg);
          error("Unexpected Request Message to TCC");
        }
      }
    }
  }

  // BEGIN ACTIONS

  action(i_invL2, "i", desc="invalidate TCC cache block") {
    if (is_valid(cache_entry)) {
      L2cache.deallocate(address);
    }
    unset_cache_entry();
  }

  // Data available at TCC. Send the DATA to the TCP.
  action(sd_sendData, "sd", desc="send Shared response") {
    peek(coreRequestNetwork_in, CPURequestMsg) {
      enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:TDSysResp;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.Dirty := false;
        out_msg.State := CoherenceState:Shared;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  // Data was not available at the TCC, so the request was forwarded to the
  // directory, which has now responded with data. Forward the DATA to the
  // TCP(s) and send the unblock ack back to the directory.
  action(sdr_sendDataResponse, "sdr", desc="send Shared response") {
    enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:TDSysResp;
      out_msg.Sender := machineID;
      out_msg.Destination := tbe.Destination;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.MessageSize := MessageSizeType:Response_Data;
      out_msg.Dirty := false;
      out_msg.State := CoherenceState:Shared;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
    enqueue(unblockToNB_out, UnblockMsg, 1) {
      out_msg.addr := address;
      out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }

  action(rd_requestData, "r", desc="Miss in L2, pass on") {
    if (tbe.Destination.count() == 1) {
      peek(coreRequestNetwork_in, CPURequestMsg) {
        enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
          out_msg.addr := address;
          out_msg.Type := in_msg.Type;
          out_msg.Requestor := machineID;
          out_msg.Destination.add(getPeer(machineID));
          out_msg.Shared := false; // unneeded for this request
          out_msg.MessageSize := in_msg.MessageSize;
          DPRINTF(RubySlicc, "%s\n", out_msg);
        }
      }
    }
  }

  action(w_sendResponseWBAck, "w", desc="send WB Ack") {
    peek(responseFromNB_in, ResponseMsg) {
      enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:TDSysWBAck;
        out_msg.Destination.clear();
        out_msg.Destination.add(in_msg.WTRequestor);
        out_msg.Sender := machineID;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(swb_sendWBAck, "swb", desc="send WB Ack") {
    peek(coreRequestNetwork_in, CPURequestMsg) {
      enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:TDSysWBAck;
        out_msg.Destination.clear();
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Sender := machineID;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(ar_sendAtomicResponse, "ar", desc="send Atomic Ack") {
    peek(responseFromNB_in, ResponseMsg) {
      enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:TDSysResp;
        out_msg.Destination.add(in_msg.WTRequestor);
        out_msg.Sender := machineID;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := in_msg.DataBlk;
      }
    }
  }

  action(sd2rb_sendDone2RegionBuffer, "sd2rb", desc="Request finished, send done ack") {
    enqueue(unblockToNB_out, UnblockMsg, 1) {
      out_msg.addr := address;
      out_msg.Destination.add(getPeer(machineID));
      out_msg.DoneAck := true;
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
      if (is_valid(tbe)) {
        out_msg.Dirty := tbe.Dirty;
      } else {
        out_msg.Dirty := false;
      }
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }

  action(a_allocateBlock, "a", desc="allocate TCC block") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L2cache.allocate(address, new Entry));
      cache_entry.writeMask.clear();
    }
  }

  action(t_allocateTBE, "t", desc="allocate TBE Entry") {
    if (is_invalid(tbe)) {
      check_allocate(TBEs);
      TBEs.allocate(address);
      set_tbe(TBEs.lookup(address));
      tbe.Destination.clear();
      tbe.numAtomics := 0;
    }
    if (coreRequestNetwork_in.isReady(clockEdge())) {
      peek(coreRequestNetwork_in, CPURequestMsg) {
        if (in_msg.Type == CoherenceRequestType:RdBlk || in_msg.Type == CoherenceRequestType:Atomic) {
          tbe.Destination.add(in_msg.Requestor);
        }
      }
    }
  }

  action(dt_deallocateTBE, "dt", desc="Deallocate TBE entry") {
    tbe.Destination.clear();
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(wcb_writeCacheBlock, "wcb", desc="write data to TCC") {
    peek(responseFromNB_in, ResponseMsg) {
      cache_entry.DataBlk := in_msg.DataBlk;
      DPRINTF(RubySlicc, "Writing to TCC: %s\n", in_msg);
    }
  }

  action(wdb_writeDirtyBytes, "wdb", desc="write dirty bytes to TCC") {
    peek(coreRequestNetwork_in, CPURequestMsg) {
      cache_entry.DataBlk.copyPartial(in_msg.DataBlk, in_msg.writeMask);
      cache_entry.writeMask.orMask(in_msg.writeMask);
      DPRINTF(RubySlicc, "Writing to TCC: %s\n", in_msg);
    }
  }

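  // Note (editorial): the write-through and write-back actions below carry
  // the accumulated dirty-byte writeMask along with the data, so downstream
  // levels can merge only the bytes that were actually written rather than
  // the whole block.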
  action(wt_writeThrough, "wt", desc="write through data") {
    peek(coreRequestNetwork_in, CPURequestMsg) {
      enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
        out_msg.addr := address;
        out_msg.Requestor := machineID;
        out_msg.WTRequestor := in_msg.Requestor;
        out_msg.Destination.add(getPeer(machineID));
        out_msg.MessageSize := MessageSizeType:Data;
        out_msg.Type := CoherenceRequestType:WriteThrough;
        out_msg.Dirty := true;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.writeMask.orMask(in_msg.writeMask);
      }
    }
  }

  action(wb_writeBack, "wb", desc="write back data") {
    enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
      out_msg.addr := address;
      out_msg.Requestor := machineID;
      out_msg.WTRequestor := machineID;
      out_msg.Destination.add(getPeer(machineID));
      out_msg.MessageSize := MessageSizeType:Data;
      out_msg.Type := CoherenceRequestType:WriteThrough;
      out_msg.Dirty := true;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.writeMask.orMask(cache_entry.writeMask);
    }
  }

  action(at_atomicThrough, "at", desc="send atomic request through to NB") {
    peek(coreRequestNetwork_in, CPURequestMsg) {
      enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
        out_msg.addr := address;
        out_msg.Requestor := machineID;
        out_msg.WTRequestor := in_msg.Requestor;
        out_msg.Destination.add(getPeer(machineID));
        out_msg.MessageSize := MessageSizeType:Data;
        out_msg.Type := CoherenceRequestType:Atomic;
        out_msg.Dirty := true;
        out_msg.writeMask.orMask(in_msg.writeMask);
      }
    }
  }

  action(pi_sendProbeResponseInv, "pi", desc="send probe ack inv, no data") {
    enqueue(responseToNB_out, ResponseMsg, 1) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:CPUPrbResp; // TCC and L3 respond to probes in the same way
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
      out_msg.Dirty := false;
      out_msg.Hit := false;
      out_msg.Ntsl := true;
      out_msg.State := CoherenceState:NA;
      out_msg.MessageSize := MessageSizeType:Response_Control;
    }
  }

  action(ut_updateTag, "ut", desc="update Tag (i.e. set MRU)") {
    L2cache.setMRU(address);
  }

  action(p_popRequestQueue, "p", desc="pop request queue") {
    coreRequestNetwork_in.dequeue(clockEdge());
  }

  action(pr_popResponseQueue, "pr", desc="pop response queue") {
    responseFromNB_in.dequeue(clockEdge());
  }

  action(pp_popProbeQueue, "pp", desc="pop probe queue") {
    probeNetwork_in.dequeue(clockEdge());
  }

  action(zz_recycleRequestQueue, "z", desc="stall") {
    coreRequestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

  action(ina_incrementNumAtomics, "ina", desc="inc num atomics") {
    tbe.numAtomics := tbe.numAtomics + 1;
  }

  action(dna_decrementNumAtomics, "dna", desc="dec num atomics") {
    tbe.numAtomics := tbe.numAtomics - 1;
    if (tbe.numAtomics == 0) {
      enqueue(triggerQueue_out, TriggerMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := TriggerType:AtomicDone;
      }
    }
  }

  action(ptr_popTriggerQueue, "ptr", desc="pop Trigger") {
    triggerQueue_in.dequeue(clockEdge());
  }

  // END ACTIONS

  // BEGIN TRANSITIONS
  // transitions from base
  // Assumptions for ArrayRead/Write:
  //   - the TBE is checked before the tags
  //   - Data Read/Write requires a Tag Read

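  // Overview of the main flows (editorial summary, informal):
  //   read miss:  I --RdBlk--> IV --Data--> V
  //   atomic:     {V, I} --Atomic--> A --Data / AtomicDone--> I
  //   WT write:   I or V --WrVicBlk--> (write through; block stays I or V)
  //   WB write:   I --WrVicBlkBack--> W;  {V, M} --WrVicBlkBack--> M
  //   writeback:  {M, W} --L2_Repl / Atomic / RdBlk--> WI --WBAck--> I
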
  transition(WI, {RdBlk, WrVicBlk, Atomic, WrVicBlkBack}) {TagArrayRead} {
    zz_recycleRequestQueue;
  }

  transition(A, {RdBlk, WrVicBlk, WrVicBlkBack}) {TagArrayRead} {
    zz_recycleRequestQueue;
  }

  transition(IV, {WrVicBlk, Atomic, WrVicBlkBack}) {TagArrayRead} {
    zz_recycleRequestQueue;
  }

  transition({M, V}, RdBlk) {TagArrayRead, DataArrayRead} {
    sd_sendData;
    ut_updateTag;
    p_popRequestQueue;
  }

  transition(W, RdBlk, WI) {TagArrayRead, DataArrayRead} {
    t_allocateTBE;
    wb_writeBack;
  }

  transition(I, RdBlk, IV) {TagArrayRead} {
    t_allocateTBE;
    rd_requestData;
    p_popRequestQueue;
  }

  transition(IV, RdBlk) {
    t_allocateTBE;
    rd_requestData;
    p_popRequestQueue;
  }

  transition({V, I}, Atomic, A) {TagArrayRead} {
    i_invL2;
    t_allocateTBE;
    at_atomicThrough;
    ina_incrementNumAtomics;
    p_popRequestQueue;
  }

  transition(A, Atomic) {
    at_atomicThrough;
    ina_incrementNumAtomics;
    p_popRequestQueue;
  }

  transition({M, W}, Atomic, WI) {TagArrayRead} {
    t_allocateTBE;
    wb_writeBack;
  }

  // The cache block stays in the I state, which implies this TCC is a
  // write-no-allocate cache.
  transition(I, WrVicBlk) {TagArrayRead} {
    wt_writeThrough;
    p_popRequestQueue;
  }

  transition(V, WrVicBlk) {TagArrayRead, DataArrayWrite} {
    ut_updateTag;
    wdb_writeDirtyBytes;
    wt_writeThrough;
    p_popRequestQueue;
  }

  transition({V, M}, WrVicBlkBack, M) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
    ut_updateTag;
    swb_sendWBAck;
    wdb_writeDirtyBytes;
    p_popRequestQueue;
  }

  transition(W, WrVicBlkBack) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
    ut_updateTag;
    swb_sendWBAck;
    wdb_writeDirtyBytes;
    p_popRequestQueue;
  }

  transition(I, WrVicBlkBack, W) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
    a_allocateBlock;
    ut_updateTag;
    swb_sendWBAck;
    wdb_writeDirtyBytes;
    p_popRequestQueue;
  }

  transition({W, M}, L2_Repl, WI) {TagArrayRead, DataArrayRead} {
    t_allocateTBE;
    wb_writeBack;
    i_invL2;
  }

  transition({I, V}, L2_Repl, I) {TagArrayRead, TagArrayWrite} {
    i_invL2;
  }

  transition({A, IV, WI}, L2_Repl) {
    i_invL2;
  }

  transition({I, V}, PrbInv, I) {TagArrayRead, TagArrayWrite} {
    pi_sendProbeResponseInv;
    pp_popProbeQueue;
  }

  transition(M, PrbInv, W) {TagArrayRead, TagArrayWrite} {
    pi_sendProbeResponseInv;
    pp_popProbeQueue;
  }

  transition(W, PrbInv) {TagArrayRead} {
    pi_sendProbeResponseInv;
    pp_popProbeQueue;
  }

  transition({A, IV, WI}, PrbInv) {
    pi_sendProbeResponseInv;
    pp_popProbeQueue;
  }

  transition(IV, Data, V) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
    a_allocateBlock;
    ut_updateTag;
    wcb_writeCacheBlock;
    sdr_sendDataResponse;
    sd2rb_sendDone2RegionBuffer;
    pr_popResponseQueue;
    dt_deallocateTBE;
  }

  transition(A, Data) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
    a_allocateBlock;
    ar_sendAtomicResponse;
    sd2rb_sendDone2RegionBuffer;
    dna_decrementNumAtomics;
    pr_popResponseQueue;
  }

  transition(A, AtomicDone, I) {TagArrayRead, TagArrayWrite} {
    dt_deallocateTBE;
    ptr_popTriggerQueue;
  }

  transition(A, AtomicNotDone) {TagArrayRead} {
    ptr_popTriggerQueue;
  }

  // M and W should not see WBAck, as the cache is in WB mode.
  // WBAcks do not need to check tags.
  transition({I, V, IV, A}, WBAck) {
    w_sendResponseWBAck;
    sd2rb_sendDone2RegionBuffer;
    pr_popResponseQueue;
  }

  transition(WI, WBAck, I) {
    sd2rb_sendDone2RegionBuffer;
    dt_deallocateTBE;
    pr_popResponseQueue;
  }
}