// MOESI_AMD_Base-dir.sm revision 14184:11ac1337c5e2
/*
 * Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * For use for simulation and test purposes only
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Lisa Hsu
 */
35
machine(MachineType:Directory, "AMD Baseline protocol")
// Configuration parameters for the directory controller.
: DirectoryMemory * directory;                  // backing directory state and data
  CacheMemory * L3CacheMemory;                  // shared L3, consulted before going off-chip
  Cycles response_latency := 5;                 // latency for responses/probes sent to cores
  Cycles l3_hit_latency := 50;                  // latency charged on an L3 hit
  bool noTCCdir := "False";                     // true: no TCC directory, probe TCC directly
  bool CPUonly := "False";                      // true: no GPU-side (TCC/TCCdir) targets at all
  int TCC_select_num_bits;                      // address bits used to pick a TCC bank
  bool useL3OnWT := "False";                    // policy knob for write-throughs hitting L3
  Cycles to_memory_controller_latency := 1;     // latency to enqueue a memory request

  // From the Cores
  MessageBuffer * requestFromCores, network="From", virtual_network="0", vnet_type="request";
  MessageBuffer * responseFromCores, network="From", virtual_network="2", vnet_type="response";
  MessageBuffer * unblockFromCores, network="From", virtual_network="4", vnet_type="unblock";

  // To the cores: probes and responses
  MessageBuffer * probeToCore, network="To", virtual_network="0", vnet_type="request";
  MessageBuffer * responseToCore, network="To", virtual_network="2", vnet_type="response";

  // Internal (non-network) queues
  MessageBuffer * triggerQueue;                 // self-triggers (e.g. AcksComplete)
  MessageBuffer * L3triggerQueue;               // delayed L3-hit triggers
  MessageBuffer * responseFromMemory;           // responses from the memory controller
{
  // STATES
  // Naming convention (from the desc strings): BS_*/BM_*/B_* are transient
  // blocked states; suffix _M = waiting for memory only, _PM = waiting for
  // probes and memory, _Pm = waiting for probes with memory data already here.
  state_declaration(State, desc="Directory states", default="Directory_State_U") {
    U, AccessPermission:Backing_Store,                 desc="unblocked";
    BL, AccessPermission:Busy,                  desc="got L3 WB request";
    // BL is Busy because it's possible for the data only to be in the network
    // in the WB, L3 has sent it and gone on with its business in possibly I
    // state.
    BS_M, AccessPermission:Backing_Store,                 desc="blocked waiting for memory";
    BM_M, AccessPermission:Backing_Store,                 desc="blocked waiting for memory";
    B_M, AccessPermission:Backing_Store,                 desc="blocked waiting for memory";
    BP, AccessPermission:Backing_Store,                 desc="blocked waiting for probes, no need for memory";
    BS_PM, AccessPermission:Backing_Store,                desc="blocked waiting for probes and Memory";
    BM_PM, AccessPermission:Backing_Store,                desc="blocked waiting for probes and Memory";
    B_PM, AccessPermission:Backing_Store,                desc="blocked waiting for probes and Memory";
    BS_Pm, AccessPermission:Backing_Store,                desc="blocked waiting for probes, already got memory";
    BM_Pm, AccessPermission:Backing_Store,                desc="blocked waiting for probes, already got memory";
    B_Pm, AccessPermission:Backing_Store,                desc="blocked waiting for probes, already got memory";
    B, AccessPermission:Backing_Store,                  desc="sent response, Blocked til ack";
  }
78
  // Events
  enumeration(Event, desc="Directory events") {
    // CPU requests
    RdBlkS,             desc="Read block Shared";
    RdBlkM,             desc="Read block Modified";
    RdBlk,              desc="Read block";
    CtoD,               desc="Change to Dirty (upgrade, no data needed)";
    WriteThrough,       desc="WriteThrough Message";
    Atomic,             desc="Atomic Message";

    // writebacks
    VicDirty,           desc="victimize dirty block";
    VicClean,           desc="victimize clean block";
    CPUData,            desc="WB data from CPU";
    StaleWB,         desc="Notification that WB has been superceded by a probe";

    // probe responses
    CPUPrbResp,            desc="Probe Response Msg";

    ProbeAcksComplete,  desc="Probe Acks Complete";

    L3Hit,              desc="Hit in L3 return data to core";

    // Memory Controller
    MemData, desc="Fetched data from memory arrives";
    WBAck, desc="Writeback Ack from memory arrives";

    CoreUnblock,            desc="Core received data, unblock";
    UnblockWriteThrough,    desc="Unblock because of writethrough request finishing";

    StaleVicDirty,        desc="Core invalidated before VicDirty processed";
  }
111
112  enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
113    L3DataArrayRead,    desc="Read the data array";
114    L3DataArrayWrite,   desc="Write the data array";
115    L3TagArrayRead,     desc="Read the data array";
116    L3TagArrayWrite,    desc="Write the data array";
117  }
118
  // TYPES

  // DirectoryEntry: per-block permanent directory state.
  structure(Entry, desc="...", interface="AbstractEntry") {
    State DirectoryState,          desc="Directory state";
    DataBlock DataBlk,             desc="data for the block";
    // Requestors whose in-flight VicDirty/VicClean must be sunk because a
    // probe already invalidated them (see StaleVicDirty handling).
    NetDest VicDirtyIgnore,  desc="VicDirty coming from whom to ignore";
  }
127
  // L3 cache entry: data plus the machine that last supplied it.
  structure(CacheEntry, desc="...", interface="AbstractCacheEntry") {
    DataBlock DataBlk,          desc="data for the block";
    MachineID LastSender,       desc="Mach which this block came from";
  }
132
  // Transaction Buffer Entry: per-address transient state while a request,
  // writeback, or probe sequence is in flight.
  structure(TBE, desc="...") {
    State TBEState,     desc="Transient state";
    DataBlock DataBlk,  desc="data for the block";
    bool Dirty,         desc="Is the data dirty?";
    int NumPendingAcks,        desc="num acks expected";
    MachineID OriginalRequestor,        desc="Original Requestor";
    MachineID WTRequestor,        desc="WT Requestor";
    bool Cached,        desc="data hit in Cache";
    bool MemData,       desc="Got MemData?",default="false";
    bool wtData,       desc="Got write through data?",default="false";
    bool atomicData,   desc="Got Atomic op?",default="false";
    Cycles InitialRequestTime, desc="...";
    Cycles ForwardRequestTime, desc="...";
    Cycles ProbeRequestStartTime, desc="...";
    MachineID LastSender, desc="Mach which this block came from";
    bool L3Hit, default="false", desc="Was this an L3 hit?";
    uint64_t probe_id,        desc="probe id for lifetime profiling";
    WriteMask writeMask,    desc="outstanding write through mask";
  }
152
  // External (C++-implemented) table of TBEs, keyed by address.
  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }
159
  TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";

  // Low bit of the TCC bank-select field; defaults to the block-offset width.
  int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";

  // Prototypes implemented by the generated/underlying controller.
  Tick clockEdge();
  Tick cyclesToTicks(Cycles c);

  void set_tbe(TBE a);
  void unset_tbe();
  void wakeUpAllBuffers();
  void wakeUpBuffers(Addr a);
  Cycles curCycle();
172
  // Return the directory entry for addr, allocating one on first touch.
  Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
    Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));

    if (is_valid(dir_entry)) {
      return dir_entry;
    }

    // Not present yet: allocate a fresh entry in the directory.
    dir_entry :=  static_cast(Entry, "pointer",
                              directory.allocate(addr, new Entry));
    return dir_entry;
  }
184
  // Return the freshest data for addr: the TBE copy if a transaction has
  // already captured memory data, otherwise the directory's copy.
  DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
    TBE tbe := TBEs.lookup(addr);
    if (is_valid(tbe) && tbe.MemData) {
      DPRINTF(RubySlicc, "Returning DataBlk from TBE %s:%s\n", addr, tbe);
      return tbe.DataBlk;
    }
    DPRINTF(RubySlicc, "Returning DataBlk from Dir %s:%s\n", addr, getDirectoryEntry(addr));
    return getDirectoryEntry(addr).DataBlk;
  }
194
  // All state is tracked in the directory entry (not the TBE or L3 entry).
  State getState(TBE tbe, CacheEntry entry, Addr addr) {
    return getDirectoryEntry(addr).DirectoryState;
  }
198
  // Mirror of getState: state lives solely in the directory entry.
  void setState(TBE tbe, CacheEntry entry, Addr addr, State state) {
    getDirectoryEntry(addr).DirectoryState := state;
  }
202
  // Functional (debug/simulator) read: prefer in-flight TBE data, else
  // fall through to backing memory.
  void functionalRead(Addr addr, Packet *pkt) {
    TBE tbe := TBEs.lookup(addr);
    if(is_valid(tbe)) {
      testAndRead(addr, tbe.DataBlk, pkt);
    } else {
      functionalMemoryRead(pkt);
    }
  }
211
  // Functional write: update both the in-flight TBE copy (if any) and
  // backing memory; return how many copies were written.
  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    TBE tbe := TBEs.lookup(addr);
    if(is_valid(tbe)) {
      num_functional_writes := num_functional_writes +
            testAndWrite(addr, tbe.DataBlk, pkt);
    }

    num_functional_writes := num_functional_writes
        + functionalMemoryWrite(pkt);
    return num_functional_writes;
  }
225
  AccessPermission getAccessPermission(Addr addr) {
    // For this Directory, all permissions are just tracked in Directory, since
    // it's not possible to have something in TBE but not Dir, just keep track
    // of state all in one place.
    if (directory.isPresent(addr)) {
      return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
    }

    return AccessPermission:NotPresent;
  }
236
  // Permission is derived from the (new) directory state.
  void setAccessPermission(CacheEntry entry, Addr addr, State state) {
    getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
  }
240
  // Forward per-transition resource-usage stats to the L3's counters.
  void recordRequestType(RequestType request_type, Addr addr) {
    if (request_type == RequestType:L3DataArrayRead) {
        L3CacheMemory.recordRequestType(CacheRequestType:DataArrayRead, addr);
    } else if (request_type == RequestType:L3DataArrayWrite) {
        L3CacheMemory.recordRequestType(CacheRequestType:DataArrayWrite, addr);
    } else if (request_type == RequestType:L3TagArrayRead) {
        L3CacheMemory.recordRequestType(CacheRequestType:TagArrayRead, addr);
    } else if (request_type == RequestType:L3TagArrayWrite) {
        L3CacheMemory.recordRequestType(CacheRequestType:TagArrayWrite, addr);
    }
  }
252
253  bool checkResourceAvailable(RequestType request_type, Addr addr) {
254    if (request_type == RequestType:L3DataArrayRead) {
255      return L3CacheMemory.checkResourceAvailable(CacheResourceType:DataArray, addr);
256    } else if (request_type == RequestType:L3DataArrayWrite) {
257      return L3CacheMemory.checkResourceAvailable(CacheResourceType:DataArray, addr);
258    } else if (request_type == RequestType:L3TagArrayRead) {
259      return L3CacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
260    } else if (request_type == RequestType:L3TagArrayWrite) {
261      return L3CacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
262    } else {
263      error("Invalid RequestType type in checkResourceAvailable");
264      return true;
265    }
266  }
267
  // ** OUT_PORTS **
  out_port(probeNetwork_out, NBProbeRequestMsg, probeToCore);      // probes to cores/TCC
  out_port(responseNetwork_out, ResponseMsg, responseToCore);      // data/ack responses to cores

  out_port(triggerQueue_out, TriggerMsg, triggerQueue);            // self-triggers (AcksComplete etc.)
  out_port(L3TriggerQueue_out, TriggerMsg, L3triggerQueue);        // delayed L3-hit triggers
274
  // ** IN_PORTS **
  // Ports are ranked: higher rank is polled first (triggers before new requests).

  // Trigger Queue: internal events (all probe acks collected, WT unblock).
  in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=5) {
    if (triggerQueue_in.isReady(clockEdge())) {
      peek(triggerQueue_in, TriggerMsg) {
        TBE tbe := TBEs.lookup(in_msg.addr);
        CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
        if (in_msg.Type == TriggerType:AcksComplete) {
          trigger(Event:ProbeAcksComplete, in_msg.addr, entry, tbe);
        }else if (in_msg.Type == TriggerType:UnblockWriteThrough) {
          trigger(Event:UnblockWriteThrough, in_msg.addr, entry, tbe);
        } else {
          error("Unknown trigger msg");
        }
      }
    }
  }
293
  // L3 trigger queue: delivers L3Hit events after the modeled l3_hit_latency.
  in_port(L3TriggerQueue_in, TriggerMsg, L3triggerQueue, rank=4) {
    if (L3TriggerQueue_in.isReady(clockEdge())) {
      peek(L3TriggerQueue_in, TriggerMsg) {
        TBE tbe := TBEs.lookup(in_msg.addr);
        CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
        if (in_msg.Type == TriggerType:L3Hit) {
          trigger(Event:L3Hit, in_msg.addr, entry, tbe);
        } else {
          error("Unknown trigger msg");
        }
      }
    }
  }
307
  // Unblock Network: core signals it has received its data; directory can
  // leave the blocked (B*) state.
  in_port(unblockNetwork_in, UnblockMsg, unblockFromCores, rank=3) {
    if (unblockNetwork_in.isReady(clockEdge())) {
      peek(unblockNetwork_in, UnblockMsg) {
        TBE tbe := TBEs.lookup(in_msg.addr);
        CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
        trigger(Event:CoreUnblock, in_msg.addr, entry, tbe);
      }
    }
  }
318
  // Core response network: probe responses, writeback data, and stale-WB
  // notifications from the cores.
  in_port(responseNetwork_in, ResponseMsg, responseFromCores, rank=2) {
    if (responseNetwork_in.isReady(clockEdge())) {
      peek(responseNetwork_in, ResponseMsg) {
        TBE tbe := TBEs.lookup(in_msg.addr);
        CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
        if (in_msg.Type == CoherenceResponseType:CPUPrbResp) {
          trigger(Event:CPUPrbResp, in_msg.addr, entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:CPUData) {
          trigger(Event:CPUData, in_msg.addr, entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:StaleNotif) {
            trigger(Event:StaleWB, in_msg.addr, entry, tbe);
        } else {
          error("Unexpected response type");
        }
      }
    }
  }
337
  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, responseFromMemory, rank=1) {
    if (memQueue_in.isReady(clockEdge())) {
      peek(memQueue_in, MemoryMsg) {
        TBE tbe := TBEs.lookup(in_msg.addr);
        CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:MemData, in_msg.addr, entry, tbe);
          DPRINTF(RubySlicc, "%s\n", in_msg);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:WBAck, in_msg.addr, entry, tbe); // ignore WBAcks, don't care about them.
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }
356
  // Core request network (lowest rank: new requests are polled last).
  // Victim (VicDirty/VicClean) requests from a requestor listed in the
  // entry's VicDirtyIgnore set are stale -- the core was already probed and
  // invalidated -- and are sunk via StaleVicDirty.
  in_port(requestNetwork_in, CPURequestMsg, requestFromCores, rank=0) {
    if (requestNetwork_in.isReady(clockEdge())) {
      peek(requestNetwork_in, CPURequestMsg) {
        TBE tbe := TBEs.lookup(in_msg.addr);
        CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
        if (in_msg.Type == CoherenceRequestType:RdBlk) {
          trigger(Event:RdBlk, in_msg.addr, entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:RdBlkS) {
          trigger(Event:RdBlkS, in_msg.addr, entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:RdBlkM) {
          trigger(Event:RdBlkM, in_msg.addr, entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WriteThrough) {
          trigger(Event:WriteThrough, in_msg.addr, entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:Atomic) {
          trigger(Event:Atomic, in_msg.addr, entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:VicDirty) {
          if (getDirectoryEntry(in_msg.addr).VicDirtyIgnore.isElement(in_msg.Requestor)) {
            DPRINTF(RubySlicc, "Dropping VicDirty for address %s\n", in_msg.addr);
            trigger(Event:StaleVicDirty, in_msg.addr, entry, tbe);
          } else {
            DPRINTF(RubySlicc, "Got VicDirty from %s on %s\n", in_msg.Requestor, in_msg.addr);
            trigger(Event:VicDirty, in_msg.addr, entry, tbe);
          }
        } else if (in_msg.Type == CoherenceRequestType:VicClean) {
          if (getDirectoryEntry(in_msg.addr).VicDirtyIgnore.isElement(in_msg.Requestor)) {
            DPRINTF(RubySlicc, "Dropping VicClean for address %s\n", in_msg.addr);
            trigger(Event:StaleVicDirty, in_msg.addr, entry, tbe);
          } else {
            DPRINTF(RubySlicc, "Got VicClean from %s on %s\n", in_msg.Requestor, in_msg.addr);
            trigger(Event:VicClean, in_msg.addr, entry, tbe);
          }
        } else {
          error("Bad request message type");
        }
      }
    }
  }
394
  // Actions

  // Send a Shared-state data response to the original requestor. If the data
  // came from the L3, report the L3 as the sender for stats/latency purposes.
  action(s_sendResponseS, "s", desc="send Shared response") {
    enqueue(responseNetwork_out, ResponseMsg, response_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:NBSysResp;
      if (tbe.L3Hit) {
        out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
      } else {
        out_msg.Sender := machineID;
      }
      out_msg.Destination.add(tbe.OriginalRequestor);
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.MessageSize := MessageSizeType:Response_Data;
      out_msg.Dirty := false;
      out_msg.State := CoherenceState:Shared;
      // Timestamps carried through for latency profiling.
      out_msg.InitialRequestTime := tbe.InitialRequestTime;
      out_msg.ForwardRequestTime := tbe.ForwardRequestTime;
      out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
      out_msg.OriginalResponder := tbe.LastSender;
      out_msg.L3Hit := tbe.L3Hit;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }
418
  // Send a data response granting Exclusive, downgraded to Shared if probes
  // found the block cached elsewhere (tbe.Cached).
  action(es_sendResponseES, "es", desc="send Exclusive or Shared response") {
    enqueue(responseNetwork_out, ResponseMsg, response_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:NBSysResp;
      if (tbe.L3Hit) {
        out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
      } else {
        out_msg.Sender := machineID;
      }
      out_msg.Destination.add(tbe.OriginalRequestor);
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.MessageSize := MessageSizeType:Response_Data;
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Cached) {
        out_msg.State := CoherenceState:Shared;
      } else {
        out_msg.State := CoherenceState:Exclusive;
      }
      out_msg.InitialRequestTime := tbe.InitialRequestTime;
      out_msg.ForwardRequestTime := tbe.ForwardRequestTime;
      out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
      out_msg.OriginalResponder := tbe.LastSender;
      out_msg.L3Hit := tbe.L3Hit;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }
445
  // Send a Modified-state response. Write-throughs get no data response at
  // all -- just a self-trigger to unblock. Atomics get the data response plus
  // the unblock trigger (the WT requestor is attached so the core can route
  // the atomic's return value).
  action(m_sendResponseM, "m", desc="send Modified response") {
    if (tbe.wtData) {
      enqueue(triggerQueue_out, TriggerMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := TriggerType:UnblockWriteThrough;
      }
    }else{
      enqueue(responseNetwork_out, ResponseMsg, response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:NBSysResp;
        if (tbe.L3Hit) {
          out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
        } else {
          out_msg.Sender := machineID;
        }
        out_msg.Destination.add(tbe.OriginalRequestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.Dirty := tbe.Dirty;
        out_msg.State := CoherenceState:Modified;
        out_msg.CtoD := false;
        out_msg.InitialRequestTime := tbe.InitialRequestTime;
        out_msg.ForwardRequestTime := tbe.ForwardRequestTime;
        out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
        out_msg.OriginalResponder := tbe.LastSender;
        if(tbe.atomicData){
          out_msg.WTRequestor := tbe.WTRequestor;
        }
        out_msg.L3Hit := tbe.L3Hit;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
      if (tbe.atomicData) {
        enqueue(triggerQueue_out, TriggerMsg, 1) {
          out_msg.addr := address;
          out_msg.Type := TriggerType:UnblockWriteThrough;
        }
      }
    }
  }
485
  // Acknowledge a Change-to-Dirty upgrade: control-only response (no data,
  // the requestor already holds the block) granting Modified.
  action(c_sendResponseCtoD, "c", desc="send CtoD Ack") {
      enqueue(responseNetwork_out, ResponseMsg, response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:NBSysResp;
        out_msg.Sender := machineID;
        out_msg.Destination.add(tbe.OriginalRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.Dirty := false;
        out_msg.State := CoherenceState:Modified;
        out_msg.CtoD := true;
        out_msg.InitialRequestTime := tbe.InitialRequestTime;
        out_msg.ForwardRequestTime := curCycle();
        out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
  }
502
  // Ack a writeback request straight off the request network (latency 1,
  // no TBE involved).
  action(w_sendResponseWBAck, "w", desc="send WB Ack") {
    peek(requestNetwork_in, CPURequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:NBSysWBAck;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.WTRequestor := in_msg.WTRequestor;
        out_msg.Sender := machineID;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := curCycle();
        out_msg.ProbeRequestStartTime := curCycle();
      }
    }
  }
518
  // Queue writeback data (taken from the core's response message) to the
  // memory controller.
  action(l_queueMemWBReq, "lq", desc="Write WB data to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      queueMemoryWrite(machineID, address, to_memory_controller_latency,
                       in_msg.DataBlk);
    }
  }
525
  // Satisfy a read either from the L3 or from memory. On an L3 hit the data
  // is captured into the TBE, an L3Hit trigger is scheduled after
  // l3_hit_latency, and the L3 entry is deallocated (the directory now owns
  // the data for this transaction). Otherwise a memory read is queued.
  action(l_queueMemRdReq, "lr", desc="Read data from memory") {
    peek(requestNetwork_in, CPURequestMsg) {
      if (L3CacheMemory.isTagPresent(address)) {
        enqueue(L3TriggerQueue_out, TriggerMsg, l3_hit_latency) {
          out_msg.addr := address;
          out_msg.Type := TriggerType:L3Hit;
          DPRINTF(RubySlicc, "%s\n", out_msg);
        }
        CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
        // Don't clobber TBE data already dirtied (e.g. by write-through data).
        if (tbe.Dirty == false) {
          tbe.DataBlk := entry.DataBlk;
        }
        tbe.LastSender := entry.LastSender;
        tbe.L3Hit := true;
        tbe.MemData := true;
        L3CacheMemory.deallocate(address);
      } else {
        queueMemoryRead(machineID, address, to_memory_controller_latency);
      }
    }
  }
547
  // Broadcast an invalidating probe (data returned) to all core pairs, plus
  // the appropriate GPU-side target (TCC or TCCdir). The requestor is removed
  // from the destination set; NumPendingAcks is the remaining destinations.
  // If nothing needs probing, immediately self-trigger AcksComplete.
  action(dc_probeInvCoreData, "dc", desc="probe inv cores, return data") {
    peek(requestNetwork_in, CPURequestMsg) {
      enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
        out_msg.addr := address;
        out_msg.Type := ProbeRequestType:PrbInv;
        out_msg.ReturnData := true;
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Destination.broadcast(MachineType:CorePair);  // won't be realistic for multisocket

        // add relevant TCC node to list. This replaces all TCPs and SQCs
        // No GPU target at all for conflict-free WT/Atomic, or in CPU-only
        // configurations (empty first branch is intentional).
        if (((in_msg.Type == CoherenceRequestType:WriteThrough ||
              in_msg.Type == CoherenceRequestType:Atomic) &&
             in_msg.NoWriteConflict) ||
            CPUonly) {
        } else if (noTCCdir) {
          out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
                                  TCC_select_low_bit, TCC_select_num_bits));
        } else {
          out_msg.Destination.add(mapAddressToRange(address,
                                                    MachineType:TCCdir,
                            TCC_select_low_bit, TCC_select_num_bits));
        }
        out_msg.Destination.remove(in_msg.Requestor);
        tbe.NumPendingAcks := out_msg.Destination.count();
        if (tbe.NumPendingAcks == 0) {
          enqueue(triggerQueue_out, TriggerMsg, 1) {
            out_msg.addr := address;
            out_msg.Type := TriggerType:AcksComplete;
          }
        }
        DPRINTF(RubySlicc, "%s\n", out_msg);
        APPEND_TRANSITION_COMMENT(" dc: Acks remaining: ");
        APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
        tbe.ProbeRequestStartTime := curCycle();
      }
    }
  }
585
586  action(sc_probeShrCoreData, "sc", desc="probe shared cores, return data") {
587    peek(requestNetwork_in, CPURequestMsg) { // not the right network?
588      enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
589        out_msg.addr := address;
590        out_msg.Type := ProbeRequestType:PrbDowngrade;
591        out_msg.ReturnData := true;
592        out_msg.MessageSize := MessageSizeType:Control;
593        out_msg.Destination.broadcast(MachineType:CorePair);  // won't be realistic for multisocket
594        // add relevant TCC node to the list. This replaces all TCPs and SQCs
595        if (noTCCdir || CPUonly) {
596          //Don't need to notify TCC about reads
597        } else {
598	      out_msg.Destination.add(mapAddressToRange(address,
599                                                    MachineType:TCCdir,
600                            TCC_select_low_bit, TCC_select_num_bits));
601          tbe.NumPendingAcks := tbe.NumPendingAcks + 1;
602        }
603        if (noTCCdir && !CPUonly) {
604          out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
605                                  TCC_select_low_bit, TCC_select_num_bits));
606        }
607        out_msg.Destination.remove(in_msg.Requestor);
608        tbe.NumPendingAcks := out_msg.Destination.count();
609        if (tbe.NumPendingAcks == 0) {
610          enqueue(triggerQueue_out, TriggerMsg, 1) {
611            out_msg.addr := address;
612            out_msg.Type := TriggerType:AcksComplete;
613          }
614        }
615        DPRINTF(RubySlicc, "%s\n", (out_msg));
616        APPEND_TRANSITION_COMMENT(" sc: Acks remaining: ");
617        APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
618        tbe.ProbeRequestStartTime := curCycle();
619      }
620    }
621  }
622
  // Broadcast an invalidating probe with NO data return (e.g. for upgrades)
  // to all core pairs plus the appropriate GPU-side target, then set up the
  // pending-ack count exactly as in dc_probeInvCoreData.
  action(ic_probeInvCore, "ic", desc="probe invalidate core, no return data needed") {
    peek(requestNetwork_in, CPURequestMsg) { // not the right network?
      enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
        out_msg.addr := address;
        out_msg.Type := ProbeRequestType:PrbInv;
        out_msg.ReturnData := false;
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Destination.broadcast(MachineType:CorePair);  // won't be realistic for multisocket

        // add relevant TCC node to the list. This replaces all TCPs and SQCs
        if (noTCCdir && !CPUonly) {
            out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
                              TCC_select_low_bit, TCC_select_num_bits));
        } else {
            if (!noTCCdir) {
                out_msg.Destination.add(mapAddressToRange(address,
                                                          MachineType:TCCdir,
                                                          TCC_select_low_bit,
                                                          TCC_select_num_bits));
            }
        }
        out_msg.Destination.remove(in_msg.Requestor);
        tbe.NumPendingAcks := out_msg.Destination.count();
        if (tbe.NumPendingAcks == 0) {
          enqueue(triggerQueue_out, TriggerMsg, 1) {
            out_msg.addr := address;
            out_msg.Type := TriggerType:AcksComplete;
          }
        }
        APPEND_TRANSITION_COMMENT(" ic: Acks remaining: ");
        APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
        DPRINTF(RubySlicc, "%s\n", out_msg);
        tbe.ProbeRequestStartTime := curCycle();
      }
    }
  }
659
  // Commit writeback data from the core into the directory's copy.
  action(d_writeDataToMemory, "d", desc="Write data to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
      if (tbe.Dirty == false) {
          // have to update the TBE, too, because of how this
          // directory deals with functional writes
        tbe.DataBlk := in_msg.DataBlk;
      }
    }
  }
670
  // Allocate and initialize the TBE for a new request. Write-throughs and
  // atomics record their write mask and WT requestor; write-throughs also
  // merge their partial data into the TBE copy and mark it dirty so later
  // actions won't overwrite it with memory/L3 data.
  action(t_allocateTBE, "t", desc="allocate TBE Entry") {
    check_allocate(TBEs);
    peek(requestNetwork_in, CPURequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs.lookup(address));
      if (in_msg.Type == CoherenceRequestType:WriteThrough) {
        tbe.writeMask.clear();
        tbe.writeMask.orMask(in_msg.writeMask);
        tbe.wtData := true;
        tbe.WTRequestor := in_msg.WTRequestor;
        tbe.LastSender := in_msg.Requestor;
      }
      if (in_msg.Type == CoherenceRequestType:Atomic) {
        tbe.writeMask.clear();
        tbe.writeMask.orMask(in_msg.writeMask);
        tbe.atomicData := true;
        tbe.WTRequestor := in_msg.WTRequestor;
        tbe.LastSender := in_msg.Requestor;
      }
      tbe.DataBlk := getDirectoryEntry(address).DataBlk; // Data only for WBs
      tbe.Dirty := false;
      if (in_msg.Type == CoherenceRequestType:WriteThrough) {
        // Merge the write-through's bytes over the directory data.
        tbe.DataBlk.copyPartial(in_msg.DataBlk,in_msg.writeMask);
        tbe.Dirty := true;
      }
      tbe.OriginalRequestor := in_msg.Requestor;
      tbe.NumPendingAcks := 0;
      tbe.Cached := in_msg.ForceShared;
      tbe.InitialRequestTime := in_msg.InitialRequestTime;
    }
  }
702
  // Tear down the TBE; flush its (clean) data back to the directory entry
  // first so functional accesses keep seeing consistent data.
  action(dt_deallocateTBE, "dt", desc="deallocate TBE Entry") {
    if (tbe.Dirty == false) {
        getDirectoryEntry(address).DataBlk := tbe.DataBlk;
    }
    TBEs.deallocate(address);
    unset_tbe();
  }
710
  // Fold transaction results back into the directory data: partial merge for
  // write-throughs, read-modify-write for atomics, plain copy for clean data.
  action(wd_writeBackData, "wd", desc="Write back data if needed") {
    if (tbe.wtData) {
      getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk, tbe.writeMask);
    } else if (tbe.atomicData) {
      // Apply the atomic op against the current directory data, then commit.
      tbe.DataBlk.atomicPartial(getDirectoryEntry(address).DataBlk,tbe.writeMask);
      getDirectoryEntry(address).DataBlk := tbe.DataBlk;
    } else if (tbe.Dirty == false) {
      getDirectoryEntry(address).DataBlk := tbe.DataBlk;
    }
  }
721
  // Record that memory data has arrived. Write-through data already in the
  // TBE (and any dirty data) must not be overwritten.
  action(mt_writeMemDataToTBE, "mt", desc="write Mem data to TBE") {
    peek(memQueue_in, MemoryMsg) {
      if (tbe.wtData == true) {
          // do nothing
      } else if (tbe.Dirty == false) {
        tbe.DataBlk := getDirectoryEntry(address).DataBlk;
      }
      tbe.MemData := true;
    }
  }
732
  action(y_writeProbeDataToTBE, "y", desc="write Probe Data to TBE") {
    peek(responseNetwork_in, ResponseMsg) {
      if (in_msg.Dirty) {
        if (tbe.wtData) {
          // A write-through is pending: start from the probed (dirty) block,
          // overlay the write-through bytes on top of it, and mark the whole
          // block as written so the full merged block is committed later.
          DataBlock tmp := in_msg.DataBlk;
          tmp.copyPartial(tbe.DataBlk,tbe.writeMask);
          tbe.DataBlk := tmp;
          tbe.writeMask.fillMask();
        } else if (tbe.Dirty) {
          // Dirty data was already captured; receiving it again is only
          // legal if both copies are identical (sanity-checked below).
          if(tbe.atomicData == false && tbe.wtData == false) {
            DPRINTF(RubySlicc, "Got double data for %s from %s\n", address, in_msg.Sender);
            assert(tbe.DataBlk == in_msg.DataBlk);  // in case of double data
          }
        } else {
          // First dirty copy seen: capture data, dirty bit, and its sender.
          tbe.DataBlk := in_msg.DataBlk;
          tbe.Dirty := in_msg.Dirty;
          tbe.LastSender := in_msg.Sender;
        }
      }
      // Any probe hit means some core still caches the line.
      if (in_msg.Hit) {
        tbe.Cached := true;
      }
    }
  }
757
  action(mwc_markSinkWriteCancel, "mwc", desc="Mark to sink impending VicDirty") {
    peek(responseNetwork_in, ResponseMsg) {
      // Remember this sender so that its in-flight VicDirty, made stale by
      // this response, will be sunk (ignored) when it arrives.
      getDirectoryEntry(address).VicDirtyIgnore.add(in_msg.Sender);
      APPEND_TRANSITION_COMMENT(" setting bit to sink VicDirty ");
    }
  }
764
  action(x_decrementAcks, "x", desc="decrement Acks pending") {
    // One probe ack received; count it off and log the remainder.
    tbe.NumPendingAcks := tbe.NumPendingAcks - 1;
    APPEND_TRANSITION_COMMENT(" Acks remaining: ");
    APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
  }
770
  action(o_checkForCompletion, "o", desc="check for ack completion") {
    // When the last expected probe ack has arrived, fire an internal
    // AcksComplete trigger to advance the state machine.
    if (tbe.NumPendingAcks == 0) {
      enqueue(triggerQueue_out, TriggerMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := TriggerType:AcksComplete;
      }
    }
    APPEND_TRANSITION_COMMENT(" Check: Acks remaining: ");
    APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
  }
781
  action(rv_removeVicDirtyIgnore, "rv", desc="Remove ignored core") {
    peek(requestNetwork_in, CPURequestMsg) {
      // The stale VicDirty from this requestor has now been sunk; clear its
      // ignore bit so future victims from it are handled normally.
      getDirectoryEntry(address).VicDirtyIgnore.remove(in_msg.Requestor);
    }
  }
787
  action(al_allocateL3Block, "al", desc="allocate the L3 block on WB") {
    peek(responseNetwork_in, ResponseMsg) {
      if (L3CacheMemory.isTagPresent(address)) {
        // L3 hit: update the existing entry in place with the written-back data.
        CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
        APPEND_TRANSITION_COMMENT(" al wrote data to L3 (hit) ");
        entry.DataBlk := in_msg.DataBlk;
        entry.LastSender := in_msg.Sender;
      } else {
        // L3 miss: if the set is full, evict a victim to memory first,
        // then allocate a fresh entry for the written-back block.
        if (L3CacheMemory.cacheAvail(address) == false) {
          Addr victim := L3CacheMemory.cacheProbe(address);
          CacheEntry victim_entry := static_cast(CacheEntry, "pointer",
                                                 L3CacheMemory.lookup(victim));
          queueMemoryWrite(machineID, victim, to_memory_controller_latency,
                           victim_entry.DataBlk);
          L3CacheMemory.deallocate(victim);
        }
        assert(L3CacheMemory.cacheAvail(address));
        CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.allocate(address, new CacheEntry));
        APPEND_TRANSITION_COMMENT(" al wrote data to L3 ");
        entry.DataBlk := in_msg.DataBlk;

        entry.LastSender := in_msg.Sender;
      }
    }
  }
813
  action(alwt_allocateL3BlockOnWT, "alwt", desc="allocate the L3 block on WT") {
    // Install write-through / atomic result data into L3, but only when the
    // useL3OnWT policy knob is enabled. Mirrors al_allocateL3Block, except
    // data/sender come from the TBE rather than a network message.
    if ((tbe.wtData || tbe.atomicData) && useL3OnWT) {
      if (L3CacheMemory.isTagPresent(address)) {
        // L3 hit: update in place.
        CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
        APPEND_TRANSITION_COMMENT(" al wrote data to L3 (hit) ");
        entry.DataBlk := tbe.DataBlk;
        entry.LastSender := tbe.LastSender;
      } else {
        // L3 miss: evict a victim to memory if the set is full, then allocate.
        if (L3CacheMemory.cacheAvail(address) == false) {
          Addr victim := L3CacheMemory.cacheProbe(address);
          CacheEntry victim_entry := static_cast(CacheEntry, "pointer",
                                                 L3CacheMemory.lookup(victim));
          queueMemoryWrite(machineID, victim, to_memory_controller_latency,
                           victim_entry.DataBlk);
          L3CacheMemory.deallocate(victim);
        }
        assert(L3CacheMemory.cacheAvail(address));
        CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.allocate(address, new CacheEntry));
        APPEND_TRANSITION_COMMENT(" al wrote data to L3 ");
        entry.DataBlk := tbe.DataBlk;
        entry.LastSender := tbe.LastSender;
      }
    }
  }
838
  action(sf_setForwardReqTime, "sf", desc="...") {
    // Stamp the cycle at which the request is forwarded (latency accounting).
    tbe.ForwardRequestTime := curCycle();
  }

  action(dl_deallocateL3, "dl", desc="deallocate the L3 block") {
    L3CacheMemory.deallocate(address);
  }
846
  // Queue-pop actions: each consumes the head message of one input port.
  action(p_popRequestQueue, "p", desc="pop request queue") {
    requestNetwork_in.dequeue(clockEdge());
  }

  action(pr_popResponseQueue, "pr", desc="pop response queue") {
    responseNetwork_in.dequeue(clockEdge());
  }

  action(pm_popMemQueue, "pm", desc="pop mem queue") {
    memQueue_in.dequeue(clockEdge());
  }

  action(pt_popTriggerQueue, "pt", desc="pop trigger queue") {
    triggerQueue_in.dequeue(clockEdge());
  }

  action(ptl_popTriggerQueue, "ptl", desc="pop L3 trigger queue") {
    L3TriggerQueue_in.dequeue(clockEdge());
  }

  action(pu_popUnblockQueue, "pu", desc="pop unblock queue") {
    unblockNetwork_in.dequeue(clockEdge());
  }
870
  // Recycle actions re-enqueue the head message to be retried after
  // recycle_latency, for requests that cannot be serviced right now.
  action(zz_recycleRequestQueue, "zz", desc="recycle request queue") {
    requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

  action(yy_recycleResponseQueue, "yy", desc="recycle response queue") {
    responseNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

  action(st_stallAndWaitRequest, "st", desc="Stall and wait on the address") {
    // Park the request until wakeUpBuffers/wakeUpAllBuffers releases it.
    stall_and_wait(requestNetwork_in, address);
  }
882
  action(wa_wakeUpDependents, "wa", desc="Wake up any requests waiting for this address") {
    wakeUpBuffers(address);
  }

  action(wa_wakeUpAllDependents, "waa", desc="Wake up any requests waiting for this region") {
    wakeUpAllBuffers();
  }

  action(z_stall, "z", desc="...") {
    // Intentionally empty: leave the message at the head of its queue.
  }
893
894  // TRANSITIONS
  // TRANSITIONS

  // While a request for this address is in flight (any transient state),
  // park newly arriving reads/upgrades until the current one completes.
  transition({BL, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B}, {RdBlkS, RdBlkM, RdBlk, CtoD}) {
      st_stallAndWaitRequest;
  }

  // It may be possible to save multiple invalidations here!
  transition({BL, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B}, {Atomic, WriteThrough}) {
      st_stallAndWaitRequest;
  }
903
904
905  // transitions from U
  // transitions from U

  // Shared read: fetch from memory and probe cores for shared data in parallel.
  transition(U, {RdBlkS}, BS_PM) {L3TagArrayRead} {
    t_allocateTBE;
    l_queueMemRdReq;
    sc_probeShrCoreData;
    p_popRequestQueue;
  }

  // Write-through: ack the writer immediately, then fetch memory data and
  // invalidate other cores' copies before merging.
  transition(U, WriteThrough, BM_PM) {L3TagArrayRead, L3TagArrayWrite} {
    t_allocateTBE;
    w_sendResponseWBAck;
    l_queueMemRdReq;
    dc_probeInvCoreData;
    p_popRequestQueue;
  }

  transition(U, Atomic, BM_PM) {L3TagArrayRead, L3TagArrayWrite} {
    t_allocateTBE;
    l_queueMemRdReq;
    dc_probeInvCoreData;
    p_popRequestQueue;
  }

  // Exclusive read: fetch from memory and invalidate all cached copies.
  transition(U, {RdBlkM}, BM_PM) {L3TagArrayRead} {
    t_allocateTBE;
    l_queueMemRdReq;
    dc_probeInvCoreData;
    p_popRequestQueue;
  }

  transition(U, RdBlk, B_PM) {L3TagArrayRead}{
    t_allocateTBE;
    l_queueMemRdReq;
    sc_probeShrCoreData;
    p_popRequestQueue;
  }

  // Change-to-dirty: no memory fetch needed, just invalidate other copies.
  transition(U, CtoD, BP) {L3TagArrayRead} {
    t_allocateTBE;
    ic_probeInvCore;
    p_popRequestQueue;
  }

  // Victim writebacks: ack the core and wait in BL for the CPUData message.
  transition(U, VicDirty, BL) {L3TagArrayRead} {
    t_allocateTBE;
    w_sendResponseWBAck;
    p_popRequestQueue;
  }

  transition(U, VicClean, BL) {L3TagArrayRead} {
    t_allocateTBE;
    w_sendResponseWBAck;
    p_popRequestQueue;
  }
959
  // A second victim for the same address while a writeback is in progress:
  // retry it later.
  transition(BL, {VicDirty, VicClean}) {
    zz_recycleRequestQueue;
  }

  // Writeback data arrived: commit to memory, cache in L3, and release.
  transition(BL, CPUData, U) {L3TagArrayWrite, L3DataArrayWrite} {
    d_writeDataToMemory;
    al_allocateL3Block;
    wa_wakeUpDependents;
    dt_deallocateTBE;
    pr_popResponseQueue;
  }

  // The writeback was superseded (stale): just clean up and wake waiters.
  transition(BL, StaleWB, U) {L3TagArrayWrite} {
    dt_deallocateTBE;
    wa_wakeUpAllDependents;
    pr_popResponseQueue;
  }
977
  // Victims arriving while a different request is in flight: hold them at
  // the head of the queue (no recycle) until the state settles.
  transition({B, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm}, {VicDirty, VicClean}) {
    z_stall;
  }

  // Memory write acks carry no further work in any state.
  transition({U, BL, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B}, WBAck) {
    pm_popMemQueue;
  }

  // A VicDirty previously marked for sinking: clear the ignore bit and ack
  // the core so it can retire the victim.
  transition({U, BL, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B}, StaleVicDirty) {
    rv_removeVicDirtyIgnore;
    w_sendResponseWBAck;
    p_popRequestQueue;
  }
991
  // The requesting core has installed the line: return to U and wake waiters.
  transition({B}, CoreUnblock, U) {
    wa_wakeUpDependents;
    pu_popUnblockQueue;
  }

  // Write-throughs complete via an internal trigger instead of a core unblock.
  transition(B, UnblockWriteThrough, U) {
    wa_wakeUpDependents;
    pt_popTriggerQueue;
  }
1001
  // Memory data arrived while probes are still outstanding (*_PM -> *_Pm):
  // stash the data in the TBE and keep waiting for probe acks.
  transition(BS_PM, MemData, BS_Pm) {} {
    mt_writeMemDataToTBE;
    pm_popMemQueue;
  }

  transition(BM_PM, MemData, BM_Pm){} {
    mt_writeMemDataToTBE;
    pm_popMemQueue;
  }

  transition(B_PM, MemData, B_Pm){} {
    mt_writeMemDataToTBE;
    pm_popMemQueue;
  }

  // L3 hit satisfies the data fetch: just consume the L3 trigger and keep
  // waiting for probe acks.
  transition(BS_PM, L3Hit, BS_Pm) {} {
    ptl_popTriggerQueue;
  }

  transition(BM_PM, L3Hit, BM_Pm) {} {
    ptl_popTriggerQueue;
  }

  transition(B_PM, L3Hit, B_Pm) {} {
    ptl_popTriggerQueue;
  }
1028
  // Probes already complete (*_M): data arrival finishes the request.
  // Respond to the requestor (S / M / E-or-S flavor), commit any pending
  // write data, optionally fill L3 on write-through, and free the TBE.
  transition(BS_M, MemData, B){L3TagArrayWrite, L3DataArrayWrite} {
    mt_writeMemDataToTBE;
    s_sendResponseS;
    wd_writeBackData;
    alwt_allocateL3BlockOnWT;
    dt_deallocateTBE;
    pm_popMemQueue;
  }

  transition(BM_M, MemData, B){L3TagArrayWrite, L3DataArrayWrite} {
    mt_writeMemDataToTBE;
    m_sendResponseM;
    wd_writeBackData;
    alwt_allocateL3BlockOnWT;
    dt_deallocateTBE;
    pm_popMemQueue;
  }

  transition(B_M, MemData, B){L3TagArrayWrite, L3DataArrayWrite} {
    mt_writeMemDataToTBE;
    es_sendResponseES;
    wd_writeBackData;
    alwt_allocateL3BlockOnWT;
    dt_deallocateTBE;
    pm_popMemQueue;
  }

  // Same completions, but the data came from an L3 hit rather than memory.
  transition(BS_M, L3Hit, B) {L3TagArrayWrite, L3DataArrayWrite} {
    s_sendResponseS;
    wd_writeBackData;
    alwt_allocateL3BlockOnWT;
    dt_deallocateTBE;
    ptl_popTriggerQueue;
  }

  transition(BM_M, L3Hit, B) {L3DataArrayWrite, L3TagArrayWrite} {
    m_sendResponseM;
    wd_writeBackData;
    alwt_allocateL3BlockOnWT;
    dt_deallocateTBE;
    ptl_popTriggerQueue;
  }

  transition(B_M, L3Hit, B) {L3DataArrayWrite, L3TagArrayWrite} {
    es_sendResponseES;
    wd_writeBackData;
    alwt_allocateL3BlockOnWT;
    dt_deallocateTBE;
    ptl_popTriggerQueue;
  }
1079
  // A core answered a probe: merge any dirty data into the TBE, count the
  // ack, and fire AcksComplete when the count reaches zero.
  transition({BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, BP}, CPUPrbResp) {
    y_writeProbeDataToTBE;
    x_decrementAcks;
    o_checkForCompletion;
    pr_popResponseQueue;
  }

  // All probes done but memory data still pending (*_PM -> *_M): record the
  // forward time and wait for MemData/L3Hit.
  transition(BS_PM, ProbeAcksComplete, BS_M) {} {
    sf_setForwardReqTime;
    pt_popTriggerQueue;
  }

  transition(BM_PM, ProbeAcksComplete, BM_M) {} {
    sf_setForwardReqTime;
    pt_popTriggerQueue;
  }

  transition(B_PM, ProbeAcksComplete, B_M){} {
    sf_setForwardReqTime;
    pt_popTriggerQueue;
  }
1101
  // Data already in the TBE (*_Pm): the final probe ack completes the
  // request. Respond, commit pending writes, optionally fill L3, free TBE.
  transition(BS_Pm, ProbeAcksComplete, B){L3DataArrayWrite, L3TagArrayWrite} {
    sf_setForwardReqTime;
    s_sendResponseS;
    wd_writeBackData;
    alwt_allocateL3BlockOnWT;
    dt_deallocateTBE;
    pt_popTriggerQueue;
  }

  transition(BM_Pm, ProbeAcksComplete, B){L3DataArrayWrite, L3TagArrayWrite} {
    sf_setForwardReqTime;
    m_sendResponseM;
    wd_writeBackData;
    alwt_allocateL3BlockOnWT;
    dt_deallocateTBE;
    pt_popTriggerQueue;
  }

  transition(B_Pm, ProbeAcksComplete, B){L3DataArrayWrite, L3TagArrayWrite} {
    sf_setForwardReqTime;
    es_sendResponseES;
    wd_writeBackData;
    alwt_allocateL3BlockOnWT;
    dt_deallocateTBE;
    pt_popTriggerQueue;
  }
1128
1129  transition(BP, ProbeAcksComplete, B){L3TagArrayWrite, L3TagArrayWrite} {
1130    sf_setForwardReqTime;
1131    c_sendResponseCtoD;
1132    wd_writeBackData;
1133    alwt_allocateL3BlockOnWT;
1134    dt_deallocateTBE;
1135    pt_popTriggerQueue;
1136  }
1137}
1138