1/*
2 * Copyright (c) 2019 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder.  You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 */
40
// Directory controller for a MOESI-style CMP directory protocol: tracks
// the owner and sharers of each block and talks to the memory controller.
machine(MachineType:Directory, "Directory protocol")
:  DirectoryMemory * directory;
   // Latency (cycles) charged on control messages the directory sends.
   Cycles directory_latency := 6;
   // Latency (cycles) for requests queued to the memory controller.
   Cycles to_memory_controller_latency := 1;

   // Message Queues
   MessageBuffer * requestToDir, network="From", virtual_network="1",
        vnet_type="request";  // a mod-L2 bank -> this Dir
   MessageBuffer * responseToDir, network="From", virtual_network="2",
        vnet_type="response";  // a mod-L2 bank -> this Dir

   MessageBuffer * forwardFromDir, network="To", virtual_network="1",
        vnet_type="forward";
   MessageBuffer * responseFromDir, network="To", virtual_network="2",
        vnet_type="response";  // Dir -> mod-L2 bank

   // Responses arriving from the attached memory controller.
   MessageBuffer * responseFromMemory;
{
  // STATES
  // Stable states are I/S/O/M; the remaining states are transient and keep
  // the line blocked while requests, writebacks, or unblocks are in flight
  // (see each desc string).
  state_declaration(State, desc="Directory states", default="Directory_State_I") {
    // Base states
    I, AccessPermission:Read_Write, desc="Invalid";
    S, AccessPermission:Read_Only, desc="Shared";
    O, AccessPermission:Maybe_Stale, desc="Owner";
    M, AccessPermission:Maybe_Stale, desc="Modified";

    // Blocked states entered when a request has been forwarded/serviced and
    // the directory is waiting for the requestor's unblock.
    IS, AccessPermission:Busy, desc="Blocked, was in idle";
    SS, AccessPermission:Read_Only, desc="Blocked, was in shared";
    OO, AccessPermission:Busy, desc="Blocked, was in owned";
    MO, AccessPermission:Busy, desc="Blocked, going to owner or maybe modified";
    MM, AccessPermission:Busy, desc="Blocked, going to modified";

    // Writeback-in-progress states.
    MI, AccessPermission:Busy, desc="Blocked on a writeback";
    MIS, AccessPermission:Busy, desc="Blocked on a writeback, but don't remove from sharers when received";
    OS, AccessPermission:Busy, desc="Blocked on a writeback";
    OSS, AccessPermission:Busy, desc="Blocked on a writeback, but don't remove from sharers when received";

    // DMA-related transients.
    XI_M, AccessPermission:Busy, desc="In a stable state, going to I, waiting for the memory controller";
    XI_U, AccessPermission:Busy, desc="In a stable state, going to I, waiting for an unblock";
    OI_D, AccessPermission:Busy, desc="In O, going to I, waiting for data";

    OD, AccessPermission:Busy, desc="In O, waiting for dma ack from L2";
    MD, AccessPermission:Busy, desc="In M, waiting for dma ack from L2";
  }
85
  // Events
  // Each event is raised by one of the in_ports below in response to an
  // incoming request, response, or memory message.
  enumeration(Event, desc="Directory events") {
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    PUTX, desc="A PUTX arrives";
    PUTO, desc="A PUTO arrives";
    PUTO_SHARERS, desc="A PUTO arrives, but don't remove from sharers list";
    Unblock, desc="An unblock message arrives";
    Last_Unblock, desc="An unblock message arrives, we're not waiting for any additional unblocks";
    Exclusive_Unblock, desc="The processor become the exclusive owner (E or M) of the line";
    Clean_Writeback, desc="The final message as part of a PutX/PutS, no data";
    Dirty_Writeback, desc="The final message as part of a PutX/PutS, contains data";
    Memory_Data,   desc="Fetched data from memory arrives";
    Memory_Ack,    desc="Writeback Ack from memory arrives";
    DMA_READ,      desc="DMA Read";
    DMA_WRITE,     desc="DMA Write";
    DMA_ACK,       desc="DMA Ack";
    Data,          desc="Data to directory";
  }
105
106  // TYPES
107
108  // DirectoryEntry
  // Per-block directory entry: coherence state, sharer/owner sets, and a
  // count of unblock messages the directory still expects.
  structure(Entry, desc="...", interface='AbstractEntry') {
    State DirectoryState,          desc="Directory state";
    NetDest Sharers,                   desc="Sharers for this block";
    NetDest Owner,                     desc="Owner of this block";
    int WaitingUnblocks,           desc="Number of acks we're waiting for";
  }

  // Transaction buffer entry: holds DMA write context (address, length,
  // partial data, requestor) while the directory collects the owner's data.
  structure(TBE, desc="...") {
    Addr PhysicalAddress,   desc="Physical address for this entry";
    int Len,           desc="Length of request";
    DataBlock DataBlk, desc="DataBlk";
    MachineID Requestor, desc="original requestor";
  }

  // Externally implemented table of TBEs, keyed by line address.
  structure(TBETable, external = "yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }
129
  // ** OBJECTS **
  // TBE storage, sized by the m_number_of_TBEs controller parameter.
  TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";

  // External helpers (bodies provided by the generated controller).
  Tick clockEdge();
  Tick cyclesToTicks(Cycles c);
  void set_tbe(TBE b);
  void unset_tbe();
137
138  Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
139    Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
140
141    if (is_valid(dir_entry)) {
142      return dir_entry;
143    }
144
145    dir_entry :=  static_cast(Entry, "pointer",
146                              directory.allocate(addr, new Entry));
147    return dir_entry;
148  }
149
  // The block's state lives entirely in the directory entry; the TBE
  // argument is unused here.
  State getState(TBE tbe, Addr addr) {
    return getDirectoryEntry(addr).DirectoryState;
  }
153
154  void setState(TBE tbe, Addr addr, State state) {
155    if (directory.isPresent(addr)) {
156
157      if (state == State:I) {
158        assert(getDirectoryEntry(addr).Owner.count() == 0);
159        assert(getDirectoryEntry(addr).Sharers.count() == 0);
160      }
161
162      if (state == State:S) {
163        assert(getDirectoryEntry(addr).Owner.count() == 0);
164      }
165
166      if (state == State:O) {
167        assert(getDirectoryEntry(addr).Owner.count() == 1);
168        assert(getDirectoryEntry(addr).Sharers.isSuperset(getDirectoryEntry(addr).Owner) == false);
169      }
170
171      if (state == State:M) {
172        assert(getDirectoryEntry(addr).Owner.count() == 1);
173        assert(getDirectoryEntry(addr).Sharers.count() == 0);
174      }
175
176      if ((state != State:SS) && (state != State:OO)) {
177        assert(getDirectoryEntry(addr).WaitingUnblocks == 0);
178      }
179
180      if ( (getDirectoryEntry(addr).DirectoryState != State:I) && (state == State:I) ) {
181        getDirectoryEntry(addr).DirectoryState := state;
182         // disable coherence checker
183        // sequencer.checkCoherence(addr);
184      }
185      else {
186        getDirectoryEntry(addr).DirectoryState := state;
187      }
188    }
189  }
190
191  AccessPermission getAccessPermission(Addr addr) {
192    if (directory.isPresent(addr)) {
193      DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState));
194      return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
195    }
196
197    DPRINTF(RubySlicc, "AccessPermission_NotPresent\n");
198    return AccessPermission:NotPresent;
199  }
200
  // Push the permission implied by the given state into the directory
  // entry (no-op for blocks the directory does not cover).
  void setAccessPermission(Addr addr, State state) {
    if (directory.isPresent(addr)) {
      getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
    }
  }
206
  // Functional (debug/backdoor) read: delegate straight to memory. The
  // addr argument is unused; the packet carries the address.
  void functionalRead(Addr addr, Packet *pkt) {
    functionalMemoryRead(pkt);
  }
210
211  int functionalWrite(Addr addr, Packet *pkt) {
212    int num_functional_writes := 0;
213    num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
214    return num_functional_writes;
215  }
216
  // If a block has no holders (state I), the directory itself can be
  // considered both a sharer and exclusive w.r.t. coherence checking.
  bool isBlockShared(Addr addr) {
    if (directory.isPresent(addr)) {
      if (getDirectoryEntry(addr).DirectoryState == State:I) {
        return true;
      }
    }
    return false;
  }
227
  // Deliberately identical to isBlockShared: per the convention noted
  // above, an I-state block makes the directory count as both sharer and
  // exclusive holder for coherence-checking purposes.
  bool isBlockExclusive(Addr addr) {
    if (directory.isPresent(addr)) {
      if (getDirectoryEntry(addr).DirectoryState == State:I) {
        return true;
      }
    }
    return false;
  }
236
  // ** OUT_PORTS **
  // Forwarded requests (e.g. INV, forwarded GETS/GETX) go to caches.
  out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
  // Data, acks, and nacks go back on the response network.
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
240
241  // ** IN_PORTS **
242
  // Response network: unblock messages, exclusive data, and DMA acks.
  // Highest rank, so responses are serviced before new requests.
  in_port(unblockNetwork_in, ResponseMsg, responseToDir, rank=2) {
    if (unblockNetwork_in.isReady(clockEdge())) {
      peek(unblockNetwork_in, ResponseMsg) {
        if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
          // Last_Unblock fires when this is the final outstanding unblock
          // (counter kept in the directory entry).
          if (getDirectoryEntry(in_msg.addr).WaitingUnblocks == 1) {
            trigger(Event:Last_Unblock, in_msg.addr,
                    TBEs[in_msg.addr]);
          } else {
            trigger(Event:Unblock, in_msg.addr,
                    TBEs[in_msg.addr]);
          }
        } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
          trigger(Event:Exclusive_Unblock, in_msg.addr,
                  TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Data, in_msg.addr,
                  TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
          trigger(Event:DMA_ACK, in_msg.addr,
                  TBEs[in_msg.addr]);
        } else {
          error("Invalid message");
        }
      }
    }
  }
269
  // Request network: GET/PUT requests from L2 banks and DMA requests.
  in_port(requestQueue_in, RequestMsg, requestToDir, rank=1) {
    if (requestQueue_in.isReady(clockEdge())) {
      peek(requestQueue_in, RequestMsg) {
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.addr, TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:PUTX) {
          trigger(Event:PUTX, in_msg.addr, TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:PUTO) {
          trigger(Event:PUTO, in_msg.addr, TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
          trigger(Event:PUTO_SHARERS, in_msg.addr, TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:WRITEBACK_DIRTY_DATA) {
          trigger(Event:Dirty_Writeback, in_msg.addr,
                  TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:WRITEBACK_CLEAN_ACK) {
          trigger(Event:Clean_Writeback, in_msg.addr,
                  TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
          // DMA requests may carry sub-line addresses; align them to the
          // cache-line address the directory tracks.
          trigger(Event:DMA_READ, makeLineAddress(in_msg.addr),
                  TBEs[makeLineAddress(in_msg.addr)]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
          trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr),
                  TBEs[makeLineAddress(in_msg.addr)]);
        } else {
          error("Invalid message");
        }
      }
    }
  }
301
302  // off-chip memory request/response is done
  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, responseFromMemory, rank=0) {
    if (memQueue_in.isReady(clockEdge())) {
      peek(memQueue_in, MemoryMsg) {
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }
317
318  // Actions
319
  action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, directory_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:WB_ACK;
        // NOTE(review): Sender is set to the requestor rather than this
        // directory's machineID — confirm receivers do not rely on Sender
        // identifying the actual sender of WB_ACK.
        out_msg.Sender := in_msg.Requestor;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }
332
  action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, directory_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:WB_NACK;
        // NOTE(review): Sender is the requestor, not this directory —
        // mirrors a_sendWriteBackAck; verify receivers ignore Sender here.
        out_msg.Sender := in_msg.Requestor;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }
345
  // Owner/sharer bookkeeping on the directory entry.
  action(c_clearOwner, "c", desc="Clear the owner field") {
    getDirectoryEntry(address).Owner.clear();
  }

  // Demote the owner to a sharer (used on PUTO_SHARERS-style writebacks).
  action(c_moveOwnerToSharer, "cc", desc="Move owner to sharers") {
    getDirectoryEntry(address).Sharers.addNetDest(getDirectoryEntry(address).Owner);
    getDirectoryEntry(address).Owner.clear();
  }

  action(cc_clearSharers, "\c", desc="Clear the sharers field") {
    getDirectoryEntry(address).Sharers.clear();
  }
358
  // Forward data fetched from memory to the original requestor; the
  // message type depends on whether the fetch was for exclusive access.
  action(d_sendDataMsg, "d", desc="Send data to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := false; // By definition, the block is now clean
        out_msg.Acks := in_msg.Acks;
        if (in_msg.ReadX) {
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        } else {
          out_msg.Type := CoherenceResponseType:DATA;
        }
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }
378
  // Reply to a DMA read from the directory itself (used when the data is
  // clean at the directory, e.g. state S). NOTE(review): out_msg.DataBlk
  // is never assigned here — confirm the DMA requestor obtains the data
  // elsewhere or that this path tolerates an uninitialized block.
  action(p_fwdDataToDMA, "\d", desc="Send data to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Dirty := false; // By definition, the block is now clean
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }
392
  // Install the sender of the unblock message as the sole owner.
  action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") {
    peek(unblockNetwork_in, ResponseMsg) {
      getDirectoryEntry(address).Owner.clear();
      getDirectoryEntry(address).Owner.add(in_msg.Sender);
    }
  }
399
  action(f_forwardRequest, "f", desc="Forward request to owner") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
        out_msg.addr := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
        out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.addr).Owner);
        // Tell the requestor how many invalidation acks to expect; don't
        // count the requestor itself if it is already a sharer.
        out_msg.Acks := getDirectoryEntry(address).Sharers.count();
        if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
          out_msg.Acks := out_msg.Acks - 1;
        }
        out_msg.MessageSize := MessageSizeType:Forwarded_Control;
      }
    }
  }
416
  // Like f_forwardRequest, but names this directory as the requestor so
  // the owner's response (e.g. modified data for a DMA write) comes back
  // here instead of to the original requestor.
  action(f_forwardRequestDirIsRequestor, "\f", desc="Forward request to owner") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
        out_msg.addr := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := machineID;
        out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
        out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.addr).Owner);
        out_msg.Acks := getDirectoryEntry(address).Sharers.count();
        if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
          out_msg.Acks := out_msg.Acks - 1;
        }
        out_msg.MessageSize := MessageSizeType:Forwarded_Control;
      }
    }
  }
433
  action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
    peek(requestQueue_in, RequestMsg) {
      // Only send if there is at least one sharer other than the requestor.
      if ((getDirectoryEntry(in_msg.addr).Sharers.count() > 1) ||
          ((getDirectoryEntry(in_msg.addr).Sharers.count() > 0) &&
           (getDirectoryEntry(in_msg.addr).Sharers.isElement(in_msg.Requestor) == false))) {
        enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceRequestType:INV;
          out_msg.Requestor := in_msg.Requestor;
          out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
          // out_msg.Destination := getDirectoryEntry(in_msg.addr).Sharers;
          out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.addr).Sharers);
          out_msg.Destination.remove(in_msg.Requestor);
          out_msg.MessageSize := MessageSizeType:Invalidate_Control;
        }
      }
    }
  }
452
  // Queue-pop helpers: consume the message that triggered the event.
  action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
    requestQueue_in.dequeue(clockEdge());
  }

  action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
    unblockNetwork_in.dequeue(clockEdge());
  }
460
  action(m_addUnlockerToSharers, "m", desc="Add the unlocker to the sharer list") {
    peek(unblockNetwork_in, ResponseMsg) {
      getDirectoryEntry(address).Sharers.add(in_msg.Sender);
    }
  }

  // WaitingUnblocks counter maintenance: incremented per outstanding
  // forwarded request, decremented per unblock received.
  action(n_incrementOutstanding, "n", desc="Increment outstanding requests") {
    getDirectoryEntry(address).WaitingUnblocks := getDirectoryEntry(address).WaitingUnblocks + 1;
  }

  action(o_decrementOutstanding, "o", desc="Decrement outstanding requests") {
    getDirectoryEntry(address).WaitingUnblocks := getDirectoryEntry(address).WaitingUnblocks - 1;
    assert(getDirectoryEntry(address).WaitingUnblocks >= 0);
  }
475
  action(q_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue(clockEdge());
  }

  // Issue a read to the memory controller on behalf of the requestor.
  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestQueue_in, RequestMsg) {
      queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
    }
  }
485
  // Write back data carried in the request message. If a TBE exists the
  // original requestor recorded there is credited, otherwise the message's
  // requestor is used.
  action(qw_queueMemoryWBFromCacheRequest, "qw", desc="Queue off-chip writeback request") {
    peek(requestQueue_in, RequestMsg) {
      if (is_valid(tbe)) {
        queueMemoryWrite(tbe.Requestor, address, to_memory_controller_latency,
                         in_msg.DataBlk);
      } else {
        queueMemoryWrite(in_msg.Requestor, address, to_memory_controller_latency,
                         in_msg.DataBlk);
      }
    }
  }
497
  // Merge the DMA write's partial data (held in the TBE) into the owner's
  // data block from the response message, then write the result to memory.
  action(qw_queueMemoryWBRequestFromMessageAndTBE, "qwmt",
    desc="Queue off-chip writeback request") {
    peek(unblockNetwork_in, ResponseMsg) {
      DataBlock DataBlk := in_msg.DataBlk;
      DataBlk.copyPartial(tbe.DataBlk, getOffset(tbe.PhysicalAddress),
                          tbe.Len);
      queueMemoryWrite(tbe.Requestor, address, to_memory_controller_latency,
                       DataBlk);
    }
  }
508
  // Write DMA-provided data straight to memory.
  action(qw_queueMemoryWBFromDMARequest, "/qw", desc="Queue off-chip writeback request") {
    peek(requestQueue_in, RequestMsg) {
      queueMemoryWrite(in_msg.Requestor, address, to_memory_controller_latency,
                       in_msg.DataBlk);
    }
  }

  // Re-enqueue the head request after recycle_latency; used while the
  // block is in a transient state that cannot service it yet.
  action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
    requestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }
519
  action(a_sendDMAAck, "\a", desc="Send DMA Ack that write completed, along with Inv Ack count") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, 1) {
      out_msg.addr := address;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:Directory;
      out_msg.Destination.add(in_msg.Requestor);
      out_msg.DataBlk := in_msg.DataBlk;
      out_msg.Acks := getDirectoryEntry(address).Sharers.count();  // for dma requests
      out_msg.Type := CoherenceResponseType:DMA_ACK;
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }
534
  // Variant of a_sendDMAAck used when the trigger came from the unblock
  // network: the DMA requestor's identity is recovered from the TBE.
  // NOTE(review): if no TBE is valid the message is enqueued with an empty
  // destination — confirm this path is unreachable without a TBE.
  action(a_sendDMAAck2, "\aa", desc="Send DMA Ack that write completed, along with Inv Ack count") {
    peek(unblockNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, 1) {
      out_msg.addr := address;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:Directory;
      if (is_valid(tbe)) {
        out_msg.Destination.add(tbe.Requestor);
      }
      out_msg.DataBlk := in_msg.DataBlk;
      out_msg.Acks := getDirectoryEntry(address).Sharers.count();  // for dma requests
      out_msg.Type := CoherenceResponseType:DMA_ACK;
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }
551
  // Capture the DMA request's context (address, length, data, requestor)
  // in a TBE so it survives until the owner's data arrives.
  action(v_allocateTBE, "v", desc="Allocate TBE entry") {
    peek (requestQueue_in, RequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.PhysicalAddress := in_msg.addr;
      tbe.Len := in_msg.Len;
      tbe.DataBlk := in_msg.DataBlk;
      tbe.Requestor := in_msg.Requestor;
    }
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
    TBEs.deallocate(address);
    unset_tbe();
  }
567
568
569  // TRANSITIONS
  // --- Transitions from I (and the DMA transients reached from it) ---

  // GETX on an idle line: fetch from memory; requestor will unblock into M.
  transition(I, GETX, MM) {
    qf_queueMemoryFetchRequest;
    i_popIncomingRequestQueue;
  }

  // DMA read of an idle line: fetch from memory, reply when data arrives.
  transition(I, DMA_READ, XI_M) {
    qf_queueMemoryFetchRequest;
    i_popIncomingRequestQueue;
  }

  // DMA write of an idle line: write straight to memory and ack the DMA.
  transition(I, DMA_WRITE, XI_U) {
    qw_queueMemoryWBFromDMARequest;
    a_sendDMAAck;  // ack count may be zero
    i_popIncomingRequestQueue;
  }

  transition(XI_M, Memory_Data, I) {
    d_sendDataMsg;  // ack count may be zero
    q_popMemQueue;
  }

  transition(XI_U, Exclusive_Unblock, I) {
    cc_clearSharers;
    c_clearOwner;
    j_popIncomingUnblockQueue;
  }
596
  // --- Transitions from S, plus writeback nacks for stale PUTs ---

  // GETX in S: fetch data and invalidate the other sharers.
  transition(S, GETX, MM) {
    qf_queueMemoryFetchRequest;
    g_sendInvalidations;
    i_popIncomingRequestQueue;
  }

  transition(S, DMA_READ) {
    //qf_queueMemoryFetchRequest;
    p_fwdDataToDMA;
    //g_sendInvalidations;  // the DMA will collect the invalidations then send an Unblock Exclusive
    i_popIncomingRequestQueue;
  }

  transition(S, DMA_WRITE, XI_U) {
    qw_queueMemoryWBFromDMARequest;
    a_sendDMAAck;  // ack count may be zero
    g_sendInvalidations;  // the DMA will collect invalidations
    i_popIncomingRequestQueue;
  }

  transition(I, GETS, IS) {
    qf_queueMemoryFetchRequest;
    i_popIncomingRequestQueue;
  }

  // Additional GETS while shared/blocked-shared: count another unblock.
  transition({S, SS}, GETS, SS) {
    qf_queueMemoryFetchRequest;
    n_incrementOutstanding;
    i_popIncomingRequestQueue;
  }

  // PUTs from caches that no longer hold the line in the required state
  // are nacked (e.g. the writeback raced with an invalidation).
  transition({I, S}, PUTO) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition({I, S, O}, PUTX) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }
637
  // --- Transitions from O (owner supplies the data) ---

  transition(O, GETX, MM) {
    f_forwardRequest;
    g_sendInvalidations;
    i_popIncomingRequestQueue;
  }

  transition(O, DMA_READ, OD) {
    f_forwardRequest;     // this will cause the data to go to DMA directly
    //g_sendInvalidations;  // this will cause acks to be sent to the DMA
    i_popIncomingRequestQueue;
  }

  transition(OD, DMA_ACK, O) {
    j_popIncomingUnblockQueue;
  }

  // DMA write while a cache owns the line: pull the modified data back to
  // the directory first (dir poses as requestor), invalidate sharers.
  transition({O,M}, DMA_WRITE, OI_D) {
    f_forwardRequestDirIsRequestor;    // need the modified data before we can proceed
    g_sendInvalidations;               // these go to the DMA Controller
    v_allocateTBE;
    i_popIncomingRequestQueue;
  }

  // Owner's data arrived: merge with the DMA's partial write, write back.
  transition(OI_D, Data, XI_U) {
    qw_queueMemoryWBRequestFromMessageAndTBE;
    a_sendDMAAck2;  // ack count may be zero
    w_deallocateTBE;
    j_popIncomingUnblockQueue;
  }

  transition({O, OO}, GETS, OO) {
    f_forwardRequest;
    n_incrementOutstanding;
    i_popIncomingRequestQueue;
  }
673
  // --- Transitions from M, and writeback acks from M/O ---

  transition(M, GETX, MM) {
    f_forwardRequest;
    i_popIncomingRequestQueue;
  }

  // no exclusive unblock will show up to the directory
  transition(M, DMA_READ, MD) {
    f_forwardRequest;     // this will cause the data to go to DMA directly
    i_popIncomingRequestQueue;
  }

  transition(MD, DMA_ACK, M) {
    j_popIncomingUnblockQueue;
  }

  transition(M, GETS, MO) {
    f_forwardRequest;
    i_popIncomingRequestQueue;
  }

  transition(M, PUTX, MI) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  // happens if M->O transition happens on-chip
  transition(M, PUTO, MI) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(M, PUTO_SHARERS, MIS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(O, PUTO, OS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(O, PUTO_SHARERS, OSS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }
719
720
  // --- Blocked states: recycle new requests, process unblocks ---

  // While a transaction is in flight, new requests are recycled until the
  // line returns to a stable state.
  transition({MM, MO, MI, MIS, OS, OSS, XI_M, XI_U, OI_D, OD, MD}, {GETS, GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ, DMA_WRITE}) {
    zz_recycleRequest;
  }

  transition({MM, MO}, Exclusive_Unblock, M) {
    cc_clearSharers;
    e_ownerIsUnblocker;
    j_popIncomingUnblockQueue;
  }

  transition(MO, Unblock, O) {
    m_addUnlockerToSharers;
    j_popIncomingUnblockQueue;
  }

  transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ, DMA_WRITE}) {
    zz_recycleRequest;
  }

  transition(IS, GETS) {
    zz_recycleRequest;
  }

  transition(IS, Unblock, S) {
    m_addUnlockerToSharers;
    j_popIncomingUnblockQueue;
  }

  transition(IS, Exclusive_Unblock, M) {
    cc_clearSharers;
    e_ownerIsUnblocker;
    j_popIncomingUnblockQueue;
  }

  // SS/OO: stay blocked until the last outstanding unblock arrives.
  transition(SS, Unblock) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(SS, Last_Unblock, S) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(OO, Unblock) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(OO, Last_Unblock, O) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }
778
  // --- Writeback completions and memory responses ---

  // Dirty writebacks carry data: update memory, then settle the state.
  transition(MI, Dirty_Writeback, I) {
    c_clearOwner;
    cc_clearSharers;
    qw_queueMemoryWBFromCacheRequest;
    i_popIncomingRequestQueue;
  }

  transition(MIS, Dirty_Writeback, S) {
    c_moveOwnerToSharer;
    qw_queueMemoryWBFromCacheRequest;
    i_popIncomingRequestQueue;
  }

  transition(MIS, Clean_Writeback, S) {
    c_moveOwnerToSharer;
    i_popIncomingRequestQueue;
  }

  transition(OS, Dirty_Writeback, S) {
    c_clearOwner;
    qw_queueMemoryWBFromCacheRequest;
    i_popIncomingRequestQueue;
  }

  transition(OSS, Dirty_Writeback, S) {
    c_moveOwnerToSharer;
    qw_queueMemoryWBFromCacheRequest;
    i_popIncomingRequestQueue;
  }

  transition(OSS, Clean_Writeback, S) {
    c_moveOwnerToSharer;
    i_popIncomingRequestQueue;
  }

  // Clean writebacks carry no data: just clear the bookkeeping.
  transition(MI, Clean_Writeback, I) {
    c_clearOwner;
    cc_clearSharers;
    i_popIncomingRequestQueue;
  }

  transition(OS, Clean_Writeback, S) {
    c_clearOwner;
    i_popIncomingRequestQueue;
  }

  // An Unblock during a writeback means the writeback raced with a
  // forwarded request; fall back to the owner-held state.
  transition({MI, MIS}, Unblock, M) {
    j_popIncomingUnblockQueue;
  }

  transition({OS, OSS}, Unblock, O) {
    j_popIncomingUnblockQueue;
  }

  // Memory responses can arrive in any of these states; data is relayed
  // to the original requestor recorded by the memory controller.
  transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Data) {
    d_sendDataMsg;
    q_popMemQueue;
  }

  // Writeback acks from memory need no further action.
  transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS, XI_U, XI_M}, Memory_Ack) {
    //a_sendAck;
    q_popMemQueue;
  }
842
843}
844