/*
 * Copyright (c) 2009-2012 Mark D. Hill and David A. Wood
 * Copyright (c) 2010-2012 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

machine(MachineType:Directory, "Directory protocol")
    : DirectoryMemory * directory;
      Cycles directory_latency := 12;
      Cycles to_memory_controller_latency := 1;

      MessageBuffer * forwardFromDir, network="To", virtual_network="3",
            vnet_type="forward";
      MessageBuffer * responseFromDir, network="To", virtual_network="4",
            vnet_type="response";
      MessageBuffer * dmaResponseFromDir, network="To", virtual_network="1",
            vnet_type="response";

      MessageBuffer * requestToDir, network="From", virtual_network="2",
            vnet_type="request";
      MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
            vnet_type="request";
      MessageBuffer * responseFromMemory;
{
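  // This controller tracks a single owner per block: a line is either
  // unowned and up to date in memory (I) or exclusively owned by one cache
  // (M). Requests, forwards, and responses travel on distinct virtual
  // networks so that a response can never queue behind a request, the
  // usual Ruby recipe for avoiding protocol deadlock.
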
  // STATES
  state_declaration(State, desc="Directory states", default="Directory_State_I") {
    // Base states
    I, AccessPermission:Read_Write, desc="Invalid";
    M, AccessPermission:Invalid, desc="Modified";

    M_DRD, AccessPermission:Busy, desc="Blocked on an invalidation for a DMA read";
    M_DWR, AccessPermission:Busy, desc="Blocked on an invalidation for a DMA write";

    M_DWRI, AccessPermission:Busy, desc="Intermediate state M_DWR-->I";
    M_DRDI, AccessPermission:Busy, desc="Intermediate state M_DRD-->I";

    IM, AccessPermission:Busy, desc="Intermediate state I-->M";
    MI, AccessPermission:Busy, desc="Intermediate state M-->I";
    ID, AccessPermission:Busy, desc="Intermediate state for DMA_READ when in I";
    ID_W, AccessPermission:Busy, desc="Intermediate state for DMA_WRITE when in I";
  }
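
  // Note that the access permissions run opposite to the state names: in I
  // no cache holds the block, so memory's copy is authoritative
  // (Read_Write); in M some cache owns the block, so memory's copy may be
  // stale (Invalid).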

  // Events
  enumeration(Event, desc="Directory events") {
    // processor requests
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    PUTX, desc="A PUTX arrives from the current owner";
    PUTX_NotOwner, desc="A PUTX arrives from a machine that is not the owner";

    // DMA requests
    DMA_READ, desc="A DMA Read memory request";
    DMA_WRITE, desc="A DMA Write memory request";

    // Memory Controller
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";
  }

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="Directory entry", interface="AbstractEntry") {
    State DirectoryState,           desc="Directory state";
    NetDest Sharers,                desc="Sharers for this block";
    NetDest Owner,                  desc="Owner of this block";
  }

  // TBE entries for DMA requests
  structure(TBE, desc="TBE entries for outstanding DMA requests") {
    Addr PhysicalAddress,   desc="physical address";
    State TBEState,         desc="Transient State";
    DataBlock DataBlk,      desc="Data to be written (DMA write only)";
    int Len,                desc="Length of the DMA request, in bytes";
    MachineID DmaRequestor, desc="DMA requestor";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }
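
  // TBETable is implemented in C++ (external="yes"); only its interface is
  // declared here. Each TBE tracks one in-flight transaction while the
  // block sits in a transient state.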

  // ** OBJECTS **
  TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";

  Tick clockEdge();
  Cycles ticksToCycles(Tick t);
  Tick cyclesToTicks(Cycles c);
  void set_tbe(TBE b);
  void unset_tbe();
  Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
    Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);

    if (is_valid(dir_entry)) {
      return dir_entry;
    }

    dir_entry := static_cast(Entry, "pointer",
                             directory.allocate(addr, new Entry));
    return dir_entry;
  }

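  // A valid TBE always wins: while a transaction is in flight, the
  // transient state lives in the TBE rather than in the directory entry.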
  State getState(TBE tbe, Addr addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else if (directory.isPresent(addr)) {
      return getDirectoryEntry(addr).DirectoryState;
    } else {
      return State:I;
    }
  }

  void setState(TBE tbe, Addr addr, State state) {
    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (directory.isPresent(addr)) {
      if (state == State:M) {
        assert(getDirectoryEntry(addr).Owner.count() == 1);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }

      getDirectoryEntry(addr).DirectoryState := state;

      if (state == State:I) {
        assert(getDirectoryEntry(addr).Owner.count() == 0);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }
    }
  }

  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      return Directory_State_to_permission(tbe.TBEState);
    }

    if (directory.isPresent(addr)) {
      return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
    }

    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Addr addr, State state) {
    if (directory.isPresent(addr)) {
      getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
    }
  }

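  // Functional accesses service debugger and syscall-emulation reads and
  // writes outside the timing model: data buffered in a TBE takes
  // precedence; otherwise the access falls through to physical memory.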
  void functionalRead(Addr addr, Packet *pkt) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      testAndRead(addr, tbe.DataBlk, pkt);
    } else {
      functionalMemoryRead(pkt);
    }
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      num_functional_writes := num_functional_writes +
            testAndWrite(addr, tbe.DataBlk, pkt);
    }

    num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
    return num_functional_writes;
  }

  // ** OUT_PORTS **
  out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
  out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);

  // ** IN_PORTS **
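  // Each in_port peeks at the message at its head and converts it into a
  // protocol Event via trigger(); the transition table at the end of the
  // file decides which actions run and which state comes next.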
  in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
    if (dmaRequestQueue_in.isReady(clockEdge())) {
      peek(dmaRequestQueue_in, DMARequestMsg) {
        TBE tbe := TBEs[in_msg.LineAddress];
        if (in_msg.Type == DMARequestType:READ) {
          trigger(Event:DMA_READ, in_msg.LineAddress, tbe);
        } else if (in_msg.Type == DMARequestType:WRITE) {
          trigger(Event:DMA_WRITE, in_msg.LineAddress, tbe);
        } else {
          error("Invalid message");
        }
      }
    }
  }

  in_port(requestQueue_in, RequestMsg, requestToDir) {
    if (requestQueue_in.isReady(clockEdge())) {
      peek(requestQueue_in, RequestMsg) {
        TBE tbe := TBEs[in_msg.addr];
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.addr, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.addr, tbe);
        } else if (in_msg.Type == CoherenceRequestType:PUTX) {
          if (getDirectoryEntry(in_msg.addr).Owner.isElement(in_msg.Requestor)) {
            trigger(Event:PUTX, in_msg.addr, tbe);
          } else {
            trigger(Event:PUTX_NotOwner, in_msg.addr, tbe);
          }
        } else {
          error("Invalid message");
        }
      }
    }
  }
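
  // A PUTX is honored only when it comes from the recorded owner; a stale
  // PUTX from a cache that has since lost ownership (e.g., it lost a race
  // with another GETX) becomes PUTX_NotOwner and is answered with a NACK.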

  // Off-chip memory responses arrive here once a queued read or writeback
  // completes.
  in_port(memQueue_in, MemoryMsg, responseFromMemory) {
    if (memQueue_in.isReady(clockEdge())) {
      peek(memQueue_in, MemoryMsg) {
        TBE tbe := TBEs[in_msg.addr];
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.addr, tbe);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.addr, tbe);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  // Actions
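  // Each action declares a name, a shorthand used in the generated HTML
  // transition tables, and a description. Inside an action, 'address' and
  // 'tbe' refer to the block being transitioned, while peek() and
  // enqueue() bind 'in_msg' and 'out_msg' respectively.
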
  action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(l_sendWriteBackAck, "la", desc="Send writeback ack to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(forwardNetwork_out, RequestMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.OriginalRequestorMachId;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:WB_NACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(c_clearOwner, "c", desc="Clear the owner field") {
    getDirectoryEntry(address).Owner.clear();
  }

  action(d_sendData, "d", desc="Send data to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
        assert(is_valid(tbe));
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;

        // We send the entire data block and rely on the DMA controller
        // to split it up if need be.
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(tbe.DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
        assert(is_valid(tbe));
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;

        // We send the entire data block and rely on the DMA controller
        // to split it up if need be.
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(tbe.DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
    enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
      assert(is_valid(tbe));
      out_msg.PhysicalAddress := address;
      out_msg.LineAddress := address;
      out_msg.Type := DMAResponseType:ACK;
      out_msg.Destination.add(tbe.DmaRequestor);
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
    peek(requestQueue_in, RequestMsg) {
      getDirectoryEntry(address).Owner.clear();
      getDirectoryEntry(address).Owner.add(in_msg.Requestor);
    }
  }

  action(f_forwardRequest, "f", desc="Forward request to owner") {
    peek(requestQueue_in, RequestMsg) {
      APPEND_TRANSITION_COMMENT("Own: ");
      APPEND_TRANSITION_COMMENT(getDirectoryEntry(in_msg.addr).Owner);
      APPEND_TRANSITION_COMMENT("Req: ");
      APPEND_TRANSITION_COMMENT(in_msg.Requestor);
      enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
        out_msg.addr := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination := getDirectoryEntry(in_msg.addr).Owner;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:INV;
        out_msg.Requestor := machineID;
        out_msg.Destination := getDirectoryEntry(in_msg.PhysicalAddress).Owner;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }
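
  // The INV above is addressed to the current owner. The owner responds
  // with a PUTX carrying the dirty block, which the DMA transitions below
  // consume.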

  action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
    requestQueue_in.dequeue(clockEdge());
  }

  action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
    dmaRequestQueue_in.dequeue(clockEdge());
  }

  action(v_allocateTBE, "v", desc="Allocate TBE") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DataBlk := in_msg.DataBlk;
      tbe.PhysicalAddress := in_msg.PhysicalAddress;
      tbe.Len := in_msg.Len;
      tbe.DmaRequestor := in_msg.Requestor;
    }
  }

  action(r_allocateTbeForDmaRead, "\r", desc="Allocate TBE for DMA Read") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DmaRequestor := in_msg.Requestor;
    }
  }

  action(v_allocateTBEFromRequestNet, "\v", desc="Allocate TBE") {
    peek(requestQueue_in, RequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DataBlk := in_msg.DataBlk;
    }
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(z_recycleRequestQueue, "z", desc="recycle request queue") {
    requestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

  action(y_recycleDMARequestQueue, "y", desc="recycle dma request queue") {
    dmaRequestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }
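
  // recycle() moves the message at the head of the queue to its tail and
  // retries it recycle_latency cycles later; unlike a stall, younger
  // messages for other blocks can make progress in the meantime.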

  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestQueue_in, RequestMsg) {
      queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
    }
  }

  action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request for DMA") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
    }
  }

  action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip partial writeback request") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      queueMemoryWritePartial(in_msg.Requestor, address,
                              to_memory_controller_latency, in_msg.DataBlk,
                              in_msg.Len);
    }
  }

  action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip partial writeback request from the TBE") {
    peek(requestQueue_in, RequestMsg) {
      queueMemoryWritePartial(in_msg.Requestor, address,
                              to_memory_controller_latency, tbe.DataBlk,
                              tbe.Len);
    }
  }

  action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
    peek(requestQueue_in, RequestMsg) {
      queueMemoryWrite(in_msg.Requestor, address, to_memory_controller_latency,
                       in_msg.DataBlk);
    }
  }

  action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue(clockEdge());
  }

  // TRANSITIONS
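  // transition(state(s), event(s)[, next_state]) { actions } -- braces
  // group states or events that share behavior, and omitting next_state
  // leaves the state unchanged. The transient states below recycle
  // conflicting requests until the outstanding memory access or
  // invalidation completes.
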
  transition({M_DRD, M_DWR, M_DWRI, M_DRDI}, GETX) {
    z_recycleRequestQueue;
  }

  transition({IM, MI, ID, ID_W}, {GETX, GETS, PUTX, PUTX_NotOwner}) {
    z_recycleRequestQueue;
  }

  transition({IM, MI, ID, ID_W}, {DMA_READ, DMA_WRITE}) {
    y_recycleDMARequestQueue;
  }

  transition(I, GETX, IM) {
    v_allocateTBEFromRequestNet;
    qf_queueMemoryFetchRequest;
    e_ownerIsRequestor;
    i_popIncomingRequestQueue;
  }

  transition(IM, Memory_Data, M) {
    d_sendData;
    w_deallocateTBE;
    l_popMemQueue;
  }

  transition(I, DMA_READ, ID) {
    r_allocateTbeForDmaRead;
    qf_queueMemoryFetchRequestDMA;
    p_popIncomingDMARequestQueue;
  }

  transition(ID, Memory_Data, I) {
    dr_sendDMAData;
    w_deallocateTBE;
    l_popMemQueue;
  }

  transition(I, DMA_WRITE, ID_W) {
    v_allocateTBE;
    qw_queueMemoryWBRequest_partial;
    p_popIncomingDMARequestQueue;
  }

  transition(ID_W, Memory_Ack, I) {
    da_sendDMAAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

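  // DMA to a block owned by a cache takes three steps: invalidate the
  // owner, catch its PUTX (forwarding the data to the DMA controller on a
  // read, or issuing the buffered DMA data as a partial memory write on a
  // write), and finally wait for memory to acknowledge the writeback.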
  transition(M, DMA_READ, M_DRD) {
    v_allocateTBE;
    inv_sendCacheInvalidate;
    p_popIncomingDMARequestQueue;
  }

  transition(M_DRD, PUTX, M_DRDI) {
    drp_sendDMAData;
    c_clearOwner;
    l_queueMemoryWBRequest;
    i_popIncomingRequestQueue;
  }

  transition(M_DRDI, Memory_Ack, I) {
    l_sendWriteBackAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

  transition(M, DMA_WRITE, M_DWR) {
    v_allocateTBE;
    inv_sendCacheInvalidate;
    p_popIncomingDMARequestQueue;
  }

  transition(M_DWR, PUTX, M_DWRI) {
    qw_queueMemoryWBRequest_partialTBE;
    c_clearOwner;
    i_popIncomingRequestQueue;
  }

  transition(M_DWRI, Memory_Ack, I) {
    l_sendWriteBackAck;
    da_sendDMAAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

  transition(M, GETX, M) {
    f_forwardRequest;
    e_ownerIsRequestor;
    i_popIncomingRequestQueue;
  }

  transition(M, PUTX, MI) {
    c_clearOwner;
    v_allocateTBEFromRequestNet;
    l_queueMemoryWBRequest;
    i_popIncomingRequestQueue;
  }

  transition(MI, Memory_Ack, I) {
    l_sendWriteBackAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

  transition(M, PUTX_NotOwner, M) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition(I, PUTX_NotOwner, I) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }
}