MESI_Two_Level-dir.sm revision 14184
/*
 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

machine(MachineType:Directory, "MESI Two Level directory protocol")
 : DirectoryMemory * directory;
   Cycles to_mem_ctrl_latency := 1;
   Cycles directory_latency := 6;

   MessageBuffer * requestToDir, network="From", virtual_network="0",
        vnet_type="request";
   MessageBuffer * responseToDir, network="From", virtual_network="1",
        vnet_type="response";
   MessageBuffer * responseFromDir, network="To", virtual_network="1",
        vnet_type="response";

   MessageBuffer * responseFromMemory;
{
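  // This directory is the memory-side home node of the protocol: it
  // services L2 fetches and DMA requests, queues reads and writebacks
  // to the memory controller, and resolves races by stalling or
  // recycling incoming requests.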
  // STATES
  state_declaration(State, desc="Directory states", default="Directory_State_I") {
    // Base states
    I, AccessPermission:Read_Write, desc="dir is the owner and memory is up-to-date, all other copies are Invalid";
    ID, AccessPermission:Busy, desc="Intermediate state for DMA_READ when in I";
    ID_W, AccessPermission:Busy, desc="Intermediate state for DMA_WRITE when in I";

    M, AccessPermission:Maybe_Stale, desc="memory copy may be stale, i.e. other modified copies may exist";
    IM, AccessPermission:Busy, desc="Intermediate State I>M";
    MI, AccessPermission:Busy, desc="Intermediate State M>I";
    M_DRD, AccessPermission:Busy, desc="Intermediate state for a DMA read when the block is modified in a cache";
    M_DRDI, AccessPermission:Busy, desc="Intermediate state for a DMA read, memory writeback in flight";
    M_DWR, AccessPermission:Busy, desc="Intermediate state for a DMA write when the block is modified in a cache";
    M_DWRI, AccessPermission:Busy, desc="Intermediate state for a DMA write, memory writeback in flight";
  }
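  // Naming convention for the transient states: IM and MI are the
  // I->M and M->I handoffs in flight; M_DRD/M_DWR hold a DMA
  // read/write that found the line modified in a cache, and the
  // trailing I (M_DRDI, M_DWRI) marks the final writeback leg on the
  // way back to I.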

  // Events
  enumeration(Event, desc="Directory events") {
    Fetch, desc="A memory fetch arrives";
    Data, desc="writeback data arrives";
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";
    // added by SS for dma
    DMA_READ, desc="A DMA Read memory request";
    DMA_WRITE, desc="A DMA Write memory request";
    CleanReplacement, desc="Clean Replacement in L2 cache";
  }
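  // Fetch, DMA_READ and DMA_WRITE arrive on requestToDir; Data and
  // CleanReplacement arrive on responseToDir; Memory_Data and
  // Memory_Ack come back from the memory controller via
  // responseFromMemory.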

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...", interface="AbstractEntry") {
    State DirectoryState,          desc="Directory state";
    MachineID Owner;
  }
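  // Note that only a single Owner is recorded here, not a sharer
  // bit-vector: in this two-level hierarchy the L2 is expected to
  // track which L1s hold copies, so the directory only needs to know
  // which cache to invalidate.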

  // TBE entries for DMA requests
  structure(TBE, desc="TBE entries for outstanding DMA requests") {
    Addr PhysicalAddress, desc="physical address";
    State TBEState,        desc="Transient State";
    DataBlock DataBlk,     desc="Data to be written (DMA write only)";
    int Len,               desc="...";
    MachineID Requestor,   desc="The DMA engine that sent the request";
  }
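  // A TBE is allocated only for DMA transactions (see v_allocateTBE
  // below); CPU-side fetches and writebacks complete without one.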

  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
    bool functionalRead(Packet *pkt);
    int functionalWrite(Packet *pkt);
  }

  // ** OBJECTS **
  TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";

  Tick clockEdge();
  Tick cyclesToTicks(Cycles c);
  void set_tbe(TBE tbe);
  void unset_tbe();
  void wakeUpBuffers(Addr a);

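  // Directory entries are allocated lazily: a lookup that misses
  // creates a fresh Entry (which defaults to state I) instead of
  // failing.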
  Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
    Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);

    if (is_valid(dir_entry)) {
      return dir_entry;
    }

    dir_entry := static_cast(Entry, "pointer",
                             directory.allocate(addr, new Entry));
    return dir_entry;
  }

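  // When a TBE exists its transient state takes precedence over the
  // directory entry's stable state; absent both, the line is I.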
  State getState(TBE tbe, Addr addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else if (directory.isPresent(addr)) {
      return getDirectoryEntry(addr).DirectoryState;
    } else {
      return State:I;
    }
  }

  void setState(TBE tbe, Addr addr, State state) {
    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (directory.isPresent(addr)) {
      getDirectoryEntry(addr).DirectoryState := state;
    }
  }

  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(tbe.TBEState));
      return Directory_State_to_permission(tbe.TBEState);
    }

    if (directory.isPresent(addr)) {
      DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState));
      return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
    }

    DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
    return AccessPermission:NotPresent;
  }

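  // Functional (debug/backdoor) accesses check an in-flight TBE first
  // and then fall through to backing memory, so they observe data that
  // a DMA transaction is still carrying.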
  void functionalRead(Addr addr, Packet *pkt) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      testAndRead(addr, tbe.DataBlk, pkt);
    } else {
      functionalMemoryRead(pkt);
    }
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      num_functional_writes := num_functional_writes +
        testAndWrite(addr, tbe.DataBlk, pkt);
    }

    num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
    return num_functional_writes;
  }

  void setAccessPermission(Addr addr, State state) {
    if (directory.isPresent(addr)) {
      getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
    }
  }

  bool isGETRequest(CoherenceRequestType type) {
    return (type == CoherenceRequestType:GETS) ||
      (type == CoherenceRequestType:GET_INSTR) ||
      (type == CoherenceRequestType:GETX);
  }
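  // GETS, GET_INSTR and GETX are all handled identically by the
  // directory: fetch the line from memory and record the requestor as
  // the new Owner (see d_sendData).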

  // ** OUT_PORTS **
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);

  // ** IN_PORTS **
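  // The rank argument orders port polling: higher-ranked ports (the
  // memory queue, then responses) are serviced before new requests,
  // draining in-flight transactions first.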

  in_port(requestNetwork_in, RequestMsg, requestToDir, rank = 0) {
    if (requestNetwork_in.isReady(clockEdge())) {
      peek(requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (isGETRequest(in_msg.Type)) {
          trigger(Event:Fetch, in_msg.addr, TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
          trigger(Event:DMA_READ, makeLineAddress(in_msg.addr),
                  TBEs[makeLineAddress(in_msg.addr)]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
          trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr),
                  TBEs[makeLineAddress(in_msg.addr)]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg);
          error("Invalid message");
        }
      }
    }
  }

  in_port(responseNetwork_in, ResponseMsg, responseToDir, rank = 1) {
    if (responseNetwork_in.isReady(clockEdge())) {
      peek(responseNetwork_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
          trigger(Event:Data, in_msg.addr, TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:CleanReplacement, in_msg.addr, TBEs[in_msg.addr]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }
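  // MEMORY_DATA carries dirty writeback data from the L2, while a bare
  // ACK signals a clean replacement that needs no memory update.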

  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, responseFromMemory, rank = 2) {
    if (memQueue_in.isReady(clockEdge())) {
      peek(memQueue_in, MemoryMsg) {
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }
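  // responseFromMemory is not a network buffer: it is filled by the
  // attached memory controller in response to the queueMemoryRead /
  // queueMemoryWrite calls issued in the actions below.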

  // Actions
  action(a_sendAck, "a", desc="Send ack to L2") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:MEMORY_ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Sender);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(d_sendData, "d", desc="Send data to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:MEMORY_DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:Response_Data;

        Entry e := getDirectoryEntry(in_msg.addr);
        e.Owner := in_msg.OriginalRequestorMachId;
      }
    }
  }

  action(aa_sendAck, "aa", desc="Send ack to L2") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:MEMORY_ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
    requestNetwork_in.dequeue(clockEdge());
  }

  action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
    responseNetwork_in.dequeue(clockEdge());
  }

  action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue(clockEdge());
  }

  action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
    wakeUpBuffers(address);
  }

  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestNetwork_in, RequestMsg) {
      queueMemoryRead(in_msg.Requestor, address, to_mem_ctrl_latency);
    }
  }

  action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
    peek(responseNetwork_in, ResponseMsg) {
      queueMemoryWrite(in_msg.Sender, address, to_mem_ctrl_latency,
                       in_msg.DataBlk);
    }
  }

  // added by SS for dma
  action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request") {
    peek(requestNetwork_in, RequestMsg) {
      queueMemoryRead(in_msg.Requestor, address, to_mem_ctrl_latency);
    }
  }

  action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
    requestNetwork_in.dequeue(clockEdge());
  }

  action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        // Send the entire data block; the DMA controller splits it up
        // if need be.
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(tbe.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(qw_queueMemoryWBRequest_partial, "qwp",
         desc="Queue off-chip writeback request") {
    peek(requestNetwork_in, RequestMsg) {
      queueMemoryWritePartial(machineID, address, to_mem_ctrl_latency,
                              in_msg.DataBlk, in_msg.Len);
    }
  }

  action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
    enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:ACK;
      out_msg.Destination.add(tbe.Requestor);
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(z_stallAndWaitRequest, "z", desc="Stall and wait the request queue") {
    stall_and_wait(requestNetwork_in, address);
  }

  action(zz_recycleDMAQueue, "zz", desc="recycle DMA queue") {
    requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

  action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, directory_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:INV;
        out_msg.Sender := machineID;
        out_msg.Destination.add(getDirectoryEntry(address).Owner);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        // Send the entire data block; the DMA controller splits it up
        // if need be.
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(tbe.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(v_allocateTBE, "v", desc="Allocate TBE") {
    peek(requestNetwork_in, RequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DataBlk := in_msg.DataBlk;
      tbe.PhysicalAddress := in_msg.addr;
      tbe.Len := in_msg.Len;
      tbe.Requestor := in_msg.Requestor;
    }
  }

  action(qw_queueMemoryWBRequest_partialTBE, "qwt",
         desc="Queue off-chip writeback request") {
    peek(responseNetwork_in, ResponseMsg) {
      queueMemoryWritePartial(in_msg.Sender, tbe.PhysicalAddress,
                              to_mem_ctrl_latency, tbe.DataBlk, tbe.Len);
    }
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  // TRANSITIONS
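  // Each transition reads (current state, triggering event, next
  // state) followed by the actions executed, in order, when it fires;
  // omitting the next state leaves the machine in the current state.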

  transition(I, Fetch, IM) {
    qf_queueMemoryFetchRequest;
    j_popIncomingRequestQueue;
  }

  transition(M, Fetch) {
    inv_sendCacheInvalidate;
    z_stallAndWaitRequest;
  }
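  // A fetch while a cache owns the line: ask the Owner to invalidate
  // (it will answer with writeback Data or a clean-replacement ACK)
  // and park the request until that response moves the directory out
  // of M.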

  transition(IM, Memory_Data, M) {
    d_sendData;
    l_popMemQueue;
    kd_wakeUpDependents;
  }

  // added by SS
  transition(M, CleanReplacement, I) {
    a_sendAck;
    k_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(M, Data, MI) {
    qw_queueMemoryWBRequest;
    k_popIncomingResponseQueue;
  }

  transition(MI, Memory_Ack, I) {
    aa_sendAck;
    l_popMemQueue;
    kd_wakeUpDependents;
  }
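  // Two replacement paths out of M: a clean copy needs only an ACK
  // (M -> I directly above), while dirty data takes M -> MI -> I
  // through a memory writeback.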

  // added by SS for dma support
  transition(I, DMA_READ, ID) {
    v_allocateTBE;
    qf_queueMemoryFetchRequestDMA;
    j_popIncomingRequestQueue;
  }

  transition(ID, Memory_Data, I) {
    dr_sendDMAData;
    w_deallocateTBE;
    l_popMemQueue;
    kd_wakeUpDependents;
  }

  transition(I, DMA_WRITE, ID_W) {
    v_allocateTBE;
    qw_queueMemoryWBRequest_partial;
    j_popIncomingRequestQueue;
  }

  transition(ID_W, Memory_Ack, I) {
    da_sendDMAAck;
    w_deallocateTBE;
    l_popMemQueue;
    kd_wakeUpDependents;
  }

  transition({ID, ID_W, M_DRDI, M_DWRI, IM, MI}, {Fetch, Data}) {
    z_stallAndWaitRequest;
  }

  transition({ID, ID_W, M_DRD, M_DRDI, M_DWR, M_DWRI, IM, MI}, {DMA_WRITE, DMA_READ}) {
    zz_recycleDMAQueue;
  }
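  // Busy-state races are resolved two ways: CPU-side requests are
  // parked with stall_and_wait and explicitly woken later by
  // kd_wakeUpDependents, while DMA requests are recycled, i.e.
  // retried after recycle_latency.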

  transition(M, DMA_READ, M_DRD) {
    v_allocateTBE;
    inv_sendCacheInvalidate;
    j_popIncomingRequestQueue;
  }

  transition(M_DRD, Data, M_DRDI) {
    drp_sendDMAData;
    w_deallocateTBE;
    qw_queueMemoryWBRequest;
    k_popIncomingResponseQueue;
  }

  transition(M_DRDI, Memory_Ack, I) {
    aa_sendAck;
    l_popMemQueue;
    kd_wakeUpDependents;
  }

  transition(M, DMA_WRITE, M_DWR) {
    v_allocateTBE;
    inv_sendCacheInvalidate;
    j_popIncomingRequestQueue;
  }

  transition(M_DWR, Data, M_DWRI) {
    qw_queueMemoryWBRequest_partialTBE;
    k_popIncomingResponseQueue;
  }

  transition(M_DWRI, Memory_Ack, I) {
    aa_sendAck;
    da_sendDMAAck;
    w_deallocateTBE;
    l_popMemQueue;
    kd_wakeUpDependents;
  }
}