/*
 * Copyright (c) 2017 Jason Lowe-Power
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * This file contains the directory controller of a simple example MSI
 * protocol.
 *
 * In Ruby the directory controller not only holds the directory coherence
 * state but also functions as the memory controller in many ways. There are
 * states in the directory that are both memory-centric and cache-centric.
 * Be careful!
 *
 * The protocol in this file is based on the MSI protocol found in
 * A Primer on Memory Consistency and Cache Coherence
 *      Daniel J. Sorin, Mark D. Hill, and David A. Wood
 *      Synthesis Lectures on Computer Architecture 2011 6:3, 141-149
 *
 * Table 8.2 contains the transitions and actions found in this file and
 * section 8.2.4 explains the protocol in detail.
 *
 * See Learning gem5 Part 3: Ruby for more details.
 *
 * Authors: Jason Lowe-Power
 */

machine(MachineType:Directory, "Directory protocol")
    :
      // This "DirectoryMemory" is a little weird. It is initially allocated
      // so that it *can* cover all of memory (i.e., there are pointers for
      // every 64-byte block in memory). However, the entries are lazily
      // created in getDirectoryEntry()
      DirectoryMemory * directory;
      // You can put any parameters you want here. They will be exported as
      // normal SimObject parameters (like in the SimObject description file)
      // and you can set these parameters at runtime via the python config
      // file. If there is no default here (like directory), it is mandatory
      // to set the parameter in the python config. Otherwise, it uses the
      // default value set here.
      Cycles toMemLatency := 1;
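      // For example, a Python config could override this parameter on the
      // generated controller SimObject (a sketch; "dir_cntrl" is an
      // illustrative variable name, not something defined in this file):
      //     dir_cntrl.toMemLatency = 2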

    // Forwarding requests from the directory *to* the caches.
    MessageBuffer *forwardToCache, network="To", virtual_network="1",
          vnet_type="forward";
    // Response from the directory *to* the cache.
    MessageBuffer *responseToCache, network="To", virtual_network="2",
          vnet_type="response";

    // Requests *from* the cache to the directory
    MessageBuffer *requestFromCache, network="From", virtual_network="0",
          vnet_type="request";

    // Responses *from* the cache to the directory
    MessageBuffer *responseFromCache, network="From", virtual_network="2",
          vnet_type="response";

    // Special buffer for memory responses. Kind of like the mandatory queue
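    // Unlike the buffers above, this one has no network attachment; the
    // controller itself (via AbstractController) queues memory responses
    // here, and the memQueue_in port below drains it.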
    MessageBuffer *responseFromMemory;

{
    // For many things in SLICC you can specify a default. However, this
    // default must use the C++ name (mangled SLICC name). For the state below
    // you have to use the controller name and the name we use for states.
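    // For example, State:S in this machine mangles to "Directory_State_S"
    // in the generated C++ (machine name + "_State_" + state name), which is
    // why the default below is written as "Directory_State_I".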
    state_declaration(State, desc="Directory states",
                      default="Directory_State_I") {
        // Stable states.
        // NOTE: These are "cache-centric" states like in Sorin et al.
        // However, the access permissions are memory-centric.
        I, AccessPermission:Read_Write,  desc="Invalid in the caches.";
        S, AccessPermission:Read_Only,   desc="At least one cache has the blk";
        M, AccessPermission:Invalid,     desc="A cache has the block in M";

        // Transient states
        S_D, AccessPermission:Busy,      desc="Moving to S, but need data";

        // Waiting for data from memory
        S_m, AccessPermission:Read_Write, desc="In S waiting for mem";
        M_m, AccessPermission:Read_Write, desc="Moving to M waiting for mem";

        // Waiting for write-ack from memory
        MI_m, AccessPermission:Busy,       desc="Moving to I waiting for ack";
        SS_m, AccessPermission:Busy,       desc="Moving to S waiting for ack";
    }

    enumeration(Event, desc="Directory events") {
        // Data requests from the cache
        GetS,         desc="Request for read-only data from cache";
        GetM,         desc="Request for read-write data from cache";

        // Writeback requests from the cache
        PutSNotLast,  desc="PutS and the block has other sharers";
        PutSLast,     desc="PutS and the block has no other sharers";
        PutMOwner,    desc="Dirty data writeback from the owner";
        PutMNonOwner, desc="Dirty data writeback from non-owner";

        // Cache responses
        Data,         desc="Response to fwd request with data";

        // From Memory
        MemData,      desc="Data from memory";
        MemAck,       desc="Ack from memory that write is complete";
    }
    // NOTE: We use a NetDest for the sharers and the owner so we can simply
    // copy the structure into the message we send as a response.
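    // For example, sendInvToSharers below copies the sharer list straight
    // into the outgoing message's destination:
    //     out_msg.Destination := getDirectoryEntry(address).Sharers;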
    structure(Entry, desc="...", interface="AbstractEntry") {
        State DirState,         desc="Directory state";
        NetDest Sharers,        desc="Sharers for this block";
        NetDest Owner,          desc="Owner of this block";
    }

    Tick clockEdge();

    // This either returns the valid directory entry, or, if it hasn't been
    // allocated yet, this allocates the entry. This may save some host memory
    // since this is lazily populated.
    Entry getDirectoryEntry(Addr addr), return_by_pointer = "yes" {
        Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
        if (is_invalid(dir_entry)) {
            // The first time we see this address, allocate an entry for it.
            dir_entry := static_cast(Entry, "pointer",
                                     directory.allocate(addr, new Entry));
        }
        return dir_entry;
    }

    /*************************************************************************/
    // Functions that we need to define/override to use our specific structures
    // in this implementation.
    // NOTE: we don't have TBE in this machine, so we don't need to pass it
    // to these overridden functions.

    State getState(Addr addr) {
        if (directory.isPresent(addr)) {
            return getDirectoryEntry(addr).DirState;
        } else {
            return State:I;
        }
    }

    void setState(Addr addr, State state) {
        if (directory.isPresent(addr)) {
            if (state == State:M) {
                DPRINTF(RubySlicc, "Owner %s\n", getDirectoryEntry(addr).Owner);
                assert(getDirectoryEntry(addr).Owner.count() == 1);
                assert(getDirectoryEntry(addr).Sharers.count() == 0);
            }
            getDirectoryEntry(addr).DirState := state;
            if (state == State:I)  {
                assert(getDirectoryEntry(addr).Owner.count() == 0);
                assert(getDirectoryEntry(addr).Sharers.count() == 0);
            }
        }
    }
    // This really returns the access permissions of memory.
    // TODO: I don't understand this at the directory.
    AccessPermission getAccessPermission(Addr addr) {
        if (directory.isPresent(addr)) {
            Entry e := getDirectoryEntry(addr);
            return Directory_State_to_permission(e.DirState);
        } else  {
            return AccessPermission:NotPresent;
        }
    }
    void setAccessPermission(Addr addr, State state) {
        if (directory.isPresent(addr)) {
            Entry e := getDirectoryEntry(addr);
            e.changePermission(Directory_State_to_permission(state));
        }
    }

    void functionalRead(Addr addr, Packet *pkt) {
        functionalMemoryRead(pkt);
    }

    // This returns the number of writes. So, if we write, return 1.
    int functionalWrite(Addr addr, Packet *pkt) {
        if (functionalMemoryWrite(pkt)) {
            return 1;
        } else {
            return 0;
        }
    }


    /*************************************************************************/
    // Network ports

    out_port(forward_out, RequestMsg, forwardToCache);
    out_port(response_out, ResponseMsg, responseToCache);

    in_port(memQueue_in, MemoryMsg, responseFromMemory) {
        if (memQueue_in.isReady(clockEdge())) {
            peek(memQueue_in, MemoryMsg) {
                if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
                    trigger(Event:MemData, in_msg.addr);
                } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
                    trigger(Event:MemAck, in_msg.addr);
                } else {
                    error("Invalid message");
                }
            }
        }
    }

    in_port(response_in, ResponseMsg, responseFromCache) {
        if (response_in.isReady(clockEdge())) {
            peek(response_in, ResponseMsg) {
                if (in_msg.Type == CoherenceResponseType:Data) {
                    trigger(Event:Data, in_msg.addr);
                } else {
                    error("Unexpected message type.");
                }
            }
        }
    }

    in_port(request_in, RequestMsg, requestFromCache) {
        if (request_in.isReady(clockEdge())) {
            peek(request_in, RequestMsg) {
                Entry entry := getDirectoryEntry(in_msg.addr);
                if (in_msg.Type == CoherenceRequestType:GetS) {
                    // NOTE: Since we don't have a TBE in this machine, there
                    // is no need to pass a TBE into trigger. Also, for the
                    // directory there is no cache entry.
                    trigger(Event:GetS, in_msg.addr);
                } else if (in_msg.Type == CoherenceRequestType:GetM) {
                    trigger(Event:GetM, in_msg.addr);
                } else if (in_msg.Type == CoherenceRequestType:PutS) {
                    assert(is_valid(entry));
                    // If there is only a single sharer (i.e., the requestor)
                    if (entry.Sharers.count() == 1) {
                        assert(entry.Sharers.isElement(in_msg.Requestor));
                        trigger(Event:PutSLast, in_msg.addr);
                    } else {
                        trigger(Event:PutSNotLast, in_msg.addr);
                    }
                } else if (in_msg.Type == CoherenceRequestType:PutM) {
                    assert(is_valid(entry));
                    if (entry.Owner.isElement(in_msg.Requestor)) {
                        trigger(Event:PutMOwner, in_msg.addr);
                    } else {
                        trigger(Event:PutMNonOwner, in_msg.addr);
                    }
                } else {
                    error("Unexpected message type.");
                }
            }
        }
    }



    /*************************************************************************/
    // Actions

    // Memory actions.

    action(sendMemRead, "r", desc="Send a memory read request") {
        peek(request_in, RequestMsg) {
            // Special function from AbstractController that will send a new
            // packet out of the "Ruby" black box to the memory side. At some
            // point the response will be on the memory queue.
            // Like enqueue, this takes a latency for the request.
            queueMemoryRead(in_msg.Requestor, address, toMemLatency);
        }
    }

    action(sendDataToMem, "w", desc="Write data to memory") {
        peek(request_in, RequestMsg) {
            DPRINTF(RubySlicc, "Writing memory for %#x\n", address);
            DPRINTF(RubySlicc, "Writing %s\n", in_msg.DataBlk);
            queueMemoryWrite(in_msg.Requestor, address, toMemLatency,
                             in_msg.DataBlk);
        }
    }

    action(sendRespDataToMem, "rw", desc="Write data to memory from resp") {
        peek(response_in, ResponseMsg) {
            DPRINTF(RubySlicc, "Writing memory for %#x\n", address);
            DPRINTF(RubySlicc, "Writing %s\n", in_msg.DataBlk);
            queueMemoryWrite(in_msg.Sender, address, toMemLatency,
                             in_msg.DataBlk);
        }
    }

    // Sharer/owner actions

    action(addReqToSharers, "aS", desc="Add requestor to sharer list") {
        peek(request_in, RequestMsg) {
            getDirectoryEntry(address).Sharers.add(in_msg.Requestor);
        }
    }

    action(setOwner, "sO", desc="Set the owner") {
        peek(request_in, RequestMsg) {
            getDirectoryEntry(address).Owner.add(in_msg.Requestor);
        }
    }

    action(addOwnerToSharers, "oS", desc="Add the owner to sharers") {
        Entry e := getDirectoryEntry(address);
        assert(e.Owner.count() == 1);
        e.Sharers.addNetDest(e.Owner);
    }

    action(removeReqFromSharers, "rS", desc="Remove requestor from sharers") {
        peek(request_in, RequestMsg) {
            getDirectoryEntry(address).Sharers.remove(in_msg.Requestor);
        }
    }

    action(clearSharers, "cS", desc="Clear the sharer list") {
        getDirectoryEntry(address).Sharers.clear();
    }

    action(clearOwner, "cO", desc="Clear the owner") {
        getDirectoryEntry(address).Owner.clear();
    }

    // Invalidates and forwards

    action(sendInvToSharers, "i", desc="Send invalidate to all sharers") {
        peek(request_in, RequestMsg) {
            enqueue(forward_out, RequestMsg, 1) {
                out_msg.addr := address;
                out_msg.Type := CoherenceRequestType:Inv;
                out_msg.Requestor := in_msg.Requestor;
                out_msg.Destination := getDirectoryEntry(address).Sharers;
                out_msg.MessageSize := MessageSizeType:Control;
            }
        }
    }

    action(sendFwdGetS, "fS", desc="Send forward getS to owner") {
        assert(getDirectoryEntry(address).Owner.count() == 1);
        peek(request_in, RequestMsg) {
            enqueue(forward_out, RequestMsg, 1) {
                out_msg.addr := address;
                out_msg.Type := CoherenceRequestType:GetS;
                out_msg.Requestor := in_msg.Requestor;
                out_msg.Destination := getDirectoryEntry(address).Owner;
                out_msg.MessageSize := MessageSizeType:Control;
            }
        }
    }

    action(sendFwdGetM, "fM", desc="Send forward getM to owner") {
        assert(getDirectoryEntry(address).Owner.count() == 1);
        peek(request_in, RequestMsg) {
            enqueue(forward_out, RequestMsg, 1) {
                out_msg.addr := address;
                out_msg.Type := CoherenceRequestType:GetM;
                out_msg.Requestor := in_msg.Requestor;
                out_msg.Destination := getDirectoryEntry(address).Owner;
                out_msg.MessageSize := MessageSizeType:Control;
            }
        }
    }

    // Responses to requests

    // Along with the data, this sends the number of acks (the sharer count)
    // that the requestor should expect when it is becoming the owner.
    action(sendDataToReq, "d", desc="Send data from memory to requestor") {
        peek(memQueue_in, MemoryMsg) {
            enqueue(response_out, ResponseMsg, 1) {
                out_msg.addr := address;
                out_msg.Type := CoherenceResponseType:Data;
                out_msg.Sender := machineID;
                out_msg.Destination.add(in_msg.OriginalRequestorMachId);
                out_msg.DataBlk := in_msg.DataBlk;
                out_msg.MessageSize := MessageSizeType:Data;
                Entry e := getDirectoryEntry(address);
                // Only need to include acks if the requestor is the owner.
                if (e.Owner.isElement(in_msg.OriginalRequestorMachId)) {
                    out_msg.Acks := e.Sharers.count();
                } else {
                    out_msg.Acks := 0;
                }
                assert(out_msg.Acks >= 0);
            }
        }
    }

    action(sendPutAck, "a", desc="Send the put ack") {
        peek(request_in, RequestMsg) {
            enqueue(forward_out, RequestMsg, 1) {
                out_msg.addr := address;
                out_msg.Type := CoherenceRequestType:PutAck;
                out_msg.Requestor := machineID;
                out_msg.Destination.add(in_msg.Requestor);
                out_msg.MessageSize := MessageSizeType:Control;
            }
        }
    }

    // Queue management

    action(popResponseQueue, "pR", desc="Pop the response queue") {
        response_in.dequeue(clockEdge());
    }

    action(popRequestQueue, "pQ", desc="Pop the request queue") {
        request_in.dequeue(clockEdge());
    }

    action(popMemQueue, "pM", desc="Pop the memory queue") {
        memQueue_in.dequeue(clockEdge());
    }

    // Stalling actions
    action(stall, "z", desc="Stall the incoming request") {
        // Do nothing.
    }


    /*************************************************************************/
    // Transitions

    transition({I, S}, GetS, S_m) {
        sendMemRead;
        addReqToSharers;
        popRequestQueue;
    }

    transition(I, {PutSNotLast, PutSLast, PutMNonOwner}) {
        sendPutAck;
        popRequestQueue;
    }

    transition(S_m, MemData, S) {
        sendDataToReq;
        popMemQueue;
    }

    transition(I, GetM, M_m) {
        sendMemRead;
        setOwner;
        popRequestQueue;
    }

    transition(M_m, MemData, M) {
        sendDataToReq;
        clearSharers; // NOTE: This isn't *required* in some cases.
        popMemQueue;
    }

    transition(S, GetM, M_m) {
        sendMemRead;
        removeReqFromSharers;
        sendInvToSharers;
        setOwner;
        popRequestQueue;
    }

    transition({S, S_D, SS_m, S_m}, {PutSNotLast, PutMNonOwner}) {
        removeReqFromSharers;
        sendPutAck;
        popRequestQueue;
    }

    transition(S, PutSLast, I) {
        removeReqFromSharers;
        sendPutAck;
        popRequestQueue;
    }

    transition(M, GetS, S_D) {
        sendFwdGetS;
        addReqToSharers;
        addOwnerToSharers;
        clearOwner;
        popRequestQueue;
    }

    transition(M, GetM) {
        sendFwdGetM;
        clearOwner;
        setOwner;
        popRequestQueue;
    }

    transition({M, M_m, MI_m}, {PutSNotLast, PutSLast, PutMNonOwner}) {
        sendPutAck;
        popRequestQueue;
    }

    transition(M, PutMOwner, MI_m) {
        sendDataToMem;
        clearOwner;
        sendPutAck;
        popRequestQueue;
    }

    transition(MI_m, MemAck, I) {
        popMemQueue;
    }

    transition(S_D, {GetS, GetM}) {
        stall;
    }

    transition(S_D, PutSLast) {
        removeReqFromSharers;
        sendPutAck;
        popRequestQueue;
    }

    transition(S_D, Data, SS_m) {
        sendRespDataToMem;
        popResponseQueue;
    }

    transition(SS_m, MemAck, S) {
        popMemQueue;
    }

    // If we get another request for a block that's waiting on memory,
    // stall that request.
    transition({MI_m, SS_m, S_m, M_m}, {GetS, GetM}) {
        stall;
    }

}