1/*
2 * Copyright (c) 2017 Jason Lowe-Power
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/**
30 * This file contains a simple example MSI protocol.
31 *
32 * The protocol in this file is based off of the MSI protocol found in
33 * A Primer on Memory Consistency and Cache Coherence
34 *      Daniel J. Sorin, Mark D. Hill, and David A. Wood
35 *      Synthesis Lectures on Computer Architecture 2011 6:3, 141-149
36 *
37 * Table 8.1 contains the transitions and actions found in this file and
38 * section 8.2.4 explains the protocol in detail.
39 *
40 * See Learning gem5 Part 3: Ruby for more details.
41 *
42 * Authors: Jason Lowe-Power
43 */
44
/// Declare a machine with type L1Cache.
machine(MachineType:L1Cache, "MSI cache")
    : Sequencer *sequencer; // Incoming requests from the CPU come from this
      CacheMemory *cacheMemory; // This stores the data and cache states
      bool send_evictions; // Needed to support O3 CPU and mwait

      // Other declarations
      // Message buffers are required to send and receive data from the Ruby
      // network. The from/to and request/response can be confusing!
      // Virtual networks are needed to prevent deadlock (e.g., it is bad if a
      // response gets stuck behind a stalled request). In this protocol, we are
      // using three virtual networks. The highest priority is responses,
      // followed by forwarded requests, then requests have the lowest priority.

      // Requests *to* the directory
      MessageBuffer * requestToDir, network="To", virtual_network="0",
            vnet_type="request";
      // Responses *to* the directory or other caches
      MessageBuffer * responseToDirOrSibling, network="To", virtual_network="2",
            vnet_type="response";

      // Requests *from* the directory for fwds, invs, and put acks.
      MessageBuffer * forwardFromDir, network="From", virtual_network="1",
            vnet_type="forward";
      // Responses *from* directory and other caches for this cache's reqs.
      MessageBuffer * responseFromDirOrSibling, network="From",
            virtual_network="2", vnet_type="response";

      // This is all of the incoming requests from the core via the sequencer
      MessageBuffer * mandatoryQueue;
75{
    // Declare the states that this cache will use. These are both stable
    // states (no underscore) and transient states (with underscore). Letters
    // after the underscores are superscript in Sorin et al.
    // Underscores and "desc" are used when generating HTML tables.
    // Access permissions are used for functional accesses. For reads, the
    // functional access reads *all* of the blocks with a matching address that
    // have read-only or read-write permission. For functional writes, all
    // blocks are updated with new data if they have busy, read-only, or
    // read-write permission.
    state_declaration(State, desc="Cache states") {
        I,      AccessPermission:Invalid,
                    desc="Not present/Invalid";

        // States moving out of I
        IS_D,   AccessPermission:Invalid,
                    desc="Invalid, moving to S, waiting for data";
        IM_AD,  AccessPermission:Invalid,
                    desc="Invalid, moving to M, waiting for acks and data";
        IM_A,   AccessPermission:Busy,
                    desc="Invalid, moving to M, waiting for acks";

        S,      AccessPermission:Read_Only,
                    desc="Shared. Read-only, other caches may have the block";

        // States moving out of S
        SM_AD,  AccessPermission:Read_Only,
                    desc="Shared, moving to M, waiting for acks and 'data'";
        SM_A,   AccessPermission:Read_Only,
                    desc="Shared, moving to M, waiting for acks";

        M,      AccessPermission:Read_Write,
                    desc="Modified. Read & write permissions. Owner of block";

        // States moving to Invalid
        MI_A,   AccessPermission:Busy,
                    desc="Was modified, moving to I, waiting for put ack";
        SI_A,   AccessPermission:Busy,
                    desc="Was shared, moving to I, waiting for put ack";
        II_A,   AccessPermission:Invalid,
                    desc="Sent valid data before receiving put ack. ";
                         // Still waiting for the put ack before leaving II_A.
    }
118
    // Events that can be triggered on incoming messages. These are the events
    // that will trigger transitions
    enumeration(Event, desc="Cache events") {
        // From the processor/sequencer/mandatory queue
        Load,           desc="Load from processor";
        Store,          desc="Store from processor";

        // Internal event (only triggered from processor requests)
        Replacement,    desc="Triggered when block is chosen as victim";

        // Forwarded request from other cache via dir on the forward network
        FwdGetS,        desc="Directory sent us a request to satisfy GetS. ";
                             // We must have the block in M to respond to this.
        FwdGetM,        desc="Directory sent us a request to satisfy GetM. ";
                             // We must have the block in M to respond to this.
        Inv,            desc="Invalidate from the directory.";
        PutAck,         desc="Response from directory after we issue a put. ";
                             // This must be on the fwd network to avoid
                             // deadlock.
        // Responses from directory
        DataDirNoAcks,  desc="Data from directory (acks = 0)";
        DataDirAcks,    desc="Data from directory (acks > 0)";

        // Responses from other caches
        DataOwner,      desc="Data from owner";
        InvAck,         desc="Invalidation ack from other cache after Inv";

        // Special internally triggered event to simplify implementation
        LastInvAck,     desc="Triggered after the last ack is received";
    }
150
    // A structure for the cache entry. This stores the cache data and state
    // as defined above. You can put any other information here you like.
    // The AbstractCacheEntry is defined in
    // src/mem/ruby/slicc_interface/AbstractCacheEntry.hh
    // If you want to use any of the functions in the abstract entry declare
    // them here.
    structure(Entry, desc="Cache entry", interface="AbstractCacheEntry") {
        State CacheState,        desc="cache state";
        DataBlock DataBlk,       desc="Data in the block";
    }
161
    // TBE is the "transaction buffer entry". This stores information needed
    // during transient states. This is *like* an MSHR. It functions as an MSHR
    // in this protocol, but the entry is also allocated for other uses
    // (e.g., holding data during MI_A after the cache block is deallocated).
    structure(TBE, desc="Entry for transient requests") {
        State TBEState,         desc="State of block";
        DataBlock DataBlk,      desc="Data for the block. Needed for MI_A";
        int AcksOutstanding, default=0, desc="Number of acks left to receive.";
    }
170
    // Table of TBE entries. This is defined externally in
    // src/mem/ruby/structures/TBETable.hh. It is templatized on the TBE
    // structure defined above.
    structure(TBETable, external="yes") {
      TBE lookup(Addr);
      void allocate(Addr);
      void deallocate(Addr);
      bool isPresent(Addr);
    }
180
181    /*************************************************************************/
182    // Some declarations of member functions and member variables.
183
184    // The TBE table for this machine. It is templatized under the covers.
185    // NOTE: SLICC mangles names with the machine type. Thus, the TBE declared
186    //       above will be L1Cache_TBE in C++.
187    // We also have to pass through a parameter to the machine to the TBETable.
188    TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
189
190    // Declare all of the functions of the AbstractController that we may use
191    // in this file.
192    // Functions from clocked object
193    Tick clockEdge();
194
195    // Functions we must use to set things up for the transitions to execute
196    // correctly.
197    // These next set/unset functions are used to populate the implicit
198    // variables used in actions. This is required when a transition has
199    // multiple actions.
200    void set_cache_entry(AbstractCacheEntry a);
201    void unset_cache_entry();
202    void set_tbe(TBE b);
203    void unset_tbe();
204
205    // Given an address and machine type this queries the network to check
206    // where it should be sent. In a real implementation, this might be fixed
207    // at design time, but this function gives us flexibility at runtime.
208    // For example, if you have multiple memory channels, this function will
209    // tell you which addresses to send to which memory controller.
210    MachineID mapAddressToMachine(Addr addr, MachineType mtype);
211
212    // Convience function to look up the cache entry.
213    // Needs a pointer so it will be a reference and can be updated in actions
214    Entry getCacheEntry(Addr address), return_by_pointer="yes" {
215        return static_cast(Entry, "pointer", cacheMemory.lookup(address));
216    }
217
218    /*************************************************************************/
219    // Functions that we need to define/override to use our specific structures
220    // in this implementation.
221
    // Required function for getting the current state of the block.
    // This is called from the transition to know which transition to execute
    State getState(TBE tbe, Entry cache_entry, Addr addr) {
        // The TBE state will override the state in cache memory, if valid
        if (is_valid(tbe)) { return tbe.TBEState; }
        // Next, if the cache entry is valid, it holds the state
        else if (is_valid(cache_entry)) { return cache_entry.CacheState; }
        // If the block isn't present, then its state must be I.
        else { return State:I; }
    }
232
233
    // Required function for setting the current state of the block.
    // This is called from the transition to set the ending state.
    // Needs to set both the TBE and the cache entry state.
    // This is also called when transitioning to I so it's possible the TBE and/
    // or the cache_entry is invalid.
    void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
      if (is_valid(tbe)) { tbe.TBEState := state; }
      if (is_valid(cache_entry)) { cache_entry.CacheState := state; }
    }
243
    // Required function to override. Used for functional access to know where
    // the valid data is. NOTE: L1Cache_State_to_permission is automatically
    // created based on the access permissions in the state_declaration.
    // This is mangled by both the MachineType and the name of the state
    // declaration ("State" in this case)
    AccessPermission getAccessPermission(Addr addr) {
        // As in getState, a valid TBE takes precedence over the cache entry.
        TBE tbe := TBEs[addr];
        if(is_valid(tbe)) {
            return L1Cache_State_to_permission(tbe.TBEState);
        }

        Entry cache_entry := getCacheEntry(addr);
        if(is_valid(cache_entry)) {
            return L1Cache_State_to_permission(cache_entry.CacheState);
        }

        return AccessPermission:NotPresent;
    }
262
    // Required function to override. Like above function, but sets the state.
    void setAccessPermission(Entry cache_entry, Addr addr, State state) {
        if (is_valid(cache_entry)) {
            cache_entry.changePermission(L1Cache_State_to_permission(state));
        }
    }
269
    // Required function to override for functionally reading/writing data.
    // NOTE: testAndRead/Write defined in src/mem/ruby/slicc_interface/Util.hh
    void functionalRead(Addr addr, Packet *pkt) {
        // Prefer the TBE's copy of the data if one exists (transient state).
        TBE tbe := TBEs[addr];
        if(is_valid(tbe)) {
            testAndRead(addr, tbe.DataBlk, pkt);
        } else {
            testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
        }
    }
280
    // Functionally write the packet's data into the TBE copy if one exists,
    // otherwise into the cache entry. Returns 1 if a block was written,
    // 0 otherwise (testAndWrite checks address match and permission).
    int functionalWrite(Addr addr, Packet *pkt) {
        TBE tbe := TBEs[addr];
        if(is_valid(tbe)) {
            if (testAndWrite(addr, tbe.DataBlk, pkt)) {
                return 1;
            } else {
                return 0;
            }
        } else {
            if (testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt)) {
                return 1;
            } else {
                return 0;
            }
        }
    }
297
298    /*************************************************************************/
299    // Input/output network definitions
300
    // Output ports. This defines the message types that will flow across the
    // output buffers as defined above. These must be "to" networks.
    // "request_out" is the name we'll use later to send requests.
    // "RequestMsg" is the message type we will send (see MSI-msg.sm)
    // "requestToDir" is the name of the MessageBuffer declared above that
    //      we are sending these requests out of.
    out_port(request_out, RequestMsg, requestToDir);
    out_port(response_out, ResponseMsg, responseToDirOrSibling);
309
    // Input ports. The order here is/(can be) important. The code in each
    // in_port is executed in the order specified in this file (or by the rank
    // parameter). Thus, we must sort these based on the network priority.
    // In this cache, the order is responses from other caches, forwards, then
    // requests from the CPU.

    // Like the out_port above
    // "response_in" is the name we'll use later when we refer to this port
    // "ResponseMsg" is the type of message we expect on this port
    // "responseFromDirOrSibling" is the name of the buffer this in_port is
    // connected to for responses from other caches and the directory.
    in_port(response_in, ResponseMsg, responseFromDirOrSibling) {
        // NOTE: You have to check to make sure the message buffer has a valid
        // message at the head. The code in in_port is executed either way.
        if (response_in.isReady(clockEdge())) {
            // Peek is a special function. Any code inside a peek statement has
            // a special variable declared and populated: in_msg. This contains
            // the message (of type ResponseMsg in this case) at the head.
            // "response_in" is the port we want to peek into
            // "ResponseMsg" is the type of message we expect.
            peek(response_in, ResponseMsg) {
                // Grab the entry and tbe if they exist.
                Entry cache_entry := getCacheEntry(in_msg.addr);
                TBE tbe := TBEs[in_msg.addr];
                // The TBE better exist since this is a response and we need to
                // be able to check the remaining acks.
                assert(is_valid(tbe));

                // If it's from the directory...
                if (machineIDToMachineType(in_msg.Sender) ==
                            MachineType:Directory) {
                    if (in_msg.Type != CoherenceResponseType:Data) {
                        error("Directory should only reply with data");
                    }
                    // Take the in_msg acks and add (sub) the Acks we've seen.
                    // The InvAck will decrement the acks we're waiting for in
                    // tbe.AcksOutstanding to below 0 if we haven't gotten the
                    // dir resp yet. So, if this is 0 we don't need to wait
                    assert(in_msg.Acks + tbe.AcksOutstanding >= 0);
                    if (in_msg.Acks + tbe.AcksOutstanding == 0) {
                        trigger(Event:DataDirNoAcks, in_msg.addr, cache_entry,
                                tbe);
                    } else {
                        // If it's not 0, then we need to wait for more acks
                        // and we'll trigger LastInvAck later.
                        trigger(Event:DataDirAcks, in_msg.addr, cache_entry,
                                tbe);
                    }
                } else {
                    // This is from another cache.
                    if (in_msg.Type == CoherenceResponseType:Data) {
                        trigger(Event:DataOwner, in_msg.addr, cache_entry,
                                tbe);
                    } else if (in_msg.Type == CoherenceResponseType:InvAck) {
                        DPRINTF(RubySlicc, "Got inv ack. %d left\n",
                                tbe.AcksOutstanding);
                        if (tbe.AcksOutstanding == 1) {
                            // If there is exactly one ack remaining then we
                            // know it is the last ack.
                            trigger(Event:LastInvAck, in_msg.addr, cache_entry,
                                    tbe);
                        } else {
                            trigger(Event:InvAck, in_msg.addr, cache_entry,
                                    tbe);
                        }
                    } else {
                        error("Unexpected response from other cache");
                    }
                }
            }
        }
    }
382
    // Forwarded requests for other caches, plus invalidations and put acks,
    // all arrive from the directory on the forward network.
    in_port(forward_in, RequestMsg, forwardFromDir) {
        if (forward_in.isReady(clockEdge())) {
            peek(forward_in, RequestMsg) {
                // Grab the entry and tbe if they exist.
                Entry cache_entry := getCacheEntry(in_msg.addr);
                TBE tbe := TBEs[in_msg.addr];

                if (in_msg.Type == CoherenceRequestType:GetS) {
                    // This is a special function that will trigger a
                    // transition (as defined below). It *must* have these
                    // parameters.
                    trigger(Event:FwdGetS, in_msg.addr, cache_entry, tbe);
                } else if (in_msg.Type == CoherenceRequestType:GetM) {
                    trigger(Event:FwdGetM, in_msg.addr, cache_entry, tbe);
                } else if (in_msg.Type == CoherenceRequestType:Inv) {
                    trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
                } else if (in_msg.Type == CoherenceRequestType:PutAck) {
                    trigger(Event:PutAck, in_msg.addr, cache_entry, tbe);
                } else {
                    error("Unexpected forward message!");
                }
            }
        }
    }
408
    // The "mandatory queue" is the port/queue from the CPU or other processor.
    // This is *always* a RubyRequest
    in_port(mandatory_in, RubyRequest, mandatoryQueue) {
        if (mandatory_in.isReady(clockEdge())) {
            // Block all requests if there is already an outstanding request
            // that has the same line address. This is unblocked when we
            // finally respond to the request.
            peek(mandatory_in, RubyRequest, block_on="LineAddress") {
                // NOTE: Using LineAddress here to promote smaller requests to
                // full cache block requests.
                Entry cache_entry := getCacheEntry(in_msg.LineAddress);
                TBE tbe := TBEs[in_msg.LineAddress];
                // If there isn't a matching entry and no room in the cache,
                // then we need to find a victim.
                if (is_invalid(cache_entry) &&
                        cacheMemory.cacheAvail(in_msg.LineAddress) == false ) {
                    // make room for the block
                    // The "cacheProbe" function looks at the cache set for
                    // the address and queries the replacement protocol for
                    // the address to replace. It returns the address to repl.
                    // NOTE: we trigger Replacement on the *victim's* address
                    // and entry/TBE, not the incoming request's.
                    Addr addr := cacheMemory.cacheProbe(in_msg.LineAddress);
                    Entry victim_entry := getCacheEntry(addr);
                    TBE victim_tbe := TBEs[addr];
                    trigger(Event:Replacement, addr, victim_entry, victim_tbe);
                } else {
                    if (in_msg.Type == RubyRequestType:LD ||
                            in_msg.Type == RubyRequestType:IFETCH) {
                        trigger(Event:Load, in_msg.LineAddress, cache_entry,
                                tbe);
                    } else if (in_msg.Type == RubyRequestType:ST) {
                        trigger(Event:Store, in_msg.LineAddress, cache_entry,
                                tbe);
                    } else {
                        error("Unexpected type from processor");
                    }
                }
            }
        }
    }
448
449
450    /*************************************************************************/
451    // Below are all of the actions that might be taken on a transition.
452
    // Each action has a name, a shorthand, and a description.
454    // The shorthand is used when generating the HTML tables for the protocol.
455    // "\" in the shorthand cause that letter to be bold. Underscores insert a
456    // space, ^ makes the rest of the letters superscript.
457    // The description is also shown in the HTML table when clicked
458
459    // The first set of actions are things we will do to interact with the
460    // rest of the system. Things like sending requests/responses.
461
462    // Action blocks define a number of implicit variables that are useful.
463    // These variables come straight from the trigger() call in the in_port
464    // blocks.
465    // address: The address passed in the trigger (usually the in_msg.addr,
466    //          though it can be different. E.g., on a replacement it is the
467    //          victim address).
468    // cache_entry: The cache entry passed in the trigger call
469    // tbe: The TBE passed in the trigger call
470    action(sendGetS, 'gS', desc="Send GetS to the directory") {
471        // The syntax for enqueue is a lot like peek. Instead of populating
472        // in_msg, enqueue has an out_msg reference. Whatever you set on out_msg
473        // is sent through the out port specified.  "request_out" is the port
474        // we're sending the message out of "RequestMsg" is the type of message
475        // we're sending "1" is the latency (in cycles) the port waits before
476        // sending the message.
477        enqueue(request_out, RequestMsg, 1) {
478            out_msg.addr := address;
479            // This type is defined in MSI-msg.sm for this protocol.
480            out_msg.Type := CoherenceRequestType:GetS;
481            // The destination may change depending on the address striping
482            // across different directories, so query the network.
483            out_msg.Destination.add(mapAddressToMachine(address,
484                                    MachineType:Directory));
485            // See mem/ruby/protocol/RubySlicc_Exports.sm for possible sizes.
486            out_msg.MessageSize := MessageSizeType:Control;
487            // Set that the reqeustor is this machine so we get the response.
488            out_msg.Requestor := machineID;
489        }
490    }
491
    // Request exclusive (read-write) permission for this block from the dir.
    action(sendGetM, "gM", desc="Send GetM to the directory") {
        enqueue(request_out, RequestMsg, 1) {
            out_msg.addr := address;
            out_msg.Type := CoherenceRequestType:GetM;
            out_msg.Destination.add(mapAddressToMachine(address,
                                    MachineType:Directory));
            out_msg.MessageSize := MessageSizeType:Control;
            out_msg.Requestor := machineID;
        }
    }
502
    // NOTE: Clean evict. Required to keep the directory state up-to-date
    action(sendPutS, "pS", desc="Send PutS to the directory") {
        enqueue(request_out, RequestMsg, 1) {
            out_msg.addr := address;
            out_msg.Type := CoherenceRequestType:PutS;
            out_msg.Destination.add(mapAddressToMachine(address,
                                    MachineType:Directory));
            // No data needed: the block is clean in S.
            out_msg.MessageSize := MessageSizeType:Control;
            out_msg.Requestor := machineID;
        }
    }
514
    // Writeback of a (possibly dirty) M block: the data travels with the put.
    action(sendPutM, "pM", desc="Send putM+data to the directory") {
        enqueue(request_out, RequestMsg, 1) {
            out_msg.addr := address;
            out_msg.Type := CoherenceRequestType:PutM;
            out_msg.Destination.add(mapAddressToMachine(address,
                                    MachineType:Directory));
            out_msg.DataBlk := cache_entry.DataBlk;
            out_msg.MessageSize := MessageSizeType:Data;
            out_msg.Requestor := machineID;
        }
    }
526
    action(sendCacheDataToReq, "cdR", desc="Send cache data to requestor") {
        // We have to peek into the request to see who to send to.
        // If we are in both the peek and the enqueue block then we have access
        // to both in_msg and out_msg.
        assert(is_valid(cache_entry));
        peek(forward_in, RequestMsg) {
            enqueue(response_out, ResponseMsg, 1) {
                out_msg.addr := address;
                out_msg.Type := CoherenceResponseType:Data;
                out_msg.Destination.add(in_msg.Requestor);
                out_msg.DataBlk := cache_entry.DataBlk;
                out_msg.MessageSize := MessageSizeType:Data;
                out_msg.Sender := machineID;
            }
        }
    }
543
    // Used on FwdGetS: the directory also needs a copy of the data so it can
    // supply future sharers (we are downgrading from M).
    action(sendCacheDataToDir, "cdD", desc="Send the cache data to the dir") {
        enqueue(response_out, ResponseMsg, 1) {
            out_msg.addr := address;
            out_msg.Type := CoherenceResponseType:Data;
            out_msg.Destination.add(mapAddressToMachine(address,
                                    MachineType:Directory));
            out_msg.DataBlk := cache_entry.DataBlk;
            out_msg.MessageSize := MessageSizeType:Data;
            out_msg.Sender := machineID;
        }
    }
555
    action(sendInvAcktoReq, "iaR", desc="Send inv-ack to requestor") {
        peek(forward_in, RequestMsg) {
            enqueue(response_out, ResponseMsg, 1) {
                out_msg.addr := address;
                out_msg.Type := CoherenceResponseType:InvAck;
                out_msg.Destination.add(in_msg.Requestor);
                // NOTE(review): DataBlk is attached even though this is a
                // Control-size ack; presumably the payload is ignored by the
                // receiver — confirm before removing.
                out_msg.DataBlk := cache_entry.DataBlk;
                out_msg.MessageSize := MessageSizeType:Control;
                out_msg.Sender := machineID;
            }
        }
    }
568
    action(decrAcks, "da", desc="Decrement the number of acks") {
        assert(is_valid(tbe));
        // May go below 0 if InvAcks arrive before the directory's data
        // response (see the response_in port comments).
        tbe.AcksOutstanding := tbe.AcksOutstanding - 1;
        // This annotates the protocol trace
        APPEND_TRANSITION_COMMENT("Acks: ");
        APPEND_TRANSITION_COMMENT(tbe.AcksOutstanding);
    }
576
    action(storeAcks, "sa", desc="Store the needed acks to the TBE") {
        assert(is_valid(tbe));
        peek(response_in, ResponseMsg) {
            // Add, not assign: AcksOutstanding may already be negative from
            // early InvAcks that arrived before this directory response.
            tbe.AcksOutstanding := in_msg.Acks + tbe.AcksOutstanding;
        }
        assert(tbe.AcksOutstanding > 0);
    }
584
585    // Responses to CPU requests (e.g., hits and store acks)
586
    action(loadHit, "Lh", desc="Load hit") {
        assert(is_valid(cache_entry));
        // Set this entry as the most recently used for the replacement policy
        cacheMemory.setMRU(cache_entry);
        // Send the data back to the sequencer/CPU. NOTE: False means it was
        // not an "external hit", but hit in this local cache.
        sequencer.readCallback(address, cache_entry.DataBlk, false);
    }
595
    action(externalLoadHit, "xLh", desc="External load hit (was a miss)") {
        assert(is_valid(cache_entry));
        peek(response_in, ResponseMsg) {
            cacheMemory.setMRU(cache_entry);
            // Forward the type of machine that responded to this request
            // E.g., another cache or the directory. This is used for tracking
            // statistics.
            sequencer.readCallback(address, cache_entry.DataBlk, true,
                                   machineIDToMachineType(in_msg.Sender));
        }
    }
607
    action(storeHit, "Sh", desc="Store hit") {
        assert(is_valid(cache_entry));
        cacheMemory.setMRU(cache_entry);
        // The same as the read callback above.
        sequencer.writeCallback(address, cache_entry.DataBlk, false);
    }
614
    action(externalStoreHit, "xSh", desc="External store hit (was a miss)") {
        assert(is_valid(cache_entry));
        peek(response_in, ResponseMsg) {
            cacheMemory.setMRU(cache_entry);
            sequencer.writeCallback(address, cache_entry.DataBlk, true,
                                   // Note: this could be the last ack.
                                   machineIDToMachineType(in_msg.Sender));
        }
    }
624
    // Only notify the CPU if configured to (needed for O3 CPU and mwait).
    action(forwardEviction, "e", desc="sends eviction notification to CPU") {
        if (send_evictions) {
            sequencer.evictionCallback(address);
        }
    }
630
631    // Cache management actions
632
    action(allocateCacheBlock, "a", desc="Allocate a cache block") {
        assert(is_invalid(cache_entry));
        assert(cacheMemory.cacheAvail(address));
        // Create a new entry and update cache_entry to the new entry
        set_cache_entry(cacheMemory.allocate(address, new Entry));
    }
639
    action(deallocateCacheBlock, "d", desc="Deallocate a cache block") {
        assert(is_valid(cache_entry));
        cacheMemory.deallocate(address);
        // clear the cache_entry variable (now it's invalid)
        unset_cache_entry();
    }
646
    action(writeDataToCache, "wd", desc="Write data to the cache") {
        // Copy the data block from the message at the head of the response
        // queue into the (already allocated) cache entry.
        peek(response_in, ResponseMsg) {
            assert(is_valid(cache_entry));
            cache_entry.DataBlk := in_msg.DataBlk;
        }
    }
653
    action(allocateTBE, "aT", desc="Allocate TBE") {
        // A TBE (transaction buffer entry) tracks miss state such as the
        // outstanding invalidation-ack count; at most one per address.
        assert(is_invalid(tbe));
        TBEs.allocate(address);
        // this updates the tbe variable for other actions
        set_tbe(TBEs[address]);
    }
660
    action(deallocateTBE, "dT", desc="Deallocate TBE") {
        // Release the miss-tracking state once the transaction completes.
        assert(is_valid(tbe));
        TBEs.deallocate(address);
        // this makes the tbe variable invalid
        unset_tbe();
    }
667
668    // Queue management actions
669
    action(popMandatoryQueue, "pQ", desc="Pop the mandatory queue") {
        // Consume the CPU request at the head of the queue once it has been
        // fully handled by this transition.
        mandatory_in.dequeue(clockEdge());
    }
673
    action(popResponseQueue, "pR", desc="Pop the response queue") {
        // Consume the response message at the head of the queue.
        response_in.dequeue(clockEdge());
    }
677
    action(popForwardQueue, "pF", desc="Pop the forward queue") {
        // Consume the forwarded request at the head of the queue.
        forward_in.dequeue(clockEdge());
    }
681
682    // Stalling actions
683
    action(stall, "z", desc="Stall the incoming request") {
        // Do nothing. However, the transition must have some action to be
        // valid which is why this is needed.
        // NOTE: There are other more complicated but higher performing stalls
        // in Ruby like recycle() or stall_and_wait.
        // z_stall stalls everything in the queue behind this request.
    }
691
692
693    /*************************************************************************/
    // These are the transition definitions. These are simply each cell in the
695    // table from Sorin et al. These are mostly in upper-left to bottom-right
696    // order
697
    // Each transition has (up to) 3 parameters: the current state, the
699    // triggering event and the final state. Thus, the below transition reads
700    // "Move from state I on a Load event to state IS_D". Below are other
701    // examples of transition statements.
    // Within the transition statement is a set of actions to take during the
703    // transition. These actions are executed atomically (i.e., all or nothing)
    // Load miss in Invalid: request shared data (GetS) and wait in IS_D.
    transition(I, Load, IS_D) {
        // Make sure there is room in the cache to put the block whenever the
        // miss returns. Otherwise we could deadlock.
        allocateCacheBlock;
        // We may need to track acks for this block and only the TBE holds an
        // ack count. Thus, we need to allocate both a TBE and cache block.
        allocateTBE;
        // Actually send the request to the directory
        sendGetS;
        // Since we have handled this request on the mandatory queue, we can pop
        popMandatoryQueue;
    }
716
    // Store miss in Invalid: request exclusive data (GetM) and wait in
    // IM_AD for both the data and any invalidation acks.
    transition(I, Store, IM_AD) {
        allocateCacheBlock;
        allocateTBE;
        sendGetM;
        popMandatoryQueue;
    }
723
724    // You can use {} to specify multiple states or events for which the
725    // transition applies. For instance, below. If we are in IS_D, then on any
726    // of the following Events (Load, Store, Replacement, Inv) we should stall
727    // When there is no third parameter to transition, it means that we want
728    // to stay in the initial state.
    transition(IS_D, {Load, Store, Replacement, Inv}) {
        // Load miss in flight: defer all further CPU and forwarded activity
        // on this address until the data response arrives.
        stall;
    }
732
733    // Similarly, on either DataDirNoAcks or DataOwner we should go to S
    // Data arrived (from the directory with no acks needed, or from the
    // owning cache): fill the block and complete the pending load.
    transition(IS_D, {DataDirNoAcks, DataOwner}, S) {
        writeDataToCache;
        deallocateTBE;
        externalLoadHit;
        popResponseQueue;
    }
740
    transition({IM_AD, IM_A}, {Load, Store, Replacement, FwdGetS, FwdGetM}) {
        // Store miss in flight: defer everything until data/acks arrive.
        stall;
    }
744
    // Data arrived with no invalidation acks outstanding: the store miss
    // (or upgrade) can complete immediately.
    transition({IM_AD, SM_AD}, {DataDirNoAcks, DataOwner}, M) {
        writeDataToCache;
        deallocateTBE;
        externalStoreHit;
        popResponseQueue;
    }
751
    // Data arrived but invalidation acks are still outstanding: record how
    // many acks to expect (in the TBE) and wait for them in IM_A.
    transition(IM_AD, DataDirAcks, IM_A) {
        writeDataToCache;
        storeAcks;
        popResponseQueue;
    }
757
    transition({IM_AD, IM_A, SM_AD, SM_A}, InvAck) {
        // Count down one invalidation ack from a former sharer.
        decrAcks;
        popResponseQueue;
    }
762
    // The final invalidation ack arrived: the store can now complete.
    transition({IM_A, SM_A}, LastInvAck, M) {
        deallocateTBE;
        externalStoreHit;
        popResponseQueue;
    }
768
    // Loads hit in every state where we hold a readable copy, including the
    // transient upgrade states (which entered from S with valid data).
    transition({S, SM_AD, SM_A, M}, Load) {
        loadHit;
        popMandatoryQueue;
    }
773
    // Store to a Shared block: upgrade via GetM. No cache block is
    // allocated (we already hold one) but a TBE is needed for ack counting.
    transition(S, Store, SM_AD) {
        allocateTBE;
        sendGetM;
        popMandatoryQueue;
    }
779
    transition(S, Replacement, SI_A) {
        // Evict a clean Shared copy: notify the directory and await PutAck.
        // NOTE(review): no queue pop here -- presumably the request that
        // triggered the replacement stays queued and is re-handled once the
        // victim is gone; verify against the mandatory in_port logic.
        sendPutS;
    }
783
    // Another core is obtaining M: ack the invalidation, notify the CPU
    // that the block is gone, and free the entry.
    transition(S, Inv, I) {
        sendInvAcktoReq;
        forwardEviction;
        deallocateCacheBlock;
        popForwardQueue;
    }
790
    transition({SM_AD, SM_A}, {Store, Replacement, FwdGetS, FwdGetM}) {
        // Upgrade in flight: defer further stores and forwarded requests.
        // (Loads still hit -- see the combined Load transition above.)
        stall;
    }
794
    // Our Shared copy is invalidated while the upgrade is pending: ack the
    // invalidation and fall back to the plain I -> M path; fresh data will
    // arrive with the directory/owner response.
    transition(SM_AD, Inv, IM_AD) {
        sendInvAcktoReq;
        popForwardQueue;
    }
799
    // Upgrade data arrived but acks are still outstanding (mirrors the
    // IM_AD -> IM_A case above).
    transition(SM_AD, DataDirAcks, SM_A) {
        writeDataToCache;
        storeAcks;
        popResponseQueue;
    }
805
    // Store hit in Modified: complete immediately, stay in M.
    transition(M, Store) {
        storeHit;
        // NOTE(review): forwardEviction on a store *hit* looks suspicious --
        // the block remains in M and nothing is evicted. Confirm whether
        // this is intentional (e.g., to squash an LL/SC reservation) or a
        // copy-paste slip from an invalidating transition.
        forwardEviction;
        popMandatoryQueue;
    }
811
    transition(M, Replacement, MI_A) {
        // Evict a dirty block: send PutM and wait for the directory's
        // PutAck in MI_A.
        // NOTE(review): no queue pop, same pattern as (S, Replacement).
        sendPutM;
    }
815
    // Another cache wants to read: supply the data to the requestor and to
    // the directory (so memory is up to date), then downgrade to Shared.
    transition(M, FwdGetS, S) {
        sendCacheDataToReq;
        sendCacheDataToDir;
        popForwardQueue;
    }
821
    // Another cache wants to write: hand the dirty data directly to the new
    // owner and give up the block.
    transition(M, FwdGetM, I) {
        sendCacheDataToReq;
        // NOTE(review): unlike (S, Inv, I) there is no forwardEviction here,
        // even though the CPU also loses the block on this path -- confirm
        // whether the eviction notification is needed here too.
        deallocateCacheBlock;
        popForwardQueue;
    }
827
    transition({MI_A, SI_A, II_A}, {Load, Store, Replacement}) {
        // Eviction in flight: defer new CPU requests until PutAck arrives.
        stall;
    }
831
    // A GetS raced with our writeback: still supply the data (to requestor
    // and directory) and continue the eviction as if from Shared.
    transition(MI_A, FwdGetS, SI_A) {
        sendCacheDataToReq;
        sendCacheDataToDir;
        popForwardQueue;
    }
837
    // A GetM raced with our writeback: hand the data to the new owner and
    // then just wait for the PutAck with no valid data left.
    transition(MI_A, FwdGetM, II_A) {
        sendCacheDataToReq;
        popForwardQueue;
    }
842
    transition({MI_A, SI_A, II_A}, PutAck, I) {
        // The directory acknowledged the eviction: finally free the block.
        deallocateCacheBlock;
        popForwardQueue;
    }
847
    // An invalidation raced with our clean eviction: ack it and keep
    // waiting for the PutAck.
    transition(SI_A, Inv, II_A) {
        sendInvAcktoReq;
        popForwardQueue;
    }
852
853}
854