MI_example-cache.sm revision 14184:11ac1337c5e2
/*
 * Copyright (c) 2009-2012 Mark D. Hill and David A. Wood
 * Copyright (c) 2010-2012 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

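// MI_example L1 cache controller (SLICC). This protocol keeps only two
// stable cache states, M and I: every processor miss, including loads and
// ifetches, issues a GETX to the directory and installs the block in
// Modified. Requests leave on virtual network 2, forwards from the
// directory arrive on virtual network 3, and data responses travel on
// virtual network 4 in both directions; keeping the message classes on
// separate virtual networks helps avoid protocol-level deadlock.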
machine(MachineType:L1Cache, "MI Example L1 Cache")
    : Sequencer * sequencer;
      CacheMemory * cacheMemory;
      Cycles cache_response_latency := 12;
      Cycles issue_latency := 2;
      bool send_evictions;

      // NETWORK BUFFERS
      MessageBuffer * requestFromCache, network="To", virtual_network="2",
            vnet_type="request";
      MessageBuffer * responseFromCache, network="To", virtual_network="4",
            vnet_type="response";

      MessageBuffer * forwardToCache, network="From", virtual_network="3",
            vnet_type="forward";
      MessageBuffer * responseToCache, network="From", virtual_network="4",
            vnet_type="response";

      MessageBuffer * mandatoryQueue;
{
  // STATES
  state_declaration(State, desc="Cache states") {
    I, AccessPermission:Invalid, desc="Not Present/Invalid";
    II, AccessPermission:Busy, desc="Not Present/Invalid, issued PUT";
    M, AccessPermission:Read_Write, desc="Modified";
    MI, AccessPermission:Busy, desc="Modified, issued PUT";
    MII, AccessPermission:Busy, desc="Modified, issued PUTX, received nack";

    IS, AccessPermission:Busy, desc="Issued request for LOAD/IFETCH";
    IM, AccessPermission:Busy, desc="Issued request for STORE/ATOMIC";
  }
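
  // Only I and M are stable states; II, MI, MII, IS, and IM are transient
  // states held while a request or writeback is outstanding. The transient
  // states carry AccessPermission:Busy, so the block is not readable or
  // writable while its transaction is in flight.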

  // EVENTS
  enumeration(Event, desc="Cache events") {
    // From processor

    Load,            desc="Load request from processor";
    Ifetch,          desc="Ifetch request from processor";
    Store,           desc="Store request from processor";

    Data,            desc="Data from network";
    Fwd_GETX,        desc="Forward from network";

    Inv,             desc="Invalidate request from dir";

    Replacement,     desc="Replace a block";
    Writeback_Ack,   desc="Ack from the directory for a writeback";
    Writeback_Nack,  desc="Nack from the directory for a writeback";
  }

  // STRUCTURE DEFINITIONS
  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState,        desc="cache state";
    bool Dirty,              desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk,       desc="Data in the block";
  }

  // TBE fields
  structure(TBE, desc="...") {
    State TBEState,          desc="Transient state";
    DataBlock DataBlk,       desc="data for the block, required for concurrent writebacks";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }

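  // A TBE (Transaction Buffer Entry) plays roughly the role of an MSHR: one
  // is allocated per outstanding miss or writeback and holds the transient
  // state plus, for writebacks, a copy of the data. The TBETable itself is
  // implemented in C++ (external="yes"); only its interface is declared here.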
  // STRUCTURES
  TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";

  // PROTOTYPES
  Tick clockEdge();
  Cycles ticksToCycles(Tick t);
  void set_cache_entry(AbstractCacheEntry a);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();
  void profileMsgDelay(int virtualNetworkType, Cycles b);
  MachineID mapAddressToMachine(Addr addr, MachineType mtype);

  Entry getCacheEntry(Addr address), return_by_pointer="yes" {
    return static_cast(Entry, "pointer", cacheMemory.lookup(address));
  }

  // FUNCTIONS
  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid RubyRequestType");
    }
  }

  State getState(TBE tbe, Entry cache_entry, Addr addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    }
    else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    else {
      return State:I;
    }
  }

  void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      return L1Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if (is_valid(cache_entry)) {
      return L1Cache_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L1Cache_State_to_permission(state));
    }
  }

  void functionalRead(Addr addr, Packet *pkt) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      testAndRead(addr, tbe.DataBlk, pkt);
    } else {
      testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
    }
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      num_functional_writes := num_functional_writes +
        testAndWrite(addr, tbe.DataBlk, pkt);
      return num_functional_writes;
    }

    num_functional_writes := num_functional_writes +
        testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
    return num_functional_writes;
  }
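
  // getState/setState and getAccessPermission are the hooks the generated
  // controller uses to map a (TBE, cache entry, address) triple onto a
  // protocol state: a TBE's transient state takes precedence over the cache
  // entry's state, and a block with neither is treated as Invalid.
  // functionalRead/functionalWrite likewise prefer the TBE's data copy, so
  // functional accesses observe in-flight writeback data.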

  // NETWORK PORTS

  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);

  in_port(forwardRequestNetwork_in, RequestMsg, forwardToCache) {
    if (forwardRequestNetwork_in.isReady(clockEdge())) {
      peek(forwardRequestNetwork_in, RequestMsg, block_on="addr") {

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
        }
        else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.addr, cache_entry, tbe);
        }
        else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.addr, cache_entry, tbe);
        }
        else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
        }
        else {
          error("Unexpected message");
        }
      }
    }
  }

  in_port(responseNetwork_in, ResponseMsg, responseToCache) {
    if (responseNetwork_in.isReady(clockEdge())) {
      peek(responseNetwork_in, ResponseMsg, block_on="addr") {

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.addr, cache_entry, tbe);
        }
        else {
          error("Unexpected message");
        }
      }
    }
  }

  // Mandatory Queue
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
    if (mandatoryQueue_in.isReady(clockEdge())) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        Entry cache_entry := getCacheEntry(in_msg.LineAddress);
        if (is_invalid(cache_entry) &&
            cacheMemory.cacheAvail(in_msg.LineAddress) == false) {
          // make room for the block
          // Check if the line we want to evict is not locked
          Addr addr := cacheMemory.cacheProbe(in_msg.LineAddress);
          check_on_cache_probe(mandatoryQueue_in, addr);
          trigger(Event:Replacement, addr,
                  getCacheEntry(addr),
                  TBEs[addr]);
        }
        else {
          trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                  cache_entry, TBEs[in_msg.LineAddress]);
        }
      }
    }
  }
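
  // Processor requests arrive here from the Sequencer. If the target set is
  // full and the incoming block is not already resident, cacheProbe picks a
  // victim and a Replacement event is triggered for it first;
  // check_on_cache_probe instead stalls this queue when the chosen victim is
  // currently locked by an in-flight transaction.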

  // ACTIONS

  action(a_issueRequest, "a", desc="Issue a request") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
      out_msg.MessageSize := MessageSizeType:Control;
    }
  }

  action(b_issuePUT, "b", desc="Issue a PUT request") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      assert(is_valid(cache_entry));
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:PUTX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.MessageSize := MessageSizeType:Data;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardRequestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(ee_sendDataFromTBE, "\e", desc="Send data from TBE to requestor") {
    peek(forwardRequestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }
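
  // e_sendData satisfies a forwarded GETX while the block is still in the
  // cache; ee_sendDataFromTBE handles the same forward after the block has
  // already been deallocated for a writeback (states MI/MII/II), which is
  // why the TBE keeps its own DataBlk copy.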

  action(i_allocateL1CacheBlock, "i", desc="Allocate a cache block") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(cacheMemory.allocate(address, new Entry));
    }
  }

  action(h_deallocateL1CacheBlock, "h", desc="deallocate a cache block") {
    if (is_valid(cache_entry)) {
      cacheMemory.deallocate(address);
      unset_cache_entry();
    }
  }

  action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
    mandatoryQueue_in.dequeue(clockEdge());
  }

  action(n_popResponseQueue, "n", desc="Pop the response queue") {
    Tick delay := responseNetwork_in.dequeue(clockEdge());
    profileMsgDelay(1, ticksToCycles(delay));
  }

  action(o_popForwardedRequestQueue, "o", desc="Pop the forwarded request queue") {
    Tick delay := forwardRequestNetwork_in.dequeue(clockEdge());
    profileMsgDelay(2, ticksToCycles(delay));
  }

  action(p_profileMiss, "pi", desc="Profile cache miss") {
    ++cacheMemory.demand_misses;
  }

  action(p_profileHit, "ph", desc="Profile cache hit") {
    ++cacheMemory.demand_hits;
  }

  action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    cacheMemory.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk, false);
  }

  action(rx_load_hit, "rx", desc="External load completed.") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
      cacheMemory.setMRU(cache_entry);
      sequencer.readCallback(address, cache_entry.DataBlk, true,
                             machineIDToMachineType(in_msg.Sender));
    }
  }

  action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    cacheMemory.setMRU(cache_entry);
    sequencer.writeCallback(address, cache_entry.DataBlk, false);
  }

  action(sx_store_hit, "sx", desc="External store completed.") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
      cacheMemory.setMRU(cache_entry);
      sequencer.writeCallback(address, cache_entry.DataBlk, true,
                              machineIDToMachineType(in_msg.Sender));
    }
  }

  action(u_writeDataToCache, "u", desc="Write data to the cache") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
    }
  }

  action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
    if (send_evictions) {
      DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
      sequencer.evictionCallback(address);
    }
  }
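
  // When send_evictions is true, the Sequencer's evictionCallback notifies
  // the CPU model that this line was invalidated or evicted; out-of-order
  // cores can use this, for example, to squash loads that speculatively read
  // the line so the memory consistency model is not violated.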

  action(v_allocateTBE, "v", desc="Allocate TBE") {
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(x_copyDataFromCacheToTBE, "x", desc="Copy data from cache to TBE") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    tbe.DataBlk := cache_entry.DataBlk;
  }

  action(z_stall, "z", desc="stall") {
    // do nothing
  }
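
  // A transition names the starting state(s), the triggering event(s), and
  // optionally the resulting state (omitted when the state is unchanged);
  // the listed actions run in order. Stalling is expressed by running
  // z_stall and not popping the input queue, so the message stays at the
  // head of its port and the event is retried later.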

  // TRANSITIONS

  transition({IS, IM, MI, II, MII}, {Load, Ifetch, Store, Replacement}) {
    z_stall;
  }

  transition({IS, IM}, {Fwd_GETX, Inv}) {
    z_stall;
  }

  transition(MI, Inv) {
    o_popForwardedRequestQueue;
  }

  transition(M, Store) {
    s_store_hit;
    p_profileHit;
    m_popMandatoryQueue;
  }

  transition(M, {Load, Ifetch}) {
    r_load_hit;
    p_profileHit;
    m_popMandatoryQueue;
  }

  transition(I, Inv) {
    o_popForwardedRequestQueue;
  }

  transition(I, Store, IM) {
    v_allocateTBE;
    i_allocateL1CacheBlock;
    a_issueRequest;
    p_profileMiss;
    m_popMandatoryQueue;
  }

  transition(I, {Load, Ifetch}, IS) {
    v_allocateTBE;
    i_allocateL1CacheBlock;
    a_issueRequest;
    p_profileMiss;
    m_popMandatoryQueue;
  }

  transition(IS, Data, M) {
    u_writeDataToCache;
    rx_load_hit;
    w_deallocateTBE;
    n_popResponseQueue;
  }

  transition(IM, Data, M) {
    u_writeDataToCache;
    sx_store_hit;
    w_deallocateTBE;
    n_popResponseQueue;
  }

  transition(M, Fwd_GETX, I) {
    e_sendData;
    forward_eviction_to_cpu;
    o_popForwardedRequestQueue;
  }

  transition(I, Replacement) {
    h_deallocateL1CacheBlock;
  }

  transition(M, {Replacement, Inv}, MI) {
    v_allocateTBE;
    b_issuePUT;
    x_copyDataFromCacheToTBE;
    forward_eviction_to_cpu;
    h_deallocateL1CacheBlock;
  }

  transition(MI, Writeback_Ack, I) {
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }

  transition(MI, Fwd_GETX, II) {
    ee_sendDataFromTBE;
    o_popForwardedRequestQueue;
  }

  transition(MI, Writeback_Nack, MII) {
    o_popForwardedRequestQueue;
  }

  transition(MII, Fwd_GETX, I) {
    ee_sendDataFromTBE;
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }

  transition(II, Writeback_Nack, I) {
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }
}