/*
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

machine(MachineType:L0Cache, "MESI Directory L0 Cache")
 : Sequencer * sequencer;
   CacheMemory * Icache;
   CacheMemory * Dcache;
   Cycles request_latency := 2;
   Cycles response_latency := 2;
   bool send_evictions;

   // From this node's L0 cache to the network
   MessageBuffer * bufferToL1, network="To";

   // To this node's L0 cache FROM the network
   MessageBuffer * bufferFromL1, network="From";

   // Message queue between this controller and the processor
   MessageBuffer * mandatoryQueue;
{
  // STATES
  state_declaration(State, desc="Cache states", default="L0Cache_State_I") {
    // Base states

    // The cache entry has not been allocated.
    I, AccessPermission:Invalid;

    // The cache entry is in shared mode. The processor can read this entry
    // but it cannot write to it.
    S, AccessPermission:Read_Only;

    // The cache entry is in exclusive mode. The processor can read this
    // entry. It can write to this entry without informing the directory.
    // On writing, the entry moves to the M state.
    E, AccessPermission:Read_Only;

    // The processor has read and write permissions on this entry.
    M, AccessPermission:Read_Write;

    // Transient States

    // The cache controller has requested an instruction. It will be stored
    // in the shared state so that the processor can read it.
    Inst_IS, AccessPermission:Busy;

    // The cache controller has requested that this entry be fetched in
    // shared state so that the processor can read it.
    IS, AccessPermission:Busy;

    // The cache controller has requested that this entry be fetched in
    // modified state so that the processor can read/write it.
    IM, AccessPermission:Busy;

    // The cache controller had read permission for this entry, but the
    // processor now needs to write to it, so the controller has requested
    // write permission.
    SM, AccessPermission:Read_Only;
  }

  // EVENTS
  enumeration(Event, desc="Cache events") {
    // L0 events
    Load,            desc="Load request from the home processor";
    Ifetch,          desc="I-fetch request from the home processor";
    Store,           desc="Store request from the home processor";

    Inv,             desc="Invalidate request from the L1 cache";

    // internally generated request
    L0_Replacement,  desc="L0 Replacement", format="!r";

    // requests forwarded from other processors
    Fwd_GETX,        desc="GETX from another processor";
    Fwd_GETS,        desc="GETS from another processor";
    Fwd_GET_INSTR,   desc="GET_INSTR from another processor";

    Data,            desc="Data for processor";
    Data_Exclusive,  desc="Exclusive data for processor";
    Data_Stale,      desc="Data for processor, but not for storage";

    Ack,             desc="Ack for processor";
    Ack_all,         desc="Last ack for processor";

    WB_Ack,          desc="Ack for replacement";
  }

  // TYPES

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
    State CacheState,        desc="cache state";
    DataBlock DataBlk,       desc="data for the block";
    bool Dirty, default="false",   desc="data is dirty";
  }

  // TBE fields
  structure(TBE, desc="...") {
    Addr addr,                     desc="Physical address for this TBE";
    State TBEState,                desc="Transient state";
    DataBlock DataBlk,             desc="Buffer for the data block";
    bool Dirty, default="false",   desc="data is dirty";
    int pendingAcks, default="0",  desc="number of pending acks";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }

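  // One TBE is allocated per outstanding miss (see i_allocateTBE); the table
  // size is set by m_number_of_TBEs.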
  TBETable TBEs, template="<L0Cache_TBE>", constructor="m_number_of_TBEs";

  Tick clockEdge();
  Cycles ticksToCycles(Tick t);
  void set_cache_entry(AbstractCacheEntry a);
  void unset_cache_entry();
  void set_tbe(TBE a);
  void unset_tbe();
  void wakeUpBuffers(Addr a);
  void wakeUpAllBuffers(Addr a);
  void profileMsgDelay(int virtualNetworkType, Cycles c);

  // inclusive cache returns L0 entries only
  Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
    Entry Dcache_entry := static_cast(Entry, "pointer", Dcache[addr]);
    if (is_valid(Dcache_entry)) {
      return Dcache_entry;
    }

    Entry Icache_entry := static_cast(Entry, "pointer", Icache[addr]);
    return Icache_entry;
  }

  Entry getDCacheEntry(Addr addr), return_by_pointer="yes" {
    Entry Dcache_entry := static_cast(Entry, "pointer", Dcache[addr]);
    return Dcache_entry;
  }

  Entry getICacheEntry(Addr addr), return_by_pointer="yes" {
    Entry Icache_entry := static_cast(Entry, "pointer", Icache[addr]);
    return Icache_entry;
  }

  State getState(TBE tbe, Entry cache_entry, Addr addr) {
    assert((Dcache.isTagPresent(addr) && Icache.isTagPresent(addr)) == false);

    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }

  void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
    assert((Dcache.isTagPresent(addr) && Icache.isTagPresent(addr)) == false);

    // MUST CHANGE
    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      DPRINTF(RubySlicc, "%s\n", L0Cache_State_to_permission(tbe.TBEState));
      return L0Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if (is_valid(cache_entry)) {
      DPRINTF(RubySlicc, "%s\n", L0Cache_State_to_permission(cache_entry.CacheState));
      return L0Cache_State_to_permission(cache_entry.CacheState);
    }

    DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
    return AccessPermission:NotPresent;
  }

  void functionalRead(Addr addr, Packet *pkt) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      testAndRead(addr, tbe.DataBlk, pkt);
    } else {
      testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
    }
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      num_functional_writes := num_functional_writes +
        testAndWrite(addr, tbe.DataBlk, pkt);
      return num_functional_writes;
    }

    num_functional_writes := num_functional_writes +
        testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
    return num_functional_writes;
  }

  void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L0Cache_State_to_permission(state));
    }
  }

  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid RubyRequestType");
    }
  }

  int getPendingAcks(TBE tbe) {
    return tbe.pendingAcks;
  }

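  // All requests and responses from this L0 controller go to its L1 cache
  // over bufferToL1.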
  out_port(requestNetwork_out, CoherenceMsg, bufferToL1);

  // Messages for this L0 cache from the L1 cache
  in_port(messgeBuffer_in, CoherenceMsg, bufferFromL1, rank = 1) {
    if (messgeBuffer_in.isReady(clockEdge())) {
      peek(messgeBuffer_in, CoherenceMsg, block_on="addr") {
        assert(in_msg.Dest == machineID);

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if (in_msg.Class == CoherenceClass:DATA_EXCLUSIVE) {
          trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Class == CoherenceClass:DATA) {
          trigger(Event:Data, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Class == CoherenceClass:STALE_DATA) {
          trigger(Event:Data_Stale, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Class == CoherenceClass:ACK) {
          trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Class == CoherenceClass:WB_ACK) {
          trigger(Event:WB_Ack, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Class == CoherenceClass:INV) {
          trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Class == CoherenceClass:GETX ||
                   in_msg.Class == CoherenceClass:UPGRADE) {
          // upgrade transforms to GETX due to race
          trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Class == CoherenceClass:GETS) {
          trigger(Event:Fwd_GETS, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Class == CoherenceClass:GET_INSTR) {
          trigger(Event:Fwd_GET_INSTR, in_msg.addr, cache_entry, tbe);
        } else {
          error("Invalid forwarded request type");
        }
      }
    }
  }

  // Mandatory queue between the node's CPU and its L0 caches
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank = 0) {
    if (mandatoryQueue_in.isReady(clockEdge())) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        // Check for data accesses to blocks in the I-cache and ifetches to
        // blocks in the D-cache

        if (in_msg.Type == RubyRequestType:IFETCH) {
          // *** INSTRUCTION ACCESS ***

          Entry Icache_entry := getICacheEntry(in_msg.LineAddress);
          if (is_valid(Icache_entry)) {
            // The tag is present in the L0 I-cache, so handle the request here.
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                    Icache_entry, TBEs[in_msg.LineAddress]);
          } else {

            // Check to see if it is in the OTHER L0 cache
            Entry Dcache_entry := getDCacheEntry(in_msg.LineAddress);
            if (is_valid(Dcache_entry)) {
              // The block is in the wrong L0 cache (the D-cache), so evict it first
              trigger(Event:L0_Replacement, in_msg.LineAddress,
                      Dcache_entry, TBEs[in_msg.LineAddress]);
            }

            if (Icache.cacheAvail(in_msg.LineAddress)) {
              // The L0 doesn't have the line, but there is space for it,
              // so ask the L1 for it
              trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                      Icache_entry, TBEs[in_msg.LineAddress]);
            } else {
              // No room in the L0, so make room by evicting a block.
              // Check that the line we want to evict is not locked.
              Addr addr := Icache.cacheProbe(in_msg.LineAddress);
              check_on_cache_probe(mandatoryQueue_in, addr);
              trigger(Event:L0_Replacement, addr,
                      getICacheEntry(addr),
                      TBEs[addr]);
            }
          }
        } else {

          // *** DATA ACCESS ***
          Entry Dcache_entry := getDCacheEntry(in_msg.LineAddress);
          if (is_valid(Dcache_entry)) {
            // The tag is present in the L0 D-cache, so handle the request here.
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                    Dcache_entry, TBEs[in_msg.LineAddress]);
          } else {

            // Check to see if it is in the OTHER L0 cache
            Entry Icache_entry := getICacheEntry(in_msg.LineAddress);
            if (is_valid(Icache_entry)) {
              // The block is in the wrong L0 cache (the I-cache), so evict it first
              trigger(Event:L0_Replacement, in_msg.LineAddress,
                      Icache_entry, TBEs[in_msg.LineAddress]);
            }

            if (Dcache.cacheAvail(in_msg.LineAddress)) {
              // The L0 doesn't have the line, but there is space for it,
              // so ask the L1 for it
              trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                      Dcache_entry, TBEs[in_msg.LineAddress]);
            } else {
              // No room in the L0, so make room by evicting a block.
              // Check that the line we want to evict is not locked.
              Addr addr := Dcache.cacheProbe(in_msg.LineAddress);
              check_on_cache_probe(mandatoryQueue_in, addr);
              trigger(Event:L0_Replacement, addr,
                      getDCacheEntry(addr),
                      TBEs[addr]);
            }
          }
        }
      }
    }
  }

  // ACTIONS
  action(a_issueGETS, "a", desc="Issue GETS") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestNetwork_out, CoherenceMsg, request_latency) {
        out_msg.addr := address;
        out_msg.Class := CoherenceClass:GETS;
        out_msg.Sender := machineID;
        out_msg.Dest := createMachineID(MachineType:L1Cache, version);
        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Dest);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestNetwork_out, CoherenceMsg, request_latency) {
        out_msg.addr := address;
        out_msg.Class := CoherenceClass:GETX;
        out_msg.Sender := machineID;
        DPRINTF(RubySlicc, "%s\n", machineID);
        out_msg.Dest := createMachineID(MachineType:L1Cache, version);

        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Dest);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(c_issueUPGRADE, "c", desc="Issue UPGRADE") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestNetwork_out, CoherenceMsg, request_latency) {
        out_msg.addr := address;
        out_msg.Class := CoherenceClass:UPGRADE;
        out_msg.Sender := machineID;
        out_msg.Dest := createMachineID(MachineType:L1Cache, version);

        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Dest);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(f_sendDataToL1, "f", desc="Send data to the L1 cache") {
    enqueue(requestNetwork_out, CoherenceMsg, response_latency) {
      assert(is_valid(cache_entry));
      out_msg.addr := address;
      out_msg.Class := CoherenceClass:INV_DATA;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.Sender := machineID;
      out_msg.Dest := createMachineID(MachineType:L1Cache, version);
      out_msg.MessageSize := MessageSizeType:Writeback_Data;
    }
    cache_entry.Dirty := false;
  }

  action(fi_sendInvAck, "fi", desc="Send invalidation ack to the L1 cache") {
    peek(messgeBuffer_in, CoherenceMsg) {
      enqueue(requestNetwork_out, CoherenceMsg, response_latency) {
        out_msg.addr := address;
        out_msg.Class := CoherenceClass:INV_ACK;
        out_msg.Sender := machineID;
        out_msg.Dest := createMachineID(MachineType:L1Cache, version);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(forward_eviction_to_cpu, "\cc", desc="Send eviction information to the processor") {
    if (send_evictions) {
      DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
      sequencer.evictionCallback(address);
    }
  }

  action(g_issuePUTX, "g", desc="Issue PUTX to the L1 cache") {
    enqueue(requestNetwork_out, CoherenceMsg, response_latency) {
      assert(is_valid(cache_entry));
      out_msg.addr := address;
      out_msg.Class := CoherenceClass:PUTX;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.Sender := machineID;
      out_msg.Dest := createMachineID(MachineType:L1Cache, version);

      if (cache_entry.Dirty) {
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
        out_msg.DataBlk := cache_entry.DataBlk;
      } else {
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(h_load_hit, "hd", desc="If not prefetch, notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    Dcache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk);
  }

  action(h_ifetch_hit, "hi", desc="If not prefetch, notify sequencer the ifetch completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    Icache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk);
  }

  action(hx_load_hit, "hxd", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    Dcache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk, true);
  }

  action(hx_ifetch_hit, "hxi", desc="Notify sequencer the ifetch completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    Icache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk, true);
  }

  action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    Dcache.setMRU(cache_entry);
    sequencer.writeCallback(address, cache_entry.DataBlk);
    cache_entry.Dirty := true;
  }

  action(hhx_store_hit, "\hx", desc="If not prefetch, notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    Dcache.setMRU(cache_entry);
    sequencer.writeCallback(address, cache_entry.DataBlk, true);
    cache_entry.Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE (number of invalidates=0)") {
    check_allocate(TBEs);
    assert(is_valid(cache_entry));
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.Dirty := cache_entry.Dirty;
    tbe.DataBlk := cache_entry.DataBlk;
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue(clockEdge());
  }

  action(l_popRequestQueue, "l",
         desc="Pop incoming request queue and profile the delay within this virtual network") {
    Tick delay := messgeBuffer_in.dequeue(clockEdge());
    profileMsgDelay(2, ticksToCycles(delay));
  }

  action(o_popIncomingResponseQueue, "o",
         desc="Pop incoming response queue and profile the delay within this virtual network") {
    Tick delay := messgeBuffer_in.dequeue(clockEdge());
    profileMsgDelay(1, ticksToCycles(delay));
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(messgeBuffer_in, CoherenceMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
    }
  }

  action(u_writeInstToCache, "ui", desc="Write instruction data to cache") {
    peek(messgeBuffer_in, CoherenceMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
    }
  }

  action(ff_deallocateCacheBlock, "\f",
         desc="Deallocate L0 cache block.") {
    if (Dcache.isTagPresent(address)) {
      Dcache.deallocate(address);
    } else {
      Icache.deallocate(address);
    }
    unset_cache_entry();
  }

  action(oo_allocateDCacheBlock, "\o", desc="Set L0 D-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(Dcache.allocate(address, new Entry));
    }
  }

  action(pp_allocateICacheBlock, "\p", desc="Set L0 I-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(Icache.allocate(address, new Entry));
    }
  }

  action(z_stallAndWaitMandatoryQueue, "\z", desc="Recycle CPU request queue") {
    stall_and_wait(mandatoryQueue_in, address);
  }

  action(kd_wakeUpDependents, "kd", desc="Wake up dependents") {
    wakeUpAllBuffers(address);
  }

  action(uu_profileInstMiss, "\ui", desc="Profile the demand miss") {
    ++Icache.demand_misses;
  }

  action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
    ++Icache.demand_hits;
  }

  action(uu_profileDataMiss, "\ud", desc="Profile the demand miss") {
    ++Dcache.demand_misses;
  }

  action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
    ++Dcache.demand_hits;
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************

  // Transitions for Load/Store/Replacement/WriteBack from transient states
  transition({Inst_IS, IS, IM, SM}, {Load, Ifetch, Store, L0_Replacement}) {
    z_stallAndWaitMandatoryQueue;
  }

  // Transitions from Idle
  transition(I, Load, IS) {
    oo_allocateDCacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileDataMiss;
    k_popMandatoryQueue;
  }

  transition(I, Ifetch, Inst_IS) {
    pp_allocateICacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileInstMiss;
    k_popMandatoryQueue;
  }

  transition(I, Store, IM) {
    oo_allocateDCacheBlock;
    i_allocateTBE;
    b_issueGETX;
    uu_profileDataMiss;
    k_popMandatoryQueue;
  }

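  // Invalidations that arrive while no readable copy is held (I or the
  // transient fill states) are simply acknowledged.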
  transition({I, IS, IM, Inst_IS}, Inv) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

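  // An invalidation during an upgrade loses the shared copy, so the pending
  // store must now wait for data: SM falls back to IM.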
  transition(SM, Inv, IM) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  // Transitions from Shared
  transition({S,E,M}, Load) {
    h_load_hit;
    uu_profileDataHit;
    k_popMandatoryQueue;
  }

  transition({S,E,M}, Ifetch) {
    h_ifetch_hit;
    uu_profileInstHit;
    k_popMandatoryQueue;
  }

  transition(S, Store, SM) {
    i_allocateTBE;
    c_issueUPGRADE;
    uu_profileDataMiss;
    k_popMandatoryQueue;
  }

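  // A shared block is clean, so replacing it is silent; no message is sent
  // to the L1.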
  transition(S, L0_Replacement, I) {
    forward_eviction_to_cpu;
    ff_deallocateCacheBlock;
  }

  transition(S, Inv, I) {
    forward_eviction_to_cpu;
    fi_sendInvAck;
    ff_deallocateCacheBlock;
    l_popRequestQueue;
  }

  // Transitions from Exclusive
  transition({E,M}, Store, M) {
    hh_store_hit;
    uu_profileDataHit;
    k_popMandatoryQueue;
  }

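  // Replacement from E or M issues a PUTX to the L1; data is attached only
  // if the block is dirty (see g_issuePUTX).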
  transition(E, L0_Replacement, I) {
    forward_eviction_to_cpu;
    g_issuePUTX;
    ff_deallocateCacheBlock;
  }

  transition(E, {Inv, Fwd_GETX}, I) {
    // don't send data
    forward_eviction_to_cpu;
    fi_sendInvAck;
    ff_deallocateCacheBlock;
    l_popRequestQueue;
  }

  transition(E, {Fwd_GETS, Fwd_GET_INSTR}, S) {
    f_sendDataToL1;
    l_popRequestQueue;
  }

  // Transitions from Modified
  transition(M, L0_Replacement, I) {
    forward_eviction_to_cpu;
    g_issuePUTX;
    ff_deallocateCacheBlock;
  }

  transition(M, {Inv, Fwd_GETX}, I) {
    forward_eviction_to_cpu;
    f_sendDataToL1;
    ff_deallocateCacheBlock;
    l_popRequestQueue;
  }

  transition(M, {Fwd_GETS, Fwd_GET_INSTR}, S) {
    f_sendDataToL1;
    l_popRequestQueue;
  }

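  // Fill transitions: data arriving from the L1 completes the outstanding
  // load, ifetch or store and wakes up any stalled requests.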
  transition(IS, Data, S) {
    u_writeDataToCache;
    hx_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Data_Exclusive, E) {
    u_writeDataToCache;
    hx_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

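  // Stale data satisfies the outstanding request but must not be cached, so
  // the block is deallocated immediately.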
  transition(IS, Data_Stale, I) {
    u_writeDataToCache;
    hx_load_hit;
    s_deallocateTBE;
    ff_deallocateCacheBlock;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(Inst_IS, Data, S) {
    u_writeInstToCache;
    hx_ifetch_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(Inst_IS, Data_Exclusive, E) {
    u_writeInstToCache;
    hx_ifetch_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(Inst_IS, Data_Stale, I) {
    u_writeInstToCache;
    hx_ifetch_hit;
    s_deallocateTBE;
    ff_deallocateCacheBlock;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

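  // A pending store (IM or SM) completes only when exclusive data arrives.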
  transition({IM,SM}, Data_Exclusive, M) {
    u_writeDataToCache;
    hhx_store_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }
}