// GPU_RfO-SQC.sm, revision 14184:11ac1337c5e2
/*
 * Copyright (c) 2011-2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * For use for simulation and test purposes only
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Lisa Hsu
 */

machine(MachineType:SQC, "GPU SQC (L1 I Cache)")
 : Sequencer* sequencer;
   CacheMemory * L1cache;
   int TCC_select_num_bits;
   Cycles issue_latency := 80;  // time to send data down to TCC
   Cycles l2_hit_latency := 18;

  MessageBuffer * requestFromSQC, network="To", virtual_network="1", vnet_type="request";
  MessageBuffer * responseFromSQC, network="To", virtual_network="3", vnet_type="response";
  MessageBuffer * unblockFromCore, network="To", virtual_network="5", vnet_type="unblock";

  MessageBuffer * probeToSQC, network="From", virtual_network="1", vnet_type="request";
  MessageBuffer * responseToSQC, network="From", virtual_network="3", vnet_type="response";

  MessageBuffer * mandatoryQueue;
{
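  // The SQC is the GPU's L1 instruction cache. It only services instruction
  // fetches (the mandatory queue asserts RubyRequestType:IFETCH), so blocks
  // are held read-only: the only stable states are I and S. Misses issue a
  // RdBlkS to the TCC directory, and replacements victimize the block with a
  // VicClean request.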
  state_declaration(State, desc="SQC Cache States", default="SQC_State_I") {
    I, AccessPermission:Invalid, desc="Invalid";
    S, AccessPermission:Read_Only, desc="Shared";

    I_S, AccessPermission:Busy, desc="Invalid, issued RdBlkS, have not seen response yet";
    S_I, AccessPermission:Read_Only, desc="L1 replacement, waiting for clean WB ack";
    I_C, AccessPermission:Invalid, desc="Invalid, waiting for WBAck from TCCdir for canceled WB";
  }

  enumeration(Event, desc="SQC Events") {
    // Core initiated
    Fetch,          desc="Fetch";

    // TCC initiated
    TCC_AckS,        desc="TCC Ack to Core Request";
    TCC_AckWB,       desc="TCC Ack for WB";
    TCC_NackWB,       desc="TCC Nack for WB";

    // Mem sys initiated
    Repl,           desc="Replacing block from cache";

    // Probe Events
    PrbInvData,         desc="probe, return M data";
    PrbInv,             desc="probe, no need for data";
    PrbShrData,         desc="probe downgrade, return data";
  }

  enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
    DataArrayRead,    desc="Read the data array";
    DataArrayWrite,   desc="Write the data array";
    TagArrayRead,     desc="Read the tag array";
    TagArrayWrite,    desc="Write the tag array";
  }


  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState,           desc="cache state";
    bool Dirty,                 desc="Is the data dirty (diff than memory)?";
    DataBlock DataBlk,          desc="data for the block";
    bool FromL2, default="false", desc="block just moved from L2";
  }

  structure(TBE, desc="...") {
    State TBEState,             desc="Transient state";
    DataBlock DataBlk,       desc="data for the block, required for concurrent writebacks";
    bool Dirty,              desc="Is the data dirty (different than memory)?";
    int NumPendingMsgs,      desc="Number of acks/data messages that this processor is waiting for";
    bool Shared,             desc="Victim hit by shared probe";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }

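  // TBEs tracks blocks with a transaction in flight. TCC_select_low_bit and
  // TCC_select_num_bits name the address bits that mapAddressToRange() uses
  // to pick the destination TCC/TCCdir bank; by default the interleaving
  // starts just above the block offset (RubySystem::getBlockSizeBits()).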
  TBETable TBEs, template="<SQC_TBE>", constructor="m_number_of_TBEs";
  int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";

  Tick clockEdge();
  Tick cyclesToTicks(Cycles c);

  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();
  void wakeUpAllBuffers();
  void wakeUpBuffers(Addr a);
  Cycles curCycle();

  // Internal functions
  Entry getCacheEntry(Addr address), return_by_pointer="yes" {
    Entry cache_entry := static_cast(Entry, "pointer", L1cache.lookup(address));
    return cache_entry;
  }

  DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
    TBE tbe := TBEs.lookup(addr);
    if(is_valid(tbe)) {
      return tbe.DataBlk;
    } else {
      return getCacheEntry(addr).DataBlk;
    }
  }

  State getState(TBE tbe, Entry cache_entry, Addr addr) {
    if(is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }

  void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs.lookup(addr);
    if(is_valid(tbe)) {
      return SQC_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      return SQC_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(SQC_State_to_permission(state));
    }
  }

  void functionalRead(Addr addr, Packet *pkt) {
    TBE tbe := TBEs.lookup(addr);
    if(is_valid(tbe)) {
      testAndRead(addr, tbe.DataBlk, pkt);
    } else {
      functionalMemoryRead(pkt);
    }
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    TBE tbe := TBEs.lookup(addr);
    if(is_valid(tbe)) {
      num_functional_writes := num_functional_writes +
            testAndWrite(addr, tbe.DataBlk, pkt);
    }

    num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
    return num_functional_writes;
  }

  void recordRequestType(RequestType request_type, Addr addr) {
    if (request_type == RequestType:DataArrayRead) {
        L1cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
    } else if (request_type == RequestType:DataArrayWrite) {
        L1cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
    } else if (request_type == RequestType:TagArrayRead) {
        L1cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
    } else if (request_type == RequestType:TagArrayWrite) {
        L1cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
    }
  }

  bool checkResourceAvailable(RequestType request_type, Addr addr) {
    if (request_type == RequestType:DataArrayRead) {
      return L1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
    } else if (request_type == RequestType:DataArrayWrite) {
      return L1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
    } else if (request_type == RequestType:TagArrayRead) {
      return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
    } else if (request_type == RequestType:TagArrayWrite) {
      return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
    } else {
      error("Invalid RequestType type in checkResourceAvailable");
      return true;
    }
  }

  // Out Ports

  out_port(requestNetwork_out, CPURequestMsg, requestFromSQC);
  out_port(responseNetwork_out, ResponseMsg, responseFromSQC);
  out_port(unblockNetwork_out, UnblockMsg, unblockFromCore);

  // In Ports

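  // Probes arrive from the TCC directory. PrbInv invalidates the block and
  // returns data only when ReturnData is set; PrbDowngrade always expects
  // data back and maps to the PrbShrData event.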
  in_port(probeNetwork_in, TDProbeRequestMsg, probeToSQC) {
    if (probeNetwork_in.isReady(clockEdge())) {
      peek(probeNetwork_in, TDProbeRequestMsg, block_on="addr") {
        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs.lookup(in_msg.addr);

        if (in_msg.Type == ProbeRequestType:PrbInv) {
          if (in_msg.ReturnData) {
            trigger(Event:PrbInvData, in_msg.addr, cache_entry, tbe);
          } else {
            trigger(Event:PrbInv, in_msg.addr, cache_entry, tbe);
          }
        } else if (in_msg.Type == ProbeRequestType:PrbDowngrade) {
          assert(in_msg.ReturnData);
          trigger(Event:PrbShrData, in_msg.addr, cache_entry, tbe);
        }
      }
    }
  }

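  // Responses from the TCC side: TDSysResp delivers the fetched block (the
  // SQC only ever accepts it in CoherenceState:Shared), while TDSysWBAck and
  // TDSysWBNack acknowledge or reject a victim writeback.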
  in_port(responseToSQC_in, ResponseMsg, responseToSQC) {
    if (responseToSQC_in.isReady(clockEdge())) {
      peek(responseToSQC_in, ResponseMsg, block_on="addr") {

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs.lookup(in_msg.addr);

        if (in_msg.Type == CoherenceResponseType:TDSysResp) {
          if (in_msg.State == CoherenceState:Shared) {
            trigger(Event:TCC_AckS, in_msg.addr, cache_entry, tbe);
          } else {
            error("SQC should not receive TDSysResp other than CoherenceState:Shared");
          }
        } else if (in_msg.Type == CoherenceResponseType:TDSysWBAck) {
          trigger(Event:TCC_AckWB, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:TDSysWBNack) {
          trigger(Event:TCC_NackWB, in_msg.addr, cache_entry, tbe);
        } else {
          error("Unexpected Response Message to Core");
        }
      }
    }
  }

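  // Instruction fetches from the core. A fetch proceeds if the block is
  // resident or a way is free; otherwise the victim chosen by cacheProbe()
  // is replaced first and the fetch stays queued until space is available.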
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
    if (mandatoryQueue_in.isReady(clockEdge())) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
        Entry cache_entry := getCacheEntry(in_msg.LineAddress);
        TBE tbe := TBEs.lookup(in_msg.LineAddress);

        assert(in_msg.Type == RubyRequestType:IFETCH);
        if (is_valid(cache_entry) || L1cache.cacheAvail(in_msg.LineAddress)) {
          trigger(Event:Fetch, in_msg.LineAddress, cache_entry, tbe);
        } else {
          Addr victim := L1cache.cacheProbe(in_msg.LineAddress);
          trigger(Event:Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
        }
      }
    }
  }

  // Actions

  action(ic_invCache, "ic", desc="invalidate cache") {
    if(is_valid(cache_entry)) {
      L1cache.deallocate(address);
    }
    unset_cache_entry();
  }

  action(nS_issueRdBlkS, "nS", desc="Issue RdBlkS") {
    enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:RdBlkS;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
                              TCC_select_low_bit, TCC_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := curCycle();
    }
  }

  action(vc_victim, "vc", desc="Victimize E/S Data") {
    enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
                              TCC_select_low_bit, TCC_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.Type := CoherenceRequestType:VicClean;
      out_msg.InitialRequestTime := curCycle();
      if (cache_entry.CacheState == State:S) {
        out_msg.Shared := true;
      } else {
        out_msg.Shared := false;
      }
    }
  }

  action(a_allocate, "a", desc="allocate block") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1cache.allocate(address, new Entry));
    }
  }

  action(t_allocateTBE, "t", desc="allocate TBE Entry") {
    check_allocate(TBEs);
    assert(is_valid(cache_entry));
    TBEs.allocate(address);
    set_tbe(TBEs.lookup(address));
    tbe.DataBlk := cache_entry.DataBlk;  // Data only used for WBs
    tbe.Dirty := cache_entry.Dirty;
    tbe.Shared := false;
  }

  action(d_deallocateTBE, "d", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(p_popMandatoryQueue, "pm", desc="Pop Mandatory Queue") {
    mandatoryQueue_in.dequeue(clockEdge());
  }

  action(pr_popResponseQueue, "pr", desc="Pop Response Queue") {
    responseToSQC_in.dequeue(clockEdge());
  }

  action(pp_popProbeQueue, "pp", desc="pop probe queue") {
    probeNetwork_in.dequeue(clockEdge());
  }

  action(l_loadDone, "l", desc="local load done") {
    assert(is_valid(cache_entry));
    sequencer.readCallback(address, cache_entry.DataBlk,
                           false, MachineType:L1Cache);
    APPEND_TRANSITION_COMMENT(cache_entry.DataBlk);
  }

  action(xl_loadDone, "xl", desc="remote load done") {
    peek(responseToSQC_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      sequencer.readCallback(address,
                             cache_entry.DataBlk,
                             false,
                             machineIDToMachineType(in_msg.Sender),
                             in_msg.InitialRequestTime,
                             in_msg.ForwardRequestTime,
                             in_msg.ProbeRequestStartTime);
      APPEND_TRANSITION_COMMENT(cache_entry.DataBlk);
    }
  }

  action(w_writeCache, "w", desc="write data to cache") {
    peek(responseToSQC_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;
    }
  }

  action(ss_sendStaleNotification, "ss", desc="stale data; nothing to writeback") {
    peek(responseToSQC_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:StaleNotif;
        out_msg.Sender := machineID;
        out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
                                TCC_select_low_bit, TCC_select_num_bits));
        out_msg.MessageSize := MessageSizeType:Response_Control;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(wb_data, "wb", desc="write back data") {
    peek(responseToSQC_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:CPUData;
        out_msg.Sender := machineID;
        out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
                                TCC_select_low_bit, TCC_select_num_bits));
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (tbe.Shared) {
          out_msg.NbReqShared := true;
        } else {
          out_msg.NbReqShared := false;
        }
        out_msg.State := CoherenceState:Shared; // faux info
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(pi_sendProbeResponseInv, "pi", desc="send probe ack inv, no data") {
    enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:CPUPrbResp;  // L3 and CPUs respond in same way to probes
      out_msg.Sender := machineID;
      // will this always be ok? probably not for multisocket
      out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
                              TCC_select_low_bit, TCC_select_num_bits));
      out_msg.Dirty := false;
      out_msg.Hit := false;
      out_msg.Ntsl := true;
      out_msg.State := CoherenceState:NA;
      out_msg.MessageSize := MessageSizeType:Response_Control;
    }
  }

  action(pim_sendProbeResponseInvMs, "pim", desc="send probe ack inv, no data") {
    enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:CPUPrbResp;  // L3 and CPUs respond in same way to probes
      out_msg.Sender := machineID;
      // will this always be ok? probably not for multisocket
      out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
                              TCC_select_low_bit, TCC_select_num_bits));
      out_msg.Dirty := false;
      out_msg.Ntsl := true;
      out_msg.Hit := false;
      out_msg.State := CoherenceState:NA;
      out_msg.MessageSize := MessageSizeType:Response_Control;
    }
  }

  action(prm_sendProbeResponseMiss, "prm", desc="send probe ack PrbShrData, no data") {
    enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:CPUPrbResp;  // L3 and CPUs respond in same way to probes
      out_msg.Sender := machineID;
      // will this always be ok? probably not for multisocket
      out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
                              TCC_select_low_bit, TCC_select_num_bits));
      out_msg.Dirty := false;  // only true if sending back data i think
      out_msg.Hit := false;
      out_msg.Ntsl := false;
      out_msg.State := CoherenceState:NA;
      out_msg.MessageSize := MessageSizeType:Response_Control;
    }
  }

  action(pd_sendProbeResponseData, "pd", desc="send probe ack, with data") {
    enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
      assert(is_valid(cache_entry) || is_valid(tbe));
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:CPUPrbResp;
      out_msg.Sender := machineID;
      // will this always be ok? probably not for multisocket
      out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
                              TCC_select_low_bit, TCC_select_num_bits));
      out_msg.DataBlk := getDataBlock(address);
      if (is_valid(tbe)) {
        out_msg.Dirty := tbe.Dirty;
      } else {
        out_msg.Dirty := cache_entry.Dirty;
      }
      out_msg.Hit := true;
      out_msg.State := CoherenceState:NA;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(pdm_sendProbeResponseDataMs, "pdm", desc="send probe ack, with data") {
    enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
      assert(is_valid(cache_entry) || is_valid(tbe));
      assert(is_valid(cache_entry));
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:CPUPrbResp;
      out_msg.Sender := machineID;
      // will this always be ok? probably not for multisocket
      out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
                              TCC_select_low_bit, TCC_select_num_bits));
      out_msg.DataBlk := getDataBlock(address);
      if (is_valid(tbe)) {
        out_msg.Dirty := tbe.Dirty;
      } else {
        out_msg.Dirty := cache_entry.Dirty;
      }
      out_msg.Hit := true;
      out_msg.State := CoherenceState:NA;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(sf_setSharedFlip, "sf", desc="hit by shared probe, status may be different") {
    assert(is_valid(tbe));
    tbe.Shared := true;
  }

  action(uu_sendUnblock, "uu", desc="state changed, unblock") {
    enqueue(unblockNetwork_out, UnblockMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
                              TCC_select_low_bit, TCC_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }

  action(yy_recycleProbeQueue, "yy", desc="recycle probe queue") {
    probeNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

  action(zz_recycleMandatoryQueue, "\z", desc="recycle mandatory queue") {
    mandatoryQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

  // Transitions

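  // Fetches that find the block in a transient state are recycled on the
  // mandatory queue; probes are answered immediately from every state,
  // including the transients.
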
  // transitions from base
  transition(I, Fetch, I_S) {TagArrayRead, TagArrayWrite} {
    a_allocate;
    nS_issueRdBlkS;
    p_popMandatoryQueue;
  }

  // simple hit transitions
  transition(S, Fetch) {TagArrayRead, DataArrayRead} {
    l_loadDone;
    p_popMandatoryQueue;
  }

  // recycles from transients
  transition({I_S, S_I, I_C}, {Fetch, Repl}) {} {
    zz_recycleMandatoryQueue;
  }

  transition(S, Repl, S_I) {TagArrayRead} {
    t_allocateTBE;
    vc_victim;
    ic_invCache;
  }

  // TCC event
  transition(I_S, TCC_AckS, S) {DataArrayRead, DataArrayWrite} {
    w_writeCache;
    xl_loadDone;
    uu_sendUnblock;
    pr_popResponseQueue;
  }

  transition(S_I, TCC_NackWB, I){TagArrayWrite} {
    d_deallocateTBE;
    pr_popResponseQueue;
  }

  transition(S_I, TCC_AckWB, I) {TagArrayWrite} {
    wb_data;
    d_deallocateTBE;
    pr_popResponseQueue;
  }

  transition(I_C, TCC_AckWB, I){TagArrayWrite} {
    ss_sendStaleNotification;
    d_deallocateTBE;
    pr_popResponseQueue;
  }

  transition(I_C, TCC_NackWB, I) {TagArrayWrite} {
    d_deallocateTBE;
    pr_popResponseQueue;
  }

  // Probe transitions
  transition({S, I}, PrbInvData, I) {TagArrayRead, TagArrayWrite} {
    pd_sendProbeResponseData;
    ic_invCache;
    pp_popProbeQueue;
  }

  transition(I_C, PrbInvData, I_C) {
    pi_sendProbeResponseInv;
    ic_invCache;
    pp_popProbeQueue;
  }

  transition({S, I}, PrbInv, I) {TagArrayRead, TagArrayWrite} {
    pi_sendProbeResponseInv;
    ic_invCache;
    pp_popProbeQueue;
  }

  transition({S}, PrbShrData, S) {DataArrayRead} {
    pd_sendProbeResponseData;
    pp_popProbeQueue;
  }

  transition({I, I_C}, PrbShrData) {TagArrayRead} {
    prm_sendProbeResponseMiss;
    pp_popProbeQueue;
  }

  transition(I_C, PrbInv, I_C){
    pi_sendProbeResponseInv;
    ic_invCache;
    pp_popProbeQueue;
  }

  transition(I_S, {PrbInv, PrbInvData}) {} {
    pi_sendProbeResponseInv;
    ic_invCache;
    a_allocate;  // but make sure there is room for incoming data when it arrives
    pp_popProbeQueue;
  }

  transition(I_S, PrbShrData) {} {
    prm_sendProbeResponseMiss;
    pp_popProbeQueue;
  }

  transition(S_I, PrbInvData, I_C) {TagArrayWrite} {
    pi_sendProbeResponseInv;
    ic_invCache;
    pp_popProbeQueue;
  }

  transition(S_I, PrbInv, I_C) {TagArrayWrite} {
    pi_sendProbeResponseInv;
    ic_invCache;
    pp_popProbeQueue;
  }

  transition(S_I, PrbShrData) {DataArrayRead} {
    pd_sendProbeResponseData;
    sf_setSharedFlip;
    pp_popProbeQueue;
  }
}