GPU_VIPER-SQC.sm revision 14184:11ac1337c5e2
/*
 * Copyright (c) 2012-2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * For use for simulation and test purposes only
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Blake Hechtman
 */

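// Overview: the SQC is the GPU's L1 instruction cache controller in the
// VIPER protocol. It is strictly read-only: there are only two stable
// states (I and V), fetched lines are never dirtied, and misses are
// satisfied by the shared TCC (the GPU L2). Since nothing is ever written
// back, this machine has no writeback path and no probe handling.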
machine(MachineType:SQC, "GPU SQC (L1 I Cache)")
 : Sequencer* sequencer;
   CacheMemory * L1cache;
   int TCC_select_num_bits;
   Cycles issue_latency := 80;  // time to send a request down to the TCC
   Cycles l2_hit_latency := 18; // for 1MB L2, 20 for 2MB

  MessageBuffer * requestFromSQC, network="To", virtual_network="1", vnet_type="request";

  MessageBuffer * probeToSQC, network="From", virtual_network="1", vnet_type="request";
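  // NOTE: no in_port below drains probeToSQC; this controller defines no
  // probe handler, consistent with the SQC holding only clean, read-only
  // instruction lines.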
  MessageBuffer * responseToSQC, network="From", virtual_network="3", vnet_type="response";

  MessageBuffer * mandatoryQueue;
{
  state_declaration(State, desc="SQC Cache States", default="SQC_State_I") {
    I, AccessPermission:Invalid, desc="Invalid";
    V, AccessPermission:Read_Only, desc="Valid";
  }

  enumeration(Event, desc="SQC Events") {
    // Core initiated
    Fetch,          desc="Fetch";
    // Mem sys initiated
    Repl,           desc="Replacing block from cache";
    Data,           desc="Received Data";
  }

  enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
    DataArrayRead,    desc="Read the data array";
    DataArrayWrite,   desc="Write the data array";
    TagArrayRead,     desc="Read the tag array";
    TagArrayWrite,    desc="Write the tag array";
  }

  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState,           desc="cache state";
    bool Dirty,                 desc="Is the data dirty (different from memory)?";
    DataBlock DataBlk,          desc="data for the block";
    bool FromL2, default="false", desc="block just moved from L2";
  }

  structure(TBE, desc="...") {
    State TBEState,          desc="Transient state";
    DataBlock DataBlk,       desc="data for the block, required for concurrent writebacks";
    bool Dirty,              desc="Is the data dirty (different from memory)?";
    int NumPendingMsgs,      desc="Number of acks/data messages that this processor is waiting for";
    bool Shared,             desc="Victim hit by shared probe";
  }
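  // NOTE: no action in this file ever allocates a TBE, so TBEs.lookup()
  // always returns an invalid entry; the table (and the TBE fields above)
  // appear to be kept for interface parity with the other VIPER controllers.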

  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }

  TBETable TBEs, template="<SQC_TBE>", constructor="m_number_of_TBEs";
  int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";

  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();
  void wakeUpAllBuffers();
  void wakeUpBuffers(Addr a);
  Cycles curCycle();
  Tick clockEdge();

  // Internal functions
  Entry getCacheEntry(Addr address), return_by_pointer="yes" {
    Entry cache_entry := static_cast(Entry, "pointer", L1cache.lookup(address));
    return cache_entry;
  }

  DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
    TBE tbe := TBEs.lookup(addr);
    if (is_valid(tbe)) {
      return tbe.DataBlk;
    } else {
      return getCacheEntry(addr).DataBlk;
    }
  }

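  // State is resolved TBE-first: an in-flight transient state overrides
  // whatever the cache entry says; a miss on both means Invalid.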
  State getState(TBE tbe, Entry cache_entry, Addr addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }

  void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

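  // Functional accesses check the TBE first; with no TBE in flight they
  // fall through to memory, which is safe here because SQC lines are always
  // clean copies (Dirty is never set to true anywhere in this controller).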
  void functionalRead(Addr addr, Packet *pkt) {
    TBE tbe := TBEs.lookup(addr);
    if (is_valid(tbe)) {
      testAndRead(addr, tbe.DataBlk, pkt);
    } else {
      functionalMemoryRead(pkt);
    }
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    TBE tbe := TBEs.lookup(addr);
    if (is_valid(tbe)) {
      num_functional_writes := num_functional_writes +
            testAndWrite(addr, tbe.DataBlk, pkt);
    }

    num_functional_writes := num_functional_writes +
        functionalMemoryWrite(pkt);
    return num_functional_writes;
  }

  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs.lookup(addr);
    if (is_valid(tbe)) {
      return SQC_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if (is_valid(cache_entry)) {
      return SQC_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(SQC_State_to_permission(state));
    }
  }

  void recordRequestType(RequestType request_type, Addr addr) {
    if (request_type == RequestType:DataArrayRead) {
      L1cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
    } else if (request_type == RequestType:DataArrayWrite) {
      L1cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
    } else if (request_type == RequestType:TagArrayRead) {
      L1cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
    } else if (request_type == RequestType:TagArrayWrite) {
      L1cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
    }
  }

  bool checkResourceAvailable(RequestType request_type, Addr addr) {
    if (request_type == RequestType:DataArrayRead) {
      return L1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
    } else if (request_type == RequestType:DataArrayWrite) {
      return L1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
    } else if (request_type == RequestType:TagArrayRead) {
      return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
    } else if (request_type == RequestType:TagArrayWrite) {
      return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
    } else {
      error("Invalid RequestType in checkResourceAvailable");
      return true;
    }
  }

  // Out Ports

  out_port(requestNetwork_out, CPURequestMsg, requestFromSQC);

  // In Ports

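  // Responses from the TCC. A TDSysResp fills the line (Event:Data) when the
  // block is present or a way is free; otherwise the set's victim is evicted
  // first (Event:Repl) while the response remains queued.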
  in_port(responseToSQC_in, ResponseMsg, responseToSQC) {
    if (responseToSQC_in.isReady(clockEdge())) {
      peek(responseToSQC_in, ResponseMsg, block_on="addr") {

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs.lookup(in_msg.addr);

        if (in_msg.Type == CoherenceResponseType:TDSysResp) {
          if (is_valid(cache_entry) || L1cache.cacheAvail(in_msg.addr)) {
            trigger(Event:Data, in_msg.addr, cache_entry, tbe);
          } else {
            Addr victim := L1cache.cacheProbe(in_msg.addr);
            trigger(Event:Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
          }
        } else {
          error("Unexpected Response Message to Core");
        }
      }
    }
  }

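  // Fetch requests from the core's sequencer. The SQC is an instruction
  // cache, so IFETCH is the only legal request type.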
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
    if (mandatoryQueue_in.isReady(clockEdge())) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
        Entry cache_entry := getCacheEntry(in_msg.LineAddress);
        TBE tbe := TBEs.lookup(in_msg.LineAddress);

        assert(in_msg.Type == RubyRequestType:IFETCH);
        trigger(Event:Fetch, in_msg.LineAddress, cache_entry, tbe);
      }
    }
  }

  // Actions

  action(ic_invCache, "ic", desc="invalidate cache") {
    if (is_valid(cache_entry)) {
      L1cache.deallocate(address);
    }
    unset_cache_entry();
  }

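  // NOTE: despite the action name, the request type issued is RdBlk, not
  // RdBlkS; either way the fetch only needs a readable copy from the TCC.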
  action(nS_issueRdBlkS, "nS", desc="Issue RdBlkS") {
    enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:RdBlk;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:TCC,
                              TCC_select_low_bit, TCC_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := curCycle();
    }
  }

  action(a_allocate, "a", desc="allocate block") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1cache.allocate(address, new Entry));
    }
  }

  action(p_popMandatoryQueue, "pm", desc="Pop Mandatory Queue") {
    mandatoryQueue_in.dequeue(clockEdge());
  }

  action(pr_popResponseQueue, "pr", desc="Pop Response Queue") {
    responseToSQC_in.dequeue(clockEdge());
  }

  action(l_loadDone, "l", desc="local load done") {
    assert(is_valid(cache_entry));
    sequencer.readCallback(address, cache_entry.DataBlk, false, MachineType:L1Cache);
    APPEND_TRANSITION_COMMENT(cache_entry.DataBlk);
  }

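  // Fills arrive clean and stay clean: the SQC never writes its lines, so
  // Dirty is unconditionally cleared here.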
  action(w_writeCache, "w", desc="write data to cache") {
    peek(responseToSQC_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := false;
    }
  }

  // Transitions

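  // The full protocol is four transitions: a fetch miss issues a read and
  // stays in I; the returned data fills the line (I -> V); a fetch hit is
  // serviced locally in V; and a replacement invalidates from either state.
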
  // transitions from base
  transition({I, V}, Repl, I) {TagArrayRead, TagArrayWrite} {
    ic_invCache;
  }

  transition(I, Data, V) {TagArrayRead, TagArrayWrite, DataArrayRead} {
    a_allocate;
    w_writeCache;
    l_loadDone;
    pr_popResponseQueue;
  }

  transition(I, Fetch) {TagArrayRead, TagArrayWrite} {
    nS_issueRdBlkS;
    p_popMandatoryQueue;
  }

  // simple hit transitions
  transition(V, Fetch) {TagArrayRead, DataArrayRead} {
    l_loadDone;
    p_popMandatoryQueue;
  }
}