/*
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

32#ifndef __CPU_O3_LSQ_UNIT_HH__
33#define __CPU_O3_LSQ_UNIT_HH__
34
35#include <algorithm>
36#include <map>
37#include <queue>
38
39#include "arch/faults.hh"
40#include "config/full_system.hh"
41#include "base/hashmap.hh"
42#include "cpu/inst_seq.hh"
43#include "mem/packet.hh"
44#include "mem/port.hh"
45
46/**
47 * Class that implements the actual LQ and SQ for each specific
48 * thread. Both are circular queues; load entries are freed upon
49 * committing, while store entries are freed once they writeback. The
50 * LSQUnit tracks if there are memory ordering violations, and also
51 * detects partial load to store forwarding cases (a store only has
52 * part of a load's data) that requires the load to wait until the
53 * store writes back. In the former case it holds onto the instruction
54 * until the dependence unit looks at it, and in the latter it stalls
55 * the LSQ until the store writes back. At that point the load is
56 * replayed.
57 */
58template <class Impl>
59class LSQUnit {
60 protected:
61 typedef TheISA::IntReg IntReg;
62 public:
63 typedef typename Impl::Params Params;
64 typedef typename Impl::FullCPU FullCPU;
65 typedef typename Impl::DynInstPtr DynInstPtr;
66 typedef typename Impl::CPUPol::IEW IEW;
67 typedef typename Impl::CPUPol::IssueStruct IssueStruct;
68
69 public:
70 /** Constructs an LSQ unit. init() must be called prior to use. */
71 LSQUnit();
72
73 /** Initializes the LSQ unit with the specified number of entries. */
74 void init(Params *params, unsigned maxLQEntries,
75 unsigned maxSQEntries, unsigned id);
76
77 /** Returns the name of the LSQ unit. */
78 std::string name() const;
79
1/*
2 * Copyright (c) 2004-2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Kevin Lim
29 * Korey Sewell
30 */
31
32#ifndef __CPU_O3_LSQ_UNIT_HH__
33#define __CPU_O3_LSQ_UNIT_HH__
34
35#include <algorithm>
36#include <map>
37#include <queue>
38
39#include "arch/faults.hh"
40#include "config/full_system.hh"
41#include "base/hashmap.hh"
42#include "cpu/inst_seq.hh"
43#include "mem/packet.hh"
44#include "mem/port.hh"
45
46/**
47 * Class that implements the actual LQ and SQ for each specific
48 * thread. Both are circular queues; load entries are freed upon
49 * committing, while store entries are freed once they writeback. The
50 * LSQUnit tracks if there are memory ordering violations, and also
51 * detects partial load to store forwarding cases (a store only has
52 * part of a load's data) that requires the load to wait until the
53 * store writes back. In the former case it holds onto the instruction
54 * until the dependence unit looks at it, and in the latter it stalls
55 * the LSQ until the store writes back. At that point the load is
56 * replayed.
57 */
58template <class Impl>
59class LSQUnit {
60 protected:
61 typedef TheISA::IntReg IntReg;
62 public:
63 typedef typename Impl::Params Params;
64 typedef typename Impl::FullCPU FullCPU;
65 typedef typename Impl::DynInstPtr DynInstPtr;
66 typedef typename Impl::CPUPol::IEW IEW;
67 typedef typename Impl::CPUPol::IssueStruct IssueStruct;
68
69 public:
70 /** Constructs an LSQ unit. init() must be called prior to use. */
71 LSQUnit();
72
73 /** Initializes the LSQ unit with the specified number of entries. */
74 void init(Params *params, unsigned maxLQEntries,
75 unsigned maxSQEntries, unsigned id);
76
77 /** Returns the name of the LSQ unit. */
78 std::string name() const;
79
80 /** Registers statistics. */
81 void regStats();
82
80 /** Sets the CPU pointer. */
81 void setCPU(FullCPU *cpu_ptr);
82
83 /** Sets the IEW stage pointer. */
84 void setIEW(IEW *iew_ptr)
85 { iewStage = iew_ptr; }
86
87 /** Switches out LSQ unit. */
88 void switchOut();
89
90 /** Takes over from another CPU's thread. */
91 void takeOverFrom();
92
93 /** Returns if the LSQ is switched out. */
94 bool isSwitchedOut() { return switchedOut; }
95
96 /** Ticks the LSQ unit, which in this case only resets the number of
97 * used cache ports.
98 * @todo: Move the number of used ports up to the LSQ level so it can
99 * be shared by all LSQ units.
100 */
101 void tick() { usedPorts = 0; }
102
103 /** Inserts an instruction. */
104 void insert(DynInstPtr &inst);
105 /** Inserts a load instruction. */
106 void insertLoad(DynInstPtr &load_inst);
107 /** Inserts a store instruction. */
108 void insertStore(DynInstPtr &store_inst);
109
110 /** Executes a load instruction. */
111 Fault executeLoad(DynInstPtr &inst);
112
113 Fault executeLoad(int lq_idx) { panic("Not implemented"); return NoFault; }
114 /** Executes a store instruction. */
115 Fault executeStore(DynInstPtr &inst);
116
117 /** Commits the head load. */
118 void commitLoad();
119 /** Commits loads older than a specific sequence number. */
120 void commitLoads(InstSeqNum &youngest_inst);
121
122 /** Commits stores older than a specific sequence number. */
123 void commitStores(InstSeqNum &youngest_inst);
124
125 /** Writes back stores. */
126 void writebackStores();
127
128 void completeDataAccess(PacketPtr pkt);
129
83 /** Sets the CPU pointer. */
84 void setCPU(FullCPU *cpu_ptr);
85
86 /** Sets the IEW stage pointer. */
87 void setIEW(IEW *iew_ptr)
88 { iewStage = iew_ptr; }
89
90 /** Switches out LSQ unit. */
91 void switchOut();
92
93 /** Takes over from another CPU's thread. */
94 void takeOverFrom();
95
96 /** Returns if the LSQ is switched out. */
97 bool isSwitchedOut() { return switchedOut; }
98
99 /** Ticks the LSQ unit, which in this case only resets the number of
100 * used cache ports.
101 * @todo: Move the number of used ports up to the LSQ level so it can
102 * be shared by all LSQ units.
103 */
104 void tick() { usedPorts = 0; }
105
106 /** Inserts an instruction. */
107 void insert(DynInstPtr &inst);
108 /** Inserts a load instruction. */
109 void insertLoad(DynInstPtr &load_inst);
110 /** Inserts a store instruction. */
111 void insertStore(DynInstPtr &store_inst);
112
113 /** Executes a load instruction. */
114 Fault executeLoad(DynInstPtr &inst);
115
116 Fault executeLoad(int lq_idx) { panic("Not implemented"); return NoFault; }
117 /** Executes a store instruction. */
118 Fault executeStore(DynInstPtr &inst);
119
120 /** Commits the head load. */
121 void commitLoad();
122 /** Commits loads older than a specific sequence number. */
123 void commitLoads(InstSeqNum &youngest_inst);
124
125 /** Commits stores older than a specific sequence number. */
126 void commitStores(InstSeqNum &youngest_inst);
127
128 /** Writes back stores. */
129 void writebackStores();
130
131 void completeDataAccess(PacketPtr pkt);
132
130 // @todo: Include stats in the LSQ unit.
131 //void regStats();
132
133 /** Clears all the entries in the LQ. */
134 void clearLQ();
135
136 /** Clears all the entries in the SQ. */
137 void clearSQ();
138
139 /** Resizes the LQ to a given size. */
140 void resizeLQ(unsigned size);
141
142 /** Resizes the SQ to a given size. */
143 void resizeSQ(unsigned size);
144
145 /** Squashes all instructions younger than a specific sequence number. */
146 void squash(const InstSeqNum &squashed_num);
147
148 /** Returns if there is a memory ordering violation. Value is reset upon
149 * call to getMemDepViolator().
150 */
151 bool violation() { return memDepViolator; }
152
153 /** Returns the memory ordering violator. */
154 DynInstPtr getMemDepViolator();
155
156 /** Returns if a load became blocked due to the memory system. */
157 bool loadBlocked()
158 { return isLoadBlocked; }
159
160 /** Clears the signal that a load became blocked. */
161 void clearLoadBlocked()
162 { isLoadBlocked = false; }
163
164 /** Returns if the blocked load was handled. */
165 bool isLoadBlockedHandled()
166 { return loadBlockedHandled; }
167
168 /** Records the blocked load as being handled. */
169 void setLoadBlockedHandled()
170 { loadBlockedHandled = true; }
171
172 /** Returns the number of free entries (min of free LQ and SQ entries). */
173 unsigned numFreeEntries();
174
175 /** Returns the number of loads ready to execute. */
176 int numLoadsReady();
177
178 /** Returns the number of loads in the LQ. */
179 int numLoads() { return loads; }
180
181 /** Returns the number of stores in the SQ. */
182 int numStores() { return stores; }
183
184 /** Returns if either the LQ or SQ is full. */
185 bool isFull() { return lqFull() || sqFull(); }
186
187 /** Returns if the LQ is full. */
188 bool lqFull() { return loads >= (LQEntries - 1); }
189
190 /** Returns if the SQ is full. */
191 bool sqFull() { return stores >= (SQEntries - 1); }
192
193 /** Returns the number of instructions in the LSQ. */
194 unsigned getCount() { return loads + stores; }
195
196 /** Returns if there are any stores to writeback. */
197 bool hasStoresToWB() { return storesToWB; }
198
199 /** Returns the number of stores to writeback. */
200 int numStoresToWB() { return storesToWB; }
201
202 /** Returns if the LSQ unit will writeback on this cycle. */
203 bool willWB() { return storeQueue[storeWBIdx].canWB &&
204 !storeQueue[storeWBIdx].completed &&
205 !isStoreBlocked; }
206
207 private:
208 /** Writes back the instruction, sending it to IEW. */
209 void writeback(DynInstPtr &inst, PacketPtr pkt);
210
211 /** Handles completing the send of a store to memory. */
212 void storePostSend(Packet *pkt);
213
214 /** Completes the store at the specified index. */
215 void completeStore(int store_idx);
216
217 /** Handles doing the retry. */
218 void recvRetry();
219
220 /** Increments the given store index (circular queue). */
221 inline void incrStIdx(int &store_idx);
222 /** Decrements the given store index (circular queue). */
223 inline void decrStIdx(int &store_idx);
224 /** Increments the given load index (circular queue). */
225 inline void incrLdIdx(int &load_idx);
226 /** Decrements the given load index (circular queue). */
227 inline void decrLdIdx(int &load_idx);
228
229 public:
230 /** Debugging function to dump instructions in the LSQ. */
231 void dumpInsts();
232
233 private:
234 /** Pointer to the CPU. */
235 FullCPU *cpu;
236
237 /** Pointer to the IEW stage. */
238 IEW *iewStage;
239
240 /** Pointer to memory object. */
241 MemObject *mem;
242
243 /** DcachePort class for this LSQ Unit. Handles doing the
244 * communication with the cache/memory.
245 * @todo: Needs to be moved to the LSQ level and have some sort
246 * of arbitration.
247 */
248 class DcachePort : public Port
249 {
250 protected:
251 /** Pointer to CPU. */
252 FullCPU *cpu;
253 /** Pointer to LSQ. */
254 LSQUnit *lsq;
255
256 public:
257 /** Default constructor. */
258 DcachePort(FullCPU *_cpu, LSQUnit *_lsq)
259 : Port(_lsq->name() + "-dport"), cpu(_cpu), lsq(_lsq)
260 { }
261
262 protected:
263 /** Atomic version of receive. Panics. */
264 virtual Tick recvAtomic(PacketPtr pkt);
265
266 /** Functional version of receive. Panics. */
267 virtual void recvFunctional(PacketPtr pkt);
268
269 /** Receives status change. Other than range changing, panics. */
270 virtual void recvStatusChange(Status status);
271
272 /** Returns the address ranges of this device. */
273 virtual void getDeviceAddressRanges(AddrRangeList &resp,
274 AddrRangeList &snoop)
275 { resp.clear(); snoop.clear(); }
276
277 /** Timing version of receive. Handles writing back and
278 * completing the load or store that has returned from
279 * memory. */
280 virtual bool recvTiming(PacketPtr pkt);
281
282 /** Handles doing a retry of the previous send. */
283 virtual void recvRetry();
284 };
285
286 /** Pointer to the D-cache. */
287 DcachePort *dcachePort;
288
289 /** Derived class to hold any sender state the LSQ needs. */
290 class LSQSenderState : public Packet::SenderState
291 {
292 public:
293 /** Default constructor. */
294 LSQSenderState()
295 : noWB(false)
296 { }
297
298 /** Instruction who initiated the access to memory. */
299 DynInstPtr inst;
300 /** Whether or not it is a load. */
301 bool isLoad;
302 /** The LQ/SQ index of the instruction. */
303 int idx;
304 /** Whether or not the instruction will need to writeback. */
305 bool noWB;
306 };
307
308 /** Writeback event, specifically for when stores forward data to loads. */
309 class WritebackEvent : public Event {
310 public:
311 /** Constructs a writeback event. */
312 WritebackEvent(DynInstPtr &_inst, PacketPtr pkt, LSQUnit *lsq_ptr);
313
314 /** Processes the writeback event. */
315 void process();
316
317 /** Returns the description of this event. */
318 const char *description();
319
320 private:
321 /** Instruction whose results are being written back. */
322 DynInstPtr inst;
323
324 /** The packet that would have been sent to memory. */
325 PacketPtr pkt;
326
327 /** The pointer to the LSQ unit that issued the store. */
328 LSQUnit<Impl> *lsqPtr;
329 };
330
331 public:
332 struct SQEntry {
333 /** Constructs an empty store queue entry. */
334 SQEntry()
335 : inst(NULL), req(NULL), size(0), data(0),
336 canWB(0), committed(0), completed(0)
337 { }
338
339 /** Constructs a store queue entry for a given instruction. */
340 SQEntry(DynInstPtr &_inst)
341 : inst(_inst), req(NULL), size(0), data(0),
342 canWB(0), committed(0), completed(0)
343 { }
344
345 /** The store instruction. */
346 DynInstPtr inst;
347 /** The request for the store. */
348 RequestPtr req;
349 /** The size of the store. */
350 int size;
351 /** The store data. */
352 IntReg data;
353 /** Whether or not the store can writeback. */
354 bool canWB;
355 /** Whether or not the store is committed. */
356 bool committed;
357 /** Whether or not the store is completed. */
358 bool completed;
359 };
360
361 private:
362 /** The LSQUnit thread id. */
363 unsigned lsqID;
364
365 /** The store queue. */
366 std::vector<SQEntry> storeQueue;
367
368 /** The load queue. */
369 std::vector<DynInstPtr> loadQueue;
370
371 /** The number of LQ entries, plus a sentinel entry (circular queue).
372 * @todo: Consider having var that records the true number of LQ entries.
373 */
374 unsigned LQEntries;
375 /** The number of SQ entries, plus a sentinel entry (circular queue).
376 * @todo: Consider having var that records the true number of SQ entries.
377 */
378 unsigned SQEntries;
379
380 /** The number of load instructions in the LQ. */
381 int loads;
382 /** The number of store instructions in the SQ. */
383 int stores;
384 /** The number of store instructions in the SQ waiting to writeback. */
385 int storesToWB;
386
387 /** The index of the head instruction in the LQ. */
388 int loadHead;
389 /** The index of the tail instruction in the LQ. */
390 int loadTail;
391
392 /** The index of the head instruction in the SQ. */
393 int storeHead;
394 /** The index of the first instruction that may be ready to be
395 * written back, and has not yet been written back.
396 */
397 int storeWBIdx;
398 /** The index of the tail instruction in the SQ. */
399 int storeTail;
400
401 /// @todo Consider moving to a more advanced model with write vs read ports
402 /** The number of cache ports available each cycle. */
403 int cachePorts;
404
405 /** The number of used cache ports in this cycle. */
406 int usedPorts;
407
408 /** Is the LSQ switched out. */
409 bool switchedOut;
410
411 //list<InstSeqNum> mshrSeqNums;
412
413 /** Wire to read information from the issue stage time queue. */
414 typename TimeBuffer<IssueStruct>::wire fromIssue;
415
416 /** Whether or not the LSQ is stalled. */
417 bool stalled;
418 /** The store that causes the stall due to partial store to load
419 * forwarding.
420 */
421 InstSeqNum stallingStoreIsn;
422 /** The index of the above store. */
423 int stallingLoadIdx;
424
425 /** The packet that needs to be retried. */
426 PacketPtr retryPkt;
427
428 /** Whehter or not a store is blocked due to the memory system. */
429 bool isStoreBlocked;
430
431 /** Whether or not a load is blocked due to the memory system. */
432 bool isLoadBlocked;
433
434 /** Has the blocked load been handled. */
435 bool loadBlockedHandled;
436
437 /** The sequence number of the blocked load. */
438 InstSeqNum blockedLoadSeqNum;
439
440 /** The oldest load that caused a memory ordering violation. */
441 DynInstPtr memDepViolator;
442
443 // Will also need how many read/write ports the Dcache has. Or keep track
444 // of that in stage that is one level up, and only call executeLoad/Store
445 // the appropriate number of times.
133 /** Clears all the entries in the LQ. */
134 void clearLQ();
135
136 /** Clears all the entries in the SQ. */
137 void clearSQ();
138
139 /** Resizes the LQ to a given size. */
140 void resizeLQ(unsigned size);
141
142 /** Resizes the SQ to a given size. */
143 void resizeSQ(unsigned size);
144
145 /** Squashes all instructions younger than a specific sequence number. */
146 void squash(const InstSeqNum &squashed_num);
147
148 /** Returns if there is a memory ordering violation. Value is reset upon
149 * call to getMemDepViolator().
150 */
151 bool violation() { return memDepViolator; }
152
153 /** Returns the memory ordering violator. */
154 DynInstPtr getMemDepViolator();
155
156 /** Returns if a load became blocked due to the memory system. */
157 bool loadBlocked()
158 { return isLoadBlocked; }
159
160 /** Clears the signal that a load became blocked. */
161 void clearLoadBlocked()
162 { isLoadBlocked = false; }
163
164 /** Returns if the blocked load was handled. */
165 bool isLoadBlockedHandled()
166 { return loadBlockedHandled; }
167
168 /** Records the blocked load as being handled. */
169 void setLoadBlockedHandled()
170 { loadBlockedHandled = true; }
171
172 /** Returns the number of free entries (min of free LQ and SQ entries). */
173 unsigned numFreeEntries();
174
175 /** Returns the number of loads ready to execute. */
176 int numLoadsReady();
177
178 /** Returns the number of loads in the LQ. */
179 int numLoads() { return loads; }
180
181 /** Returns the number of stores in the SQ. */
182 int numStores() { return stores; }
183
184 /** Returns if either the LQ or SQ is full. */
185 bool isFull() { return lqFull() || sqFull(); }
186
187 /** Returns if the LQ is full. */
188 bool lqFull() { return loads >= (LQEntries - 1); }
189
190 /** Returns if the SQ is full. */
191 bool sqFull() { return stores >= (SQEntries - 1); }
192
193 /** Returns the number of instructions in the LSQ. */
194 unsigned getCount() { return loads + stores; }
195
196 /** Returns if there are any stores to writeback. */
197 bool hasStoresToWB() { return storesToWB; }
198
199 /** Returns the number of stores to writeback. */
200 int numStoresToWB() { return storesToWB; }
201
202 /** Returns if the LSQ unit will writeback on this cycle. */
203 bool willWB() { return storeQueue[storeWBIdx].canWB &&
204 !storeQueue[storeWBIdx].completed &&
205 !isStoreBlocked; }
206
207 private:
208 /** Writes back the instruction, sending it to IEW. */
209 void writeback(DynInstPtr &inst, PacketPtr pkt);
210
211 /** Handles completing the send of a store to memory. */
212 void storePostSend(Packet *pkt);
213
214 /** Completes the store at the specified index. */
215 void completeStore(int store_idx);
216
217 /** Handles doing the retry. */
218 void recvRetry();
219
220 /** Increments the given store index (circular queue). */
221 inline void incrStIdx(int &store_idx);
222 /** Decrements the given store index (circular queue). */
223 inline void decrStIdx(int &store_idx);
224 /** Increments the given load index (circular queue). */
225 inline void incrLdIdx(int &load_idx);
226 /** Decrements the given load index (circular queue). */
227 inline void decrLdIdx(int &load_idx);
228
229 public:
230 /** Debugging function to dump instructions in the LSQ. */
231 void dumpInsts();
232
233 private:
234 /** Pointer to the CPU. */
235 FullCPU *cpu;
236
237 /** Pointer to the IEW stage. */
238 IEW *iewStage;
239
240 /** Pointer to memory object. */
241 MemObject *mem;
242
243 /** DcachePort class for this LSQ Unit. Handles doing the
244 * communication with the cache/memory.
245 * @todo: Needs to be moved to the LSQ level and have some sort
246 * of arbitration.
247 */
248 class DcachePort : public Port
249 {
250 protected:
251 /** Pointer to CPU. */
252 FullCPU *cpu;
253 /** Pointer to LSQ. */
254 LSQUnit *lsq;
255
256 public:
257 /** Default constructor. */
258 DcachePort(FullCPU *_cpu, LSQUnit *_lsq)
259 : Port(_lsq->name() + "-dport"), cpu(_cpu), lsq(_lsq)
260 { }
261
262 protected:
263 /** Atomic version of receive. Panics. */
264 virtual Tick recvAtomic(PacketPtr pkt);
265
266 /** Functional version of receive. Panics. */
267 virtual void recvFunctional(PacketPtr pkt);
268
269 /** Receives status change. Other than range changing, panics. */
270 virtual void recvStatusChange(Status status);
271
272 /** Returns the address ranges of this device. */
273 virtual void getDeviceAddressRanges(AddrRangeList &resp,
274 AddrRangeList &snoop)
275 { resp.clear(); snoop.clear(); }
276
277 /** Timing version of receive. Handles writing back and
278 * completing the load or store that has returned from
279 * memory. */
280 virtual bool recvTiming(PacketPtr pkt);
281
282 /** Handles doing a retry of the previous send. */
283 virtual void recvRetry();
284 };
285
286 /** Pointer to the D-cache. */
287 DcachePort *dcachePort;
288
289 /** Derived class to hold any sender state the LSQ needs. */
290 class LSQSenderState : public Packet::SenderState
291 {
292 public:
293 /** Default constructor. */
294 LSQSenderState()
295 : noWB(false)
296 { }
297
298 /** Instruction who initiated the access to memory. */
299 DynInstPtr inst;
300 /** Whether or not it is a load. */
301 bool isLoad;
302 /** The LQ/SQ index of the instruction. */
303 int idx;
304 /** Whether or not the instruction will need to writeback. */
305 bool noWB;
306 };
307
308 /** Writeback event, specifically for when stores forward data to loads. */
309 class WritebackEvent : public Event {
310 public:
311 /** Constructs a writeback event. */
312 WritebackEvent(DynInstPtr &_inst, PacketPtr pkt, LSQUnit *lsq_ptr);
313
314 /** Processes the writeback event. */
315 void process();
316
317 /** Returns the description of this event. */
318 const char *description();
319
320 private:
321 /** Instruction whose results are being written back. */
322 DynInstPtr inst;
323
324 /** The packet that would have been sent to memory. */
325 PacketPtr pkt;
326
327 /** The pointer to the LSQ unit that issued the store. */
328 LSQUnit<Impl> *lsqPtr;
329 };
330
331 public:
332 struct SQEntry {
333 /** Constructs an empty store queue entry. */
334 SQEntry()
335 : inst(NULL), req(NULL), size(0), data(0),
336 canWB(0), committed(0), completed(0)
337 { }
338
339 /** Constructs a store queue entry for a given instruction. */
340 SQEntry(DynInstPtr &_inst)
341 : inst(_inst), req(NULL), size(0), data(0),
342 canWB(0), committed(0), completed(0)
343 { }
344
345 /** The store instruction. */
346 DynInstPtr inst;
347 /** The request for the store. */
348 RequestPtr req;
349 /** The size of the store. */
350 int size;
351 /** The store data. */
352 IntReg data;
353 /** Whether or not the store can writeback. */
354 bool canWB;
355 /** Whether or not the store is committed. */
356 bool committed;
357 /** Whether or not the store is completed. */
358 bool completed;
359 };
360
361 private:
362 /** The LSQUnit thread id. */
363 unsigned lsqID;
364
365 /** The store queue. */
366 std::vector<SQEntry> storeQueue;
367
368 /** The load queue. */
369 std::vector<DynInstPtr> loadQueue;
370
371 /** The number of LQ entries, plus a sentinel entry (circular queue).
372 * @todo: Consider having var that records the true number of LQ entries.
373 */
374 unsigned LQEntries;
375 /** The number of SQ entries, plus a sentinel entry (circular queue).
376 * @todo: Consider having var that records the true number of SQ entries.
377 */
378 unsigned SQEntries;
379
380 /** The number of load instructions in the LQ. */
381 int loads;
382 /** The number of store instructions in the SQ. */
383 int stores;
384 /** The number of store instructions in the SQ waiting to writeback. */
385 int storesToWB;
386
387 /** The index of the head instruction in the LQ. */
388 int loadHead;
389 /** The index of the tail instruction in the LQ. */
390 int loadTail;
391
392 /** The index of the head instruction in the SQ. */
393 int storeHead;
394 /** The index of the first instruction that may be ready to be
395 * written back, and has not yet been written back.
396 */
397 int storeWBIdx;
398 /** The index of the tail instruction in the SQ. */
399 int storeTail;
400
401 /// @todo Consider moving to a more advanced model with write vs read ports
402 /** The number of cache ports available each cycle. */
403 int cachePorts;
404
405 /** The number of used cache ports in this cycle. */
406 int usedPorts;
407
408 /** Is the LSQ switched out. */
409 bool switchedOut;
410
411 //list<InstSeqNum> mshrSeqNums;
412
413 /** Wire to read information from the issue stage time queue. */
414 typename TimeBuffer<IssueStruct>::wire fromIssue;
415
416 /** Whether or not the LSQ is stalled. */
417 bool stalled;
418 /** The store that causes the stall due to partial store to load
419 * forwarding.
420 */
421 InstSeqNum stallingStoreIsn;
422 /** The index of the above store. */
423 int stallingLoadIdx;
424
425 /** The packet that needs to be retried. */
426 PacketPtr retryPkt;
427
428 /** Whehter or not a store is blocked due to the memory system. */
429 bool isStoreBlocked;
430
431 /** Whether or not a load is blocked due to the memory system. */
432 bool isLoadBlocked;
433
434 /** Has the blocked load been handled. */
435 bool loadBlockedHandled;
436
437 /** The sequence number of the blocked load. */
438 InstSeqNum blockedLoadSeqNum;
439
440 /** The oldest load that caused a memory ordering violation. */
441 DynInstPtr memDepViolator;
442
443 // Will also need how many read/write ports the Dcache has. Or keep track
444 // of that in stage that is one level up, and only call executeLoad/Store
445 // the appropriate number of times.
446/*
447 // total number of loads forwaded from LSQ stores
448 Stats::Vector<> lsq_forw_loads;
449
446
450 // total number of loads ignored due to invalid addresses
451 Stats::Vector<> inv_addr_loads;
447 /** Total number of loads forwaded from LSQ stores. */
448 Stats::Scalar<> lsqForwLoads;
452
449
453 // total number of software prefetches ignored due to invalid addresses
454 Stats::Vector<> inv_addr_swpfs;
450 /** Total number of loads ignored due to invalid addresses. */
451 Stats::Scalar<> invAddrLoads;
455
452
456 // total non-speculative bogus addresses seen (debug var)
457 Counter sim_invalid_addrs;
458 Stats::Vector<> fu_busy; //cumulative fu busy
453 /** Total number of squashed loads. */
454 Stats::Scalar<> lsqSquashedLoads;
459
455
460 // ready loads blocked due to memory disambiguation
461 Stats::Vector<> lsq_blocked_loads;
456 /** Total number of responses from the memory system that are
457 * ignored due to the instruction already being squashed. */
458 Stats::Scalar<> lsqIgnoredResponses;
462
459
463 Stats::Scalar<> lsqInversion;
464*/
460 /** Total number of squashed stores. */
461 Stats::Scalar<> lsqSquashedStores;
462
463 /** Total number of software prefetches ignored due to invalid addresses. */
464 Stats::Scalar<> invAddrSwpfs;
465
466 /** Ready loads blocked due to partial store-forwarding. */
467 Stats::Scalar<> lsqBlockedLoads;
468
469 /** Number of loads that were rescheduled. */
470 Stats::Scalar<> lsqRescheduledLoads;
471
472 /** Number of times the LSQ is blocked due to the cache. */
473 Stats::Scalar<> lsqCacheBlocked;
474
465 public:
466 /** Executes the load at the given index. */
467 template <class T>
468 Fault read(Request *req, T &data, int load_idx);
469
470 /** Executes the store at the given index. */
471 template <class T>
472 Fault write(Request *req, T &data, int store_idx);
473
474 /** Returns the index of the head load instruction. */
475 int getLoadHead() { return loadHead; }
476 /** Returns the sequence number of the head load instruction. */
477 InstSeqNum getLoadHeadSeqNum()
478 {
479 if (loadQueue[loadHead]) {
480 return loadQueue[loadHead]->seqNum;
481 } else {
482 return 0;
483 }
484
485 }
486
487 /** Returns the index of the head store instruction. */
488 int getStoreHead() { return storeHead; }
489 /** Returns the sequence number of the head store instruction. */
490 InstSeqNum getStoreHeadSeqNum()
491 {
492 if (storeQueue[storeHead].inst) {
493 return storeQueue[storeHead].inst->seqNum;
494 } else {
495 return 0;
496 }
497
498 }
499
500 /** Returns whether or not the LSQ unit is stalled. */
501 bool isStalled() { return stalled; }
502};
503
// NOTE(review): fragment of LSQUnit<Impl>::read() from the older diff
// revision; it is cut off here and a duplicate copy of the same prologue
// (with an extra stat bump) follows below. TODO: keep only one revision.
504template <class Impl>
505template <class T>
506Fault
507LSQUnit<Impl>::read(Request *req, T &data, int load_idx)
508{
509    DynInstPtr load_inst = loadQueue[load_idx];
510
511    assert(load_inst);
512
513    assert(!load_inst->isExecuted());
514
515    // Make sure this isn't an uncacheable access
516    // A bit of a hackish way to get uncached accesses to work only if they're
517    // at the head of the LSQ and are ready to commit (at the head of the ROB
518    // too).
519    if (req->getFlags() & UNCACHEABLE &&
520        (load_idx != loadHead || !load_inst->reachedCommit)) {
521        iewStage->rescheduleMemInst(load_inst);
// NOTE(review): duplicate of the public accessor section above, from the
// newer revision column of the scraped diff. TODO: deduplicate.
475  public:
476    /** Executes the load at the given index. */
477    template <class T>
478    Fault read(Request *req, T &data, int load_idx);
479
480    /** Executes the store at the given index. */
481    template <class T>
482    Fault write(Request *req, T &data, int store_idx);
483
484    /** Returns the index of the head load instruction. */
485    int getLoadHead() { return loadHead; }
486    /** Returns the sequence number of the head load instruction. */
487    InstSeqNum getLoadHeadSeqNum()
488    {
489        if (loadQueue[loadHead]) {
490            return loadQueue[loadHead]->seqNum;
491        } else {
492            return 0;
493        }
494
495    }
496
497    /** Returns the index of the head store instruction. */
498    int getStoreHead() { return storeHead; }
499    /** Returns the sequence number of the head store instruction. */
500    InstSeqNum getStoreHeadSeqNum()
501    {
502        if (storeQueue[storeHead].inst) {
503            return storeQueue[storeHead].inst->seqNum;
504        } else {
505            return 0;
506        }
507
508    }
509
510    /** Returns whether or not the LSQ unit is stalled. */
511    bool isStalled() { return stalled; }
512};
513
// NOTE(review): second (newer-revision) copy of the read() prologue; this
// copy additionally counts the reschedule in lsqRescheduledLoads.
514template <class Impl>
515template <class T>
516Fault
517LSQUnit<Impl>::read(Request *req, T &data, int load_idx)
518{
519    DynInstPtr load_inst = loadQueue[load_idx];
520
521    assert(load_inst);
522
523    assert(!load_inst->isExecuted());
524
525    // Make sure this isn't an uncacheable access
526    // A bit of a hackish way to get uncached accesses to work only if they're
527    // at the head of the LSQ and are ready to commit (at the head of the ROB
528    // too).
529    if (req->getFlags() & UNCACHEABLE &&
530        (load_idx != loadHead || !load_inst->reachedCommit)) {
531        iewStage->rescheduleMemInst(load_inst);
532        ++lsqRescheduledLoads;
// NOTE(review): older-revision fragment of read(): walk the store queue from
// this load's sqIdx toward storeWBIdx looking for a store that can forward.
522        return TheISA::genMachineCheckFault();
523    }
524
525    // Check the SQ for any previous stores that might lead to forwarding
526    int store_idx = load_inst->sqIdx;
527
528    int store_size = 0;
529
530    DPRINTF(LSQUnit, "Read called, load idx: %i, store idx: %i, "
531            "storeHead: %i addr: %#x\n",
532            load_idx, store_idx, storeHead, req->getPaddr());
533
534#if FULL_SYSTEM
535    if (req->getFlags() & LOCKED) {
536        cpu->lockAddr = req->getPaddr();
537        cpu->lockFlag = true;
538    }
539#endif
540
541    while (store_idx != -1) {
542        // End once we've reached the top of the LSQ
543        if (store_idx == storeWBIdx) {
544            break;
545        }
546
547        // Move the index to one younger
548        if (--store_idx < 0)
549            store_idx += SQEntries;
550
551        assert(storeQueue[store_idx].inst);
552
553        store_size = storeQueue[store_idx].size;
554
555        if (store_size == 0)
556            continue;
557
558        // Check if the store data is within the lower and upper bounds of
559        // addresses that the request needs.
560        bool store_has_lower_limit =
561            req->getVaddr() >= storeQueue[store_idx].inst->effAddr;
562        bool store_has_upper_limit =
563            (req->getVaddr() + req->getSize()) <=
564            (storeQueue[store_idx].inst->effAddr + store_size);
565        bool lower_load_has_store_part =
566            req->getVaddr() < (storeQueue[store_idx].inst->effAddr +
567                               store_size);
568        bool upper_load_has_store_part =
569            (req->getVaddr() + req->getSize()) >
570            storeQueue[store_idx].inst->effAddr;
571
572        // If the store's data has all of the data needed, we can forward.
573        if (store_has_lower_limit && store_has_upper_limit) {
574            // Get shift amount for offset into the store's data.
575            int shift_amt = req->getVaddr() & (store_size - 1);
576            // @todo: Magic number, assumes byte addressing
577            shift_amt = shift_amt << 3;
578
579            // Cast this to type T?
580            data = storeQueue[store_idx].data >> shift_amt;
581
582            assert(!load_inst->memData);
583            load_inst->memData = new uint8_t[64];
584
585            memcpy(load_inst->memData, &data, req->getSize());
586
587            DPRINTF(LSQUnit, "Forwarding from store idx %i to load to "
588                    "addr %#x, data %#x\n",
589                    store_idx, req->getVaddr(), data);
590
591            PacketPtr data_pkt = new Packet(req, Packet::ReadReq, Packet::Broadcast);
592            data_pkt->dataStatic(load_inst->memData);
593
594            WritebackEvent *wb = new WritebackEvent(load_inst, data_pkt, this);
595
596            // We'll say this has a 1 cycle load-store forwarding latency
597            // for now.
598            // @todo: Need to make this a parameter.
599            wb->schedule(curTick);
600
// NOTE(review): newer-revision duplicate of the store-queue scan /
// full-forwarding region directly above. TODO: deduplicate.
533        return TheISA::genMachineCheckFault();
534    }
535
536    // Check the SQ for any previous stores that might lead to forwarding
537    int store_idx = load_inst->sqIdx;
538
539    int store_size = 0;
540
541    DPRINTF(LSQUnit, "Read called, load idx: %i, store idx: %i, "
542            "storeHead: %i addr: %#x\n",
543            load_idx, store_idx, storeHead, req->getPaddr());
544
545#if FULL_SYSTEM
546    if (req->getFlags() & LOCKED) {
547        cpu->lockAddr = req->getPaddr();
548        cpu->lockFlag = true;
549    }
550#endif
551
552    while (store_idx != -1) {
553        // End once we've reached the top of the LSQ
554        if (store_idx == storeWBIdx) {
555            break;
556        }
557
558        // Move the index to one younger
559        if (--store_idx < 0)
560            store_idx += SQEntries;
561
562        assert(storeQueue[store_idx].inst);
563
564        store_size = storeQueue[store_idx].size;
565
566        if (store_size == 0)
567            continue;
568
569        // Check if the store data is within the lower and upper bounds of
570        // addresses that the request needs.
571        bool store_has_lower_limit =
572            req->getVaddr() >= storeQueue[store_idx].inst->effAddr;
573        bool store_has_upper_limit =
574            (req->getVaddr() + req->getSize()) <=
575            (storeQueue[store_idx].inst->effAddr + store_size);
576        bool lower_load_has_store_part =
577            req->getVaddr() < (storeQueue[store_idx].inst->effAddr +
578                               store_size);
579        bool upper_load_has_store_part =
580            (req->getVaddr() + req->getSize()) >
581            storeQueue[store_idx].inst->effAddr;
582
583        // If the store's data has all of the data needed, we can forward.
584        if (store_has_lower_limit && store_has_upper_limit) {
585            // Get shift amount for offset into the store's data.
586            int shift_amt = req->getVaddr() & (store_size - 1);
587            // @todo: Magic number, assumes byte addressing
588            shift_amt = shift_amt << 3;
589
590            // Cast this to type T?
591            data = storeQueue[store_idx].data >> shift_amt;
592
593            assert(!load_inst->memData);
594            load_inst->memData = new uint8_t[64];
595
596            memcpy(load_inst->memData, &data, req->getSize());
597
598            DPRINTF(LSQUnit, "Forwarding from store idx %i to load to "
599                    "addr %#x, data %#x\n",
600                    store_idx, req->getVaddr(), data);
601
602            PacketPtr data_pkt = new Packet(req, Packet::ReadReq, Packet::Broadcast);
603            data_pkt->dataStatic(load_inst->memData);
604
605            WritebackEvent *wb = new WritebackEvent(load_inst, data_pkt, this);
606
607            // We'll say this has a 1 cycle load-store forwarding latency
608            // for now.
609            // @todo: Need to make this a parameter.
610            wb->schedule(curTick);
611
// NOTE(review): older-revision fragment — partial store-to-load overlap
// forces the load to stall and retry; one newer-revision line
// (++lsqForwLoads) is interleaved here by the diff scrape.
601            // Should keep track of stat for forwarded data
612            ++lsqForwLoads;
602            return NoFault;
603        } else if ((store_has_lower_limit && lower_load_has_store_part) ||
604                   (store_has_upper_limit && upper_load_has_store_part) ||
605                   (lower_load_has_store_part && upper_load_has_store_part)) {
606            // This is the partial store-load forwarding case where a store
607            // has only part of the load's data.
608
609            // If it's already been written back, then don't worry about
610            // stalling on it.
611            if (storeQueue[store_idx].completed) {
612                continue;
613            }
614
615            // Must stall load and force it to retry, so long as it's the oldest
616            // load that needs to do so.
617            if (!stalled ||
618                (stalled &&
619                 load_inst->seqNum <
620                 loadQueue[stallingLoadIdx]->seqNum)) {
621                stalled = true;
622                stallingStoreIsn = storeQueue[store_idx].inst->seqNum;
623                stallingLoadIdx = load_idx;
624            }
625
626            // Tell IQ/mem dep unit that this instruction will need to be
627            // rescheduled eventually
628            iewStage->rescheduleMemInst(load_inst);
// NOTE(review): newer-revision duplicate of the partial-forwarding stall
// path, followed by both revisions' mis-match DPRINTFs; the newer copy adds
// the lsqRescheduledLoads / lsqBlockedLoads stat bumps.
613            return NoFault;
614        } else if ((store_has_lower_limit && lower_load_has_store_part) ||
615                   (store_has_upper_limit && upper_load_has_store_part) ||
616                   (lower_load_has_store_part && upper_load_has_store_part)) {
617            // This is the partial store-load forwarding case where a store
618            // has only part of the load's data.
619
620            // If it's already been written back, then don't worry about
621            // stalling on it.
622            if (storeQueue[store_idx].completed) {
623                continue;
624            }
625
626            // Must stall load and force it to retry, so long as it's the oldest
627            // load that needs to do so.
628            if (!stalled ||
629                (stalled &&
630                 load_inst->seqNum <
631                 loadQueue[stallingLoadIdx]->seqNum)) {
632                stalled = true;
633                stallingStoreIsn = storeQueue[store_idx].inst->seqNum;
634                stallingLoadIdx = load_idx;
635            }
636
637            // Tell IQ/mem dep unit that this instruction will need to be
638            // rescheduled eventually
639            iewStage->rescheduleMemInst(load_inst);
640            ++lsqRescheduledLoads;
629
630            // Do not generate a writeback event as this instruction is not
631            // complete.
632            DPRINTF(LSQUnit, "Load-store forwarding mis-match. "
633                    "Store idx %i to load addr %#x\n",
634                    store_idx, req->getVaddr());
635
641
642            // Do not generate a writeback event as this instruction is not
643            // complete.
644            DPRINTF(LSQUnit, "Load-store forwarding mis-match. "
645                    "Store idx %i to load addr %#x\n",
646                    store_idx, req->getVaddr());
647
648            ++lsqBlockedLoads;
// NOTE(review): no store could forward — issue a timing access to the
// D-cache. Both diff revisions of this region appear back to back; the
// newer one adds the lsqCacheBlocked stat.
636            return NoFault;
637        }
638    }
639
640    // If there's no forwarding case, then go access memory
641    DPRINTF(LSQUnit, "Doing functional access for inst [sn:%lli] PC %#x\n",
642            load_inst->seqNum, load_inst->readPC());
643
644    assert(!load_inst->memData);
645    load_inst->memData = new uint8_t[64];
646
647    ++usedPorts;
648
649    DPRINTF(LSQUnit, "Doing timing access for inst PC %#x\n",
650            load_inst->readPC());
651
652    PacketPtr data_pkt = new Packet(req, Packet::ReadReq, Packet::Broadcast);
653    data_pkt->dataStatic(load_inst->memData);
654
655    LSQSenderState *state = new LSQSenderState;
656    state->isLoad = true;
657    state->idx = load_idx;
658    state->inst = load_inst;
659    data_pkt->senderState = state;
660
661    // if we have a cache, do cache access too
662    if (!dcachePort->sendTiming(data_pkt)) {
649            return NoFault;
650        }
651    }
652
653    // If there's no forwarding case, then go access memory
654    DPRINTF(LSQUnit, "Doing functional access for inst [sn:%lli] PC %#x\n",
655            load_inst->seqNum, load_inst->readPC());
656
657    assert(!load_inst->memData);
658    load_inst->memData = new uint8_t[64];
659
660    ++usedPorts;
661
662    DPRINTF(LSQUnit, "Doing timing access for inst PC %#x\n",
663            load_inst->readPC());
664
665    PacketPtr data_pkt = new Packet(req, Packet::ReadReq, Packet::Broadcast);
666    data_pkt->dataStatic(load_inst->memData);
667
668    LSQSenderState *state = new LSQSenderState;
669    state->isLoad = true;
670    state->idx = load_idx;
671    state->inst = load_inst;
672    data_pkt->senderState = state;
673
674    // if we have a cache, do cache access too
675    if (!dcachePort->sendTiming(data_pkt)) {
676        ++lsqCacheBlocked;
// NOTE(review): older-revision tail of read(): record a blocked load (it will
// squash younger instructions and be refetched) or report cache hit/miss.
663        // There's an older load that's already going to squash.
664        if (isLoadBlocked && blockedLoadSeqNum < load_inst->seqNum)
665            return NoFault;
666
667        // Record that the load was blocked due to memory. This
668        // load will squash all instructions after it, be
669        // refetched, and re-executed.
670        isLoadBlocked = true;
671        loadBlockedHandled = false;
672        blockedLoadSeqNum = load_inst->seqNum;
673        // No fault occurred, even though the interface is blocked.
674        return NoFault;
675    }
676
677    if (data_pkt->result != Packet::Success) {
678        DPRINTF(LSQUnit, "LSQUnit: D-cache miss!\n");
679        DPRINTF(Activity, "Activity: ld accessing mem miss [sn:%lli]\n",
680                load_inst->seqNum);
681    } else {
682        DPRINTF(LSQUnit, "LSQUnit: D-cache hit!\n");
683        DPRINTF(Activity, "Activity: ld accessing mem hit [sn:%lli]\n",
684                load_inst->seqNum);
685    }
686
687    return NoFault;
688}
689
690template <class Impl>
691template <class T>
692Fault
693LSQUnit<Impl>::write(Request *req, T &data, int store_idx)
694{
695 assert(storeQueue[store_idx].inst);
696
697 DPRINTF(LSQUnit, "Doing write to store idx %i, addr %#x data %#x"
698 " | storeHead:%i [sn:%i]\n",
699 store_idx, req->getPaddr(), data, storeHead,
700 storeQueue[store_idx].inst->seqNum);
701
702 storeQueue[store_idx].req = req;
703 storeQueue[store_idx].size = sizeof(T);
704 storeQueue[store_idx].data = data;
705
706 // This function only writes the data to the store queue, so no fault
707 // can happen here.
708 return NoFault;
709}
710
711#endif // __CPU_O3_LSQ_UNIT_HH__
// NOTE(review): newer-revision duplicate of the read() tail above (blocked-
// load bookkeeping and cache hit/miss reporting). TODO: deduplicate.
677        // There's an older load that's already going to squash.
678        if (isLoadBlocked && blockedLoadSeqNum < load_inst->seqNum)
679            return NoFault;
680
681        // Record that the load was blocked due to memory. This
682        // load will squash all instructions after it, be
683        // refetched, and re-executed.
684        isLoadBlocked = true;
685        loadBlockedHandled = false;
686        blockedLoadSeqNum = load_inst->seqNum;
687        // No fault occurred, even though the interface is blocked.
688        return NoFault;
689    }
690
691    if (data_pkt->result != Packet::Success) {
692        DPRINTF(LSQUnit, "LSQUnit: D-cache miss!\n");
693        DPRINTF(Activity, "Activity: ld accessing mem miss [sn:%lli]\n",
694                load_inst->seqNum);
695    } else {
696        DPRINTF(LSQUnit, "LSQUnit: D-cache hit!\n");
697        DPRINTF(Activity, "Activity: ld accessing mem hit [sn:%lli]\n",
698                load_inst->seqNum);
699    }
700
701    return NoFault;
702}
703
704template <class Impl>
705template <class T>
706Fault
707LSQUnit<Impl>::write(Request *req, T &data, int store_idx)
708{
709 assert(storeQueue[store_idx].inst);
710
711 DPRINTF(LSQUnit, "Doing write to store idx %i, addr %#x data %#x"
712 " | storeHead:%i [sn:%i]\n",
713 store_idx, req->getPaddr(), data, storeHead,
714 storeQueue[store_idx].inst->seqNum);
715
716 storeQueue[store_idx].req = req;
717 storeQueue[store_idx].size = sizeof(T);
718 storeQueue[store_idx].data = data;
719
720 // This function only writes the data to the store queue, so no fault
721 // can happen here.
722 return NoFault;
723}
724
725#endif // __CPU_O3_LSQ_UNIT_HH__