lsq_unit.hh (13472:7ceacede4f1e) | lsq_unit.hh (13590:d7e018859709)
---|---
1/* | 1/* |
2 * Copyright (c) 2012-2014,2017 ARM Limited | 2 * Copyright (c) 2012-2014,2017-2018 ARM Limited |
3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated --- 46 unchanged lines hidden (view full) --- 57#include "config/the_isa.hh" 58#include "cpu/inst_seq.hh" 59#include "cpu/timebuf.hh" 60#include "debug/LSQUnit.hh" 61#include "mem/packet.hh" 62#include "mem/port.hh" 63 64struct DerivO3CPUParams; | 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated --- 46 unchanged lines hidden (view full) --- 57#include "config/the_isa.hh" 58#include "cpu/inst_seq.hh" 59#include "cpu/timebuf.hh" 60#include "debug/LSQUnit.hh" 61#include "mem/packet.hh" 62#include "mem/port.hh" 63 64struct DerivO3CPUParams; |
65#include "base/circular_queue.hh" |
|
65 66/** 67 * Class that implements the actual LQ and SQ for each specific 68 * thread. Both are circular queues; load entries are freed upon 69 * committing, while store entries are freed once they writeback. The 70 * LSQUnit tracks if there are memory ordering violations, and also 71 * detects partial load to store forwarding cases (a store only has 72 * part of a load's data) that requires the load to wait until the 73 * store writes back. In the former case it holds onto the instruction 74 * until the dependence unit looks at it, and in the latter it stalls 75 * the LSQ until the store writes back. At that point the load is 76 * replayed. 77 */ 78template <class Impl> | 66 67/** 68 * Class that implements the actual LQ and SQ for each specific 69 * thread. Both are circular queues; load entries are freed upon 70 * committing, while store entries are freed once they writeback. The 71 * LSQUnit tracks if there are memory ordering violations, and also 72 * detects partial load to store forwarding cases (a store only has 73 * part of a load's data) that requires the load to wait until the 74 * store writes back. In the former case it holds onto the instruction 75 * until the dependence unit looks at it, and in the latter it stalls 76 * the LSQ until the store writes back. At that point the load is 77 * replayed. 78 */ 79template <class Impl> |
79class LSQUnit { | 80class LSQUnit 81{ |
80 public: 81 typedef typename Impl::O3CPU O3CPU; 82 typedef typename Impl::DynInstPtr DynInstPtr; 83 typedef typename Impl::CPUPol::IEW IEW; 84 typedef typename Impl::CPUPol::LSQ LSQ; 85 typedef typename Impl::CPUPol::IssueStruct IssueStruct; 86 | 82 public: 83 typedef typename Impl::O3CPU O3CPU; 84 typedef typename Impl::DynInstPtr DynInstPtr; 85 typedef typename Impl::CPUPol::IEW IEW; 86 typedef typename Impl::CPUPol::LSQ LSQ; 87 typedef typename Impl::CPUPol::IssueStruct IssueStruct; 88 |
 89     using LSQSenderState = typename LSQ::LSQSenderState;
 90     using LSQRequest = typename Impl::CPUPol::LSQ::LSQRequest;
 91   private:
 92     class LSQEntry
 93     {
 94       private:
 95         /** The instruction. */
 96         DynInstPtr inst;
 97         /** The request. */
 98         LSQRequest* req;
 99         /** The size of the operation. */
100         uint8_t _size;
101         /** Valid entry. */
102         bool _valid;
103       public:
104         /** Constructs an empty store queue entry. */
105         LSQEntry()
106             : inst(nullptr), req(nullptr), _size(0), _valid(false)
107         {
108         }
109
110         ~LSQEntry()
111         {
112             inst = nullptr;
113             if (req != nullptr) {
114                 req->freeLSQEntry();
115                 req = nullptr;
116             }
117         }
118
119         void
120         clear()
121         {
122             inst = nullptr;
123             if (req != nullptr) {
124                 req->freeLSQEntry();
125             }
126             req = nullptr;
127             _valid = false;
128             _size = 0;
129         }
130
131         void
132         set(const DynInstPtr& inst)
133         {
134             assert(!_valid);
135             this->inst = inst;
136             _valid = true;
137             _size = 0;
138         }
139         LSQRequest* request() { return req; }
140         void setRequest(LSQRequest* r) { req = r; }
141         bool hasRequest() { return req != nullptr; }
142         /** Member accessors. */
143         /** @{ */
144         bool valid() const { return _valid; }
145         uint8_t& size() { return _size; }
146         const uint8_t& size() const { return _size; }
147         const DynInstPtr& instruction() const { return inst; }
148         /** @} */
149     };
150
151     class SQEntry : public LSQEntry
152     {
153       private:
154         /** The store data. */
155         char _data[64];  // TODO: 64 should become a parameter
156         /** Whether or not the store can writeback. */
157         bool _canWB;
158         /** Whether or not the store is committed. */
159         bool _committed;
160         /** Whether or not the store is completed. */
161         bool _completed;
162         /** Does this request write all zeros and thus doesn't
163          * have any data attached to it. Used for cache block zero
164          * style instructions (ARM DC ZVA; ALPHA WH64)
165          */
166         bool _isAllZeros;
167       public:
168         static constexpr size_t DataSize = sizeof(_data);
169         /** Constructs an empty store queue entry. */
170         SQEntry()
171             : _canWB(false), _committed(false), _completed(false),
172               _isAllZeros(false)
173         {
174             std::memset(_data, 0, DataSize);
175         }
176
177         ~SQEntry()
178         {
179         }
180
181         void
182         set(const DynInstPtr& inst)
183         {
184             LSQEntry::set(inst);
185         }
186
187         void
188         clear()
189         {
190             LSQEntry::clear();
191             _canWB = _completed = _committed = _isAllZeros = false;
192         }
193         /** Member accessors. */
194         /** @{ */
195         bool& canWB() { return _canWB; }
196         const bool& canWB() const { return _canWB; }
197         bool& completed() { return _completed; }
198         const bool& completed() const { return _completed; }
199         bool& committed() { return _committed; }
200         const bool& committed() const { return _committed; }
201         bool& isAllZeros() { return _isAllZeros; }
202         const bool& isAllZeros() const { return _isAllZeros; }
203         char* data() { return _data; }
204         const char* data() const { return _data; }
205         /** @} */
206     };
207     using LQEntry = LSQEntry;
208
|
87 public: | 209 public: |
210 using LoadQueue = CircularQueue<LQEntry>; 211 using StoreQueue = CircularQueue<SQEntry>; 212 213 public: |
|
 88     /** Constructs an LSQ unit. init() must be called prior to use. */ | 214     /** Constructs an LSQ unit. init() must be called prior to use. */
 89     LSQUnit(uint32_t lqEntries, uint32_t sqEntries); | 215     LSQUnit(uint32_t lqEntries, uint32_t sqEntries);
 90 | 216
 91     /** We cannot copy LSQUnit because it has stats for which copy | 217     /** We cannot copy LSQUnit because it has stats for which copy
 92      * constructor is deleted explicitly. However, STL vector requires | 218      * constructor is deleted explicitly. However, STL vector requires
 93      * a valid copy constructor for the base type at compile time. | 219      * a valid copy constructor for the base type at compile time.
 94      */ | 220      */
 95     LSQUnit(const LSQUnit &l) { panic("LSQUnit is not copy-able"); } | 221     LSQUnit(const LSQUnit &l) { panic("LSQUnit is not copy-able"); }
--- 12 unchanged lines hidden ---
108     void setDcachePort(MasterPort *dcache_port); | 234     void setDcachePort(MasterPort *dcache_port);
109 | 235
110     /** Perform sanity checks after a drain. */ | 236     /** Perform sanity checks after a drain. */
111     void drainSanityCheck() const; | 237     void drainSanityCheck() const;
112 | 238
113     /** Takes over from another CPU's thread. */ | 239     /** Takes over from another CPU's thread. */
114     void takeOverFrom(); | 240     void takeOverFrom();
115 | 241
116 /** Ticks the LSQ unit, which in this case only resets the number of 117 * used cache ports. 118 * @todo: Move the number of used ports up to the LSQ level so it can 119 * be shared by all LSQ units. 120 */ 121 void tick() { usedStorePorts = 0; } 122 | |
123     /** Inserts an instruction. */ | 242     /** Inserts an instruction. */
124     void insert(const DynInstPtr &inst); | 243     void insert(const DynInstPtr &inst);
125     /** Inserts a load instruction. */ | 244     /** Inserts a load instruction. */
126     void insertLoad(const DynInstPtr &load_inst); | 245     void insertLoad(const DynInstPtr &load_inst);
127     /** Inserts a store instruction. */ | 246     /** Inserts a store instruction. */
128     void insertStore(const DynInstPtr &store_inst); | 247     void insertStore(const DynInstPtr &store_inst);
129 | 248
130     /** Check for ordering violations in the LSQ. For a store, squash if we | 249     /** Check for ordering violations in the LSQ. For a store, squash if we
131      * ever find a conflicting load. For a load, only squash if an | 250      * ever find a conflicting load. For a load, only squash if an
132      * external snoop invalidate has been seen for that load address. | 251      * external snoop invalidate has been seen for that load address.
133      * @param load_idx index to start checking at | 252      * @param load_idx index to start checking at
134      * @param inst the instruction to check | 253      * @param inst the instruction to check
135      */ | 254      */
136 Fault checkViolations(int load_idx, const DynInstPtr &inst); | 255 Fault checkViolations(typename LoadQueue::iterator& loadIt, 256 const DynInstPtr& inst); |
137 | 257
138     /** Check if an incoming invalidate hits in the lsq on a load | 258     /** Check if an incoming invalidate hits in the lsq on a load
139      * that might have issued out of order wrt another load because | 259      * that might have issued out of order wrt another load because
140      * of the intermediate invalidate. | 260      * of the intermediate invalidate.
141      */ | 261      */
142     void checkSnoop(PacketPtr pkt); | 262     void checkSnoop(PacketPtr pkt);
143 | 263
144     /** Executes a load instruction. */ | 264     /** Executes a load instruction. */
--- 13 unchanged lines hidden ---
158 | 278
159     /** Writes back stores. */ | 279     /** Writes back stores. */
160     void writebackStores(); | 280     void writebackStores();
161 | 281
162     /** Completes the data access that has been returned from the | 282     /** Completes the data access that has been returned from the
163      * memory system. */ | 283      * memory system. */
164     void completeDataAccess(PacketPtr pkt); | 284     void completeDataAccess(PacketPtr pkt);
165 | 285
166 /** Clears all the entries in the LQ. */ 167 void clearLQ(); 168 169 /** Clears all the entries in the SQ. */ 170 void clearSQ(); 171 172 /** Resizes the LQ to a given size. */ 173 void resizeLQ(unsigned size); 174 175 /** Resizes the SQ to a given size. */ 176 void resizeSQ(unsigned size); 177 | |
178 /** Squashes all instructions younger than a specific sequence number. */ 179 void squash(const InstSeqNum &squashed_num); 180 181 /** Returns if there is a memory ordering violation. Value is reset upon 182 * call to getMemDepViolator(). 183 */ 184 bool violation() { return memDepViolator; } 185 --- 14 unchanged lines hidden (view full) --- 200 201 /** Returns if either the LQ or SQ is full. */ 202 bool isFull() { return lqFull() || sqFull(); } 203 204 /** Returns if both the LQ and SQ are empty. */ 205 bool isEmpty() const { return lqEmpty() && sqEmpty(); } 206 207 /** Returns if the LQ is full. */ | 286 /** Squashes all instructions younger than a specific sequence number. */ 287 void squash(const InstSeqNum &squashed_num); 288 289 /** Returns if there is a memory ordering violation. Value is reset upon 290 * call to getMemDepViolator(). 291 */ 292 bool violation() { return memDepViolator; } 293 --- 14 unchanged lines hidden (view full) --- 308 309 /** Returns if either the LQ or SQ is full. */ 310 bool isFull() { return lqFull() || sqFull(); } 311 312 /** Returns if both the LQ and SQ are empty. */ 313 bool isEmpty() const { return lqEmpty() && sqEmpty(); } 314 315 /** Returns if the LQ is full. */ |
208 bool lqFull() { return loads >= (LQEntries - 1); } | 316 bool lqFull() { return loadQueue.full(); } |
209 210 /** Returns if the SQ is full. */ | 317 318 /** Returns if the SQ is full. */ |
211 bool sqFull() { return stores >= (SQEntries - 1); } | 319 bool sqFull() { return storeQueue.full(); } |
212 213 /** Returns if the LQ is empty. */ 214 bool lqEmpty() const { return loads == 0; } 215 216 /** Returns if the SQ is empty. */ 217 bool sqEmpty() const { return stores == 0; } 218 219 /** Returns the number of instructions in the LSQ. */ 220 unsigned getCount() { return loads + stores; } 221 222 /** Returns if there are any stores to writeback. */ 223 bool hasStoresToWB() { return storesToWB; } 224 225 /** Returns the number of stores to writeback. */ 226 int numStoresToWB() { return storesToWB; } 227 228 /** Returns if the LSQ unit will writeback on this cycle. */ | 320 321 /** Returns if the LQ is empty. */ 322 bool lqEmpty() const { return loads == 0; } 323 324 /** Returns if the SQ is empty. */ 325 bool sqEmpty() const { return stores == 0; } 326 327 /** Returns the number of instructions in the LSQ. */ 328 unsigned getCount() { return loads + stores; } 329 330 /** Returns if there are any stores to writeback. */ 331 bool hasStoresToWB() { return storesToWB; } 332 333 /** Returns the number of stores to writeback. */ 334 int numStoresToWB() { return storesToWB; } 335 336 /** Returns if the LSQ unit will writeback on this cycle. */ |
229     bool willWB() { return storeQueue[storeWBIdx].canWB &&
230                      !storeQueue[storeWBIdx].completed &&
231                      !isStoreBlocked; }
|
337     bool
338     willWB()
339     {
340         return storeWBIt.dereferenceable() &&
341                storeWBIt->valid() &&
342                storeWBIt->canWB() &&
343                !storeWBIt->completed() &&
344                !isStoreBlocked;
345     }
232 233 /** Handles doing the retry. */ 234 void recvRetry(); 235 | 346 347 /** Handles doing the retry. */ 348 void recvRetry(); 349 |
350 unsigned int cacheLineSize(); |
|
236 private: 237 /** Reset the LSQ state */ 238 void resetState(); 239 240 /** Writes back the instruction, sending it to IEW. */ 241 void writeback(const DynInstPtr &inst, PacketPtr pkt); 242 | 351 private: 352 /** Reset the LSQ state */ 353 void resetState(); 354 355 /** Writes back the instruction, sending it to IEW. */ 356 void writeback(const DynInstPtr &inst, PacketPtr pkt); 357 |
243 /** Writes back a store that couldn't be completed the previous cycle. */ 244 void writebackPendingStore(); | 358 /** Try to finish a previously blocked write back attempt */ 359 void writebackBlockedStore(); |
245 | 360 |
246 /** Handles completing the send of a store to memory. */ 247 void storePostSend(PacketPtr pkt); 248 | |
249 /** Completes the store at the specified index. */ | 361 /** Completes the store at the specified index. */ |
250 void completeStore(int store_idx); | 362 void completeStore(typename StoreQueue::iterator store_idx); |
251 | 363 |
252 /** Attempts to send a store to the cache. */ 253 bool sendStore(PacketPtr data_pkt); | 364 /** Handles completing the send of a store to memory. */ 365 void storePostSend(); |
254 | 366 |
255 /** Increments the given store index (circular queue). */ 256 inline void incrStIdx(int &store_idx) const; 257 /** Decrements the given store index (circular queue). */ 258 inline void decrStIdx(int &store_idx) const; 259 /** Increments the given load index (circular queue). */ 260 inline void incrLdIdx(int &load_idx) const; 261 /** Decrements the given load index (circular queue). */ 262 inline void decrLdIdx(int &load_idx) const; 263 | |
264 public: | 367 public: |
368     /** Attempts to send a packet to the cache.
369      * Check if there are ports available. Return true if
370      * there are, false if there are not.
371      */
372     bool trySendPacket(bool isLoad, PacketPtr data_pkt);
373
374
|
265 /** Debugging function to dump instructions in the LSQ. */ 266 void dumpInsts() const; 267 | 375 /** Debugging function to dump instructions in the LSQ. */ 376 void dumpInsts() const; 377 |
378 /** Schedule event for the cpu. */ 379 void schedule(Event& ev, Tick when) { cpu->schedule(ev, when); } 380 381 BaseTLB* dTLB() { return cpu->dtb; } 382 |
|
268 private: 269 /** Pointer to the CPU. */ 270 O3CPU *cpu; 271 272 /** Pointer to the IEW stage. */ 273 IEW *iewStage; 274 275 /** Pointer to the LSQ. */ 276 LSQ *lsq; 277 278 /** Pointer to the dcache port. Used only for sending. */ 279 MasterPort *dcachePort; 280 | 383 private: 384 /** Pointer to the CPU. */ 385 O3CPU *cpu; 386 387 /** Pointer to the IEW stage. */ 388 IEW *iewStage; 389 390 /** Pointer to the LSQ. */ 391 LSQ *lsq; 392 393 /** Pointer to the dcache port. Used only for sending. */ 394 MasterPort *dcachePort; 395 |
281 /** Derived class to hold any sender state the LSQ needs. */ 282 class LSQSenderState : public Packet::SenderState | 396 /** Particularisation of the LSQSenderState to the LQ. */ 397 class LQSenderState : public LSQSenderState |
283 { | 398 { |
399 using LSQSenderState::alive; |
|
284 public: | 400 public: |
285 /** Default constructor. */ 286 LSQSenderState() 287 : mainPkt(NULL), pendingPacket(NULL), idx(0), outstanding(1), 288 isLoad(false), noWB(false), isSplit(false), 289 pktToSend(false), cacheBlocked(false) 290 { } | 401 LQSenderState(typename LoadQueue::iterator idx_) 402 : LSQSenderState(idx_->request(), true), idx(idx_) { } |
291 | 403 |
292 /** Instruction who initiated the access to memory. */ 293 DynInstPtr inst; 294 /** The main packet from a split load, used during writeback. */ 295 PacketPtr mainPkt; 296 /** A second packet from a split store that needs sending. */ 297 PacketPtr pendingPacket; 298 /** The LQ/SQ index of the instruction. */ 299 uint8_t idx; 300 /** Number of outstanding packets to complete. */ 301 uint8_t outstanding; 302 /** Whether or not it is a load. */ 303 bool isLoad; 304 /** Whether or not the instruction will need to writeback. */ 305 bool noWB; 306 /** Whether or not this access is split in two. */ 307 bool isSplit; 308 /** Whether or not there is a packet that needs sending. */ 309 bool pktToSend; 310 /** Whether or not the second packet of this split load was blocked */ 311 bool cacheBlocked; | 404 /** The LQ index of the instruction. */ 405 typename LoadQueue::iterator idx; 406 //virtual LSQRequest* request() { return idx->request(); } 407 virtual void 408 complete() 409 { 410 //if (alive()) 411 // idx->request()->senderState(nullptr); 412 } 413 }; |
312 | 414 |
313 /** Completes a packet and returns whether the access is finished. */ 314 inline bool complete() { return --outstanding == 0; } | 415 /** Particularisation of the LSQSenderState to the SQ. */ 416 class SQSenderState : public LSQSenderState 417 { 418 using LSQSenderState::alive; 419 public: 420 SQSenderState(typename StoreQueue::iterator idx_) 421 : LSQSenderState(idx_->request(), false), idx(idx_) { } 422 /** The SQ index of the instruction. */ 423 typename StoreQueue::iterator idx; 424 //virtual LSQRequest* request() { return idx->request(); } 425 virtual void 426 complete() 427 { 428 //if (alive()) 429 // idx->request()->senderState(nullptr); 430 } |
315 }; 316 317 /** Writeback event, specifically for when stores forward data to loads. */ | 431 }; 432 433 /** Writeback event, specifically for when stores forward data to loads. */ |
318 class WritebackEvent : public Event { | 434 class WritebackEvent : public Event 435 { |
319 public: 320 /** Constructs a writeback event. */ 321 WritebackEvent(const DynInstPtr &_inst, PacketPtr pkt, 322 LSQUnit *lsq_ptr); 323 324 /** Processes the writeback event. */ 325 void process(); 326 --- 7 unchanged lines hidden (view full) --- 334 /** The packet that would have been sent to memory. */ 335 PacketPtr pkt; 336 337 /** The pointer to the LSQ unit that issued the store. */ 338 LSQUnit<Impl> *lsqPtr; 339 }; 340 341 public: | 436 public: 437 /** Constructs a writeback event. */ 438 WritebackEvent(const DynInstPtr &_inst, PacketPtr pkt, 439 LSQUnit *lsq_ptr); 440 441 /** Processes the writeback event. */ 442 void process(); 443 --- 7 unchanged lines hidden (view full) --- 451 /** The packet that would have been sent to memory. */ 452 PacketPtr pkt; 453 454 /** The pointer to the LSQ unit that issued the store. */ 455 LSQUnit<Impl> *lsqPtr; 456 }; 457 458 public: |
342 struct SQEntry { 343 /** Constructs an empty store queue entry. */ 344 SQEntry() 345 : inst(NULL), req(NULL), size(0), 346 canWB(0), committed(0), completed(0) 347 { 348 std::memset(data, 0, sizeof(data)); 349 } | 459 /** 460 * Handles writing back and completing the load or store that has 461 * returned from memory. 462 * 463 * @param pkt Response packet from the memory sub-system 464 */ 465 bool recvTimingResp(PacketPtr pkt); |
350 | 466 |
351 ~SQEntry() 352 { 353 inst = NULL; 354 } 355 356 /** Constructs a store queue entry for a given instruction. */ 357 SQEntry(const DynInstPtr &_inst) 358 : inst(_inst), req(NULL), sreqLow(NULL), sreqHigh(NULL), size(0), 359 isSplit(0), canWB(0), committed(0), completed(0), isAllZeros(0) 360 { 361 std::memset(data, 0, sizeof(data)); 362 } 363 /** The store data. */ 364 char data[16]; 365 /** The store instruction. */ 366 DynInstPtr inst; 367 /** The request for the store. */ 368 RequestPtr req; 369 /** The split requests for the store. */ 370 RequestPtr sreqLow; 371 RequestPtr sreqHigh; 372 /** The size of the store. */ 373 uint8_t size; 374 /** Whether or not the store is split into two requests. */ 375 bool isSplit; 376 /** Whether or not the store can writeback. */ 377 bool canWB; 378 /** Whether or not the store is committed. */ 379 bool committed; 380 /** Whether or not the store is completed. */ 381 bool completed; 382 /** Does this request write all zeros and thus doesn't 383 * have any data attached to it. Used for cache block zero 384 * style instructs (ARM DC ZVA; ALPHA WH64) 385 */ 386 bool isAllZeros; 387 }; 388 | |
389 private: 390 /** The LSQUnit thread id. */ 391 ThreadID lsqID; | 467 private: 468 /** The LSQUnit thread id. */ 469 ThreadID lsqID; |
392 | 470 public: |
393 /** The store queue. */ | 471 /** The store queue. */ |
394 std::vector<SQEntry> storeQueue; | 472 CircularQueue<SQEntry> storeQueue; |
395 396 /** The load queue. */ | 473 474 /** The load queue. */ |
397 std::vector<DynInstPtr> loadQueue; | 475 LoadQueue loadQueue; |
398 | 476 |
399 /** The number of LQ entries, plus a sentinel entry (circular queue). 400 * @todo: Consider having var that records the true number of LQ entries. 401 */ 402 unsigned LQEntries; 403 /** The number of SQ entries, plus a sentinel entry (circular queue). 404 * @todo: Consider having var that records the true number of SQ entries. 405 */ 406 unsigned SQEntries; 407 | 477 private: |
408 /** The number of places to shift addresses in the LSQ before checking 409 * for dependency violations 410 */ 411 unsigned depCheckShift; 412 413 /** Should loads be checked for dependency issues */ 414 bool checkLoads; 415 416 /** The number of load instructions in the LQ. */ 417 int loads; 418 /** The number of store instructions in the SQ. */ 419 int stores; 420 /** The number of store instructions in the SQ waiting to writeback. */ 421 int storesToWB; 422 | 478 /** The number of places to shift addresses in the LSQ before checking 479 * for dependency violations 480 */ 481 unsigned depCheckShift; 482 483 /** Should loads be checked for dependency issues */ 484 bool checkLoads; 485 486 /** The number of load instructions in the LQ. */ 487 int loads; 488 /** The number of store instructions in the SQ. */ 489 int stores; 490 /** The number of store instructions in the SQ waiting to writeback. */ 491 int storesToWB; 492 |
423 /** The index of the head instruction in the LQ. */ 424 int loadHead; 425 /** The index of the tail instruction in the LQ. */ 426 int loadTail; 427 428 /** The index of the head instruction in the SQ. */ 429 int storeHead; | |
430 /** The index of the first instruction that may be ready to be 431 * written back, and has not yet been written back. 432 */ | 493 /** The index of the first instruction that may be ready to be 494 * written back, and has not yet been written back. 495 */ |
433 int storeWBIdx; 434 /** The index of the tail instruction in the SQ. */ 435 int storeTail; | 496 typename StoreQueue::iterator storeWBIt; |
436 | 497 |
437 /// @todo Consider moving to a more advanced model with write vs read ports 438 /** The number of cache ports available each cycle (stores only). */ 439 int cacheStorePorts; 440 441 /** The number of used cache ports in this cycle by stores. */ 442 int usedStorePorts; 443 444 //list<InstSeqNum> mshrSeqNums; 445 | |
446 /** Address Mask for a cache block (e.g. ~(cache_block_size-1)) */ 447 Addr cacheBlockMask; 448 449 /** Wire to read information from the issue stage time queue. */ 450 typename TimeBuffer<IssueStruct>::wire fromIssue; 451 452 /** Whether or not the LSQ is stalled. */ 453 bool stalled; --- 13 unchanged lines hidden (view full) --- 467 /** Whether or not a store is in flight. */ 468 bool storeInFlight; 469 470 /** The oldest load that caused a memory ordering violation. */ 471 DynInstPtr memDepViolator; 472 473 /** Whether or not there is a packet that couldn't be sent because of 474 * a lack of cache ports. */ | 498 /** Address Mask for a cache block (e.g. ~(cache_block_size-1)) */ 499 Addr cacheBlockMask; 500 501 /** Wire to read information from the issue stage time queue. */ 502 typename TimeBuffer<IssueStruct>::wire fromIssue; 503 504 /** Whether or not the LSQ is stalled. */ 505 bool stalled; --- 13 unchanged lines hidden (view full) --- 519 /** Whether or not a store is in flight. */ 520 bool storeInFlight; 521 522 /** The oldest load that caused a memory ordering violation. */ 523 DynInstPtr memDepViolator; 524 525 /** Whether or not there is a packet that couldn't be sent because of 526 * a lack of cache ports. */ |
475 bool hasPendingPkt; | 527 bool hasPendingRequest; |
476 477 /** The packet that is pending free cache ports. */ | 528 529 /** The packet that is pending free cache ports. */ |
478 PacketPtr pendingPkt; | 530 LSQRequest* pendingRequest; |
479 | 531
480     /** Flag for memory model. */ | 532     /** Flag for memory model. */
481     bool needsTSO; | 533     bool needsTSO;
482 | 534
483     // Will also need how many read/write ports the Dcache has. Or keep track | 535     // Will also need how many read/write ports the Dcache has. Or keep track
484     // of that in stage that is one level up, and only call executeLoad/Store | 536     // of that in stage that is one level up, and only call executeLoad/Store
485     // the appropriate number of times. | 537     // the appropriate number of times.
486     /** Total number of loads forwarded from LSQ stores. */ | 538     /** Total number of loads forwarded from LSQ stores. */
--- 24 unchanged lines hidden ---
511     /** Number of loads that were rescheduled. */ | 563     /** Number of loads that were rescheduled. */
512     Stats::Scalar lsqRescheduledLoads; | 564     Stats::Scalar lsqRescheduledLoads;
513 | 565
514     /** Number of times the LSQ is blocked due to the cache. */ | 566     /** Number of times the LSQ is blocked due to the cache. */
515     Stats::Scalar lsqCacheBlocked; | 567     Stats::Scalar lsqCacheBlocked;
516 | 568
517   public: | 569   public:
518     /** Executes the load at the given index. */ | 570     /** Executes the load at the given index. */
519 Fault read(const RequestPtr &req, 520 RequestPtr &sreqLow, RequestPtr &sreqHigh, 521 int load_idx); | 571 Fault read(LSQRequest *req, int load_idx); |
522 523 /** Executes the store at the given index. */ | 572 573 /** Executes the store at the given index. */ |
524 Fault write(const RequestPtr &req, 525 const RequestPtr &sreqLow, const RequestPtr &sreqHigh, 526 uint8_t *data, int store_idx); | 574 Fault write(LSQRequest *req, uint8_t *data, int store_idx); |
527 528 /** Returns the index of the head load instruction. */ | 575 576 /** Returns the index of the head load instruction. */ |
529 int getLoadHead() { return loadHead; } | 577 int getLoadHead() { return loadQueue.head(); } 578 |
530 /** Returns the sequence number of the head load instruction. */ | 579 /** Returns the sequence number of the head load instruction. */ |
531 InstSeqNum getLoadHeadSeqNum() | 580 InstSeqNum 581 getLoadHeadSeqNum() |
532 { | 582 { |
533 if (loadQueue[loadHead]) { 534 return loadQueue[loadHead]->seqNum; 535 } else { 536 return 0; 537 } 538 | 583 return loadQueue.front().valid() 584 ? loadQueue.front().instruction()->seqNum 585 : 0; |
539 } 540 541 /** Returns the index of the head store instruction. */ | 586 } 587 588 /** Returns the index of the head store instruction. */ |
542 int getStoreHead() { return storeHead; } | 589 int getStoreHead() { return storeQueue.head(); } |
543 /** Returns the sequence number of the head store instruction. */ | 590 /** Returns the sequence number of the head store instruction. */ |
544 InstSeqNum getStoreHeadSeqNum() | 591 InstSeqNum 592 getStoreHeadSeqNum() |
545 { | 593 { |
546 if (storeQueue[storeHead].inst) { 547 return storeQueue[storeHead].inst->seqNum; 548 } else { 549 return 0; 550 } 551 | 594 return storeQueue.front().valid() 595 ? storeQueue.front().instruction()->seqNum 596 : 0; |
552 } 553 554 /** Returns whether or not the LSQ unit is stalled. */ 555 bool isStalled() { return stalled; } | 597 } 598 599 /** Returns whether or not the LSQ unit is stalled. */ 600 bool isStalled() { return stalled; } |
601 public: 602 typedef typename CircularQueue<LQEntry>::iterator LQIterator; 603 typedef typename CircularQueue<SQEntry>::iterator SQIterator; 604 typedef CircularQueue<LQEntry> LQueue; 605 typedef CircularQueue<SQEntry> SQueue; |
|
556}; 557 558template <class Impl> 559Fault | 606}; 607 608template <class Impl> 609Fault |
560LSQUnit<Impl>::read(const RequestPtr &req, 561 RequestPtr &sreqLow, RequestPtr &sreqHigh, 562 int load_idx) | 610LSQUnit<Impl>::read(LSQRequest *req, int load_idx) |
563{ | 611{ |
564 DynInstPtr load_inst = loadQueue[load_idx]; | 612 LQEntry& load_req = loadQueue[load_idx]; 613 const DynInstPtr& load_inst = load_req.instruction(); |
565 | 614 |
615 load_req.setRequest(req); |
|
566 assert(load_inst); 567 568 assert(!load_inst->isExecuted()); 569 570 // Make sure this isn't a strictly ordered load 571 // A bit of a hackish way to get strictly ordered accesses to work 572 // only if they're at the head of the LSQ and are ready to commit 573 // (at the head of the ROB too). | 616 assert(load_inst); 617 618 assert(!load_inst->isExecuted()); 619 620 // Make sure this isn't a strictly ordered load 621 // A bit of a hackish way to get strictly ordered accesses to work 622 // only if they're at the head of the LSQ and are ready to commit 623 // (at the head of the ROB too). |
574 if (req->isStrictlyOrdered() && 575 (load_idx != loadHead || !load_inst->isAtCommit())) { | 624 625 if (req->mainRequest()->isStrictlyOrdered() && 626 (load_idx != loadQueue.head() || !load_inst->isAtCommit())) { 627 // Tell IQ/mem dep unit that this instruction will need to be 628 // rescheduled eventually |
576 iewStage->rescheduleMemInst(load_inst); | 629 iewStage->rescheduleMemInst(load_inst); |
630 load_inst->clearIssued(); 631 load_inst->effAddrValid(false); |
|
577 ++lsqRescheduledLoads; 578 DPRINTF(LSQUnit, "Strictly ordered load [sn:%lli] PC %s\n", 579 load_inst->seqNum, load_inst->pcState()); 580 | 632 ++lsqRescheduledLoads; 633 DPRINTF(LSQUnit, "Strictly ordered load [sn:%lli] PC %s\n", 634 load_inst->seqNum, load_inst->pcState()); 635 |
636 // Must delete request now that it wasn't handed off to 637 // memory. This is quite ugly. @todo: Figure out the proper 638 // place to really handle request deletes. 639 load_req.setRequest(nullptr); 640 req->discard(); |
|
581 return std::make_shared<GenericISA::M5PanicFault>( 582 "Strictly ordered load [sn:%llx] PC %s\n", 583 load_inst->seqNum, load_inst->pcState()); 584 } 585 | 641 return std::make_shared<GenericISA::M5PanicFault>( 642 "Strictly ordered load [sn:%llx] PC %s\n", 643 load_inst->seqNum, load_inst->pcState()); 644 } 645 |
586 // Check the SQ for any previous stores that might lead to forwarding 587 int store_idx = load_inst->sqIdx; 588 589 int store_size = 0; 590 | |
591 DPRINTF(LSQUnit, "Read called, load idx: %i, store idx: %i, " 592 "storeHead: %i addr: %#x%s\n", | 646 DPRINTF(LSQUnit, "Read called, load idx: %i, store idx: %i, " 647 "storeHead: %i addr: %#x%s\n", |
593 load_idx, store_idx, storeHead, req->getPaddr(), 594 sreqLow ? " split" : ""); | 648 load_idx - 1, load_inst->sqIt._idx, storeQueue.head() - 1, 649 req->mainRequest()->getPaddr(), req->isSplit() ? " split" : ""); |
595 | 650 |
596 if (req->isLLSC()) { 597 assert(!sreqLow); | 651 if (req->mainRequest()->isLLSC()) { |
598 // Disable recording the result temporarily. Writing to misc 599 // regs normally updates the result, but this is not the 600 // desired behavior when handling store conditionals. 601 load_inst->recordResult(false); | 652 // Disable recording the result temporarily. Writing to misc 653 // regs normally updates the result, but this is not the 654 // desired behavior when handling store conditionals. 655 load_inst->recordResult(false); |
602 TheISA::handleLockedRead(load_inst.get(), req); | 656 TheISA::handleLockedRead(load_inst.get(), req->mainRequest()); |
603 load_inst->recordResult(true); 604 } 605 | 657 load_inst->recordResult(true); 658 } 659 |
606 if (req->isMmappedIpr()) { | 660 if (req->mainRequest()->isMmappedIpr()) { |
607 assert(!load_inst->memData); 608 load_inst->memData = new uint8_t[64]; 609 610 ThreadContext *thread = cpu->tcBase(lsqID); | 661 assert(!load_inst->memData); 662 load_inst->memData = new uint8_t[64]; 663 664 ThreadContext *thread = cpu->tcBase(lsqID); |
611 Cycles delay(0); 612 PacketPtr data_pkt = new Packet(req, MemCmd::ReadReq); | 665 PacketPtr main_pkt = new Packet(req->mainRequest(), MemCmd::ReadReq); |
613 | 666 |
614 data_pkt->dataStatic(load_inst->memData); 615 if (!TheISA::HasUnalignedMemAcc || !sreqLow) { 616 delay = TheISA::handleIprRead(thread, data_pkt); 617 } else { 618 assert(sreqLow->isMmappedIpr() && sreqHigh->isMmappedIpr()); 619 PacketPtr fst_data_pkt = new Packet(sreqLow, MemCmd::ReadReq); 620 PacketPtr snd_data_pkt = new Packet(sreqHigh, MemCmd::ReadReq); | 667 Cycles delay = req->handleIprRead(thread, main_pkt); |
621 | 668 |
622 fst_data_pkt->dataStatic(load_inst->memData); 623 snd_data_pkt->dataStatic(load_inst->memData + sreqLow->getSize()); 624 625 delay = TheISA::handleIprRead(thread, fst_data_pkt); 626 Cycles delay2 = TheISA::handleIprRead(thread, snd_data_pkt); 627 if (delay2 > delay) 628 delay = delay2; 629 630 delete fst_data_pkt; 631 delete snd_data_pkt; 632 } 633 WritebackEvent *wb = new WritebackEvent(load_inst, data_pkt, this); | 669 WritebackEvent *wb = new WritebackEvent(load_inst, main_pkt, this); |
634 cpu->schedule(wb, cpu->clockEdge(delay)); 635 return NoFault; 636 } 637 | 670 cpu->schedule(wb, cpu->clockEdge(delay)); 671 return NoFault; 672 } 673 |
638 while (store_idx != -1) { 639 // End once we've reached the top of the LSQ 640 if (store_idx == storeWBIdx) { 641 break; 642 } 643 | 674 // Check the SQ for any previous stores that might lead to forwarding 675 auto store_it = load_inst->sqIt; 676 assert (store_it >= storeWBIt); 677 // End once we've reached the top of the LSQ 678 while (store_it != storeWBIt) { |
644 // Move the index to one younger | 679 // Move the index to one younger |
645 if (--store_idx < 0) 646 store_idx += SQEntries; | 680 store_it--; 681 assert(store_it->valid()); 682 assert(store_it->instruction()->seqNum < load_inst->seqNum); 683 int store_size = store_it->size(); |
647 | 684 |
648 assert(storeQueue[store_idx].inst); | 685 // Cache maintenance instructions go down via the store 686 // path but they carry no data and they shouldn't be 687 // considered for forwarding 688 if (store_size != 0 && !store_it->instruction()->strictlyOrdered() && 689 !(store_it->request()->mainRequest() && 690 store_it->request()->mainRequest()->isCacheMaintenance())) { 691 assert(store_it->instruction()->effAddrValid()); |
649 | 692 |
650 store_size = storeQueue[store_idx].size; | 693 // Check if the store data is within the lower and upper bounds of 694 // addresses that the request needs. 695 auto req_s = req->mainRequest()->getVaddr(); 696 auto req_e = req_s + req->mainRequest()->getSize(); 697 auto st_s = store_it->instruction()->effAddr; 698 auto st_e = st_s + store_size; |
651 | 699 |
652 if (!store_size || storeQueue[store_idx].inst->strictlyOrdered() || 653 (storeQueue[store_idx].req && 654 storeQueue[store_idx].req->isCacheMaintenance())) { 655 // Cache maintenance instructions go down via the store 656 // path but they carry no data and they shouldn't be 657 // considered for forwarding 658 continue; 659 } | 700 bool store_has_lower_limit = req_s >= st_s; 701 bool store_has_upper_limit = req_e <= st_e; 702 bool lower_load_has_store_part = req_s < st_e; 703 bool upper_load_has_store_part = req_e > st_s; |
660 | 704 |
661 assert(storeQueue[store_idx].inst->effAddrValid()); | 705 // If the store's data has all of the data needed and the load 706 // isn't LLSC then 707 // we can forward. 708 if (store_has_lower_limit && store_has_upper_limit && 709 !req->mainRequest()->isLLSC()) { |
662 | 710 |
663 // Check if the store data is within the lower and upper bounds of 664 // addresses that the request needs. 665 bool store_has_lower_limit = 666 req->getVaddr() >= storeQueue[store_idx].inst->effAddr; 667 bool store_has_upper_limit = 668 (req->getVaddr() + req->getSize()) <= 669 (storeQueue[store_idx].inst->effAddr + store_size); 670 bool lower_load_has_store_part = 671 req->getVaddr() < (storeQueue[store_idx].inst->effAddr + 672 store_size); 673 bool upper_load_has_store_part = 674 (req->getVaddr() + req->getSize()) > 675 storeQueue[store_idx].inst->effAddr; | 711 // Get shift amount for offset into the store's data. 712 int shift_amt = req->mainRequest()->getVaddr() - 713 store_it->instruction()->effAddr; |
676 | 714 |
677 // If the store's data has all of the data needed and the load isn't 678 // LLSC, we can forward. 679 if (store_has_lower_limit && store_has_upper_limit && !req->isLLSC()) { 680 // Get shift amount for offset into the store's data. 681 int shift_amt = req->getVaddr() - storeQueue[store_idx].inst->effAddr; | 715 // Allocate memory if this is the first time a load is issued. 716 if (!load_inst->memData) { 717 load_inst->memData = 718 new uint8_t[req->mainRequest()->getSize()]; 719 } 720 if (store_it->isAllZeros()) 721 memset(load_inst->memData, 0, 722 req->mainRequest()->getSize()); 723 else 724 memcpy(load_inst->memData, 725 store_it->data() + shift_amt, 726 req->mainRequest()->getSize()); |
682 | 727 |
683 // Allocate memory if this is the first time a load is issued. 684 if (!load_inst->memData) { 685 load_inst->memData = new uint8_t[req->getSize()]; 686 } 687 if (storeQueue[store_idx].isAllZeros) 688 memset(load_inst->memData, 0, req->getSize()); 689 else 690 memcpy(load_inst->memData, 691 storeQueue[store_idx].data + shift_amt, req->getSize()); | 728 DPRINTF(LSQUnit, "Forwarding from store idx %i to load to " 729 "addr %#x\n", store_it._idx, 730 req->mainRequest()->getVaddr()); |
692 | 731 |
693 DPRINTF(LSQUnit, "Forwarding from store idx %i to load to " 694 "addr %#x\n", store_idx, req->getVaddr()); | 732 PacketPtr data_pkt = new Packet(req->mainRequest(), 733 MemCmd::ReadReq); 734 data_pkt->dataStatic(load_inst->memData); |
695 | 735 |
696 PacketPtr data_pkt = new Packet(req, MemCmd::ReadReq); 697 data_pkt->dataStatic(load_inst->memData); | 736 if (req->isAnyOutstandingRequest()) { 737 assert(req->_numOutstandingPackets > 0); 738 // There are memory requests packets in flight already. 739 // This may happen if the store was not complete the 740 // first time this load got executed. Signal the senderSate 741 // that response packets should be discarded. 742 req->discardSenderState(); 743 } |
698 | 744 |
699 WritebackEvent *wb = new WritebackEvent(load_inst, data_pkt, this); | 745 WritebackEvent *wb = new WritebackEvent(load_inst, data_pkt, 746 this); |
700 | 747 |
701 // We'll say this has a 1 cycle load-store forwarding latency 702 // for now. 703 // @todo: Need to make this a parameter. 704 cpu->schedule(wb, curTick()); | 748 // We'll say this has a 1 cycle load-store forwarding latency 749 // for now. 750 // @todo: Need to make this a parameter. 751 cpu->schedule(wb, curTick()); |
705 | 752 |
706 ++lsqForwLoads; 707 return NoFault; 708 } else if ( 709 (!req->isLLSC() && | 753 // Don't need to do anything special for split loads. 754 ++lsqForwLoads; 755 756 return NoFault; 757 } else if ( 758 (!req->mainRequest()->isLLSC() && |
710 ((store_has_lower_limit && lower_load_has_store_part) || 711 (store_has_upper_limit && upper_load_has_store_part) || 712 (lower_load_has_store_part && upper_load_has_store_part))) || | 759 ((store_has_lower_limit && lower_load_has_store_part) || 760 (store_has_upper_limit && upper_load_has_store_part) || 761 (lower_load_has_store_part && upper_load_has_store_part))) || |
713 (req->isLLSC() && | 762 (req->mainRequest()->isLLSC() && |
714 ((store_has_lower_limit || upper_load_has_store_part) && 715 (store_has_upper_limit || lower_load_has_store_part)))) { | 763 ((store_has_lower_limit || upper_load_has_store_part) && 764 (store_has_upper_limit || lower_load_has_store_part)))) { |
716 // This is the partial store-load forwarding case where a store 717 // has only part of the load's data and the load isn't LLSC or 718 // the load is LLSC and the store has all or part of the load's 719 // data | 765 // This is the partial store-load forwarding case where a store 766 // has only part of the load's data and the load isn't LLSC or 767 // the load is LLSC and the store has all or part of the load's 768 // data |
720 | 769 |
721 // If it's already been written back, then don't worry about 722 // stalling on it. 723 if (storeQueue[store_idx].completed) { 724 panic("Should not check one of these"); 725 continue; 726 } | 770 // If it's already been written back, then don't worry about 771 // stalling on it. 772 if (store_it->completed()) { 773 panic("Should not check one of these"); 774 continue; 775 } |
727 | 776 |
728 // Must stall load and force it to retry, so long as it's the oldest 729 // load that needs to do so. 730 if (!stalled || 731 (stalled && 732 load_inst->seqNum < 733 loadQueue[stallingLoadIdx]->seqNum)) { 734 stalled = true; 735 stallingStoreIsn = storeQueue[store_idx].inst->seqNum; 736 stallingLoadIdx = load_idx; 737 } | 777 // Must stall load and force it to retry, so long as it's the 778 // oldest load that needs to do so. 779 if (!stalled || 780 (stalled && 781 load_inst->seqNum < 782 loadQueue[stallingLoadIdx].instruction()->seqNum)) { 783 stalled = true; 784 stallingStoreIsn = store_it->instruction()->seqNum; 785 stallingLoadIdx = load_idx; 786 } |
738 | 787 |
739 // Tell IQ/mem dep unit that this instruction will need to be 740 // rescheduled eventually 741 iewStage->rescheduleMemInst(load_inst); 742 load_inst->clearIssued(); 743 ++lsqRescheduledLoads; | 788 // Tell IQ/mem dep unit that this instruction will need to be 789 // rescheduled eventually 790 iewStage->rescheduleMemInst(load_inst); 791 load_inst->clearIssued(); 792 load_inst->effAddrValid(false); 793 ++lsqRescheduledLoads; |
744 | 794 |
745 // Do not generate a writeback event as this instruction is not 746 // complete. 747 DPRINTF(LSQUnit, "Load-store forwarding mis-match. " 748 "Store idx %i to load addr %#x\n", 749 store_idx, req->getVaddr()); | 795 // Do not generate a writeback event as this instruction is not 796 // complete. 797 DPRINTF(LSQUnit, "Load-store forwarding mis-match. " 798 "Store idx %i to load addr %#x\n", 799 store_it._idx, req->mainRequest()->getVaddr()); |
750 | 800 |
751 return NoFault; | 801 // Must discard the request. 802 req->discard(); 803 load_req.setRequest(nullptr); 804 return NoFault; 805 } |
752 } 753 } 754 755 // If there's no forwarding case, then go access memory 756 DPRINTF(LSQUnit, "Doing memory access for inst [sn:%lli] PC %s\n", 757 load_inst->seqNum, load_inst->pcState()); 758 759 // Allocate memory if this is the first time a load is issued. 760 if (!load_inst->memData) { | 806 } 807 } 808 809 // If there's no forwarding case, then go access memory 810 DPRINTF(LSQUnit, "Doing memory access for inst [sn:%lli] PC %s\n", 811 load_inst->seqNum, load_inst->pcState()); 812 813 // Allocate memory if this is the first time a load is issued. 814 if (!load_inst->memData) { |
761 load_inst->memData = new uint8_t[req->getSize()]; | 815 load_inst->memData = new uint8_t[req->mainRequest()->getSize()]; |
762 } 763 | 816 } 817 |
764 // if we the cache is not blocked, do cache access 765 bool completedFirst = false; 766 PacketPtr data_pkt = Packet::createRead(req); 767 PacketPtr fst_data_pkt = NULL; 768 PacketPtr snd_data_pkt = NULL; 769 770 data_pkt->dataStatic(load_inst->memData); 771 772 LSQSenderState *state = new LSQSenderState; 773 state->isLoad = true; 774 state->idx = load_idx; 775 state->inst = load_inst; 776 data_pkt->senderState = state; 777 778 if (!TheISA::HasUnalignedMemAcc || !sreqLow) { 779 // Point the first packet at the main data packet. 780 fst_data_pkt = data_pkt; 781 } else { 782 // Create the split packets. 783 fst_data_pkt = Packet::createRead(sreqLow); 784 snd_data_pkt = Packet::createRead(sreqHigh); 785 786 fst_data_pkt->dataStatic(load_inst->memData); 787 snd_data_pkt->dataStatic(load_inst->memData + sreqLow->getSize()); 788 789 fst_data_pkt->senderState = state; 790 snd_data_pkt->senderState = state; 791 792 state->isSplit = true; 793 state->outstanding = 2; 794 state->mainPkt = data_pkt; 795 } 796 | |
797 // For now, load throughput is constrained by the number of 798 // load FUs only, and loads do not consume a cache port (only 799 // stores do). 800 // @todo We should account for cache port contention 801 // and arbitrate between loads and stores. | 818 // For now, load throughput is constrained by the number of 819 // load FUs only, and loads do not consume a cache port (only 820 // stores do). 821 // @todo We should account for cache port contention 822 // and arbitrate between loads and stores. |
802 bool successful_load = true; 803 if (!dcachePort->sendTimingReq(fst_data_pkt)) { 804 successful_load = false; 805 } else if (TheISA::HasUnalignedMemAcc && sreqLow) { 806 completedFirst = true; | |
807 | 823 |
808         // The first packet was sent without problems, so send this one
809         // too. If there is a problem with this packet then the whole
810         // load will be squashed, so indicate this to the state object.
811         // The first packet will return in completeDataAccess and be
812         // handled there.
813         // @todo We should also account for cache port contention
814         // here.
815         if (!dcachePort->sendTimingReq(snd_data_pkt)) {
816             // The main packet will be deleted in completeDataAccess.
817             state->complete();
818             // Signify to 1st half that the 2nd half was blocked via state
819             state->cacheBlocked = true;
820             successful_load = false;
821         }
|
824     // if the cache is not blocked, do cache access
825     if (req->senderState() == nullptr) {
826         LQSenderState *state = new LQSenderState(
827                 loadQueue.getIterator(load_idx));
828         state->isLoad = true;
829         state->inst = load_inst;
830         state->isSplit = req->isSplit();
831         req->senderState(state);
822 } | 832 } |
823 824 // If the cache was blocked, or has become blocked due to the access, 825 // handle it. 826 if (!successful_load) { 827 if (!sreqLow) { 828 // Packet wasn't split, just delete main packet info 829 delete state; 830 delete data_pkt; 831 } 832 833 if (TheISA::HasUnalignedMemAcc && sreqLow) { 834 if (!completedFirst) { 835 // Split packet, but first failed. Delete all state. 836 delete state; 837 delete data_pkt; 838 delete fst_data_pkt; 839 delete snd_data_pkt; 840 sreqLow.reset(); 841 sreqHigh.reset(); 842 } else { 843 // Can't delete main packet data or state because first packet 844 // was sent to the memory system 845 delete data_pkt; 846 delete snd_data_pkt; 847 sreqHigh.reset(); 848 } 849 } 850 851 ++lsqCacheBlocked; 852 | 833 req->buildPackets(); 834 req->sendPacketToCache(); 835 if (!req->isSent()) |
853 iewStage->blockMemInst(load_inst); 854 | 836 iewStage->blockMemInst(load_inst); 837 |
855 // No fault occurred, even though the interface is blocked. 856 return NoFault; 857 } 858 | |
859 return NoFault; 860} 861 862template <class Impl> 863Fault | 838 return NoFault; 839} 840 841template <class Impl> 842Fault |
864LSQUnit<Impl>::write(const RequestPtr &req, 865 const RequestPtr &sreqLow, const RequestPtr &sreqHigh, 866 uint8_t *data, int store_idx) | 843LSQUnit<Impl>::write(LSQRequest *req, uint8_t *data, int store_idx) |
867{ | 844{ |
868 assert(storeQueue[store_idx].inst); | 845 assert(storeQueue[store_idx].valid()); |
869 | 846 |
870 DPRINTF(LSQUnit, "Doing write to store idx %i, addr %#x" 871 " | storeHead:%i [sn:%i]\n", 872 store_idx, req->getPaddr(), storeHead, 873 storeQueue[store_idx].inst->seqNum); | 847 DPRINTF(LSQUnit, "Doing write to store idx %i, addr %#x | storeHead:%i " 848 "[sn:%i]\n", 849 store_idx - 1, req->request()->getPaddr(), storeQueue.head() - 1, 850 storeQueue[store_idx].instruction()->seqNum); |
874 | 851 |
875 storeQueue[store_idx].req = req; 876 storeQueue[store_idx].sreqLow = sreqLow; 877 storeQueue[store_idx].sreqHigh = sreqHigh; 878 unsigned size = req->getSize(); 879 storeQueue[store_idx].size = size; 880 bool store_no_data = req->getFlags() & Request::STORE_NO_DATA; 881 storeQueue[store_idx].isAllZeros = store_no_data; 882 assert(size <= sizeof(storeQueue[store_idx].data) || store_no_data); | 852 storeQueue[store_idx].setRequest(req); 853 unsigned size = req->_size; 854 storeQueue[store_idx].size() = size; 855 bool store_no_data = 856 req->mainRequest()->getFlags() & Request::STORE_NO_DATA; 857 storeQueue[store_idx].isAllZeros() = store_no_data; 858 assert(size <= SQEntry::DataSize || store_no_data); |
883 | 859 |
884 // Split stores can only occur in ISAs with unaligned memory accesses. If 885 // a store request has been split, sreqLow and sreqHigh will be non-null. 886 if (TheISA::HasUnalignedMemAcc && sreqLow) { 887 storeQueue[store_idx].isSplit = true; 888 } | 860 if (!(req->request()->getFlags() & Request::CACHE_BLOCK_ZERO) && 861 !req->request()->isCacheMaintenance()) 862 memcpy(storeQueue[store_idx].data(), data, size); |
889 | 863 |
890 if (!(req->getFlags() & Request::CACHE_BLOCK_ZERO) && \ 891 !req->isCacheMaintenance()) 892 memcpy(storeQueue[store_idx].data, data, size); 893 | |
894 // This function only writes the data to the store queue, so no fault 895 // can happen here. 896 return NoFault; 897} 898 899#endif // __CPU_O3_LSQ_UNIT_HH__ | 864 // This function only writes the data to the store queue, so no fault 865 // can happen here. 866 return NoFault; 867} 868 869#endif // __CPU_O3_LSQ_UNIT_HH__ |
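
As an aside to the diff above: the store-to-load forwarding decision in read() reduces, in both revisions, to comparing the load's byte window [req_s, req_e) against each older store's window [st_s, st_e) and then picking one of three outcomes (forward, stall and replay, or keep scanning). The standalone C++ sketch below mirrors those four booleans and that three-way decision; the names classify and ForwardDecision are invented for illustration and are not part of gem5.

    // Illustrative sketch only, not gem5 code: reproduces the overlap test
    // and decision structure used in LSQUnit<Impl>::read() above.
    #include <cstdint>
    #include <cstdio>

    enum class ForwardDecision { NoOverlap, FullForward, PartialStall };

    // Mirrors store_has_lower_limit / store_has_upper_limit /
    // lower_load_has_store_part / upper_load_has_store_part.
    ForwardDecision
    classify(uint64_t load_addr, unsigned load_size,
             uint64_t store_addr, unsigned store_size, bool load_is_llsc)
    {
        const uint64_t req_s = load_addr;
        const uint64_t req_e = load_addr + load_size;
        const uint64_t st_s  = store_addr;
        const uint64_t st_e  = store_addr + store_size;

        const bool store_has_lower_limit = req_s >= st_s;
        const bool store_has_upper_limit = req_e <= st_e;
        const bool lower_load_has_store_part = req_s < st_e;
        const bool upper_load_has_store_part = req_e > st_s;

        // Store covers the whole load and the load is not LL/SC:
        // the data can be forwarded straight from the store queue entry.
        if (store_has_lower_limit && store_has_upper_limit && !load_is_llsc)
            return ForwardDecision::FullForward;

        // Partial overlap (or any overlap for an LL/SC load): the load must
        // stall until the store writes back, and is then replayed.
        if ((!load_is_llsc &&
             ((store_has_lower_limit && lower_load_has_store_part) ||
              (store_has_upper_limit && upper_load_has_store_part) ||
              (lower_load_has_store_part && upper_load_has_store_part))) ||
            (load_is_llsc &&
             ((store_has_lower_limit || upper_load_has_store_part) &&
              (store_has_upper_limit || lower_load_has_store_part))))
            return ForwardDecision::PartialStall;

        // No overlap: keep walking older store queue entries.
        return ForwardDecision::NoOverlap;
    }

    int main()
    {
        // 4-byte load at 0x104 inside an 8-byte store at 0x100: full forward.
        std::printf("%d\n", (int)classify(0x104, 4, 0x100, 8, false));
        // 8-byte load at 0x104 overlapping only the tail of that store: stall.
        std::printf("%d\n", (int)classify(0x104, 8, 0x100, 8, false));
        // Disjoint windows: no overlap.
        std::printf("%d\n", (int)classify(0x200, 4, 0x100, 8, false));
        return 0;
    }

The newer revision computes the same four predicates from req->mainRequest() and the StoreQueue iterator instead of raw indices, but the interval logic itself is unchanged.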