lsq.hh revision 10379
/*
 * Copyright (c) 2013-2014 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andrew Bardsley
 */

/**
 * @file
 *
 * A load/store queue that allows outstanding reads and writes.
 *
 */

#ifndef __CPU_MINOR_NEW_LSQ_HH__
#define __CPU_MINOR_NEW_LSQ_HH__

#include "cpu/minor/buffers.hh"
#include "cpu/minor/cpu.hh"
#include "cpu/minor/pipe_data.hh"
#include "cpu/minor/trace.hh"

namespace Minor
{

/* Forward declaration */
class Execute;

class LSQ : public Named
{
  protected:
    /** My owner(s) */
    MinorCPU &cpu;
    Execute &execute;

  protected:
    /** State of memory access for head access. */
    enum MemoryState
    {
        MemoryRunning, /* Default.  Step dcache queues when possible. */
        MemoryNeedsRetry /* Request rejected, will be asked to retry */
    };

    /** Print MemoryState values as shown in the enum definition */
    friend std::ostream &operator <<(std::ostream &os,
        MemoryState state);

    /** Coverage of one address range with another */
    enum AddrRangeCoverage
    {
        PartialAddrRangeCoverage, /* Two ranges partly overlap */
        FullAddrRangeCoverage, /* One range fully covers another */
        NoAddrRangeCoverage /* Two ranges are disjoint */
    };

    /** Exposable data port */
    class DcachePort : public MinorCPU::MinorCPUPort
    {
      protected:
        /** My owner */
        LSQ &lsq;

      public:
        DcachePort(std::string name, LSQ &lsq_, MinorCPU &cpu) :
            MinorCPU::MinorCPUPort(name, cpu), lsq(lsq_)
        { }

      protected:
        bool recvTimingResp(PacketPtr pkt)
        { return lsq.recvTimingResp(pkt); }

        void recvRetry() { lsq.recvRetry(); }

        void recvTimingSnoopReq(PacketPtr pkt)
        { return lsq.recvTimingSnoopReq(pkt); }
    };

    DcachePort dcachePort;

  public:
    /** Derived SenderState to carry data access info. through address
     *  translation, the queues in this port and back from the memory
     *  system. */
    class LSQRequest :
        public BaseTLB::Translation, /* For TLB lookups */
        public Packet::SenderState /* For packing into a Packet */
    {
      public:
        /** Owning port */
        LSQ &port;

        /** Instruction which made this request */
        MinorDynInstPtr inst;

        /** Load/store indication used for building packet.  This isn't
         *  carried by Request so we need to keep it here */
        bool isLoad;

        /** Dynamically allocated and populated data carried for
         *  building write packets */
        PacketDataPtr data;

        /* Requests carry packets on their way to the memory system.
         *  When a Packet returns from the memory system, its
         *  request needs to have its packet updated as this
         *  may have changed in flight */
        PacketPtr packet;
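
        /* Illustrative sketch (not part of the original header): because
         * LSQRequest is a Packet::SenderState, the owning LSQ can recover
         * the request from a response packet arriving at the data port and
         * hand the packet back to it, along the lines of:
         *
         *   LSQRequestPtr request =
         *       safe_cast<LSQRequestPtr>(pkt->popSenderState());
         *   request->retireResponse(pkt);
         *
         * The actual handling lives in LSQ::recvTimingResp in the
         * accompanying .cc file and may differ in detail from this sketch. */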

        /** The underlying request of this LSQRequest */
        Request request;

        /** Fault generated performing this request */
        Fault fault;

        /** Res from pushRequest */
        uint64_t *res;

        /** Was skipped.  Set to indicate any reason (faulted, bad
         *  stream sequence number, in a fault shadow) that this
         *  request did not perform a memory transfer */
        bool skipped;

        /** This is an access other than a normal cacheable load
         *  that's visited the memory system */
        bool issuedToMemory;

        enum LSQRequestState
        {
            NotIssued, /* Newly created */
            InTranslation, /* TLB accessed, no reply yet */
            Translated, /* Finished address translation */
            Failed, /* The starting state of FailedDataRequests */
            RequestIssuing, /* Load/store issued to memory in the requests
                queue */
            StoreToStoreBuffer, /* Store in transfers on its way to the
                store buffer */
            RequestNeedsRetry, /* Retry needed for load */
            StoreInStoreBuffer, /* Store in the store buffer, before issuing
                a memory transfer */
            StoreBufferIssuing, /* Store in store buffer and has been
                issued */
            StoreBufferNeedsRetry, /* Retry needed for store */
            /* All completed states.  Includes
                completed loads, TLB faults and skipped requests whose
                seqNum's no longer match */
            Complete
        };

        LSQRequestState state;

      protected:
        /** BaseTLB::Translation interface */
        void markDelayed() { }

      public:
        LSQRequest(LSQ &port_, MinorDynInstPtr inst_, bool isLoad_,
            PacketDataPtr data_ = NULL, uint64_t *res_ = NULL);

        virtual ~LSQRequest();

      public:
        /** Make a packet to use with the memory transaction */
        void makePacket();

        /** Was no memory access attempted for this request? */
        bool skippedMemAccess() { return skipped; }

        /** Set this request as having been skipped before a memory
         *  transfer was attempted */
        void setSkipped() { skipped = true; }

        /** Does address range req1 (req1_addr to req1_addr + req1_size - 1)
         *  fully cover, partially cover or not cover at all the range req2 */
        static AddrRangeCoverage containsAddrRangeOf(
            Addr req1_addr, unsigned int req1_size,
            Addr req2_addr, unsigned int req2_size);

        /** Does this request's address range fully cover the range
         *  of other_request? */
        AddrRangeCoverage containsAddrRangeOf(LSQRequest *other_request);

        /** Start the address translation process for this request.  This
         *  will issue a translation request to the TLB. */
        virtual void startAddrTranslation() = 0;

        /** Get the next packet to issue for this request.  For split
         *  transfers, it will be necessary to step through the available
         *  packets by calling do { getHeadPacket ; stepToNextPacket } while
         *  (!sentAllPackets) and by retiring responses using
         *  retireResponse */
        virtual PacketPtr getHeadPacket() = 0;

        /** Step to the next packet for the next call to getHeadPacket */
        virtual void stepToNextPacket() = 0;

        /** Have all packets been sent? */
        virtual bool sentAllPackets() = 0;

        /** True if this request has any issued packets in the memory
         *  system and so can't be interrupted until it gets responses */
        virtual bool hasPacketsInMemSystem() = 0;

        /** Retire a response packet into the LSQRequest packet possibly
         *  completing this transfer */
        virtual void retireResponse(PacketPtr packet_) = 0;
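
        /* Illustrative sketch (not part of the original header) of the
         * issuing loop described above.  Assuming `request` is a translated
         * LSQRequestPtr and `port` is the data port, an issuer steps through
         * the request's packets roughly as follows:
         *
         *   while (!request->sentAllPackets()) {
         *       PacketPtr pkt = request->getHeadPacket();
         *       if (!port.sendTimingReq(pkt))
         *           break;  // Rejected; try again when the port retries
         *       request->stepToNextPacket();
         *   }
         *
         * Responses later come back through the port and are handed to the
         * request via retireResponse. */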

        /** Is this request a barrier? */
        virtual bool isBarrier();

        /** This request, once processed by the requests/transfers
         *  queues, will need to go to the store buffer */
        bool needsToBeSentToStoreBuffer();

        /** Set state and output trace output */
        void setState(LSQRequestState new_state);

        /** Has this request been completed.  This includes *all* reasons
         *  for completion: successful transfers, faults, skipped because
         *  of preceding faults */
        bool isComplete() const;

        /** MinorTrace report interface */
        void reportData(std::ostream &os) const;
    };

    typedef LSQRequest *LSQRequestPtr;

    friend std::ostream & operator <<(std::ostream &os,
        AddrRangeCoverage state);

    friend std::ostream & operator <<(std::ostream &os,
        LSQRequest::LSQRequestState state);

  protected:
    /** Special request types that don't actually issue memory requests */
    class SpecialDataRequest : public LSQRequest
    {
      protected:
        /** TLB interface */
        void finish(const Fault &fault_, RequestPtr request_,
            ThreadContext *tc, BaseTLB::Mode mode)
        { }

      public:
        /** Send single translation request */
        void startAddrTranslation() { }

        /** Get the head packet as counted by numIssuedFragments */
        PacketPtr getHeadPacket()
        { fatal("No packets in a SpecialDataRequest"); }

        /** Step on numIssuedFragments */
        void stepToNextPacket() { }

        /** Has no packets to send */
        bool sentAllPackets() { return true; }

        /** Never sends any requests */
        bool hasPacketsInMemSystem() { return false; }

        /** Keep the given packet as the response packet
         *  LSQRequest::packet */
        void retireResponse(PacketPtr packet_) { }

      public:
        SpecialDataRequest(LSQ &port_, MinorDynInstPtr inst_) :
            /* Say this is a load, not actually relevant */
            LSQRequest(port_, inst_, true, NULL, 0)
        { }
    };

    /** FailedDataRequest represents requests from instructions that
     *  failed their predicates but need to ride the requests/transfers
     *  queues to maintain trace ordering */
    class FailedDataRequest : public SpecialDataRequest
    {
      public:
        FailedDataRequest(LSQ &port_, MinorDynInstPtr inst_) :
            SpecialDataRequest(port_, inst_)
        { state = Failed; }
    };

    /** Request for doing barrier accounting in the store buffer.  Not
     *  for use outside that unit */
    class BarrierDataRequest : public SpecialDataRequest
    {
      public:
        bool isBarrier() { return true; }

      public:
        BarrierDataRequest(LSQ &port_, MinorDynInstPtr inst_) :
            SpecialDataRequest(port_, inst_)
        { state = Complete; }
    };
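
    /* Illustrative sketch (not part of the original header): pushRequest,
     * implemented in the accompanying .cc file, chooses between the
     * SingleDataRequest and SplitDataRequest classes below.  The deciding
     * test is, in essence, whether the access stays within one
     * lineWidth-sized chunk of memory; the variable names here are
     * hypothetical:
     *
     *   LSQRequestPtr request;
     *   if ((addr / lineWidth) != ((addr + size - 1) / lineWidth))
     *       request = new SplitDataRequest(*this, inst, is_load, data, res);
     *   else
     *       request = new SingleDataRequest(*this, inst, is_load, data, res);
     */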

    /** SingleDataRequest is used for requests that don't fragment */
    class SingleDataRequest : public LSQRequest
    {
      protected:
        /** TLB interface */
        void finish(const Fault &fault_, RequestPtr request_,
            ThreadContext *tc, BaseTLB::Mode mode);

        /** Has my only packet been sent to the memory system but has not
         *  yet been responded to */
        bool packetInFlight;

        /** Has the packet been at least sent to the memory system? */
        bool packetSent;

      public:
        /** Send single translation request */
        void startAddrTranslation();

        /** Get the head packet as counted by numIssuedFragments */
        PacketPtr getHeadPacket() { return packet; }

        /** Remember that the packet has been sent */
        void stepToNextPacket() { packetInFlight = true; packetSent = true; }

        /** Has packet been sent */
        bool hasPacketsInMemSystem() { return packetInFlight; }

        /** packetInFlight can become false again, so need to check
         *  packetSent */
        bool sentAllPackets() { return packetSent; }

        /** Keep the given packet as the response packet
         *  LSQRequest::packet */
        void retireResponse(PacketPtr packet_);

      public:
        SingleDataRequest(LSQ &port_, MinorDynInstPtr inst_,
            bool isLoad_, PacketDataPtr data_ = NULL, uint64_t *res_ = NULL) :
            LSQRequest(port_, inst_, isLoad_, data_, res_),
            packetInFlight(false),
            packetSent(false)
        { }
    };

    class SplitDataRequest : public LSQRequest
    {
      protected:
        /** Event to step between translations */
        class TranslationEvent : public Event
        {
          protected:
            SplitDataRequest &owner;

          public:
            TranslationEvent(SplitDataRequest &owner_)
                : owner(owner_) { }

            void process()
            { owner.sendNextFragmentToTranslation(); }
        };

        TranslationEvent translationEvent;
      protected:
        /** Number of fragments this request is split into */
        unsigned int numFragments;

        /** Number of fragments in the address translation mechanism */
        unsigned int numInTranslationFragments;

        /** Number of fragments that have completed address translation,
         *  (numTranslatedFragments + numInTranslationFragments) <=
         *  numFragments.  When numTranslatedFragments == numFragments,
         *  translation is complete */
        unsigned int numTranslatedFragments;

        /** Number of fragments already issued (<= numFragments) */
        unsigned int numIssuedFragments;

        /** Number of fragments retired back to this request */
        unsigned int numRetiredFragments;

        /** Fragment Requests corresponding to the address ranges of
         *  each fragment */
        std::vector<Request *> fragmentRequests;

        /** Packets matching fragmentRequests to issue fragments to memory */
        std::vector<Packet *> fragmentPackets;

      protected:
        /** TLB response interface */
        void finish(const Fault &fault_, RequestPtr request_,
            ThreadContext *tc, BaseTLB::Mode mode);

      public:
        SplitDataRequest(LSQ &port_, MinorDynInstPtr inst_,
            bool isLoad_, PacketDataPtr data_ = NULL,
            uint64_t *res_ = NULL);

        ~SplitDataRequest();

      public:
        /** Make all the Requests for this transfer's fragments so that those
         *  requests can be sent for address translation */
        void makeFragmentRequests();

        /** Make the packets to go with the requests so they can be sent to
         *  the memory system */
        void makeFragmentPackets();

        /** Start a loop of do { sendNextFragmentToTranslation ;
         *  translateTiming ; finish } while (numTranslatedFragments !=
         *  numFragments) to complete all this request's fragments' address
         *  translations */
        void startAddrTranslation();
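
        /* Illustrative sketch (not part of the original header) of the
         * translation loop described above.  Each step sends one fragment's
         * Request to the TLB and the TLB's reply comes back through finish,
         * so the overall shape is roughly:
         *
         *   makeFragmentRequests();
         *   sendNextFragmentToTranslation();  // issue fragment 0 to the TLB
         *   // finish() is then called once per fragment; while
         *   // numTranslatedFragments != numFragments it triggers
         *   // sendNextFragmentToTranslation() (possibly via
         *   // translationEvent) for the next fragment, and
         *   // makeFragmentPackets() prepares the packets once translation
         *   // is complete.
         *
         * The precise scheduling lives in the accompanying .cc file and may
         * differ in detail from this sketch. */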

        /** Get the head packet as counted by numIssuedFragments */
        PacketPtr getHeadPacket();

        /** Step on numIssuedFragments */
        void stepToNextPacket();

        bool hasPacketsInMemSystem()
        { return numIssuedFragments != numRetiredFragments; }

        /** Have we stepped past the end of fragmentPackets? */
        bool sentAllPackets() { return numIssuedFragments == numFragments; }

        /** For loads, paste the response data into the main
         *  response packet */
        void retireResponse(PacketPtr packet_);

        /** Part of the address translation loop, see startAddrTranslation */
        void sendNextFragmentToTranslation();
    };

    /** Store buffer.  This contains stores which have been committed
     *  but whose memory transfers have not yet been issued.  Load data
     *  can be forwarded out of the store buffer */
    class StoreBuffer : public Named
    {
      public:
        /** My owner */
        LSQ &lsq;

        /** Number of slots, this is a bound on the size of slots */
        const unsigned int numSlots;

        /** Maximum number of stores that can be issued per cycle */
        const unsigned int storeLimitPerCycle;

      public:
        /** Queue of store requests on their way to memory */
        std::deque<LSQRequestPtr> slots;

        /** Number of occupied slots which have not yet issued a
         *  memory access */
        unsigned int numUnissuedAccesses;

      public:
        StoreBuffer(std::string name_, LSQ &lsq_,
            unsigned int store_buffer_size,
            unsigned int store_limit_per_cycle);

      public:
        /** Can a new request be inserted into the queue? */
        bool canInsert() const;

        /** Delete the given request and free the slot it occupied */
        void deleteRequest(LSQRequestPtr request);

        /** Insert a request at the back of the queue */
        void insert(LSQRequestPtr request);

        /** Look for a store which satisfies the given load.  Returns an
         *  indication of whether the forwarding request can be wholly,
         *  partly or not at all satisfied.  If the request can be
         *  wholly satisfied, the store buffer slot number which can be used
         *  is returned in found_slot */
        AddrRangeCoverage canForwardDataToLoad(LSQRequestPtr request,
            unsigned int &found_slot);

        /** Fill the given packet with appropriate data from slot
         *  slot_number */
        void forwardStoreData(LSQRequestPtr load, unsigned int slot_number);

        /** Number of stores in the store buffer which have not been
         *  completely issued to the memory system */
        unsigned int numUnissuedStores() { return numUnissuedAccesses; }

        /** Drained if there is absolutely nothing left in the buffer */
        bool isDrained() const { return slots.empty(); }

        /** Try to issue more stores to memory */
        void step();

        /** Report queue contents for MinorTrace */
        void minorTrace() const;
    };
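
    /* Illustrative sketch (not part of the original header): before a
     * cacheable load is sent from the requests queue to memory, the LSQ can
     * ask the store buffer whether an earlier, still-buffered store already
     * holds the needed data, roughly:
     *
     *   unsigned int slot;
     *   switch (storeBuffer.canForwardDataToLoad(load_request, slot)) {
     *     case FullAddrRangeCoverage:
     *       storeBuffer.forwardStoreData(load_request, slot);
     *       break;  // Load satisfied without a memory access
     *     case PartialAddrRangeCoverage:
     *       break;  // Must wait; only part of the data is in the buffer
     *     case NoAddrRangeCoverage:
     *       break;  // Issue the load to memory as normal
     *   }
     *
     * The real decision logic accompanies tryToSendToTransfers in the .cc
     * file and may differ in detail from this sketch. */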

  protected:
    /** Most recent execSeqNum of a memory barrier instruction or
     *  0 if there are no in-flight barriers.  Useful as a
     *  dependency for early-issued memory operations */
    InstSeqNum lastMemBarrier;

  public:
    /** Retry state of last issued memory transfer */
    MemoryState state;

    /** Maximum number of in-flight accesses issued to the memory system */
    const unsigned int inMemorySystemLimit;

    /** Memory system access width (and snap) in bytes */
    const unsigned int lineWidth;

  public:
    /** The LSQ consists of three queues: requests, transfers and the
     *  store buffer storeBuffer. */

    typedef Queue<LSQRequestPtr,
        ReportTraitsPtrAdaptor<LSQRequestPtr>,
        NoBubbleTraits<LSQRequestPtr> >
        LSQQueue;

    /** requests contains LSQRequests which have been issued to the TLB by
     *  calling ExecContext::readMem/writeMem (which in turn calls
     *  LSQ::pushRequest and LSQRequest::startAddrTranslation).  Once they
     *  have a physical address, requests at the head of requests can be
     *  issued to the memory system.  At this stage, it is not yet certain
     *  that memory accesses *must* happen (that there are no preceding
     *  faults or changes of flow of control) and so only cacheable reads
     *  are issued to memory.
     *  Cacheable stores are not issued at all (and just pass through
     *  'transfers' in order) and all other transfers are stalled in requests
     *  until their corresponding instructions are at the head of the
     *  inMemInsts instruction queue and have the right streamSeqNum. */
    LSQQueue requests;

    /** Once issued to memory (or, for stores, just had their
     *  state changed to StoreToStoreBuffer) LSQRequests pass through
     *  transfers waiting for memory responses.  At the head of transfers,
     *  Execute::commitInst can pick up the memory response for a request
     *  using LSQ::findResponse.  Responses to be committed can then have
     *  ExecContext::completeAcc called on them.  Stores can then be pushed
     *  into the store buffer.  All other transfers will then be complete. */
    LSQQueue transfers;

    /* The store buffer contains committed cacheable stores on
     *  their way to memory decoupled from subsequent instruction execution.
     *  Before trying to issue a cacheable read from 'requests' to memory,
     *  the store buffer is checked to see if a previous store contains the
     *  needed data (StoreBuffer::canForwardDataToLoad) which can be
     *  forwarded in lieu of a memory access.  If there are outstanding
     *  stores in the transfers queue, they must be promoted to the store
     *  buffer (and so be committed) before they can be correctly checked
     *  for forwarding. */
    StoreBuffer storeBuffer;

  protected:
    /** Count of the number of mem. accesses which have left the
     *  requests queue and are in the 'wild' in the memory system and which
     *  *must not* be interrupted as they are not normal cacheable
     *  accesses.  This is a count of the number of in-flight requests
     *  with issuedToMemory set which have visited tryToSend at least
     *  once */
    unsigned int numAccessesInMemorySystem;

    /** Number of requests in the DTLB in the requests queue */
    unsigned int numAccessesInDTLB;

    /** The number of stores in the transfers queue.  Useful when
     *  testing if the store buffer contains all the forwardable stores */
    unsigned int numStoresInTransfers;

    /** The number of accesses which have been issued to the memory
     *  system but have not been committed/discarded *excluding*
     *  cacheable normal loads which don't need to be tracked */
    unsigned int numAccessesIssuedToMemory;

    /** The request (from either requests or the store buffer) which is
     *  currently waiting to have its memory access retried */
    LSQRequestPtr retryRequest;

    /** Address Mask for a cache block (e.g. ~(cache_block_size-1)) */
    Addr cacheBlockMask;

  protected:
    /** Try and issue a memory access for a translated request at the
     *  head of the requests queue.  Also tries to move the request
     *  between queues */
    void tryToSendToTransfers(LSQRequestPtr request);

    /** Try to send (or resend) a memory request's next/only packet to
     *  the memory system.  Returns true if the request was successfully
     *  sent to memory (and was also the last packet in a transfer) */
    bool tryToSend(LSQRequestPtr request);

    /** Clear a barrier (if it's the last one marked up in lastMemBarrier) */
    void clearMemBarrier(MinorDynInstPtr inst);

    /** Move a request between queues */
    void moveFromRequestsToTransfers(LSQRequestPtr request);

    /** Can a request be sent to the memory system */
    bool canSendToMemorySystem();

  public:
    LSQ(std::string name_, std::string dcache_port_name_,
        MinorCPU &cpu_, Execute &execute_,
        unsigned int max_accesses_in_memory_system, unsigned int line_width,
        unsigned int requests_queue_size, unsigned int transfers_queue_size,
        unsigned int store_buffer_size,
        unsigned int store_buffer_cycle_store_limit);

    virtual ~LSQ();

  public:
    /** Step checks the queues to see if there are issuable transfers
     *  which were not otherwise picked up by tests at the end of other
     *  events.
     *
     *  Steppable actions include deferred actions which couldn't be
     *  cascaded on the end of a memory response/TLB response event
     *  because of resource congestion. */
    void step();

    /** Is there space in the requests queue to push a request by
     *  issuing an isMemRef instruction */
    bool canRequest() { return requests.unreservedRemainingSpace() != 0; }

    /** Returns a response if it's at the head of the transfers queue and
     *  it's either complete or can be sent on to the store buffer.  After
     *  calling, the request still remains on the transfers queue until
     *  popResponse is called */
    LSQRequestPtr findResponse(MinorDynInstPtr inst);

    /** Sanity check and pop the head response */
    void popResponse(LSQRequestPtr response);

    /** Must check this before trying to insert into the store buffer */
    bool canPushIntoStoreBuffer() const { return storeBuffer.canInsert(); }

    /** A store has been committed, please move it to the store buffer */
    void sendStoreToStoreBuffer(LSQRequestPtr request);

    /** Are there any accesses other than normal cached loads in the
     *  memory system or having received responses which need to be
     *  handled for their instructions to be completed */
    bool accessesInFlight() const
    { return numAccessesIssuedToMemory != 0; }

    /** A memory barrier instruction has been issued, remember its
     *  execSeqNum so that we can avoid issuing memory ops until it is
     *  committed */
    void issuedMemBarrierInst(MinorDynInstPtr inst);

    /** Get the execSeqNum of the last issued memory barrier */
    InstSeqNum getLastMemBarrier() const { return lastMemBarrier; }

    /** Is there nothing left in the LSQ */
    bool isDrained();

    /** May need to be ticked next cycle as one of the queues contains
     *  an actionable transfer or address translation */
    bool needsToTick();
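
    /* Illustrative sketch (not part of the original header): the commit side
     * of Execute drives the interface above roughly as follows for a memory
     * instruction `inst` (fault handling and completeAcc are left out for
     * brevity):
     *
     *   LSQRequestPtr response = findResponse(inst);
     *   if (response) {
     *       if (response->needsToBeSentToStoreBuffer() &&
     *           !canPushIntoStoreBuffer())
     *       {
     *           // Stall: the store buffer has no free slot this cycle
     *       } else {
     *           // ... apply ExecContext::completeAcc, then ...
     *           if (response->needsToBeSentToStoreBuffer())
     *               sendStoreToStoreBuffer(response);
     *           popResponse(response);
     *       }
     *   }
     *
     * The authoritative sequencing is in Execute::commitInst (see the
     * comment on the transfers queue above) and may differ in detail from
     * this sketch. */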

    /** Complete a barrier instruction.  Where committed, makes a
     *  BarrierDataRequest and pushes it into the store buffer */
    void completeMemBarrierInst(MinorDynInstPtr inst,
        bool committed);

    /** Single interface for readMem/writeMem to issue requests into
     *  the LSQ */
    void pushRequest(MinorDynInstPtr inst, bool isLoad, uint8_t *data,
        unsigned int size, Addr addr, unsigned int flags, uint64_t *res);

    /** Push a request representing a failed predicate into the queues just
     *  to maintain commit order */
    void pushFailedRequest(MinorDynInstPtr inst);

    /** Memory interface */
    bool recvTimingResp(PacketPtr pkt);
    void recvRetry();
    void recvTimingSnoopReq(PacketPtr pkt);

    /** Return the raw-bindable port */
    MinorCPU::MinorCPUPort &getDcachePort() { return dcachePort; }

    void minorTrace() const;
};

/** Make a suitable packet for the given request.  If the request is a store,
 *  data will be the payload data.  If sender_state is NULL, it won't be
 *  pushed into the packet as senderState */
PacketPtr makePacketForRequest(Request &request, bool isLoad,
    Packet::SenderState *sender_state = NULL, PacketDataPtr data = NULL);

}

#endif /* __CPU_MINOR_NEW_LSQ_HH__ */