/*
 * Copyright (c) 2013-2014, 2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andrew Bardsley
 */

/**
 * @file
 *
 *  A load/store queue that allows outstanding reads and writes.
 *
 */

#ifndef __CPU_MINOR_NEW_LSQ_HH__
#define __CPU_MINOR_NEW_LSQ_HH__

#include "cpu/minor/buffers.hh"
#include "cpu/minor/cpu.hh"
#include "cpu/minor/pipe_data.hh"
#include "cpu/minor/trace.hh"

namespace Minor
{

/* Forward declaration */
class Execute;

class LSQ : public Named
{
  protected:
    /** My owner(s) */
    MinorCPU &cpu;
    Execute &execute;

  protected:
    /** State of memory access for head access. */
    enum MemoryState
    {
        MemoryRunning, /* Default. Step dcache queues when possible. */
        MemoryNeedsRetry /* Request rejected, will be asked to retry */
    };

    /** Print MemoryState values as shown in the enum definition */
    friend std::ostream &operator <<(std::ostream &os,
        MemoryState state);

    /** Coverage of one address range with another */
    enum AddrRangeCoverage
    {
        PartialAddrRangeCoverage, /* Two ranges partly overlap */
        FullAddrRangeCoverage, /* One range fully covers another */
        NoAddrRangeCoverage /* Two ranges are disjoint */
    };

    /** Exposable data port */
    class DcachePort : public MinorCPU::MinorCPUPort
    {
      protected:
        /** My owner */
        LSQ &lsq;

      public:
        DcachePort(std::string name, LSQ &lsq_, MinorCPU &cpu) :
            MinorCPU::MinorCPUPort(name, cpu), lsq(lsq_)
        { }

      protected:
        bool recvTimingResp(PacketPtr pkt) override
        { return lsq.recvTimingResp(pkt); }

        void recvReqRetry() override { lsq.recvReqRetry(); }

        bool isSnooping() const override { return true; }

        void recvTimingSnoopReq(PacketPtr pkt) override
        { return lsq.recvTimingSnoopReq(pkt); }

        void recvFunctionalSnoop(PacketPtr pkt) override { }
    };

    DcachePort dcachePort;

  public:
    /** Derived SenderState to carry data access info. through address
     *  translation, the queues in this port and back from the memory
     *  system. */
    class LSQRequest :
        public BaseTLB::Translation, /* For TLB lookups */
        public Packet::SenderState /* For packing into a Packet */
    {
      public:
        /** Owning port */
        LSQ &port;

        /** Instruction which made this request */
        MinorDynInstPtr inst;

        /** Load/store indication used for building packet.  This isn't
         *  carried by Request so we need to keep it here */
        bool isLoad;

        /** Dynamically allocated and populated data carried for
         *  building write packets */
        PacketDataPtr data;

        /* Requests carry packets on their way to the memory system.
         *  When a Packet returns from the memory system, its
         *  request needs to have its packet updated as this
         *  may have changed in flight */
        PacketPtr packet;

        /** The underlying request of this LSQRequest */
        RequestPtr request;

        /** Res from pushRequest */
        uint64_t *res;

        /** Was skipped.  Set to indicate any reason (faulted, bad
         *  stream sequence number, in a fault shadow) that this
         *  request did not perform a memory transfer */
        bool skipped;

        /** This is an access other than a normal cacheable load
         *  that has visited the memory system */
        bool issuedToMemory;

        /** Address translation is delayed due to table walk */
        bool isTranslationDelayed;

        enum LSQRequestState
        {
            NotIssued, /* Newly created */
            InTranslation, /* TLB accessed, no reply yet */
            Translated, /* Finished address translation */
            Failed, /* The starting state of FailedDataRequests */
            RequestIssuing, /* Load/store issued to memory in the requests
                queue */
            StoreToStoreBuffer, /* Store in transfers on its way to the
                store buffer */
            RequestNeedsRetry, /* Retry needed for load */
            StoreInStoreBuffer, /* Store in the store buffer, before issuing
                a memory transfer */
            StoreBufferIssuing, /* Store in store buffer and has been
                issued */
            StoreBufferNeedsRetry, /* Retry needed for store */
            /* All completed states.  Includes
                completed loads, TLB faults and skipped requests whose
                seqNum's no longer match */
            Complete
        };

        LSQRequestState state;
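
        /* Typical state progressions (an illustrative sketch pieced
         * together from the state comments above, not an exhaustive
         * description of the state machine):
         *   load:  NotIssued -> InTranslation -> Translated ->
         *          RequestIssuing -> Complete
         *   store: NotIssued -> InTranslation -> Translated ->
         *          StoreToStoreBuffer -> StoreInStoreBuffer ->
         *          StoreBufferIssuing -> Complete */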

      protected:
        /** BaseTLB::Translation interface */
        void markDelayed() { isTranslationDelayed = true; }

        /** Instructions may want to suppress translation faults (e.g.
         *  non-faulting vector loads). */
        void tryToSuppressFault();

        void disableMemAccess();
        void completeDisabledMemAccess();

      public:
        LSQRequest(LSQ &port_, MinorDynInstPtr inst_, bool isLoad_,
            PacketDataPtr data_ = NULL, uint64_t *res_ = NULL);

        virtual ~LSQRequest();

      public:
        /** Make a packet to use with the memory transaction */
        void makePacket();

        /** Was no memory access attempted for this request? */
        bool skippedMemAccess() { return skipped; }

        /** Set this request as having been skipped before a memory
         *  transfer was attempted */
        void setSkipped() { skipped = true; }

        /** Does address range req1 (req1_addr to req1_addr + req1_size - 1)
         *  fully cover, partially cover or not cover at all the range req2 */
        static AddrRangeCoverage containsAddrRangeOf(
            Addr req1_addr, unsigned int req1_size,
            Addr req2_addr, unsigned int req2_size);
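
        /* Worked example of the static coverage check above (addresses and
         * sizes are made up for illustration): for req1 covering
         * [0x100, 0x10f] (addr 0x100, size 16) and req2 covering
         * [0x104, 0x107] (addr 0x104, size 4),
         * containsAddrRangeOf(0x100, 16, 0x104, 4) should give
         * FullAddrRangeCoverage, swapping the two requests should give
         * PartialAddrRangeCoverage, and disjoint ranges give
         * NoAddrRangeCoverage. */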

        /** Does this request's address range fully cover the range
         *  of other_request? */
        AddrRangeCoverage containsAddrRangeOf(LSQRequest *other_request);

        /** Start the address translation process for this request.  This
         *  will issue a translation request to the TLB. */
        virtual void startAddrTranslation() = 0;

        /** Get the next packet to issue for this request.  For split
         *  transfers, it will be necessary to step through the available
         *  packets by calling do { getHeadPacket ; stepToNextPacket } while
         *  (!sentAllPackets) and by retiring responses using retireResponse */
        virtual PacketPtr getHeadPacket() = 0;

        /** Step to the next packet for the next call to getHeadPacket */
        virtual void stepToNextPacket() = 0;

        /** Have all packets been sent? */
        virtual bool sentAllPackets() = 0;

        /** True if this request has any issued packets in the memory
         *  system and so can't be interrupted until it gets responses */
        virtual bool hasPacketsInMemSystem() = 0;

        /** Retire a response packet into the LSQRequest packet possibly
         *  completing this transfer */
        virtual void retireResponse(PacketPtr packet_) = 0;
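
        /* Issue-side usage (an illustrative sketch of the loop described
         * above for getHeadPacket/stepToNextPacket; sendToMemory and
         * response_pkt are hypothetical names, not part of this class):
         *
         *   while (!request->sentAllPackets()) {
         *       PacketPtr pkt = request->getHeadPacket();
         *       if (!sendToMemory(pkt))
         *           break; // rejected, retry later
         *       request->stepToNextPacket();
         *   }
         *   // later, as each response returns from the memory system:
         *   request->retireResponse(response_pkt);
         */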

        /** Is this request a barrier? */
        virtual bool isBarrier();

        /** This request, once processed by the requests/transfers
         *  queues, will need to go to the store buffer */
        bool needsToBeSentToStoreBuffer();

        /** Set the state and generate trace output */
        void setState(LSQRequestState new_state);

        /** Has this request been completed?  This includes *all* reasons
         *  for completion: successful transfers, faults, skipped because
         *  of preceding faults */
        bool isComplete() const;

        /** MinorTrace report interface */
        void reportData(std::ostream &os) const;
    };

    typedef LSQRequest *LSQRequestPtr;

    friend std::ostream & operator <<(std::ostream &os,
        AddrRangeCoverage state);

    friend std::ostream & operator <<(std::ostream &os,
        LSQRequest::LSQRequestState state);

  protected:
    /** Special request types that don't actually issue memory requests */
    class SpecialDataRequest : public LSQRequest
    {
      protected:
        /** TLB interface */
        void finish(const Fault &fault_, const RequestPtr &request_,
                    ThreadContext *tc, BaseTLB::Mode mode)
        { }

      public:
        /** Nothing to translate: special requests never access the TLB */
        void startAddrTranslation() { }

        /** There are never any packets to get from a special request */
        PacketPtr getHeadPacket()
        { fatal("No packets in a SpecialDataRequest"); }

        /** No packets to step through */
        void stepToNextPacket() { }

        /** Has no packets to send */
        bool sentAllPackets() { return true; }

        /** Never sends any requests */
        bool hasPacketsInMemSystem() { return false; }

        /** Ignore any response packet: special requests carry no
         *  memory data */
        void retireResponse(PacketPtr packet_) { }

      public:
        SpecialDataRequest(LSQ &port_, MinorDynInstPtr inst_) :
            /* Say this is a load, not actually relevant */
            LSQRequest(port_, inst_, true, NULL, 0)
        { }
    };

    /** FailedDataRequest represents requests from instructions that
     *  failed their predicates but need to ride the requests/transfers
     *  queues to maintain trace ordering */
    class FailedDataRequest : public SpecialDataRequest
    {
      public:
        FailedDataRequest(LSQ &port_, MinorDynInstPtr inst_) :
            SpecialDataRequest(port_, inst_)
        { state = Failed; }
    };

    /** Request for doing barrier accounting in the store buffer.  Not
     *  for use outside that unit */
    class BarrierDataRequest : public SpecialDataRequest
    {
      public:
        bool isBarrier() { return true; }

      public:
        BarrierDataRequest(LSQ &port_, MinorDynInstPtr inst_) :
            SpecialDataRequest(port_, inst_)
        { state = Complete; }
    };

    /** SingleDataRequest is used for requests that don't fragment */
    class SingleDataRequest : public LSQRequest
    {
      protected:
        /** TLB interface */
        void finish(const Fault &fault_, const RequestPtr &request_,
                    ThreadContext *tc, BaseTLB::Mode mode);

        /** Has my only packet been sent to the memory system but not
         *  yet been responded to? */
        bool packetInFlight;

        /** Has the packet been at least sent to the memory system? */
        bool packetSent;

      public:
        /** Send the single translation request */
        void startAddrTranslation();

        /** Get the head (and only) packet for this request */
        PacketPtr getHeadPacket() { return packet; }

        /** Remember that the packet has been sent */
        void stepToNextPacket() { packetInFlight = true; packetSent = true; }

        /** Has the packet been sent? */
        bool hasPacketsInMemSystem() { return packetInFlight; }

        /** packetInFlight can become false again, so need to check
         *  packetSent */
        bool sentAllPackets() { return packetSent; }

        /** Keep the given packet as the response packet
         *  LSQRequest::packet */
        void retireResponse(PacketPtr packet_);

      public:
        SingleDataRequest(LSQ &port_, MinorDynInstPtr inst_,
            bool isLoad_, PacketDataPtr data_ = NULL, uint64_t *res_ = NULL) :
            LSQRequest(port_, inst_, isLoad_, data_, res_),
            packetInFlight(false),
            packetSent(false)
        { }
    };

    class SplitDataRequest : public LSQRequest
    {
      protected:
        /** Event to step between translations */
        EventFunctionWrapper translationEvent;
      protected:
        /** Number of fragments this request is split into */
        unsigned int numFragments;

        /** Number of fragments in the address translation mechanism */
        unsigned int numInTranslationFragments;

        /** Number of fragments that have completed address translation,
         *  (numTranslatedFragments + numInTranslationFragments) <=
         *  numFragments.  When numTranslatedFragments == numFragments,
         *  translation is complete */
        unsigned int numTranslatedFragments;

        /** Number of fragments already issued (<= numFragments) */
        unsigned int numIssuedFragments;

        /** Number of fragments retired back to this request */
        unsigned int numRetiredFragments;

        /** Fragment Requests corresponding to the address ranges of
         *  each fragment */
        std::vector<RequestPtr> fragmentRequests;

        /** Packets matching fragmentRequests to issue fragments to memory */
        std::vector<Packet *> fragmentPackets;

      protected:
        /** TLB response interface */
        void finish(const Fault &fault_, const RequestPtr &request_,
                    ThreadContext *tc, BaseTLB::Mode mode);

      public:
        SplitDataRequest(LSQ &port_, MinorDynInstPtr inst_,
            bool isLoad_, PacketDataPtr data_ = NULL,
            uint64_t *res_ = NULL);

        ~SplitDataRequest();

      public:
        /** Make all the Requests for this transfer's fragments so that those
         *  requests can be sent for address translation */
        void makeFragmentRequests();
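
        /* Illustrative fragmenting example (the split boundary is an
         * assumption based on the lineWidth 'snap' described later in this
         * file, not a statement of the implementation): with a lineWidth
         * of 64 bytes, an 8-byte access at address 0x3c might become two
         * fragments, [0x3c, 0x3f] and [0x40, 0x43], each with its own
         * entry in fragmentRequests and fragmentPackets. */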

        /** Make the packets to go with the requests so they can be sent to
         *  the memory system */
        void makeFragmentPackets();

        /** Start a loop of do { sendNextFragmentToTranslation ;
         *  translateTiming ; finish } while (numTranslatedFragments !=
         *  numFragments) to complete all of this request's fragments'
         *  address translations */
        void startAddrTranslation();

        /** Get the head packet as counted by numIssuedFragments */
        PacketPtr getHeadPacket();

        /** Step on numIssuedFragments */
        void stepToNextPacket();

        bool hasPacketsInMemSystem()
        { return numIssuedFragments != numRetiredFragments; }

        /** Have we stepped past the end of fragmentPackets? */
        bool sentAllPackets()
        { return numIssuedFragments == numTranslatedFragments; }

        /** For loads, paste the response data into the main
         *  response packet */
        void retireResponse(PacketPtr packet_);

        /** Part of the address translation loop, see startAddrTranslation */
        void sendNextFragmentToTranslation();
    };

    /** Store buffer.  This contains stores which have been committed
     *  but whose memory transfers have not yet been issued. Load data
     *  can be forwarded out of the store buffer */
    class StoreBuffer : public Named
    {
      public:
        /** My owner */
        LSQ &lsq;

        /** Number of slots; this bounds the size of the slots queue */
        const unsigned int numSlots;

        /** Maximum number of stores that can be issued per cycle */
        const unsigned int storeLimitPerCycle;

      public:
        /** Queue of store requests on their way to memory */
        std::deque<LSQRequestPtr> slots;

        /** Number of occupied slots which have not yet issued a
         *  memory access */
        unsigned int numUnissuedAccesses;

      public:
        StoreBuffer(std::string name_, LSQ &lsq_,
            unsigned int store_buffer_size,
            unsigned int store_limit_per_cycle);

      public:
        /** Can a new request be inserted into the queue? */
        bool canInsert() const;

        /** Delete the given request and free the slot it occupied */
        void deleteRequest(LSQRequestPtr request);

        /** Insert a request at the back of the queue */
        void insert(LSQRequestPtr request);

        /** Look for a store which satisfies the given load.  Returns an
         *  indication of whether the forwarding request can be wholly,
         *  partly or not at all satisfied.  If the request can be
         *  wholly satisfied, the store buffer slot number which can be used
         *  is returned in found_slot */
        AddrRangeCoverage canForwardDataToLoad(LSQRequestPtr request,
            unsigned int &found_slot);

        /** Fill the given packet with appropriate data from slot
         *  slot_number */
        void forwardStoreData(LSQRequestPtr load, unsigned int slot_number);
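
        /* Forwarding usage (an illustrative sketch based on the two
         * functions above; 'load' and 'slot' are hypothetical caller-side
         * names):
         *
         *   unsigned int slot;
         *   if (storeBuffer.canForwardDataToLoad(load, slot) ==
         *       FullAddrRangeCoverage)
         *   {
         *       storeBuffer.forwardStoreData(load, slot);
         *       // load's packet now carries the forwarded store data
         *   }
         */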

        /** Number of stores in the store buffer which have not been
         *  completely issued to the memory system */
        unsigned int numUnissuedStores() { return numUnissuedAccesses; }

        /** Count a store being issued to memory by decrementing
         *  numUnissuedAccesses.  Does not count barrier requests as they
         *  will be handled as barriers are cleared from the buffer */
        void countIssuedStore(LSQRequestPtr request);

        /** Drained if there is absolutely nothing left in the buffer */
        bool isDrained() const { return slots.empty(); }

        /** Try to issue more stores to memory */
        void step();

        /** Report queue contents for MinorTrace */
        void minorTrace() const;
    };

  protected:
    /** Most recent execSeqNum of a memory barrier instruction for each
     *  thread or 0 if there are no in-flight barriers.  Useful as a
     *  dependency for early-issued memory operations */
    std::vector<InstSeqNum> lastMemBarrier;

  public:
    /** Retry state of last issued memory transfer */
    MemoryState state;

    /** Maximum number of in-flight accesses issued to the memory system */
    const unsigned int inMemorySystemLimit;

    /** Memory system access width (and snap) in bytes */
    const unsigned int lineWidth;

  public:
    /** The LSQ consists of three queues: requests, transfers and the
     *  store buffer storeBuffer. */

    typedef Queue<LSQRequestPtr,
        ReportTraitsPtrAdaptor<LSQRequestPtr>,
        NoBubbleTraits<LSQRequestPtr> >
        LSQQueue;

    /** requests contains LSQRequests which have been issued to the TLB by
     *  calling ExecContext::readMem/writeMem (which in turn calls
     *  LSQ::pushRequest and LSQRequest::startAddrTranslation).  Once they
     *  have a physical address, requests at the head of requests can be
     *  issued to the memory system.  At this stage, it cannot be clear that
     *  memory accesses *must* happen (that there are no preceding faults or
     *  changes of flow of control) and so only cacheable reads are issued
     *  to memory.
     *  Cacheable stores are not issued at all (and just pass through
     *  'transfers' in order) and all other transfers are stalled in requests
     *  until their corresponding instructions are at the head of the
     *  inMemInsts instruction queue and have the right streamSeqNum. */
    LSQQueue requests;

    /** Once issued to memory (or, for stores, just had their
     *  state changed to StoreToStoreBuffer) LSQRequests pass through
     *  transfers waiting for memory responses.  At the head of transfers,
     *  Execute::commitInst can pick up the memory response for a request
     *  using LSQ::findResponse.  Responses to be committed can then have
     *  ExecContext::completeAcc called on them.  Stores can then be pushed
     *  into the store buffer.  All other transfers will then be complete. */
    LSQQueue transfers;

    /* The store buffer contains committed cacheable stores on
     * their way to memory decoupled from subsequent instruction execution.
     * Before trying to issue a cacheable read from 'requests' to memory,
     * the store buffer is checked to see if a previous store contains the
     * needed data (StoreBuffer::canForwardDataToLoad) which can be
     * forwarded in lieu of a memory access.  If there are outstanding
     * stores in the transfers queue, they must be promoted to the store
     * buffer (and so be committed) before they can be correctly checked
     * for forwarding. */
    StoreBuffer storeBuffer;
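
    /* Overall flow through the three queues (an illustrative summary of
     * the comments above, not additional machinery):
     *
     *   pushRequest -> requests --(translation, issue)--> transfers
     *       -> findResponse/popResponse (loads and completed requests)
     *       -> sendStoreToStoreBuffer -> storeBuffer -> memory (stores)
     */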

  protected:
    /** Count of the number of mem. accesses which have left the
     *  requests queue and are in the 'wild' in the memory system and which
     *  *must not* be interrupted as they are not normal cacheable
     *  accesses.  This is a count of the number of in-flight requests
     *  with issuedToMemory set which have visited tryToSendRequest at least
     *  once */
    unsigned int numAccessesInMemorySystem;

    /** Number of requests in the DTLB in the requests queue */
    unsigned int numAccessesInDTLB;

    /** The number of stores in the transfers queue.  Useful when
     *  testing if the store buffer contains all the forwardable stores */
    unsigned int numStoresInTransfers;

    /** The number of accesses which have been issued to the memory
     *  system but have not been committed/discarded *excluding*
     *  cacheable normal loads which don't need to be tracked */
    unsigned int numAccessesIssuedToMemory;

    /** The request (from either requests or the store buffer) which is
     *  currently waiting to have its memory access retried */
    LSQRequestPtr retryRequest;

    /** Address Mask for a cache block (e.g. ~(cache_block_size-1)) */
    Addr cacheBlockMask;

  protected:
    /** Try and issue a memory access for a translated request at the
     *  head of the requests queue.  Also tries to move the request
     *  between queues */
    void tryToSendToTransfers(LSQRequestPtr request);

    /** Try to send (or resend) a memory request's next/only packet to
     *  the memory system.  Returns true if the request was successfully
     *  sent to memory (and was also the last packet in a transfer) */
    bool tryToSend(LSQRequestPtr request);

    /** Clear a barrier (if it's the last one marked up in lastMemBarrier) */
    void clearMemBarrier(MinorDynInstPtr inst);

    /** Move a request between queues */
    void moveFromRequestsToTransfers(LSQRequestPtr request);

    /** Can a request be sent to the memory system? */
    bool canSendToMemorySystem();

    /** Snoop other threads' monitors on memory system accesses */
    void threadSnoop(LSQRequestPtr request);

  public:
    LSQ(std::string name_, std::string dcache_port_name_,
        MinorCPU &cpu_, Execute &execute_,
        unsigned int max_accesses_in_memory_system, unsigned int line_width,
        unsigned int requests_queue_size, unsigned int transfers_queue_size,
        unsigned int store_buffer_size,
        unsigned int store_buffer_cycle_store_limit);

    virtual ~LSQ();

  public:
    /** Step checks the queues to see if there are issuable transfers
     *  which were not otherwise picked up by tests at the end of other
     *  events.
     *
     *  Steppable actions include deferred actions which couldn't be
     *  cascaded on the end of a memory response/TLB response event
     *  because of resource congestion. */
    void step();

    /** Is there space in the request queue to be able to push a request by
     *  issuing an isMemRef instruction */
    bool canRequest() { return requests.unreservedRemainingSpace() != 0; }

    /** Returns a response if it's at the head of the transfers queue and
     *  it's either complete or can be sent on to the store buffer.  After
     *  calling, the request still remains on the transfer queue until
     *  popResponse is called */
    LSQRequestPtr findResponse(MinorDynInstPtr inst);

    /** Sanity check and pop the head response */
    void popResponse(LSQRequestPtr response);
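
    /* Commit-side usage (an illustrative sketch of the interaction
     * described for the transfers queue above; completeAcc handling and
     * the store-buffer path are elided):
     *
     *   LSQRequestPtr response = lsq.findResponse(inst);
     *   if (response && response->isComplete()) {
     *       // act on the response, e.g. ExecContext::completeAcc
     *       lsq.popResponse(response);
     *   }
     */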

    /** Must check this before trying to insert into the store buffer */
    bool canPushIntoStoreBuffer() const { return storeBuffer.canInsert(); }

    /** A store has been committed, please move it to the store buffer */
    void sendStoreToStoreBuffer(LSQRequestPtr request);

    /** Are there any accesses other than normal cached loads in the
     *  memory system or having received responses which need to be
     *  handled for their instructions to be completed */
    bool accessesInFlight() const
    { return numAccessesIssuedToMemory != 0; }

    /** A memory barrier instruction has been issued, remember its
     *  execSeqNum so that we can avoid issuing memory ops until it is
     *  committed */
    void issuedMemBarrierInst(MinorDynInstPtr inst);

    /** Get the execSeqNum of the last issued memory barrier */
    InstSeqNum getLastMemBarrier(ThreadID thread_id) const
    { return lastMemBarrier[thread_id]; }

    /** Is there nothing left in the LSQ */
    bool isDrained();

    /** May need to be ticked next cycle as one of the queues contains
     *  an actionable transfer or address translation */
    bool needsToTick();

    /** Complete a barrier instruction.  Where committed, this makes a
     *  BarrierDataRequest and pushes it into the store buffer */
    void completeMemBarrierInst(MinorDynInstPtr inst,
        bool committed);

    /** Single interface for readMem/writeMem/amoMem to issue requests into
     *  the LSQ */
    Fault pushRequest(MinorDynInstPtr inst, bool isLoad, uint8_t *data,
                      unsigned int size, Addr addr, Request::Flags flags,
                      uint64_t *res, AtomicOpFunctorPtr amo_op,
                      const std::vector<bool>& byteEnable =
                          std::vector<bool>());
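
    /* Illustrative call (a sketch only; the real callers are
     *  ExecContext::readMem/writeMem/amoMem as noted above, and the
     *  argument values here are made up):
     *
     *    Fault fault = lsq.pushRequest(inst, true, nullptr, 8, addr,
     *        Request::Flags(), nullptr, nullptr);
     *    // true => load, no store data, 8 bytes at addr, no result
     *    // pointer, no atomic op, default byte enables
     */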

    /** Push a request representing a failed predicate into the queues just
     *  to maintain commit order */
    void pushFailedRequest(MinorDynInstPtr inst);

    /** Memory interface */
    bool recvTimingResp(PacketPtr pkt);
    void recvReqRetry();
    void recvTimingSnoopReq(PacketPtr pkt);

    /** Return the raw-bindable port */
    MinorCPU::MinorCPUPort &getDcachePort() { return dcachePort; }

    void minorTrace() const;
};

/** Make a suitable packet for the given request.  If the request is a store,
 *  data will be the payload data.  If sender_state is NULL, it won't be
 *  pushed into the packet as senderState */
PacketPtr makePacketForRequest(const RequestPtr &request, bool isLoad,
    Packet::SenderState *sender_state = NULL, PacketDataPtr data = NULL);
}

#endif /* __CPU_MINOR_NEW_LSQ_HH__ */