/*
 * Copyright (c) 2012-2013, 2015-2016, 2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Miss Status and Handling Register (MSHR) declaration.
 */

#ifndef __MEM_CACHE_MSHR_HH__
#define __MEM_CACHE_MSHR_HH__

#include <algorithm>
#include <cassert>
#include <functional>
#include <iosfwd>
#include <list>
#include <string>
#include <vector>

#include "base/printable.hh"
#include "base/types.hh"
#include "mem/cache/queue_entry.hh"
#include "mem/packet.hh"
#include "mem/request.hh"
#include "sim/core.hh"

class BaseCache;

/**
 * Miss Status and Handling Register. This class keeps all the information
 * needed to handle a cache miss, including a list of target requests.
 * @sa  \ref gem5MemorySystem "gem5 Memory System"
 */
class MSHR : public QueueEntry, public Printable
{

    /**
     * Consider the queues friends to avoid making everything public.
     */
    template<typename Entry>
    friend class Queue;
    friend class MSHRQueue;

  private:

    /** Flag set by downstream caches */
    bool downstreamPending;

    /**
     * Here we use one flag to track both of the following:
     *
     * 1. Whether we are going to become the owner, i.e., we will get
     * the block in an ownership state (Owned or Modified) with
     * BlkDirty set. This determines whether or not we are going to
     * become the responder and ordering point for future requests
     * that we snoop.
     *
     * 2. Whether we know that we are going to get a writable block,
     * i.e., we will get the block in a writable state (Exclusive or
     * Modified) with BlkWritable set. That determines whether
     * additional targets with needsWritable set can be satisfied, or
     * whether they should instead be put on the deferred list to
     * possibly wait for another request that does give us writable
     * access.
     *
     * Condition 2 is actually just a shortcut that saves us from
     * possibly building a deferred target list and calling
     * promoteWritable() every time we get a writable block. Condition
     * 1, tracking ownership, is what is important. However, we never
     * receive ownership without marking the block dirty, and
     * consequently use pendingModified to track both ownership and
     * writability rather than having separate pendingDirty and
     * pendingWritable flags.
     */
    bool pendingModified;
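
    // NOTE: illustrating the comment above: if the outstanding request
    // is expected to return the block in Owned or Modified state, this
    // flag is set and deferred targets that need a writable copy can
    // later be satisfied; if only a Shared copy is expected, the flag
    // stays clear and such targets wait on the deferred list.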

    /** Did we snoop an invalidate while waiting for data? */
    bool postInvalidate;

    /** Did we snoop a read while waiting for data? */
    bool postDowngrade;

  public:

    /** Track if we sent this as a whole line write or not */
    bool wasWholeLineWrite;

    /** True if the entry is just a simple forward from an upper level */
    bool isForward;

    class Target : public QueueEntry::Target {
      public:

        enum Source {
            FromCPU,
            FromSnoop,
            FromPrefetcher
        };

        const Source source;  //!< Request from the CPU, a snoop, or the
                              //!< prefetcher?

        /**
         * We use this flag to track whether we have cleared the
         * downstreamPending flag for the MSHR of the cache above
         * where this packet originates, and to guard non-initial
         * attempts to clear it.
         *
         * The flag markedPending needs to be updated when the
         * TargetList is in service, which can be:
         * 1) during the Target instantiation if the MSHR is in
         * service and the target is not deferred,
         * 2) when the MSHR becomes in service if the target is not
         * deferred,
         * 3) or when the TargetList is promoted (deferredTargets ->
         * targets).
         */
        bool markedPending;

        const bool allocOnFill;   //!< Should the response servicing this
                                  //!< target list allocate in the cache?

        Target(PacketPtr _pkt, Tick _readyTime, Counter _order,
               Source _source, bool _markedPending, bool alloc_on_fill)
            : QueueEntry::Target(_pkt, _readyTime, _order), source(_source),
              markedPending(_markedPending), allocOnFill(alloc_on_fill)
        {}
    };

    class TargetList : public std::list<Target> {

      public:
        bool needsWritable;
        bool hasUpgrade;
        /** Set when the response should allocate on fill */
        bool allocOnFill;
        /**
         * True if there is at least one non-snooping target that came
         * from another cache.
         */
        bool hasFromCache;

        TargetList();

        /**
         * Use the provided packet and the source to update the
         * flags of this TargetList.
         *
         * @param pkt Packet considered for the flag update
         * @param source Indicates the source of the packet
         * @param alloc_on_fill Whether the pkt would allocate on a fill
         */
        void updateFlags(PacketPtr pkt, Target::Source source,
                         bool alloc_on_fill);

        /**
         * Reset state
         *
         * @param blk_addr Address of the cache block
         * @param blk_size Size of the cache block
         */
        void init(Addr blk_addr, Addr blk_size) {
            blkAddr = blk_addr;
            blkSize = blk_size;
            writesBitmap.resize(blk_size);

            resetFlags();
        }

        void resetFlags() {
            onlyWrites = true;
            std::fill(writesBitmap.begin(), writesBitmap.end(), false);

            needsWritable = false;
            hasUpgrade = false;
            allocOnFill = false;
            hasFromCache = false;
        }
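
        // NOTE: after init() the flags are in their default state,
        // e.g. (a sketch; identifiers are illustrative):
        //
        //   TargetList tl;
        //   tl.init(blk_addr, blk_size);
        //   assert(tl.isReset());  // holds until a target is added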

        /**
         * Goes through the list of targets and uses them to populate
         * the flags of this TargetList. When the function returns the
         * flags are consistent with the properties of packets in the
         * list.
         */
        void populateFlags();

        /**
         * Update the write-tracking state (onlyWrites and the
         * writesBitmap) to account for the specified packet.
         *
         * @param pkt Packet considered for the flag update
         */
        void updateWriteFlags(PacketPtr pkt) {
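            // Request flags that disqualify a write from being merged
            // into a whole-line write; a write carrying any of these
            // flags is treated as non-mergeable below.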
            const Request::FlagsType noMergeFlags =
                Request::UNCACHEABLE |
                Request::STRICT_ORDER | Request::MMAPPED_IPR |
                Request::PRIVILEGED | Request::LLSC |
                Request::MEM_SWAP | Request::MEM_SWAP_COND |
                Request::SECURE;

            // if we have already seen writes for the full block stop
            // here, this might be a full line write followed by
            // other compatible requests (e.g., reads)
            if (!isWholeLineWrite()) {
                bool can_merge_write = pkt->isWrite() &&
                    ((pkt->req->getFlags() & noMergeFlags) == 0);
                onlyWrites &= can_merge_write;
                if (onlyWrites) {
                    auto offset = pkt->getOffset(blkSize);
                    auto begin = writesBitmap.begin() + offset;
                    std::fill(begin, begin + pkt->getSize(), true);
                }
            }
        }
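
        // NOTE: for example, with a 64-byte block, a write to bytes
        // [0, 32) followed by a write to bytes [32, 64) fills the
        // bitmap, so isWholeLineWrite() below returns true; once the
        // bitmap is full, later compatible requests (such as reads)
        // no longer clear onlyWrites.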

        /**
         * Tests if the flags of this TargetList have their default
         * values.
         *
         * @return True if the TargetList is reset, false otherwise.
         */
        bool isReset() const {
            return !needsWritable && !hasUpgrade && !allocOnFill &&
                !hasFromCache && onlyWrites;
        }

        /**
         * Add the specified packet to the TargetList. This function
         * stores information related to the added packet and updates
         * the flags accordingly.
         *
         * @param pkt Packet considered for adding
         * @param readyTime Tick at which the packet is processed by this cache
         * @param order A counter giving a unique id to each target
         * @param source Indicates the source agent of the packet
         * @param markPending Set for deferred targets or pending MSHRs
         * @param alloc_on_fill Whether it should allocate on a fill
         */
        void add(PacketPtr pkt, Tick readyTime, Counter order,
                 Target::Source source, bool markPending, bool alloc_on_fill);

        /**
         * Convert upgrades to the equivalent request if the cache line they
         * refer to would have been invalid (Upgrade -> ReadEx, SC* -> Fail).
         * Used to rejig ordering between targets waiting on an MSHR.
         */
        void replaceUpgrades();

        void clearDownstreamPending();
        void clearDownstreamPending(iterator begin, iterator end);
        bool trySatisfyFunctional(PacketPtr pkt);
        void print(std::ostream &os, int verbosity,
                   const std::string &prefix) const;

        /**
         * Check if this list contains only compatible writes, and if they
         * span the entire cache line. This is used as part of the
         * miss-packet creation. Note that new requests may arrive after a
         * miss-packet has been created, and for the fill we therefore use
         * the wasWholeLineWrite field.
         */
        bool isWholeLineWrite() const
        {
            return onlyWrites &&
                std::all_of(writesBitmap.begin(),
                            writesBitmap.end(), [](bool i) { return i; });
        }

      private:
        /** Address of the cache block for this list of targets. */
        Addr blkAddr;

        /** Size of the cache block. */
        Addr blkSize;

        /** Are we only dealing with writes? */
        bool onlyWrites;

        // NOTE: std::vector<bool> might not satisfy the
        // ForwardIterator requirement and therefore cannot be used
        // for writesBitmap.
        /**
         * Track which bytes are written by requests in this target
         * list.
         */
        std::vector<char> writesBitmap;
    };

    /** A list of MSHRs. */
    typedef std::list<MSHR *> List;
    /** MSHR list iterator. */
    typedef List::iterator Iterator;

    /** The pending* and post* flags are only valid if inService is
     *  true.  Using the accessor functions lets us detect if these
     *  flags are accessed improperly.
     */

    /** True if we need to get a writable copy of the block. */
    bool needsWritable() const { return targets.needsWritable; }

    bool isCleaning() const {
        PacketPtr pkt = targets.front().pkt;
        return pkt->isClean();
    }

    bool isPendingModified() const {
        assert(inService);
        return pendingModified;
    }

    bool hasPostInvalidate() const {
        assert(inService);
        return postInvalidate;
    }

    bool hasPostDowngrade() const {
        assert(inService);
        return postDowngrade;
    }

    bool sendPacket(BaseCache &cache) override;

    bool allocOnFill() const {
        return targets.allocOnFill;
    }

    /**
     * Determine if there are non-deferred requests from other caches
     *
     * @return true if any of the targets is from another cache
     */
    bool hasFromCache() const {
        return targets.hasFromCache;
    }

  private:
    /**
     * Promotes deferred targets that satisfy a predicate
     *
     * Deferred targets are promoted to the target list if they
     * satisfy a given condition. The operation stops at the first
     * deferred target that doesn't satisfy the condition.
     *
     * @param pred A condition on a Target
     */
    void promoteIf(const std::function<bool (Target &)>& pred);
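
    // NOTE: a typical predicate looks like the following sketch
    // (along the lines of what promoteReadable() needs): promote
    // deferred targets until one is a cache maintenance operation or
    // needs a writable copy of the block:
    //
    //   promoteIf([](Target &t) {
    //       return !t.pkt->req->isCacheMaintenance() &&
    //              !t.pkt->needsWritable();
    //   });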

    /**
     * Pointer to this MSHR on the ready list.
     * @sa MissQueue, MSHRQueue::readyList
     */
    Iterator readyIter;

    /**
     * Pointer to this MSHR on the allocated list.
     * @sa MissQueue, MSHRQueue::allocatedList
     */
    Iterator allocIter;

    /** List of all requests that match the address */
    TargetList targets;

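    /**
     * List of targets that have been deferred, e.g., because they
     * need a writable copy that the outstanding request will not
     * obtain; see the promote* methods below.
     */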
    TargetList deferredTargets;

  public:
    /**
     * Check if this MSHR contains only compatible writes, and if they
     * span the entire cache line. This is used as part of the
     * miss-packet creation. Note that new requests may arrive after a
     * miss-packet has been created, and for the fill we therefore use
     * the wasWholeLineWrite field.
     */
    bool isWholeLineWrite() const {
        return targets.isWholeLineWrite();
    }

    /**
     * Allocate a miss to this MSHR.
     * @param blk_addr The address of the block.
     * @param blk_size The number of bytes to request.
     * @param pkt The original miss.
     * @param when_ready When the MSHR should be ready to act.
     * @param _order The logical order of this MSHR
     * @param alloc_on_fill Should the cache allocate a block on fill
     */
    void allocate(Addr blk_addr, unsigned blk_size, PacketPtr pkt,
                  Tick when_ready, Counter _order, bool alloc_on_fill);

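    /**
     * Mark this MSHR as in service, i.e., the miss packet for this
     * entry has been forwarded downstream.
     *
     * @param pending_modified_resp Whether the response is expected
     * to provide the block in a modified (dirty) state
     */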
    void markInService(bool pending_modified_resp);

    void clearDownstreamPending();

    /**
     * Mark this MSHR as free.
     */
    void deallocate();
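
    // NOTE: the enclosing cache drives a typical lifecycle along
    // these lines (a sketch, not the literal call sequence):
    //
    //   mshr->allocate(blk_addr, blk_size, pkt, when_ready, order,
    //                  alloc_on_fill);              // on a miss
    //   mshr->markInService(pending_modified_resp); // request sent
    //   ...       // the response arrives and targets are serviced
    //   mshr->deallocate();                         // entry is free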

    /**
     * Add a request to the list of targets.
     * @param target The target.
     */
    void allocateTarget(PacketPtr target, Tick when, Counter order,
                        bool alloc_on_fill);
    bool handleSnoop(PacketPtr target, Counter order);

    /** A simple constructor. */
    MSHR();

    /**
     * Returns the current number of allocated targets.
     * @return The current number of allocated targets.
     */
    int getNumTargets() const
    { return targets.size() + deferredTargets.size(); }

    /**
     * Extracts the subset of the targets that can be serviced given a
     * received response. This function returns the targets list
     * unless the response is a ReadRespWithInvalidate. The
     * ReadRespWithInvalidate is the only invalidating response whose
     * invalidation was not expected when the request (a
     * ReadSharedReq) was sent out. For a ReadRespWithInvalidate we
     * can safely service only the first FromCPU target and all
     * FromSnoop targets (to inform all snoopers that we no longer
     * have the block).
     *
     * @param pkt The response from the downstream memory
     */
    TargetList extractServiceableTargets(PacketPtr pkt);
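
    // NOTE: intended use on the response path (a sketch; local names
    // are illustrative):
    //
    //   TargetList ready = mshr->extractServiceableTargets(rsp_pkt);
    //   for (auto &target : ready) {
    //       // service target.pkt using the response data
    //   }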

    /**
     * Returns true if there are targets left.
     * @return true if there are targets
     */
    bool hasTargets() const { return !targets.empty(); }

    /**
     * Returns a pointer to the first target.
     * @return A pointer to the first target.
     */
    QueueEntry::Target *getTarget() override
    {
        assert(hasTargets());
        return &targets.front();
    }

    /**
     * Pop first target.
     */
    void popTarget()
    {
        targets.pop_front();
    }

    bool promoteDeferredTargets();

    /**
     * Promotes deferred targets that do not require writable
     *
     * Move targets from the deferred targets list to the target list
     * starting from the first deferred target until the first target
     * that is a cache maintenance operation or needs a writable copy
     * of the block
     */
    void promoteReadable();

    /**
     * Promotes deferred targets that require a writable copy
     *
     * Requests in the deferred target list are moved to the target
     * list up until the first target that is a cache maintenance
     * operation.
     */
    void promoteWritable();

    bool trySatisfyFunctional(PacketPtr pkt);

    /**
     * Adds a delay relative to the current tick to the current MSHR
     * @param delay_ticks the desired delay in ticks
     */
    void delay(Tick delay_ticks)
    {
        assert(readyTime <= curTick());
        readyTime = curTick() + delay_ticks;
    }

    /**
     * Prints the contents of this MSHR for debugging.
     */
    void print(std::ostream &os,
               int verbosity = 0,
               const std::string &prefix = "") const override;

    /**
     * A no-args wrapper of print(std::ostream...), meant to be
     * invoked from DPRINTFs, avoiding string overheads in fast mode.
     *
     * @return string with MSHR fields and [deferred]targets
     */
    std::string print() const;
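
    // NOTE: typical call site (illustrative; "Cache" stands in for an
    // actual debug flag):
    //
    //   DPRINTF(Cache, "%s\n", mshr->print());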

    bool matchBlockAddr(const Addr addr, const bool is_secure) const override;
    bool matchBlockAddr(const PacketPtr pkt) const override;
    bool conflictAddr(const QueueEntry* entry) const override;
};

#endif // __MEM_CACHE_MSHR_HH__