/*
 * Copyright (c) 2012-2013,2017-2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ron Dreslinski
 *          Steve Reinhardt
 *          Ali Saidi
 */

/**
 * @file
 * Declaration of a request, the overall memory request consisting of
 * the parts of the request that are persistent throughout the
 * transaction.
 */

#ifndef __MEM_REQUEST_HH__
#define __MEM_REQUEST_HH__

#include <cassert>
#include <climits>
#include <limits>
#include <memory>
#include <vector>

#include "base/flags.hh"
#include "base/logging.hh"
#include "base/types.hh"
#include "cpu/inst_seq.hh"
#include "sim/core.hh"

/**
 * Special TaskIds that are used for per-context-switch stats dumps
 * and Cache Occupancy. Having too many tasks seems to be a problem
 * with vector stats. 1024 seems to be a reasonable number that
 * doesn't cause a problem with stats and is large enough for realistic
 * benchmarks (Linux/Android boot, BBench, etc.)
 */

namespace ContextSwitchTaskId {
    enum TaskId {
        MaxNormalTaskId = 1021, /* Maximum number of normal tasks */
        Prefetcher = 1022, /* For cache lines brought in by prefetcher */
        DMA = 1023, /* Mostly Table Walker */
        Unknown = 1024,
        NumTaskId
    };
}

class Request;

typedef std::shared_ptr<Request> RequestPtr;
typedef uint16_t MasterID;

class Request
{
  public:
    typedef uint64_t FlagsType;
    typedef uint8_t ArchFlagsType;
    typedef ::Flags<FlagsType> Flags;

    enum : FlagsType {
        /**
         * Architecture specific flags.
         *
         * These bits in the flag field are reserved for
         * architecture-specific code. For example, SPARC uses them to
         * represent ASIs.
         */
        ARCH_BITS                   = 0x000000FF,
        /** The request was an instruction fetch. */
        INST_FETCH                  = 0x00000100,
        /** The virtual address is also the physical address. */
        PHYSICAL                    = 0x00000200,
        /**
         * The request is to an uncacheable address.
         *
         * @note Uncacheable accesses may be reordered by CPU models. The
         * STRICT_ORDER flag should be set if such reordering is
         * undesirable.
         */
        UNCACHEABLE                = 0x00000400,
        /**
         * The request is required to be strictly ordered by <i>CPU
         * models</i> and is non-speculative.
         *
         * A strictly ordered request is guaranteed to never be
         * re-ordered or executed speculatively by a CPU model. The
         * memory system may still reorder requests in caches unless
         * the UNCACHEABLE flag is set as well.
         */
        STRICT_ORDER                = 0x00000800,
        /** This request is to a memory mapped register. */
        MMAPPED_IPR                 = 0x00002000,
        /** This request is made in privileged mode. */
        PRIVILEGED                  = 0x00008000,

        /**
         * This is a write that targets and zeroes an entire cache
         * block. There is no need for a read/modify/write.
         */
        CACHE_BLOCK_ZERO            = 0x00010000,

        /** The request should not cause a memory access. */
        NO_ACCESS                   = 0x00080000,
        /**
         * This request will lock or unlock the accessed memory. When
         * used with a load, the access locks the particular chunk of
         * memory. When used with a store, it unlocks. The rule is
         * that locked accesses have to be made up of a locked load,
         * some operation on the data, and then a locked store.
         */
        LOCKED_RMW                  = 0x00100000,
        /** The request is a Load locked/store conditional. */
        LLSC                        = 0x00200000,
        /** This request is for a memory swap. */
        MEM_SWAP                    = 0x00400000,
        MEM_SWAP_COND               = 0x00800000,

        /** The request is a prefetch. */
        PREFETCH                    = 0x01000000,
        /** The request should be prefetched into the exclusive state. */
        PF_EXCLUSIVE                = 0x02000000,
        /** The request should be marked as LRU. */
        EVICT_NEXT                  = 0x04000000,
        /** The request should be marked with ACQUIRE. */
        ACQUIRE                     = 0x00020000,
        /** The request should be marked with RELEASE. */
        RELEASE                     = 0x00040000,

        /** The request is an atomic that returns data. */
        ATOMIC_RETURN_OP            = 0x40000000,
        /** The request is an atomic that does not return data. */
        ATOMIC_NO_RETURN_OP         = 0x80000000,

        /**
         * The request should be marked with KERNEL.
         * Used to indicate the synchronization associated with a GPU kernel
         * launch or completion.
         */
        KERNEL                      = 0x00001000,

        /**
         * The request should be handled by the generic IPR code (only
         * valid together with MMAPPED_IPR)
         */
        GENERIC_IPR                 = 0x08000000,

        /** The request targets the secure memory space. */
        SECURE                      = 0x10000000,
        /** The request is a page table walk */
        PT_WALK                     = 0x20000000,

        /** The request invalidates a memory location */
        INVALIDATE                  = 0x0000000100000000,
        /** The request cleans a memory location */
        CLEAN                       = 0x0000000200000000,

        /** The request targets the point of unification */
        DST_POU                     = 0x0000001000000000,

        /** The request targets the point of coherence */
        DST_POC                     = 0x0000002000000000,

        /** Bits to define the destination of a request */
        DST_BITS                    = 0x0000003000000000,

        /**
         * These flags are *not* cleared when a Request object is
         * reused (assigned a new address).
         */
        STICKY_FLAGS = INST_FETCH
    };
    static const FlagsType STORE_NO_DATA = CACHE_BLOCK_ZERO |
        CLEAN | INVALIDATE;
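
    // Usage sketch (not part of the original documentation; the
    // address, size, and master id below are hypothetical): FlagsType
    // values can be combined with bitwise-or when building a request,
    // e.g. for an uncacheable access that must not be reordered by the
    // CPU model:
    //
    //   RequestPtr req = std::make_shared<Request>(
    //       0x1000, 4, Request::UNCACHEABLE | Request::STRICT_ORDER,
    //       mid);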

    /** Master Ids that are statically allocated
     * @{*/
    enum : MasterID {
        /** This master id is used for writeback requests by the caches */
        wbMasterId = 0,
        /**
         * This master id is used for functional requests that
         * don't come from a particular device
         */
        funcMasterId = 1,
        /** This master id is used for message signaled interrupts */
        intMasterId = 2,
        /**
         * Invalid master id for assertion checking only. It is
         * invalid behavior to ever send this id as part of a request.
         */
        invldMasterId = std::numeric_limits<MasterID>::max()
    };
    /** @} */

    typedef uint32_t MemSpaceConfigFlagsType;
    typedef ::Flags<MemSpaceConfigFlagsType> MemSpaceConfigFlags;

    enum : MemSpaceConfigFlagsType {
        /** Has a synchronization scope been set? */
        SCOPE_VALID            = 0x00000001,
        /** Access has Wavefront scope visibility */
        WAVEFRONT_SCOPE        = 0x00000002,
        /** Access has Workgroup scope visibility */
        WORKGROUP_SCOPE        = 0x00000004,
        /** Access has Device (e.g., GPU) scope visibility */
        DEVICE_SCOPE           = 0x00000008,
        /** Access has System (e.g., CPU + GPU) scope visibility */
        SYSTEM_SCOPE           = 0x00000010,

        /** Global Segment */
        GLOBAL_SEGMENT         = 0x00000020,
        /** Group Segment */
        GROUP_SEGMENT          = 0x00000040,
        /** Private Segment */
        PRIVATE_SEGMENT        = 0x00000080,
        /** Kernarg Segment */
        KERNARG_SEGMENT        = 0x00000100,
        /** Readonly Segment */
        READONLY_SEGMENT       = 0x00000200,
        /** Spill Segment */
        SPILL_SEGMENT          = 0x00000400,
        /** Arg Segment */
        ARG_SEGMENT            = 0x00000800,
    };

  private:
    typedef uint16_t PrivateFlagsType;
    typedef ::Flags<PrivateFlagsType> PrivateFlags;

    enum : PrivateFlagsType {
        /** Whether or not the size is valid. */
        VALID_SIZE           = 0x00000001,
        /** Whether or not paddr is valid (has been written yet). */
        VALID_PADDR          = 0x00000002,
        /** Whether or not the vaddr & asid are valid. */
        VALID_VADDR          = 0x00000004,
        /** Whether or not the instruction sequence number is valid. */
        VALID_INST_SEQ_NUM   = 0x00000008,
        /** Whether or not the pc is valid. */
        VALID_PC             = 0x00000010,
        /** Whether or not the context ID is valid. */
        VALID_CONTEXT_ID     = 0x00000020,
        /** Whether or not the sc result is valid. */
        VALID_EXTRA_DATA     = 0x00000080,
        /** Whether or not the stream ID and substream ID are valid. */
        VALID_STREAM_ID      = 0x00000100,
        VALID_SUBSTREAM_ID   = 0x00000200,
        /**
         * These flags are *not* cleared when a Request object is reused
         * (assigned a new address).
         */
        STICKY_PRIVATE_FLAGS = VALID_CONTEXT_ID
    };

  private:

    /**
     * Set up a physical (e.g. device) request in a previously
     * allocated Request object.
     */
    void
    setPhys(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
    {
        _paddr = paddr;
        _size = size;
        _time = time;
        _masterId = mid;
        _flags.clear(~STICKY_FLAGS);
        _flags.set(flags);
        privateFlags.clear(~STICKY_PRIVATE_FLAGS);
        privateFlags.set(VALID_PADDR|VALID_SIZE);
        depth = 0;
        accessDelta = 0;
        //translateDelta = 0;
    }

    /**
     * The physical address of the request. Valid only if the
     * VALID_PADDR flag is set.
     */
    Addr _paddr;

    /**
     * The size of the request. This field must be set when vaddr or
     * paddr is written via setVirt() or setPhys(), so it is always
     * valid as long as one of the address fields is valid.
     */
    unsigned _size;

    /** Byte-enable mask for writes. */
    std::vector<bool> _byteEnable;

    /** The requestor ID which is unique in the system for all ports
     * that are capable of issuing a transaction
     */
    MasterID _masterId;

    /** Flag structure for the request. */
    Flags _flags;

    /** Memory space configuration flag structure for the request. */
    MemSpaceConfigFlags _memSpaceConfigFlags;

    /** Private flags for field validity checking. */
    PrivateFlags privateFlags;

    /**
     * The time this request was started. Used to calculate
     * latencies. This field is set to curTick() any time paddr or vaddr
     * is written.
     */
    Tick _time;

    /**
     * The task id associated with this request
     */
    uint32_t _taskId;

    union {
        struct {
            /**
             * The stream ID uniquely identifies a device behind the
             * SMMU/IOMMU. Each transaction arriving at the SMMU/IOMMU
             * is associated with exactly one stream ID.
             */
            uint32_t  _streamId;

            /**
             * The substream ID identifies an "execution context" within a
             * device behind an SMMU/IOMMU. It's intended to map 1-to-1 to
             * PCIe PASID (Process Address Space ID). The presence of a
             * substream ID is optional.
             */
            uint32_t _substreamId;
        };

        /** The address space ID. */
        uint64_t _asid;
    };

    /** The virtual address of the request. */
    Addr _vaddr;

    /**
     * Extra data for the request, such as the return value of
     * store conditional or the compare value for a CAS. */
    uint64_t _extraData;

    /** The context ID (for statistics, locks, and wakeups). */
    ContextID _contextId;

    /** program counter of initiating access; for tracing/debugging */
    Addr _pc;

    /** Sequence number of the instruction that creates the request */
    InstSeqNum _reqInstSeqNum;

    /** A pointer to an atomic operation */
    AtomicOpFunctorPtr atomicOpFunctor;

  public:

    /**
     * Minimal constructor. No fields are initialized. (Note that
     *  _flags and privateFlags are cleared by Flags default
     *  constructor.)
     */
    Request()
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {}

    Request(Addr paddr, unsigned size, Flags flags, MasterID mid,
            InstSeqNum seq_num, ContextID cid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(seq_num), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, curTick());
        setContext(cid);
        privateFlags.set(VALID_INST_SEQ_NUM);
    }

    /**
     * Constructor for physical (e.g. device) requests.  Initializes
     * just physical address, size, flags, and timestamp (to curTick()).
     * These fields are adequate to perform a request.
     */
    Request(Addr paddr, unsigned size, Flags flags, MasterID mid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, curTick());
    }
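
    // Hedged usage sketch: a device model (e.g. a DMA engine) building
    // a physical request with this constructor; `dma_addr` and
    // `dma_master_id` are hypothetical names, and the timestamp
    // defaults to curTick():
    //
    //   RequestPtr req = std::make_shared<Request>(
    //       dma_addr, 64, Request::UNCACHEABLE, dma_master_id);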

    Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, time);
    }

    Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time,
            Addr pc)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(pc),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, time);
        privateFlags.set(VALID_PC);
    }

    Request(uint64_t asid, Addr vaddr, unsigned size, Flags flags,
            MasterID mid, Addr pc, ContextID cid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setVirt(asid, vaddr, size, flags, mid, pc);
        setContext(cid);
    }
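
    // Hedged usage sketch: a CPU-side request carries a virtual
    // address until translation fills in the physical one via
    // setPaddr(). The names below (`vaddr`, `pc`, `data_master_id`,
    // `tc_id`) are hypothetical:
    //
    //   RequestPtr req = std::make_shared<Request>(
    //       /* asid */ 0, vaddr, /* size */ 8, /* flags */ 0,
    //       data_master_id, pc, tc_id);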

    Request(uint64_t asid, Addr vaddr, unsigned size, Flags flags,
            MasterID mid, Addr pc, ContextID cid,
            AtomicOpFunctorPtr atomic_op)
    {
        setVirt(asid, vaddr, size, flags, mid, pc, std::move(atomic_op));
        setContext(cid);
    }

    Request(const Request& other)
        : _paddr(other._paddr), _size(other._size),
          _masterId(other._masterId),
          _flags(other._flags),
          _memSpaceConfigFlags(other._memSpaceConfigFlags),
          privateFlags(other.privateFlags),
          _time(other._time),
          _taskId(other._taskId), _asid(other._asid), _vaddr(other._vaddr),
          _extraData(other._extraData), _contextId(other._contextId),
          _pc(other._pc), _reqInstSeqNum(other._reqInstSeqNum),
          translateDelta(other.translateDelta),
          accessDelta(other.accessDelta), depth(other.depth)
    {
        atomicOpFunctor.reset(other.atomicOpFunctor ?
                                other.atomicOpFunctor->clone() : nullptr);
    }

    ~Request() {}

    /**
     * Set up Context numbers.
     */
    void
    setContext(ContextID context_id)
    {
        _contextId = context_id;
        privateFlags.set(VALID_CONTEXT_ID);
    }

    void
    setStreamId(uint32_t sid)
    {
        _streamId = sid;
        privateFlags.set(VALID_STREAM_ID);
    }

    void
    setSubStreamId(uint32_t ssid)
    {
        assert(privateFlags.isSet(VALID_STREAM_ID));
        _substreamId = ssid;
        privateFlags.set(VALID_SUBSTREAM_ID);
    }
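
    // Ordering sketch: setSubStreamId() asserts that a stream ID is
    // already valid, so an SMMU/IOMMU device model would do (with
    // hypothetical `sid`/`pasid` values):
    //
    //   req->setStreamId(sid);       // must come first
    //   req->setSubStreamId(pasid);  // optional PCIe PASID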

    /**
     * Set up a virtual (e.g., CPU) request in a previously
     * allocated Request object.
     */
    void
    setVirt(uint64_t asid, Addr vaddr, unsigned size, Flags flags,
            MasterID mid, Addr pc, AtomicOpFunctorPtr amo_op = nullptr)
    {
        _asid = asid;
        _vaddr = vaddr;
        _size = size;
        _masterId = mid;
        _pc = pc;
        _time = curTick();

        _flags.clear(~STICKY_FLAGS);
        _flags.set(flags);
        privateFlags.clear(~STICKY_PRIVATE_FLAGS);
        privateFlags.set(VALID_VADDR|VALID_SIZE|VALID_PC);
        depth = 0;
        accessDelta = 0;
        translateDelta = 0;
        atomicOpFunctor = std::move(amo_op);
    }
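
    // Reuse sketch: setVirt() re-arms an existing Request for a new
    // access; only STICKY_FLAGS (INST_FETCH) and STICKY_PRIVATE_FLAGS
    // (VALID_CONTEXT_ID) survive, everything else is cleared first.
    // The values below are hypothetical:
    //
    //   req->setVirt(/* asid */ 0, new_vaddr, 4, 0, master_id, pc);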

    /**
     * Set just the physical address. This is usually used to record
     * the result of a translation. However, when using virtualized
     * CPUs setPhys() is sometimes called to finalize a physical
     * address without a virtual address, so we can't check if the
     * virtual address is valid.
     */
    void
    setPaddr(Addr paddr)
    {
        _paddr = paddr;
        privateFlags.set(VALID_PADDR);
    }

    /**
     * Generate two requests as if this request had been split into two
     * pieces. The original request can't have been translated already.
     */
    // TODO: this function is still required by TimingSimpleCPU - it should
    // be removed once TimingSimpleCPU supports arbitrarily long multi-line
    // mem. accesses
    void splitOnVaddr(Addr split_addr, RequestPtr &req1, RequestPtr &req2)
    {
        assert(privateFlags.isSet(VALID_VADDR));
        assert(privateFlags.noneSet(VALID_PADDR));
        assert(split_addr > _vaddr && split_addr < _vaddr + _size);
        req1 = std::make_shared<Request>(*this);
        req2 = std::make_shared<Request>(*this);
        req1->_size = split_addr - _vaddr;
        req2->_vaddr = split_addr;
        req2->_size = _size - req1->_size;
        if (!_byteEnable.empty()) {
            req1->_byteEnable = std::vector<bool>(
                _byteEnable.begin(),
                _byteEnable.begin() + req1->_size);
            req2->_byteEnable = std::vector<bool>(
                _byteEnable.begin() + req1->_size,
                _byteEnable.end());
        }
    }
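
    // Usage sketch with hypothetical values: an 8-byte access at vaddr
    // 0x3c straddles a 0x40 boundary and is split before translation:
    //
    //   RequestPtr req1, req2;
    //   req->splitOnVaddr(0x40, req1, req2);
    //   // req1: vaddr 0x3c, size 4; req2: vaddr 0x40, size 4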

    /**
     * Accessor for paddr.
     */
    bool
    hasPaddr() const
    {
        return privateFlags.isSet(VALID_PADDR);
    }

    Addr
    getPaddr() const
    {
        assert(privateFlags.isSet(VALID_PADDR));
        return _paddr;
    }

    /**
     * Time for the TLB/table walker to successfully translate this request.
     */
    Tick translateDelta;

    /**
     * Access latency to complete this memory transaction not including
     * translation time.
     */
    Tick accessDelta;

    /**
     * Level of the cache hierarchy where this request was responded to
     * (e.g. 0 = L1; 1 = L2).
     */
    mutable int depth;

    /**
     *  Accessor for size.
     */
    bool
    hasSize() const
    {
        return privateFlags.isSet(VALID_SIZE);
    }

    unsigned
    getSize() const
    {
        assert(privateFlags.isSet(VALID_SIZE));
        return _size;
    }

    const std::vector<bool>&
    getByteEnable() const
    {
        return _byteEnable;
    }

    void
    setByteEnable(const std::vector<bool>& be)
    {
        assert(be.empty() || be.size() == _size);
        _byteEnable = be;
    }
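
    // Sketch: the mask, if non-empty, must have exactly _size entries;
    // here only the low two bytes of a hypothetical 4-byte write are
    // enabled:
    //
    //   std::vector<bool> be = {true, true, false, false};
    //   req->setByteEnable(be);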

    /** Accessor for time. */
    Tick
    time() const
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _time;
    }

    /**
     * Accessor for atomic-op functor.
     */
    bool
    hasAtomicOpFunctor()
    {
        return (bool)atomicOpFunctor;
    }

    AtomicOpFunctor *
    getAtomicOpFunctor()
    {
        assert(atomicOpFunctor);
        return atomicOpFunctor.get();
    }

    /** Accessor for flags. */
    Flags
    getFlags()
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _flags;
    }

    /** Note that unlike other accessors, this function sets *specific
        flags* (ORs them in); it does not assign its argument to the
        _flags field.  Thus this method should rightly be called
        setFlags() and not just flags(). */
    void
    setFlags(Flags flags)
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        _flags.set(flags);
    }
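
    // Consequence of the OR-in semantics noted above: repeated calls
    // accumulate flags rather than overwrite them.
    //
    //   req->setFlags(Request::UNCACHEABLE);
    //   req->setFlags(Request::STRICT_ORDER);
    //   // both isUncacheable() and isStrictlyOrdered() now hold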

    void
    setMemSpaceConfigFlags(MemSpaceConfigFlags extraFlags)
    {
        assert(privateFlags.isSet(VALID_PADDR | VALID_VADDR));
        _memSpaceConfigFlags.set(extraFlags);
    }

    /** Accessor function for vaddr.*/
    bool
    hasVaddr() const
    {
        return privateFlags.isSet(VALID_VADDR);
    }

    Addr
    getVaddr() const
    {
        assert(privateFlags.isSet(VALID_VADDR));
        return _vaddr;
    }

    /** Accessor for the requestor id. */
    MasterID
    masterId() const
    {
        return _masterId;
    }

    uint32_t
    taskId() const
    {
        return _taskId;
    }

    void
    taskId(uint32_t id) {
        _taskId = id;
    }

    /** Accessor function for asid.*/
    uint64_t
    getAsid() const
    {
        assert(privateFlags.isSet(VALID_VADDR));
        return _asid;
    }

    /** Accessor function for asid.*/
    void
    setAsid(uint64_t asid)
    {
        _asid = asid;
    }

    /** Accessor function for architecture-specific flags.*/
    ArchFlagsType
    getArchFlags() const
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _flags & ARCH_BITS;
    }

    /** Accessor function to check if sc result is valid. */
    bool
    extraDataValid() const
    {
        return privateFlags.isSet(VALID_EXTRA_DATA);
    }

    /** Accessor function for store conditional return value.*/
    uint64_t
    getExtraData() const
    {
        assert(privateFlags.isSet(VALID_EXTRA_DATA));
        return _extraData;
    }

    /** Accessor function for store conditional return value.*/
    void
    setExtraData(uint64_t extraData)
    {
        _extraData = extraData;
        privateFlags.set(VALID_EXTRA_DATA);
    }

    bool
    hasContextId() const
    {
        return privateFlags.isSet(VALID_CONTEXT_ID);
    }

    /** Accessor function for context ID.*/
    ContextID
    contextId() const
    {
        assert(privateFlags.isSet(VALID_CONTEXT_ID));
        return _contextId;
    }

    uint32_t
    streamId() const
    {
        assert(privateFlags.isSet(VALID_STREAM_ID));
        return _streamId;
    }

    bool
    hasSubstreamId() const
    {
        return privateFlags.isSet(VALID_SUBSTREAM_ID);
    }

    uint32_t
    substreamId() const
    {
        assert(privateFlags.isSet(VALID_SUBSTREAM_ID));
        return _substreamId;
    }

    void
    setPC(Addr pc)
    {
        privateFlags.set(VALID_PC);
        _pc = pc;
    }

    bool
    hasPC() const
    {
        return privateFlags.isSet(VALID_PC);
    }

    /** Accessor function for pc.*/
    Addr
    getPC() const
    {
        assert(privateFlags.isSet(VALID_PC));
        return _pc;
    }

    /**
     * Increment/Get the depth at which this request is responded to.
     * This currently happens when the request misses in any cache level.
     */
    void incAccessDepth() const { depth++; }
    int getAccessDepth() const { return depth; }

    /**
     * Set/Get the time taken for this request to be successfully translated.
     */
    void setTranslateLatency() { translateDelta = curTick() - _time; }
    Tick getTranslateLatency() const { return translateDelta; }

    /**
     * Set/Get the time taken to complete this request's access, not including
     *  the time to successfully translate the request.
     */
    void setAccessLatency() { accessDelta = curTick() - _time - translateDelta; }
    Tick getAccessLatency() const { return accessDelta; }
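
    // Accounting sketch: both deltas are measured from _time (the
    // request start), so a TLB model would call setTranslateLatency()
    // when translation finishes and the CPU setAccessLatency() when
    // the response arrives; getAccessLatency() then excludes the
    // translation time.
    //
    //   req->setTranslateLatency();  // translateDelta = now - start
    //   // ... memory access completes ...
    //   req->setAccessLatency();     // accessDelta = now - start - xlate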

    /**
     * Accessor for the sequence number of the instruction that creates
     * the request.
     */
    bool
    hasInstSeqNum() const
    {
        return privateFlags.isSet(VALID_INST_SEQ_NUM);
    }

    InstSeqNum
    getReqInstSeqNum() const
    {
        assert(privateFlags.isSet(VALID_INST_SEQ_NUM));
        return _reqInstSeqNum;
    }

    void
    setReqInstSeqNum(const InstSeqNum seq_num)
    {
        privateFlags.set(VALID_INST_SEQ_NUM);
        _reqInstSeqNum = seq_num;
    }

    /** Accessor functions for flags. Note that these are for testing
        only; setting flags should be done via setFlags(). */
    bool isUncacheable() const { return _flags.isSet(UNCACHEABLE); }
    bool isStrictlyOrdered() const { return _flags.isSet(STRICT_ORDER); }
    bool isInstFetch() const { return _flags.isSet(INST_FETCH); }
    bool isPrefetch() const { return (_flags.isSet(PREFETCH) ||
                                      _flags.isSet(PF_EXCLUSIVE)); }
    bool isPrefetchEx() const { return _flags.isSet(PF_EXCLUSIVE); }
    bool isLLSC() const { return _flags.isSet(LLSC); }
    bool isPriv() const { return _flags.isSet(PRIVILEGED); }
    bool isLockedRMW() const { return _flags.isSet(LOCKED_RMW); }
    bool isSwap() const { return _flags.isSet(MEM_SWAP|MEM_SWAP_COND); }
    bool isCondSwap() const { return _flags.isSet(MEM_SWAP_COND); }
    bool isMmappedIpr() const { return _flags.isSet(MMAPPED_IPR); }
    bool isSecure() const { return _flags.isSet(SECURE); }
    bool isPTWalk() const { return _flags.isSet(PT_WALK); }
    bool isAcquire() const { return _flags.isSet(ACQUIRE); }
    bool isRelease() const { return _flags.isSet(RELEASE); }
    bool isKernel() const { return _flags.isSet(KERNEL); }
    bool isAtomicReturn() const { return _flags.isSet(ATOMIC_RETURN_OP); }
    bool isAtomicNoReturn() const { return _flags.isSet(ATOMIC_NO_RETURN_OP); }

    bool
    isAtomic() const
    {
        return _flags.isSet(ATOMIC_RETURN_OP) ||
               _flags.isSet(ATOMIC_NO_RETURN_OP);
    }

    /**
     * Accessor functions for the destination of a memory request. The
     * destination flag can specify a point of reference for the
     * operation (e.g. a cache block clean to the point of
     * unification). At the moment the destination is only used by the
     * cache maintenance operations.
     */
    bool isToPOU() const { return _flags.isSet(DST_POU); }
    bool isToPOC() const { return _flags.isSet(DST_POC); }
    Flags getDest() const { return _flags & DST_BITS; }

    /**
     * Accessor functions for the memory space configuration flags, used
     * by GPU ISAs such as the Heterogeneous System Architecture (HSA).
     * Note that these are for testing only; setting extraFlags should
     * be done via setMemSpaceConfigFlags().
     */
    bool isScoped() const { return _memSpaceConfigFlags.isSet(SCOPE_VALID); }

    bool
    isWavefrontScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(WAVEFRONT_SCOPE);
    }

    bool
    isWorkgroupScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(WORKGROUP_SCOPE);
    }

    bool
    isDeviceScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(DEVICE_SCOPE);
    }

    bool
    isSystemScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(SYSTEM_SCOPE);
    }

    bool
    isGlobalSegment() const
    {
        return _memSpaceConfigFlags.isSet(GLOBAL_SEGMENT) ||
               (!isGroupSegment() && !isPrivateSegment() &&
                !isKernargSegment() && !isReadonlySegment() &&
                !isSpillSegment() && !isArgSegment());
    }

    bool
    isGroupSegment() const
    {
        return _memSpaceConfigFlags.isSet(GROUP_SEGMENT);
    }

    bool
    isPrivateSegment() const
    {
        return _memSpaceConfigFlags.isSet(PRIVATE_SEGMENT);
    }

    bool
    isKernargSegment() const
    {
        return _memSpaceConfigFlags.isSet(KERNARG_SEGMENT);
    }

    bool
    isReadonlySegment() const
    {
        return _memSpaceConfigFlags.isSet(READONLY_SEGMENT);
    }

    bool
    isSpillSegment() const
    {
        return _memSpaceConfigFlags.isSet(SPILL_SEGMENT);
    }

    bool
    isArgSegment() const
    {
        return _memSpaceConfigFlags.isSet(ARG_SEGMENT);
    }

    /**
     * Accessor functions to determine whether this request is part of
     * a cache maintenance operation. At the moment three operations
     * are supported:
     *
     * 1) A cache clean operation updates all copies of a memory
     * location to the point of reference,
     * 2) A cache invalidate operation invalidates all copies of the
     * specified block in the memory above the point of reference,
     * 3) A clean and invalidate operation is a combination of the two
     * operations.
     * @{ */
    bool isCacheClean() const { return _flags.isSet(CLEAN); }
    bool isCacheInvalidate() const { return _flags.isSet(INVALIDATE); }
    bool isCacheMaintenance() const { return _flags.isSet(CLEAN|INVALIDATE); }
    /** @} */
};

#endif // __MEM_REQUEST_HH__