request.hh revision 11305:78c1e4f5dfc5
/*
 * Copyright (c) 2012-2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ron Dreslinski
 *          Steve Reinhardt
 *          Ali Saidi
 */

/**
 * @file
 * Declaration of a request, the overall memory request consisting of
 * the parts of the request that are persistent throughout the transaction.
 */

#ifndef __MEM_REQUEST_HH__
#define __MEM_REQUEST_HH__

#include <cassert>
#include <climits>

#include "base/flags.hh"
#include "base/misc.hh"
#include "base/types.hh"
#include "cpu/inst_seq.hh"
#include "sim/core.hh"

/**
 * Special TaskIds that are used for per-context-switch stats dumps
 * and Cache Occupancy. Having too many tasks seems to be a problem
 * with vector stats. 1024 seems to be a reasonable number that
 * doesn't cause a problem with stats and is large enough for realistic
 * benchmarks (Linux/Android boot, BBench, etc.)
 */

namespace ContextSwitchTaskId {
    enum TaskId {
        MaxNormalTaskId = 1021, /* Maximum number of normal tasks */
        Prefetcher = 1022, /* For cache lines brought in by prefetcher */
        DMA = 1023, /* Mostly Table Walker */
        Unknown = 1024,
        NumTaskId
    };
}

class Request;

typedef Request* RequestPtr;
typedef uint16_t MasterID;

class Request
{
  public:
    typedef uint32_t FlagsType;
    typedef uint8_t ArchFlagsType;
    typedef ::Flags<FlagsType> Flags;

    enum : FlagsType {
        /**
         * Architecture specific flags.
         *
         * These bits in the flag field are reserved for
         * architecture-specific code. For example, SPARC uses them to
         * represent ASIs.
         */
        ARCH_BITS                   = 0x000000FF,
        /** The request was an instruction fetch. */
        INST_FETCH                  = 0x00000100,
        /** The virtual address is also the physical address. */
        PHYSICAL                    = 0x00000200,
        /**
         * The request is to an uncacheable address.
         *
         * @note Uncacheable accesses may be reordered by CPU models. The
         * STRICT_ORDER flag should be set if such reordering is
         * undesirable.
         */
        UNCACHEABLE                = 0x00000400,
        /**
         * The request is required to be strictly ordered by <i>CPU
         * models</i> and is non-speculative.
         *
         * A strictly ordered request is guaranteed to never be
         * re-ordered or executed speculatively by a CPU model. The
         * memory system may still reorder requests in caches unless
         * the UNCACHEABLE flag is set as well.
         */
        STRICT_ORDER                = 0x00000800,
        /** This request is to a memory mapped register. */
        MMAPPED_IPR                 = 0x00002000,
        /** This request is made in privileged mode. */
        PRIVILEGED                  = 0x00008000,

        /**
         * This is a write that targets and zeroes an entire cache
         * block, so there is no need for a read/modify/write.
         */
        CACHE_BLOCK_ZERO            = 0x00010000,

        /** The request should not cause a memory access. */
        NO_ACCESS                   = 0x00080000,
        /**
         * This request will lock or unlock the accessed memory. When
         * used with a load, the access locks the particular chunk of
         * memory. When used with a store, it unlocks. The rule is
         * that locked accesses have to be made up of a locked load,
         * some operation on the data, and then a locked store.
         */
        LOCKED_RMW                  = 0x00100000,
        /** The request is a load locked/store conditional. */
        LLSC                        = 0x00200000,
        /** This request is for a memory swap. */
        MEM_SWAP                    = 0x00400000,
        MEM_SWAP_COND               = 0x00800000,

        /** The request is a prefetch. */
        PREFETCH                    = 0x01000000,
        /** The request should be prefetched into the exclusive state. */
        PF_EXCLUSIVE                = 0x02000000,
        /** The request should be marked as LRU. */
        EVICT_NEXT                  = 0x04000000,
        /** The request should be marked with ACQUIRE. */
        ACQUIRE                     = 0x00020000,
        /** The request should be marked with RELEASE. */
        RELEASE                     = 0x00040000,

        /** The request should be marked with KERNEL.
          * Used to indicate the synchronization associated with a GPU kernel
          * launch or completion.
          */
        KERNEL                      = 0x00001000,

        /**
         * The request should be handled by the generic IPR code (only
         * valid together with MMAPPED_IPR).
         */
        GENERIC_IPR                 = 0x08000000,

        /** The request targets the secure memory space. */
        SECURE                      = 0x10000000,
        /** The request is a page table walk. */
        PT_WALK                     = 0x20000000,

        /**
         * These flags are *not* cleared when a Request object is
         * reused (assigned a new address).
         */
        STICKY_FLAGS = INST_FETCH
    };

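    /**
     * Illustrative sketch (not part of the original header): the flag
     * values above are plain bit masks, so they can be OR-ed together and
     * passed to a Request constructor. For example, an uncacheable,
     * strictly ordered 4-byte device access might be built as follows;
     * 'some_master_id' is a placeholder for an id obtained at
     * configuration time:
     *
     * @code
     * Request::Flags flags = Request::UNCACHEABLE | Request::STRICT_ORDER;
     * RequestPtr req = new Request(0x10000000, 4, flags, some_master_id);
     * assert(req->isUncacheable() && req->isStrictlyOrdered());
     * @endcode
     */
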
    /** Master Ids that are statically allocated
     * @{*/
    enum : MasterID {
        /** This master id is used for writeback requests by the caches */
        wbMasterId = 0,
        /**
         * This master id is used for functional requests that
         * don't come from a particular device
         */
        funcMasterId = 1,
        /** This master id is used for message signaled interrupts */
        intMasterId = 2,
        /**
         * Invalid master id for assertion checking only. It is
         * invalid behavior to ever send this id as part of a request.
         */
        invldMasterId = std::numeric_limits<MasterID>::max()
    };
    /** @} */

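    /**
     * Illustrative note (not part of the original header): master ids
     * other than the statically allocated ones above are assigned at
     * configuration time, typically by asking the System object for an
     * id, along the lines of the (assumed) call below. Only
     * invldMasterId must never appear in a real request.
     *
     * @code
     * MasterID mid = system->getMasterId(name());  // assumed System API
     * @endcode
     */
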
    typedef uint32_t MemSpaceConfigFlagsType;
    typedef ::Flags<MemSpaceConfigFlagsType> MemSpaceConfigFlags;

    enum : MemSpaceConfigFlagsType {
        /** Has a synchronization scope been set? */
        SCOPE_VALID            = 0x00000001,
        /** Access has Wavefront scope visibility */
        WAVEFRONT_SCOPE        = 0x00000002,
        /** Access has Workgroup scope visibility */
        WORKGROUP_SCOPE        = 0x00000004,
        /** Access has Device (e.g., GPU) scope visibility */
        DEVICE_SCOPE           = 0x00000008,
        /** Access has System (e.g., CPU + GPU) scope visibility */
        SYSTEM_SCOPE           = 0x00000010,

        /** Global Segment */
        GLOBAL_SEGMENT         = 0x00000020,
        /** Group Segment */
        GROUP_SEGMENT          = 0x00000040,
        /** Private Segment */
        PRIVATE_SEGMENT        = 0x00000080,
        /** Kernarg Segment */
        KERNARG_SEGMENT        = 0x00000100,
        /** Readonly Segment */
        READONLY_SEGMENT       = 0x00000200,
        /** Spill Segment */
        SPILL_SEGMENT          = 0x00000400,
        /** Arg Segment */
        ARG_SEGMENT            = 0x00000800,
    };

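    /**
     * Illustrative sketch (not part of the original header): a GPU
     * memory model marks a device-scope access on an already
     * constructed request by setting the scope-valid bit together with
     * the desired scope bit:
     *
     * @code
     * req->setMemSpaceConfigFlags(Request::SCOPE_VALID |
     *                             Request::DEVICE_SCOPE);
     * assert(req->isScoped() && req->isDeviceScope());
     * @endcode
     */
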
  private:
    typedef uint8_t PrivateFlagsType;
    typedef ::Flags<PrivateFlagsType> PrivateFlags;

    enum : PrivateFlagsType {
        /** Whether or not the size is valid. */
        VALID_SIZE           = 0x00000001,
        /** Whether or not paddr is valid (has been written yet). */
        VALID_PADDR          = 0x00000002,
        /** Whether or not the vaddr & asid are valid. */
        VALID_VADDR          = 0x00000004,
        /** Whether or not the instruction sequence number is valid. */
        VALID_INST_SEQ_NUM   = 0x00000008,
        /** Whether or not the pc is valid. */
        VALID_PC             = 0x00000010,
        /** Whether or not the context ID is valid. */
        VALID_CONTEXT_ID     = 0x00000020,
        /** Whether or not the thread ID is valid. */
        VALID_THREAD_ID      = 0x00000040,
        /** Whether or not the sc result is valid. */
        VALID_EXTRA_DATA     = 0x00000080,
        /**
         * These flags are *not* cleared when a Request object is reused
         * (assigned a new address).
         */
        STICKY_PRIVATE_FLAGS = VALID_CONTEXT_ID | VALID_THREAD_ID
    };

  private:

    /**
     * Set up a physical (e.g. device) request in a previously
     * allocated Request object.
     */
    void
    setPhys(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
    {
        _paddr = paddr;
        _size = size;
        _time = time;
        _masterId = mid;
        _flags.clear(~STICKY_FLAGS);
        _flags.set(flags);
        privateFlags.clear(~STICKY_PRIVATE_FLAGS);
        privateFlags.set(VALID_PADDR|VALID_SIZE);
        depth = 0;
        accessDelta = 0;
        //translateDelta = 0;
    }

    /**
     * The physical address of the request. Valid only if validPaddr
     * is set.
     */
    Addr _paddr;

    /**
     * The size of the request. This field must be set when vaddr or
     * paddr is written via setVirt() or setPhys(), so it is always
     * valid as long as one of the address fields is valid.
     */
    unsigned _size;

    /** The requestor ID which is unique in the system for all ports
     * that are capable of issuing a transaction
     */
    MasterID _masterId;

    /** Flag structure for the request. */
    Flags _flags;

    /** Memory space configuration flag structure for the request. */
    MemSpaceConfigFlags _memSpaceConfigFlags;

    /** Private flags for field validity checking. */
    PrivateFlags privateFlags;

    /**
     * The time this request was started. Used to calculate
     * latencies. This field is set to curTick() any time paddr or vaddr
     * is written.
     */
    Tick _time;

    /**
     * The task id associated with this request
     */
    uint32_t _taskId;

    /** The address space ID. */
    int _asid;

    /** The virtual address of the request. */
    Addr _vaddr;

    /**
     * Extra data for the request, such as the return value of
     * store conditional or the compare value for a CAS. */
    uint64_t _extraData;

    /** The context ID (for statistics, typically). */
    ContextID _contextId;
    /** The thread ID (id within this CPU) */
    ThreadID _threadId;

    /** program counter of initiating access; for tracing/debugging */
    Addr _pc;

    /** Sequence number of the instruction that creates the request */
    InstSeqNum _reqInstSeqNum;

  public:

    /**
     * Minimal constructor. No fields are initialized. (Note that
     *  _flags and privateFlags are cleared by Flags default
     *  constructor.)
     */
    Request()
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _threadId(0), _pc(0),
          _reqInstSeqNum(0), translateDelta(0), accessDelta(0), depth(0)
    {}

    /**
     * Constructor for physical requests that also records the sequence
     * number of the initiating instruction and its thread context.
     */
    Request(Addr paddr, unsigned size, Flags flags, MasterID mid,
            InstSeqNum seq_num, ContextID cid, ThreadID tid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _threadId(0), _pc(0),
          _reqInstSeqNum(seq_num), translateDelta(0), accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, curTick());
        setThreadContext(cid, tid);
        privateFlags.set(VALID_INST_SEQ_NUM);
    }

    /**
     * Constructor for physical (e.g. device) requests.  Initializes
     * just physical address, size, flags, and timestamp (to curTick()).
     * These fields are adequate to perform a request.
     */
    Request(Addr paddr, unsigned size, Flags flags, MasterID mid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _threadId(0), _pc(0),
          _reqInstSeqNum(0), translateDelta(0), accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, curTick());
    }

    /** Constructor for physical requests with an explicit timestamp. */
    Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _threadId(0), _pc(0),
          _reqInstSeqNum(0), translateDelta(0), accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, time);
    }

    /**
     * Constructor for physical requests with an explicit timestamp and
     * program counter.
     */
    Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time,
            Addr pc)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _threadId(0), _pc(0),
          _reqInstSeqNum(0), translateDelta(0), accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, time);
        privateFlags.set(VALID_PC);
        _pc = pc;
    }

    /**
     * Constructor for virtual (e.g. CPU) requests; the physical address
     * is filled in later, typically as the result of a translation.
     */
    Request(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
            Addr pc, ContextID cid, ThreadID tid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _threadId(0), _pc(0),
          _reqInstSeqNum(0), translateDelta(0), accessDelta(0), depth(0)
    {
        setVirt(asid, vaddr, size, flags, mid, pc);
        setThreadContext(cid, tid);
    }

    ~Request() {}

    /**
     * Set up CPU and thread numbers.
     */
    void
    setThreadContext(ContextID context_id, ThreadID tid)
    {
        _contextId = context_id;
        _threadId = tid;
        privateFlags.set(VALID_CONTEXT_ID|VALID_THREAD_ID);
    }

    /**
     * Set up a virtual (e.g., CPU) request in a previously
     * allocated Request object.
     */
    void
    setVirt(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
            Addr pc)
    {
        _asid = asid;
        _vaddr = vaddr;
        _size = size;
        _masterId = mid;
        _pc = pc;
        _time = curTick();

        _flags.clear(~STICKY_FLAGS);
        _flags.set(flags);
        privateFlags.clear(~STICKY_PRIVATE_FLAGS);
        privateFlags.set(VALID_VADDR|VALID_SIZE|VALID_PC);
        depth = 0;
        accessDelta = 0;
        translateDelta = 0;
    }

    /**
     * Set just the physical address. This is usually used to record the
     * result of a translation. However, when using virtualized CPUs
     * setPhys() is sometimes called to finalize a physical address
     * without a virtual address, so we can't check if the virtual
     * address is valid.
     */
    void
    setPaddr(Addr paddr)
    {
        _paddr = paddr;
        privateFlags.set(VALID_PADDR);
    }

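    /**
     * Illustrative sketch (not part of the original header): the usual
     * CPU-side life cycle is to build a virtual request, translate it,
     * and then record the resulting physical address. The asid, pc,
     * cid/tid and MasterID values below are placeholders:
     *
     * @code
     * RequestPtr req = new Request(0, vaddr, 8, 0, some_master_id,
     *                              pc, cid, tid);
     * // ... TLB/table walker translation happens here ...
     * req->setPaddr(translated_paddr);
     * assert(req->hasVaddr() && req->hasPaddr());
     * @endcode
     */
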
    /**
     * Generate two requests as if this request had been split into two
     * pieces. The original request can't have been translated already.
     */
    void splitOnVaddr(Addr split_addr, RequestPtr &req1, RequestPtr &req2)
    {
        assert(privateFlags.isSet(VALID_VADDR));
        assert(privateFlags.noneSet(VALID_PADDR));
        assert(split_addr > _vaddr && split_addr < _vaddr + _size);
        req1 = new Request(*this);
        req2 = new Request(*this);
        req1->_size = split_addr - _vaddr;
        req2->_vaddr = split_addr;
        req2->_size = _size - req1->_size;
    }

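    /**
     * Illustrative sketch (not part of the original header): splitting
     * an untranslated request that straddles a 64-byte line boundary at
     * 0x400. The request below is assumed to cover [0x3fc, 0x404):
     *
     * @code
     * RequestPtr req1 = nullptr;
     * RequestPtr req2 = nullptr;
     * req->splitOnVaddr(0x400, req1, req2);
     * // req1 now covers [0x3fc, 0x400), req2 covers [0x400, 0x404)
     * @endcode
     */
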
    /**
     * Accessor for paddr.
     */
    bool
    hasPaddr() const
    {
        return privateFlags.isSet(VALID_PADDR);
    }

    Addr
    getPaddr() const
    {
        assert(privateFlags.isSet(VALID_PADDR));
        return _paddr;
    }

    /**
     * Time for the TLB/table walker to successfully translate this request.
     */
    Tick translateDelta;

    /**
     * Access latency to complete this memory transaction not including
     * translation time.
     */
    Tick accessDelta;

    /**
     * Level of the cache hierarchy where this request was responded to
     * (e.g. 0 = L1; 1 = L2).
     */
    mutable int depth;

    /**
     *  Accessor for size.
     */
    bool
    hasSize() const
    {
        return privateFlags.isSet(VALID_SIZE);
    }

    unsigned
    getSize() const
    {
        assert(privateFlags.isSet(VALID_SIZE));
        return _size;
    }

    /** Accessor for time. */
    Tick
    time() const
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _time;
    }

    /** Accessor for flags. */
    Flags
    getFlags()
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _flags;
    }

    /** Note that unlike other accessors, this function sets *specific
        flags* (ORs them in); it does not assign its argument to the
        _flags field.  Thus this method should rightly be called
        setFlags() and not just flags(). */
    void
    setFlags(Flags flags)
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        _flags.set(flags);
    }

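    /**
     * Illustrative sketch (not part of the original header): because
     * setFlags() ORs its argument into the existing flags, previously
     * set bits survive later calls:
     *
     * @code
     * req->setFlags(Request::UNCACHEABLE);
     * req->setFlags(Request::STRICT_ORDER);
     * // both bits are now set
     * assert(req->isUncacheable() && req->isStrictlyOrdered());
     * @endcode
     */
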
    void
    setMemSpaceConfigFlags(MemSpaceConfigFlags extraFlags)
    {
        assert(privateFlags.isSet(VALID_PADDR | VALID_VADDR));
        _memSpaceConfigFlags.set(extraFlags);
    }

    /** Accessor function for vaddr.*/
    bool
    hasVaddr() const
    {
        return privateFlags.isSet(VALID_VADDR);
    }

    Addr
    getVaddr() const
    {
        assert(privateFlags.isSet(VALID_VADDR));
        return _vaddr;
    }

    /** Accessor for the requestor id. */
    MasterID
    masterId() const
    {
        return _masterId;
    }

    /** Accessor function for task ID. */
    uint32_t
    taskId() const
    {
        return _taskId;
    }

    /** Set the task ID. */
    void
    taskId(uint32_t id) {
        _taskId = id;
    }

    /** Accessor function for asid.*/
    int
    getAsid() const
    {
        assert(privateFlags.isSet(VALID_VADDR));
        return _asid;
    }

    /** Accessor function for asid.*/
    void
    setAsid(int asid)
    {
        _asid = asid;
    }

    /** Accessor function for architecture-specific flags.*/
    ArchFlagsType
    getArchFlags() const
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _flags & ARCH_BITS;
    }

    /** Accessor function to check if sc result is valid. */
    bool
    extraDataValid() const
    {
        return privateFlags.isSet(VALID_EXTRA_DATA);
    }

    /** Accessor function for store conditional return value.*/
    uint64_t
    getExtraData() const
    {
        assert(privateFlags.isSet(VALID_EXTRA_DATA));
        return _extraData;
    }

    /** Accessor function for store conditional return value.*/
    void
    setExtraData(uint64_t extraData)
    {
        _extraData = extraData;
        privateFlags.set(VALID_EXTRA_DATA);
    }

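    /**
     * Illustrative sketch (not part of the original header): the memory
     * system typically records the outcome of a store conditional in
     * the extra-data field so the CPU model can report it to the ISA
     * code. 'success' is a placeholder for the actual outcome:
     *
     * @code
     * req->setExtraData(success ? 1 : 0);
     *
     * uint64_t sc_result = 0;
     * if (req->extraDataValid())
     *     sc_result = req->getExtraData();
     * @endcode
     */
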
    bool
    hasContextId() const
    {
        return privateFlags.isSet(VALID_CONTEXT_ID);
    }

    /** Accessor function for context ID.*/
    ContextID
    contextId() const
    {
        assert(privateFlags.isSet(VALID_CONTEXT_ID));
        return _contextId;
    }

    /** Accessor function for thread ID. */
    ThreadID
    threadId() const
    {
        assert(privateFlags.isSet(VALID_THREAD_ID));
        return _threadId;
    }

    void
    setPC(Addr pc)
    {
        privateFlags.set(VALID_PC);
        _pc = pc;
    }

    bool
    hasPC() const
    {
        return privateFlags.isSet(VALID_PC);
    }

    /** Accessor function for pc.*/
    Addr
    getPC() const
    {
        assert(privateFlags.isSet(VALID_PC));
        return _pc;
    }

    /**
     * Increment/Get the depth at which this request is responded to.
     * This currently happens when the request misses in any cache level.
     */
    void incAccessDepth() const { depth++; }
    int getAccessDepth() const { return depth; }

    /**
     * Set/Get the time taken for this request to be successfully translated.
     */
    void setTranslateLatency() { translateDelta = curTick() - _time; }
    Tick getTranslateLatency() const { return translateDelta; }

    /**
     * Set/Get the time taken to complete this request's access, not including
     *  the time to successfully translate the request.
     */
    void setAccessLatency() { accessDelta = curTick() - _time - translateDelta; }
    Tick getAccessLatency() const { return accessDelta; }

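    /**
     * Illustrative sketch (not part of the original header): a CPU or
     * LSQ model would typically stamp these deltas at the two
     * completion points, so the total latency decomposes into
     * translation time plus access time:
     *
     * @code
     * // when the TLB/table walker finishes:
     * req->setTranslateLatency();
     * // when the memory response arrives:
     * req->setAccessLatency();
     * Tick total = req->getTranslateLatency() + req->getAccessLatency();
     * @endcode
     */
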
    /**
     * Accessor for the sequence number of instruction that creates the
     * request.
     */
    bool
    hasInstSeqNum() const
    {
        return privateFlags.isSet(VALID_INST_SEQ_NUM);
    }

    InstSeqNum
    getReqInstSeqNum() const
    {
        assert(privateFlags.isSet(VALID_INST_SEQ_NUM));
        return _reqInstSeqNum;
    }

    void
    setReqInstSeqNum(const InstSeqNum seq_num)
    {
        privateFlags.set(VALID_INST_SEQ_NUM);
        _reqInstSeqNum = seq_num;
    }

    /** Accessor functions for flags. Note that these are for testing
        only; setting flags should be done via setFlags(). */
    bool isUncacheable() const { return _flags.isSet(UNCACHEABLE); }
    bool isStrictlyOrdered() const { return _flags.isSet(STRICT_ORDER); }
    bool isInstFetch() const { return _flags.isSet(INST_FETCH); }
    bool isPrefetch() const { return _flags.isSet(PREFETCH); }
    bool isLLSC() const { return _flags.isSet(LLSC); }
    bool isPriv() const { return _flags.isSet(PRIVILEGED); }
    bool isLockedRMW() const { return _flags.isSet(LOCKED_RMW); }
    bool isSwap() const { return _flags.isSet(MEM_SWAP|MEM_SWAP_COND); }
    bool isCondSwap() const { return _flags.isSet(MEM_SWAP_COND); }
    bool isMmappedIpr() const { return _flags.isSet(MMAPPED_IPR); }
    bool isSecure() const { return _flags.isSet(SECURE); }
    bool isPTWalk() const { return _flags.isSet(PT_WALK); }
    bool isAcquire() const { return _flags.isSet(ACQUIRE); }
    bool isRelease() const { return _flags.isSet(RELEASE); }
    bool isKernel() const { return _flags.isSet(KERNEL); }

    /**
     * Accessor functions for the memory space configuration flags, used by
     * GPU ISAs such as the Heterogeneous System Architecture (HSA). Note
     * that these are for testing only; setting extraFlags should be done
     * via setMemSpaceConfigFlags().
     */
    bool isScoped() const { return _memSpaceConfigFlags.isSet(SCOPE_VALID); }

    bool
    isWavefrontScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(WAVEFRONT_SCOPE);
    }

    bool
    isWorkgroupScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(WORKGROUP_SCOPE);
    }

    bool
    isDeviceScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(DEVICE_SCOPE);
    }

    bool
    isSystemScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(SYSTEM_SCOPE);
    }

    bool
    isGlobalSegment() const
    {
        return _memSpaceConfigFlags.isSet(GLOBAL_SEGMENT) ||
               (!isGroupSegment() && !isPrivateSegment() &&
                !isKernargSegment() && !isReadonlySegment() &&
                !isSpillSegment() && !isArgSegment());
    }

    bool
    isGroupSegment() const
    {
        return _memSpaceConfigFlags.isSet(GROUP_SEGMENT);
    }

    bool
    isPrivateSegment() const
    {
        return _memSpaceConfigFlags.isSet(PRIVATE_SEGMENT);
    }

    bool
    isKernargSegment() const
    {
        return _memSpaceConfigFlags.isSet(KERNARG_SEGMENT);
    }

    bool
    isReadonlySegment() const
    {
        return _memSpaceConfigFlags.isSet(READONLY_SEGMENT);
    }

    bool
    isSpillSegment() const
    {
        return _memSpaceConfigFlags.isSet(SPILL_SEGMENT);
    }

    bool
    isArgSegment() const
    {
        return _memSpaceConfigFlags.isSet(ARG_SEGMENT);
    }
};

#endif // __MEM_REQUEST_HH__