/*
 * Copyright (c) 2012-2013,2017 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ron Dreslinski
 *          Steve Reinhardt
 *          Ali Saidi
 */

/**
 * @file
 * Declaration of a request, the overall memory request consisting of
 * the parts of the request that are persistent throughout the transaction.
 */

#ifndef __MEM_REQUEST_HH__
#define __MEM_REQUEST_HH__

#include <cassert>
#include <climits>
#include <limits>

#include "base/flags.hh"
#include "base/logging.hh"
#include "base/types.hh"
#include "cpu/inst_seq.hh"
#include "sim/core.hh"

/**
 * Special TaskIds that are used for per-context-switch stats dumps
 * and Cache Occupancy. Having too many tasks seems to be a problem
 * with vector stats. 1024 seems to be a reasonable number that
 * doesn't cause a problem with stats and is large enough for realistic
 * benchmarks (Linux/Android boot, BBench, etc.).
 */

namespace ContextSwitchTaskId {
    enum TaskId {
        MaxNormalTaskId = 1021, /* Maximum number of normal tasks */
        Prefetcher = 1022, /* For cache lines brought in by prefetcher */
        DMA = 1023, /* Mostly Table Walker */
        Unknown = 1024,
        NumTaskId
    };
}

class Request;

typedef Request* RequestPtr;
typedef uint16_t MasterID;

class Request
{
  public:
    typedef uint64_t FlagsType;
    typedef uint8_t ArchFlagsType;
    typedef ::Flags<FlagsType> Flags;

    enum : FlagsType {
        /**
         * Architecture specific flags.
         *
         * These bits in the flag field are reserved for
         * architecture-specific code. For example, SPARC uses them to
         * represent ASIs.
         */
        ARCH_BITS                   = 0x000000FF,
        /** The request was an instruction fetch. */
        INST_FETCH                  = 0x00000100,
        /** The virtual address is also the physical address. */
        PHYSICAL                    = 0x00000200,
        /**
         * The request is to an uncacheable address.
         *
         * @note Uncacheable accesses may be reordered by CPU models. The
         * STRICT_ORDER flag should be set if such reordering is
         * undesirable.
         */
        UNCACHEABLE                 = 0x00000400,
        /**
         * The request is required to be strictly ordered by <i>CPU
         * models</i> and is non-speculative.
         *
         * A strictly ordered request is guaranteed to never be
         * re-ordered or executed speculatively by a CPU model. The
         * memory system may still reorder requests in caches unless
         * the UNCACHEABLE flag is set as well.
         */
        STRICT_ORDER                = 0x00000800,
        /** This request is to a memory mapped register. */
        MMAPPED_IPR                 = 0x00002000,
        /** This request is made in privileged mode. */
        PRIVILEGED                  = 0x00008000,

        /**
         * This is a write that is targeted at zeroing an entire
         * cache block. There is no need for a read/modify/write.
         */
        CACHE_BLOCK_ZERO            = 0x00010000,

        /** The request should not cause a memory access. */
        NO_ACCESS                   = 0x00080000,
        /**
         * This request will lock or unlock the accessed memory. When
         * used with a load, the access locks the particular chunk of
         * memory. When used with a store, it unlocks. The rule is
         * that locked accesses have to be made up of a locked load,
         * some operation on the data, and then a locked store.
         */
        LOCKED_RMW                  = 0x00100000,
        /** The request is a Load locked/store conditional. */
        LLSC                        = 0x00200000,
        /** This request is for a memory swap. */
        MEM_SWAP                    = 0x00400000,
        MEM_SWAP_COND               = 0x00800000,

        /** The request is a prefetch. */
        PREFETCH                    = 0x01000000,
        /** The request should be prefetched into the exclusive state. */
        PF_EXCLUSIVE                = 0x02000000,
        /** The request should be marked as LRU. */
        EVICT_NEXT                  = 0x04000000,
        /** The request should be marked with ACQUIRE. */
        ACQUIRE                     = 0x00020000,
        /** The request should be marked with RELEASE. */
        RELEASE                     = 0x00040000,

        /** The request is an atomic that returns data. */
        ATOMIC_RETURN_OP            = 0x40000000,
        /** The request is an atomic that does not return data. */
        ATOMIC_NO_RETURN_OP         = 0x80000000,

        /**
         * The request should be marked with KERNEL.
         * Used to indicate the synchronization associated with a GPU kernel
         * launch or completion.
         */
        KERNEL                      = 0x00001000,

        /**
         * The request should be handled by the generic IPR code (only
         * valid together with MMAPPED_IPR)
         */
        GENERIC_IPR                 = 0x08000000,

        /** The request targets the secure memory space. */
        SECURE                      = 0x10000000,
        /** The request is a page table walk */
        PT_WALK                     = 0x20000000,

        /** The request invalidates a memory location */
        INVALIDATE                  = 0x0000000100000000,
        /** The request cleans a memory location */
        CLEAN                       = 0x0000000200000000,

        /** The request targets the point of unification */
        DST_POU                     = 0x0000001000000000,

        /** The request targets the point of coherence */
        DST_POC                     = 0x0000002000000000,

        /** Bits to define the destination of a request */
        DST_BITS                    = 0x0000003000000000,

        /**
         * These flags are *not* cleared when a Request object is
         * reused (assigned a new address).
         */
        STICKY_FLAGS = INST_FETCH
    };
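
    /*
     * Flag values are plain bit masks, so callers typically OR them
     * together before passing them to a constructor or to setFlags();
     * for example, a non-speculative device access might use
     * UNCACHEABLE | STRICT_ORDER as its flags (an illustrative
     * combination, following the note on UNCACHEABLE above).
     */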

    /** Master Ids that are statically allocated
     * @{*/
    enum : MasterID {
        /** This master id is used for writeback requests by the caches */
        wbMasterId = 0,
        /**
         * This master id is used for functional requests that
         * don't come from a particular device
         */
        funcMasterId = 1,
        /** This master id is used for message signaled interrupts */
        intMasterId = 2,
        /**
         * Invalid master id for assertion checking only. It is
         * invalid behavior to ever send this id as part of a request.
         */
        invldMasterId = std::numeric_limits<MasterID>::max()
    };
    /** @} */

    typedef uint32_t MemSpaceConfigFlagsType;
    typedef ::Flags<MemSpaceConfigFlagsType> MemSpaceConfigFlags;

    enum : MemSpaceConfigFlagsType {
        /** Has a synchronization scope been set? */
        SCOPE_VALID            = 0x00000001,
        /** Access has Wavefront scope visibility */
        WAVEFRONT_SCOPE        = 0x00000002,
        /** Access has Workgroup scope visibility */
        WORKGROUP_SCOPE        = 0x00000004,
        /** Access has Device (e.g., GPU) scope visibility */
        DEVICE_SCOPE           = 0x00000008,
        /** Access has System (e.g., CPU + GPU) scope visibility */
        SYSTEM_SCOPE           = 0x00000010,

        /** Global Segment */
        GLOBAL_SEGMENT         = 0x00000020,
        /** Group Segment */
        GROUP_SEGMENT          = 0x00000040,
        /** Private Segment */
        PRIVATE_SEGMENT        = 0x00000080,
        /** Kernarg Segment */
        KERNARG_SEGMENT        = 0x00000100,
        /** Readonly Segment */
        READONLY_SEGMENT       = 0x00000200,
        /** Spill Segment */
        SPILL_SEGMENT          = 0x00000400,
        /** Arg Segment */
        ARG_SEGMENT            = 0x00000800,
    };

  private:
    typedef uint8_t PrivateFlagsType;
    typedef ::Flags<PrivateFlagsType> PrivateFlags;

    enum : PrivateFlagsType {
        /** Whether or not the size is valid. */
        VALID_SIZE           = 0x00000001,
        /** Whether or not paddr is valid (has been written yet). */
        VALID_PADDR          = 0x00000002,
        /** Whether or not the vaddr & asid are valid. */
        VALID_VADDR          = 0x00000004,
        /** Whether or not the instruction sequence number is valid. */
        VALID_INST_SEQ_NUM   = 0x00000008,
        /** Whether or not the pc is valid. */
        VALID_PC             = 0x00000010,
        /** Whether or not the context ID is valid. */
        VALID_CONTEXT_ID     = 0x00000020,
        /** Whether or not the sc result is valid. */
        VALID_EXTRA_DATA     = 0x00000080,
        /**
         * These flags are *not* cleared when a Request object is reused
         * (assigned a new address).
         */
        STICKY_PRIVATE_FLAGS = VALID_CONTEXT_ID
    };

  private:

    /**
     * Set up a physical (e.g. device) request in a previously
     * allocated Request object.
     */
    void
    setPhys(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
    {
        _paddr = paddr;
        _size = size;
        _time = time;
        _masterId = mid;
        _flags.clear(~STICKY_FLAGS);
        _flags.set(flags);
        privateFlags.clear(~STICKY_PRIVATE_FLAGS);
        privateFlags.set(VALID_PADDR|VALID_SIZE);
        depth = 0;
        accessDelta = 0;
        //translateDelta = 0;
    }

    /**
     * The physical address of the request. Valid only if validPaddr
     * is set.
     */
    Addr _paddr;

    /**
     * The size of the request. This field must be set when vaddr or
     * paddr is written via setVirt() or setPhys(), so it is always
     * valid as long as one of the address fields is valid.
     */
    unsigned _size;

    /** The requestor ID which is unique in the system for all ports
     * that are capable of issuing a transaction
     */
    MasterID _masterId;

    /** Flag structure for the request. */
    Flags _flags;

    /** Memory space configuration flag structure for the request. */
    MemSpaceConfigFlags _memSpaceConfigFlags;

    /** Private flags for field validity checking. */
    PrivateFlags privateFlags;

    /**
     * The time this request was started. Used to calculate
     * latencies. This field is set to curTick() any time paddr or vaddr
     * is written.
     */
    Tick _time;

    /**
     * The task id associated with this request
     */
    uint32_t _taskId;

    /** The address space ID. */
    int _asid;

    /** The virtual address of the request. */
    Addr _vaddr;

    /**
     * Extra data for the request, such as the return value of
     * store conditional or the compare value for a CAS. */
    uint64_t _extraData;

    /** The context ID (for statistics, locks, and wakeups). */
    ContextID _contextId;

    /** program counter of initiating access; for tracing/debugging */
    Addr _pc;

    /** Sequence number of the instruction that creates the request */
    InstSeqNum _reqInstSeqNum;

    /** A pointer to an atomic operation */
    AtomicOpFunctor *atomicOpFunctor;

  public:

    /**
     * Minimal constructor. All fields are zeroed and no validity
     * flags are set, so no field is considered valid. (Note that
     * _flags and privateFlags are cleared by the Flags default
     * constructor.)
     */
    Request()
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {}

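    /**
     * Constructor for physical requests that also records the
     * sequence number and context of the issuing instruction.
     */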
    Request(Addr paddr, unsigned size, Flags flags, MasterID mid,
            InstSeqNum seq_num, ContextID cid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(seq_num), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, curTick());
        setContext(cid);
        privateFlags.set(VALID_INST_SEQ_NUM);
    }

    /**
     * Constructor for physical (e.g. device) requests.  Initializes
     * just physical address, size, flags, and timestamp (to curTick()).
     * These fields are adequate to perform a request.
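     *
     * A typical use is a device-initiated access, e.g. (a sketch only;
     * the values are illustrative):
     * @code
     * Request req(paddr, size, 0, masterId);
     * @endcode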
     */
    Request(Addr paddr, unsigned size, Flags flags, MasterID mid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, curTick());
    }

    Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, time);
    }

    Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time,
            Addr pc)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(pc),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, time);
        privateFlags.set(VALID_PC);
    }

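    /**
     * Constructor for virtual (e.g. CPU) requests. Initializes the
     * virtual address, size, flags, PC, and context, and stamps the
     * request with the current tick via setVirt().
     */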
    Request(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
            Addr pc, ContextID cid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setVirt(asid, vaddr, size, flags, mid, pc);
        setContext(cid);
    }

    Request(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
            Addr pc, ContextID cid, AtomicOpFunctor *atomic_op)
        : atomicOpFunctor(atomic_op)
    {
        setVirt(asid, vaddr, size, flags, mid, pc);
        setContext(cid);
    }

    ~Request()
    {
        if (hasAtomicOpFunctor()) {
            delete atomicOpFunctor;
        }
    }

    /**
     * Set up Context numbers.
     */
    void
    setContext(ContextID context_id)
    {
        _contextId = context_id;
        privateFlags.set(VALID_CONTEXT_ID);
    }

    /**
     * Set up a virtual (e.g., CPU) request in a previously
     * allocated Request object.
     */
    void
    setVirt(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
            Addr pc)
    {
        _asid = asid;
        _vaddr = vaddr;
        _size = size;
        _masterId = mid;
        _pc = pc;
        _time = curTick();

        _flags.clear(~STICKY_FLAGS);
        _flags.set(flags);
        privateFlags.clear(~STICKY_PRIVATE_FLAGS);
        privateFlags.set(VALID_VADDR|VALID_SIZE|VALID_PC);
        depth = 0;
        accessDelta = 0;
        translateDelta = 0;
    }

    /**
     * Set just the physical address. This is usually used to record
     * the result of a translation. However, when using virtualized
     * CPUs, setPhys() is sometimes called to finalize a physical
     * address without a virtual address, so we can't check if the
     * virtual address is valid.
     */
    void
    setPaddr(Addr paddr)
    {
        _paddr = paddr;
        privateFlags.set(VALID_PADDR);
    }

    /**
     * Generate two requests as if this request had been split into two
     * pieces. The original request can't have been translated already.
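     *
     * A sketch of the intended use (the names are illustrative):
     * @code
     * RequestPtr front, back;
     * req->splitOnVaddr(split_addr, front, back);
     * // front covers [vaddr, split_addr); back covers the remainder
     * @endcode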
     */
    void splitOnVaddr(Addr split_addr, RequestPtr &req1, RequestPtr &req2)
    {
        assert(privateFlags.isSet(VALID_VADDR));
        assert(privateFlags.noneSet(VALID_PADDR));
        assert(split_addr > _vaddr && split_addr < _vaddr + _size);
        req1 = new Request(*this);
        req2 = new Request(*this);
        req1->_size = split_addr - _vaddr;
        req2->_vaddr = split_addr;
        req2->_size = _size - req1->_size;
    }

    /**
     * Accessor for paddr.
     */
    bool
    hasPaddr() const
    {
        return privateFlags.isSet(VALID_PADDR);
    }

    Addr
    getPaddr() const
    {
        assert(privateFlags.isSet(VALID_PADDR));
        return _paddr;
    }

    /**
     * Time for the TLB/table walker to successfully translate this request.
     */
    Tick translateDelta;

    /**
     * Access latency to complete this memory transaction not including
     * translation time.
     */
    Tick accessDelta;

    /**
     * Level of the cache hierarchy where this request was responded to
     * (e.g. 0 = L1; 1 = L2).
     */
    mutable int depth;

    /**
     * Accessor for size.
     */
    bool
    hasSize() const
    {
        return privateFlags.isSet(VALID_SIZE);
    }

    unsigned
    getSize() const
    {
        assert(privateFlags.isSet(VALID_SIZE));
        return _size;
    }

    /** Accessor for time. */
    Tick
    time() const
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _time;
    }

    /**
     * Accessor for atomic-op functor.
     */
    bool
    hasAtomicOpFunctor()
    {
        return atomicOpFunctor != NULL;
    }

    AtomicOpFunctor *
    getAtomicOpFunctor()
    {
        assert(atomicOpFunctor != NULL);
        return atomicOpFunctor;
    }

    /** Accessor for flags. */
    Flags
    getFlags()
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _flags;
    }

    /** Note that unlike other accessors, this function sets *specific
        flags* (ORs them in); it does not assign its argument to the
        _flags field.  Thus this method should rightly be called
        setFlags() and not just flags(). */
    void
    setFlags(Flags flags)
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        _flags.set(flags);
    }

    void
    setMemSpaceConfigFlags(MemSpaceConfigFlags extraFlags)
    {
        assert(privateFlags.isSet(VALID_PADDR | VALID_VADDR));
        _memSpaceConfigFlags.set(extraFlags);
    }

    /** Accessor function for vaddr.*/
    bool
    hasVaddr() const
    {
        return privateFlags.isSet(VALID_VADDR);
    }

    Addr
    getVaddr() const
    {
        assert(privateFlags.isSet(VALID_VADDR));
        return _vaddr;
    }

    /** Accessor for the requestor id. */
    MasterID
    masterId() const
    {
        return _masterId;
    }

    uint32_t
    taskId() const
    {
        return _taskId;
    }

    void
    taskId(uint32_t id) {
        _taskId = id;
    }

    /** Accessor function for asid.*/
    int
    getAsid() const
    {
        assert(privateFlags.isSet(VALID_VADDR));
        return _asid;
    }

    /** Setter function for asid.*/
    void
    setAsid(int asid)
    {
        _asid = asid;
    }

    /** Accessor function for architecture-specific flags.*/
    ArchFlagsType
    getArchFlags() const
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _flags & ARCH_BITS;
    }

    /** Accessor function to check if sc result is valid. */
    bool
    extraDataValid() const
    {
        return privateFlags.isSet(VALID_EXTRA_DATA);
    }

    /** Accessor function for store conditional return value.*/
    uint64_t
    getExtraData() const
    {
        assert(privateFlags.isSet(VALID_EXTRA_DATA));
        return _extraData;
    }

    /** Setter function for store conditional return value.*/
    void
    setExtraData(uint64_t extraData)
    {
        _extraData = extraData;
        privateFlags.set(VALID_EXTRA_DATA);
    }

    bool
    hasContextId() const
    {
        return privateFlags.isSet(VALID_CONTEXT_ID);
    }

    /** Accessor function for context ID.*/
    ContextID
    contextId() const
    {
        assert(privateFlags.isSet(VALID_CONTEXT_ID));
        return _contextId;
    }

    void
    setPC(Addr pc)
    {
        privateFlags.set(VALID_PC);
        _pc = pc;
    }

    bool
    hasPC() const
    {
        return privateFlags.isSet(VALID_PC);
    }

    /** Accessor function for pc.*/
    Addr
    getPC() const
    {
        assert(privateFlags.isSet(VALID_PC));
        return _pc;
    }

    /**
     * Increment/Get the depth at which this request is responded to.
     * This currently happens when the request misses in any cache level.
     */
    void incAccessDepth() const { depth++; }
    int getAccessDepth() const { return depth; }

    /**
     * Set/Get the time taken for this request to be successfully translated.
     */
    void setTranslateLatency() { translateDelta = curTick() - _time; }
    Tick getTranslateLatency() const { return translateDelta; }

    /**
     * Set/Get the time taken to complete this request's access, not including
     * the time to successfully translate the request.
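     *
     * Together with setTranslateLatency(), the two deltas partition
     * the time since the request was created (an illustrative sketch):
     * @code
     * req->setTranslateLatency(); // now - _time
     * // ... the memory access completes ...
     * req->setAccessLatency();    // now - _time - translateDelta
     * @endcode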
     */
    void setAccessLatency() { accessDelta = curTick() - _time - translateDelta; }
    Tick getAccessLatency() const { return accessDelta; }

    /**
     * Accessor for the sequence number of the instruction that creates
     * the request.
     */
    bool
    hasInstSeqNum() const
    {
        return privateFlags.isSet(VALID_INST_SEQ_NUM);
    }

    InstSeqNum
    getReqInstSeqNum() const
    {
        assert(privateFlags.isSet(VALID_INST_SEQ_NUM));
        return _reqInstSeqNum;
    }

    void
    setReqInstSeqNum(const InstSeqNum seq_num)
    {
        privateFlags.set(VALID_INST_SEQ_NUM);
        _reqInstSeqNum = seq_num;
    }

    /** Accessor functions for flags. Note that these are for testing
        only; setting flags should be done via setFlags(). */
    bool isUncacheable() const { return _flags.isSet(UNCACHEABLE); }
    bool isStrictlyOrdered() const { return _flags.isSet(STRICT_ORDER); }
    bool isInstFetch() const { return _flags.isSet(INST_FETCH); }
    bool isPrefetch() const { return _flags.isSet(PREFETCH); }
    bool isLLSC() const { return _flags.isSet(LLSC); }
    bool isPriv() const { return _flags.isSet(PRIVILEGED); }
    bool isLockedRMW() const { return _flags.isSet(LOCKED_RMW); }
    bool isSwap() const { return _flags.isSet(MEM_SWAP|MEM_SWAP_COND); }
    bool isCondSwap() const { return _flags.isSet(MEM_SWAP_COND); }
    bool isMmappedIpr() const { return _flags.isSet(MMAPPED_IPR); }
    bool isSecure() const { return _flags.isSet(SECURE); }
    bool isPTWalk() const { return _flags.isSet(PT_WALK); }
    bool isAcquire() const { return _flags.isSet(ACQUIRE); }
    bool isRelease() const { return _flags.isSet(RELEASE); }
    bool isKernel() const { return _flags.isSet(KERNEL); }
    bool isAtomicReturn() const { return _flags.isSet(ATOMIC_RETURN_OP); }
    bool isAtomicNoReturn() const { return _flags.isSet(ATOMIC_NO_RETURN_OP); }

    bool
    isAtomic() const
    {
        return _flags.isSet(ATOMIC_RETURN_OP) ||
               _flags.isSet(ATOMIC_NO_RETURN_OP);
    }

    /**
     * Accessor functions for the destination of a memory request. The
     * destination flag can specify a point of reference for the
     * operation (e.g. a cache block clean to the point of
     * unification). At the moment the destination is only used by the
     * cache maintenance operations.
     */
    bool isToPOU() const { return _flags.isSet(DST_POU); }
    bool isToPOC() const { return _flags.isSet(DST_POC); }
    Flags getDest() const { return _flags & DST_BITS; }

    /**
     * Accessor functions for the memory space configuration flags, used by
     * GPU ISAs such as the Heterogeneous System Architecture (HSA). Note that
     * these are for testing only; setting extraFlags should be done via
     * setMemSpaceConfigFlags().
     */
    bool isScoped() const { return _memSpaceConfigFlags.isSet(SCOPE_VALID); }

    bool
    isWavefrontScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(WAVEFRONT_SCOPE);
    }

    bool
    isWorkgroupScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(WORKGROUP_SCOPE);
    }

    bool
    isDeviceScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(DEVICE_SCOPE);
    }

    bool
    isSystemScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(SYSTEM_SCOPE);
    }

    bool
    isGlobalSegment() const
    {
        return _memSpaceConfigFlags.isSet(GLOBAL_SEGMENT) ||
               (!isGroupSegment() && !isPrivateSegment() &&
                !isKernargSegment() && !isReadonlySegment() &&
                !isSpillSegment() && !isArgSegment());
    }

    bool
    isGroupSegment() const
    {
        return _memSpaceConfigFlags.isSet(GROUP_SEGMENT);
    }

    bool
    isPrivateSegment() const
    {
        return _memSpaceConfigFlags.isSet(PRIVATE_SEGMENT);
    }

    bool
    isKernargSegment() const
    {
        return _memSpaceConfigFlags.isSet(KERNARG_SEGMENT);
    }

    bool
    isReadonlySegment() const
    {
        return _memSpaceConfigFlags.isSet(READONLY_SEGMENT);
    }

    bool
    isSpillSegment() const
    {
        return _memSpaceConfigFlags.isSet(SPILL_SEGMENT);
    }

    bool
    isArgSegment() const
    {
        return _memSpaceConfigFlags.isSet(ARG_SEGMENT);
    }

    /**
     * Accessor functions to determine whether this request is part of
     * a cache maintenance operation. At the moment three operations
     * are supported:
     *
     * 1) A cache clean operation updates all copies of a memory
     * location to the point of reference,
     * 2) A cache invalidate operation invalidates all copies of the
     * specified block in the memory above the point of reference,
     * 3) A clean and invalidate operation is a combination of the two
     * operations.
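     *
     * For instance, a clean and invalidate request sets both CLEAN and
     * INVALIDATE, so all three predicates below return true for it.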
     * @{ */
    bool isCacheClean() const { return _flags.isSet(CLEAN); }
    bool isCacheInvalidate() const { return _flags.isSet(INVALIDATE); }
    bool isCacheMaintenance() const { return _flags.isSet(CLEAN|INVALIDATE); }
    /** @} */
};

#endif // __MEM_REQUEST_HH__