request.hh (11306:a5340a2a24f9)
/*
 * Copyright (c) 2012-2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ron Dreslinski
 *          Steve Reinhardt
 *          Ali Saidi
 */

/**
 * @file
 * Declaration of a request, the overall memory request consisting of
 * the parts of the request that are persistent throughout the transaction.
 */

#ifndef __MEM_REQUEST_HH__
#define __MEM_REQUEST_HH__

#include <cassert>
#include <climits>

#include "base/flags.hh"
#include "base/misc.hh"
#include "base/types.hh"
#include "cpu/inst_seq.hh"
#include "sim/core.hh"

/**
 * Special TaskIds that are used for per-context-switch stats dumps
 * and Cache Occupancy. Having too many tasks seems to be a problem
 * with vector stats. 1024 seems to be a reasonable number that
 * doesn't cause a problem with stats and is large enough for realistic
 * benchmarks (Linux/Android boot, BBench, etc.).
 */

namespace ContextSwitchTaskId {
    enum TaskId {
        MaxNormalTaskId = 1021, /* Maximum number of normal tasks */
        Prefetcher = 1022, /* For cache lines brought in by prefetcher */
        DMA = 1023, /* Mostly Table Walker */
        Unknown = 1024,
        NumTaskId
    };
}

class Request;

typedef Request* RequestPtr;
typedef uint16_t MasterID;

class Request
{
  public:
    typedef uint32_t FlagsType;
    typedef uint8_t ArchFlagsType;
    typedef ::Flags<FlagsType> Flags;

    enum : FlagsType {
        /**
         * Architecture specific flags.
         *
         * These bits in the flag field are reserved for
         * architecture-specific code. For example, SPARC uses them to
         * represent ASIs.
         */
        ARCH_BITS = 0x000000FF,
        /** The request was an instruction fetch. */
        INST_FETCH = 0x00000100,
        /** The virtual address is also the physical address. */
        PHYSICAL = 0x00000200,
        /**
         * The request is to an uncacheable address.
         *
         * @note Uncacheable accesses may be reordered by CPU models. The
         * STRICT_ORDER flag should be set if such reordering is
         * undesirable.
         */
        UNCACHEABLE = 0x00000400,
        /**
         * The request is required to be strictly ordered by <i>CPU
         * models</i> and is non-speculative.
         *
         * A strictly ordered request is guaranteed to never be
         * re-ordered or executed speculatively by a CPU model. The
         * memory system may still reorder requests in caches unless
         * the UNCACHEABLE flag is set as well.
         */
        STRICT_ORDER = 0x00000800,
        /** This request is to a memory mapped register. */
        MMAPPED_IPR = 0x00002000,
        /** This request is made in privileged mode. */
        PRIVILEGED = 0x00008000,

        /**
         * This is a write that targets and zeroes an entire
         * cache block. There is no need for a read-modify-write.
         */
        CACHE_BLOCK_ZERO = 0x00010000,

        /** The request should not cause a memory access. */
        NO_ACCESS = 0x00080000,
        /**
         * This request will lock or unlock the accessed memory. When
         * used with a load, the access locks the particular chunk of
         * memory. When used with a store, it unlocks. The rule is
         * that locked accesses have to be made up of a locked load,
         * some operation on the data, and then a locked store.
         */
        LOCKED_RMW = 0x00100000,
        /** The request is a Load locked/store conditional. */
        LLSC = 0x00200000,
        /** This request is for a memory swap. */
        MEM_SWAP = 0x00400000,
        MEM_SWAP_COND = 0x00800000,

        /** The request is a prefetch. */
        PREFETCH = 0x01000000,
        /** The request should be prefetched into the exclusive state. */
        PF_EXCLUSIVE = 0x02000000,
        /** The request should be marked as LRU. */
        EVICT_NEXT = 0x04000000,
        /** The request should be marked with ACQUIRE. */
        ACQUIRE = 0x00020000,
        /** The request should be marked with RELEASE. */
        RELEASE = 0x00040000,

        /** The request is an atomic that returns data. */
        ATOMIC_RETURN_OP = 0x40000000,
        /** The request is an atomic that does not return data. */
        ATOMIC_NO_RETURN_OP = 0x80000000,

        /**
         * The request should be marked with KERNEL.
         * Used to indicate the synchronization associated with a GPU kernel
         * launch or completion.
         */
        KERNEL = 0x00001000,

        /**
         * The request should be handled by the generic IPR code (only
         * valid together with MMAPPED_IPR).
         */
        GENERIC_IPR = 0x08000000,

        /** The request targets the secure memory space. */
        SECURE = 0x10000000,
        /** The request is a page table walk. */
        PT_WALK = 0x20000000,

        /**
         * These flags are *not* cleared when a Request object is
         * reused (assigned a new address).
         */
        STICKY_FLAGS = INST_FETCH
    };
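
    // A minimal usage sketch: the flag constants above combine bitwise and
    // convert to the Flags wrapper, so a caller can build, for example, an
    // uncacheable, strictly ordered device access. The address, size, and
    // master id below are arbitrary placeholder values:
    //
    //     Request::Flags flags = Request::UNCACHEABLE | Request::STRICT_ORDER;
    //     Request req(0x80000000, 8, flags, /* master id */ 4);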

    /** Master Ids that are statically allocated
     * @{*/
    enum : MasterID {
        /** This master id is used for writeback requests by the caches */
        wbMasterId = 0,
        /**
         * This master id is used for functional requests that
         * don't come from a particular device
         */
        funcMasterId = 1,
        /** This master id is used for message signaled interrupts */
        intMasterId = 2,
        /**
         * Invalid master id for assertion checking only. It is
         * invalid behavior to ever send this id as part of a request.
         */
        invldMasterId = std::numeric_limits<MasterID>::max()
    };
    /** @} */

    typedef uint32_t MemSpaceConfigFlagsType;
    typedef ::Flags<MemSpaceConfigFlagsType> MemSpaceConfigFlags;

    enum : MemSpaceConfigFlagsType {
        /** Has a synchronization scope been set? */
        SCOPE_VALID = 0x00000001,
        /** Access has Wavefront scope visibility */
        WAVEFRONT_SCOPE = 0x00000002,
        /** Access has Workgroup scope visibility */
        WORKGROUP_SCOPE = 0x00000004,
        /** Access has Device (e.g., GPU) scope visibility */
        DEVICE_SCOPE = 0x00000008,
        /** Access has System (e.g., CPU + GPU) scope visibility */
        SYSTEM_SCOPE = 0x00000010,

        /** Global Segment */
        GLOBAL_SEGMENT = 0x00000020,
        /** Group Segment */
        GROUP_SEGMENT = 0x00000040,
        /** Private Segment */
        PRIVATE_SEGMENT = 0x00000080,
        /** Kernarg Segment */
        KERNARG_SEGMENT = 0x00000100,
        /** Readonly Segment */
        READONLY_SEGMENT = 0x00000200,
        /** Spill Segment */
        SPILL_SEGMENT = 0x00000400,
        /** Arg Segment */
        ARG_SEGMENT = 0x00000800,
    };
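
    // A minimal usage sketch: a GPU access with workgroup-scope visibility
    // in the group segment sets SCOPE_VALID alongside the chosen scope and
    // segment bits (the request is assumed to already have a valid address):
    //
    //     req->setMemSpaceConfigFlags(Request::SCOPE_VALID |
    //                                 Request::WORKGROUP_SCOPE |
    //                                 Request::GROUP_SEGMENT);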

  private:
    typedef uint8_t PrivateFlagsType;
    typedef ::Flags<PrivateFlagsType> PrivateFlags;

    enum : PrivateFlagsType {
        /** Whether or not the size is valid. */
        VALID_SIZE = 0x00000001,
        /** Whether or not paddr is valid (has been written yet). */
        VALID_PADDR = 0x00000002,
        /** Whether or not the vaddr & asid are valid. */
        VALID_VADDR = 0x00000004,
        /** Whether or not the instruction sequence number is valid. */
        VALID_INST_SEQ_NUM = 0x00000008,
        /** Whether or not the pc is valid. */
        VALID_PC = 0x00000010,
        /** Whether or not the context ID is valid. */
        VALID_CONTEXT_ID = 0x00000020,
        VALID_THREAD_ID = 0x00000040,
        /** Whether or not the sc result is valid. */
        VALID_EXTRA_DATA = 0x00000080,
        /**
         * These flags are *not* cleared when a Request object is reused
         * (assigned a new address).
         */
        STICKY_PRIVATE_FLAGS = VALID_CONTEXT_ID | VALID_THREAD_ID
    };

  private:

    /**
     * Set up a physical (e.g. device) request in a previously
     * allocated Request object.
     */
    void
    setPhys(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
    {
        _paddr = paddr;
        _size = size;
        _time = time;
        _masterId = mid;
        _flags.clear(~STICKY_FLAGS);
        _flags.set(flags);
        privateFlags.clear(~STICKY_PRIVATE_FLAGS);
        privateFlags.set(VALID_PADDR|VALID_SIZE);
        depth = 0;
        accessDelta = 0;
        //translateDelta = 0;
    }

    /**
     * The physical address of the request. Valid only if validPaddr
     * is set.
     */
    Addr _paddr;

    /**
     * The size of the request. This field must be set when vaddr or
     * paddr is written via setVirt() or setPhys(), so it is always
     * valid as long as one of the address fields is valid.
     */
    unsigned _size;

    /** The requestor ID which is unique in the system for all ports
     * that are capable of issuing a transaction
     */
    MasterID _masterId;

    /** Flag structure for the request. */
    Flags _flags;

    /** Memory space configuration flag structure for the request. */
    MemSpaceConfigFlags _memSpaceConfigFlags;

    /** Private flags for field validity checking. */
    PrivateFlags privateFlags;

    /**
     * The time this request was started. Used to calculate
     * latencies. This field is set to curTick() any time paddr or vaddr
     * is written.
     */
    Tick _time;

    /**
     * The task id associated with this request
     */
    uint32_t _taskId;

    /** The address space ID. */
    int _asid;

    /** The virtual address of the request. */
    Addr _vaddr;

    /**
     * Extra data for the request, such as the return value of
     * store conditional or the compare value for a CAS. */
    uint64_t _extraData;

    /** The context ID (for statistics, typically). */
    ContextID _contextId;
    /** The thread ID (id within this CPU) */
    ThreadID _threadId;

    /** program counter of initiating access; for tracing/debugging */
    Addr _pc;

    /** Sequence number of the instruction that creates the request */
    InstSeqNum _reqInstSeqNum;

    /** A pointer to an atomic operation */
    AtomicOpFunctor *atomicOpFunctor;

  public:

    /**
     * Minimal constructor. All fields are initialized to safe
     * defaults. (Note that _flags and privateFlags are cleared by the
     * Flags default constructor.)
     */
    Request()
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _threadId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {}

    Request(Addr paddr, unsigned size, Flags flags, MasterID mid,
            InstSeqNum seq_num, ContextID cid, ThreadID tid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _threadId(0), _pc(0),
          _reqInstSeqNum(seq_num), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, curTick());
        setThreadContext(cid, tid);
        privateFlags.set(VALID_INST_SEQ_NUM);
    }

    /**
     * Constructor for physical (e.g. device) requests. Initializes
     * just physical address, size, flags, and timestamp (to curTick()).
     * These fields are adequate to perform a request.
     */
    Request(Addr paddr, unsigned size, Flags flags, MasterID mid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _threadId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, curTick());
    }

    Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _threadId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, time);
    }

    Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time,
            Addr pc)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _threadId(0), _pc(pc),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, time);
        privateFlags.set(VALID_PC);
    }

    Request(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
            Addr pc, ContextID cid, ThreadID tid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _threadId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setVirt(asid, vaddr, size, flags, mid, pc);
        setThreadContext(cid, tid);
    }

    Request(int asid, Addr vaddr, int size, Flags flags, MasterID mid, Addr pc,
            int cid, ThreadID tid, AtomicOpFunctor *atomic_op)
        : atomicOpFunctor(atomic_op)
    {
        setVirt(asid, vaddr, size, flags, mid, pc);
        setThreadContext(cid, tid);
    }

    ~Request()
    {
        if (hasAtomicOpFunctor()) {
            delete atomicOpFunctor;
        }
    }

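    // A minimal sketch of the atomic-request path, assuming the
    // AtomicOpFunctor interface from base/types.hh (a virtual
    // operator()(uint8_t *) applied to the target memory). The functor is
    // owned by the request and freed in the destructor above; the concrete
    // values below are arbitrary placeholders:
    //
    //     struct AtomicIncOp : public AtomicOpFunctor
    //     {
    //         void operator()(uint8_t *p)
    //         { ++*reinterpret_cast<uint32_t *>(p); }
    //     };
    //
    //     RequestPtr req = new Request(/* asid */ 0, vaddr, sizeof(uint32_t),
    //                                  Request::ATOMIC_RETURN_OP, mid, pc,
    //                                  cid, tid, new AtomicIncOp);
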
    /**
     * Set up CPU and thread numbers.
     */
    void
    setThreadContext(ContextID context_id, ThreadID tid)
    {
        _contextId = context_id;
        _threadId = tid;
        privateFlags.set(VALID_CONTEXT_ID|VALID_THREAD_ID);
    }

    /**
     * Set up a virtual (e.g., CPU) request in a previously
     * allocated Request object.
     */
    void
    setVirt(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
            Addr pc)
    {
        _asid = asid;
        _vaddr = vaddr;
        _size = size;
        _masterId = mid;
        _pc = pc;
        _time = curTick();

        _flags.clear(~STICKY_FLAGS);
        _flags.set(flags);
        privateFlags.clear(~STICKY_PRIVATE_FLAGS);
        privateFlags.set(VALID_VADDR|VALID_SIZE|VALID_PC);
        depth = 0;
        accessDelta = 0;
        translateDelta = 0;
    }

    /**
     * Set just the physical address. This is usually used to record the
     * result of a translation. However, when using virtualized CPUs
     * setPhys() is sometimes called to finalize a physical address
     * without a virtual address, so we can't check if the virtual
     * address is valid.
     */
    void
    setPaddr(Addr paddr)
    {
        _paddr = paddr;
        privateFlags.set(VALID_PADDR);
    }

    /**
     * Generate two requests as if this request had been split into two
     * pieces. The original request can't have been translated already.
     */
    void splitOnVaddr(Addr split_addr, RequestPtr &req1, RequestPtr &req2)
    {
        assert(privateFlags.isSet(VALID_VADDR));
        assert(privateFlags.noneSet(VALID_PADDR));
        assert(split_addr > _vaddr && split_addr < _vaddr + _size);
        req1 = new Request(*this);
        req2 = new Request(*this);
        req1->_size = split_addr - _vaddr;
        req2->_vaddr = split_addr;
        req2->_size = _size - req1->_size;
    }
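
    // A minimal sketch of splitting an access that straddles a cache-line
    // boundary (addresses and sizes are arbitrary placeholders): an 8-byte
    // request at vaddr 0x3C with 64-byte lines is split at 0x40 into a
    // 4-byte piece at 0x3C and a 4-byte piece at 0x40.
    //
    //     RequestPtr req1, req2;
    //     req->splitOnVaddr(0x40, req1, req2);
    //     // req1: vaddr 0x3C, size 4; req2: vaddr 0x40, size 4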

    /**
     * Accessor for paddr.
     */
    bool
    hasPaddr() const
    {
        return privateFlags.isSet(VALID_PADDR);
    }

    Addr
    getPaddr() const
    {
        assert(privateFlags.isSet(VALID_PADDR));
        return _paddr;
    }

    /**
     * Time for the TLB/table walker to successfully translate this request.
     */
    Tick translateDelta;

    /**
     * Access latency to complete this memory transaction not including
     * translation time.
     */
    Tick accessDelta;

    /**
     * Level of the cache hierarchy where this request was responded to
     * (e.g. 0 = L1; 1 = L2).
     */
    mutable int depth;

    /**
     * Accessor for size.
     */
    bool
    hasSize() const
    {
        return privateFlags.isSet(VALID_SIZE);
    }

    unsigned
    getSize() const
    {
        assert(privateFlags.isSet(VALID_SIZE));
        return _size;
    }

    /** Accessor for time. */
    Tick
    time() const
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _time;
    }

    /**
     * Accessor for atomic-op functor.
     */
    bool
    hasAtomicOpFunctor()
    {
        return atomicOpFunctor != NULL;
    }

    AtomicOpFunctor *
    getAtomicOpFunctor()
    {
        assert(atomicOpFunctor != NULL);
        return atomicOpFunctor;
    }

    /** Accessor for flags. */
    Flags
    getFlags()
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _flags;
    }

    /** Note that unlike other accessors, this function sets *specific
        flags* (ORs them in); it does not assign its argument to the
        _flags field. Thus this method should rightly be called
        setFlags() and not just flags(). */
    void
    setFlags(Flags flags)
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        _flags.set(flags);
    }
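
    // A minimal sketch of the OR-in semantics described above: successive
    // calls accumulate flags rather than overwriting them, so after
    //
    //     req->setFlags(Request::PREFETCH);
    //     req->setFlags(Request::PF_EXCLUSIVE);
    //
    // both PREFETCH and PF_EXCLUSIVE remain set on the request.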

    void
    setMemSpaceConfigFlags(MemSpaceConfigFlags extraFlags)
    {
        assert(privateFlags.isSet(VALID_PADDR | VALID_VADDR));
        _memSpaceConfigFlags.set(extraFlags);
    }

    /** Accessor function for vaddr.*/
    bool
    hasVaddr() const
    {
        return privateFlags.isSet(VALID_VADDR);
    }

    Addr
    getVaddr() const
    {
        assert(privateFlags.isSet(VALID_VADDR));
        return _vaddr;
    }

    /** Accessor for the requestor id. */
    MasterID
    masterId() const
    {
        return _masterId;
    }

    uint32_t
    taskId() const
    {
        return _taskId;
    }

    void
    taskId(uint32_t id) {
        _taskId = id;
    }

    /** Accessor function for asid.*/
    int
    getAsid() const
    {
        assert(privateFlags.isSet(VALID_VADDR));
        return _asid;
    }

    /** Accessor function for asid.*/
    void
    setAsid(int asid)
    {
        _asid = asid;
    }

    /** Accessor function for architecture-specific flags.*/
    ArchFlagsType
    getArchFlags() const
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _flags & ARCH_BITS;
    }

    /** Accessor function to check if sc result is valid. */
    bool
    extraDataValid() const
    {
        return privateFlags.isSet(VALID_EXTRA_DATA);
    }

    /** Accessor function for store conditional return value.*/
    uint64_t
    getExtraData() const
    {
        assert(privateFlags.isSet(VALID_EXTRA_DATA));
        return _extraData;
    }

    /** Accessor function for store conditional return value.*/
    void
    setExtraData(uint64_t extraData)
    {
        _extraData = extraData;
        privateFlags.set(VALID_EXTRA_DATA);
    }

    bool
    hasContextId() const
    {
        return privateFlags.isSet(VALID_CONTEXT_ID);
    }

    /** Accessor function for context ID.*/
    ContextID
    contextId() const
    {
        assert(privateFlags.isSet(VALID_CONTEXT_ID));
        return _contextId;
    }

    /** Accessor function for thread ID. */
    ThreadID
    threadId() const
    {
        assert(privateFlags.isSet(VALID_THREAD_ID));
        return _threadId;
    }

    void
    setPC(Addr pc)
    {
        privateFlags.set(VALID_PC);
        _pc = pc;
    }

    bool
    hasPC() const
    {
        return privateFlags.isSet(VALID_PC);
    }

    /** Accessor function for pc.*/
    Addr
    getPC() const
    {
        assert(privateFlags.isSet(VALID_PC));
        return _pc;
    }

    /**
     * Increment/Get the depth at which this request is responded to.
     * This currently happens when the request misses in any cache level.
     */
    void incAccessDepth() const { depth++; }
    int getAccessDepth() const { return depth; }

    /**
     * Set/Get the time taken for this request to be successfully translated.
     */
    void setTranslateLatency() { translateDelta = curTick() - _time; }
    Tick getTranslateLatency() const { return translateDelta; }

    /**
     * Set/Get the time taken to complete this request's access, not including
     * the time to successfully translate the request.
     */
    void setAccessLatency() { accessDelta = curTick() - _time - translateDelta; }
    Tick getAccessLatency() const { return accessDelta; }
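
    // A worked example of the latency accounting above, with made-up tick
    // values: if the request is created at _time = 1000, translation
    // finishes at tick 1300, and the access completes at tick 2100, then
    //
    //     setTranslateLatency();  // at tick 1300: translateDelta = 300
    //     setAccessLatency();     // at tick 2100: accessDelta = 800
    //
    // so the two deltas partition the total round trip of 1100 ticks.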

    /**
     * Accessor for the sequence number of the instruction that creates
     * the request.
     */
    bool
    hasInstSeqNum() const
    {
        return privateFlags.isSet(VALID_INST_SEQ_NUM);
    }

    InstSeqNum
    getReqInstSeqNum() const
    {
        assert(privateFlags.isSet(VALID_INST_SEQ_NUM));
        return _reqInstSeqNum;
    }

    void
    setReqInstSeqNum(const InstSeqNum seq_num)
    {
        privateFlags.set(VALID_INST_SEQ_NUM);
        _reqInstSeqNum = seq_num;
    }

    /** Accessor functions for flags. Note that these are for testing
        only; setting flags should be done via setFlags(). */
    bool isUncacheable() const { return _flags.isSet(UNCACHEABLE); }
    bool isStrictlyOrdered() const { return _flags.isSet(STRICT_ORDER); }
    bool isInstFetch() const { return _flags.isSet(INST_FETCH); }
    bool isPrefetch() const { return _flags.isSet(PREFETCH); }
    bool isLLSC() const { return _flags.isSet(LLSC); }
    bool isPriv() const { return _flags.isSet(PRIVILEGED); }
    bool isLockedRMW() const { return _flags.isSet(LOCKED_RMW); }
    bool isSwap() const { return _flags.isSet(MEM_SWAP|MEM_SWAP_COND); }
    bool isCondSwap() const { return _flags.isSet(MEM_SWAP_COND); }
    bool isMmappedIpr() const { return _flags.isSet(MMAPPED_IPR); }
    bool isSecure() const { return _flags.isSet(SECURE); }
    bool isPTWalk() const { return _flags.isSet(PT_WALK); }
    bool isAcquire() const { return _flags.isSet(ACQUIRE); }
    bool isRelease() const { return _flags.isSet(RELEASE); }
    bool isKernel() const { return _flags.isSet(KERNEL); }
    bool isAtomicReturn() const { return _flags.isSet(ATOMIC_RETURN_OP); }
    bool isAtomicNoReturn() const { return _flags.isSet(ATOMIC_NO_RETURN_OP); }

    bool
    isAtomic() const
    {
        return _flags.isSet(ATOMIC_RETURN_OP) ||
               _flags.isSet(ATOMIC_NO_RETURN_OP);
    }

    /**
     * Accessor functions for the memory space configuration flags, used by
     * GPU ISAs such as the Heterogeneous System Architecture (HSA). Note that
     * these are for testing only; setting extraFlags should be done via
     * setMemSpaceConfigFlags().
     */
    bool isScoped() const { return _memSpaceConfigFlags.isSet(SCOPE_VALID); }

    bool
    isWavefrontScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(WAVEFRONT_SCOPE);
    }

    bool
    isWorkgroupScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(WORKGROUP_SCOPE);
    }

    bool
    isDeviceScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(DEVICE_SCOPE);
    }

    bool
    isSystemScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(SYSTEM_SCOPE);
    }

    bool
    isGlobalSegment() const
    {
        return _memSpaceConfigFlags.isSet(GLOBAL_SEGMENT) ||
               (!isGroupSegment() && !isPrivateSegment() &&
                !isKernargSegment() && !isReadonlySegment() &&
                !isSpillSegment() && !isArgSegment());
    }

    bool
    isGroupSegment() const
    {
        return _memSpaceConfigFlags.isSet(GROUP_SEGMENT);
    }

    bool
    isPrivateSegment() const
    {
        return _memSpaceConfigFlags.isSet(PRIVATE_SEGMENT);
    }

    bool
    isKernargSegment() const
    {
        return _memSpaceConfigFlags.isSet(KERNARG_SEGMENT);
    }

    bool
    isReadonlySegment() const
    {
        return _memSpaceConfigFlags.isSet(READONLY_SEGMENT);
    }

    bool
    isSpillSegment() const
    {
        return _memSpaceConfigFlags.isSet(SPILL_SEGMENT);
    }

    bool
    isArgSegment() const
    {
        return _memSpaceConfigFlags.isSet(ARG_SEGMENT);
    }
};

#endif // __MEM_REQUEST_HH__