request.hh: diff of revisions 12749:223c83ed9979 (old) and 12766:1c347e60c7fd (new)
/*
 * Copyright (c) 2012-2013,2017 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ron Dreslinski
 *          Steve Reinhardt
 *          Ali Saidi
 */

/**
 * @file
 * Declaration of a request, the overall memory request consisting of
 * the parts of the request that are persistent throughout the
 * transaction.
 */

#ifndef __MEM_REQUEST_HH__
#define __MEM_REQUEST_HH__

#include <cassert>
#include <climits>
#include <limits>
#include <memory>

#include "base/flags.hh"
#include "base/logging.hh"
#include "base/types.hh"
#include "cpu/inst_seq.hh"
#include "sim/core.hh"

/**
 * Special TaskIds that are used for per-context-switch stats dumps
 * and Cache Occupancy. Having too many tasks seems to be a problem
 * with vector stats. 1024 seems to be a reasonable number that
 * doesn't cause a problem with stats and is large enough for realistic
 * benchmarks (Linux/Android boot, BBench, etc.)
 */

namespace ContextSwitchTaskId {
    enum TaskId {
        MaxNormalTaskId = 1021, /* Maximum number of normal tasks */
        Prefetcher = 1022, /* For cache lines brought in by prefetcher */
        DMA = 1023, /* Mostly Table Walker */
        Unknown = 1024,
        NumTaskId
    };
}
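// Editor's sketch (not in the original source): how a device model might
// tag its requests with one of these TaskIds so that per-task stats and
// cache-occupancy tracking attribute its lines correctly. The request
// construction itself is assumed and not shown:
//
//     req->taskId(ContextSwitchTaskId::DMA);  // e.g. a table walker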

class Request;

typedef std::shared_ptr<Request> RequestPtr;
typedef uint16_t MasterID;

class Request
{
  public:
    typedef uint64_t FlagsType;
    typedef uint8_t ArchFlagsType;
    typedef ::Flags<FlagsType> Flags;

    enum : FlagsType {
        /**
         * Architecture specific flags.
         *
         * These bits in the flag field are reserved for
         * architecture-specific code. For example, SPARC uses them to
         * represent ASIs.
         */
        ARCH_BITS = 0x000000FF,
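        // Editor's sketch (not in the original source): an ISA's TLB code
        // can stash its own bits in ARCH_BITS and read them back via
        // getArchFlags(). Assuming a hypothetical SPARC ASI value `asi`:
        //
        //     req->setFlags(asi & Request::ARCH_BITS);  // store the ASI
        //     uint8_t a = req->getArchFlags();          // recover it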
        /** The request was an instruction fetch. */
        INST_FETCH = 0x00000100,
        /** The virtual address is also the physical address. */
        PHYSICAL = 0x00000200,
        /**
         * The request is to an uncacheable address.
         *
         * @note Uncacheable accesses may be reordered by CPU models. The
         * STRICT_ORDER flag should be set if such reordering is
         * undesirable.
         */
        UNCACHEABLE = 0x00000400,
        /**
         * The request is required to be strictly ordered by <i>CPU
         * models</i> and is non-speculative.
         *
         * A strictly ordered request is guaranteed to never be
         * re-ordered or executed speculatively by a CPU model. The
         * memory system may still reorder requests in caches unless
         * the UNCACHEABLE flag is set as well.
         */
        STRICT_ORDER = 0x00000800,
        /** This request is to a memory mapped register. */
        MMAPPED_IPR = 0x00002000,
        /** This request is made in privileged mode. */
        PRIVILEGED = 0x00008000,

        /**
         * This is a write that targets and zeroes an entire cache block.
         * There is no need for a read/modify/write.
         */
        CACHE_BLOCK_ZERO = 0x00010000,

        /** The request should not cause a memory access. */
        NO_ACCESS = 0x00080000,
        /**
         * This request will lock or unlock the accessed memory. When
         * used with a load, the access locks the particular chunk of
         * memory. When used with a store, it unlocks. The rule is
         * that locked accesses have to be made up of a locked load,
         * some operation on the data, and then a locked store.
         */
        LOCKED_RMW = 0x00100000,
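        // Editor's sketch (not in the original source): the pairing the
        // comment above describes, as a CPU model might emit it for an
        // x86 LOCK-prefixed increment. Both accesses carry LOCKED_RMW:
        //
        //     load_req->setFlags(Request::LOCKED_RMW);   // locks the chunk
        //     // ... increment the loaded value ...
        //     store_req->setFlags(Request::LOCKED_RMW);  // unlocks it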
        /** The request is a Load locked/store conditional. */
        LLSC = 0x00200000,
        /** This request is for a memory swap. */
        MEM_SWAP = 0x00400000,
        MEM_SWAP_COND = 0x00800000,

        /** The request is a prefetch. */
        PREFETCH = 0x01000000,
        /** The request should be prefetched into the exclusive state. */
        PF_EXCLUSIVE = 0x02000000,
        /** The request should be marked as LRU. */
        EVICT_NEXT = 0x04000000,
        /** The request should be marked with ACQUIRE. */
        ACQUIRE = 0x00020000,
        /** The request should be marked with RELEASE. */
        RELEASE = 0x00040000,

        /** The request is an atomic that returns data. */
        ATOMIC_RETURN_OP = 0x40000000,
        /** The request is an atomic that does not return data. */
        ATOMIC_NO_RETURN_OP = 0x80000000,

        /** The request should be marked with KERNEL.
         * Used to indicate the synchronization associated with a GPU kernel
         * launch or completion.
         */
        KERNEL = 0x00001000,

        /**
         * The request should be handled by the generic IPR code (only
         * valid together with MMAPPED_IPR)
         */
        GENERIC_IPR = 0x08000000,

        /** The request targets the secure memory space. */
        SECURE = 0x10000000,
        /** The request is a page table walk */
        PT_WALK = 0x20000000,

        /** The request invalidates a memory location */
        INVALIDATE = 0x0000000100000000,
        /** The request cleans a memory location */
        CLEAN = 0x0000000200000000,

        /** The request targets the point of unification */
        DST_POU = 0x0000001000000000,

        /** The request targets the point of coherence */
        DST_POC = 0x0000002000000000,

        /** Bits to define the destination of a request */
        DST_BITS = 0x0000003000000000,

        /**
         * These flags are *not* cleared when a Request object is
         * reused (assigned a new address).
         */
        STICKY_FLAGS = INST_FETCH
    };
    static const FlagsType STORE_NO_DATA = CACHE_BLOCK_ZERO |
        CLEAN | INVALIDATE;

    /** Master Ids that are statically allocated
     * @{*/
    enum : MasterID {
        /** This master id is used for writeback requests by the caches */
        wbMasterId = 0,
        /**
         * This master id is used for functional requests that
         * don't come from a particular device
         */
        funcMasterId = 1,
        /** This master id is used for message signaled interrupts */
        intMasterId = 2,
        /**
         * Invalid master id for assertion checking only. It is
         * invalid behavior to ever send this id as part of a request.
         */
        invldMasterId = std::numeric_limits<MasterID>::max()
    };
    /** @} */

    typedef uint32_t MemSpaceConfigFlagsType;
    typedef ::Flags<MemSpaceConfigFlagsType> MemSpaceConfigFlags;

    enum : MemSpaceConfigFlagsType {
        /** Has a synchronization scope been set? */
        SCOPE_VALID = 0x00000001,
        /** Access has Wavefront scope visibility */
        WAVEFRONT_SCOPE = 0x00000002,
        /** Access has Workgroup scope visibility */
        WORKGROUP_SCOPE = 0x00000004,
        /** Access has Device (e.g., GPU) scope visibility */
        DEVICE_SCOPE = 0x00000008,
        /** Access has System (e.g., CPU + GPU) scope visibility */
        SYSTEM_SCOPE = 0x00000010,

        /** Global Segment */
        GLOBAL_SEGMENT = 0x00000020,
        /** Group Segment */
        GROUP_SEGMENT = 0x00000040,
        /** Private Segment */
        PRIVATE_SEGMENT = 0x00000080,
        /** Kernarg Segment */
        KERNARG_SEGMENT = 0x00000100,
        /** Readonly Segment */
        READONLY_SEGMENT = 0x00000200,
        /** Spill Segment */
        SPILL_SEGMENT = 0x00000400,
        /** Arg Segment */
        ARG_SEGMENT = 0x00000800,
    };
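    // Editor's sketch (not in the original source): how a GPU model might
    // mark a device-scope access to the group segment. SCOPE_VALID must
    // accompany any scope bit, since the isScoped()/is*Scope() accessors
    // below assert on it:
    //
    //     req->setMemSpaceConfigFlags(Request::SCOPE_VALID |
    //                                 Request::DEVICE_SCOPE |
    //                                 Request::GROUP_SEGMENT);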

  private:
    typedef uint8_t PrivateFlagsType;
    typedef ::Flags<PrivateFlagsType> PrivateFlags;

    enum : PrivateFlagsType {
        /** Whether or not the size is valid. */
        VALID_SIZE = 0x00000001,
        /** Whether or not paddr is valid (has been written yet). */
        VALID_PADDR = 0x00000002,
        /** Whether or not the vaddr & asid are valid. */
        VALID_VADDR = 0x00000004,
        /** Whether or not the instruction sequence number is valid. */
        VALID_INST_SEQ_NUM = 0x00000008,
        /** Whether or not the pc is valid. */
        VALID_PC = 0x00000010,
        /** Whether or not the context ID is valid. */
        VALID_CONTEXT_ID = 0x00000020,
        /** Whether or not the sc result is valid. */
        VALID_EXTRA_DATA = 0x00000080,
        /**
         * These flags are *not* cleared when a Request object is reused
         * (assigned a new address).
         */
        STICKY_PRIVATE_FLAGS = VALID_CONTEXT_ID
    };

  private:

    /**
     * Set up a physical (e.g. device) request in a previously
     * allocated Request object.
     */
    void
    setPhys(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
    {
        _paddr = paddr;
        _size = size;
        _time = time;
        _masterId = mid;
        _flags.clear(~STICKY_FLAGS);
        _flags.set(flags);
        privateFlags.clear(~STICKY_PRIVATE_FLAGS);
        privateFlags.set(VALID_PADDR|VALID_SIZE);
        depth = 0;
        accessDelta = 0;
        //translateDelta = 0;
    }

    /**
     * The physical address of the request. Valid only if validPaddr
     * is set.
     */
    Addr _paddr;

    /**
     * The size of the request. This field must be set when vaddr or
     * paddr is written via setVirt() or setPhys(), so it is always
     * valid as long as one of the address fields is valid.
     */
    unsigned _size;

    /** The requestor ID which is unique in the system for all ports
     * that are capable of issuing a transaction
     */
    MasterID _masterId;

    /** Flag structure for the request. */
    Flags _flags;

    /** Memory space configuration flag structure for the request. */
    MemSpaceConfigFlags _memSpaceConfigFlags;

    /** Private flags for field validity checking. */
    PrivateFlags privateFlags;

    /**
     * The time this request was started. Used to calculate
     * latencies. This field is set to curTick() any time paddr or vaddr
     * is written.
     */
    Tick _time;

    /**
     * The task id associated with this request
     */
    uint32_t _taskId;

    /** The address space ID. */
    int _asid;

    /** The virtual address of the request. */
    Addr _vaddr;

    /**
     * Extra data for the request, such as the return value of
     * store conditional or the compare value for a CAS. */
    uint64_t _extraData;

    /** The context ID (for statistics, locks, and wakeups). */
    ContextID _contextId;

    /** program counter of initiating access; for tracing/debugging */
    Addr _pc;

    /** Sequence number of the instruction that creates the request */
    InstSeqNum _reqInstSeqNum;

    /** A pointer to an atomic operation */
    AtomicOpFunctor *atomicOpFunctor;

  public:

    /**
     * Minimal constructor. No fields are initialized. (Note that
     * _flags and privateFlags are cleared by Flags default
     * constructor.)
     */
    Request()
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {}

    Request(Addr paddr, unsigned size, Flags flags, MasterID mid,
            InstSeqNum seq_num, ContextID cid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(seq_num), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, curTick());
        setContext(cid);
        privateFlags.set(VALID_INST_SEQ_NUM);
    }

    /**
     * Constructor for physical (e.g. device) requests. Initializes
     * just physical address, size, flags, and timestamp (to curTick()).
     * These fields are adequate to perform a request.
     */
    Request(Addr paddr, unsigned size, Flags flags, MasterID mid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, curTick());
    }
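    // Editor's sketch (not in the original source): a device model issuing
    // an 8-byte uncacheable read at physical address 0x1000. A real device
    // would use a master id obtained from System::getMasterId(); the
    // statically allocated funcMasterId is used here only to keep the
    // sketch self-contained:
    //
    //     RequestPtr req = std::make_shared<Request>(
    //         0x1000, 8, Request::UNCACHEABLE, Request::funcMasterId);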

    Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, time);
    }

    Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time,
            Addr pc)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(pc),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, time);
        privateFlags.set(VALID_PC);
    }

    Request(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
            Addr pc, ContextID cid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setVirt(asid, vaddr, size, flags, mid, pc);
        setContext(cid);
    }

    Request(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
            Addr pc, ContextID cid, AtomicOpFunctor *atomic_op)
        // 12749:223c83ed9979 (old): the constructor held the functor in
        // its initializer list and setVirt() did not take it:
        : atomicOpFunctor(atomic_op)
    {
        setVirt(asid, vaddr, size, flags, mid, pc);
        setContext(cid);
    }
        // 12766:1c347e60c7fd (new): the functor is passed through
        // setVirt() instead, and there is no initializer list:
    {
        setVirt(asid, vaddr, size, flags, mid, pc, atomic_op);
        setContext(cid);
    }

    // Added in 12766: a copy constructor that clones the atomic-op
    // functor so that each Request owns its own instance.
    Request(const Request& other)
        : _paddr(other._paddr), _size(other._size),
          _masterId(other._masterId),
          _flags(other._flags),
          _memSpaceConfigFlags(other._memSpaceConfigFlags),
          privateFlags(other.privateFlags),
          _time(other._time),
          _taskId(other._taskId), _asid(other._asid), _vaddr(other._vaddr),
          _extraData(other._extraData), _contextId(other._contextId),
          _pc(other._pc), _reqInstSeqNum(other._reqInstSeqNum),
          translateDelta(other.translateDelta),
          accessDelta(other.accessDelta), depth(other.depth)
    {
        if (other.atomicOpFunctor)
            atomicOpFunctor = (other.atomicOpFunctor)->clone();
        else
            atomicOpFunctor = nullptr;
    }

    ~Request()
    {
        if (hasAtomicOpFunctor()) {
            delete atomicOpFunctor;
        }
    }

    /**
     * Set up Context numbers.
     */
    void
    setContext(ContextID context_id)
    {
        _contextId = context_id;
        privateFlags.set(VALID_CONTEXT_ID);
    }

    /**
     * Set up a virtual (e.g., CPU) request in a previously
     * allocated Request object.
     */
    void
    setVirt(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
            Addr pc)                                    // old (12749)
            Addr pc, AtomicOpFunctor *amo_op = nullptr) // new (12766)
    {
        _asid = asid;
        _vaddr = vaddr;
        _size = size;
        _masterId = mid;
        _pc = pc;
        _time = curTick();

        _flags.clear(~STICKY_FLAGS);
        _flags.set(flags);
        privateFlags.clear(~STICKY_PRIVATE_FLAGS);
        privateFlags.set(VALID_VADDR|VALID_SIZE|VALID_PC);
        depth = 0;
        accessDelta = 0;
        translateDelta = 0;
        atomicOpFunctor = amo_op;   // added in 12766
    }

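    // Editor's sketch (not in the original source): with the 12766 API, a
    // CPU or GPU model attaches the functor when it builds the request,
    // and copies made later (e.g. by splitOnVaddr()) clone it rather than
    // aliasing the same pointer. `vaddr`, `pc`, and `my_amo_op` (an
    // AtomicOpFunctor*) are hypothetical:
    //
    //     RequestPtr req = std::make_shared<Request>(
    //         /* asid */ 0, vaddr, /* size */ 4,
    //         Request::ATOMIC_RETURN_OP, Request::funcMasterId,
    //         pc, /* cid */ 0, my_amo_op);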
    /**
     * Set just the physical address. This is usually used to record the
     * result of a translation. However, when using virtualized CPUs
     * setPhys() is sometimes called to finalize a physical address
     * without a virtual address, so we can't check if the virtual
     * address is valid.
     */
    void
    setPaddr(Addr paddr)
    {
        _paddr = paddr;
        privateFlags.set(VALID_PADDR);
    }

    /**
     * Generate two requests as if this request had been split into two
     * pieces. The original request can't have been translated already.
     */
    void splitOnVaddr(Addr split_addr, RequestPtr &req1, RequestPtr &req2)
    {
        assert(privateFlags.isSet(VALID_VADDR));
        assert(privateFlags.noneSet(VALID_PADDR));
        assert(split_addr > _vaddr && split_addr < _vaddr + _size);
        req1 = std::make_shared<Request>(*this);
        req2 = std::make_shared<Request>(*this);
        req1->_size = split_addr - _vaddr;
        req2->_vaddr = split_addr;
        req2->_size = _size - req1->_size;
    }
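    // Editor's sketch (not in the original source): splitting an 8-byte
    // access at vaddr 0x3c that straddles a 64-byte line boundary, before
    // translation. The halves come back as [0x3c, 0x40) and [0x40, 0x44):
    //
    //     RequestPtr r1, r2;
    //     req->splitOnVaddr(0x40, r1, r2);
    //     // r1->getVaddr() == 0x3c, r1->getSize() == 4
    //     // r2->getVaddr() == 0x40, r2->getSize() == 4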

    /**
     * Accessor for paddr.
     */
    bool
    hasPaddr() const
    {
        return privateFlags.isSet(VALID_PADDR);
    }

    Addr
    getPaddr() const
    {
        assert(privateFlags.isSet(VALID_PADDR));
        return _paddr;
    }

    /**
     * Time for the TLB/table walker to successfully translate this request.
     */
    Tick translateDelta;

    /**
     * Access latency to complete this memory transaction not including
     * translation time.
     */
    Tick accessDelta;

    /**
     * Level of the cache hierarchy where this request was responded to
     * (e.g. 0 = L1; 1 = L2).
     */
    mutable int depth;

    /**
     * Accessor for size.
     */
    bool
    hasSize() const
    {
        return privateFlags.isSet(VALID_SIZE);
    }

    unsigned
    getSize() const
    {
        assert(privateFlags.isSet(VALID_SIZE));
        return _size;
    }

    /** Accessor for time. */
    Tick
    time() const
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _time;
    }

    /**
     * Accessor for atomic-op functor.
     */
    bool
    hasAtomicOpFunctor()
    {
        return atomicOpFunctor != NULL;
    }

    AtomicOpFunctor *
    getAtomicOpFunctor()
    {
        assert(atomicOpFunctor != NULL);
        return atomicOpFunctor;
    }
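    // Editor's sketch (not in the original source): a minimal functor,
    // assuming the AtomicOpFunctor interface declared in base/types.hh
    // with pure virtual operator()(uint8_t *) and clone(). It adds a
    // constant to a 32-bit word in place:
    //
    //     struct AtomicAdd32 : public AtomicOpFunctor
    //     {
    //         uint32_t a;
    //         AtomicAdd32(uint32_t _a) : a(_a) {}
    //         void operator()(uint8_t *p) { *(uint32_t *)p += a; }
    //         AtomicOpFunctor* clone() { return new AtomicAdd32(a); }
    //     };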

    /** Accessor for flags. */
    Flags
    getFlags()
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _flags;
    }

    /** Note that unlike other accessors, this function sets *specific
        flags* (ORs them in); it does not assign its argument to the
        _flags field. Thus this method should rightly be called
        setFlags() and not just flags(). */
    void
    setFlags(Flags flags)
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        _flags.set(flags);
    }

    void
    setMemSpaceConfigFlags(MemSpaceConfigFlags extraFlags)
    {
        assert(privateFlags.isSet(VALID_PADDR | VALID_VADDR));
        _memSpaceConfigFlags.set(extraFlags);
    }

    /** Accessor function for vaddr.*/
    bool
    hasVaddr() const
    {
        return privateFlags.isSet(VALID_VADDR);
    }

    Addr
    getVaddr() const
    {
        assert(privateFlags.isSet(VALID_VADDR));
        return _vaddr;
    }

    /** Accessor for the requestor id. */
    MasterID
    masterId() const
    {
        return _masterId;
    }

    uint32_t
    taskId() const
    {
        return _taskId;
    }

    void
    taskId(uint32_t id) {
        _taskId = id;
    }

    /** Accessor function for asid.*/
    int
    getAsid() const
    {
        assert(privateFlags.isSet(VALID_VADDR));
        return _asid;
    }

    /** Accessor function for asid.*/
    void
    setAsid(int asid)
    {
        _asid = asid;
    }

    /** Accessor function for architecture-specific flags.*/
    ArchFlagsType
    getArchFlags() const
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _flags & ARCH_BITS;
    }

    /** Accessor function to check if sc result is valid. */
    bool
    extraDataValid() const
    {
        return privateFlags.isSet(VALID_EXTRA_DATA);
    }

    /** Accessor function for store conditional return value.*/
    uint64_t
    getExtraData() const
    {
        assert(privateFlags.isSet(VALID_EXTRA_DATA));
        return _extraData;
    }

    /** Accessor function for store conditional return value.*/
    void
    setExtraData(uint64_t extraData)
    {
        _extraData = extraData;
        privateFlags.set(VALID_EXTRA_DATA);
    }
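    // Editor's sketch (not in the original source): how the memory side
    // can report a store-conditional outcome through extraData, assuming
    // the 1 = success / 0 = failure convention used elsewhere in gem5's
    // locked-address checking. The CPU model reads the result back:
    //
    //     req->setExtraData(0);   // memory side: SC failed
    //     if (req->extraDataValid() && req->getExtraData() == 0) {
    //         // the CPU model would retry the LL/SC pair
    //     }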

    bool
    hasContextId() const
    {
        return privateFlags.isSet(VALID_CONTEXT_ID);
    }

    /** Accessor function for context ID.*/
    ContextID
    contextId() const
    {
        assert(privateFlags.isSet(VALID_CONTEXT_ID));
        return _contextId;
    }

    void
    setPC(Addr pc)
    {
        privateFlags.set(VALID_PC);
        _pc = pc;
    }

    bool
    hasPC() const
    {
        return privateFlags.isSet(VALID_PC);
    }

    /** Accessor function for pc.*/
    Addr
    getPC() const
    {
        assert(privateFlags.isSet(VALID_PC));
        return _pc;
    }

    /**
     * Increment/Get the depth at which this request is responded to.
     * This currently happens when the request misses in any cache level.
     */
    void incAccessDepth() const { depth++; }
    int getAccessDepth() const { return depth; }

    /**
     * Set/Get the time taken for this request to be successfully translated.
     */
    void setTranslateLatency() { translateDelta = curTick() - _time; }
    Tick getTranslateLatency() const { return translateDelta; }

    /**
     * Set/Get the time taken to complete this request's access, not including
     * the time to successfully translate the request.
     */
    void setAccessLatency() { accessDelta = curTick() - _time - translateDelta; }
    Tick getAccessLatency() const { return accessDelta; }
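    // Editor's sketch (not in the original source): the intended call
    // order, since both setters measure against _time, the tick at which
    // the request was created:
    //
    //     req->setTranslateLatency();  // when the TLB/walker finishes
    //     req->setAccessLatency();     // when the memory response arrives
    //     Tick total = req->getTranslateLatency() + req->getAccessLatency();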

    /**
     * Accessor for the sequence number of instruction that creates the
     * request.
     */
    bool
    hasInstSeqNum() const
    {
        return privateFlags.isSet(VALID_INST_SEQ_NUM);
    }

    InstSeqNum
    getReqInstSeqNum() const
    {
        assert(privateFlags.isSet(VALID_INST_SEQ_NUM));
        return _reqInstSeqNum;
    }

    void
    setReqInstSeqNum(const InstSeqNum seq_num)
    {
        privateFlags.set(VALID_INST_SEQ_NUM);
        _reqInstSeqNum = seq_num;
    }

    /** Accessor functions for flags. Note that these are for testing
        only; setting flags should be done via setFlags(). */
    bool isUncacheable() const { return _flags.isSet(UNCACHEABLE); }
    bool isStrictlyOrdered() const { return _flags.isSet(STRICT_ORDER); }
    bool isInstFetch() const { return _flags.isSet(INST_FETCH); }
    bool isPrefetch() const { return _flags.isSet(PREFETCH); }
    bool isLLSC() const { return _flags.isSet(LLSC); }
    bool isPriv() const { return _flags.isSet(PRIVILEGED); }
    bool isLockedRMW() const { return _flags.isSet(LOCKED_RMW); }
    bool isSwap() const { return _flags.isSet(MEM_SWAP|MEM_SWAP_COND); }
    bool isCondSwap() const { return _flags.isSet(MEM_SWAP_COND); }
    bool isMmappedIpr() const { return _flags.isSet(MMAPPED_IPR); }
    bool isSecure() const { return _flags.isSet(SECURE); }
    bool isPTWalk() const { return _flags.isSet(PT_WALK); }
    bool isAcquire() const { return _flags.isSet(ACQUIRE); }
    bool isRelease() const { return _flags.isSet(RELEASE); }
    bool isKernel() const { return _flags.isSet(KERNEL); }
    bool isAtomicReturn() const { return _flags.isSet(ATOMIC_RETURN_OP); }
    bool isAtomicNoReturn() const { return _flags.isSet(ATOMIC_NO_RETURN_OP); }

    bool
    isAtomic() const
    {
        return _flags.isSet(ATOMIC_RETURN_OP) ||
               _flags.isSet(ATOMIC_NO_RETURN_OP);
    }

    /**
     * Accessor functions for the destination of a memory request. The
     * destination flag can specify a point of reference for the
     * operation (e.g. a cache block clean to the point of
     * unification). At the moment the destination is only used by the
     * cache maintenance operations.
     */
    bool isToPOU() const { return _flags.isSet(DST_POU); }
    bool isToPOC() const { return _flags.isSet(DST_POC); }
    Flags getDest() const { return _flags & DST_BITS; }

    /**
     * Accessor functions for the memory space configuration flags, as used
     * by GPU ISAs such as the Heterogeneous System Architecture (HSA). Note
     * that these are for testing only; setting extraFlags should be done via
     * setMemSpaceConfigFlags().
     */
    bool isScoped() const { return _memSpaceConfigFlags.isSet(SCOPE_VALID); }

    bool
    isWavefrontScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(WAVEFRONT_SCOPE);
    }

    bool
    isWorkgroupScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(WORKGROUP_SCOPE);
    }

    bool
    isDeviceScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(DEVICE_SCOPE);
    }

    bool
    isSystemScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(SYSTEM_SCOPE);
    }

    bool
    isGlobalSegment() const
    {
        return _memSpaceConfigFlags.isSet(GLOBAL_SEGMENT) ||
               (!isGroupSegment() && !isPrivateSegment() &&
                !isKernargSegment() && !isReadonlySegment() &&
                !isSpillSegment() && !isArgSegment());
    }

    bool
    isGroupSegment() const
    {
        return _memSpaceConfigFlags.isSet(GROUP_SEGMENT);
    }

    bool
    isPrivateSegment() const
    {
        return _memSpaceConfigFlags.isSet(PRIVATE_SEGMENT);
    }

    bool
    isKernargSegment() const
    {
        return _memSpaceConfigFlags.isSet(KERNARG_SEGMENT);
    }

    bool
    isReadonlySegment() const
    {
        return _memSpaceConfigFlags.isSet(READONLY_SEGMENT);
    }

    bool
    isSpillSegment() const
    {
        return _memSpaceConfigFlags.isSet(SPILL_SEGMENT);
    }

    bool
    isArgSegment() const
    {
        return _memSpaceConfigFlags.isSet(ARG_SEGMENT);
    }

    /**
     * Accessor functions to determine whether this request is part of
     * a cache maintenance operation. At the moment three operations
     * are supported:
     *
     * 1) A cache clean operation updates all copies of a memory
     *    location to the point of reference,
     * 2) A cache invalidate operation invalidates all copies of the
     *    specified block in the memory above the point of reference,
     * 3) A clean and invalidate operation is a combination of the two
     *    operations.
     * @{ */
    bool isCacheClean() const { return _flags.isSet(CLEAN); }
    bool isCacheInvalidate() const { return _flags.isSet(INVALIDATE); }
    bool isCacheMaintenance() const { return _flags.isSet(CLEAN|INVALIDATE); }
    /** @} */
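    // Editor's sketch (not in the original source): a cache clean to the
    // point of coherence, combining a maintenance flag with a destination
    // flag; isCacheClean() and isToPOC() would both report true. `addr`
    // and `blk_size` are hypothetical:
    //
    //     RequestPtr req = std::make_shared<Request>(
    //         addr, blk_size, Request::CLEAN | Request::DST_POC,
    //         Request::funcMasterId);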
};

#endif // __MEM_REQUEST_HH__