request.hh (12346:9b1144d046ca)
/*
 * Copyright (c) 2012-2013,2017 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ron Dreslinski
 *          Steve Reinhardt
 *          Ali Saidi
 */

/**
 * @file
 * Declaration of a request, the overall memory request consisting of
 * the parts of the request that are persistent throughout the transaction.
 */

#ifndef __MEM_REQUEST_HH__
#define __MEM_REQUEST_HH__

#include <cassert>
#include <climits>

#include "base/flags.hh"
#include "base/logging.hh"
#include "base/types.hh"
#include "cpu/inst_seq.hh"
#include "sim/core.hh"

/**
 * Special TaskIds that are used for per-context-switch stats dumps
 * and Cache Occupancy. Having too many tasks seems to be a problem
 * with vector stats. 1024 seems to be a reasonable number that
 * doesn't cause a problem with stats and is large enough for realistic
 * benchmarks (Linux/Android boot, BBench, etc.)
 */

namespace ContextSwitchTaskId {
    enum TaskId {
        MaxNormalTaskId = 1021, /* Maximum number of normal tasks */
        Prefetcher = 1022, /* For cache lines brought in by prefetcher */
        DMA = 1023, /* Mostly Table Walker */
        Unknown = 1024,
        NumTaskId
    };
}
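
// A minimal usage sketch (hypothetical driver code, assuming a valid
// RequestPtr `req`): tagging DMA traffic with one of the static TaskIds
// above lets the per-task stats attribute it correctly.
//
//     req->taskId(ContextSwitchTaskId::DMA);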

class Request;

typedef Request* RequestPtr;
typedef uint16_t MasterID;

class Request
{
  public:
    typedef uint64_t FlagsType;
    typedef uint8_t ArchFlagsType;
    typedef ::Flags<FlagsType> Flags;

    enum : FlagsType {
        /**
         * Architecture specific flags.
         *
         * These bits in the flag field are reserved for
         * architecture-specific code. For example, SPARC uses them to
         * represent ASIs.
         */
        ARCH_BITS = 0x000000FF,
        /** The request was an instruction fetch. */
        INST_FETCH = 0x00000100,
        /** The virtual address is also the physical address. */
        PHYSICAL = 0x00000200,
        /**
         * The request is to an uncacheable address.
         *
         * @note Uncacheable accesses may be reordered by CPU models. The
         * STRICT_ORDER flag should be set if such reordering is
         * undesirable.
         */
        UNCACHEABLE = 0x00000400,
        /**
         * The request is required to be strictly ordered by <i>CPU
         * models</i> and is non-speculative.
         *
         * A strictly ordered request is guaranteed to never be
         * re-ordered or executed speculatively by a CPU model. The
         * memory system may still reorder requests in caches unless
         * the UNCACHEABLE flag is set as well.
         */
        STRICT_ORDER = 0x00000800,
        /** This request is to a memory mapped register. */
        MMAPPED_IPR = 0x00002000,
        /** This request is made in privileged mode. */
        PRIVILEGED = 0x00008000,

        /**
         * This is a write that is targeted at zeroing an entire
         * cache block. There is no need for a read/modify/write.
         */
        CACHE_BLOCK_ZERO = 0x00010000,

        /** The request should not cause a memory access. */
        NO_ACCESS = 0x00080000,
        /**
         * This request will lock or unlock the accessed memory. When
         * used with a load, the access locks the particular chunk of
         * memory. When used with a store, it unlocks. The rule is
         * that locked accesses have to be made up of a locked load,
         * some operation on the data, and then a locked store.
         */
        LOCKED_RMW = 0x00100000,
        /** The request is a load locked/store conditional. */
        LLSC = 0x00200000,
        /** This request is for a memory swap. */
        MEM_SWAP = 0x00400000,
        MEM_SWAP_COND = 0x00800000,

        /** The request is a prefetch. */
        PREFETCH = 0x01000000,
        /** The request should be prefetched into the exclusive state. */
        PF_EXCLUSIVE = 0x02000000,
        /** The request should be marked as LRU. */
        EVICT_NEXT = 0x04000000,
        /** The request should be marked with ACQUIRE. */
        ACQUIRE = 0x00020000,
        /** The request should be marked with RELEASE. */
        RELEASE = 0x00040000,

        /** The request is an atomic that returns data. */
        ATOMIC_RETURN_OP = 0x40000000,
        /** The request is an atomic that does not return data. */
        ATOMIC_NO_RETURN_OP = 0x80000000,

        /** The request should be marked with KERNEL.
         * Used to indicate the synchronization associated with a GPU kernel
         * launch or completion.
         */
        KERNEL = 0x00001000,

        /**
         * The request should be handled by the generic IPR code (only
         * valid together with MMAPPED_IPR)
         */
        GENERIC_IPR = 0x08000000,

        /** The request targets the secure memory space. */
        SECURE = 0x10000000,
        /** The request is a page table walk */
        PT_WALK = 0x20000000,

        /** The request targets the point of unification */
        DST_POU = 0x0000001000000000,

        /** The request targets the point of coherence */
        DST_POC = 0x0000002000000000,

        /** Bits to define the destination of a request */
        DST_BITS = 0x0000003000000000,

        /**
         * These flags are *not* cleared when a Request object is
         * reused (assigned a new address).
         */
        STICKY_FLAGS = INST_FETCH
    };
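
    // A brief usage sketch (hypothetical address and master id): device
    // accesses that must never be reordered are typically marked with both
    // UNCACHEABLE and STRICT_ORDER, since each flag alone only constrains
    // part of the system (see the flag comments above).
    //
    //     Request req(0x10000000, 4,
    //                 Request::UNCACHEABLE | Request::STRICT_ORDER, mid);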

    /** Master Ids that are statically allocated
     * @{*/
    enum : MasterID {
        /** This master id is used for writeback requests by the caches */
        wbMasterId = 0,
        /**
         * This master id is used for functional requests that
         * don't come from a particular device
         */
        funcMasterId = 1,
        /** This master id is used for message signaled interrupts */
        intMasterId = 2,
        /**
         * Invalid master id for assertion checking only. It is
         * invalid behavior to ever send this id as part of a request.
         */
        invldMasterId = std::numeric_limits<MasterID>::max()
    };
    /** @} */
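
    // Usage sketch: the static ids above let common traffic be attributed
    // without a per-port id, e.g. a cache writeback of one block
    // (hypothetical addr/blk_size values):
    //
    //     Request wb_req(addr, blk_size, 0, Request::wbMasterId);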

    typedef uint32_t MemSpaceConfigFlagsType;
    typedef ::Flags<MemSpaceConfigFlagsType> MemSpaceConfigFlags;

    enum : MemSpaceConfigFlagsType {
        /** Has a synchronization scope been set? */
        SCOPE_VALID = 0x00000001,
        /** Access has Wavefront scope visibility */
        WAVEFRONT_SCOPE = 0x00000002,
        /** Access has Workgroup scope visibility */
        WORKGROUP_SCOPE = 0x00000004,
        /** Access has Device (e.g., GPU) scope visibility */
        DEVICE_SCOPE = 0x00000008,
        /** Access has System (e.g., CPU + GPU) scope visibility */
        SYSTEM_SCOPE = 0x00000010,

        /** Global Segment */
        GLOBAL_SEGMENT = 0x00000020,
        /** Group Segment */
        GROUP_SEGMENT = 0x00000040,
        /** Private Segment */
        PRIVATE_SEGMENT = 0x00000080,
        /** Kernarg Segment */
        KERNARG_SEGMENT = 0x00000100,
        /** Readonly Segment */
        READONLY_SEGMENT = 0x00000200,
        /** Spill Segment */
        SPILL_SEGMENT = 0x00000400,
        /** Arg Segment */
        ARG_SEGMENT = 0x00000800,
    };
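
    // Usage sketch (hypothetical GPU ISA code): a scoped access must set
    // SCOPE_VALID together with the scope itself, since the is*Scope()
    // accessors below assert isScoped().
    //
    //     req->setMemSpaceConfigFlags(Request::SCOPE_VALID |
    //                                 Request::DEVICE_SCOPE);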

  private:
    typedef uint8_t PrivateFlagsType;
    typedef ::Flags<PrivateFlagsType> PrivateFlags;

    enum : PrivateFlagsType {
        /** Whether or not the size is valid. */
        VALID_SIZE = 0x00000001,
        /** Whether or not paddr is valid (has been written yet). */
        VALID_PADDR = 0x00000002,
        /** Whether or not the vaddr & asid are valid. */
        VALID_VADDR = 0x00000004,
        /** Whether or not the instruction sequence number is valid. */
        VALID_INST_SEQ_NUM = 0x00000008,
        /** Whether or not the pc is valid. */
        VALID_PC = 0x00000010,
        /** Whether or not the context ID is valid. */
        VALID_CONTEXT_ID = 0x00000020,
        /** Whether or not the sc result is valid. */
        VALID_EXTRA_DATA = 0x00000080,
        /**
         * These flags are *not* cleared when a Request object is reused
         * (assigned a new address).
         */
        STICKY_PRIVATE_FLAGS = VALID_CONTEXT_ID
    };
269 private:
270
271 /**
272 * Set up a physical (e.g. device) request in a previously
273 * allocated Request object.
274 */
275 void
276 setPhys(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
277 {
278 _paddr = paddr;
279 _size = size;
280 _time = time;
281 _masterId = mid;
282 _flags.clear(~STICKY_FLAGS);
283 _flags.set(flags);
284 privateFlags.clear(~STICKY_PRIVATE_FLAGS);
285 privateFlags.set(VALID_PADDR|VALID_SIZE);
286 depth = 0;
287 accessDelta = 0;
288 //translateDelta = 0;
289 }

    /**
     * The physical address of the request. Valid only if validPaddr
     * is set.
     */
    Addr _paddr;

    /**
     * The size of the request. This field must be set when vaddr or
     * paddr is written via setVirt() or setPhys(), so it is always
     * valid as long as one of the address fields is valid.
     */
    unsigned _size;

    /** The requestor ID which is unique in the system for all ports
     * that are capable of issuing a transaction
     */
    MasterID _masterId;

    /** Flag structure for the request. */
    Flags _flags;

    /** Memory space configuration flag structure for the request. */
    MemSpaceConfigFlags _memSpaceConfigFlags;

    /** Private flags for field validity checking. */
    PrivateFlags privateFlags;

    /**
     * The time this request was started. Used to calculate
     * latencies. This field is set to curTick() any time paddr or vaddr
     * is written.
     */
    Tick _time;

    /**
     * The task id associated with this request
     */
    uint32_t _taskId;

    /** The address space ID. */
    int _asid;

    /** The virtual address of the request. */
    Addr _vaddr;

    /**
     * Extra data for the request, such as the return value of
     * store conditional or the compare value for a CAS. */
    uint64_t _extraData;

    /** The context ID (for statistics, locks, and wakeups). */
    ContextID _contextId;

    /** program counter of initiating access; for tracing/debugging */
    Addr _pc;

    /** Sequence number of the instruction that creates the request */
    InstSeqNum _reqInstSeqNum;

    /** A pointer to an atomic operation */
    AtomicOpFunctor *atomicOpFunctor;

  public:

    /**
     * Minimal constructor. No fields are initialized. (Note that
     * _flags and privateFlags are cleared by Flags default
     * constructor.)
     */
    Request()
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {}

    Request(Addr paddr, unsigned size, Flags flags, MasterID mid,
            InstSeqNum seq_num, ContextID cid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(seq_num), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, curTick());
        setContext(cid);
        privateFlags.set(VALID_INST_SEQ_NUM);
    }

    /**
     * Constructor for physical (e.g. device) requests. Initializes
     * just physical address, size, flags, and timestamp (to curTick()).
     * These fields are adequate to perform a request.
     */
    Request(Addr paddr, unsigned size, Flags flags, MasterID mid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, curTick());
    }

    Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, time);
    }

    Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time,
            Addr pc)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(pc),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, time);
        privateFlags.set(VALID_PC);
    }

    Request(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
            Addr pc, ContextID cid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setVirt(asid, vaddr, size, flags, mid, pc);
        setContext(cid);
    }

    Request(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
            Addr pc, ContextID cid, AtomicOpFunctor *atomic_op)
        : atomicOpFunctor(atomic_op)
    {
        setVirt(asid, vaddr, size, flags, mid, pc);
        setContext(cid);
    }

    ~Request()
    {
        if (hasAtomicOpFunctor()) {
            delete atomicOpFunctor;
        }
    }
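
    // A sketch of supplying an atomic operation (hypothetical functor and
    // arguments; assumes AtomicOpFunctor from base/types.hh exposes
    // virtual void operator()(uint8_t *)). The Request takes ownership
    // and deletes the functor in its destructor, as above.
    //
    //     struct AtomicInc : public AtomicOpFunctor
    //     {
    //         void operator()(uint8_t *p) { ++*(uint32_t *)p; }
    //     };
    //     RequestPtr req = new Request(asid, vaddr, sizeof(uint32_t),
    //                                  Request::ATOMIC_RETURN_OP, mid, pc,
    //                                  cid, new AtomicInc);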

    /**
     * Set up Context numbers.
     */
    void
    setContext(ContextID context_id)
    {
        _contextId = context_id;
        privateFlags.set(VALID_CONTEXT_ID);
    }

    /**
     * Set up a virtual (e.g., CPU) request in a previously
     * allocated Request object.
     */
    void
    setVirt(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
            Addr pc)
    {
        _asid = asid;
        _vaddr = vaddr;
        _size = size;
        _masterId = mid;
        _pc = pc;
        _time = curTick();

        _flags.clear(~STICKY_FLAGS);
        _flags.set(flags);
        privateFlags.clear(~STICKY_PRIVATE_FLAGS);
        privateFlags.set(VALID_VADDR|VALID_SIZE|VALID_PC);
        depth = 0;
        accessDelta = 0;
        translateDelta = 0;
    }
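
    // Reuse sketch: only STICKY_FLAGS (INST_FETCH) and
    // STICKY_PRIVATE_FLAGS (VALID_CONTEXT_ID) survive this call, so
    // re-targeting a Request at a new vaddr drops, e.g., a previously set
    // UNCACHEABLE flag while keeping the context id valid (hypothetical
    // arguments):
    //
    //     req->setVirt(asid, new_vaddr, 4, 0, mid, pc);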

    /**
     * Set just the physical address. This is usually used to record the
     * result of a translation. However, when using virtualized CPUs
     * setPhys() is sometimes called to finalize a physical address
     * without a virtual address, so we can't check if the virtual
     * address is valid.
     */
    void
    setPaddr(Addr paddr)
    {
        _paddr = paddr;
        privateFlags.set(VALID_PADDR);
    }

    /**
     * Generate two requests as if this request had been split into two
     * pieces. The original request can't have been translated already.
     */
    void splitOnVaddr(Addr split_addr, RequestPtr &req1, RequestPtr &req2)
    {
        assert(privateFlags.isSet(VALID_VADDR));
        assert(privateFlags.noneSet(VALID_PADDR));
        assert(split_addr > _vaddr && split_addr < _vaddr + _size);
        req1 = new Request(*this);
        req2 = new Request(*this);
        req1->_size = split_addr - _vaddr;
        req2->_vaddr = split_addr;
        req2->_size = _size - req1->_size;
    }
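
    // Split sketch (hypothetical addresses): an 8-byte access at vaddr
    // 0xffc crosses a 4 KiB page boundary and would be split at 0x1000
    // into two 4-byte requests that can then be translated independently.
    //
    //     RequestPtr req1, req2;
    //     req->splitOnVaddr(0x1000, req1, req2);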

    /**
     * Accessor for paddr.
     */
    bool
    hasPaddr() const
    {
        return privateFlags.isSet(VALID_PADDR);
    }

    Addr
    getPaddr() const
    {
        assert(privateFlags.isSet(VALID_PADDR));
        return _paddr;
    }

    /**
     * Time for the TLB/table walker to successfully translate this request.
     */
    Tick translateDelta;

    /**
     * Access latency to complete this memory transaction not including
     * translation time.
     */
    Tick accessDelta;

    /**
     * Level of the cache hierarchy where this request was responded to
     * (e.g. 0 = L1; 1 = L2).
     */
    mutable int depth;

    /**
     * Accessor for size.
     */
    bool
    hasSize() const
    {
        return privateFlags.isSet(VALID_SIZE);
    }

    unsigned
    getSize() const
    {
        assert(privateFlags.isSet(VALID_SIZE));
        return _size;
    }

    /** Accessor for time. */
    Tick
    time() const
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _time;
    }

    /**
     * Accessor for atomic-op functor.
     */
    bool
    hasAtomicOpFunctor()
    {
        return atomicOpFunctor != NULL;
    }

    AtomicOpFunctor *
    getAtomicOpFunctor()
    {
        assert(atomicOpFunctor != NULL);
        return atomicOpFunctor;
    }

    /** Accessor for flags. */
    Flags
    getFlags()
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _flags;
    }

    /** Note that unlike other accessors, this function sets *specific
        flags* (ORs them in); it does not assign its argument to the
        _flags field. Thus this method should rightly be called
        setFlags() and not just flags(). */
    void
    setFlags(Flags flags)
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        _flags.set(flags);
    }

    void
    setMemSpaceConfigFlags(MemSpaceConfigFlags extraFlags)
    {
        assert(privateFlags.isSet(VALID_PADDR | VALID_VADDR));
        _memSpaceConfigFlags.set(extraFlags);
    }

    /** Accessor function for vaddr.*/
    bool
    hasVaddr() const
    {
        return privateFlags.isSet(VALID_VADDR);
    }

    Addr
    getVaddr() const
    {
        assert(privateFlags.isSet(VALID_VADDR));
        return _vaddr;
    }

    /** Accessor for the requestor id. */
    MasterID
    masterId() const
    {
        return _masterId;
    }

    uint32_t
    taskId() const
    {
        return _taskId;
    }

    void
    taskId(uint32_t id) {
        _taskId = id;
    }

    /** Accessor function for asid.*/
    int
    getAsid() const
    {
        assert(privateFlags.isSet(VALID_VADDR));
        return _asid;
    }

    /** Accessor function for asid.*/
    void
    setAsid(int asid)
    {
        _asid = asid;
    }

    /** Accessor function for architecture-specific flags.*/
    ArchFlagsType
    getArchFlags() const
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _flags & ARCH_BITS;
    }

    /** Accessor function to check if sc result is valid. */
    bool
    extraDataValid() const
    {
        return privateFlags.isSet(VALID_EXTRA_DATA);
    }

    /** Accessor function for store conditional return value.*/
    uint64_t
    getExtraData() const
    {
        assert(privateFlags.isSet(VALID_EXTRA_DATA));
        return _extraData;
    }

    /** Accessor function for store conditional return value.*/
    void
    setExtraData(uint64_t extraData)
    {
        _extraData = extraData;
        privateFlags.set(VALID_EXTRA_DATA);
    }

    bool
    hasContextId() const
    {
        return privateFlags.isSet(VALID_CONTEXT_ID);
    }

    /** Accessor function for context ID.*/
    ContextID
    contextId() const
    {
        assert(privateFlags.isSet(VALID_CONTEXT_ID));
        return _contextId;
    }

    void
    setPC(Addr pc)
    {
        privateFlags.set(VALID_PC);
        _pc = pc;
    }

    bool
    hasPC() const
    {
        return privateFlags.isSet(VALID_PC);
    }

    /** Accessor function for pc.*/
    Addr
    getPC() const
    {
        assert(privateFlags.isSet(VALID_PC));
        return _pc;
    }

    /**
     * Increment/Get the depth at which this request is responded to.
     * This currently happens when the request misses in any cache level.
     */
    void incAccessDepth() const { depth++; }
    int getAccessDepth() const { return depth; }

    /**
     * Set/Get the time taken for this request to be successfully translated.
     */
    void setTranslateLatency() { translateDelta = curTick() - _time; }
    Tick getTranslateLatency() const { return translateDelta; }

    /**
     * Set/Get the time taken to complete this request's access, not
     * including the time to successfully translate the request.
     */
    void setAccessLatency() { accessDelta = curTick() - _time - translateDelta; }
    Tick getAccessLatency() const { return accessDelta; }
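
    // Timing sketch: _time is stamped in setVirt()/setPhys(); a TLB can
    // call setTranslateLatency() when translation completes, and the
    // memory system setAccessLatency() when the response arrives, so
    //
    //     Tick total = req->getTranslateLatency() + req->getAccessLatency();
    //
    // reconstructs the request's end-to-end latency in Ticks.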

    /**
     * Accessor for the sequence number of instruction that creates the
     * request.
     */
    bool
    hasInstSeqNum() const
    {
        return privateFlags.isSet(VALID_INST_SEQ_NUM);
    }

    InstSeqNum
    getReqInstSeqNum() const
    {
        assert(privateFlags.isSet(VALID_INST_SEQ_NUM));
        return _reqInstSeqNum;
    }

    void
    setReqInstSeqNum(const InstSeqNum seq_num)
    {
        privateFlags.set(VALID_INST_SEQ_NUM);
        _reqInstSeqNum = seq_num;
    }

    /** Accessor functions for flags. Note that these are for testing
        only; setting flags should be done via setFlags(). */
    bool isUncacheable() const { return _flags.isSet(UNCACHEABLE); }
    bool isStrictlyOrdered() const { return _flags.isSet(STRICT_ORDER); }
    bool isInstFetch() const { return _flags.isSet(INST_FETCH); }
    bool isPrefetch() const { return _flags.isSet(PREFETCH); }
    bool isLLSC() const { return _flags.isSet(LLSC); }
    bool isPriv() const { return _flags.isSet(PRIVILEGED); }
    bool isLockedRMW() const { return _flags.isSet(LOCKED_RMW); }
    bool isSwap() const { return _flags.isSet(MEM_SWAP|MEM_SWAP_COND); }
    bool isCondSwap() const { return _flags.isSet(MEM_SWAP_COND); }
    bool isMmappedIpr() const { return _flags.isSet(MMAPPED_IPR); }
    bool isSecure() const { return _flags.isSet(SECURE); }
    bool isPTWalk() const { return _flags.isSet(PT_WALK); }
    bool isAcquire() const { return _flags.isSet(ACQUIRE); }
    bool isRelease() const { return _flags.isSet(RELEASE); }
    bool isKernel() const { return _flags.isSet(KERNEL); }
    bool isAtomicReturn() const { return _flags.isSet(ATOMIC_RETURN_OP); }
    bool isAtomicNoReturn() const { return _flags.isSet(ATOMIC_NO_RETURN_OP); }

    bool
    isAtomic() const
    {
        return _flags.isSet(ATOMIC_RETURN_OP) ||
               _flags.isSet(ATOMIC_NO_RETURN_OP);
    }

    /**
     * Accessor functions for the destination of a memory request. The
     * destination flag can specify a point of reference for the
     * operation (e.g. a cache block clean to the point of
     * unification). At the moment the destination is only used by the
     * cache maintenance operations.
     */
    bool isToPOU() const { return _flags.isSet(DST_POU); }
    bool isToPOC() const { return _flags.isSet(DST_POC); }
    Flags getDest() const { return _flags & DST_BITS; }
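
    // Destination sketch (hypothetical): a cache clean analogous to an ARM
    // "clean to the point of coherence" operation would carry DST_POC, and
    // the hierarchy can query the destination to decide how far to
    // propagate it.
    //
    //     if (req->isToPOC()) { /* forward the clean past the caches */ }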

    /**
     * Accessor functions for the memory space configuration flags, used by
     * GPU ISAs such as the Heterogeneous System Architecture (HSA). Note
     * that these are for testing only; setting extraFlags should be done
     * via setMemSpaceConfigFlags().
     */
    bool isScoped() const { return _memSpaceConfigFlags.isSet(SCOPE_VALID); }

    bool
    isWavefrontScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(WAVEFRONT_SCOPE);
    }

    bool
    isWorkgroupScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(WORKGROUP_SCOPE);
    }

    bool
    isDeviceScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(DEVICE_SCOPE);
    }

    bool
    isSystemScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(SYSTEM_SCOPE);
    }

    bool
    isGlobalSegment() const
    {
        return _memSpaceConfigFlags.isSet(GLOBAL_SEGMENT) ||
               (!isGroupSegment() && !isPrivateSegment() &&
                !isKernargSegment() && !isReadonlySegment() &&
                !isSpillSegment() && !isArgSegment());
    }

    bool
    isGroupSegment() const
    {
        return _memSpaceConfigFlags.isSet(GROUP_SEGMENT);
    }

    bool
    isPrivateSegment() const
    {
        return _memSpaceConfigFlags.isSet(PRIVATE_SEGMENT);
    }

    bool
    isKernargSegment() const
    {
        return _memSpaceConfigFlags.isSet(KERNARG_SEGMENT);
    }

    bool
    isReadonlySegment() const
    {
        return _memSpaceConfigFlags.isSet(READONLY_SEGMENT);
    }

    bool
    isSpillSegment() const
    {
        return _memSpaceConfigFlags.isSet(SPILL_SEGMENT);
    }

    bool
    isArgSegment() const
    {
        return _memSpaceConfigFlags.isSet(ARG_SEGMENT);
    }
};

#endif // __MEM_REQUEST_HH__