request.hh (12917:c18b776f460c) → request.hh (13367:dc06baae4275)
1/*
2 * Copyright (c) 2012-2013,2017-2018 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Ron Dreslinski
42 * Steve Reinhardt
43 * Ali Saidi
44 */
45
46/**
47 * @file
48 * Declaration of a request, the overall memory request consisting of
49 * the parts of the request that are persistent throughout the transaction.
50 */
51
52#ifndef __MEM_REQUEST_HH__
53#define __MEM_REQUEST_HH__
54
55#include <cassert>
56#include <climits>
#include <limits>  // for std::numeric_limits (invldMasterId)
#include <memory>  // for std::shared_ptr (RequestPtr)
57
58#include "base/flags.hh"
59#include "base/logging.hh"
60#include "base/types.hh"
61#include "cpu/inst_seq.hh"
62#include "sim/core.hh"
63
64/**
65 * Special TaskIds that are used for per-context-switch stats dumps
66 * and Cache Occupancy. Having too many tasks seems to be a problem
67 * with vector stats. 1024 seems to be a reasonable number that
68 * doesn't cause a problem with stats and is large enough for realistic
69 * benchmarks (Linux/Android boot, BBench, etc.)
70 */
71
72namespace ContextSwitchTaskId {
73 enum TaskId {
74 MaxNormalTaskId = 1021, /* Maximum number of normal tasks */
75 Prefetcher = 1022, /* For cache lines brought in by prefetcher */
76 DMA = 1023, /* Mostly Table Walker */
77 Unknown = 1024,
78 NumTaskId
79 };
80}
81
82class Request;
83
84typedef std::shared_ptr<Request> RequestPtr;
85typedef uint16_t MasterID;
86
87class Request
88{
89 public:
90 typedef uint64_t FlagsType;
91 typedef uint8_t ArchFlagsType;
92 typedef ::Flags<FlagsType> Flags;
93
94 enum : FlagsType {
95 /**
96 * Architecture specific flags.
97 *
98 * These bits in the flag field are reserved for
99 * architecture-specific code. For example, SPARC uses them to
100 * represent ASIs.
101 */
102 ARCH_BITS = 0x000000FF,
103 /** The request was an instruction fetch. */
104 INST_FETCH = 0x00000100,
105 /** The virtual address is also the physical address. */
106 PHYSICAL = 0x00000200,
107 /**
108 * The request is to an uncacheable address.
109 *
110 * @note Uncacheable accesses may be reordered by CPU models. The
111 * STRICT_ORDER flag should be set if such reordering is
112 * undesirable.
113 */
114 UNCACHEABLE = 0x00000400,
115 /**
116 * The request is required to be strictly ordered by <i>CPU
117 * models</i> and is non-speculative.
118 *
119 * A strictly ordered request is guaranteed to never be
120 * re-ordered or executed speculatively by a CPU model. The
121 * memory system may still reorder requests in caches unless
122 * the UNCACHEABLE flag is set as well.
123 */
124 STRICT_ORDER = 0x00000800,
125 /** This request is to a memory mapped register. */
126 MMAPPED_IPR = 0x00002000,
127 /** This request is made in privileged mode. */
128 PRIVILEGED = 0x00008000,
129
130 /**
131 * This is a write that is targeted at zeroing an entire
132 * cache block. There is no need for a read/modify/write.
133 */
134 CACHE_BLOCK_ZERO = 0x00010000,
135
136 /** The request should not cause a memory access. */
137 NO_ACCESS = 0x00080000,
138 /**
139 * This request will lock or unlock the accessed memory. When
140 * used with a load, the access locks the particular chunk of
141 * memory. When used with a store, it unlocks. The rule is
142 * that locked accesses have to be made up of a locked load,
143 * some operation on the data, and then a locked store.
144 */
145 LOCKED_RMW = 0x00100000,
146 /** The request is a Load locked/store conditional. */
147 LLSC = 0x00200000,
148 /** This request is for a memory swap. */
149 MEM_SWAP = 0x00400000,
150 MEM_SWAP_COND = 0x00800000,
151
152 /** The request is a prefetch. */
153 PREFETCH = 0x01000000,
154 /** The request should be prefetched into the exclusive state. */
155 PF_EXCLUSIVE = 0x02000000,
156 /** The request should be marked as LRU. */
157 EVICT_NEXT = 0x04000000,
158 /** The request should be marked with ACQUIRE. */
159 ACQUIRE = 0x00020000,
160 /** The request should be marked with RELEASE. */
161 RELEASE = 0x00040000,
162
163 /** The request is an atomic that returns data. */
164 ATOMIC_RETURN_OP = 0x40000000,
165 /** The request is an atomic that does not return data. */
166 ATOMIC_NO_RETURN_OP = 0x80000000,
167
168 /** The request should be marked with KERNEL.
169 * Used to indicate the synchronization associated with a GPU kernel
170 * launch or completion.
171 */
172 KERNEL = 0x00001000,
173
174 /**
175 * The request should be handled by the generic IPR code (only
176 * valid together with MMAPPED_IPR)
177 */
178 GENERIC_IPR = 0x08000000,
179
180 /** The request targets the secure memory space. */
181 SECURE = 0x10000000,
182 /** The request is a page table walk */
183 PT_WALK = 0x20000000,
184
185 /** The request invalidates a memory location */
186 INVALIDATE = 0x0000000100000000,
187 /** The request cleans a memory location */
188 CLEAN = 0x0000000200000000,
189
190 /** The request targets the point of unification */
191 DST_POU = 0x0000001000000000,
192
193 /** The request targets the point of coherence */
194 DST_POC = 0x0000002000000000,
195
196 /** Bits to define the destination of a request */
197 DST_BITS = 0x0000003000000000,
198
199 /**
200 * These flags are *not* cleared when a Request object is
201 * reused (assigned a new address).
202 */
203 STICKY_FLAGS = INST_FETCH
204 };
205 static const FlagsType STORE_NO_DATA = CACHE_BLOCK_ZERO |
206 CLEAN | INVALIDATE;
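    // A minimal sketch (not from the source) of what STORE_NO_DATA is
    // for: it bundles the write-like commands that carry no payload, so
    // store-data staging can be skipped with a single flag test, e.g.
    //
    //     if (!req->getFlags().isSet(Request::STORE_NO_DATA))
    //         stageStoreData(req);  // hypothetical helper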
207
208 /** Master Ids that are statically allocated
209 * @{*/
210 enum : MasterID {
211 /** This master id is used for writeback requests by the caches */
212 wbMasterId = 0,
213 /**
214 * This master id is used for functional requests that
215 * don't come from a particular device
216 */
217 funcMasterId = 1,
218 /** This master id is used for message signaled interrupts */
219 intMasterId = 2,
220 /**
221 * Invalid master id for assertion checking only. It is
222 * invalid behavior to ever send this id as part of a request.
223 */
224 invldMasterId = std::numeric_limits<MasterID>::max()
225 };
226 /** @} */
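    // Sketch of the usual alternative to the static IDs above: a device
    // model typically asks the System for its own master ID once at
    // construction (assuming the System::getMasterId() interface of this
    // code base), e.g.
    //
    //     MasterID mid = system->getMasterId(name() + ".dma");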
227
228 typedef uint32_t MemSpaceConfigFlagsType;
229 typedef ::Flags<MemSpaceConfigFlagsType> MemSpaceConfigFlags;
230
231 enum : MemSpaceConfigFlagsType {
232 /** Has a synchronization scope been set? */
233 SCOPE_VALID = 0x00000001,
234 /** Access has Wavefront scope visibility */
235 WAVEFRONT_SCOPE = 0x00000002,
236 /** Access has Workgroup scope visibility */
237 WORKGROUP_SCOPE = 0x00000004,
238 /** Access has Device (e.g., GPU) scope visibility */
239 DEVICE_SCOPE = 0x00000008,
240 /** Access has System (e.g., CPU + GPU) scope visibility */
241 SYSTEM_SCOPE = 0x00000010,
242
243 /** Global Segment */
244 GLOBAL_SEGMENT = 0x00000020,
245 /** Group Segment */
246 GROUP_SEGMENT = 0x00000040,
247 /** Private Segment */
248 PRIVATE_SEGMENT = 0x00000080,
249 /** Kernarg Segment */
250 KERNARG_SEGMENT = 0x00000100,
251 /** Readonly Segment */
252 READONLY_SEGMENT = 0x00000200,
253 /** Spill Segment */
254 SPILL_SEGMENT = 0x00000400,
255 /** Arg Segment */
256 ARG_SEGMENT = 0x00000800,
257 };
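    // Sketch (hypothetical combination): a GPU model pairs exactly one
    // scope bit with SCOPE_VALID, plus at most one segment bit, e.g. for
    // a system-scope access to the global segment:
    //
    //     req->setMemSpaceConfigFlags(Request::SCOPE_VALID |
    //                                 Request::SYSTEM_SCOPE |
    //                                 Request::GLOBAL_SEGMENT);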
258
259 private:
260 typedef uint16_t PrivateFlagsType;
261 typedef ::Flags<PrivateFlagsType> PrivateFlags;
262
263 enum : PrivateFlagsType {
264 /** Whether or not the size is valid. */
265 VALID_SIZE = 0x00000001,
266 /** Whether or not paddr is valid (has been written yet). */
267 VALID_PADDR = 0x00000002,
268 /** Whether or not the vaddr & asid are valid. */
269 VALID_VADDR = 0x00000004,
270 /** Whether or not the instruction sequence number is valid. */
271 VALID_INST_SEQ_NUM = 0x00000008,
272 /** Whether or not the pc is valid. */
273 VALID_PC = 0x00000010,
274 /** Whether or not the context ID is valid. */
275 VALID_CONTEXT_ID = 0x00000020,
276 /** Whether or not the sc result is valid. */
277 VALID_EXTRA_DATA = 0x00000080,
278 /** Whether or not the stream ID and substream ID are valid. */
279 VALID_STREAM_ID = 0x00000100,
280 VALID_SUBSTREAM_ID = 0x00000200,
281 /**
282 * These flags are *not* cleared when a Request object is reused
283 * (assigned a new address).
284 */
285 STICKY_PRIVATE_FLAGS = VALID_CONTEXT_ID
286 };
287
288 private:
289
290 /**
291 * Set up a physical (e.g. device) request in a previously
292 * allocated Request object.
293 */
294 void
295 setPhys(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
296 {
297 _paddr = paddr;
298 _size = size;
299 _time = time;
300 _masterId = mid;
301 _flags.clear(~STICKY_FLAGS);
302 _flags.set(flags);
303 privateFlags.clear(~STICKY_PRIVATE_FLAGS);
304 privateFlags.set(VALID_PADDR|VALID_SIZE);
305 depth = 0;
306 accessDelta = 0;
307 //translateDelta = 0;
308 }
309
310 /**
311 * The physical address of the request. Valid only if validPaddr
312 * is set.
313 */
314 Addr _paddr;
315
316 /**
317 * The size of the request. This field must be set when vaddr or
318 * paddr is written via setVirt() or setPhys(), so it is always
319 * valid as long as one of the address fields is valid.
320 */
321 unsigned _size;
322
323 /** The requestor ID which is unique in the system for all ports
324 * that are capable of issuing a transaction.
325 */
326 MasterID _masterId;
327
328 /** Flag structure for the request. */
329 Flags _flags;
330
331 /** Memory space configuration flag structure for the request. */
332 MemSpaceConfigFlags _memSpaceConfigFlags;
333
334 /** Private flags for field validity checking. */
335 PrivateFlags privateFlags;
336
337 /**
338 * The time this request was started. Used to calculate
339 * latencies. This field is set to curTick() any time paddr or vaddr
340 * is written.
341 */
342 Tick _time;
343
344 /**
345 * The task id associated with this request
346 */
347 uint32_t _taskId;
348
349 union {
350 struct {
351 /**
352 * The stream ID uniquely identifies a device behind the
353 * SMMU/IOMMU. Each transaction arriving at the SMMU/IOMMU is
354 * associated with exactly one stream ID.
355 */
356 uint32_t _streamId;
357
358 /**
359 * The substream ID identifies an "execution context" within a
360 * device behind an SMMU/IOMMU. It's intended to map 1-to-1 to
361 * PCIe PASID (Process Address Space ID). The presence of a
362 * substream ID is optional.
363 */
364 uint32_t _substreamId;
365 };
366
367 /** The address space ID. */
368 uint64_t _asid;
369 };
370
371 /** The virtual address of the request. */
372 Addr _vaddr;
373
374 /**
375 * Extra data for the request, such as the return value of
376 * store conditional or the compare value for a CAS. */
377 uint64_t _extraData;
378
379 /** The context ID (for statistics, locks, and wakeups). */
380 ContextID _contextId;
381
382 /** program counter of initiating access; for tracing/debugging */
383 Addr _pc;
384
385 /** Sequence number of the instruction that creates the request */
386 InstSeqNum _reqInstSeqNum;
387
388 /** A pointer to an atomic operation */
389 AtomicOpFunctor *atomicOpFunctor;
390
391 public:
392
393 /**
394 * Minimal constructor. No fields are initialized. (Note that
395 * _flags and privateFlags are cleared by Flags default
396 * constructor.)
397 */
398 Request()
399 : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
400 _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
401 _extraData(0), _contextId(0), _pc(0),
402 _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
403 accessDelta(0), depth(0)
404 {}
405
406 Request(Addr paddr, unsigned size, Flags flags, MasterID mid,
407 InstSeqNum seq_num, ContextID cid)
408 : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
409 _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
410 _extraData(0), _contextId(0), _pc(0),
411 _reqInstSeqNum(seq_num), atomicOpFunctor(nullptr), translateDelta(0),
412 accessDelta(0), depth(0)
413 {
414 setPhys(paddr, size, flags, mid, curTick());
415 setContext(cid);
416 privateFlags.set(VALID_INST_SEQ_NUM);
417 }
418
419 /**
420 * Constructor for physical (e.g. device) requests. Initializes
421 * just physical address, size, flags, and timestamp (to curTick()).
422 * These fields are adequate to perform a request.
423 */
424 Request(Addr paddr, unsigned size, Flags flags, MasterID mid)
425 : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
426 _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
427 _extraData(0), _contextId(0), _pc(0),
428 _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
429 accessDelta(0), depth(0)
430 {
431 setPhys(paddr, size, flags, mid, curTick());
432 }
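    // Sketch of device-side use (address, size and flags hypothetical):
    // this constructor suffices for an access that never has a virtual
    // address, e.g. a functional 4-byte uncacheable read:
    //
    //     RequestPtr req = std::make_shared<Request>(
    //         0x80001000, 4, Request::UNCACHEABLE, Request::funcMasterId);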
433
434 Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
435 : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
436 _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
437 _extraData(0), _contextId(0), _pc(0),
438 _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
439 accessDelta(0), depth(0)
440 {
441 setPhys(paddr, size, flags, mid, time);
442 }
443
444 Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time,
445 Addr pc)
446 : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
447 _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
448 _extraData(0), _contextId(0), _pc(pc),
449 _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
450 accessDelta(0), depth(0)
451 {
452 setPhys(paddr, size, flags, mid, time);
453 privateFlags.set(VALID_PC);
454 }
455
456 Request(uint64_t asid, Addr vaddr, unsigned size, Flags flags,
457 MasterID mid, Addr pc, ContextID cid)
458 : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
459 _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
460 _extraData(0), _contextId(0), _pc(0),
461 _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
462 accessDelta(0), depth(0)
463 {
464 setVirt(asid, vaddr, size, flags, mid, pc);
465 setContext(cid);
466 }
467
468 Request(uint64_t asid, Addr vaddr, unsigned size, Flags flags,
469 MasterID mid, Addr pc, ContextID cid,
470 AtomicOpFunctor *atomic_op)
471 {
472 setVirt(asid, vaddr, size, flags, mid, pc, atomic_op);
473 setContext(cid);
474 }
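    // Sketch of a functor this constructor could take; the only interface
    // assumed is the one exercised below (AtomicOpFunctor's virtual
    // operator()(uint8_t *) and clone()). AtomicAdd32 itself is made up
    // for illustration:
    //
    //     struct AtomicAdd32 : public AtomicOpFunctor
    //     {
    //         uint32_t arg;
    //         AtomicAdd32(uint32_t a) : arg(a) {}
    //         void operator()(uint8_t *p) { *(uint32_t *)p += arg; }
    //         AtomicOpFunctor *clone() { return new AtomicAdd32(*this); }
    //     };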
475
476 Request(const Request& other)
477 : _paddr(other._paddr), _size(other._size),
478 _masterId(other._masterId),
479 _flags(other._flags),
480 _memSpaceConfigFlags(other._memSpaceConfigFlags),
481 privateFlags(other.privateFlags),
482 _time(other._time),
483 _taskId(other._taskId), _asid(other._asid), _vaddr(other._vaddr),
484 _extraData(other._extraData), _contextId(other._contextId),
485 _pc(other._pc), _reqInstSeqNum(other._reqInstSeqNum),
486 translateDelta(other.translateDelta),
487 accessDelta(other.accessDelta), depth(other.depth)
488 {
489 if (other.atomicOpFunctor)
490 atomicOpFunctor = (other.atomicOpFunctor)->clone();
491 else
492 atomicOpFunctor = nullptr;
493 }
494
495 ~Request()
496 {
497 if (hasAtomicOpFunctor()) {
498 delete atomicOpFunctor;
499 }
500 }
501
502 /**
503 * Set up Context numbers.
504 */
505 void
506 setContext(ContextID context_id)
507 {
508 _contextId = context_id;
509 privateFlags.set(VALID_CONTEXT_ID);
510 }
511
512 void
513 setStreamId(uint32_t sid)
514 {
515 _streamId = sid;
516 privateFlags.set(VALID_STREAM_ID);
517 }
518
519 void
520 setSubStreamId(uint32_t ssid)
521 {
522 assert(privateFlags.isSet(VALID_STREAM_ID));
523 _substreamId = ssid;
524 privateFlags.set(VALID_SUBSTREAM_ID);
525 }
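    // Sketch: a device behind an SMMU/IOMMU tags its requests in this
    // order, since setSubStreamId() asserts that a stream ID was set
    // first (sid and pasid are hypothetical values):
    //
    //     req->setStreamId(sid);
    //     req->setSubStreamId(pasid);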
526
527 /**
528 * Set up a virtual (e.g., CPU) request in a previously
529 * allocated Request object.
530 */
531 void
532 setVirt(uint64_t asid, Addr vaddr, unsigned size, Flags flags,
533 MasterID mid, Addr pc, AtomicOpFunctor *amo_op = nullptr)
534 {
535 _asid = asid;
536 _vaddr = vaddr;
537 _size = size;
538 _masterId = mid;
539 _pc = pc;
540 _time = curTick();
541
542 _flags.clear(~STICKY_FLAGS);
543 _flags.set(flags);
544 privateFlags.clear(~STICKY_PRIVATE_FLAGS);
545 privateFlags.set(VALID_VADDR|VALID_SIZE|VALID_PC);
546 depth = 0;
547 accessDelta = 0;
548 translateDelta = 0;
549 atomicOpFunctor = amo_op;
550 }
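    // Sketch of the reuse pattern setVirt() enables (identifiers are
    // hypothetical): a CPU model re-initialises one Request per access
    // and lets translation fill in the physical address afterwards:
    //
    //     req->setVirt(asid, vaddr, size, flags, cpuMasterId, pc);
    //     // ... the TLB/table walker later calls req->setPaddr(paddr);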
551
552 /**
553 * Set just the physical address. This is usually used to record the
554 * result of a translation. However, when using virtualized CPUs
555 * setPhys() is sometimes called to finalize a physical address
556 * without a virtual address, so we can't check if the virtual
557 * address is valid.
558 */
559 void
560 setPaddr(Addr paddr)
561 {
562 _paddr = paddr;
563 privateFlags.set(VALID_PADDR);
564 }
565
566 /**
567 * Generate two requests as if this request had been split into two
568 * pieces. The original request can't have been translated already.
569 */
570 void splitOnVaddr(Addr split_addr, RequestPtr &req1, RequestPtr &req2)
571 {
572 assert(privateFlags.isSet(VALID_VADDR));
573 assert(privateFlags.noneSet(VALID_PADDR));
574 assert(split_addr > _vaddr && split_addr < _vaddr + _size);
575 req1 = std::make_shared<Request>(*this);
576 req2 = std::make_shared<Request>(*this);
577 req1->_size = split_addr - _vaddr;
578 req2->_vaddr = split_addr;
579 req2->_size = _size - req1->_size;
580 }
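    // Worked sketch: an 8-byte access at vaddr 0x0ffc split on a 4 KiB
    // page boundary yields a 4-byte piece at 0x0ffc and a 4-byte piece
    // at 0x1000, both sharing this request's flags and master ID:
    //
    //     RequestPtr r1, r2;
    //     req->splitOnVaddr(0x1000, r1, r2);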
581
582 /**
583 * Accessor for paddr.
584 */
585 bool
586 hasPaddr() const
587 {
588 return privateFlags.isSet(VALID_PADDR);
589 }
590
591 Addr
592 getPaddr() const
593 {
594 assert(privateFlags.isSet(VALID_PADDR));
595 return _paddr;
596 }
597
598 /**
599 * Time for the TLB/table walker to successfully translate this request.
600 */
601 Tick translateDelta;
602
603 /**
604 * Access latency to complete this memory transaction not including
605 * translation time.
606 */
607 Tick accessDelta;
608
609 /**
610 * Level of the cache hierarchy where this request was responded to
611 * (e.g. 0 = L1; 1 = L2).
612 */
613 mutable int depth;
614
615 /**
616 * Accessor for size.
617 */
618 bool
619 hasSize() const
620 {
621 return privateFlags.isSet(VALID_SIZE);
622 }
623
624 unsigned
625 getSize() const
626 {
627 assert(privateFlags.isSet(VALID_SIZE));
628 return _size;
629 }
630
631 /** Accessor for time. */
632 Tick
633 time() const
634 {
635 assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
636 return _time;
637 }
638
639 /**
640 * Accessor for atomic-op functor.
641 */
642 bool
643 hasAtomicOpFunctor()
644 {
645 return atomicOpFunctor != NULL;
646 }
647
648 AtomicOpFunctor *
649 getAtomicOpFunctor()
650 {
651 assert(atomicOpFunctor != NULL);
652 return atomicOpFunctor;
653 }
654
655 /** Accessor for flags. */
656 Flags
657 getFlags()
658 {
659 assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
660 return _flags;
661 }
662
663 /** Note that unlike other accessors, this function sets *specific
664 flags* (ORs them in); it does not assign its argument to the
665 _flags field. Thus this method should rightly be called
666 setFlags() and not just flags(). */
667 void
668 setFlags(Flags flags)
669 {
670 assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
671 _flags.set(flags);
672 }
673
674 void
675 setMemSpaceConfigFlags(MemSpaceConfigFlags extraFlags)
676 {
677 assert(privateFlags.isSet(VALID_PADDR | VALID_VADDR));
678 _memSpaceConfigFlags.set(extraFlags);
679 }
680
681 /** Accessor function for vaddr.*/
682 bool
683 hasVaddr() const
684 {
685 return privateFlags.isSet(VALID_VADDR);
686 }
687
688 Addr
689 getVaddr() const
690 {
691 assert(privateFlags.isSet(VALID_VADDR));
692 return _vaddr;
693 }
694
695 /** Accessor for the requestor id. */
696 MasterID
697 masterId() const
698 {
699 return _masterId;
700 }
701
702 uint32_t
703 taskId() const
704 {
705 return _taskId;
706 }
707
708 void
709 taskId(uint32_t id) {
710 _taskId = id;
711 }
712
713 /** Accessor function for asid.*/
714 uint64_t
715 getAsid() const
716 {
717 assert(privateFlags.isSet(VALID_VADDR));
718 return _asid;
719 }
720
721 /** Accessor function for asid.*/
722 void
723 setAsid(uint64_t asid)
724 {
725 _asid = asid;
726 }
727
728 /** Accessor function for architecture-specific flags.*/
729 ArchFlagsType
730 getArchFlags() const
731 {
732 assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
733 return _flags & ARCH_BITS;
734 }
735
736 /** Accessor function to check if sc result is valid. */
737 bool
738 extraDataValid() const
739 {
740 return privateFlags.isSet(VALID_EXTRA_DATA);
741 }
742
743 /** Accessor function for store conditional return value.*/
744 uint64_t
745 getExtraData() const
746 {
747 assert(privateFlags.isSet(VALID_EXTRA_DATA));
748 return _extraData;
749 }
750
751 /** Accessor function for store conditional return value.*/
752 void
753 setExtraData(uint64_t extraData)
754 {
755 _extraData = extraData;
756 privateFlags.set(VALID_EXTRA_DATA);
757 }
758
759 bool
760 hasContextId() const
761 {
762 return privateFlags.isSet(VALID_CONTEXT_ID);
763 }
764
765 /** Accessor function for context ID.*/
766 ContextID
767 contextId() const
768 {
769 assert(privateFlags.isSet(VALID_CONTEXT_ID));
770 return _contextId;
771 }
772
773 uint32_t
774 streamId() const
775 {
776 assert(privateFlags.isSet(VALID_STREAM_ID));
777 return _streamId;
778 }
779
780 bool
781 hasSubstreamId() const
782 {
783 return privateFlags.isSet(VALID_SUBSTREAM_ID);
784 }
785
786 uint32_t
787 substreamId() const
788 {
789 assert(privateFlags.isSet(VALID_SUBSTREAM_ID));
790 return _substreamId;
791 }
792
793 void
794 setPC(Addr pc)
795 {
796 privateFlags.set(VALID_PC);
797 _pc = pc;
798 }
799
800 bool
801 hasPC() const
802 {
803 return privateFlags.isSet(VALID_PC);
804 }
805
806 /** Accessor function for pc.*/
807 Addr
808 getPC() const
809 {
810 assert(privateFlags.isSet(VALID_PC));
811 return _pc;
812 }
813
814 /**
815 * Increment/Get the depth at which this request is responded to.
816 * This currently happens when the request misses in any cache level.
817 */
818 void incAccessDepth() const { depth++; }
819 int getAccessDepth() const { return depth; }
820
821 /**
822 * Set/Get the time taken for this request to be successfully translated.
823 */
824 void setTranslateLatency() { translateDelta = curTick() - _time; }
825 Tick getTranslateLatency() const { return translateDelta; }
826
827 /**
828 * Set/Get the time taken to complete this request's access, not including
829 * the time to successfully translate the request.
830 */
831 void setAccessLatency() { accessDelta = curTick() - _time - translateDelta; }
832 Tick getAccessLatency() const { return accessDelta; }
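    // Sketch of the intended bookkeeping order: _time is stamped with
    // curTick() by setVirt()/setPhys(), setTranslateLatency() measures
    // from that point, and setAccessLatency() records the remainder:
    //
    //     req->setTranslateLatency();   // when translation completes
    //     req->setAccessLatency();      // when the access completes
    //     Tick total = req->getTranslateLatency() + req->getAccessLatency();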
833
834 /**
835 * Accessor for the sequence number of instruction that creates the
836 * request.
837 */
838 bool
839 hasInstSeqNum() const
840 {
841 return privateFlags.isSet(VALID_INST_SEQ_NUM);
842 }
843
844 InstSeqNum
845 getReqInstSeqNum() const
846 {
847 assert(privateFlags.isSet(VALID_INST_SEQ_NUM));
848 return _reqInstSeqNum;
849 }
850
851 void
852 setReqInstSeqNum(const InstSeqNum seq_num)
853 {
854 privateFlags.set(VALID_INST_SEQ_NUM);
855 _reqInstSeqNum = seq_num;
856 }
857
858 /** Accessor functions for flags. Note that these are for testing
859 only; setting flags should be done via setFlags(). */
860 bool isUncacheable() const { return _flags.isSet(UNCACHEABLE); }
861 bool isStrictlyOrdered() const { return _flags.isSet(STRICT_ORDER); }
862 bool isInstFetch() const { return _flags.isSet(INST_FETCH); }
- 863 bool isPrefetch() const { return _flags.isSet(PREFETCH); }
+ 863 bool isPrefetch() const { return (_flags.isSet(PREFETCH) ||
+ 864 _flags.isSet(PF_EXCLUSIVE)); }
+ 865 bool isPrefetchEx() const { return _flags.isSet(PF_EXCLUSIVE); }
864 bool isLLSC() const { return _flags.isSet(LLSC); }
865 bool isPriv() const { return _flags.isSet(PRIVILEGED); }
866 bool isLockedRMW() const { return _flags.isSet(LOCKED_RMW); }
867 bool isSwap() const { return _flags.isSet(MEM_SWAP|MEM_SWAP_COND); }
868 bool isCondSwap() const { return _flags.isSet(MEM_SWAP_COND); }
869 bool isMmappedIpr() const { return _flags.isSet(MMAPPED_IPR); }
870 bool isSecure() const { return _flags.isSet(SECURE); }
871 bool isPTWalk() const { return _flags.isSet(PT_WALK); }
872 bool isAcquire() const { return _flags.isSet(ACQUIRE); }
873 bool isRelease() const { return _flags.isSet(RELEASE); }
874 bool isKernel() const { return _flags.isSet(KERNEL); }
875 bool isAtomicReturn() const { return _flags.isSet(ATOMIC_RETURN_OP); }
876 bool isAtomicNoReturn() const { return _flags.isSet(ATOMIC_NO_RETURN_OP); }
877
878 bool
879 isAtomic() const
880 {
881 return _flags.isSet(ATOMIC_RETURN_OP) ||
882 _flags.isSet(ATOMIC_NO_RETURN_OP);
883 }
884
885 /**
886 * Accessor functions for the destination of a memory request. The
887 * destination flag can specify a point of reference for the
888 * operation (e.g. a cache block clean to the point of
889 * unification). At the moment the destination is only used by the
890 * cache maintenance operations.
891 */
892 bool isToPOU() const { return _flags.isSet(DST_POU); }
893 bool isToPOC() const { return _flags.isSet(DST_POC); }
894 Flags getDest() const { return _flags & DST_BITS; }
895
896 /**
897 * Accessor functions for the memory space configuration flags, as used by
898 * GPU ISAs such as the Heterogeneous System Architecture (HSA). Note that
899 * these are for testing only; setting extraFlags should be done via
900 * setMemSpaceConfigFlags().
901 */
902 bool isScoped() const { return _memSpaceConfigFlags.isSet(SCOPE_VALID); }
903
904 bool
905 isWavefrontScope() const
906 {
907 assert(isScoped());
908 return _memSpaceConfigFlags.isSet(WAVEFRONT_SCOPE);
909 }
910
911 bool
912 isWorkgroupScope() const
913 {
914 assert(isScoped());
915 return _memSpaceConfigFlags.isSet(WORKGROUP_SCOPE);
916 }
917
918 bool
919 isDeviceScope() const
920 {
921 assert(isScoped());
922 return _memSpaceConfigFlags.isSet(DEVICE_SCOPE);
923 }
924
925 bool
926 isSystemScope() const
927 {
928 assert(isScoped());
929 return _memSpaceConfigFlags.isSet(SYSTEM_SCOPE);
930 }
931
932 bool
933 isGlobalSegment() const
934 {
935 return _memSpaceConfigFlags.isSet(GLOBAL_SEGMENT) ||
936 (!isGroupSegment() && !isPrivateSegment() &&
937 !isKernargSegment() && !isReadonlySegment() &&
938 !isSpillSegment() && !isArgSegment());
939 }
940
941 bool
942 isGroupSegment() const
943 {
944 return _memSpaceConfigFlags.isSet(GROUP_SEGMENT);
945 }
946
947 bool
948 isPrivateSegment() const
949 {
950 return _memSpaceConfigFlags.isSet(PRIVATE_SEGMENT);
951 }
952
953 bool
954 isKernargSegment() const
955 {
956 return _memSpaceConfigFlags.isSet(KERNARG_SEGMENT);
957 }
958
959 bool
960 isReadonlySegment() const
961 {
962 return _memSpaceConfigFlags.isSet(READONLY_SEGMENT);
963 }
964
965 bool
966 isSpillSegment() const
967 {
968 return _memSpaceConfigFlags.isSet(SPILL_SEGMENT);
969 }
970
971 bool
972 isArgSegment() const
973 {
974 return _memSpaceConfigFlags.isSet(ARG_SEGMENT);
975 }
976
977 /**
978 * Accessor functions to determine whether this request is part of
979 * a cache maintenance operation. At the moment three operations
980 * are supported:
981
982 * 1) A cache clean operation updates all copies of a memory
983 * location to the point of reference,
984 * 2) A cache invalidate operation invalidates all copies of the
985 * specified block in the memory above the point of reference,
986 * 3) A clean and invalidate operation is a combination of the two
987 * operations.
988 * @{ */
989 bool isCacheClean() const { return _flags.isSet(CLEAN); }
990 bool isCacheInvalidate() const { return _flags.isSet(INVALIDATE); }
991 bool isCacheMaintenance() const { return _flags.isSet(CLEAN|INVALIDATE); }
992 /** @} */
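    // Sketch (flag combination hypothetical): a clean-and-invalidate to
    // the point of coherence would carry CLEAN, INVALIDATE and DST_POC,
    // after which caches can branch on the accessors above:
    //
    //     req->setFlags(Request::CLEAN | Request::INVALIDATE |
    //                   Request::DST_POC);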
993};
994
995#endif // __MEM_REQUEST_HH__