#include "base/types.hh"
#include "cpu/inst_seq.hh"
#include "sim/core.hh"

/**
 * Special TaskIds that are used for per-context-switch stats dumps
 * and Cache Occupancy. Having too many tasks seems to be a problem
 * with vector stats. 1024 seems to be a reasonable number that
 * doesn't cause a problem with stats and is large enough for realistic
 * benchmarks (Linux/Android boot, BBench, etc.)
 */

namespace ContextSwitchTaskId {
    enum TaskId {
        MaxNormalTaskId = 1021, /* Maximum number of normal tasks */
        Prefetcher = 1022, /* For cache lines brought in by prefetcher */
        DMA = 1023, /* Mostly Table Walker */
        Unknown = 1024,
        NumTaskId
    };
}

class Request;

typedef Request* RequestPtr;
typedef uint16_t MasterID;

class Request
{
  public:
    typedef uint32_t FlagsType;
    typedef uint8_t ArchFlagsType;
    typedef ::Flags<FlagsType> Flags;

    enum : FlagsType {
        /**
         * Architecture specific flags.
         *
         * These bits in the flag field are reserved for
         * architecture-specific code. For example, SPARC uses them to
         * represent ASIs.
         */
        ARCH_BITS = 0x000000FF,
        /** The request was an instruction fetch. */
        INST_FETCH = 0x00000100,
        /** The virtual address is also the physical address. */
        PHYSICAL = 0x00000200,
        /**
         * The request is to an uncacheable address.
         *
         * @note Uncacheable accesses may be reordered by CPU models. The
         * STRICT_ORDER flag should be set if such reordering is
         * undesirable.
         */
        UNCACHEABLE = 0x00000400,
        /**
         * The request is required to be strictly ordered by <i>CPU
         * models</i> and is non-speculative.
         *
         * A strictly ordered request is guaranteed to never be
         * re-ordered or executed speculatively by a CPU model. The
         * memory system may still reorder requests in caches unless
         * the UNCACHEABLE flag is set as well.
         */
        STRICT_ORDER = 0x00000800,
        /** This request is to a memory mapped register. */
        MMAPPED_IPR = 0x00002000,
        /** This request is made in privileged mode. */
        PRIVILEGED = 0x00008000,

        /**
         * This is a write that targets and zeroes an entire cache
         * block, so there is no need for a read/modify/write.
         */
        CACHE_BLOCK_ZERO = 0x00010000,

        /** The request should not cause a memory access. */
        NO_ACCESS = 0x00080000,
        /**
         * This request will lock or unlock the accessed memory. When
         * used with a load, the access locks the particular chunk of
         * memory. When used with a store, it unlocks. The rule is
         * that locked accesses have to be made up of a locked load,
         * some operation on the data, and then a locked store.
         */
        LOCKED_RMW = 0x00100000,
        /** The request is a load locked/store conditional. */
        LLSC = 0x00200000,
        /** This request is for a memory swap. */
        MEM_SWAP = 0x00400000,
        MEM_SWAP_COND = 0x00800000,

        /** The request is a prefetch. */
        PREFETCH = 0x01000000,
        /** The request should be prefetched into the exclusive state. */
        PF_EXCLUSIVE = 0x02000000,
        /** The request should be marked as LRU. */
        EVICT_NEXT = 0x04000000,
        /** The request should be marked with ACQUIRE. */
        ACQUIRE = 0x00020000,
        /** The request should be marked with RELEASE. */
        RELEASE = 0x00040000,

        /** The request is an atomic that returns data. */
        ATOMIC_RETURN_OP = 0x40000000,
        /** The request is an atomic that does not return data. */
        ATOMIC_NO_RETURN_OP = 0x80000000,

        /**
         * The request should be marked with KERNEL.
         * Used to indicate the synchronization associated with a GPU kernel
         * launch or completion.
         */
        KERNEL = 0x00001000,

        /**
         * The request should be handled by the generic IPR code (only
         * valid together with MMAPPED_IPR)
         */
        GENERIC_IPR = 0x08000000,

        /** The request targets the secure memory space. */
        SECURE = 0x10000000,
        /** The request is a page table walk */
        PT_WALK = 0x20000000,

        /**
         * These flags are *not* cleared when a Request object is
         * reused (assigned a new address).
         */
        STICKY_FLAGS = INST_FETCH
    };

    /** Master Ids that are statically allocated
     * @{*/
    enum : MasterID {
        /** This master id is used for writeback requests by the caches */
        wbMasterId = 0,
        /**
         * This master id is used for functional requests that
         * don't come from a particular device
         */
        funcMasterId = 1,
        /** This master id is used for message signaled interrupts */
        intMasterId = 2,
        /**
         * Invalid master id for assertion checking only. It is
         * invalid behavior to ever send this id as part of a request.
         */
        invldMasterId = std::numeric_limits<MasterID>::max()
    };
    /** @} */

    typedef uint32_t MemSpaceConfigFlagsType;
    typedef ::Flags<MemSpaceConfigFlagsType> MemSpaceConfigFlags;

    enum : MemSpaceConfigFlagsType {
        /** Has a synchronization scope been set? */
        SCOPE_VALID = 0x00000001,
        /** Access has Wavefront scope visibility */
        WAVEFRONT_SCOPE = 0x00000002,
        /** Access has Workgroup scope visibility */
        WORKGROUP_SCOPE = 0x00000004,
        /** Access has Device (e.g., GPU) scope visibility */
        DEVICE_SCOPE = 0x00000008,
        /** Access has System (e.g., CPU + GPU) scope visibility */
        SYSTEM_SCOPE = 0x00000010,

        /** Global Segment */
        GLOBAL_SEGMENT = 0x00000020,
        /** Group Segment */
        GROUP_SEGMENT = 0x00000040,
        /** Private Segment */
        PRIVATE_SEGMENT = 0x00000080,
        /** Kernarg Segment */
        KERNARG_SEGMENT = 0x00000100,
        /** Readonly Segment */
        READONLY_SEGMENT = 0x00000200,
        /** Spill Segment */
        SPILL_SEGMENT = 0x00000400,
        /** Arg Segment */
        ARG_SEGMENT = 0x00000800,
    };

  private:
    typedef uint8_t PrivateFlagsType;
    typedef ::Flags<PrivateFlagsType> PrivateFlags;

    enum : PrivateFlagsType {
        /** Whether or not the size is valid. */
        VALID_SIZE = 0x00000001,
        /** Whether or not paddr is valid (has been written yet). */
        VALID_PADDR = 0x00000002,
        /** Whether or not the vaddr & asid are valid. */
        VALID_VADDR = 0x00000004,
        /** Whether or not the instruction sequence number is valid. */
        VALID_INST_SEQ_NUM = 0x00000008,
        /** Whether or not the pc is valid. */
        VALID_PC = 0x00000010,
        /** Whether or not the context ID is valid. */
        VALID_CONTEXT_ID = 0x00000020,
        /** Whether or not the sc result is valid. */
        VALID_EXTRA_DATA = 0x00000080,
        /**
         * These flags are *not* cleared when a Request object is reused
         * (assigned a new address).
         */
        STICKY_PRIVATE_FLAGS = VALID_CONTEXT_ID
    };

  private:

    /**
     * Set up a physical (e.g. device) request in a previously
     * allocated Request object.
     */
    void
    setPhys(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
    {
        _paddr = paddr;
        _size = size;
        _time = time;
        _masterId = mid;
        _flags.clear(~STICKY_FLAGS);
        _flags.set(flags);
        privateFlags.clear(~STICKY_PRIVATE_FLAGS);
        privateFlags.set(VALID_PADDR|VALID_SIZE);
        depth = 0;
        accessDelta = 0;
        //translateDelta = 0;
    }

    /**
     * The physical address of the request. Valid only if the
     * VALID_PADDR flag is set.
     */
    Addr _paddr;

    /**
     * The size of the request. This field must be set when vaddr or
     * paddr is written via setVirt() or setPhys(), so it is always
     * valid as long as one of the address fields is valid.
     */
    unsigned _size;

    /** The requestor ID which is unique in the system for all ports
     * that are capable of issuing a transaction
     */
    MasterID _masterId;

    /** Flag structure for the request. */
    Flags _flags;

    /** Memory space configuration flag structure for the request. */
    MemSpaceConfigFlags _memSpaceConfigFlags;

    /** Private flags for field validity checking. */
    PrivateFlags privateFlags;

    /**
     * The time this request was started. Used to calculate
     * latencies. This field is set to curTick() any time paddr or vaddr
     * is written.
     */
    Tick _time;

    /**
     * The task id associated with this request
     */
    uint32_t _taskId;

    /** The address space ID. */
    int _asid;

    /** The virtual address of the request. */
    Addr _vaddr;

    /**
     * Extra data for the request, such as the return value of
     * store conditional or the compare value for a CAS.
     */
    uint64_t _extraData;

    /** The context ID (for statistics, locks, and wakeups). */
    ContextID _contextId;

    /** program counter of initiating access; for tracing/debugging */
    Addr _pc;

    /** Sequence number of the instruction that creates the request */
    InstSeqNum _reqInstSeqNum;

    /** A pointer to an atomic operation */
    AtomicOpFunctor *atomicOpFunctor;

  public:

    /**
     * Minimal constructor. No fields are initialized. (Note that
     * _flags and privateFlags are cleared by Flags default
     * constructor.)
     */
    Request()
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {}

    Request(Addr paddr, unsigned size, Flags flags, MasterID mid,
            InstSeqNum seq_num, ContextID cid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(seq_num), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, curTick());
        setContext(cid);
        privateFlags.set(VALID_INST_SEQ_NUM);
    }
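    /* Illustrative sketch, not part of the original interface: a CPU
     * model will typically default-construct a Request once and reuse
     * it for successive accesses via setVirt()/setPhys(). Only the
     * flags in STICKY_FLAGS (and STICKY_PRIVATE_FLAGS) survive such
     * reuse; everything else is cleared. The names cpu_master_id, tc,
     * vaddr and pc below are placeholders.
     *
     * @code
     * Request req;                                        // minimal constructor
     * req.setVirt(0, vaddr, 4, 0, cpu_master_id, pc);     // first access
     * req.setContext(tc->contextId());
     * // ... translate, send, complete ...
     * req.setVirt(0, next_vaddr, 4, 0, cpu_master_id, pc); // reused later
     * @endcode
     */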
    /**
     * Constructor for physical (e.g. device) requests.  Initializes
     * just physical address, size, flags, and timestamp (to curTick()).
     * These fields are adequate to perform a request.
     */
    Request(Addr paddr, unsigned size, Flags flags, MasterID mid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, curTick());
    }

    Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, time);
    }

    Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time,
            Addr pc)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(pc),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, time);
        privateFlags.set(VALID_PC);
    }

    Request(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
            Addr pc, ContextID cid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setVirt(asid, vaddr, size, flags, mid, pc);
        setContext(cid);
    }

    Request(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
            Addr pc, ContextID cid, AtomicOpFunctor *atomic_op)
        : atomicOpFunctor(atomic_op)
    {
        setVirt(asid, vaddr, size, flags, mid, pc);
        setContext(cid);
    }

    ~Request()
    {
        if (hasAtomicOpFunctor()) {
            delete atomicOpFunctor;
        }
    }

    /**
     * Set up Context numbers.
     */
    void
    setContext(ContextID context_id)
    {
        _contextId = context_id;
        privateFlags.set(VALID_CONTEXT_ID);
    }

    /**
     * Set up a virtual (e.g., CPU) request in a previously
     * allocated Request object.
     */
    void
    setVirt(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
            Addr pc)
    {
        _asid = asid;
        _vaddr = vaddr;
        _size = size;
        _masterId = mid;
        _pc = pc;
        _time = curTick();

        _flags.clear(~STICKY_FLAGS);
        _flags.set(flags);
        privateFlags.clear(~STICKY_PRIVATE_FLAGS);
        privateFlags.set(VALID_VADDR|VALID_SIZE|VALID_PC);
        depth = 0;
        accessDelta = 0;
        translateDelta = 0;
    }

    /**
     * Set just the physical address. This is usually used to record the
     * result of a translation. However, when using virtualized CPUs
     * setPhys() is sometimes called to finalize a physical address
     * without a virtual address, so we can't check if the virtual
     * address is valid.
     */
    void
    setPaddr(Addr paddr)
    {
        _paddr = paddr;
        privateFlags.set(VALID_PADDR);
    }
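    /* Illustrative sketch, not part of the original interface: the
     * usual lifecycle of a CPU-side request is to build it with a
     * virtual address and later record the translation result with
     * setPaddr(). The names asid, vaddr, data_master_id, pc,
     * context_id and translated_paddr below are placeholders.
     *
     * @code
     * RequestPtr req = new Request(asid, vaddr, 8, 0, data_master_id,
     *                              pc, context_id);
     * // ... the TLB/table walker translates the request ...
     * req->setPaddr(translated_paddr);   // now hasPaddr() is true
     * assert(req->hasVaddr() && req->hasPaddr());
     * @endcode
     */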
    /**
     * Generate two requests as if this request had been split into two
     * pieces. The original request can't have been translated already.
     */
    void splitOnVaddr(Addr split_addr, RequestPtr &req1, RequestPtr &req2)
    {
        assert(privateFlags.isSet(VALID_VADDR));
        assert(privateFlags.noneSet(VALID_PADDR));
        assert(split_addr > _vaddr && split_addr < _vaddr + _size);
        req1 = new Request(*this);
        req2 = new Request(*this);
        req1->_size = split_addr - _vaddr;
        req2->_vaddr = split_addr;
        req2->_size = _size - req1->_size;
    }

    /**
     * Accessor for paddr.
     */
    bool
    hasPaddr() const
    {
        return privateFlags.isSet(VALID_PADDR);
    }

    Addr
    getPaddr() const
    {
        assert(privateFlags.isSet(VALID_PADDR));
        return _paddr;
    }

    /**
     * Time for the TLB/table walker to successfully translate this request.
     */
    Tick translateDelta;

    /**
     * Access latency to complete this memory transaction not including
     * translation time.
     */
    Tick accessDelta;

    /**
     * Level of the cache hierarchy where this request was responded to
     * (e.g. 0 = L1; 1 = L2).
     */
    mutable int depth;

    /**
     * Accessor for size.
     */
    bool
    hasSize() const
    {
        return privateFlags.isSet(VALID_SIZE);
    }

    unsigned
    getSize() const
    {
        assert(privateFlags.isSet(VALID_SIZE));
        return _size;
    }

    /** Accessor for time. */
    Tick
    time() const
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _time;
    }

    /**
     * Accessor for atomic-op functor.
     */
    bool
    hasAtomicOpFunctor()
    {
        return atomicOpFunctor != NULL;
    }

    AtomicOpFunctor *
    getAtomicOpFunctor()
    {
        assert(atomicOpFunctor != NULL);
        return atomicOpFunctor;
    }

    /** Accessor for flags. */
    Flags
    getFlags()
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _flags;
    }

    /** Note that unlike other accessors, this function sets *specific
        flags* (ORs them in); it does not assign its argument to the
        _flags field.  Thus this method should rightly be called
        setFlags() and not just flags(). */
    void
    setFlags(Flags flags)
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        _flags.set(flags);
    }

    void
    setMemSpaceConfigFlags(MemSpaceConfigFlags extraFlags)
    {
        assert(privateFlags.isSet(VALID_PADDR | VALID_VADDR));
        _memSpaceConfigFlags.set(extraFlags);
    }

    /** Accessor function for vaddr.*/
    bool
    hasVaddr() const
    {
        return privateFlags.isSet(VALID_VADDR);
    }

    Addr
    getVaddr() const
    {
        assert(privateFlags.isSet(VALID_VADDR));
        return _vaddr;
    }

    /** Accessor for the requestor id. */
    MasterID
    masterId() const
    {
        return _masterId;
    }

    uint32_t
    taskId() const
    {
        return _taskId;
    }

    void
    taskId(uint32_t id) {
        _taskId = id;
    }

    /** Accessor function for asid.*/
    int
    getAsid() const
    {
        assert(privateFlags.isSet(VALID_VADDR));
        return _asid;
    }

    /** Mutator function for asid.*/
    void
    setAsid(int asid)
    {
        _asid = asid;
    }

    /** Accessor function for architecture-specific flags.*/
    ArchFlagsType
    getArchFlags() const
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _flags & ARCH_BITS;
    }
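    /* Illustrative sketch, not part of the original interface: a
     * request that straddles a boundary can be split with
     * splitOnVaddr() before it is translated; each half is then
     * translated and sent separately. split_addr is a placeholder
     * for, e.g., the start of the next page or cache line.
     *
     * @code
     * RequestPtr req1 = nullptr;
     * RequestPtr req2 = nullptr;
     * if (req->hasVaddr() && !req->hasPaddr()) {
     *     req->splitOnVaddr(split_addr, req1, req2);
     *     // req1 covers [vaddr, split_addr); req2 covers the remainder
     * }
     * @endcode
     */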
    /** Accessor function to check if sc result is valid. */
    bool
    extraDataValid() const
    {
        return privateFlags.isSet(VALID_EXTRA_DATA);
    }

    /** Accessor function for store conditional return value.*/
    uint64_t
    getExtraData() const
    {
        assert(privateFlags.isSet(VALID_EXTRA_DATA));
        return _extraData;
    }

    /** Accessor function for store conditional return value.*/
    void
    setExtraData(uint64_t extraData)
    {
        _extraData = extraData;
        privateFlags.set(VALID_EXTRA_DATA);
    }

    bool
    hasContextId() const
    {
        return privateFlags.isSet(VALID_CONTEXT_ID);
    }

    /** Accessor function for context ID.*/
    ContextID
    contextId() const
    {
        assert(privateFlags.isSet(VALID_CONTEXT_ID));
        return _contextId;
    }

    void
    setPC(Addr pc)
    {
        privateFlags.set(VALID_PC);
        _pc = pc;
    }

    bool
    hasPC() const
    {
        return privateFlags.isSet(VALID_PC);
    }

    /** Accessor function for pc.*/
    Addr
    getPC() const
    {
        assert(privateFlags.isSet(VALID_PC));
        return _pc;
    }

    /**
     * Increment/Get the depth at which this request is responded to.
     * This currently happens when the request misses in any cache level.
     */
    void incAccessDepth() const { depth++; }
    int getAccessDepth() const { return depth; }

    /**
     * Set/Get the time taken for this request to be successfully translated.
     */
    void setTranslateLatency() { translateDelta = curTick() - _time; }
    Tick getTranslateLatency() const { return translateDelta; }

    /**
     * Set/Get the time taken to complete this request's access, not including
     * the time to successfully translate the request.
     */
    void setAccessLatency() { accessDelta = curTick() - _time - translateDelta; }
    Tick getAccessLatency() const { return accessDelta; }

    /**
     * Accessor for the sequence number of instruction that creates the
     * request.
     */
    bool
    hasInstSeqNum() const
    {
        return privateFlags.isSet(VALID_INST_SEQ_NUM);
    }

    InstSeqNum
    getReqInstSeqNum() const
    {
        assert(privateFlags.isSet(VALID_INST_SEQ_NUM));
        return _reqInstSeqNum;
    }

    void
    setReqInstSeqNum(const InstSeqNum seq_num)
    {
        privateFlags.set(VALID_INST_SEQ_NUM);
        _reqInstSeqNum = seq_num;
    }
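    /* Illustrative sketch, not part of the original interface: the
     * latency-accounting hooks above are measured relative to _time,
     * which is set when the address was assigned. A CPU model might
     * drive them as below; the surrounding event-handling code is a
     * placeholder.
     *
     * @code
     * // when translation finishes:
     * req->setTranslateLatency();   // translateDelta = curTick() - _time
     * // each time the access misses and moves down a cache level:
     * req->incAccessDepth();
     * // when the response arrives:
     * req->setAccessLatency();      // excludes translation time
     * Tick total = req->getTranslateLatency() + req->getAccessLatency();
     * @endcode
     */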
    /** Accessor functions for flags.  Note that these are for testing
        only; setting flags should be done via setFlags(). */
    bool isUncacheable() const { return _flags.isSet(UNCACHEABLE); }
    bool isStrictlyOrdered() const { return _flags.isSet(STRICT_ORDER); }
    bool isInstFetch() const { return _flags.isSet(INST_FETCH); }
    bool isPrefetch() const { return _flags.isSet(PREFETCH); }
    bool isLLSC() const { return _flags.isSet(LLSC); }
    bool isPriv() const { return _flags.isSet(PRIVILEGED); }
    bool isLockedRMW() const { return _flags.isSet(LOCKED_RMW); }
    bool isSwap() const { return _flags.isSet(MEM_SWAP|MEM_SWAP_COND); }
    bool isCondSwap() const { return _flags.isSet(MEM_SWAP_COND); }
    bool isMmappedIpr() const { return _flags.isSet(MMAPPED_IPR); }
    bool isSecure() const { return _flags.isSet(SECURE); }
    bool isPTWalk() const { return _flags.isSet(PT_WALK); }
    bool isAcquire() const { return _flags.isSet(ACQUIRE); }
    bool isRelease() const { return _flags.isSet(RELEASE); }
    bool isKernel() const { return _flags.isSet(KERNEL); }
    bool isAtomicReturn() const { return _flags.isSet(ATOMIC_RETURN_OP); }
    bool isAtomicNoReturn() const { return _flags.isSet(ATOMIC_NO_RETURN_OP); }

    bool
    isAtomic() const
    {
        return _flags.isSet(ATOMIC_RETURN_OP) ||
               _flags.isSet(ATOMIC_NO_RETURN_OP);
    }

    /**
     * Accessor functions for the memory space configuration flags, used by
     * GPU ISAs such as the Heterogeneous System Architecture (HSA). Note that
     * these are for testing only; setting extraFlags should be done via
     * setMemSpaceConfigFlags().
     */
    bool isScoped() const { return _memSpaceConfigFlags.isSet(SCOPE_VALID); }

    bool
    isWavefrontScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(WAVEFRONT_SCOPE);
    }

    bool
    isWorkgroupScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(WORKGROUP_SCOPE);
    }

    bool
    isDeviceScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(DEVICE_SCOPE);
    }

    bool
    isSystemScope() const
    {
        assert(isScoped());
        return _memSpaceConfigFlags.isSet(SYSTEM_SCOPE);
    }

    bool
    isGlobalSegment() const
    {
        return _memSpaceConfigFlags.isSet(GLOBAL_SEGMENT) ||
               (!isGroupSegment() && !isPrivateSegment() &&
                !isKernargSegment() && !isReadonlySegment() &&
                !isSpillSegment() && !isArgSegment());
    }

    bool
    isGroupSegment() const
    {
        return _memSpaceConfigFlags.isSet(GROUP_SEGMENT);
    }

    bool
    isPrivateSegment() const
    {
        return _memSpaceConfigFlags.isSet(PRIVATE_SEGMENT);
    }

    bool
    isKernargSegment() const
    {
        return _memSpaceConfigFlags.isSet(KERNARG_SEGMENT);
    }

    bool
    isReadonlySegment() const
    {
        return _memSpaceConfigFlags.isSet(READONLY_SEGMENT);
    }

    bool
    isSpillSegment() const
    {
        return _memSpaceConfigFlags.isSet(SPILL_SEGMENT);
    }

    bool
    isArgSegment() const
    {
        return _memSpaceConfigFlags.isSet(ARG_SEGMENT);
    }
};

#endif // __MEM_REQUEST_HH__