request.hh revision 11306:a5340a2a24f9
1/* 2 * Copyright (c) 2012-2013 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2002-2005 The Regents of The University of Michigan 15 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc. 16 * All rights reserved. 17 * 18 * Redistribution and use in source and binary forms, with or without 19 * modification, are permitted provided that the following conditions are 20 * met: redistributions of source code must retain the above copyright 21 * notice, this list of conditions and the following disclaimer; 22 * redistributions in binary form must reproduce the above copyright 23 * notice, this list of conditions and the following disclaimer in the 24 * documentation and/or other materials provided with the distribution; 25 * neither the name of the copyright holders nor the names of its 26 * contributors may be used to endorse or promote products derived from 27 * this software without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 40 * 41 * Authors: Ron Dreslinski 42 * Steve Reinhardt 43 * Ali Saidi 44 */ 45 46/** 47 * @file 48 * Declaration of a request, the overall memory request consisting of 49 the parts of the request that are persistent throughout the transaction. 50 */ 51 52#ifndef __MEM_REQUEST_HH__ 53#define __MEM_REQUEST_HH__ 54 55#include <cassert> 56#include <climits> 57 58#include "base/flags.hh" 59#include "base/misc.hh" 60#include "base/types.hh" 61#include "cpu/inst_seq.hh" 62#include "sim/core.hh" 63 64/** 65 * Special TaskIds that are used for per-context-switch stats dumps 66 * and Cache Occupancy. Having too many tasks seems to be a problem 67 * with vector stats. 1024 seems to be a reasonable number that 68 * doesn't cause a problem with stats and is large enough to realistic 69 * benchmarks (Linux/Android boot, BBench, etc.) 70 */ 71 72namespace ContextSwitchTaskId { 73 enum TaskId { 74 MaxNormalTaskId = 1021, /* Maximum number of normal tasks */ 75 Prefetcher = 1022, /* For cache lines brought in by prefetcher */ 76 DMA = 1023, /* Mostly Table Walker */ 77 Unknown = 1024, 78 NumTaskId 79 }; 80} 81 82class Request; 83 84typedef Request* RequestPtr; 85typedef uint16_t MasterID; 86 87class Request 88{ 89 public: 90 typedef uint32_t FlagsType; 91 typedef uint8_t ArchFlagsType; 92 typedef ::Flags<FlagsType> Flags; 93 94 enum : FlagsType { 95 /** 96 * Architecture specific flags. 97 * 98 * These bits int the flag field are reserved for 99 * architecture-specific code. For example, SPARC uses them to 100 * represent ASIs. 101 */ 102 ARCH_BITS = 0x000000FF, 103 /** The request was an instruction fetch. */ 104 INST_FETCH = 0x00000100, 105 /** The virtual address is also the physical address. */ 106 PHYSICAL = 0x00000200, 107 /** 108 * The request is to an uncacheable address. 109 * 110 * @note Uncacheable accesses may be reordered by CPU models. The 111 * STRICT_ORDER flag should be set if such reordering is 112 * undesirable. 113 */ 114 UNCACHEABLE = 0x00000400, 115 /** 116 * The request is required to be strictly ordered by <i>CPU 117 * models</i> and is non-speculative. 118 * 119 * A strictly ordered request is guaranteed to never be 120 * re-ordered or executed speculatively by a CPU model. The 121 * memory system may still reorder requests in caches unless 122 * the UNCACHEABLE flag is set as well. 123 */ 124 STRICT_ORDER = 0x00000800, 125 /** This request is to a memory mapped register. */ 126 MMAPPED_IPR = 0x00002000, 127 /** This request is made in privileged mode. */ 128 PRIVILEGED = 0x00008000, 129 130 /** 131 * This is a write that is targeted and zeroing an entire 132 * cache block. There is no need for a read/modify/write 133 */ 134 CACHE_BLOCK_ZERO = 0x00010000, 135 136 /** The request should not cause a memory access. */ 137 NO_ACCESS = 0x00080000, 138 /** 139 * This request will lock or unlock the accessed memory. When 140 * used with a load, the access locks the particular chunk of 141 * memory. When used with a store, it unlocks. The rule is 142 * that locked accesses have to be made up of a locked load, 143 * some operation on the data, and then a locked store. 144 */ 145 LOCKED_RMW = 0x00100000, 146 /** The request is a Load locked/store conditional. */ 147 LLSC = 0x00200000, 148 /** This request is for a memory swap. */ 149 MEM_SWAP = 0x00400000, 150 MEM_SWAP_COND = 0x00800000, 151 152 /** The request is a prefetch. */ 153 PREFETCH = 0x01000000, 154 /** The request should be prefetched into the exclusive state. */ 155 PF_EXCLUSIVE = 0x02000000, 156 /** The request should be marked as LRU. */ 157 EVICT_NEXT = 0x04000000, 158 /** The request should be marked with ACQUIRE. */ 159 ACQUIRE = 0x00020000, 160 /** The request should be marked with RELEASE. */ 161 RELEASE = 0x00040000, 162 163 /** The request is an atomic that returns data. */ 164 ATOMIC_RETURN_OP = 0x40000000, 165 /** The request is an atomic that does not return data. */ 166 ATOMIC_NO_RETURN_OP = 0x80000000, 167 168 /** The request should be marked with KERNEL. 169 * Used to indicate the synchronization associated with a GPU kernel 170 * launch or completion. 171 */ 172 KERNEL = 0x00001000, 173 174 /** 175 * The request should be handled by the generic IPR code (only 176 * valid together with MMAPPED_IPR) 177 */ 178 GENERIC_IPR = 0x08000000, 179 180 /** The request targets the secure memory space. */ 181 SECURE = 0x10000000, 182 /** The request is a page table walk */ 183 PT_WALK = 0x20000000, 184 185 /** 186 * These flags are *not* cleared when a Request object is 187 * reused (assigned a new address). 188 */ 189 STICKY_FLAGS = INST_FETCH 190 }; 191 192 /** Master Ids that are statically allocated 193 * @{*/ 194 enum : MasterID { 195 /** This master id is used for writeback requests by the caches */ 196 wbMasterId = 0, 197 /** 198 * This master id is used for functional requests that 199 * don't come from a particular device 200 */ 201 funcMasterId = 1, 202 /** This master id is used for message signaled interrupts */ 203 intMasterId = 2, 204 /** 205 * Invalid master id for assertion checking only. It is 206 * invalid behavior to ever send this id as part of a request. 207 */ 208 invldMasterId = std::numeric_limits<MasterID>::max() 209 }; 210 /** @} */ 211 212 typedef uint32_t MemSpaceConfigFlagsType; 213 typedef ::Flags<MemSpaceConfigFlagsType> MemSpaceConfigFlags; 214 215 enum : MemSpaceConfigFlagsType { 216 /** Has a synchronization scope been set? */ 217 SCOPE_VALID = 0x00000001, 218 /** Access has Wavefront scope visibility */ 219 WAVEFRONT_SCOPE = 0x00000002, 220 /** Access has Workgroup scope visibility */ 221 WORKGROUP_SCOPE = 0x00000004, 222 /** Access has Device (e.g., GPU) scope visibility */ 223 DEVICE_SCOPE = 0x00000008, 224 /** Access has System (e.g., CPU + GPU) scope visibility */ 225 SYSTEM_SCOPE = 0x00000010, 226 227 /** Global Segment */ 228 GLOBAL_SEGMENT = 0x00000020, 229 /** Group Segment */ 230 GROUP_SEGMENT = 0x00000040, 231 /** Private Segment */ 232 PRIVATE_SEGMENT = 0x00000080, 233 /** Kergarg Segment */ 234 KERNARG_SEGMENT = 0x00000100, 235 /** Readonly Segment */ 236 READONLY_SEGMENT = 0x00000200, 237 /** Spill Segment */ 238 SPILL_SEGMENT = 0x00000400, 239 /** Arg Segment */ 240 ARG_SEGMENT = 0x00000800, 241 }; 242 243 private: 244 typedef uint8_t PrivateFlagsType; 245 typedef ::Flags<PrivateFlagsType> PrivateFlags; 246 247 enum : PrivateFlagsType { 248 /** Whether or not the size is valid. */ 249 VALID_SIZE = 0x00000001, 250 /** Whether or not paddr is valid (has been written yet). */ 251 VALID_PADDR = 0x00000002, 252 /** Whether or not the vaddr & asid are valid. */ 253 VALID_VADDR = 0x00000004, 254 /** Whether or not the instruction sequence number is valid. */ 255 VALID_INST_SEQ_NUM = 0x00000008, 256 /** Whether or not the pc is valid. */ 257 VALID_PC = 0x00000010, 258 /** Whether or not the context ID is valid. */ 259 VALID_CONTEXT_ID = 0x00000020, 260 VALID_THREAD_ID = 0x00000040, 261 /** Whether or not the sc result is valid. */ 262 VALID_EXTRA_DATA = 0x00000080, 263 /** 264 * These flags are *not* cleared when a Request object is reused 265 * (assigned a new address). 266 */ 267 STICKY_PRIVATE_FLAGS = VALID_CONTEXT_ID | VALID_THREAD_ID 268 }; 269 270 private: 271 272 /** 273 * Set up a physical (e.g. device) request in a previously 274 * allocated Request object. 275 */ 276 void 277 setPhys(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time) 278 { 279 _paddr = paddr; 280 _size = size; 281 _time = time; 282 _masterId = mid; 283 _flags.clear(~STICKY_FLAGS); 284 _flags.set(flags); 285 privateFlags.clear(~STICKY_PRIVATE_FLAGS); 286 privateFlags.set(VALID_PADDR|VALID_SIZE); 287 depth = 0; 288 accessDelta = 0; 289 //translateDelta = 0; 290 } 291 292 /** 293 * The physical address of the request. Valid only if validPaddr 294 * is set. 295 */ 296 Addr _paddr; 297 298 /** 299 * The size of the request. This field must be set when vaddr or 300 * paddr is written via setVirt() or setPhys(), so it is always 301 * valid as long as one of the address fields is valid. 302 */ 303 unsigned _size; 304 305 /** The requestor ID which is unique in the system for all ports 306 * that are capable of issuing a transaction 307 */ 308 MasterID _masterId; 309 310 /** Flag structure for the request. */ 311 Flags _flags; 312 313 /** Memory space configuraiton flag structure for the request. */ 314 MemSpaceConfigFlags _memSpaceConfigFlags; 315 316 /** Private flags for field validity checking. */ 317 PrivateFlags privateFlags; 318 319 /** 320 * The time this request was started. Used to calculate 321 * latencies. This field is set to curTick() any time paddr or vaddr 322 * is written. 323 */ 324 Tick _time; 325 326 /** 327 * The task id associated with this request 328 */ 329 uint32_t _taskId; 330 331 /** The address space ID. */ 332 int _asid; 333 334 /** The virtual address of the request. */ 335 Addr _vaddr; 336 337 /** 338 * Extra data for the request, such as the return value of 339 * store conditional or the compare value for a CAS. */ 340 uint64_t _extraData; 341 342 /** The context ID (for statistics, typically). */ 343 ContextID _contextId; 344 /** The thread ID (id within this CPU) */ 345 ThreadID _threadId; 346 347 /** program counter of initiating access; for tracing/debugging */ 348 Addr _pc; 349 350 /** Sequence number of the instruction that creates the request */ 351 InstSeqNum _reqInstSeqNum; 352 353 /** A pointer to an atomic operation */ 354 AtomicOpFunctor *atomicOpFunctor; 355 356 public: 357 358 /** 359 * Minimal constructor. No fields are initialized. (Note that 360 * _flags and privateFlags are cleared by Flags default 361 * constructor.) 362 */ 363 Request() 364 : _paddr(0), _size(0), _masterId(invldMasterId), _time(0), 365 _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0), 366 _extraData(0), _contextId(0), _threadId(0), _pc(0), 367 _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0), 368 accessDelta(0), depth(0) 369 {} 370 371 Request(Addr paddr, unsigned size, Flags flags, MasterID mid, 372 InstSeqNum seq_num, ContextID cid, ThreadID tid) 373 : _paddr(0), _size(0), _masterId(invldMasterId), _time(0), 374 _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0), 375 _extraData(0), _contextId(0), _threadId(0), _pc(0), 376 _reqInstSeqNum(seq_num), atomicOpFunctor(nullptr), translateDelta(0), 377 accessDelta(0), depth(0) 378 { 379 setPhys(paddr, size, flags, mid, curTick()); 380 setThreadContext(cid, tid); 381 privateFlags.set(VALID_INST_SEQ_NUM); 382 } 383 384 /** 385 * Constructor for physical (e.g. device) requests. Initializes 386 * just physical address, size, flags, and timestamp (to curTick()). 387 * These fields are adequate to perform a request. 388 */ 389 Request(Addr paddr, unsigned size, Flags flags, MasterID mid) 390 : _paddr(0), _size(0), _masterId(invldMasterId), _time(0), 391 _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0), 392 _extraData(0), _contextId(0), _threadId(0), _pc(0), 393 _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0), 394 accessDelta(0), depth(0) 395 { 396 setPhys(paddr, size, flags, mid, curTick()); 397 } 398 399 Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time) 400 : _paddr(0), _size(0), _masterId(invldMasterId), _time(0), 401 _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0), 402 _extraData(0), _contextId(0), _threadId(0), _pc(0), 403 _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0), 404 accessDelta(0), depth(0) 405 { 406 setPhys(paddr, size, flags, mid, time); 407 } 408 409 Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time, 410 Addr pc) 411 : _paddr(0), _size(0), _masterId(invldMasterId), _time(0), 412 _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0), 413 _extraData(0), _contextId(0), _threadId(0), _pc(pc), 414 _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0), 415 accessDelta(0), depth(0) 416 { 417 setPhys(paddr, size, flags, mid, time); 418 privateFlags.set(VALID_PC); 419 } 420 421 Request(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid, 422 Addr pc, ContextID cid, ThreadID tid) 423 : _paddr(0), _size(0), _masterId(invldMasterId), _time(0), 424 _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0), 425 _extraData(0), _contextId(0), _threadId(0), _pc(0), 426 _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0), 427 accessDelta(0), depth(0) 428 { 429 setVirt(asid, vaddr, size, flags, mid, pc); 430 setThreadContext(cid, tid); 431 } 432 433 Request(int asid, Addr vaddr, int size, Flags flags, MasterID mid, Addr pc, 434 int cid, ThreadID tid, AtomicOpFunctor *atomic_op) 435 : atomicOpFunctor(atomic_op) 436 { 437 setVirt(asid, vaddr, size, flags, mid, pc); 438 setThreadContext(cid, tid); 439 } 440 441 ~Request() 442 { 443 if (hasAtomicOpFunctor()) { 444 delete atomicOpFunctor; 445 } 446 } 447 448 /** 449 * Set up CPU and thread numbers. 450 */ 451 void 452 setThreadContext(ContextID context_id, ThreadID tid) 453 { 454 _contextId = context_id; 455 _threadId = tid; 456 privateFlags.set(VALID_CONTEXT_ID|VALID_THREAD_ID); 457 } 458 459 /** 460 * Set up a virtual (e.g., CPU) request in a previously 461 * allocated Request object. 462 */ 463 void 464 setVirt(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid, 465 Addr pc) 466 { 467 _asid = asid; 468 _vaddr = vaddr; 469 _size = size; 470 _masterId = mid; 471 _pc = pc; 472 _time = curTick(); 473 474 _flags.clear(~STICKY_FLAGS); 475 _flags.set(flags); 476 privateFlags.clear(~STICKY_PRIVATE_FLAGS); 477 privateFlags.set(VALID_VADDR|VALID_SIZE|VALID_PC); 478 depth = 0; 479 accessDelta = 0; 480 translateDelta = 0; 481 } 482 483 /** 484 * Set just the physical address. This usually used to record the 485 * result of a translation. However, when using virtualized CPUs 486 * setPhys() is sometimes called to finalize a physical address 487 * without a virtual address, so we can't check if the virtual 488 * address is valid. 489 */ 490 void 491 setPaddr(Addr paddr) 492 { 493 _paddr = paddr; 494 privateFlags.set(VALID_PADDR); 495 } 496 497 /** 498 * Generate two requests as if this request had been split into two 499 * pieces. The original request can't have been translated already. 500 */ 501 void splitOnVaddr(Addr split_addr, RequestPtr &req1, RequestPtr &req2) 502 { 503 assert(privateFlags.isSet(VALID_VADDR)); 504 assert(privateFlags.noneSet(VALID_PADDR)); 505 assert(split_addr > _vaddr && split_addr < _vaddr + _size); 506 req1 = new Request(*this); 507 req2 = new Request(*this); 508 req1->_size = split_addr - _vaddr; 509 req2->_vaddr = split_addr; 510 req2->_size = _size - req1->_size; 511 } 512 513 /** 514 * Accessor for paddr. 515 */ 516 bool 517 hasPaddr() const 518 { 519 return privateFlags.isSet(VALID_PADDR); 520 } 521 522 Addr 523 getPaddr() const 524 { 525 assert(privateFlags.isSet(VALID_PADDR)); 526 return _paddr; 527 } 528 529 /** 530 * Time for the TLB/table walker to successfully translate this request. 531 */ 532 Tick translateDelta; 533 534 /** 535 * Access latency to complete this memory transaction not including 536 * translation time. 537 */ 538 Tick accessDelta; 539 540 /** 541 * Level of the cache hierachy where this request was responded to 542 * (e.g. 0 = L1; 1 = L2). 543 */ 544 mutable int depth; 545 546 /** 547 * Accessor for size. 548 */ 549 bool 550 hasSize() const 551 { 552 return privateFlags.isSet(VALID_SIZE); 553 } 554 555 unsigned 556 getSize() const 557 { 558 assert(privateFlags.isSet(VALID_SIZE)); 559 return _size; 560 } 561 562 /** Accessor for time. */ 563 Tick 564 time() const 565 { 566 assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR)); 567 return _time; 568 } 569 570 /** 571 * Accessor for atomic-op functor. 572 */ 573 bool 574 hasAtomicOpFunctor() 575 { 576 return atomicOpFunctor != NULL; 577 } 578 579 AtomicOpFunctor * 580 getAtomicOpFunctor() 581 { 582 assert(atomicOpFunctor != NULL); 583 return atomicOpFunctor; 584 } 585 586 /** Accessor for flags. */ 587 Flags 588 getFlags() 589 { 590 assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR)); 591 return _flags; 592 } 593 594 /** Note that unlike other accessors, this function sets *specific 595 flags* (ORs them in); it does not assign its argument to the 596 _flags field. Thus this method should rightly be called 597 setFlags() and not just flags(). */ 598 void 599 setFlags(Flags flags) 600 { 601 assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR)); 602 _flags.set(flags); 603 } 604 605 void 606 setMemSpaceConfigFlags(MemSpaceConfigFlags extraFlags) 607 { 608 assert(privateFlags.isSet(VALID_PADDR | VALID_VADDR)); 609 _memSpaceConfigFlags.set(extraFlags); 610 } 611 612 /** Accessor function for vaddr.*/ 613 bool 614 hasVaddr() const 615 { 616 return privateFlags.isSet(VALID_VADDR); 617 } 618 619 Addr 620 getVaddr() const 621 { 622 assert(privateFlags.isSet(VALID_VADDR)); 623 return _vaddr; 624 } 625 626 /** Accesssor for the requestor id. */ 627 MasterID 628 masterId() const 629 { 630 return _masterId; 631 } 632 633 uint32_t 634 taskId() const 635 { 636 return _taskId; 637 } 638 639 void 640 taskId(uint32_t id) { 641 _taskId = id; 642 } 643 644 /** Accessor function for asid.*/ 645 int 646 getAsid() const 647 { 648 assert(privateFlags.isSet(VALID_VADDR)); 649 return _asid; 650 } 651 652 /** Accessor function for asid.*/ 653 void 654 setAsid(int asid) 655 { 656 _asid = asid; 657 } 658 659 /** Accessor function for architecture-specific flags.*/ 660 ArchFlagsType 661 getArchFlags() const 662 { 663 assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR)); 664 return _flags & ARCH_BITS; 665 } 666 667 /** Accessor function to check if sc result is valid. */ 668 bool 669 extraDataValid() const 670 { 671 return privateFlags.isSet(VALID_EXTRA_DATA); 672 } 673 674 /** Accessor function for store conditional return value.*/ 675 uint64_t 676 getExtraData() const 677 { 678 assert(privateFlags.isSet(VALID_EXTRA_DATA)); 679 return _extraData; 680 } 681 682 /** Accessor function for store conditional return value.*/ 683 void 684 setExtraData(uint64_t extraData) 685 { 686 _extraData = extraData; 687 privateFlags.set(VALID_EXTRA_DATA); 688 } 689 690 bool 691 hasContextId() const 692 { 693 return privateFlags.isSet(VALID_CONTEXT_ID); 694 } 695 696 /** Accessor function for context ID.*/ 697 ContextID 698 contextId() const 699 { 700 assert(privateFlags.isSet(VALID_CONTEXT_ID)); 701 return _contextId; 702 } 703 704 /** Accessor function for thread ID. */ 705 ThreadID 706 threadId() const 707 { 708 assert(privateFlags.isSet(VALID_THREAD_ID)); 709 return _threadId; 710 } 711 712 void 713 setPC(Addr pc) 714 { 715 privateFlags.set(VALID_PC); 716 _pc = pc; 717 } 718 719 bool 720 hasPC() const 721 { 722 return privateFlags.isSet(VALID_PC); 723 } 724 725 /** Accessor function for pc.*/ 726 Addr 727 getPC() const 728 { 729 assert(privateFlags.isSet(VALID_PC)); 730 return _pc; 731 } 732 733 /** 734 * Increment/Get the depth at which this request is responded to. 735 * This currently happens when the request misses in any cache level. 736 */ 737 void incAccessDepth() const { depth++; } 738 int getAccessDepth() const { return depth; } 739 740 /** 741 * Set/Get the time taken for this request to be successfully translated. 742 */ 743 void setTranslateLatency() { translateDelta = curTick() - _time; } 744 Tick getTranslateLatency() const { return translateDelta; } 745 746 /** 747 * Set/Get the time taken to complete this request's access, not including 748 * the time to successfully translate the request. 749 */ 750 void setAccessLatency() { accessDelta = curTick() - _time - translateDelta; } 751 Tick getAccessLatency() const { return accessDelta; } 752 753 /** 754 * Accessor for the sequence number of instruction that creates the 755 * request. 756 */ 757 bool 758 hasInstSeqNum() const 759 { 760 return privateFlags.isSet(VALID_INST_SEQ_NUM); 761 } 762 763 InstSeqNum 764 getReqInstSeqNum() const 765 { 766 assert(privateFlags.isSet(VALID_INST_SEQ_NUM)); 767 return _reqInstSeqNum; 768 } 769 770 void 771 setReqInstSeqNum(const InstSeqNum seq_num) 772 { 773 privateFlags.set(VALID_INST_SEQ_NUM); 774 _reqInstSeqNum = seq_num; 775 } 776 777 /** Accessor functions for flags. Note that these are for testing 778 only; setting flags should be done via setFlags(). */ 779 bool isUncacheable() const { return _flags.isSet(UNCACHEABLE); } 780 bool isStrictlyOrdered() const { return _flags.isSet(STRICT_ORDER); } 781 bool isInstFetch() const { return _flags.isSet(INST_FETCH); } 782 bool isPrefetch() const { return _flags.isSet(PREFETCH); } 783 bool isLLSC() const { return _flags.isSet(LLSC); } 784 bool isPriv() const { return _flags.isSet(PRIVILEGED); } 785 bool isLockedRMW() const { return _flags.isSet(LOCKED_RMW); } 786 bool isSwap() const { return _flags.isSet(MEM_SWAP|MEM_SWAP_COND); } 787 bool isCondSwap() const { return _flags.isSet(MEM_SWAP_COND); } 788 bool isMmappedIpr() const { return _flags.isSet(MMAPPED_IPR); } 789 bool isSecure() const { return _flags.isSet(SECURE); } 790 bool isPTWalk() const { return _flags.isSet(PT_WALK); } 791 bool isAcquire() const { return _flags.isSet(ACQUIRE); } 792 bool isRelease() const { return _flags.isSet(RELEASE); } 793 bool isKernel() const { return _flags.isSet(KERNEL); } 794 bool isAtomicReturn() const { return _flags.isSet(ATOMIC_RETURN_OP); } 795 bool isAtomicNoReturn() const { return _flags.isSet(ATOMIC_NO_RETURN_OP); } 796 797 bool 798 isAtomic() const 799 { 800 return _flags.isSet(ATOMIC_RETURN_OP) || 801 _flags.isSet(ATOMIC_NO_RETURN_OP); 802 } 803 804 /** 805 * Accessor functions for the memory space configuration flags and used by 806 * GPU ISAs such as the Heterogeneous System Architecture (HSA). Note that 807 * these are for testing only; setting extraFlags should be done via 808 * setMemSpaceConfigFlags(). 809 */ 810 bool isScoped() const { return _memSpaceConfigFlags.isSet(SCOPE_VALID); } 811 812 bool 813 isWavefrontScope() const 814 { 815 assert(isScoped()); 816 return _memSpaceConfigFlags.isSet(WAVEFRONT_SCOPE); 817 } 818 819 bool 820 isWorkgroupScope() const 821 { 822 assert(isScoped()); 823 return _memSpaceConfigFlags.isSet(WORKGROUP_SCOPE); 824 } 825 826 bool 827 isDeviceScope() const 828 { 829 assert(isScoped()); 830 return _memSpaceConfigFlags.isSet(DEVICE_SCOPE); 831 } 832 833 bool 834 isSystemScope() const 835 { 836 assert(isScoped()); 837 return _memSpaceConfigFlags.isSet(SYSTEM_SCOPE); 838 } 839 840 bool 841 isGlobalSegment() const 842 { 843 return _memSpaceConfigFlags.isSet(GLOBAL_SEGMENT) || 844 (!isGroupSegment() && !isPrivateSegment() && 845 !isKernargSegment() && !isReadonlySegment() && 846 !isSpillSegment() && !isArgSegment()); 847 } 848 849 bool 850 isGroupSegment() const 851 { 852 return _memSpaceConfigFlags.isSet(GROUP_SEGMENT); 853 } 854 855 bool 856 isPrivateSegment() const 857 { 858 return _memSpaceConfigFlags.isSet(PRIVATE_SEGMENT); 859 } 860 861 bool 862 isKernargSegment() const 863 { 864 return _memSpaceConfigFlags.isSet(KERNARG_SEGMENT); 865 } 866 867 bool 868 isReadonlySegment() const 869 { 870 return _memSpaceConfigFlags.isSet(READONLY_SEGMENT); 871 } 872 873 bool 874 isSpillSegment() const 875 { 876 return _memSpaceConfigFlags.isSet(SPILL_SEGMENT); 877 } 878 879 bool 880 isArgSegment() const 881 { 882 return _memSpaceConfigFlags.isSet(ARG_SEGMENT); 883 } 884}; 885 886#endif // __MEM_REQUEST_HH__ 887