/*
 * Copyright (c) 2012-2019 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2006 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ron Dreslinski
 *          Steve Reinhardt
 *          Ali Saidi
 *          Andreas Hansson
 *          Nikos Nikoleris
 */

/**
 * @file
 * Declaration of the Packet class.
 */

#ifndef __MEM_PACKET_HH__
#define __MEM_PACKET_HH__

#include <bitset>
#include <cassert>
#include <cstring>
#include <list>
#include <vector>

#include "base/addr_range.hh"
#include "base/cast.hh"
#include "base/compiler.hh"
#include "base/flags.hh"
#include "base/logging.hh"
#include "base/printable.hh"
#include "base/types.hh"
#include "config/the_isa.hh"
#include "mem/request.hh"
#include "sim/core.hh"

class Packet;
typedef Packet *PacketPtr;
typedef uint8_t* PacketDataPtr;
typedef std::list<PacketPtr> PacketList;
typedef uint64_t PacketId;

class MemCmd
{
    friend class Packet;

  public:
    /**
     * List of all commands associated with a packet.
     */
    enum Command
    {
        InvalidCmd,
        ReadReq,
        ReadResp,
        ReadRespWithInvalidate,
        WriteReq,
        WriteResp,
        WritebackDirty,
        WritebackClean,
        WriteClean,             // writes dirty data below without evicting
        CleanEvict,
        SoftPFReq,
        SoftPFExReq,
        HardPFReq,
        SoftPFResp,
        HardPFResp,
        WriteLineReq,
        UpgradeReq,
        SCUpgradeReq,           // Special "weak" upgrade for StoreCond
        UpgradeResp,
        SCUpgradeFailReq,       // Failed SCUpgradeReq in MSHR (never sent)
        UpgradeFailResp,        // Valid for SCUpgradeReq only
        ReadExReq,
        ReadExResp,
        ReadCleanReq,
        ReadSharedReq,
        LoadLockedReq,
        StoreCondReq,
        StoreCondFailReq,       // Failed StoreCondReq in MSHR (never sent)
        StoreCondResp,
        SwapReq,
        SwapResp,
        MessageReq,
        MessageResp,
        MemFenceReq,
        MemFenceResp,
        CleanSharedReq,
        CleanSharedResp,
        CleanInvalidReq,
        CleanInvalidResp,
        // Error responses
        // @TODO these should be classified as responses rather than
        // requests; coding them as requests initially for backwards
        // compatibility
        InvalidDestError,       // packet dest field invalid
        BadAddressError,        // memory address invalid
        FunctionalReadError,    // unable to fulfill functional read
        FunctionalWriteError,   // unable to fulfill functional write
        // Fake simulator-only commands
        PrintReq,               // Print state matching address
        FlushReq,               // request for a cache flush
        InvalidateReq,          // request for address to be invalidated
        InvalidateResp,
        NUM_MEM_CMDS
    };

  private:
    /**
     * List of command attributes.
     */
    enum Attribute
    {
        IsRead,         //!< Data flows from responder to requester
        IsWrite,        //!< Data flows from requester to responder
        IsUpgrade,
        IsInvalidate,
        IsClean,        //!< Cleans any existing dirty blocks
        NeedsWritable,  //!< Requires writable copy to complete in-cache
        IsRequest,      //!< Issued by requester
        IsResponse,     //!< Issued by responder
        NeedsResponse,  //!< Requester needs response from target
        IsEviction,
        IsSWPrefetch,
        IsHWPrefetch,
        IsLlsc,         //!< Alpha/MIPS LL or SC access
        HasData,        //!< There is an associated payload
        IsError,        //!< Error response
        IsPrint,        //!< Print state matching address (for debugging)
        IsFlush,        //!< Flush the address from caches
        FromCache,      //!< Request originated from a caching agent
        NUM_COMMAND_ATTRIBUTES
    };

    /**
     * Structure that defines attributes and other data associated
     * with a Command.
     */
    struct CommandInfo
    {
        /// Set of attribute flags.
        const std::bitset<NUM_COMMAND_ATTRIBUTES> attributes;
        /// Corresponding response for requests; InvalidCmd if no
        /// response is applicable.
        const Command response;
        /// String representation (for printing)
        const std::string str;
    };

    /// Array to map Command enum to associated info.
    static const CommandInfo commandInfo[];

  private:

    Command cmd;

    bool
    testCmdAttrib(MemCmd::Attribute attrib) const
    {
        return commandInfo[cmd].attributes[attrib] != 0;
    }

  public:

    bool isRead() const            { return testCmdAttrib(IsRead); }
    bool isWrite() const           { return testCmdAttrib(IsWrite); }
    bool isUpgrade() const         { return testCmdAttrib(IsUpgrade); }
    bool isRequest() const         { return testCmdAttrib(IsRequest); }
    bool isResponse() const        { return testCmdAttrib(IsResponse); }
    bool needsWritable() const     { return testCmdAttrib(NeedsWritable); }
    bool needsResponse() const     { return testCmdAttrib(NeedsResponse); }
    bool isInvalidate() const      { return testCmdAttrib(IsInvalidate); }
    bool isEviction() const        { return testCmdAttrib(IsEviction); }
    bool isClean() const           { return testCmdAttrib(IsClean); }
    bool fromCache() const         { return testCmdAttrib(FromCache); }

    /**
     * A writeback is an eviction that carries data.
     */
    bool isWriteback() const       { return testCmdAttrib(IsEviction) &&
                                            testCmdAttrib(HasData); }

    /**
     * Check if this particular packet type carries payload data. Note
     * that this does not reflect if the data pointer of the packet is
     * valid or not.
     */
    bool hasData() const        { return testCmdAttrib(HasData); }
    bool isLLSC() const         { return testCmdAttrib(IsLlsc); }
    bool isSWPrefetch() const   { return testCmdAttrib(IsSWPrefetch); }
    bool isHWPrefetch() const   { return testCmdAttrib(IsHWPrefetch); }
    bool isPrefetch() const     { return testCmdAttrib(IsSWPrefetch) ||
                                         testCmdAttrib(IsHWPrefetch); }
    bool isError() const        { return testCmdAttrib(IsError); }
    bool isPrint() const        { return testCmdAttrib(IsPrint); }
    bool isFlush() const        { return testCmdAttrib(IsFlush); }

    Command
    responseCommand() const
    {
        return commandInfo[cmd].response;
    }

    /// Return the string representation of this command.
    const std::string &toString() const { return commandInfo[cmd].str; }
    int toInt() const { return (int)cmd; }

    MemCmd(Command _cmd) : cmd(_cmd) { }
    MemCmd(int _cmd) : cmd((Command)_cmd) { }
    MemCmd() : cmd(InvalidCmd) { }

    bool operator==(MemCmd c2) const { return (cmd == c2.cmd); }
    bool operator!=(MemCmd c2) const { return (cmd != c2.cmd); }
};
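
/**
 * Usage sketch (illustrative only): the attribute table drives all of
 * the boolean predicates above, so a MemCmd value can be interrogated
 * without knowing which concrete command it holds. The snippet assumes
 * it runs inside an ordinary function body.
 *
 * @code
 * MemCmd cmd = MemCmd::ReadReq;
 * assert(cmd.isRead() && cmd.isRequest() && cmd.needsResponse());
 * // the table also supplies the matching response command and a
 * // printable name
 * MemCmd resp = cmd.responseCommand();      // MemCmd::ReadResp
 * const std::string &name = cmd.toString(); // e.g. "ReadReq"
 * @endcode
 */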

/**
 * A Packet is used to encapsulate a transfer between two objects in
 * the memory system (e.g., the L1 and L2 cache). (In contrast, a
 * single Request travels all the way from the requester to the
 * ultimate destination and back, possibly being conveyed by several
 * different Packets along the way.)
 */
class Packet : public Printable
{
  public:
    typedef uint32_t FlagsType;
    typedef ::Flags<FlagsType> Flags;

  private:

    enum : FlagsType {
        // Flags to transfer across when copying a packet
        COPY_FLAGS             = 0x0000003F,

        // Flags that are used to create response packets
        RESPONDER_FLAGS        = 0x00000009,

        // Does this packet have sharers (which means it should not be
        // considered writable) or not. See setHasSharers below.
        HAS_SHARERS            = 0x00000001,

        // Special control flags
        /// Special timing-mode atomic snoop for multi-level coherence.
        EXPRESS_SNOOP          = 0x00000002,

        /// Allow a responding cache to inform the cache hierarchy
        /// that it had a writable copy before responding. See
        /// setResponderHadWritable below.
        RESPONDER_HAD_WRITABLE = 0x00000004,

        // Snoop co-ordination flag to indicate that a cache is
        // responding to a snoop. See setCacheResponding below.
        CACHE_RESPONDING       = 0x00000008,

        // The writeback/writeclean should be propagated further
        // downstream by the receiver
        WRITE_THROUGH          = 0x00000010,

        // Response co-ordination flag for cache maintenance
        // operations
        SATISFIED              = 0x00000020,

        /// Are the 'addr' and 'size' fields valid?
        VALID_ADDR             = 0x00000100,
        VALID_SIZE             = 0x00000200,

        /// Is the data pointer set to a value that shouldn't be freed
        /// when the packet is destroyed?
        STATIC_DATA            = 0x00001000,
        /// The data pointer points to a value that should be freed when
        /// the packet is destroyed. The pointer is assumed to be pointing
        /// to an array, and delete [] is consequently called
        DYNAMIC_DATA           = 0x00002000,

        /// suppress the error if this packet encounters a functional
        /// access failure.
        SUPPRESS_FUNC_ERROR    = 0x00008000,

        // Signal block present to squash prefetch and cache evict packets
        // through express snoop flag
        BLOCK_CACHED           = 0x00010000
    };

    Flags flags;

  public:
    typedef MemCmd::Command Command;

    /// The command field of the packet.
    MemCmd cmd;

    const PacketId id;

    /// A pointer to the original request.
    RequestPtr req;

  private:
    /**
     * A pointer to the data being transferred. It can be different
     * sizes at each level of the hierarchy so it belongs to the
     * packet, not the request. This may or may not be populated when a
     * responder receives the packet. If not populated, memory should
     * be allocated.
     */
    PacketDataPtr data;

    /// The address of the request. This address could be virtual or
    /// physical, depending on the system configuration.
    Addr addr;

    /// True if the request targets the secure memory space.
    bool _isSecure;

    /// The size of the request or transfer.
    unsigned size;

    /**
     * Track the bytes found that satisfy a functional read.
     */
    std::vector<bool> bytesValid;

    // Quality of Service priority value
    uint8_t _qosValue;

  public:

    /**
     * The extra delay from seeing the packet until the header is
     * transmitted. This delay is used to communicate the crossbar
     * forwarding latency to the neighbouring object (e.g. a cache)
     * that actually makes the packet wait. As the delay is relative,
     * a 32-bit unsigned should be sufficient.
     */
    uint32_t headerDelay;

    /**
     * Keep track of the extra delay incurred by snooping upwards
     * before sending a request down the memory system. This is used
     * by the coherent crossbar to account for the additional request
     * delay.
     */
    uint32_t snoopDelay;

    /**
     * The extra pipelining delay from seeing the packet until the end of
     * payload is transmitted by the component that provided it (if
     * any). This includes the header delay. Similar to the header
     * delay, this is used to make up for the fact that the
     * crossbar does not make the packet wait. As the delay is
     * relative, a 32-bit unsigned should be sufficient.
     */
    uint32_t payloadDelay;
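
    /**
     * Usage sketch (illustrative only) of how a responding memory
     * object typically consumes these delays: it folds them into its
     * own response latency and then zeroes them, since the delay is
     * relative and must not be charged twice. MyMemory, respondAt and
     * latency are assumed names, not part of this interface.
     *
     * @code
     * bool
     * MyMemory::recvTimingReq(PacketPtr pkt)
     * {
     *     // charge the accumulated crossbar forwarding latency here
     *     Tick receive_delay = pkt->headerDelay + pkt->payloadDelay;
     *     pkt->headerDelay = pkt->payloadDelay = 0;
     *     respondAt(pkt, curTick() + latency + receive_delay);
     *     return true;
     * }
     * @endcode
     */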

    /**
     * A virtual base opaque structure used to hold state associated
     * with the packet (e.g., an MSHR), specific to a SimObject that
     * sees the packet. A pointer to this state is returned in the
     * packet's response so that the SimObject in question can quickly
     * look up the state needed to process it. A specific subclass
     * would be derived from this to carry state specific to a
     * particular sending device.
     *
     * As multiple SimObjects may add their SenderState throughout the
     * memory system, the SenderStates create a stack, where a
     * SimObject can add a new SenderState, as long as the
     * preceding SenderState is restored when the response comes
     * back. For this reason, the predecessor should always be
     * populated with the current SenderState of a packet before
     * modifying the senderState field in the request packet.
     */
    struct SenderState
    {
        SenderState* predecessor;
        SenderState() : predecessor(NULL) {}
        virtual ~SenderState() {}
    };
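
    /**
     * Usage sketch (illustrative only): a device derives its own
     * SenderState to remember per-request bookkeeping, pushes it
     * before forwarding the request, and pops it when the response
     * returns. MyDeviceState is an assumed name; pushSenderState and
     * popSenderState are declared further below.
     *
     * @code
     * struct MyDeviceState : public Packet::SenderState
     * {
     *     Tick issueTick;
     *     MyDeviceState(Tick t) : issueTick(t) {}
     * };
     *
     * // request path
     * pkt->pushSenderState(new MyDeviceState(curTick()));
     *
     * // response path
     * MyDeviceState *state =
     *     safe_cast<MyDeviceState *>(pkt->popSenderState());
     * Tick round_trip = curTick() - state->issueTick; // e.g. for stats
     * delete state;
     * @endcode
     */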

    /**
     * Object used to maintain state of a PrintReq. The senderState
     * field of a PrintReq should always be of this type.
     */
    class PrintReqState : public SenderState
    {
      private:
        /**
         * An entry in the label stack.
         */
        struct LabelStackEntry
        {
            const std::string label;
            std::string *prefix;
            bool labelPrinted;
            LabelStackEntry(const std::string &_label, std::string *_prefix);
        };

        typedef std::list<LabelStackEntry> LabelStack;
        LabelStack labelStack;

        std::string *curPrefixPtr;

      public:
        std::ostream &os;
        const int verbosity;

        PrintReqState(std::ostream &os, int verbosity = 0);
        ~PrintReqState();

        /**
         * Returns the current line prefix.
         */
        const std::string &curPrefix() { return *curPrefixPtr; }

        /**
         * Push a label onto the label stack, and prepend the given
         * prefix string onto the current prefix. Labels will only be
         * printed if an object within the label's scope is printed.
         */
        void pushLabel(const std::string &lbl,
                       const std::string &prefix = "  ");

        /**
         * Pop a label off the label stack.
         */
        void popLabel();

        /**
         * Print all of the pending unprinted labels on the
         * stack. Called by printObj(), so normally not called by
         * users unless bypassing printObj().
         */
        void printLabels();

        /**
         * Print a Printable object to os, because it matched the
         * address on a PrintReq.
         */
        void printObj(Printable *obj);
    };

    /**
     * This packet's sender state. Devices should use dynamic_cast<>
     * to cast to the state appropriate to the sender. The intent of
     * this variable is to allow a device to attach extra information
     * to a request. A response packet must return the sender state
     * that was attached to the original request (even if a new packet
     * is created).
     */
    SenderState *senderState;

    /**
     * Push a new sender state to the packet and make the current
     * sender state the predecessor of the new one. This should be
     * preferred over direct manipulation of the senderState member
     * variable.
     *
     * @param sender_state SenderState to push at the top of the stack
     */
    void pushSenderState(SenderState *sender_state);

    /**
     * Pop the top of the state stack and return a pointer to it. This
     * assumes the current sender state is not NULL. This should be
     * preferred over direct manipulation of the senderState member
     * variable.
     *
     * @return The current top of the stack
     */
    SenderState *popSenderState();

    /**
     * Go through the sender state stack and return the first instance
     * that is of type T (as determined by a dynamic_cast). If there
     * is no sender state of type T, NULL is returned.
     *
     * @return The topmost state of type T
     */
    template <typename T>
    T * findNextSenderState() const
    {
        T *t = NULL;
        SenderState* sender_state = senderState;
        while (t == NULL && sender_state != NULL) {
            t = dynamic_cast<T*>(sender_state);
            sender_state = sender_state->predecessor;
        }
        return t;
    }
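
    /**
     * Usage sketch (illustrative only): because sender states form a
     * stack, an object in the middle of the path can look for its own
     * state without disturbing states pushed by others. MyDeviceState
     * is the assumed subclass from the sketch above.
     *
     * @code
     * MyDeviceState *state = pkt->findNextSenderState<MyDeviceState>();
     * if (state) {
     *     // the packet passed through our device on its way here
     * }
     * @endcode
     */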

    /// Return the string name of the cmd field (for debugging and
    /// tracing).
    const std::string &cmdString() const { return cmd.toString(); }

    /// Return the index of this command.
    inline int cmdToIndex() const { return cmd.toInt(); }

    bool isRead() const              { return cmd.isRead(); }
    bool isWrite() const             { return cmd.isWrite(); }
    bool isUpgrade() const           { return cmd.isUpgrade(); }
    bool isRequest() const           { return cmd.isRequest(); }
    bool isResponse() const          { return cmd.isResponse(); }
    bool needsWritable() const
    {
        // we should never check if a response needsWritable, the
        // request has this flag, and for a response we should rather
        // look at the hasSharers flag (if not set, the response is to
        // be considered writable)
        assert(isRequest());
        return cmd.needsWritable();
    }
    bool needsResponse() const       { return cmd.needsResponse(); }
    bool isInvalidate() const        { return cmd.isInvalidate(); }
    bool isEviction() const          { return cmd.isEviction(); }
    bool isClean() const             { return cmd.isClean(); }
    bool fromCache() const           { return cmd.fromCache(); }
    bool isWriteback() const         { return cmd.isWriteback(); }
    bool hasData() const             { return cmd.hasData(); }
    bool hasRespData() const
    {
        MemCmd resp_cmd = cmd.responseCommand();
        return resp_cmd.hasData();
    }
    bool isLLSC() const              { return cmd.isLLSC(); }
    bool isError() const             { return cmd.isError(); }
    bool isPrint() const             { return cmd.isPrint(); }
    bool isFlush() const             { return cmd.isFlush(); }

    bool isWholeLineWrite(unsigned blk_size)
    {
        return (cmd == MemCmd::WriteReq || cmd == MemCmd::WriteLineReq) &&
            getOffset(blk_size) == 0 && getSize() == blk_size;
    }

    //@{
    /// Snoop flags
    /**
     * Set the cacheResponding flag. This is used by the caches to
     * signal another cache that they are responding to a request. A
     * cache will only respond to snoops if it has the line in either
     * Modified or Owned state. Note that on snoop hits we always pass
     * the line as Modified and never Owned. In the case of an Owned
     * line we proceed to invalidate all other copies.
     *
     * On a cache fill (see Cache::handleFill), we check hasSharers
     * first, ignoring the cacheResponding flag if hasSharers is set.
     * A line is consequently allocated as:
     *
     * hasSharers cacheResponding state
     * true       false           Shared
     * true       true            Shared
     * false      false           Exclusive
     * false      true            Modified
     */
    void setCacheResponding()
    {
        assert(isRequest());
        assert(!flags.isSet(CACHE_RESPONDING));
        flags.set(CACHE_RESPONDING);
    }
    bool cacheResponding() const { return flags.isSet(CACHE_RESPONDING); }
    /**
     * On fills, the hasSharers flag is used by the caches in
     * combination with the cacheResponding flag, as clarified
     * above. If the hasSharers flag is not set, the packet is passing
     * writable. Thus, a response from a memory passes the line as
     * writable by default.
     *
     * The hasSharers flag is also used by upstream caches to inform a
     * downstream cache that they have the block (by calling
     * setHasSharers on snoop request packets that hit in upstream
     * caches' tags or MSHRs). If the snoop packet has sharers, a
     * downstream cache is prevented from passing a dirty line upwards
     * if it was not explicitly asked for a writable copy. See
     * Cache::satisfyCpuSideRequest.
     *
     * The hasSharers flag is also used on writebacks, in
     * combination with the WritebackClean or WritebackDirty commands,
     * to allocate the block downstream either as:
     *
     * command        hasSharers state
     * WritebackDirty false      Modified
     * WritebackDirty true       Owned
     * WritebackClean false      Exclusive
     * WritebackClean true       Shared
     */
    void setHasSharers()    { flags.set(HAS_SHARERS); }
    bool hasSharers() const { return flags.isSet(HAS_SHARERS); }
    //@}
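
    /**
     * Worked example (illustrative only) of how the first table above
     * translates into a fill decision on the response path:
     *
     * @code
     * // deciding the state of a block being allocated from pkt
     * bool writable = !pkt->hasSharers();
     * bool dirty    = !pkt->hasSharers() && pkt->cacheResponding();
     * // writable && dirty  -> Modified
     * // writable && !dirty -> Exclusive
     * // !writable          -> Shared
     * @endcode
     */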

    /**
     * The express snoop flag is used for two purposes. Firstly, it is
     * used to bypass flow control for normal (non-snoop) requests
     * going downstream in the memory system. In cases where a cache
     * is responding to a snoop from another cache (it had a dirty
     * line), but the line is not writable (and there are possibly
     * other copies), the express snoop flag is set by the downstream
     * cache to invalidate all other copies in zero time. Secondly,
     * the express snoop flag is also set to be able to distinguish
     * snoop packets that came from a downstream cache, rather than
     * snoop packets from neighbouring caches.
     */
    void setExpressSnoop()      { flags.set(EXPRESS_SNOOP); }
    bool isExpressSnoop() const { return flags.isSet(EXPRESS_SNOOP); }

    /**
     * On responding to a snoop request (which only happens for
     * Modified or Owned lines), make sure that we can transform an
     * Owned response to a Modified one. If this flag is not set, the
     * responding cache had the line in the Owned state, and there are
     * possibly other Shared copies in the memory system. A downstream
     * cache helps in orchestrating the invalidation of these copies
     * by sending out the appropriate express snoops.
     */
    void setResponderHadWritable()
    {
        assert(cacheResponding());
        assert(!responderHadWritable());
        flags.set(RESPONDER_HAD_WRITABLE);
    }
    bool responderHadWritable() const
    { return flags.isSet(RESPONDER_HAD_WRITABLE); }

    /**
     * Copy the response flags from an input packet to this packet. The
     * response flags determine whether a responder has been found and
     * the state at which the block will be at the destination.
     *
     * @pkt The packet that we will copy flags from
     */
    void copyResponderFlags(const PacketPtr pkt);

    /**
     * A writeback/writeclean cmd gets propagated further downstream
     * by the receiver when the flag is set.
     */
    void setWriteThrough()
    {
        assert(cmd.isWrite() &&
               (cmd.isEviction() || cmd == MemCmd::WriteClean));
        flags.set(WRITE_THROUGH);
    }
    void clearWriteThrough() { flags.clear(WRITE_THROUGH); }
    bool writeThrough() const { return flags.isSet(WRITE_THROUGH); }

    /**
     * Set when a request hits in a cache and the cache is not going
     * to respond. This is used by the crossbar to coordinate
     * responses for cache maintenance operations.
     */
    void setSatisfied()
    {
        assert(cmd.isClean());
        assert(!flags.isSet(SATISFIED));
        flags.set(SATISFIED);
    }
    bool satisfied() const { return flags.isSet(SATISFIED); }

    void setSuppressFuncError()  { flags.set(SUPPRESS_FUNC_ERROR); }
    bool suppressFuncError() const { return flags.isSet(SUPPRESS_FUNC_ERROR); }
    void setBlockCached()  { flags.set(BLOCK_CACHED); }
    bool isBlockCached() const { return flags.isSet(BLOCK_CACHED); }
    void clearBlockCached() { flags.clear(BLOCK_CACHED); }

    /**
     * QoS Value getter
     * Returns 0 if QoS value was never set (constructor default).
     *
     * @return QoS priority value of the packet
     */
    inline uint8_t qosValue() const { return _qosValue; }

    /**
     * QoS Value setter
     * Interface for setting QoS priority value of the packet.
     *
     * @param qos_value QoS priority value
     */
    inline void qosValue(const uint8_t qos_value)
    { _qosValue = qos_value; }

    inline MasterID masterId() const { return req->masterId(); }

    // Network error conditions... encapsulate them as methods since
    // their encoding keeps changing (from result field to command
    // field, etc.)
    void
    setBadAddress()
    {
        assert(isResponse());
        cmd = MemCmd::BadAddressError;
    }

    void copyError(Packet *pkt) { assert(pkt->isError()); cmd = pkt->cmd; }

    Addr getAddr() const { assert(flags.isSet(VALID_ADDR)); return addr; }
    /**
     * Update the address of this packet mid-transaction. This is used
     * by the address mapper to change an already set address to a new
     * one based on the system configuration. It is intended to remap
     * an existing address, so it asserts that the current address is
     * valid.
     */
    void setAddr(Addr _addr) { assert(flags.isSet(VALID_ADDR)); addr = _addr; }

    unsigned getSize() const { assert(flags.isSet(VALID_SIZE)); return size; }

    /**
     * Get address range to which this packet belongs.
     *
     * @return Address range of this packet.
     */
    AddrRange getAddrRange() const;

    Addr getOffset(unsigned int blk_size) const
    {
        return getAddr() & Addr(blk_size - 1);
    }

    Addr getBlockAddr(unsigned int blk_size) const
    {
        return getAddr() & ~(Addr(blk_size - 1));
    }
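
    /**
     * Worked example: with a 64-byte block size both helpers above are
     * simple mask operations. For a packet at address 0x10064 with a
     * size of 4 bytes:
     *
     * @code
     * const unsigned blk_size = 64;
     * Addr block  = pkt->getBlockAddr(blk_size);         // 0x10040
     * Addr offset = pkt->getOffset(blk_size);            // 0x24 (36)
     * bool fits   = offset + pkt->getSize() <= blk_size; // true
     * @endcode
     */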

    bool isSecure() const
    {
        assert(flags.isSet(VALID_ADDR));
        return _isSecure;
    }

    /**
     * Accessor function to atomic op.
     */
    AtomicOpFunctor *getAtomicOp() const { return req->getAtomicOpFunctor(); }
    bool isAtomicOp() const { return req->isAtomic(); }

    /**
     * It has been determined that the SC packet should successfully update
     * memory. Therefore, convert this SC packet to a normal write.
     */
    void
    convertScToWrite()
    {
        assert(isLLSC());
        assert(isWrite());
        cmd = MemCmd::WriteReq;
    }

    /**
     * When ruby is in use, Ruby will monitor the cache line and the
     * phys memory should treat LL ops as normal reads.
     */
    void
    convertLlToRead()
    {
        assert(isLLSC());
        assert(isRead());
        cmd = MemCmd::ReadReq;
    }

    /**
     * Constructor. Note that a Request object must be constructed
     * first, but the Request's physical address and size fields need
     * not be valid. The command must be supplied.
     */
    Packet(const RequestPtr &_req, MemCmd _cmd)
        :  cmd(_cmd), id((PacketId)_req.get()), req(_req),
           data(nullptr), addr(0), _isSecure(false), size(0),
           _qosValue(0), headerDelay(0), snoopDelay(0),
           payloadDelay(0), senderState(NULL)
    {
        if (req->hasPaddr()) {
            addr = req->getPaddr();
            flags.set(VALID_ADDR);
            _isSecure = req->isSecure();
        }
        if (req->hasSize()) {
            size = req->getSize();
            flags.set(VALID_SIZE);
        }
    }

    /**
     * Alternate constructor if you are trying to create a packet with
     * a request that is for a whole block, not the address from the
     * req. This allows for overriding the size/addr of the req.
     */
    Packet(const RequestPtr &_req, MemCmd _cmd, int _blkSize, PacketId _id = 0)
        :  cmd(_cmd), id(_id ? _id : (PacketId)_req.get()), req(_req),
           data(nullptr), addr(0), _isSecure(false),
           _qosValue(0), headerDelay(0),
           snoopDelay(0), payloadDelay(0), senderState(NULL)
    {
        if (req->hasPaddr()) {
            addr = req->getPaddr() & ~(_blkSize - 1);
            flags.set(VALID_ADDR);
            _isSecure = req->isSecure();
        }
        size = _blkSize;
        flags.set(VALID_SIZE);
    }

    /**
     * Alternate constructor for copying a packet. Copy all fields
     * *except* if the original packet's data was dynamic, don't copy
     * that, as we can't guarantee that the new packet's lifetime is
     * less than that of the original packet. In this case the new
     * packet should allocate its own data.
     */
    Packet(const PacketPtr pkt, bool clear_flags, bool alloc_data)
        :  cmd(pkt->cmd), id(pkt->id), req(pkt->req),
           data(nullptr),
           addr(pkt->addr), _isSecure(pkt->_isSecure), size(pkt->size),
           bytesValid(pkt->bytesValid),
           _qosValue(pkt->qosValue()),
           headerDelay(pkt->headerDelay),
           snoopDelay(0),
           payloadDelay(pkt->payloadDelay),
           senderState(pkt->senderState)
    {
        if (!clear_flags)
            flags.set(pkt->flags & COPY_FLAGS);

        flags.set(pkt->flags & (VALID_ADDR|VALID_SIZE));

        // should we allocate space for data, or not, the express
        // snoops do not need to carry any data as they only serve to
        // co-ordinate state changes
        if (alloc_data) {
            // even if asked to allocate data, if the original packet
            // holds static data, then the sender will not be doing
            // any memcpy on receiving the response, thus we simply
            // carry the pointer forward
            if (pkt->flags.isSet(STATIC_DATA)) {
                data = pkt->data;
                flags.set(STATIC_DATA);
            } else {
                allocate();
            }
        }
    }

    /**
     * Generate the appropriate read MemCmd based on the Request flags.
     */
    static MemCmd
    makeReadCmd(const RequestPtr &req)
    {
        if (req->isLLSC())
            return MemCmd::LoadLockedReq;
        else if (req->isPrefetchEx())
            return MemCmd::SoftPFExReq;
        else if (req->isPrefetch())
            return MemCmd::SoftPFReq;
        else
            return MemCmd::ReadReq;
    }

    /**
     * Generate the appropriate write MemCmd based on the Request flags.
     */
    static MemCmd
    makeWriteCmd(const RequestPtr &req)
    {
        if (req->isLLSC())
            return MemCmd::StoreCondReq;
        else if (req->isSwap() || req->isAtomic())
            return MemCmd::SwapReq;
        else if (req->isCacheInvalidate()) {
            return req->isCacheClean() ? MemCmd::CleanInvalidReq :
                MemCmd::InvalidateReq;
        } else if (req->isCacheClean()) {
            return MemCmd::CleanSharedReq;
        } else
            return MemCmd::WriteReq;
    }

    /**
     * Constructor-like methods that return Packets based on Request objects.
     * Fine-tune the MemCmd type if it's not a vanilla read or write.
     */
    static PacketPtr
    createRead(const RequestPtr &req)
    {
        return new Packet(req, makeReadCmd(req));
    }

    static PacketPtr
    createWrite(const RequestPtr &req)
    {
        return new Packet(req, makeWriteCmd(req));
    }
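
    /**
     * Usage sketch (illustrative only) of the usual creation sequence
     * in a requesting object: build a Request, let the factory method
     * pick the command, and attach storage for the data. The Request
     * constructor arguments (addr, size, flags, masterId) and the port
     * used to send the packet are assumptions.
     *
     * @code
     * RequestPtr req = std::make_shared<Request>(addr, size, 0, masterId);
     * PacketPtr pkt = Packet::createRead(req);
     * pkt->allocate();   // room for the read data (see allocate below)
     * // ... send pkt through a port; delete it once the response
     * // has been consumed
     * @endcode
     */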

    /**
     * Clean up packet variables.
     */
    ~Packet()
    {
        deleteData();
    }

    /**
     * Take a request packet and modify it in place to be suitable for
     * returning as a response to that request.
     */
    void
    makeResponse()
    {
        assert(needsResponse());
        assert(isRequest());
        cmd = cmd.responseCommand();

        // responses are never express, even if the snoop that
        // triggered them was
        flags.clear(EXPRESS_SNOOP);
    }

    void
    makeAtomicResponse()
    {
        makeResponse();
    }

    void
    makeTimingResponse()
    {
        makeResponse();
    }

    void
    setFunctionalResponseStatus(bool success)
    {
        if (!success) {
            if (isWrite()) {
                cmd = MemCmd::FunctionalWriteError;
            } else {
                cmd = MemCmd::FunctionalReadError;
            }
        }
    }

    void
    setSize(unsigned size)
    {
        assert(!flags.isSet(VALID_SIZE));

        this->size = size;
        flags.set(VALID_SIZE);
    }
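
    /**
     * Usage sketch (illustrative only): a simple responder turns the
     * request around in place. MySimpleMemory, backingStore, rangeStart
     * and latency are assumed names; the data accessors used here are
     * declared further below.
     *
     * @code
     * Tick
     * MySimpleMemory::recvAtomic(PacketPtr pkt)
     * {
     *     uint8_t *host = backingStore + (pkt->getAddr() - rangeStart);
     *     if (pkt->isRead())
     *         pkt->setData(host);       // fill the packet payload
     *     else if (pkt->isWrite())
     *         pkt->writeData(host);     // update the backing store
     *
     *     if (pkt->needsResponse())
     *         pkt->makeResponse();
     *     return latency;
     * }
     * @endcode
     */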

    /**
     * Check if packet corresponds to a given block-aligned address and
     * address space.
     *
     * @param addr The address to compare against.
     * @param is_secure Whether addr belongs to the secure address space.
     * @param blk_size Block size in bytes.
     * @return Whether packet matches description.
     */
    bool matchBlockAddr(const Addr addr, const bool is_secure,
                        const int blk_size) const;

    /**
     * Check if this packet refers to the same block-aligned address and
     * address space as another packet.
     *
     * @param pkt The packet to compare against.
     * @param blk_size Block size in bytes.
     * @return Whether packet matches description.
     */
    bool matchBlockAddr(const PacketPtr pkt, const int blk_size) const;

    /**
     * Check if packet corresponds to a given address and address space.
     *
     * @param addr The address to compare against.
     * @param is_secure Whether addr belongs to the secure address space.
     * @return Whether packet matches description.
     */
    bool matchAddr(const Addr addr, const bool is_secure) const;

    /**
     * Check if this packet refers to the same address and address space as
     * another packet.
     *
     * @param pkt The packet to compare against.
     * @return Whether packet matches description.
     */
    bool matchAddr(const PacketPtr pkt) const;

  public:
    /**
     * @{
     * @name Data accessor methods
     */

    /**
     * Set the data pointer to the following value that should not be
     * freed. Static data allows us to do a single memcpy even if
     * multiple packets are required to get from source to destination
     * and back. In essence the pointer is set by calling dataStatic on
     * the original packet, and whenever this packet is copied and
     * forwarded the same pointer is passed on. When a packet
     * eventually reaches the destination holding the data, it is
     * copied once into the location originally set. On the way back
     * to the source, no copies are necessary.
     */
    template <typename T>
    void
    dataStatic(T *p)
    {
        assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA));
        data = (PacketDataPtr)p;
        flags.set(STATIC_DATA);
    }

    /**
     * Set the data pointer to the following value that should not be
     * freed. This version of the function allows the pointer passed
     * to us to be const. To avoid issues down the line we cast the
     * constness away, the alternative would be to keep both a const
     * and non-const data pointer and cleverly choose between
     * them. Note that this is only allowed for static data.
     */
    template <typename T>
    void
    dataStaticConst(const T *p)
    {
        assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA));
        data = const_cast<PacketDataPtr>(p);
        flags.set(STATIC_DATA);
    }

    /**
     * Set the data pointer to a value that should have delete []
     * called on it. Dynamic data is local to this packet, and as the
     * packet travels from source to destination, forwarded packets
     * will allocate their own data. When a packet reaches the final
     * destination it will populate the dynamic data of that specific
     * packet, and on the way back towards the source, memcpy will be
     * invoked in every step where a new packet was created e.g. in
     * the caches. Ultimately when the response reaches the source a
     * final memcpy is needed to extract the data from the packet
     * before it is deallocated.
     */
    template <typename T>
    void
    dataDynamic(T *p)
    {
        assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA));
        data = (PacketDataPtr)p;
        flags.set(DYNAMIC_DATA);
    }
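
    /**
     * Usage sketch (illustrative only) contrasting the two ownership
     * models; read_pkt and write_pkt are assumed to be packets that do
     * not have a data pointer attached yet.
     *
     * @code
     * // (a) the requester owns the buffer; the packet only borrows it
     * static uint8_t cpu_buf[64];   // must outlive the packet
     * read_pkt->dataStatic(cpu_buf);
     *
     * // (b) the packet owns the buffer; deleteData()/~Packet() frees it
     * write_pkt->dataDynamic(new uint8_t[write_pkt->getSize()]);
     * @endcode
     */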

    /**
     * Get a pointer to the data.
     */
    template <typename T>
    T*
    getPtr()
    {
        assert(flags.isSet(STATIC_DATA|DYNAMIC_DATA));
        assert(!isMaskedWrite());
        return (T*)data;
    }

    template <typename T>
    const T*
    getConstPtr() const
    {
        assert(flags.isSet(STATIC_DATA|DYNAMIC_DATA));
        return (const T*)data;
    }

    /**
     * Get the data in the packet byte swapped from big endian to
     * host endian.
     */
    template <typename T>
    T getBE() const;

    /**
     * Get the data in the packet byte swapped from little endian to
     * host endian.
     */
    template <typename T>
    T getLE() const;

    /**
     * Get the data in the packet byte swapped from the specified
     * endianness.
     */
    template <typename T>
    T get(ByteOrder endian) const;

#if THE_ISA != NULL_ISA
    /**
     * Get the data in the packet byte swapped from guest to host
     * endian.
     */
    template <typename T>
    T get() const
        M5_DEPRECATED_MSG("The memory system should be ISA independent.");
#endif

    /** Set the value in the data pointer to v as big endian. */
    template <typename T>
    void setBE(T v);

    /** Set the value in the data pointer to v as little endian. */
    template <typename T>
    void setLE(T v);

    /**
     * Set the value in the data pointer to v using the specified
     * endianness.
     */
    template <typename T>
    void set(T v, ByteOrder endian);

#if THE_ISA != NULL_ISA
    /** Set the value in the data pointer to v as guest endian. */
    template <typename T>
    void set(T v)
        M5_DEPRECATED_MSG("The memory system should be ISA independent.");
#endif

    /**
     * Get the data in the packet byte swapped from the specified
     * endianness and zero-extended to 64 bits.
     */
    uint64_t getUintX(ByteOrder endian) const;

    /**
     * Set the value in the word w after truncating it to the length
     * of the packet and then byteswapping it to the desired
     * endianness.
     */
    void setUintX(uint64_t w, ByteOrder endian);

    /**
     * Copy data into the packet from the provided pointer.
     */
    void
    setData(const uint8_t *p)
    {
        // we should never be copying data onto itself, which means we
        // must identify packets with static data, as they carry the
        // same pointer from source to destination and back
        assert(p != getPtr<uint8_t>() || flags.isSet(STATIC_DATA));

        if (p != getPtr<uint8_t>()) {
            // for packet with allocated dynamic data, we copy data from
            // one to the other, e.g. a forwarded response to a response
            std::memcpy(getPtr<uint8_t>(), p, getSize());
        }
    }

    /**
     * Copy data into the packet from the provided block pointer,
     * which is aligned to the given block size.
     */
    void
    setDataFromBlock(const uint8_t *blk_data, int blkSize)
    {
        setData(blk_data + getOffset(blkSize));
    }

    /**
     * Copy data from the packet to the memory at the provided pointer.
     * @param p Pointer to which data will be copied.
     */
    void
    writeData(uint8_t *p) const
    {
        if (!isMaskedWrite()) {
            std::memcpy(p, getConstPtr<uint8_t>(), getSize());
        } else {
            assert(req->getByteEnable().size() == getSize());
            // Write only the enabled bytes
            const uint8_t *base = getConstPtr<uint8_t>();
            for (int i = 0; i < getSize(); i++) {
                if (req->getByteEnable()[i]) {
                    p[i] = *(base + i);
                }
                // Disabled bytes stay untouched
            }
        }
    }

    /**
     * Copy data from the packet to the provided block pointer, which
     * is aligned to the given block size.
     * @param blk_data Pointer to block to which data will be copied.
     * @param blkSize Block size in bytes.
     */
    void
    writeDataToBlock(uint8_t *blk_data, int blkSize) const
    {
        writeData(blk_data + getOffset(blkSize));
    }
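
    /**
     * Usage sketch (illustrative only): a cache-like object that keeps
     * whole lines uses the block-aligned helpers so that only the bytes
     * addressed by the packet are touched. blk_data and blk_size are
     * assumed to come from the surrounding object.
     *
     * @code
     * if (pkt->isWrite()) {
     *     // merge the (possibly partial) write into the stored line
     *     pkt->writeDataToBlock(blk_data, blk_size);
     * } else if (pkt->isRead()) {
     *     // fill the packet payload from the stored line
     *     pkt->setDataFromBlock(blk_data, blk_size);
     * }
     * @endcode
     */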

    /**
     * Delete the data pointed to in the data pointer. Ok to call no
     * matter how the data was allocated.
     */
    void
    deleteData()
    {
        if (flags.isSet(DYNAMIC_DATA))
            delete [] data;

        flags.clear(STATIC_DATA|DYNAMIC_DATA);
        data = NULL;
    }

    /** Allocate memory for the packet. */
    void
    allocate()
    {
        // if either this command or the response command has a data
        // payload, actually allocate space
        if (hasData() || hasRespData()) {
            assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA));
            flags.set(DYNAMIC_DATA);
            data = new uint8_t[getSize()];
        }
    }

    /** @} */

    /** Get the data in the packet without byte swapping. */
    template <typename T>
    T getRaw() const;

    /** Set the value in the data pointer to v without byte swapping. */
    template <typename T>
    void setRaw(T v);

  public:
    /**
     * Check a functional request against a memory value stored in
     * another packet (i.e. an in-transit request or
     * response). Returns true if the current packet is a read, and
     * the other packet provides the data, which is then copied to the
     * current packet. If the current packet is a write, and the other
     * packet intersects this one, then we update the data
     * accordingly.
     */
    bool
    trySatisfyFunctional(PacketPtr other)
    {
        if (other->isMaskedWrite()) {
            // Do not forward data if overlapping with a masked write
            if (_isSecure == other->isSecure() &&
                getAddr() <= (other->getAddr() + other->getSize() - 1) &&
                other->getAddr() <= (getAddr() + getSize() - 1)) {
                warn("Trying to check against a masked write, skipping."
                     " (addr: 0x%x, other addr: 0x%x)", getAddr(),
                     other->getAddr());
            }
            return false;
        }
        // all packets that are carrying a payload should have a valid
        // data pointer
        return trySatisfyFunctional(other, other->getAddr(), other->isSecure(),
                                    other->getSize(),
                                    other->hasData() ?
                                    other->getPtr<uint8_t>() : NULL);
    }

    /**
     * Does the request need to check for cached copies of the same block
     * in the memory hierarchy above.
     **/
    bool
    mustCheckAbove() const
    {
        return cmd == MemCmd::HardPFReq || isEviction();
    }

    /**
     * Is this packet a clean eviction, including both actual clean
     * evict packets, but also clean writebacks.
     */
    bool
    isCleanEviction() const
    {
        return cmd == MemCmd::CleanEvict || cmd == MemCmd::WritebackClean;
    }

    bool
    isMaskedWrite() const
    {
        return (cmd == MemCmd::WriteReq && !req->getByteEnable().empty());
    }

    /**
     * Check a functional request against a memory value represented
     * by a base/size pair and an associated data array. If the
     * current packet is a read, it may be satisfied by the memory
     * value. If the current packet is a write, it may update the
     * memory value.
     */
    bool
    trySatisfyFunctional(Printable *obj, Addr base, bool is_secure, int size,
                         uint8_t *_data);

    /**
     * Push label for PrintReq (safe to call unconditionally).
     */
    void
    pushLabel(const std::string &lbl)
    {
        if (isPrint())
            safe_cast<PrintReqState*>(senderState)->pushLabel(lbl);
    }

    /**
     * Pop label for PrintReq (safe to call unconditionally).
     */
    void
    popLabel()
    {
        if (isPrint())
            safe_cast<PrintReqState*>(senderState)->popLabel();
    }

    void print(std::ostream &o, int verbosity = 0,
               const std::string &prefix = "") const;

    /**
     * A no-args wrapper of print(std::ostream...) meant to be invoked
     * from DPRINTFs, avoiding string overheads in fast mode.
     *
     * @return string with the request's type and start<->end addresses
     */
    std::string print() const;
};

#endif //__MEM_PACKET_HH__