1/* 2 * Copyright (c) 2012-2017 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2006 The Regents of The University of Michigan 15 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc. 16 * All rights reserved. 17 * 18 * Redistribution and use in source and binary forms, with or without 19 * modification, are permitted provided that the following conditions are 20 * met: redistributions of source code must retain the above copyright 21 * notice, this list of conditions and the following disclaimer; 22 * redistributions in binary form must reproduce the above copyright 23 * notice, this list of conditions and the following disclaimer in the 24 * documentation and/or other materials provided with the distribution; 25 * neither the name of the copyright holders nor the names of its 26 * contributors may be used to endorse or promote products derived from 27 * this software without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 32 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 40 * 41 * Authors: Ron Dreslinski 42 * Steve Reinhardt 43 * Ali Saidi 44 * Andreas Hansson 45 * Nikos Nikoleris 46 */ 47 48/** 49 * @file 50 * Declaration of the Packet class. 51 */ 52 53#ifndef __MEM_PACKET_HH__ 54#define __MEM_PACKET_HH__ 55 56#include <bitset> 57#include <cassert> 58#include <list> 59 60#include "base/cast.hh" 61#include "base/compiler.hh" 62#include "base/flags.hh" 63#include "base/logging.hh" 64#include "base/printable.hh" 65#include "base/types.hh" 66#include "mem/request.hh" 67#include "sim/core.hh" 68 69class Packet; 70typedef Packet *PacketPtr; 71typedef uint8_t* PacketDataPtr; 72typedef std::list<PacketPtr> PacketList;
| 1/* 2 * Copyright (c) 2012-2017 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2006 The Regents of The University of Michigan 15 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc. 16 * All rights reserved. 17 * 18 * Redistribution and use in source and binary forms, with or without 19 * modification, are permitted provided that the following conditions are 20 * met: redistributions of source code must retain the above copyright 21 * notice, this list of conditions and the following disclaimer; 22 * redistributions in binary form must reproduce the above copyright 23 * notice, this list of conditions and the following disclaimer in the 24 * documentation and/or other materials provided with the distribution; 25 * neither the name of the copyright holders nor the names of its 26 * contributors may be used to endorse or promote products derived from 27 * this software without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 32 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 40 * 41 * Authors: Ron Dreslinski 42 * Steve Reinhardt 43 * Ali Saidi 44 * Andreas Hansson 45 * Nikos Nikoleris 46 */ 47 48/** 49 * @file 50 * Declaration of the Packet class. 51 */ 52 53#ifndef __MEM_PACKET_HH__ 54#define __MEM_PACKET_HH__ 55 56#include <bitset> 57#include <cassert> 58#include <list> 59 60#include "base/cast.hh" 61#include "base/compiler.hh" 62#include "base/flags.hh" 63#include "base/logging.hh" 64#include "base/printable.hh" 65#include "base/types.hh" 66#include "mem/request.hh" 67#include "sim/core.hh" 68 69class Packet; 70typedef Packet *PacketPtr; 71typedef uint8_t* PacketDataPtr; 72typedef std::list<PacketPtr> PacketList;
|
| 73typedef uint64_t PacketId;
|
73 74class MemCmd 75{ 76 friend class Packet; 77 78 public: 79 /** 80 * List of all commands associated with a packet. 81 */ 82 enum Command 83 { 84 InvalidCmd, 85 ReadReq, 86 ReadResp, 87 ReadRespWithInvalidate, 88 WriteReq, 89 WriteResp, 90 WritebackDirty, 91 WritebackClean, 92 WriteClean, // writes dirty data below without evicting 93 CleanEvict, 94 SoftPFReq, 95 HardPFReq, 96 SoftPFResp, 97 HardPFResp, 98 WriteLineReq, 99 UpgradeReq, 100 SCUpgradeReq, // Special "weak" upgrade for StoreCond 101 UpgradeResp, 102 SCUpgradeFailReq, // Failed SCUpgradeReq in MSHR (never sent) 103 UpgradeFailResp, // Valid for SCUpgradeReq only 104 ReadExReq, 105 ReadExResp, 106 ReadCleanReq, 107 ReadSharedReq, 108 LoadLockedReq, 109 StoreCondReq, 110 StoreCondFailReq, // Failed StoreCondReq in MSHR (never sent) 111 StoreCondResp, 112 SwapReq, 113 SwapResp, 114 MessageReq, 115 MessageResp, 116 MemFenceReq, 117 MemFenceResp, 118 CleanSharedReq, 119 CleanSharedResp, 120 CleanInvalidReq, 121 CleanInvalidResp, 122 // Error responses 123 // @TODO these should be classified as responses rather than 124 // requests; coding them as requests initially for backwards 125 // compatibility 126 InvalidDestError, // packet dest field invalid 127 BadAddressError, // memory address invalid 128 FunctionalReadError, // unable to fulfill functional read 129 FunctionalWriteError, // unable to fulfill functional write 130 // Fake simulator-only commands 131 PrintReq, // Print state matching address 132 FlushReq, //request for a cache flush 133 InvalidateReq, // request for address to be invalidated 134 InvalidateResp, 135 NUM_MEM_CMDS 136 }; 137 138 private: 139 /** 140 * List of command attributes. 
141 */ 142 enum Attribute 143 { 144 IsRead, //!< Data flows from responder to requester 145 IsWrite, //!< Data flows from requester to responder 146 IsUpgrade, 147 IsInvalidate, 148 IsClean, //!< Cleans any existing dirty blocks 149 NeedsWritable, //!< Requires writable copy to complete in-cache 150 IsRequest, //!< Issued by requester 151 IsResponse, //!< Issue by responder 152 NeedsResponse, //!< Requester needs response from target 153 IsEviction, 154 IsSWPrefetch, 155 IsHWPrefetch, 156 IsLlsc, //!< Alpha/MIPS LL or SC access 157 HasData, //!< There is an associated payload 158 IsError, //!< Error response 159 IsPrint, //!< Print state matching address (for debugging) 160 IsFlush, //!< Flush the address from caches 161 FromCache, //!< Request originated from a caching agent 162 NUM_COMMAND_ATTRIBUTES 163 }; 164 165 /** 166 * Structure that defines attributes and other data associated 167 * with a Command. 168 */ 169 struct CommandInfo 170 { 171 /// Set of attribute flags. 172 const std::bitset<NUM_COMMAND_ATTRIBUTES> attributes; 173 /// Corresponding response for requests; InvalidCmd if no 174 /// response is applicable. 175 const Command response; 176 /// String representation (for printing) 177 const std::string str; 178 }; 179 180 /// Array to map Command enum to associated info. 
181 static const CommandInfo commandInfo[]; 182 183 private: 184 185 Command cmd; 186 187 bool 188 testCmdAttrib(MemCmd::Attribute attrib) const 189 { 190 return commandInfo[cmd].attributes[attrib] != 0; 191 } 192 193 public: 194 195 bool isRead() const { return testCmdAttrib(IsRead); } 196 bool isWrite() const { return testCmdAttrib(IsWrite); } 197 bool isUpgrade() const { return testCmdAttrib(IsUpgrade); } 198 bool isRequest() const { return testCmdAttrib(IsRequest); } 199 bool isResponse() const { return testCmdAttrib(IsResponse); } 200 bool needsWritable() const { return testCmdAttrib(NeedsWritable); } 201 bool needsResponse() const { return testCmdAttrib(NeedsResponse); } 202 bool isInvalidate() const { return testCmdAttrib(IsInvalidate); } 203 bool isEviction() const { return testCmdAttrib(IsEviction); } 204 bool isClean() const { return testCmdAttrib(IsClean); } 205 bool fromCache() const { return testCmdAttrib(FromCache); } 206 207 /** 208 * A writeback is an eviction that carries data. 209 */ 210 bool isWriteback() const { return testCmdAttrib(IsEviction) && 211 testCmdAttrib(HasData); } 212 213 /** 214 * Check if this particular packet type carries payload data. Note 215 * that this does not reflect if the data pointer of the packet is 216 * valid or not. 217 */ 218 bool hasData() const { return testCmdAttrib(HasData); } 219 bool isLLSC() const { return testCmdAttrib(IsLlsc); } 220 bool isSWPrefetch() const { return testCmdAttrib(IsSWPrefetch); } 221 bool isHWPrefetch() const { return testCmdAttrib(IsHWPrefetch); } 222 bool isPrefetch() const { return testCmdAttrib(IsSWPrefetch) || 223 testCmdAttrib(IsHWPrefetch); } 224 bool isError() const { return testCmdAttrib(IsError); } 225 bool isPrint() const { return testCmdAttrib(IsPrint); } 226 bool isFlush() const { return testCmdAttrib(IsFlush); } 227 228 Command 229 responseCommand() const 230 { 231 return commandInfo[cmd].response; 232 } 233 234 /// Return the string to a cmd given by idx. 
235 const std::string &toString() const { return commandInfo[cmd].str; } 236 int toInt() const { return (int)cmd; } 237 238 MemCmd(Command _cmd) : cmd(_cmd) { } 239 MemCmd(int _cmd) : cmd((Command)_cmd) { } 240 MemCmd() : cmd(InvalidCmd) { } 241 242 bool operator==(MemCmd c2) const { return (cmd == c2.cmd); } 243 bool operator!=(MemCmd c2) const { return (cmd != c2.cmd); } 244}; 245 246/** 247 * A Packet is used to encapsulate a transfer between two objects in 248 * the memory system (e.g., the L1 and L2 cache). (In contrast, a 249 * single Request travels all the way from the requester to the 250 * ultimate destination and back, possibly being conveyed by several 251 * different Packets along the way.) 252 */ 253class Packet : public Printable 254{ 255 public: 256 typedef uint32_t FlagsType; 257 typedef ::Flags<FlagsType> Flags; 258 259 private: 260 261 enum : FlagsType { 262 // Flags to transfer across when copying a packet 263 COPY_FLAGS = 0x0000003F, 264 265 // Does this packet have sharers (which means it should not be 266 // considered writable) or not. See setHasSharers below. 267 HAS_SHARERS = 0x00000001, 268 269 // Special control flags 270 /// Special timing-mode atomic snoop for multi-level coherence. 271 EXPRESS_SNOOP = 0x00000002, 272 273 /// Allow a responding cache to inform the cache hierarchy 274 /// that it had a writable copy before responding. See 275 /// setResponderHadWritable below. 276 RESPONDER_HAD_WRITABLE = 0x00000004, 277 278 // Snoop co-ordination flag to indicate that a cache is 279 // responding to a snoop. See setCacheResponding below. 280 CACHE_RESPONDING = 0x00000008, 281 282 // The writeback/writeclean should be propagated further 283 // downstream by the receiver 284 WRITE_THROUGH = 0x00000010, 285 286 // Response co-ordination flag for cache maintenance 287 // operations 288 SATISFIED = 0x00000020, 289 290 /// Are the 'addr' and 'size' fields valid? 
291 VALID_ADDR = 0x00000100, 292 VALID_SIZE = 0x00000200, 293 294 /// Is the data pointer set to a value that shouldn't be freed 295 /// when the packet is destroyed? 296 STATIC_DATA = 0x00001000, 297 /// The data pointer points to a value that should be freed when 298 /// the packet is destroyed. The pointer is assumed to be pointing 299 /// to an array, and delete [] is consequently called 300 DYNAMIC_DATA = 0x00002000, 301 302 /// suppress the error if this packet encounters a functional 303 /// access failure. 304 SUPPRESS_FUNC_ERROR = 0x00008000, 305 306 // Signal block present to squash prefetch and cache evict packets 307 // through express snoop flag 308 BLOCK_CACHED = 0x00010000 309 }; 310 311 Flags flags; 312 313 public: 314 typedef MemCmd::Command Command; 315 316 /// The command field of the packet. 317 MemCmd cmd; 318
| 74 75class MemCmd 76{ 77 friend class Packet; 78 79 public: 80 /** 81 * List of all commands associated with a packet. 82 */ 83 enum Command 84 { 85 InvalidCmd, 86 ReadReq, 87 ReadResp, 88 ReadRespWithInvalidate, 89 WriteReq, 90 WriteResp, 91 WritebackDirty, 92 WritebackClean, 93 WriteClean, // writes dirty data below without evicting 94 CleanEvict, 95 SoftPFReq, 96 HardPFReq, 97 SoftPFResp, 98 HardPFResp, 99 WriteLineReq, 100 UpgradeReq, 101 SCUpgradeReq, // Special "weak" upgrade for StoreCond 102 UpgradeResp, 103 SCUpgradeFailReq, // Failed SCUpgradeReq in MSHR (never sent) 104 UpgradeFailResp, // Valid for SCUpgradeReq only 105 ReadExReq, 106 ReadExResp, 107 ReadCleanReq, 108 ReadSharedReq, 109 LoadLockedReq, 110 StoreCondReq, 111 StoreCondFailReq, // Failed StoreCondReq in MSHR (never sent) 112 StoreCondResp, 113 SwapReq, 114 SwapResp, 115 MessageReq, 116 MessageResp, 117 MemFenceReq, 118 MemFenceResp, 119 CleanSharedReq, 120 CleanSharedResp, 121 CleanInvalidReq, 122 CleanInvalidResp, 123 // Error responses 124 // @TODO these should be classified as responses rather than 125 // requests; coding them as requests initially for backwards 126 // compatibility 127 InvalidDestError, // packet dest field invalid 128 BadAddressError, // memory address invalid 129 FunctionalReadError, // unable to fulfill functional read 130 FunctionalWriteError, // unable to fulfill functional write 131 // Fake simulator-only commands 132 PrintReq, // Print state matching address 133 FlushReq, //request for a cache flush 134 InvalidateReq, // request for address to be invalidated 135 InvalidateResp, 136 NUM_MEM_CMDS 137 }; 138 139 private: 140 /** 141 * List of command attributes. 
142 */ 143 enum Attribute 144 { 145 IsRead, //!< Data flows from responder to requester 146 IsWrite, //!< Data flows from requester to responder 147 IsUpgrade, 148 IsInvalidate, 149 IsClean, //!< Cleans any existing dirty blocks 150 NeedsWritable, //!< Requires writable copy to complete in-cache 151 IsRequest, //!< Issued by requester 152 IsResponse, //!< Issue by responder 153 NeedsResponse, //!< Requester needs response from target 154 IsEviction, 155 IsSWPrefetch, 156 IsHWPrefetch, 157 IsLlsc, //!< Alpha/MIPS LL or SC access 158 HasData, //!< There is an associated payload 159 IsError, //!< Error response 160 IsPrint, //!< Print state matching address (for debugging) 161 IsFlush, //!< Flush the address from caches 162 FromCache, //!< Request originated from a caching agent 163 NUM_COMMAND_ATTRIBUTES 164 }; 165 166 /** 167 * Structure that defines attributes and other data associated 168 * with a Command. 169 */ 170 struct CommandInfo 171 { 172 /// Set of attribute flags. 173 const std::bitset<NUM_COMMAND_ATTRIBUTES> attributes; 174 /// Corresponding response for requests; InvalidCmd if no 175 /// response is applicable. 176 const Command response; 177 /// String representation (for printing) 178 const std::string str; 179 }; 180 181 /// Array to map Command enum to associated info. 
182 static const CommandInfo commandInfo[]; 183 184 private: 185 186 Command cmd; 187 188 bool 189 testCmdAttrib(MemCmd::Attribute attrib) const 190 { 191 return commandInfo[cmd].attributes[attrib] != 0; 192 } 193 194 public: 195 196 bool isRead() const { return testCmdAttrib(IsRead); } 197 bool isWrite() const { return testCmdAttrib(IsWrite); } 198 bool isUpgrade() const { return testCmdAttrib(IsUpgrade); } 199 bool isRequest() const { return testCmdAttrib(IsRequest); } 200 bool isResponse() const { return testCmdAttrib(IsResponse); } 201 bool needsWritable() const { return testCmdAttrib(NeedsWritable); } 202 bool needsResponse() const { return testCmdAttrib(NeedsResponse); } 203 bool isInvalidate() const { return testCmdAttrib(IsInvalidate); } 204 bool isEviction() const { return testCmdAttrib(IsEviction); } 205 bool isClean() const { return testCmdAttrib(IsClean); } 206 bool fromCache() const { return testCmdAttrib(FromCache); } 207 208 /** 209 * A writeback is an eviction that carries data. 210 */ 211 bool isWriteback() const { return testCmdAttrib(IsEviction) && 212 testCmdAttrib(HasData); } 213 214 /** 215 * Check if this particular packet type carries payload data. Note 216 * that this does not reflect if the data pointer of the packet is 217 * valid or not. 218 */ 219 bool hasData() const { return testCmdAttrib(HasData); } 220 bool isLLSC() const { return testCmdAttrib(IsLlsc); } 221 bool isSWPrefetch() const { return testCmdAttrib(IsSWPrefetch); } 222 bool isHWPrefetch() const { return testCmdAttrib(IsHWPrefetch); } 223 bool isPrefetch() const { return testCmdAttrib(IsSWPrefetch) || 224 testCmdAttrib(IsHWPrefetch); } 225 bool isError() const { return testCmdAttrib(IsError); } 226 bool isPrint() const { return testCmdAttrib(IsPrint); } 227 bool isFlush() const { return testCmdAttrib(IsFlush); } 228 229 Command 230 responseCommand() const 231 { 232 return commandInfo[cmd].response; 233 } 234 235 /// Return the string to a cmd given by idx. 
236 const std::string &toString() const { return commandInfo[cmd].str; } 237 int toInt() const { return (int)cmd; } 238 239 MemCmd(Command _cmd) : cmd(_cmd) { } 240 MemCmd(int _cmd) : cmd((Command)_cmd) { } 241 MemCmd() : cmd(InvalidCmd) { } 242 243 bool operator==(MemCmd c2) const { return (cmd == c2.cmd); } 244 bool operator!=(MemCmd c2) const { return (cmd != c2.cmd); } 245}; 246 247/** 248 * A Packet is used to encapsulate a transfer between two objects in 249 * the memory system (e.g., the L1 and L2 cache). (In contrast, a 250 * single Request travels all the way from the requester to the 251 * ultimate destination and back, possibly being conveyed by several 252 * different Packets along the way.) 253 */ 254class Packet : public Printable 255{ 256 public: 257 typedef uint32_t FlagsType; 258 typedef ::Flags<FlagsType> Flags; 259 260 private: 261 262 enum : FlagsType { 263 // Flags to transfer across when copying a packet 264 COPY_FLAGS = 0x0000003F, 265 266 // Does this packet have sharers (which means it should not be 267 // considered writable) or not. See setHasSharers below. 268 HAS_SHARERS = 0x00000001, 269 270 // Special control flags 271 /// Special timing-mode atomic snoop for multi-level coherence. 272 EXPRESS_SNOOP = 0x00000002, 273 274 /// Allow a responding cache to inform the cache hierarchy 275 /// that it had a writable copy before responding. See 276 /// setResponderHadWritable below. 277 RESPONDER_HAD_WRITABLE = 0x00000004, 278 279 // Snoop co-ordination flag to indicate that a cache is 280 // responding to a snoop. See setCacheResponding below. 281 CACHE_RESPONDING = 0x00000008, 282 283 // The writeback/writeclean should be propagated further 284 // downstream by the receiver 285 WRITE_THROUGH = 0x00000010, 286 287 // Response co-ordination flag for cache maintenance 288 // operations 289 SATISFIED = 0x00000020, 290 291 /// Are the 'addr' and 'size' fields valid? 
292 VALID_ADDR = 0x00000100, 293 VALID_SIZE = 0x00000200, 294 295 /// Is the data pointer set to a value that shouldn't be freed 296 /// when the packet is destroyed? 297 STATIC_DATA = 0x00001000, 298 /// The data pointer points to a value that should be freed when 299 /// the packet is destroyed. The pointer is assumed to be pointing 300 /// to an array, and delete [] is consequently called 301 DYNAMIC_DATA = 0x00002000, 302 303 /// suppress the error if this packet encounters a functional 304 /// access failure. 305 SUPPRESS_FUNC_ERROR = 0x00008000, 306 307 // Signal block present to squash prefetch and cache evict packets 308 // through express snoop flag 309 BLOCK_CACHED = 0x00010000 310 }; 311 312 Flags flags; 313 314 public: 315 typedef MemCmd::Command Command; 316 317 /// The command field of the packet. 318 MemCmd cmd; 319
|
| 320 const PacketId id; 321
|
319 /// A pointer to the original request. 320 const RequestPtr req; 321 322 private: 323 /** 324 * A pointer to the data being transfered. It can be differnt 325 * sizes at each level of the heirarchy so it belongs in the 326 * packet, not request. This may or may not be populated when a 327 * responder recieves the packet. If not populated it memory should 328 * be allocated. 329 */ 330 PacketDataPtr data; 331 332 /// The address of the request. This address could be virtual or 333 /// physical, depending on the system configuration. 334 Addr addr; 335 336 /// True if the request targets the secure memory space. 337 bool _isSecure; 338 339 /// The size of the request or transfer. 340 unsigned size; 341 342 /** 343 * Track the bytes found that satisfy a functional read. 344 */ 345 std::vector<bool> bytesValid; 346 347 public: 348 349 /** 350 * The extra delay from seeing the packet until the header is 351 * transmitted. This delay is used to communicate the crossbar 352 * forwarding latency to the neighbouring object (e.g. a cache) 353 * that actually makes the packet wait. As the delay is relative, 354 * a 32-bit unsigned should be sufficient. 355 */ 356 uint32_t headerDelay; 357 358 /** 359 * Keep track of the extra delay incurred by snooping upwards 360 * before sending a request down the memory system. This is used 361 * by the coherent crossbar to account for the additional request 362 * delay. 363 */ 364 uint32_t snoopDelay; 365 366 /** 367 * The extra pipelining delay from seeing the packet until the end of 368 * payload is transmitted by the component that provided it (if 369 * any). This includes the header delay. Similar to the header 370 * delay, this is used to make up for the fact that the 371 * crossbar does not make the packet wait. As the delay is 372 * relative, a 32-bit unsigned should be sufficient. 
373 */ 374 uint32_t payloadDelay; 375 376 /** 377 * A virtual base opaque structure used to hold state associated 378 * with the packet (e.g., an MSHR), specific to a MemObject that 379 * sees the packet. A pointer to this state is returned in the 380 * packet's response so that the MemObject in question can quickly 381 * look up the state needed to process it. A specific subclass 382 * would be derived from this to carry state specific to a 383 * particular sending device. 384 * 385 * As multiple MemObjects may add their SenderState throughout the 386 * memory system, the SenderStates create a stack, where a 387 * MemObject can add a new Senderstate, as long as the 388 * predecessing SenderState is restored when the response comes 389 * back. For this reason, the predecessor should always be 390 * populated with the current SenderState of a packet before 391 * modifying the senderState field in the request packet. 392 */ 393 struct SenderState 394 { 395 SenderState* predecessor; 396 SenderState() : predecessor(NULL) {} 397 virtual ~SenderState() {} 398 }; 399 400 /** 401 * Object used to maintain state of a PrintReq. The senderState 402 * field of a PrintReq should always be of this type. 403 */ 404 class PrintReqState : public SenderState 405 { 406 private: 407 /** 408 * An entry in the label stack. 409 */ 410 struct LabelStackEntry 411 { 412 const std::string label; 413 std::string *prefix; 414 bool labelPrinted; 415 LabelStackEntry(const std::string &_label, std::string *_prefix); 416 }; 417 418 typedef std::list<LabelStackEntry> LabelStack; 419 LabelStack labelStack; 420 421 std::string *curPrefixPtr; 422 423 public: 424 std::ostream &os; 425 const int verbosity; 426 427 PrintReqState(std::ostream &os, int verbosity = 0); 428 ~PrintReqState(); 429 430 /** 431 * Returns the current line prefix. 
432 */ 433 const std::string &curPrefix() { return *curPrefixPtr; } 434 435 /** 436 * Push a label onto the label stack, and prepend the given 437 * prefix string onto the current prefix. Labels will only be 438 * printed if an object within the label's scope is printed. 439 */ 440 void pushLabel(const std::string &lbl, 441 const std::string &prefix = " "); 442 443 /** 444 * Pop a label off the label stack. 445 */ 446 void popLabel(); 447 448 /** 449 * Print all of the pending unprinted labels on the 450 * stack. Called by printObj(), so normally not called by 451 * users unless bypassing printObj(). 452 */ 453 void printLabels(); 454 455 /** 456 * Print a Printable object to os, because it matched the 457 * address on a PrintReq. 458 */ 459 void printObj(Printable *obj); 460 }; 461 462 /** 463 * This packet's sender state. Devices should use dynamic_cast<> 464 * to cast to the state appropriate to the sender. The intent of 465 * this variable is to allow a device to attach extra information 466 * to a request. A response packet must return the sender state 467 * that was attached to the original request (even if a new packet 468 * is created). 469 */ 470 SenderState *senderState; 471 472 /** 473 * Push a new sender state to the packet and make the current 474 * sender state the predecessor of the new one. This should be 475 * prefered over direct manipulation of the senderState member 476 * variable. 477 * 478 * @param sender_state SenderState to push at the top of the stack 479 */ 480 void pushSenderState(SenderState *sender_state); 481 482 /** 483 * Pop the top of the state stack and return a pointer to it. This 484 * assumes the current sender state is not NULL. This should be 485 * preferred over direct manipulation of the senderState member 486 * variable. 
487 * 488 * @return The current top of the stack 489 */ 490 SenderState *popSenderState(); 491 492 /** 493 * Go through the sender state stack and return the first instance 494 * that is of type T (as determined by a dynamic_cast). If there 495 * is no sender state of type T, NULL is returned. 496 * 497 * @return The topmost state of type T 498 */ 499 template <typename T> 500 T * findNextSenderState() const 501 { 502 T *t = NULL; 503 SenderState* sender_state = senderState; 504 while (t == NULL && sender_state != NULL) { 505 t = dynamic_cast<T*>(sender_state); 506 sender_state = sender_state->predecessor; 507 } 508 return t; 509 } 510 511 /// Return the string name of the cmd field (for debugging and 512 /// tracing). 513 const std::string &cmdString() const { return cmd.toString(); } 514 515 /// Return the index of this command. 516 inline int cmdToIndex() const { return cmd.toInt(); } 517 518 bool isRead() const { return cmd.isRead(); } 519 bool isWrite() const { return cmd.isWrite(); } 520 bool isUpgrade() const { return cmd.isUpgrade(); } 521 bool isRequest() const { return cmd.isRequest(); } 522 bool isResponse() const { return cmd.isResponse(); } 523 bool needsWritable() const 524 { 525 // we should never check if a response needsWritable, the 526 // request has this flag, and for a response we should rather 527 // look at the hasSharers flag (if not set, the response is to 528 // be considered writable) 529 assert(isRequest()); 530 return cmd.needsWritable(); 531 } 532 bool needsResponse() const { return cmd.needsResponse(); } 533 bool isInvalidate() const { return cmd.isInvalidate(); } 534 bool isEviction() const { return cmd.isEviction(); } 535 bool isClean() const { return cmd.isClean(); } 536 bool fromCache() const { return cmd.fromCache(); } 537 bool isWriteback() const { return cmd.isWriteback(); } 538 bool hasData() const { return cmd.hasData(); } 539 bool hasRespData() const 540 { 541 MemCmd resp_cmd = cmd.responseCommand(); 542 return 
resp_cmd.hasData(); 543 } 544 bool isLLSC() const { return cmd.isLLSC(); } 545 bool isError() const { return cmd.isError(); } 546 bool isPrint() const { return cmd.isPrint(); } 547 bool isFlush() const { return cmd.isFlush(); } 548 549 //@{ 550 /// Snoop flags 551 /** 552 * Set the cacheResponding flag. This is used by the caches to 553 * signal another cache that they are responding to a request. A 554 * cache will only respond to snoops if it has the line in either 555 * Modified or Owned state. Note that on snoop hits we always pass 556 * the line as Modified and never Owned. In the case of an Owned 557 * line we proceed to invalidate all other copies. 558 * 559 * On a cache fill (see Cache::handleFill), we check hasSharers 560 * first, ignoring the cacheResponding flag if hasSharers is set. 561 * A line is consequently allocated as: 562 * 563 * hasSharers cacheResponding state 564 * true false Shared 565 * true true Shared 566 * false false Exclusive 567 * false true Modified 568 */ 569 void setCacheResponding() 570 { 571 assert(isRequest()); 572 assert(!flags.isSet(CACHE_RESPONDING)); 573 flags.set(CACHE_RESPONDING); 574 } 575 bool cacheResponding() const { return flags.isSet(CACHE_RESPONDING); } 576 /** 577 * On fills, the hasSharers flag is used by the caches in 578 * combination with the cacheResponding flag, as clarified 579 * above. If the hasSharers flag is not set, the packet is passing 580 * writable. Thus, a response from a memory passes the line as 581 * writable by default. 582 * 583 * The hasSharers flag is also used by upstream caches to inform a 584 * downstream cache that they have the block (by calling 585 * setHasSharers on snoop request packets that hit in upstream 586 * cachs tags or MSHRs). If the snoop packet has sharers, a 587 * downstream cache is prevented from passing a dirty line upwards 588 * if it was not explicitly asked for a writable copy. See 589 * Cache::satisfyCpuSideRequest. 
590 * 591 * The hasSharers flag is also used on writebacks, in 592 * combination with the WritbackClean or WritebackDirty commands, 593 * to allocate the block downstream either as: 594 * 595 * command hasSharers state 596 * WritebackDirty false Modified 597 * WritebackDirty true Owned 598 * WritebackClean false Exclusive 599 * WritebackClean true Shared 600 */ 601 void setHasSharers() { flags.set(HAS_SHARERS); } 602 bool hasSharers() const { return flags.isSet(HAS_SHARERS); } 603 //@} 604 605 /** 606 * The express snoop flag is used for two purposes. Firstly, it is 607 * used to bypass flow control for normal (non-snoop) requests 608 * going downstream in the memory system. In cases where a cache 609 * is responding to a snoop from another cache (it had a dirty 610 * line), but the line is not writable (and there are possibly 611 * other copies), the express snoop flag is set by the downstream 612 * cache to invalidate all other copies in zero time. Secondly, 613 * the express snoop flag is also set to be able to distinguish 614 * snoop packets that came from a downstream cache, rather than 615 * snoop packets from neighbouring caches. 616 */ 617 void setExpressSnoop() { flags.set(EXPRESS_SNOOP); } 618 bool isExpressSnoop() const { return flags.isSet(EXPRESS_SNOOP); } 619 620 /** 621 * On responding to a snoop request (which only happens for 622 * Modified or Owned lines), make sure that we can transform an 623 * Owned response to a Modified one. If this flag is not set, the 624 * responding cache had the line in the Owned state, and there are 625 * possibly other Shared copies in the memory system. A downstream 626 * cache helps in orchestrating the invalidation of these copies 627 * by sending out the appropriate express snoops. 
628 */ 629 void setResponderHadWritable() 630 { 631 assert(cacheResponding()); 632 assert(!responderHadWritable()); 633 flags.set(RESPONDER_HAD_WRITABLE); 634 } 635 bool responderHadWritable() const 636 { return flags.isSet(RESPONDER_HAD_WRITABLE); } 637 638 /** 639 * A writeback/writeclean cmd gets propagated further downstream 640 * by the receiver when the flag is set. 641 */ 642 void setWriteThrough() 643 { 644 assert(cmd.isWrite() && 645 (cmd.isEviction() || cmd == MemCmd::WriteClean)); 646 flags.set(WRITE_THROUGH); 647 } 648 void clearWriteThrough() { flags.clear(WRITE_THROUGH); } 649 bool writeThrough() const { return flags.isSet(WRITE_THROUGH); } 650 651 /** 652 * Set when a request hits in a cache and the cache is not going 653 * to respond. This is used by the crossbar to coordinate 654 * responses for cache maintenance operations. 655 */ 656 void setSatisfied() 657 { 658 assert(cmd.isClean()); 659 assert(!flags.isSet(SATISFIED)); 660 flags.set(SATISFIED); 661 } 662 bool satisfied() const { return flags.isSet(SATISFIED); } 663 664 void setSuppressFuncError() { flags.set(SUPPRESS_FUNC_ERROR); } 665 bool suppressFuncError() const { return flags.isSet(SUPPRESS_FUNC_ERROR); } 666 void setBlockCached() { flags.set(BLOCK_CACHED); } 667 bool isBlockCached() const { return flags.isSet(BLOCK_CACHED); } 668 void clearBlockCached() { flags.clear(BLOCK_CACHED); } 669 670 // Network error conditions... encapsulate them as methods since 671 // their encoding keeps changing (from result field to command 672 // field, etc.) 673 void 674 setBadAddress() 675 { 676 assert(isResponse()); 677 cmd = MemCmd::BadAddressError; 678 } 679 680 void copyError(Packet *pkt) { assert(pkt->isError()); cmd = pkt->cmd; } 681 682 Addr getAddr() const { assert(flags.isSet(VALID_ADDR)); return addr; } 683 /** 684 * Update the address of this packet mid-transaction. This is used 685 * by the address mapper to change an already set address to a new 686 * one based on the system configuration. 
It is intended to remap 687 * an existing address, so it asserts that the current address is 688 * valid. 689 */ 690 void setAddr(Addr _addr) { assert(flags.isSet(VALID_ADDR)); addr = _addr; } 691 692 unsigned getSize() const { assert(flags.isSet(VALID_SIZE)); return size; } 693 694 Addr getOffset(unsigned int blk_size) const 695 { 696 return getAddr() & Addr(blk_size - 1); 697 } 698 699 Addr getBlockAddr(unsigned int blk_size) const 700 { 701 return getAddr() & ~(Addr(blk_size - 1)); 702 } 703 704 bool isSecure() const 705 { 706 assert(flags.isSet(VALID_ADDR)); 707 return _isSecure; 708 } 709 710 /** 711 * Accessor function to atomic op. 712 */ 713 AtomicOpFunctor *getAtomicOp() const { return req->getAtomicOpFunctor(); } 714 bool isAtomicOp() const { return req->isAtomic(); } 715 716 /** 717 * It has been determined that the SC packet should successfully update 718 * memory. Therefore, convert this SC packet to a normal write. 719 */ 720 void 721 convertScToWrite() 722 { 723 assert(isLLSC()); 724 assert(isWrite()); 725 cmd = MemCmd::WriteReq; 726 } 727 728 /** 729 * When ruby is in use, Ruby will monitor the cache line and the 730 * phys memory should treat LL ops as normal reads. 731 */ 732 void 733 convertLlToRead() 734 { 735 assert(isLLSC()); 736 assert(isRead()); 737 cmd = MemCmd::ReadReq; 738 } 739 740 /** 741 * Constructor. Note that a Request object must be constructed 742 * first, but the Requests's physical address and size fields need 743 * not be valid. The command must be supplied. 744 */ 745 Packet(const RequestPtr _req, MemCmd _cmd)
| 322 /// A pointer to the original request. 323 const RequestPtr req; 324 325 private: 326 /** 327 * A pointer to the data being transfered. It can be differnt 328 * sizes at each level of the heirarchy so it belongs in the 329 * packet, not request. This may or may not be populated when a 330 * responder recieves the packet. If not populated it memory should 331 * be allocated. 332 */ 333 PacketDataPtr data; 334 335 /// The address of the request. This address could be virtual or 336 /// physical, depending on the system configuration. 337 Addr addr; 338 339 /// True if the request targets the secure memory space. 340 bool _isSecure; 341 342 /// The size of the request or transfer. 343 unsigned size; 344 345 /** 346 * Track the bytes found that satisfy a functional read. 347 */ 348 std::vector<bool> bytesValid; 349 350 public: 351 352 /** 353 * The extra delay from seeing the packet until the header is 354 * transmitted. This delay is used to communicate the crossbar 355 * forwarding latency to the neighbouring object (e.g. a cache) 356 * that actually makes the packet wait. As the delay is relative, 357 * a 32-bit unsigned should be sufficient. 358 */ 359 uint32_t headerDelay; 360 361 /** 362 * Keep track of the extra delay incurred by snooping upwards 363 * before sending a request down the memory system. This is used 364 * by the coherent crossbar to account for the additional request 365 * delay. 366 */ 367 uint32_t snoopDelay; 368 369 /** 370 * The extra pipelining delay from seeing the packet until the end of 371 * payload is transmitted by the component that provided it (if 372 * any). This includes the header delay. Similar to the header 373 * delay, this is used to make up for the fact that the 374 * crossbar does not make the packet wait. As the delay is 375 * relative, a 32-bit unsigned should be sufficient. 
376 */ 377 uint32_t payloadDelay; 378 379 /** 380 * A virtual base opaque structure used to hold state associated 381 * with the packet (e.g., an MSHR), specific to a MemObject that 382 * sees the packet. A pointer to this state is returned in the 383 * packet's response so that the MemObject in question can quickly 384 * look up the state needed to process it. A specific subclass 385 * would be derived from this to carry state specific to a 386 * particular sending device. 387 * 388 * As multiple MemObjects may add their SenderState throughout the 389 * memory system, the SenderStates create a stack, where a 390 * MemObject can add a new Senderstate, as long as the 391 * predecessing SenderState is restored when the response comes 392 * back. For this reason, the predecessor should always be 393 * populated with the current SenderState of a packet before 394 * modifying the senderState field in the request packet. 395 */ 396 struct SenderState 397 { 398 SenderState* predecessor; 399 SenderState() : predecessor(NULL) {} 400 virtual ~SenderState() {} 401 }; 402 403 /** 404 * Object used to maintain state of a PrintReq. The senderState 405 * field of a PrintReq should always be of this type. 406 */ 407 class PrintReqState : public SenderState 408 { 409 private: 410 /** 411 * An entry in the label stack. 412 */ 413 struct LabelStackEntry 414 { 415 const std::string label; 416 std::string *prefix; 417 bool labelPrinted; 418 LabelStackEntry(const std::string &_label, std::string *_prefix); 419 }; 420 421 typedef std::list<LabelStackEntry> LabelStack; 422 LabelStack labelStack; 423 424 std::string *curPrefixPtr; 425 426 public: 427 std::ostream &os; 428 const int verbosity; 429 430 PrintReqState(std::ostream &os, int verbosity = 0); 431 ~PrintReqState(); 432 433 /** 434 * Returns the current line prefix. 
435 */ 436 const std::string &curPrefix() { return *curPrefixPtr; } 437 438 /** 439 * Push a label onto the label stack, and prepend the given 440 * prefix string onto the current prefix. Labels will only be 441 * printed if an object within the label's scope is printed. 442 */ 443 void pushLabel(const std::string &lbl, 444 const std::string &prefix = " "); 445 446 /** 447 * Pop a label off the label stack. 448 */ 449 void popLabel(); 450 451 /** 452 * Print all of the pending unprinted labels on the 453 * stack. Called by printObj(), so normally not called by 454 * users unless bypassing printObj(). 455 */ 456 void printLabels(); 457 458 /** 459 * Print a Printable object to os, because it matched the 460 * address on a PrintReq. 461 */ 462 void printObj(Printable *obj); 463 }; 464 465 /** 466 * This packet's sender state. Devices should use dynamic_cast<> 467 * to cast to the state appropriate to the sender. The intent of 468 * this variable is to allow a device to attach extra information 469 * to a request. A response packet must return the sender state 470 * that was attached to the original request (even if a new packet 471 * is created). 472 */ 473 SenderState *senderState; 474 475 /** 476 * Push a new sender state to the packet and make the current 477 * sender state the predecessor of the new one. This should be 478 * prefered over direct manipulation of the senderState member 479 * variable. 480 * 481 * @param sender_state SenderState to push at the top of the stack 482 */ 483 void pushSenderState(SenderState *sender_state); 484 485 /** 486 * Pop the top of the state stack and return a pointer to it. This 487 * assumes the current sender state is not NULL. This should be 488 * preferred over direct manipulation of the senderState member 489 * variable. 
490 * 491 * @return The current top of the stack 492 */ 493 SenderState *popSenderState(); 494 495 /** 496 * Go through the sender state stack and return the first instance 497 * that is of type T (as determined by a dynamic_cast). If there 498 * is no sender state of type T, NULL is returned. 499 * 500 * @return The topmost state of type T 501 */ 502 template <typename T> 503 T * findNextSenderState() const 504 { 505 T *t = NULL; 506 SenderState* sender_state = senderState; 507 while (t == NULL && sender_state != NULL) { 508 t = dynamic_cast<T*>(sender_state); 509 sender_state = sender_state->predecessor; 510 } 511 return t; 512 } 513 514 /// Return the string name of the cmd field (for debugging and 515 /// tracing). 516 const std::string &cmdString() const { return cmd.toString(); } 517 518 /// Return the index of this command. 519 inline int cmdToIndex() const { return cmd.toInt(); } 520 521 bool isRead() const { return cmd.isRead(); } 522 bool isWrite() const { return cmd.isWrite(); } 523 bool isUpgrade() const { return cmd.isUpgrade(); } 524 bool isRequest() const { return cmd.isRequest(); } 525 bool isResponse() const { return cmd.isResponse(); } 526 bool needsWritable() const 527 { 528 // we should never check if a response needsWritable, the 529 // request has this flag, and for a response we should rather 530 // look at the hasSharers flag (if not set, the response is to 531 // be considered writable) 532 assert(isRequest()); 533 return cmd.needsWritable(); 534 } 535 bool needsResponse() const { return cmd.needsResponse(); } 536 bool isInvalidate() const { return cmd.isInvalidate(); } 537 bool isEviction() const { return cmd.isEviction(); } 538 bool isClean() const { return cmd.isClean(); } 539 bool fromCache() const { return cmd.fromCache(); } 540 bool isWriteback() const { return cmd.isWriteback(); } 541 bool hasData() const { return cmd.hasData(); } 542 bool hasRespData() const 543 { 544 MemCmd resp_cmd = cmd.responseCommand(); 545 return 
resp_cmd.hasData(); 546 } 547 bool isLLSC() const { return cmd.isLLSC(); } 548 bool isError() const { return cmd.isError(); } 549 bool isPrint() const { return cmd.isPrint(); } 550 bool isFlush() const { return cmd.isFlush(); } 551 552 //@{ 553 /// Snoop flags 554 /** 555 * Set the cacheResponding flag. This is used by the caches to 556 * signal another cache that they are responding to a request. A 557 * cache will only respond to snoops if it has the line in either 558 * Modified or Owned state. Note that on snoop hits we always pass 559 * the line as Modified and never Owned. In the case of an Owned 560 * line we proceed to invalidate all other copies. 561 * 562 * On a cache fill (see Cache::handleFill), we check hasSharers 563 * first, ignoring the cacheResponding flag if hasSharers is set. 564 * A line is consequently allocated as: 565 * 566 * hasSharers cacheResponding state 567 * true false Shared 568 * true true Shared 569 * false false Exclusive 570 * false true Modified 571 */ 572 void setCacheResponding() 573 { 574 assert(isRequest()); 575 assert(!flags.isSet(CACHE_RESPONDING)); 576 flags.set(CACHE_RESPONDING); 577 } 578 bool cacheResponding() const { return flags.isSet(CACHE_RESPONDING); } 579 /** 580 * On fills, the hasSharers flag is used by the caches in 581 * combination with the cacheResponding flag, as clarified 582 * above. If the hasSharers flag is not set, the packet is passing 583 * writable. Thus, a response from a memory passes the line as 584 * writable by default. 585 * 586 * The hasSharers flag is also used by upstream caches to inform a 587 * downstream cache that they have the block (by calling 588 * setHasSharers on snoop request packets that hit in upstream 589 * cachs tags or MSHRs). If the snoop packet has sharers, a 590 * downstream cache is prevented from passing a dirty line upwards 591 * if it was not explicitly asked for a writable copy. See 592 * Cache::satisfyCpuSideRequest. 
593 * 594 * The hasSharers flag is also used on writebacks, in 595 * combination with the WritbackClean or WritebackDirty commands, 596 * to allocate the block downstream either as: 597 * 598 * command hasSharers state 599 * WritebackDirty false Modified 600 * WritebackDirty true Owned 601 * WritebackClean false Exclusive 602 * WritebackClean true Shared 603 */ 604 void setHasSharers() { flags.set(HAS_SHARERS); } 605 bool hasSharers() const { return flags.isSet(HAS_SHARERS); } 606 //@} 607 608 /** 609 * The express snoop flag is used for two purposes. Firstly, it is 610 * used to bypass flow control for normal (non-snoop) requests 611 * going downstream in the memory system. In cases where a cache 612 * is responding to a snoop from another cache (it had a dirty 613 * line), but the line is not writable (and there are possibly 614 * other copies), the express snoop flag is set by the downstream 615 * cache to invalidate all other copies in zero time. Secondly, 616 * the express snoop flag is also set to be able to distinguish 617 * snoop packets that came from a downstream cache, rather than 618 * snoop packets from neighbouring caches. 619 */ 620 void setExpressSnoop() { flags.set(EXPRESS_SNOOP); } 621 bool isExpressSnoop() const { return flags.isSet(EXPRESS_SNOOP); } 622 623 /** 624 * On responding to a snoop request (which only happens for 625 * Modified or Owned lines), make sure that we can transform an 626 * Owned response to a Modified one. If this flag is not set, the 627 * responding cache had the line in the Owned state, and there are 628 * possibly other Shared copies in the memory system. A downstream 629 * cache helps in orchestrating the invalidation of these copies 630 * by sending out the appropriate express snoops. 
631 */ 632 void setResponderHadWritable() 633 { 634 assert(cacheResponding()); 635 assert(!responderHadWritable()); 636 flags.set(RESPONDER_HAD_WRITABLE); 637 } 638 bool responderHadWritable() const 639 { return flags.isSet(RESPONDER_HAD_WRITABLE); } 640 641 /** 642 * A writeback/writeclean cmd gets propagated further downstream 643 * by the receiver when the flag is set. 644 */ 645 void setWriteThrough() 646 { 647 assert(cmd.isWrite() && 648 (cmd.isEviction() || cmd == MemCmd::WriteClean)); 649 flags.set(WRITE_THROUGH); 650 } 651 void clearWriteThrough() { flags.clear(WRITE_THROUGH); } 652 bool writeThrough() const { return flags.isSet(WRITE_THROUGH); } 653 654 /** 655 * Set when a request hits in a cache and the cache is not going 656 * to respond. This is used by the crossbar to coordinate 657 * responses for cache maintenance operations. 658 */ 659 void setSatisfied() 660 { 661 assert(cmd.isClean()); 662 assert(!flags.isSet(SATISFIED)); 663 flags.set(SATISFIED); 664 } 665 bool satisfied() const { return flags.isSet(SATISFIED); } 666 667 void setSuppressFuncError() { flags.set(SUPPRESS_FUNC_ERROR); } 668 bool suppressFuncError() const { return flags.isSet(SUPPRESS_FUNC_ERROR); } 669 void setBlockCached() { flags.set(BLOCK_CACHED); } 670 bool isBlockCached() const { return flags.isSet(BLOCK_CACHED); } 671 void clearBlockCached() { flags.clear(BLOCK_CACHED); } 672 673 // Network error conditions... encapsulate them as methods since 674 // their encoding keeps changing (from result field to command 675 // field, etc.) 676 void 677 setBadAddress() 678 { 679 assert(isResponse()); 680 cmd = MemCmd::BadAddressError; 681 } 682 683 void copyError(Packet *pkt) { assert(pkt->isError()); cmd = pkt->cmd; } 684 685 Addr getAddr() const { assert(flags.isSet(VALID_ADDR)); return addr; } 686 /** 687 * Update the address of this packet mid-transaction. This is used 688 * by the address mapper to change an already set address to a new 689 * one based on the system configuration. 
It is intended to remap 690 * an existing address, so it asserts that the current address is 691 * valid. 692 */ 693 void setAddr(Addr _addr) { assert(flags.isSet(VALID_ADDR)); addr = _addr; } 694 695 unsigned getSize() const { assert(flags.isSet(VALID_SIZE)); return size; } 696 697 Addr getOffset(unsigned int blk_size) const 698 { 699 return getAddr() & Addr(blk_size - 1); 700 } 701 702 Addr getBlockAddr(unsigned int blk_size) const 703 { 704 return getAddr() & ~(Addr(blk_size - 1)); 705 } 706 707 bool isSecure() const 708 { 709 assert(flags.isSet(VALID_ADDR)); 710 return _isSecure; 711 } 712 713 /** 714 * Accessor function to atomic op. 715 */ 716 AtomicOpFunctor *getAtomicOp() const { return req->getAtomicOpFunctor(); } 717 bool isAtomicOp() const { return req->isAtomic(); } 718 719 /** 720 * It has been determined that the SC packet should successfully update 721 * memory. Therefore, convert this SC packet to a normal write. 722 */ 723 void 724 convertScToWrite() 725 { 726 assert(isLLSC()); 727 assert(isWrite()); 728 cmd = MemCmd::WriteReq; 729 } 730 731 /** 732 * When ruby is in use, Ruby will monitor the cache line and the 733 * phys memory should treat LL ops as normal reads. 734 */ 735 void 736 convertLlToRead() 737 { 738 assert(isLLSC()); 739 assert(isRead()); 740 cmd = MemCmd::ReadReq; 741 } 742 743 /** 744 * Constructor. Note that a Request object must be constructed 745 * first, but the Requests's physical address and size fields need 746 * not be valid. The command must be supplied. 747 */ 748 Packet(const RequestPtr _req, MemCmd _cmd)
|
746 : cmd(_cmd), req(_req), data(nullptr), addr(0), _isSecure(false), 747 size(0), headerDelay(0), snoopDelay(0), payloadDelay(0), 748 senderState(NULL)
| 749 : cmd(_cmd), id((PacketId)_req), req(_req), data(nullptr), addr(0), 750 _isSecure(false), size(0), headerDelay(0), snoopDelay(0), 751 payloadDelay(0), senderState(NULL)
|
749 { 750 if (req->hasPaddr()) { 751 addr = req->getPaddr(); 752 flags.set(VALID_ADDR); 753 _isSecure = req->isSecure(); 754 } 755 if (req->hasSize()) { 756 size = req->getSize(); 757 flags.set(VALID_SIZE); 758 } 759 } 760 761 /** 762 * Alternate constructor if you are trying to create a packet with 763 * a request that is for a whole block, not the address from the 764 * req. this allows for overriding the size/addr of the req. 765 */
| 752 { 753 if (req->hasPaddr()) { 754 addr = req->getPaddr(); 755 flags.set(VALID_ADDR); 756 _isSecure = req->isSecure(); 757 } 758 if (req->hasSize()) { 759 size = req->getSize(); 760 flags.set(VALID_SIZE); 761 } 762 } 763 764 /** 765 * Alternate constructor if you are trying to create a packet with 766 * a request that is for a whole block, not the address from the 767 * req. this allows for overriding the size/addr of the req. 768 */
|
766 Packet(const RequestPtr _req, MemCmd _cmd, int _blkSize) 767 : cmd(_cmd), req(_req), data(nullptr), addr(0), _isSecure(false), 768 headerDelay(0), snoopDelay(0), payloadDelay(0), 769 senderState(NULL)
| 769 Packet(const RequestPtr _req, MemCmd _cmd, int _blkSize, PacketId _id = 0) 770 : cmd(_cmd), id(_id ? _id : (PacketId)_req), req(_req), data(nullptr), 771 addr(0), _isSecure(false), headerDelay(0), snoopDelay(0), 772 payloadDelay(0), senderState(NULL)
|
770 { 771 if (req->hasPaddr()) { 772 addr = req->getPaddr() & ~(_blkSize - 1); 773 flags.set(VALID_ADDR); 774 _isSecure = req->isSecure(); 775 } 776 size = _blkSize; 777 flags.set(VALID_SIZE); 778 } 779 780 /** 781 * Alternate constructor for copying a packet. Copy all fields 782 * *except* if the original packet's data was dynamic, don't copy 783 * that, as we can't guarantee that the new packet's lifetime is 784 * less than that of the original packet. In this case the new 785 * packet should allocate its own data. 786 */ 787 Packet(const PacketPtr pkt, bool clear_flags, bool alloc_data)
| 773 { 774 if (req->hasPaddr()) { 775 addr = req->getPaddr() & ~(_blkSize - 1); 776 flags.set(VALID_ADDR); 777 _isSecure = req->isSecure(); 778 } 779 size = _blkSize; 780 flags.set(VALID_SIZE); 781 } 782 783 /** 784 * Alternate constructor for copying a packet. Copy all fields 785 * *except* if the original packet's data was dynamic, don't copy 786 * that, as we can't guarantee that the new packet's lifetime is 787 * less than that of the original packet. In this case the new 788 * packet should allocate its own data. 789 */ 790 Packet(const PacketPtr pkt, bool clear_flags, bool alloc_data)
|
788 : cmd(pkt->cmd), req(pkt->req),
| 791 : cmd(pkt->cmd), id(pkt->id), req(pkt->req),
|
          data(nullptr),
          addr(pkt->addr), _isSecure(pkt->_isSecure), size(pkt->size),
          bytesValid(pkt->bytesValid),
          headerDelay(pkt->headerDelay),
          snoopDelay(0),
          payloadDelay(pkt->payloadDelay),
          senderState(pkt->senderState)
    {
        if (!clear_flags)
            flags.set(pkt->flags & COPY_FLAGS);

        // the address and size validity bits are always carried over
        // to the copy, even when the other flags are cleared
        flags.set(pkt->flags & (VALID_ADDR|VALID_SIZE));

        // should we allocate space for data, or not, the express
        // snoops do not need to carry any data as they only serve to
        // co-ordinate state changes
        if (alloc_data) {
            // even if asked to allocate data, if the original packet
            // holds static data, then the sender will not be doing
            // any memcpy on receiving the response, thus we simply
            // carry the pointer forward
            if (pkt->flags.isSet(STATIC_DATA)) {
                data = pkt->data;
                flags.set(STATIC_DATA);
            } else {
                allocate();
            }
        }
    }

    /**
     * Generate the appropriate read MemCmd based on the Request flags.
     */
    static MemCmd
    makeReadCmd(const RequestPtr req)
    {
        if (req->isLLSC())
            return MemCmd::LoadLockedReq;
        else if (req->isPrefetch())
            return MemCmd::SoftPFReq;
        else
            return MemCmd::ReadReq;
    }

    /**
     * Generate the appropriate write MemCmd based on the Request flags.
     */
    static MemCmd
    makeWriteCmd(const RequestPtr req)
    {
        if (req->isLLSC())
            return MemCmd::StoreCondReq;
        else if (req->isSwap())
            return MemCmd::SwapReq;
        else if (req->isCacheInvalidate()) {
            // a clean-and-invalidate request maps to a different
            // command than a plain invalidation
            return req->isCacheClean() ? MemCmd::CleanInvalidReq :
                MemCmd::InvalidateReq;
        } else if (req->isCacheClean()) {
            return MemCmd::CleanSharedReq;
        } else
            return MemCmd::WriteReq;
    }

    /**
     * Constructor-like methods that return Packets based on Request objects.
     * Fine-tune the MemCmd type if it's not a vanilla read or write.
     */
    static PacketPtr
    createRead(const RequestPtr req)
    {
        return new Packet(req, makeReadCmd(req));
    }

    static PacketPtr
    createWrite(const RequestPtr req)
    {
        return new Packet(req, makeWriteCmd(req));
    }

    /**
     * clean up packet variables
     */
    ~Packet()
    {
        // Delete the request object if this is a request packet which
        // does not need a response, because the requester will not get
        // a chance. If the request packet needs a response then the
        // request will be deleted on receipt of the response
        // packet. We also make sure to never delete the request for
        // express snoops, even for cases when responses are not
        // needed (CleanEvict and Writeback), since the snoop packet
        // re-uses the same request.
        if (req && isRequest() && !needsResponse() &&
            !isExpressSnoop()) {
            delete req;
        }
        deleteData();
    }

    /**
     * Take a request packet and modify it in place to be suitable for
     * returning as a response to that request.
     */
    void
    makeResponse()
    {
        assert(needsResponse());
        assert(isRequest());
        cmd = cmd.responseCommand();

        // responses are never express, even if the snoop that
        // triggered them was
        flags.clear(EXPRESS_SNOOP);
    }

    void
    makeAtomicResponse()
    {
        makeResponse();
    }

    void
    makeTimingResponse()
    {
        makeResponse();
    }

    void
    setFunctionalResponseStatus(bool success)
    {
        if (!success) {
            if (isWrite()) {
                cmd = MemCmd::FunctionalWriteError;
            } else {
                cmd = MemCmd::FunctionalReadError;
            }
        }
    }

    void
    setSize(unsigned size)
    {
        // the size may only be set once
        assert(!flags.isSet(VALID_SIZE));

        this->size = size;
        flags.set(VALID_SIZE);
    }


  public:
    /**
     * @{
     * @name Data accessor methods
     */

    /**
     * Set the data pointer to the following value that should not be
     * freed. Static data allows us to do a single memcpy even if
     * multiple packets are required to get from source to destination
     * and back. In essence the pointer is set calling dataStatic on
     * the original packet, and whenever this packet is copied and
     * forwarded the same pointer is passed on. When a packet
     * eventually reaches the destination holding the data, it is
     * copied once into the location originally set. On the way back
     * to the source, no copies are necessary.
     */
    template <typename T>
    void
    dataStatic(T *p)
    {
        assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA));
        data = (PacketDataPtr)p;
        flags.set(STATIC_DATA);
    }

    /**
     * Set the data pointer to the following value that should not be
     * freed. This version of the function allows the pointer passed
     * to us to be const. To avoid issues down the line we cast the
     * constness away, the alternative would be to keep both a const
     * and non-const data pointer and cleverly choose between
     * them. Note that this is only allowed for static data.
     */
    template <typename T>
    void
    dataStaticConst(const T *p)
    {
        assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA));
        data = const_cast<PacketDataPtr>(p);
        flags.set(STATIC_DATA);
    }

    /**
     * Set the data pointer to a value that should have delete []
     * called on it. Dynamic data is local to this packet, and as the
     * packet travels from source to destination, forwarded packets
     * will allocate their own data. When a packet reaches the final
     * destination it will populate the dynamic data of that specific
     * packet, and on the way back towards the source, memcpy will be
     * invoked in every step where a new packet was created e.g. in
     * the caches. Ultimately when the response reaches the source a
     * final memcpy is needed to extract the data from the packet
     * before it is deallocated.
     */
    template <typename T>
    void
    dataDynamic(T *p)
    {
        assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA));
        data = (PacketDataPtr)p;
        flags.set(DYNAMIC_DATA);
    }

    /**
     * get a pointer to the data ptr.
     */
    template <typename T>
    T*
    getPtr()
    {
        assert(flags.isSet(STATIC_DATA|DYNAMIC_DATA));
        return (T*)data;
    }

    template <typename T>
    const T*
    getConstPtr() const
    {
        assert(flags.isSet(STATIC_DATA|DYNAMIC_DATA));
        return (const T*)data;
    }

    /**
     * Get the data in the packet byte swapped from big endian to
     * host endian.
     */
    template <typename T>
    T getBE() const;

    /**
     * Get the data in the packet byte swapped from little endian to
     * host endian.
     */
    template <typename T>
    T getLE() const;

    /**
     * Get the data in the packet byte swapped from the specified
     * endianness.
     */
    template <typename T>
    T get(ByteOrder endian) const;

    /**
     * Get the data in the packet byte swapped from guest to host
     * endian.
     */
    template <typename T>
    T get() const;

    /** Set the value in the data pointer to v as big endian. */
    template <typename T>
    void setBE(T v);

    /** Set the value in the data pointer to v as little endian. */
    template <typename T>
    void setLE(T v);

    /**
     * Set the value in the data pointer to v using the specified
     * endianness.
     */
    template <typename T>
    void set(T v, ByteOrder endian);

    /** Set the value in the data pointer to v as guest endian. */
    template <typename T>
    void set(T v);

    /**
     * Copy data into the packet from the provided pointer.
     */
    void
    setData(const uint8_t *p)
    {
        // we should never be copying data onto itself, which means we
        // must identify packets with static data, as they carry the
        // same pointer from source to destination and back
        assert(p != getPtr<uint8_t>() || flags.isSet(STATIC_DATA));

        if (p != getPtr<uint8_t>())
            // for packet with allocated dynamic data, we copy data from
            // one to the other, e.g. a forwarded response to a response
            std::memcpy(getPtr<uint8_t>(), p, getSize());
    }

    /**
     * Copy data into the packet from the provided block pointer,
     * which is aligned to the given block size.
     */
    void
    setDataFromBlock(const uint8_t *blk_data, int blkSize)
    {
        setData(blk_data + getOffset(blkSize));
    }

    /**
     * Copy data from the packet to the provided block pointer, which
     * is aligned to the given block size.
     */
    void
    writeData(uint8_t *p) const
    {
        std::memcpy(p, getConstPtr<uint8_t>(), getSize());
    }

    /**
     * Copy data from the packet to the memory at the provided pointer.
     */
    void
    writeDataToBlock(uint8_t *blk_data, int blkSize) const
    {
        writeData(blk_data + getOffset(blkSize));
    }

    /**
     * delete the data pointed to in the data pointer. Ok to call no
     * matter how data was allocated.
     */
    void
    deleteData()
    {
        if (flags.isSet(DYNAMIC_DATA))
            delete [] data;

        flags.clear(STATIC_DATA|DYNAMIC_DATA);
        data = NULL;
    }

    /** Allocate memory for the packet. */
    void
    allocate()
    {
        // if either this command or the response command has a data
        // payload, actually allocate space
        if (hasData() || hasRespData()) {
            assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA));
            flags.set(DYNAMIC_DATA);
            data = new uint8_t[getSize()];
        }
    }

    /** @} */

  private: // Private data accessor methods
    /** Get the data in the packet without byte swapping. */
    template <typename T>
    T getRaw() const;

    /** Set the value in the data pointer to v without byte swapping. */
    template <typename T>
    void setRaw(T v);

  public:
    /**
     * Check a functional request against a memory value stored in
     * another packet (i.e. an in-transit request or
     * response). Returns true if the current packet is a read, and
     * the other packet provides the data, which is then copied to the
     * current packet. If the current packet is a write, and the other
     * packet intersects this one, then we update the data
     * accordingly.
     */
    bool
    checkFunctional(PacketPtr other)
    {
        // all packets that are carrying a payload should have a valid
        // data pointer
        return checkFunctional(other, other->getAddr(), other->isSecure(),
                               other->getSize(),
                               other->hasData() ?
                               other->getPtr<uint8_t>() : NULL);
    }

    /**
     * Does the request need to check for cached copies of the same block
     * in the memory hierarchy above.
     **/
    bool
    mustCheckAbove() const
    {
        return cmd == MemCmd::HardPFReq || isEviction();
    }

    /**
     * Is this packet a clean eviction, including both actual clean
     * evict packets, but also clean writebacks.
     */
    bool
    isCleanEviction() const
    {
        return cmd == MemCmd::CleanEvict || cmd == MemCmd::WritebackClean;
    }

    /**
     * Check a functional request against a memory value represented
     * by a base/size pair and an associated data array. If the
     * current packet is a read, it may be satisfied by the memory
     * value. If the current packet is a write, it may update the
     * memory value.
     */
    bool
    checkFunctional(Printable *obj, Addr base, bool is_secure, int size,
                    uint8_t *_data);

    /**
     * Push label for PrintReq (safe to call unconditionally).
     */
    void
    pushLabel(const std::string &lbl)
    {
        if (isPrint())
            safe_cast<PrintReqState*>(senderState)->pushLabel(lbl);
    }

    /**
     * Pop label for PrintReq (safe to call unconditionally).
     */
    void
    popLabel()
    {
        if (isPrint())
            safe_cast<PrintReqState*>(senderState)->popLabel();
    }

    void print(std::ostream &o, int verbosity = 0,
               const std::string &prefix = "") const;

    /**
     * A no-args wrapper of print(std::ostream...)
     * meant to be invoked from DPRINTFs
     * avoiding string overheads in fast mode
     * @return string with the request's type and start<->end addresses
     */
    std::string print() const;
};

#endif //__MEM_PACKET_HH__
| 792 data(nullptr), 793 addr(pkt->addr), _isSecure(pkt->_isSecure), size(pkt->size), 794 bytesValid(pkt->bytesValid), 795 headerDelay(pkt->headerDelay), 796 snoopDelay(0), 797 payloadDelay(pkt->payloadDelay), 798 senderState(pkt->senderState) 799 { 800 if (!clear_flags) 801 flags.set(pkt->flags & COPY_FLAGS); 802 803 flags.set(pkt->flags & (VALID_ADDR|VALID_SIZE)); 804 805 // should we allocate space for data, or not, the express 806 // snoops do not need to carry any data as they only serve to 807 // co-ordinate state changes 808 if (alloc_data) { 809 // even if asked to allocate data, if the original packet 810 // holds static data, then the sender will not be doing 811 // any memcpy on receiving the response, thus we simply 812 // carry the pointer forward 813 if (pkt->flags.isSet(STATIC_DATA)) { 814 data = pkt->data; 815 flags.set(STATIC_DATA); 816 } else { 817 allocate(); 818 } 819 } 820 } 821 822 /** 823 * Generate the appropriate read MemCmd based on the Request flags. 824 */ 825 static MemCmd 826 makeReadCmd(const RequestPtr req) 827 { 828 if (req->isLLSC()) 829 return MemCmd::LoadLockedReq; 830 else if (req->isPrefetch()) 831 return MemCmd::SoftPFReq; 832 else 833 return MemCmd::ReadReq; 834 } 835 836 /** 837 * Generate the appropriate write MemCmd based on the Request flags. 838 */ 839 static MemCmd 840 makeWriteCmd(const RequestPtr req) 841 { 842 if (req->isLLSC()) 843 return MemCmd::StoreCondReq; 844 else if (req->isSwap()) 845 return MemCmd::SwapReq; 846 else if (req->isCacheInvalidate()) { 847 return req->isCacheClean() ? MemCmd::CleanInvalidReq : 848 MemCmd::InvalidateReq; 849 } else if (req->isCacheClean()) { 850 return MemCmd::CleanSharedReq; 851 } else 852 return MemCmd::WriteReq; 853 } 854 855 /** 856 * Constructor-like methods that return Packets based on Request objects. 857 * Fine-tune the MemCmd type if it's not a vanilla read or write. 
858 */ 859 static PacketPtr 860 createRead(const RequestPtr req) 861 { 862 return new Packet(req, makeReadCmd(req)); 863 } 864 865 static PacketPtr 866 createWrite(const RequestPtr req) 867 { 868 return new Packet(req, makeWriteCmd(req)); 869 } 870 871 /** 872 * clean up packet variables 873 */ 874 ~Packet() 875 { 876 // Delete the request object if this is a request packet which 877 // does not need a response, because the requester will not get 878 // a chance. If the request packet needs a response then the 879 // request will be deleted on receipt of the response 880 // packet. We also make sure to never delete the request for 881 // express snoops, even for cases when responses are not 882 // needed (CleanEvict and Writeback), since the snoop packet 883 // re-uses the same request. 884 if (req && isRequest() && !needsResponse() && 885 !isExpressSnoop()) { 886 delete req; 887 } 888 deleteData(); 889 } 890 891 /** 892 * Take a request packet and modify it in place to be suitable for 893 * returning as a response to that request. 894 */ 895 void 896 makeResponse() 897 { 898 assert(needsResponse()); 899 assert(isRequest()); 900 cmd = cmd.responseCommand(); 901 902 // responses are never express, even if the snoop that 903 // triggered them was 904 flags.clear(EXPRESS_SNOOP); 905 } 906 907 void 908 makeAtomicResponse() 909 { 910 makeResponse(); 911 } 912 913 void 914 makeTimingResponse() 915 { 916 makeResponse(); 917 } 918 919 void 920 setFunctionalResponseStatus(bool success) 921 { 922 if (!success) { 923 if (isWrite()) { 924 cmd = MemCmd::FunctionalWriteError; 925 } else { 926 cmd = MemCmd::FunctionalReadError; 927 } 928 } 929 } 930 931 void 932 setSize(unsigned size) 933 { 934 assert(!flags.isSet(VALID_SIZE)); 935 936 this->size = size; 937 flags.set(VALID_SIZE); 938 } 939 940 941 public: 942 /** 943 * @{ 944 * @name Data accessor mehtods 945 */ 946 947 /** 948 * Set the data pointer to the following value that should not be 949 * freed. 
Static data allows us to do a single memcpy even if 950 * multiple packets are required to get from source to destination 951 * and back. In essence the pointer is set calling dataStatic on 952 * the original packet, and whenever this packet is copied and 953 * forwarded the same pointer is passed on. When a packet 954 * eventually reaches the destination holding the data, it is 955 * copied once into the location originally set. On the way back 956 * to the source, no copies are necessary. 957 */ 958 template <typename T> 959 void 960 dataStatic(T *p) 961 { 962 assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA)); 963 data = (PacketDataPtr)p; 964 flags.set(STATIC_DATA); 965 } 966 967 /** 968 * Set the data pointer to the following value that should not be 969 * freed. This version of the function allows the pointer passed 970 * to us to be const. To avoid issues down the line we cast the 971 * constness away, the alternative would be to keep both a const 972 * and non-const data pointer and cleverly choose between 973 * them. Note that this is only allowed for static data. 974 */ 975 template <typename T> 976 void 977 dataStaticConst(const T *p) 978 { 979 assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA)); 980 data = const_cast<PacketDataPtr>(p); 981 flags.set(STATIC_DATA); 982 } 983 984 /** 985 * Set the data pointer to a value that should have delete [] 986 * called on it. Dynamic data is local to this packet, and as the 987 * packet travels from source to destination, forwarded packets 988 * will allocate their own data. When a packet reaches the final 989 * destination it will populate the dynamic data of that specific 990 * packet, and on the way back towards the source, memcpy will be 991 * invoked in every step where a new packet was created e.g. in 992 * the caches. Ultimately when the response reaches the source a 993 * final memcpy is needed to extract the data from the packet 994 * before it is deallocated. 
995 */ 996 template <typename T> 997 void 998 dataDynamic(T *p) 999 { 1000 assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA)); 1001 data = (PacketDataPtr)p; 1002 flags.set(DYNAMIC_DATA); 1003 } 1004 1005 /** 1006 * get a pointer to the data ptr. 1007 */ 1008 template <typename T> 1009 T* 1010 getPtr() 1011 { 1012 assert(flags.isSet(STATIC_DATA|DYNAMIC_DATA)); 1013 return (T*)data; 1014 } 1015 1016 template <typename T> 1017 const T* 1018 getConstPtr() const 1019 { 1020 assert(flags.isSet(STATIC_DATA|DYNAMIC_DATA)); 1021 return (const T*)data; 1022 } 1023 1024 /** 1025 * Get the data in the packet byte swapped from big endian to 1026 * host endian. 1027 */ 1028 template <typename T> 1029 T getBE() const; 1030 1031 /** 1032 * Get the data in the packet byte swapped from little endian to 1033 * host endian. 1034 */ 1035 template <typename T> 1036 T getLE() const; 1037 1038 /** 1039 * Get the data in the packet byte swapped from the specified 1040 * endianness. 1041 */ 1042 template <typename T> 1043 T get(ByteOrder endian) const; 1044 1045 /** 1046 * Get the data in the packet byte swapped from guest to host 1047 * endian. 1048 */ 1049 template <typename T> 1050 T get() const; 1051 1052 /** Set the value in the data pointer to v as big endian. */ 1053 template <typename T> 1054 void setBE(T v); 1055 1056 /** Set the value in the data pointer to v as little endian. */ 1057 template <typename T> 1058 void setLE(T v); 1059 1060 /** 1061 * Set the value in the data pointer to v using the specified 1062 * endianness. 1063 */ 1064 template <typename T> 1065 void set(T v, ByteOrder endian); 1066 1067 /** Set the value in the data pointer to v as guest endian. */ 1068 template <typename T> 1069 void set(T v); 1070 1071 /** 1072 * Copy data into the packet from the provided pointer. 
    /**
     * Copy data into the packet from the provided pointer.
     */
    void
    setData(const uint8_t *p)
    {
        // we should never be copying data onto itself, which means we
        // must identify packets with static data, as they carry the
        // same pointer from source to destination and back
        assert(p != getPtr<uint8_t>() || flags.isSet(STATIC_DATA));

        if (p != getPtr<uint8_t>())
            // for packet with allocated dynamic data, we copy data from
            // one to the other, e.g. a forwarded response to a response
            std::memcpy(getPtr<uint8_t>(), p, getSize());
    }

    /**
     * Copy data into the packet from the provided block pointer,
     * which is aligned to the given block size.
     */
    void
    setDataFromBlock(const uint8_t *blk_data, int blkSize)
    {
        setData(blk_data + getOffset(blkSize));
    }

    /**
     * Copy data from the packet to the memory at the provided
     * pointer.
     */
    void
    writeData(uint8_t *p) const
    {
        std::memcpy(p, getConstPtr<uint8_t>(), getSize());
    }

    /**
     * Copy data from the packet to the provided block pointer, which
     * is aligned to the given block size.
     */
    void
    writeDataToBlock(uint8_t *blk_data, int blkSize) const
    {
        writeData(blk_data + getOffset(blkSize));
    }

    /**
     * Delete the data pointed to in the data pointer. Ok to call no
     * matter how the data was allocated.
     */
    void
    deleteData()
    {
        if (flags.isSet(DYNAMIC_DATA))
            delete [] data;

        flags.clear(STATIC_DATA|DYNAMIC_DATA);
        data = NULL;
    }

    /** Allocate memory for the packet. */
    void
    allocate()
    {
        // if either this command or the response command has a data
        // payload, actually allocate space
        if (hasData() || hasRespData()) {
            assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA));
            flags.set(DYNAMIC_DATA);
            data = new uint8_t[getSize()];
        }
    }

    /** @} */

  private: // Private data accessor methods
    /** Get the data in the packet without byte swapping. */
    template <typename T>
    T getRaw() const;

    /** Set the value in the data pointer to v without byte swapping. */
    template <typename T>
    void setRaw(T v);

  public:
    /**
     * Check a functional request against a memory value stored in
     * another packet (i.e. an in-transit request or
     * response). Returns true if the current packet is a read, and
     * the other packet provides the data, which is then copied to the
     * current packet. If the current packet is a write, and the other
     * packet intersects this one, then we update the data
     * accordingly.
     */
    bool
    checkFunctional(PacketPtr other)
    {
        // all packets that are carrying a payload should have a valid
        // data pointer
        return checkFunctional(other, other->getAddr(), other->isSecure(),
                               other->getSize(),
                               other->hasData() ?
                               other->getPtr<uint8_t>() : NULL);
    }

    /**
     * Does the request need to check for cached copies of the same
     * block in the memory hierarchy above.
     */
    bool
    mustCheckAbove() const
    {
        return cmd == MemCmd::HardPFReq || isEviction();
    }
1189 */ 1190 bool 1191 isCleanEviction() const 1192 { 1193 return cmd == MemCmd::CleanEvict || cmd == MemCmd::WritebackClean; 1194 } 1195 1196 /** 1197 * Check a functional request against a memory value represented 1198 * by a base/size pair and an associated data array. If the 1199 * current packet is a read, it may be satisfied by the memory 1200 * value. If the current packet is a write, it may update the 1201 * memory value. 1202 */ 1203 bool 1204 checkFunctional(Printable *obj, Addr base, bool is_secure, int size, 1205 uint8_t *_data); 1206 1207 /** 1208 * Push label for PrintReq (safe to call unconditionally). 1209 */ 1210 void 1211 pushLabel(const std::string &lbl) 1212 { 1213 if (isPrint()) 1214 safe_cast<PrintReqState*>(senderState)->pushLabel(lbl); 1215 } 1216 1217 /** 1218 * Pop label for PrintReq (safe to call unconditionally). 1219 */ 1220 void 1221 popLabel() 1222 { 1223 if (isPrint()) 1224 safe_cast<PrintReqState*>(senderState)->popLabel(); 1225 } 1226 1227 void print(std::ostream &o, int verbosity = 0, 1228 const std::string &prefix = "") const; 1229 1230 /** 1231 * A no-args wrapper of print(std::ostream...) 1232 * meant to be invoked from DPRINTFs 1233 * avoiding string overheads in fast mode 1234 * @return string with the request's type and start<->end addresses 1235 */ 1236 std::string print() const; 1237}; 1238 1239#endif //__MEM_PACKET_HH
|