mshr.hh (13349:20890038e8a0)
/*
 * Copyright (c) 2012-2013, 2015-2016, 2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Miss Status and Handling Register (MSHR) declaration.
 */

#ifndef __MEM_CACHE_MSHR_HH__
#define __MEM_CACHE_MSHR_HH__

#include <cassert>
#include <iosfwd>
#include <list>
#include <string>
#include <vector>

#include "base/printable.hh"
#include "base/types.hh"
#include "mem/cache/queue_entry.hh"
#include "mem/packet.hh"
#include "mem/request.hh"
#include "sim/core.hh"

class BaseCache;

/**
 * Miss Status and Handling Register. This class keeps all the information
 * needed to handle a cache miss including a list of target requests.
 * @sa \ref gem5MemorySystem "gem5 Memory System"
 */
class MSHR : public QueueEntry, public Printable
{

    /**
     * Consider the queues friends to avoid making everything public.
     */
    template<typename Entry>
    friend class Queue;
    friend class MSHRQueue;

  private:

    /** Flag set by downstream caches */
    bool downstreamPending;

    /**
     * Here we use one flag to track both if:
     *
     * 1. We are going to become owner or not, i.e., we will get the
     * block in an ownership state (Owned or Modified) with BlkDirty
     * set. This determines whether or not we are going to become the
     * responder and ordering point for future requests that we snoop.
     *
     * 2. We know that we are going to get a writable block, i.e. we
     * will get the block in writable state (Exclusive or Modified
     * state) with BlkWritable set. That determines whether additional
     * targets with needsWritable set will be able to be satisfied, or
     * if not should be put on the deferred list to possibly wait for
     * another request that does give us writable access.
     *
     * Condition 2 is actually just a shortcut that saves us from
     * possibly building a deferred target list and calling
     * promoteWritable() every time we get a writable block. Condition
     * 1, tracking ownership, is what is important. However, we never
     * receive ownership without marking the block dirty, and
     * consequently use pendingModified to track both ownership and
     * writability rather than having separate pendingDirty and
     * pendingWritable flags.
     */
    bool pendingModified;
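    // Illustrative sketch only (an assumption about the surrounding cache
    // code, not part of this header): pendingModified is supplied when the
    // MSHR is marked in service, based on the kind of response the cache
    // expects for the packet it sent downstream, e.g.:
    //
    //     // another cache will respond and no sharers remain, so the
    //     // block should arrive in an owned/writable state
    //     bool pending_modified_resp = !pkt->hasSharers() &&
    //         pkt->cacheResponding();
    //     mshr->markInService(pending_modified_resp);
    //
    // The exact condition used by the owning cache may differ.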

    /** Did we snoop an invalidate while waiting for data? */
    bool postInvalidate;

    /** Did we snoop a read while waiting for data? */
    bool postDowngrade;

  public:

    /** Track if we sent this as a whole line write or not */
    bool wasWholeLineWrite;

    /** True if the entry is just a simple forward from an upper level */
    bool isForward;

    class Target {
      public:

        enum Source {
            FromCPU,
            FromSnoop,
            FromPrefetcher
        };

        const Tick recvTime;  //!< Time when request was received (for stats)
        const Tick readyTime; //!< Time when request is ready to be serviced
        const Counter order;  //!< Global order (for memory consistency mgmt)
        const PacketPtr pkt;  //!< Pending request packet.
        const Source source;  //!< Request from cpu, snoop, or prefetcher?

        /**
         * We use this flag to track whether we have cleared the
         * downstreamPending flag for the MSHR of the cache above
         * where this packet originates from and guard noninitial
         * attempts to clear it.
         *
         * The flag markedPending needs to be updated when the
         * TargetList is in service which can be:
         * 1) during the Target instantiation if the MSHR is in
         * service and the target is not deferred,
         * 2) when the MSHR becomes in service if the target is not
         * deferred,
         * 3) or when the TargetList is promoted (deferredTargets ->
         * targets).
         */
        bool markedPending;

        const bool allocOnFill;   //!< Should the response servicing this
                                  //!< target list allocate in the cache?

        Target(PacketPtr _pkt, Tick _readyTime, Counter _order,
               Source _source, bool _markedPending, bool alloc_on_fill)
            : recvTime(curTick()), readyTime(_readyTime), order(_order),
              pkt(_pkt), source(_source), markedPending(_markedPending),
              allocOnFill(alloc_on_fill)
        {}
    };

    class TargetList : public std::list<Target> {

      public:
        bool needsWritable;
        bool hasUpgrade;
        /** Set when the response should allocate on fill */
        bool allocOnFill;
        /**
         * Determine whether there was at least one non-snooping
         * target coming from another cache.
         */
        bool hasFromCache;

        TargetList();

        /**
         * Use the provided packet and the source to update the
         * flags of this TargetList.
         *
         * @param pkt Packet considered for the flag update
         * @param source Indicates the source of the packet
         * @param alloc_on_fill Whether the pkt would allocate on a fill
         */
        void updateFlags(PacketPtr pkt, Target::Source source,
                         bool alloc_on_fill);

        /**
         * Reset state
         *
         * @param blk_addr Address of the cache block
         * @param blk_size Size of the cache block
         */
        void init(Addr blk_addr, Addr blk_size) {
            blkAddr = blk_addr;
            blkSize = blk_size;
            writesBitmap.resize(blk_size);

            resetFlags();
        }

        void resetFlags() {
            onlyWrites = true;
            std::fill(writesBitmap.begin(), writesBitmap.end(), false);

            needsWritable = false;
            hasUpgrade = false;
            allocOnFill = false;
            hasFromCache = false;
        }

        /**
         * Goes through the list of targets and uses them to populate
         * the flags of this TargetList. When the function returns the
         * flags are consistent with the properties of packets in the
         * list.
         */
        void populateFlags();

        /**
         * Use the provided packet to update the write-related flags of
         * this TargetList, i.e. whether the list still consists only of
         * compatible writes and which bytes of the block those writes
         * cover.
         *
         * @param pkt Packet considered for the flag update
         */
        void updateWriteFlags(PacketPtr pkt) {
            const Request::FlagsType noMergeFlags =
                Request::UNCACHEABLE |
                Request::STRICT_ORDER | Request::MMAPPED_IPR |
                Request::PRIVILEGED | Request::LLSC |
                Request::MEM_SWAP | Request::MEM_SWAP_COND |
                Request::SECURE;

            // if we have already seen writes for the full block stop
            // here, this might be a full line write followed by
            // other compatible requests (e.g., reads)
            if (!isWholeLineWrite()) {
                bool can_merge_write = pkt->isWrite() &&
                    ((pkt->req->getFlags() & noMergeFlags) == 0);
                onlyWrites &= can_merge_write;
                if (onlyWrites) {
                    auto offset = pkt->getOffset(blkSize);
                    auto begin = writesBitmap.begin() + offset;
                    std::fill(begin, begin + pkt->getSize(), true);
                }
            }
        }

        /**
         * Tests if the flags of this TargetList have their default
         * values.
         *
         * @return True if the TargetList is reset, false otherwise.
         */
        bool isReset() const {
            return !needsWritable && !hasUpgrade && !allocOnFill &&
                !hasFromCache && onlyWrites;
        }

        /**
         * Add the specified packet in the TargetList. This function
         * stores information related to the added packet and updates
         * accordingly the flags.
         *
         * @param pkt Packet considered for adding
         * @param readyTime Tick at which the packet is processed by this cache
         * @param order A counter giving a unique id to each target
         * @param source Indicates the source agent of the packet
         * @param markPending Set for deferred targets or pending MSHRs
         * @param alloc_on_fill Whether it should allocate on a fill
         */
        void add(PacketPtr pkt, Tick readyTime, Counter order,
                 Target::Source source, bool markPending, bool alloc_on_fill);

        /**
         * Convert upgrades to the equivalent request if the cache line they
         * refer to would have been invalid (Upgrade -> ReadEx, SC* -> Fail).
         * Used to rejig ordering between targets waiting on an MSHR. */
        void replaceUpgrades();

        void clearDownstreamPending();
        void clearDownstreamPending(iterator begin, iterator end);
        bool trySatisfyFunctional(PacketPtr pkt);
        void print(std::ostream &os, int verbosity,
                   const std::string &prefix) const;

        /**
         * Check if this list contains only compatible writes, and if they
         * span the entire cache line. This is used as part of the
         * miss-packet creation. Note that new requests may arrive after a
         * miss-packet has been created, and for the fill we therefore use
         * the wasWholeLineWrite field.
         */
        bool isWholeLineWrite() const
        {
            return onlyWrites &&
                std::all_of(writesBitmap.begin(),
                            writesBitmap.end(), [](bool i) { return i; });
        }
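        // Illustrative sketch only (hypothetical packets, not part of this
        // header): how the write-tracking state evolves for a 64-byte
        // block. init() is normally invoked when the MSHR is allocated.
        //
        //     tl.init(blk_addr, 64);          // onlyWrites, bitmap cleared
        //     tl.updateWriteFlags(wr_0_31);   // cacheable write, offset 0,
        //                                     // size 32: half the bitmap set
        //     tl.updateWriteFlags(wr_32_63);  // second half: the list is now
        //     assert(tl.isWholeLineWrite());  // a whole-line write
        //     tl.updateWriteFlags(rd_pkt);    // later read: flags untouched,
        //                                     // the line is already covered
        //
        // A read, or a non-mergeable write (e.g. UNCACHEABLE or LLSC), seen
        // before the line is fully covered clears onlyWrites instead.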

      private:
        /** Address of the cache block for this list of targets. */
        Addr blkAddr;

        /** Size of the cache block. */
        Addr blkSize;

        /** Are we only dealing with writes. */
        bool onlyWrites;

        // NOTE: std::vector<bool> is a space-optimized specialization whose
        // iterators may not satisfy the ForwardIterator requirements and
        // therefore it cannot be used for writesBitmap.
        /**
         * Track which bytes are written by requests in this target
         * list.
         */
        std::vector<char> writesBitmap;
    };

    /** A list of MSHRs. */
    typedef std::list<MSHR *> List;
    /** MSHR list iterator. */
    typedef List::iterator Iterator;

    /** The pending* and post* flags are only valid if inService is
     * true. Using the accessor functions lets us detect if these
     * flags are accessed improperly.
     */

    /** True if we need to get a writable copy of the block. */
    bool needsWritable() const { return targets.needsWritable; }

    bool isCleaning() const {
        PacketPtr pkt = targets.front().pkt;
        return pkt->isClean();
    }

    bool isPendingModified() const {
        assert(inService); return pendingModified;
    }

    bool hasPostInvalidate() const {
        assert(inService); return postInvalidate;
    }

    bool hasPostDowngrade() const {
        assert(inService); return postDowngrade;
    }

    bool sendPacket(BaseCache &cache);

    bool allocOnFill() const {
        return targets.allocOnFill;
    }

    /**
     * Determine if there are non-deferred requests from other caches
     *
     * @return true if any of the targets is from another cache
     */
    bool hasFromCache() const {
        return targets.hasFromCache;
    }

  private:
    /**
     * Promotes deferred targets that satisfy a predicate
     *
     * Deferred targets are promoted to the target list if they
     * satisfy a given condition. The operation stops at the first
     * deferred target that doesn't satisfy the condition.
     *
     * @param pred A condition on a Target
     */
    void promoteIf(const std::function<bool (Target &)>& pred);
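    // Illustrative sketch only: the kind of predicate promoteIf() expects.
    // A promoteReadable()-style promotion, for instance, stops at the first
    // deferred target that is a cache maintenance operation or needs a
    // writable copy of the block:
    //
    //     promoteIf([](Target &t) {
    //         return !t.pkt->req->isCacheMaintenance() &&
    //             !t.pkt->needsWritable();
    //     });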

    /**
     * Pointer to this MSHR on the ready list.
     * @sa MissQueue, MSHRQueue::readyList
     */
    Iterator readyIter;

    /**
     * Pointer to this MSHR on the allocated list.
     * @sa MissQueue, MSHRQueue::allocatedList
     */
    Iterator allocIter;

    /** List of all requests that match the address */
    TargetList targets;

    TargetList deferredTargets;

  public:
    /**
     * Check if this MSHR contains only compatible writes, and if they
     * span the entire cache line. This is used as part of the
     * miss-packet creation. Note that new requests may arrive after a
     * miss-packet has been created, and for the fill we therefore use
     * the wasWholeLineWrite field.
     */
    bool isWholeLineWrite() const {
        return targets.isWholeLineWrite();
    }

    /**
     * Allocate a miss to this MSHR.
     * @param blk_addr The address of the block.
     * @param blk_size The number of bytes to request.
     * @param pkt The original miss.
     * @param when_ready When the MSHR should be ready to act upon the miss.
     * @param _order The logical order of this MSHR
     * @param alloc_on_fill Should the cache allocate a block on fill
     */
    void allocate(Addr blk_addr, unsigned blk_size, PacketPtr pkt,
                  Tick when_ready, Counter _order, bool alloc_on_fill);
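    // Illustrative sketch only (the surrounding cache/queue code is an
    // assumption): the typical lifecycle of an MSHR as driven by its owner.
    //
    //     mshr->allocate(blk_addr, blk_size, pkt, when_ready, order,
    //                    alloc_on_fill);              // on the initial miss
    //     mshr->allocateTarget(pkt2, when, order2,
    //                          alloc_on_fill);        // later miss, same block
    //     mshr->markInService(pending_modified_resp); // request sent downstream
    //     // ... response arrives ...
    //     MSHR::TargetList ready = mshr->extractServiceableTargets(resp_pkt);
    //     // service `ready`, then either promote deferred targets or retire
    //     if (!mshr->promoteDeferredTargets())
    //         mshr->deallocate();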

    void markInService(bool pending_modified_resp);

    void clearDownstreamPending();

    /**
     * Mark this MSHR as free.
     */
    void deallocate();

    /**
     * Add a request to the list of targets.
     * @param target The target.
     */
    void allocateTarget(PacketPtr target, Tick when, Counter order,
                        bool alloc_on_fill);
    bool handleSnoop(PacketPtr target, Counter order);

    /** A simple constructor. */
    MSHR();

    /**
     * Returns the current number of allocated targets.
     * @return The current number of allocated targets.
     */
    int getNumTargets() const
    { return targets.size() + deferredTargets.size(); }

    /**
     * Extracts the subset of the targets that can be serviced given a
     * received response. This function returns the targets list
     * unless the response is a ReadRespWithInvalidate. A
     * ReadRespWithInvalidate is the only invalidating response whose
     * invalidation was not expected when the request (a
     * ReadSharedReq) was sent out. For ReadRespWithInvalidate we can
     * safely service only the first FromCPU target and all FromSnoop
     * targets (inform all snoopers that we no longer have the block).
     *
     * @param pkt The response from the downstream memory
     */
    TargetList extractServiceableTargets(PacketPtr pkt);
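    // Illustrative sketch only: how a caller might walk the returned list
    // (resp_pkt and the servicing code are assumptions, not part of this
    // header):
    //
    //     MSHR::TargetList ready = mshr->extractServiceableTargets(resp_pkt);
    //     for (auto &t : ready) {
    //         switch (t.source) {
    //           case MSHR::Target::FromCPU:
    //             // complete the CPU-side packet using resp_pkt's data
    //             break;
    //           case MSHR::Target::FromSnoop:
    //             // let the snooper know we no longer have the block
    //             break;
    //           case MSHR::Target::FromPrefetcher:
    //             // prefetch response; nothing to send back upstream
    //             break;
    //         }
    //     }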

    /**
     * Returns true if there are targets left.
     * @return true if there are targets
     */
    bool hasTargets() const { return !targets.empty(); }

    /**
     * Returns a reference to the first target.
     * @return A pointer to the first target.
     */
    Target *getTarget()
    {
        assert(hasTargets());
        return &targets.front();
    }

    /**
     * Pop first target.
     */
    void popTarget()
    {
        targets.pop_front();
    }

    bool promoteDeferredTargets();

    /**
     * Promotes deferred targets that do not require writable
     *
     * Move targets from the deferred targets list to the target list
     * starting from the first deferred target until the first target
     * that is a cache maintenance operation or needs a writable copy
     * of the block
     */
    void promoteReadable();

    /**
     * Promotes deferred targets that need a writable copy of the block
     *
     * When this MSHR is expecting a writable copy of the block,
     * requests in the deferred target list are moved to the target
     * list up until the first target that is a cache maintenance
     * operation.
     */
    void promoteWritable();

    bool trySatisfyFunctional(PacketPtr pkt);

    /**
     * Prints the contents of this MSHR for debugging.
     */
    void print(std::ostream &os,
               int verbosity = 0,
               const std::string &prefix = "") const;
    /**
     * A no-args wrapper of print(std::ostream...) meant to be
     * invoked from DPRINTFs avoiding string overheads in fast mode
     *
     * @return string with mshr fields + [deferred]targets
     */
    std::string print() const;
};

#endif // __MEM_CACHE_MSHR_HH__