mshr.hh (13351:1d456a63bfbc) mshr.hh (13859:4156ac0c7257)
/*
 * Copyright (c) 2012-2013, 2015-2016, 2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Miss Status and Handling Register (MSHR) declaration.
 */

#ifndef __MEM_CACHE_MSHR_HH__
#define __MEM_CACHE_MSHR_HH__

#include <cassert>
#include <iosfwd>
#include <list>
#include <string>
#include <vector>

#include "base/printable.hh"
#include "base/types.hh"
#include "mem/cache/queue_entry.hh"
#include "mem/packet.hh"
#include "mem/request.hh"
#include "sim/core.hh"

class BaseCache;

/**
 * Miss Status and handling Register. This class keeps all the information
 * needed to handle a cache miss including a list of target requests.
 * @sa \ref gem5MemorySystem "gem5 Memory System"
 */
class MSHR : public QueueEntry, public Printable
{

    /**
     * Consider the queues friends to avoid making everything public.
     */
    template<typename Entry>
    friend class Queue;
    friend class MSHRQueue;

  private:

    /** Flag set by downstream caches */
    bool downstreamPending;

    /**
     * Here we use one flag to track both if:
     *
     * 1. We are going to become owner or not, i.e., we will get the
     * block in an ownership state (Owned or Modified) with BlkDirty
     * set. This determines whether or not we are going to become the
     * responder and ordering point for future requests that we snoop.
     *
     * 2. We know that we are going to get a writable block, i.e. we
     * will get the block in writable state (Exclusive or Modified
     * state) with BlkWritable set. That determines whether additional
     * targets with needsWritable set will be able to be satisfied, or
     * if not should be put on the deferred list to possibly wait for
     * another request that does give us writable access.
     *
     * Condition 2 is actually just a shortcut that saves us from
     * possibly building a deferred target list and calling
     * promoteWritable() every time we get a writable block. Condition
     * 1, tracking ownership, is what is important. However, we never
     * receive ownership without marking the block dirty, and
     * consequently use pendingModified to track both ownership and
     * writability rather than having separate pendingDirty and
     * pendingWritable flags.
     */
    bool pendingModified;

    /** Did we snoop an invalidate while waiting for data? */
    bool postInvalidate;

    /** Did we snoop a read while waiting for data? */
    bool postDowngrade;

  public:

    /** Track if we sent this as a whole line write or not */
    bool wasWholeLineWrite;

    /** True if the entry is just a simple forward from an upper level */
    bool isForward;

    class Target : public QueueEntry::Target {
      public:

        enum Source {
            FromCPU,
            FromSnoop,
            FromPrefetcher
        };

        const Source source;  //!< Request from cpu, memory, or prefetcher?

        /**
         * We use this flag to track whether we have cleared the
         * downstreamPending flag for the MSHR of the cache above
         * where this packet originates from and guard noninitial
         * attempts to clear it.
         *
         * The flag markedPending needs to be updated when the
         * TargetList is in service which can be:
         * 1) during the Target instantiation if the MSHR is in
         * service and the target is not deferred,
         * 2) when the MSHR becomes in service if the target is not
         * deferred,
         * 3) or when the TargetList is promoted (deferredTargets ->
         * targets).
         */
        bool markedPending;

        const bool allocOnFill;   //!< Should the response servicing this
                                  //!< target list allocate in the cache?

        Target(PacketPtr _pkt, Tick _readyTime, Counter _order,
               Source _source, bool _markedPending, bool alloc_on_fill)
            : QueueEntry::Target(_pkt, _readyTime, _order), source(_source),
              markedPending(_markedPending), allocOnFill(alloc_on_fill)
        {}
    };

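    // Illustrative sketch, not part of the original header: a demand miss
    // recorded by the cache typically ends up as one Target appended through
    // TargetList::add() (declared below). The argument values shown here are
    // placeholders; the actual call sites and the markPending policy live in
    // mshr.cc (MSHR::allocate() and MSHR::allocateTarget()):
    //
    //     targets.add(pkt, when_ready, order, Target::FromCPU,
    //                 true /* markPending */, alloc_on_fill);
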
    class TargetList : public std::list<Target> {

      public:
        bool needsWritable;
        bool hasUpgrade;
        /** Set when the response should allocate on fill */
        bool allocOnFill;
        /**
         * Determine whether there was at least one non-snooping
         * target coming from another cache.
         */
        bool hasFromCache;

        TargetList();

        /**
         * Use the provided packet and the source to update the
         * flags of this TargetList.
         *
         * @param pkt Packet considered for the flag update
         * @param source Indicates the source of the packet
         * @param alloc_on_fill Whether the pkt would allocate on a fill
         */
        void updateFlags(PacketPtr pkt, Target::Source source,
                         bool alloc_on_fill);

        /**
         * Reset state
         *
         * @param blk_addr Address of the cache block
         * @param blk_size Size of the cache block
         */
        void init(Addr blk_addr, Addr blk_size) {
            blkAddr = blk_addr;
            blkSize = blk_size;
            writesBitmap.resize(blk_size);

            resetFlags();
        }

        void resetFlags() {
            onlyWrites = true;
            std::fill(writesBitmap.begin(), writesBitmap.end(), false);

            needsWritable = false;
            hasUpgrade = false;
            allocOnFill = false;
            hasFromCache = false;
        }

        /**
         * Goes through the list of targets and uses them to populate
         * the flags of this TargetList. When the function returns the
         * flags are consistent with the properties of packets in the
         * list.
         */
        void populateFlags();

        /**
         * Add the specified packet to the TargetList. This function
         * stores information related to the added packet and updates
         * the flags accordingly.
         *
         * @param pkt Packet considered for adding
         */
        void updateWriteFlags(PacketPtr pkt) {
            const Request::FlagsType noMergeFlags =
                Request::UNCACHEABLE |
                Request::STRICT_ORDER | Request::MMAPPED_IPR |
                Request::PRIVILEGED | Request::LLSC |
                Request::MEM_SWAP | Request::MEM_SWAP_COND |
                Request::SECURE;

            // if we have already seen writes for the full block stop
            // here, this might be a full line write followed by
            // other compatible requests (e.g., reads)
            if (!isWholeLineWrite()) {
                bool can_merge_write = pkt->isWrite() &&
                    ((pkt->req->getFlags() & noMergeFlags) == 0);
                onlyWrites &= can_merge_write;
                if (onlyWrites) {
                    auto offset = pkt->getOffset(blkSize);
                    auto begin = writesBitmap.begin() + offset;
                    std::fill(begin, begin + pkt->getSize(), true);
                }
            }
        }

        /**
         * Tests if the flags of this TargetList have their default
         * values.
         *
         * @return True if the TargetList is reset, false otherwise.
         */
        bool isReset() const {
            return !needsWritable && !hasUpgrade && !allocOnFill &&
                !hasFromCache && onlyWrites;
        }

        /**
         * Add the specified packet to the TargetList. This function
         * stores information related to the added packet and updates
         * the flags accordingly.
         *
         * @param pkt Packet considered for adding
         * @param readyTime Tick at which the packet is processed by this cache
         * @param order A counter giving a unique id to each target
         * @param source Indicates the source agent of the packet
         * @param markPending Set for deferred targets or pending MSHRs
         * @param alloc_on_fill Whether it should allocate on a fill
         */
        void add(PacketPtr pkt, Tick readyTime, Counter order,
                 Target::Source source, bool markPending, bool alloc_on_fill);

        /**
         * Convert upgrades to the equivalent request if the cache line they
         * refer to would have been invalid (Upgrade -> ReadEx, SC* -> Fail).
         * Used to rejig ordering between targets waiting on an MSHR. */
        void replaceUpgrades();

        void clearDownstreamPending();
        void clearDownstreamPending(iterator begin, iterator end);
        bool trySatisfyFunctional(PacketPtr pkt);
        void print(std::ostream &os, int verbosity,
                   const std::string &prefix) const;

        /**
         * Check if this list contains only compatible writes, and if they
         * span the entire cache line. This is used as part of the
         * miss-packet creation. Note that new requests may arrive after a
         * miss-packet has been created, and for the fill we therefore use
         * the wasWholeLineWrite field.
         */
        bool isWholeLineWrite() const
        {
            return onlyWrites &&
                std::all_of(writesBitmap.begin(),
                            writesBitmap.end(), [](bool i) { return i; });
        }

      private:
        /** Address of the cache block for this list of targets. */
        Addr blkAddr;

        /** Size of the cache block. */
        Addr blkSize;

        /** Are we only dealing with writes. */
        bool onlyWrites;

        // NOTE: std::vector<bool> might not satisfy the
        // ForwardIterator requirement and therefore cannot be used
        // for writesBitmap.
        /**
         * Track which bytes are written by requests in this target
         * list.
         */
        std::vector<char> writesBitmap;
    };

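    // Illustrative sketch, not part of the original header: with a 64-byte
    // block, two mergeable 32-byte writes covering offsets 0-31 and 32-63
    // mark every entry of writesBitmap, so isWholeLineWrite() returns true.
    // A later read then no longer clears onlyWrites, because
    // updateWriteFlags() stops updating once the whole line is covered.
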
    /** A list of MSHRs. */
    typedef std::list<MSHR *> List;
    /** MSHR list iterator. */
    typedef List::iterator Iterator;

    /** The pending* and post* flags are only valid if inService is
     *  true. Using the accessor functions lets us detect if these
     *  flags are accessed improperly.
     */

    /** True if we need to get a writable copy of the block. */
    bool needsWritable() const { return targets.needsWritable; }

    bool isCleaning() const {
        PacketPtr pkt = targets.front().pkt;
        return pkt->isClean();
    }

    bool isPendingModified() const {
        assert(inService); return pendingModified;
    }

    bool hasPostInvalidate() const {
        assert(inService); return postInvalidate;
    }

    bool hasPostDowngrade() const {
        assert(inService); return postDowngrade;
    }

    bool sendPacket(BaseCache &cache);

    bool allocOnFill() const {
        return targets.allocOnFill;
    }

    /**
     * Determine if there are non-deferred requests from other caches
     *
     * @return true if any of the targets is from another cache
     */
    bool hasFromCache() const {
        return targets.hasFromCache;
    }

  private:
    /**
     * Promotes deferred targets that satisfy a predicate
     *
     * Deferred targets are promoted to the target list if they
     * satisfy a given condition. The operation stops at the first
     * deferred target that doesn't satisfy the condition.
     *
     * @param pred A condition on a Target
     */
    void promoteIf(const std::function<bool (Target &)>& pred);

    /**
     * Pointer to this MSHR on the ready list.
     * @sa MissQueue, MSHRQueue::readyList
     */
    Iterator readyIter;

    /**
     * Pointer to this MSHR on the allocated list.
     * @sa MissQueue, MSHRQueue::allocatedList
     */
    Iterator allocIter;

    /** List of all requests that match the address */
    TargetList targets;

    TargetList deferredTargets;

  public:
    /**
     * Check if this MSHR contains only compatible writes, and if they
     * span the entire cache line. This is used as part of the
     * miss-packet creation. Note that new requests may arrive after a
     * miss-packet has been created, and for the fill we therefore use
     * the wasWholeLineWrite field.
     */
    bool isWholeLineWrite() const {
        return targets.isWholeLineWrite();
    }

    /**
     * Allocate a miss to this MSHR.
     * @param blk_addr The address of the block.
     * @param blk_size The number of bytes to request.
     * @param pkt The original miss.
     * @param when_ready When the MSHR should be ready to act upon the miss.
     * @param _order The logical order of this MSHR
     * @param alloc_on_fill Should the cache allocate a block on fill
     */
    void allocate(Addr blk_addr, unsigned blk_size, PacketPtr pkt,
                  Tick when_ready, Counter _order, bool alloc_on_fill);

    void markInService(bool pending_modified_resp);

    void clearDownstreamPending();

    /**
     * Mark this MSHR as free.
     */
    void deallocate();

    /**
     * Add a request to the list of targets.
     * @param target The target.
     */
    void allocateTarget(PacketPtr target, Tick when, Counter order,
                        bool alloc_on_fill);
    bool handleSnoop(PacketPtr target, Counter order);

    /** A simple constructor. */
    MSHR();

    /**
     * Returns the current number of allocated targets.
     * @return The current number of allocated targets.
     */
    int getNumTargets() const
    { return targets.size() + deferredTargets.size(); }

    /**
     * Extracts the subset of the targets that can be serviced given a
     * received response. This function returns the targets list
     * unless the response is a ReadRespWithInvalidate. The
     * ReadRespWithInvalidate is the only invalidating response whose
     * invalidation was not expected when the request (a
     * ReadSharedReq) was sent out. For ReadRespWithInvalidate we can
     * safely service only the first FromCPU target and all FromSnoop
     * targets (inform all snoopers that we no longer have the block).
     *
     * @param pkt The response from the downstream memory
     */
    TargetList extractServiceableTargets(PacketPtr pkt);

    /**
     * Returns true if there are targets left.
     * @return true if there are targets
     */
    bool hasTargets() const { return !targets.empty(); }

    /**
     * Returns a pointer to the first target.
     * @return A pointer to the first target.
     */
    QueueEntry::Target *getTarget() override
    {
        assert(hasTargets());
        return &targets.front();
    }

    /**
     * Pop first target.
     */
    void popTarget()
    {
        targets.pop_front();
    }

    bool promoteDeferredTargets();

    /**
     * Promotes deferred targets that do not require writable
     *
     * Move targets from the deferred targets list to the target list
     * starting from the first deferred target until the first target
     * that is a cache maintenance operation or needs a writable copy
     * of the block
     */
    void promoteReadable();

    /**
     * Promotes deferred targets that require writable
     *
     * Requests in the deferred target list are moved to the target
     * list up until the first target that is a cache maintenance
     * operation
     */
    void promoteWritable();

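    // Illustrative sketch, not part of the original header: promoteReadable()
    // and promoteWritable() are thin wrappers around promoteIf(), using a
    // predicate roughly along these lines (the exact checks live in mshr.cc):
    //
    //     promoteIf([](Target &t) {
    //         return !t.pkt->req->isCacheMaintenance() &&
    //                !t.pkt->needsWritable();
    //     });
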
    bool trySatisfyFunctional(PacketPtr pkt);

    /**
     * Adds a delay relative to the current tick to the current MSHR
     * @param delay_ticks the desired delay in ticks
     */
    void delay(Tick delay_ticks)
    {
        assert(readyTime <= curTick());
        readyTime = curTick() + delay_ticks;
    }

    /**
     * Prints the contents of this MSHR for debugging.
     */
    void print(std::ostream &os,
               int verbosity = 0,
               const std::string &prefix = "") const;
    /**
     * A no-args wrapper of print(std::ostream...) meant to be
     * invoked from DPRINTFs avoiding string overheads in fast mode
     *
     * @return string with mshr fields + [deferred]targets
     */
    std::string print() const;
};

#endif // __MEM_CACHE_MSHR_HH__