mshr.cc (revision 13349:20890038e8a0, updated from 12823:ba630bc7a36d)
/*
 * Copyright (c) 2012-2013, 2015-2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nikos Nikoleris
 */

/**
 * @file
 * Miss Status and Handling Register (MSHR) definitions.
 */

#include "mem/cache/mshr.hh"

#include <cassert>
#include <string>

#include "base/logging.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "mem/cache/base.hh"
#include "mem/request.hh"
#include "sim/core.hh"

MSHR::MSHR() : downstreamPending(false),
               pendingModified(false),
               postInvalidate(false), postDowngrade(false),
               wasWholeLineWrite(false), isForward(false)
{
}

MSHR::TargetList::TargetList()
    : needsWritable(false), hasUpgrade(false), allocOnFill(false),
      hasFromCache(false)
{}


void
MSHR::TargetList::updateFlags(PacketPtr pkt, Target::Source source,
                              bool alloc_on_fill)
{
    if (source != Target::FromSnoop) {
        if (pkt->needsWritable()) {
            needsWritable = true;
        }

        // StoreCondReq is effectively an upgrade if it's in an MSHR
        // since it would have been failed already if we didn't have a
        // read-only copy
        if (pkt->isUpgrade() || pkt->cmd == MemCmd::StoreCondReq) {
            hasUpgrade = true;
        }

        // potentially re-evaluate whether we should allocate on a fill or
        // not
        allocOnFill = allocOnFill || alloc_on_fill;

        if (source != Target::FromPrefetcher) {
            hasFromCache = hasFromCache || pkt->fromCache();

            updateWriteFlags(pkt);
        }
    }
}

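// Recompute the summary flags (needsWritable, hasUpgrade, allocOnFill,
// hasFromCache) from scratch by re-examining every buffered target.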
void
MSHR::TargetList::populateFlags()
{
    resetFlags();
    for (auto& t: *this) {
        updateFlags(t.pkt, t.source, t.allocOnFill);
    }
}

inline void
MSHR::TargetList::add(PacketPtr pkt, Tick readyTime,
                      Counter order, Target::Source source, bool markPending,
                      bool alloc_on_fill)
{
    updateFlags(pkt, source, alloc_on_fill);
    if (markPending) {
        // Iterate over the SenderState stack and see if we find
        // an MSHR entry. If we do, set the downstreamPending
        // flag. Otherwise, do nothing.
        MSHR *mshr = pkt->findNextSenderState<MSHR>();
        if (mshr != nullptr) {
            assert(!mshr->downstreamPending);
            mshr->downstreamPending = true;
        } else {
            // No need to clear downstreamPending later
            markPending = false;
        }
    }

    emplace_back(pkt, readyTime, order, source, markPending, alloc_on_fill);
}


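// An upgrade-style request assumes the requester still holds a valid
// (read-only) copy of the line. When an invalidating snoop breaks that
// assumption, convert the command so the full line is fetched again (or
// the store-conditional fails) rather than just acquiring permission.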
static void
replaceUpgrade(PacketPtr pkt)
{
    // remember if the current packet has data allocated
    bool has_data = pkt->hasData() || pkt->hasRespData();

    if (pkt->cmd == MemCmd::UpgradeReq) {
        pkt->cmd = MemCmd::ReadExReq;
        DPRINTF(Cache, "Replacing UpgradeReq with ReadExReq\n");
    } else if (pkt->cmd == MemCmd::SCUpgradeReq) {
        pkt->cmd = MemCmd::SCUpgradeFailReq;
        DPRINTF(Cache, "Replacing SCUpgradeReq with SCUpgradeFailReq\n");
    } else if (pkt->cmd == MemCmd::StoreCondReq) {
        pkt->cmd = MemCmd::StoreCondFailReq;
        DPRINTF(Cache, "Replacing StoreCondReq with StoreCondFailReq\n");
    }

    if (!has_data) {
        // there is no sensible way of setting the data field if the
        // new command actually would carry data
        assert(!pkt->hasData());

        if (pkt->hasRespData()) {
            // we went from a packet that had no data (neither request,
            // nor response), to one that does, and therefore we need to
            // actually allocate space for the data payload
            pkt->allocate();
        }
    }
}


void
MSHR::TargetList::replaceUpgrades()
{
    if (!hasUpgrade)
        return;

    for (auto& t : *this) {
        replaceUpgrade(t.pkt);
    }

    hasUpgrade = false;
}


void
MSHR::TargetList::clearDownstreamPending(MSHR::TargetList::iterator begin,
                                         MSHR::TargetList::iterator end)
{
    for (auto t = begin; t != end; t++) {
        if (t->markedPending) {
            // Iterate over the SenderState stack and see if we find
            // an MSHR entry. If we find one, clear the
            // downstreamPending flag by calling
            // clearDownstreamPending(). This recursively clears the
            // downstreamPending flag in all caches this packet has
            // passed through.
            MSHR *mshr = t->pkt->findNextSenderState<MSHR>();
            if (mshr != nullptr) {
                mshr->clearDownstreamPending();
            }
            t->markedPending = false;
        }
    }
}

void
MSHR::TargetList::clearDownstreamPending()
{
    clearDownstreamPending(begin(), end());
}


bool
MSHR::TargetList::trySatisfyFunctional(PacketPtr pkt)
{
    for (auto& t : *this) {
        if (pkt->trySatisfyFunctional(t.pkt)) {
            return true;
        }
    }

    return false;
}


void
MSHR::TargetList::print(std::ostream &os, int verbosity,
                        const std::string &prefix) const
{
    for (auto& t : *this) {
        const char *s;
        switch (t.source) {
          case Target::FromCPU:
            s = "FromCPU";
            break;
          case Target::FromSnoop:
            s = "FromSnoop";
            break;
          case Target::FromPrefetcher:
            s = "FromPrefetcher";
            break;
          default:
            s = "";
            break;
        }
        ccprintf(os, "%s%s: ", prefix, s);
        t.pkt->print(os, verbosity, "");
        ccprintf(os, "\n");
    }
}


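// Set up this MSHR for a new outstanding miss: record the block address,
// size and security state, initialise both target lists, and enqueue the
// triggering packet as the first target.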
void
MSHR::allocate(Addr blk_addr, unsigned blk_size, PacketPtr target,
               Tick when_ready, Counter _order, bool alloc_on_fill)
{
    blkAddr = blk_addr;
    blkSize = blk_size;
    isSecure = target->isSecure();
    readyTime = when_ready;
    order = _order;
    assert(target);
    isForward = false;
    wasWholeLineWrite = false;
    _isUncacheable = target->req->isUncacheable();
    inService = false;
    downstreamPending = false;

    targets.init(blkAddr, blkSize);
    deferredTargets.init(blkAddr, blkSize);

    // Don't know of a case where we would allocate a new MSHR for a
    // snoop (mem-side request), so set source according to request here
    Target::Source source = (target->cmd == MemCmd::HardPFReq) ?
        Target::FromPrefetcher : Target::FromCPU;
    targets.add(target, when_ready, _order, source, true, alloc_on_fill);
}


void
MSHR::clearDownstreamPending()
{
    assert(downstreamPending);
    downstreamPending = false;
    // recursively clear flag on any MSHRs we will be forwarding
    // responses to
    targets.clearDownstreamPending();
}

void
MSHR::markInService(bool pending_modified_resp)
{
    assert(!inService);

    inService = true;
    pendingModified = targets.needsWritable || pending_modified_resp;
    postInvalidate = postDowngrade = false;

    if (!downstreamPending) {
        // let upstream caches know that the request has made it to a
        // level where it's going to get a response
        targets.clearDownstreamPending();
    }
    // if the line is not considered a whole-line write when sent
    // downstream, make sure it is also not considered a whole-line
    // write when receiving the response, and vice versa
    wasWholeLineWrite = isWholeLineWrite();
}


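// Return the MSHR to its unallocated state; both target lists must already
// be empty (or fully reset) at this point.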
void
MSHR::deallocate()
{
    assert(targets.empty());
    targets.resetFlags();
    assert(deferredTargets.isReset());
    inService = false;
}

/*
 * Adds a target to an MSHR
 */
void
MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order,
                     bool alloc_on_fill)
{
    // assume we'd never issue a prefetch when we've got an
    // outstanding miss
    assert(pkt->cmd != MemCmd::HardPFReq);

    // if there's a request already in service for this MSHR, we will
    // have to defer the new target until after the response if any of
    // the following are true:
    // - there are other targets already deferred
    // - there's a pending invalidate to be applied after the response
    //   comes back (but before this target is processed)
    // - the MSHR's first (and only) non-deferred target is a cache
    //   maintenance packet
    // - the new target is a cache maintenance packet (this is probably
    //   overly conservative but certainly safe)
    // - this target requires a writable block and either we're not
    //   getting a writable block back or we have already snooped
    //   another read request that will downgrade our writable block
    //   to non-writable (Shared or Owned)
    PacketPtr tgt_pkt = targets.front().pkt;
    if (pkt->req->isCacheMaintenance() ||
        tgt_pkt->req->isCacheMaintenance() ||
        !deferredTargets.empty() ||
        (inService &&
         (hasPostInvalidate() ||
          (pkt->needsWritable() &&
           (!isPendingModified() || hasPostDowngrade() || isForward))))) {
        // need to put on deferred list
        if (inService && hasPostInvalidate())
            replaceUpgrade(pkt);
        deferredTargets.add(pkt, whenReady, _order, Target::FromCPU, true,
                            alloc_on_fill);
    } else {
        // No request outstanding, or still OK to append to
        // outstanding request: append to regular target list. Only
        // mark pending if current request hasn't been issued yet
        // (isn't in service).
        targets.add(pkt, whenReady, _order, Target::FromCPU, !inService,
                    alloc_on_fill);
    }
}

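// Handle a snoop that hits this MSHR's block. Returns false if the snoop
// logically precedes our outstanding request and should be handled by the
// cache in the normal way; returns true if this MSHR takes responsibility
// for it, recording a copy of the snoop as a FromSnoop target where a
// response or forwarded invalidation will be needed.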
bool
MSHR::handleSnoop(PacketPtr pkt, Counter _order)
{
    DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

    // when we snoop packets the needsWritable and isInvalidate flags
    // should always be the same, however, this assumes that we never
    // snoop writes as they are currently not marked as invalidations
    panic_if((pkt->needsWritable() != pkt->isInvalidate()) &&
             !pkt->req->isCacheMaintenance(),
             "%s got snoop %s where needsWritable, "
             "does not match isInvalidate", name(), pkt->print());

    if (!inService || (pkt->isExpressSnoop() && downstreamPending)) {
        // Request has not been issued yet, or it's been issued
        // locally but is buffered unissued at some downstream cache
        // which is forwarding us this snoop. Either way, the packet
        // we're snooping logically precedes this MSHR's request, so
        // the snoop has no impact on the MSHR, but must be processed
        // in the standard way by the cache. The only exception is
        // that if we're an L2+ cache buffering an UpgradeReq from a
        // higher-level cache, and the snoop is invalidating, then our
        // buffered upgrades must be converted to read exclusives,
        // since the upper-level cache no longer has a valid copy.
        // That is, even though the upper-level cache got out on its
        // local bus first, some other invalidating transaction
        // reached the global bus before the upgrade did.
        if (pkt->needsWritable() || pkt->req->isCacheInvalidate()) {
            targets.replaceUpgrades();
            deferredTargets.replaceUpgrades();
        }

        return false;
    }

    // From here on down, the request issued by this MSHR logically
    // precedes the request we're snooping.
    if (pkt->needsWritable() || pkt->req->isCacheInvalidate()) {
        // snooped request still precedes the re-request we'll have to
        // issue for deferred targets, if any...
        deferredTargets.replaceUpgrades();
    }

    PacketPtr tgt_pkt = targets.front().pkt;
    if (hasPostInvalidate() || tgt_pkt->req->isCacheInvalidate()) {
        // a prior snoop has already appended an invalidation or a
        // cache invalidation operation is in progress, so logically
        // we don't have the block anymore; no need for further
        // snooping.
        return true;
    }

    if (isPendingModified() || pkt->isInvalidate()) {
        // We need to save and replay the packet in two cases:
        // 1. We're awaiting a writable copy (Modified or Exclusive),
        //    so this MSHR is the ordering point, and we need to respond
        //    after we receive data.
        // 2. It's an invalidation (e.g., UpgradeReq), and we need
        //    to forward the snoop up the hierarchy after the current
        //    transaction completes.

        // Start by determining if we will eventually respond or not,
        // matching the conditions checked in Cache::handleSnoop
        bool will_respond = isPendingModified() && pkt->needsResponse() &&
                            !pkt->isClean();

        // The packet we are snooping may be deleted by the time we
        // actually process the target, and we consequently need to
        // save a copy here. Clear flags and also allocate new data as
        // the original packet data storage may have been deleted by
        // the time we get to process this packet. In the cases where
        // we are not responding after handling the snoop we also need
        // to create a copy of the request to be on the safe side. In
        // the latter case the cache is responsible for deleting both
        // the packet and the request as part of handling the deferred
        // snoop.
        PacketPtr cp_pkt = will_respond ? new Packet(pkt, true, true) :
            new Packet(std::make_shared<Request>(*pkt->req), pkt->cmd,
                       blkSize, pkt->id);

        if (will_respond) {
            // we are the ordering point, and will consequently
            // respond, and depending on whether the packet
            // needsWritable or not we either pass a Shared line or a
            // Modified line
            pkt->setCacheResponding();

            // inform the cache hierarchy that this cache had the line
            // in the Modified state, even if the response is passed
            // as Shared (and thus non-writable)
            pkt->setResponderHadWritable();

            // in the case of an uncacheable request there is no need
            // to set the responderHadWritable flag, but since the
            // recipient does not care there is no harm in doing so
        }
        targets.add(cp_pkt, curTick(), _order, Target::FromSnoop,
                    downstreamPending && targets.needsWritable, false);

        if (pkt->needsWritable() || pkt->isInvalidate()) {
            // This transaction will take away our pending copy
            postInvalidate = true;
        }

        if (isPendingModified() && pkt->isClean()) {
            pkt->setSatisfied();
        }
    }

    if (!pkt->needsWritable() && !pkt->req->isUncacheable()) {
        // This transaction will get a read-shared copy, downgrading
        // our copy if we had a writable one
        postDowngrade = true;
        // make sure that any downstream cache does not respond with a
        // writable (and dirty) copy even if it has one, unless it was
        // explicitly asked for one
        pkt->setHasSharers();
    }

    return true;
}

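// Pop the targets that can be serviced by the incoming response into a
// separate list, leaving behind any targets that will have to re-issue a
// request for a fresh copy of the block.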
MSHR::TargetList
MSHR::extractServiceableTargets(PacketPtr pkt)
{
    TargetList ready_targets;
    ready_targets.init(blkAddr, blkSize);
    // If the downstream MSHR got an invalidation request then we only
    // service the first of the FromCPU targets and any other
    // non-FromCPU target. This way the remaining FromCPU targets
    // issue a new request and get a fresh copy of the block and we
    // avoid memory consistency violations.
    if (pkt->cmd == MemCmd::ReadRespWithInvalidate) {
        auto it = targets.begin();
        assert((it->source == Target::FromCPU) ||
               (it->source == Target::FromPrefetcher));
        ready_targets.push_back(*it);
        it = targets.erase(it);
        while (it != targets.end()) {
            if (it->source == Target::FromCPU) {
                it++;
            } else {
                assert(it->source == Target::FromSnoop);
                ready_targets.push_back(*it);
                it = targets.erase(it);
            }
        }
        ready_targets.populateFlags();
    } else {
        std::swap(ready_targets, targets);
    }
    targets.populateFlags();

    return ready_targets;
}

bool
MSHR::promoteDeferredTargets()
{
    if (targets.empty() && deferredTargets.empty()) {
        // nothing to promote
        return false;
    }

    // the deferred targets can be generally promoted unless they
    // contain a cache maintenance request

    // find the first target that is a cache maintenance request
    auto it = std::find_if(deferredTargets.begin(), deferredTargets.end(),
                           [](MSHR::Target &t) {
                               return t.pkt->req->isCacheMaintenance();
                           });
    if (it == deferredTargets.begin()) {
        // if the first deferred target is a cache maintenance packet
        // then we can promote provided the targets list is empty and
        // we can service it on its own
        if (targets.empty()) {
            targets.splice(targets.end(), deferredTargets, it);
        }
    } else {
        // if a cache maintenance operation exists, we promote all the
        // deferred targets that precede it, or all deferred targets
        // otherwise
        targets.splice(targets.end(), deferredTargets,
                       deferredTargets.begin(), it);
    }

    deferredTargets.populateFlags();
    targets.populateFlags();
    order = targets.front().order;
    readyTime = std::max(curTick(), targets.front().readyTime);

    return true;
}

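// Promote the longest prefix of deferred targets satisfying pred onto the
// main target list, clearing their downstreamPending markers first.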
void
MSHR::promoteIf(const std::function<bool (Target &)>& pred)
{
    // if any of the deferred targets were upper-level cache
    // requests marked downstreamPending, need to clear that
    assert(!downstreamPending); // not pending here anymore

    // find the first target that does not satisfy the condition
    auto last_it = std::find_if_not(deferredTargets.begin(),
                                    deferredTargets.end(),
                                    pred);

    // for the prefix of the deferredTargets [begin(), last_it) clear
    // the downstreamPending flag and move them to the target list
    deferredTargets.clearDownstreamPending(deferredTargets.begin(),
                                           last_it);
    targets.splice(targets.end(), deferredTargets,
                   deferredTargets.begin(), last_it);
    // We need to update the flags for the target lists after the
    // modifications
    deferredTargets.populateFlags();
}

void
MSHR::promoteReadable()
{
    if (!deferredTargets.empty() && !hasPostInvalidate()) {
        // We got a non invalidating response, and we have the block
        // but we have deferred targets which are waiting and they do
        // not need writable. This can happen if the original request
        // was for a cache clean operation and we had a copy of the
        // block. Since we serviced the cache clean operation and we
        // have the block, there's no need to defer the targets, so
        // move them up to the regular target list.

        auto pred = [](Target &t) {
            assert(t.source == Target::FromCPU);
            return !t.pkt->req->isCacheInvalidate() &&
                   !t.pkt->needsWritable();
        };
        promoteIf(pred);
    }
}

void
MSHR::promoteWritable()
{
    if (deferredTargets.needsWritable &&
        !(hasPostInvalidate() || hasPostDowngrade())) {
        // We got a writable response, but we have deferred targets
        // which are waiting to request a writable copy (not because
        // of a pending invalidate). This can happen if the original
        // request was for a read-only block, but we got a writable
        // response anyway. Since we got the writable copy there's no
        // need to defer the targets, so move them up to the regular
        // target list.
        assert(!targets.needsWritable);
        targets.needsWritable = true;

        auto pred = [](Target &t) {
            assert(t.source == Target::FromCPU);
            return !t.pkt->req->isCacheInvalidate();
        };

        promoteIf(pred);
    }
}


bool
MSHR::trySatisfyFunctional(PacketPtr pkt)
{
    // For printing, we treat the MSHR as a whole as a single entity.
    // For other requests, we iterate over the individual targets
    // since that's where the actual data lies.
    if (pkt->isPrint()) {
        pkt->trySatisfyFunctional(this, blkAddr, isSecure, blkSize, nullptr);
        return false;
    } else {
        return (targets.trySatisfyFunctional(pkt) ||
                deferredTargets.trySatisfyFunctional(pkt));
    }
}

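// Called by the MSHR queue when this entry is scheduled to send; simply
// delegates the actual send to the owning cache.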
bool
MSHR::sendPacket(BaseCache &cache)
{
    return cache.sendMSHRQueuePacket(this);
}

void
MSHR::print(std::ostream &os, int verbosity, const std::string &prefix) const
{
    ccprintf(os, "%s[%#llx:%#llx](%s) %s %s %s state: %s %s %s %s %s %s\n",
             prefix, blkAddr, blkAddr + blkSize - 1,
             isSecure ? "s" : "ns",
             isForward ? "Forward" : "",
             allocOnFill() ? "AllocOnFill" : "",
             needsWritable() ? "Wrtbl" : "",
             _isUncacheable ? "Unc" : "",
             inService ? "InSvc" : "",
             downstreamPending ? "DwnPend" : "",
             postInvalidate ? "PostInv" : "",
             postDowngrade ? "PostDowngr" : "",
             hasFromCache() ? "HasFromCache" : "");

    if (!targets.empty()) {
        ccprintf(os, "%s Targets:\n", prefix);
        targets.print(os, verbosity, prefix + " ");
    }
    if (!deferredTargets.empty()) {
        ccprintf(os, "%s Deferred Targets:\n", prefix);
        deferredTargets.print(os, verbosity, prefix + " ");
    }
}

std::string
MSHR::print() const
{
    std::ostringstream str;
    print(str);
    return str.str();
}