mshr.cc (12715:0c8b4f376378 → 12724:4f6fac3191d2)
/*
 * Copyright (c) 2012-2013, 2015-2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 */

/**
 * @file
 * Miss Status and Handling Register (MSHR) definitions.
 */

#include "mem/cache/mshr.hh"

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

#include "base/logging.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "mem/cache/cache.hh"
#include "sim/core.hh"

MSHR::MSHR() : downstreamPending(false),
               pendingModified(false),
               postInvalidate(false), postDowngrade(false),
               isForward(false)
{
}

MSHR::TargetList::TargetList()
    : needsWritable(false), hasUpgrade(false), allocOnFill(false),
      hasFromCache(false)
{}


void
MSHR::TargetList::updateFlags(PacketPtr pkt, Target::Source source,
                              bool alloc_on_fill)
{
    if (source != Target::FromSnoop) {
        if (pkt->needsWritable()) {
            needsWritable = true;
        }

        // StoreCondReq is effectively an upgrade if it's in an MSHR,
        // since it would already have been failed if we didn't have a
        // read-only copy
        if (pkt->isUpgrade() || pkt->cmd == MemCmd::StoreCondReq) {
            hasUpgrade = true;
        }

        // potentially re-evaluate whether we should allocate on a fill or
        // not
        allocOnFill = allocOnFill || alloc_on_fill;

        if (source != Target::FromPrefetcher) {
            hasFromCache = hasFromCache || pkt->fromCache();
        }
    }
}

void
MSHR::TargetList::populateFlags()
{
    resetFlags();
    for (auto& t: *this) {
        updateFlags(t.pkt, t.source, t.allocOnFill);
    }
}

inline void
MSHR::TargetList::add(PacketPtr pkt, Tick readyTime,
                      Counter order, Target::Source source, bool markPending,
                      bool alloc_on_fill)
{
    updateFlags(pkt, source, alloc_on_fill);
    if (markPending) {
        // Iterate over the SenderState stack and see if we find
        // an MSHR entry. If we do, set the downstreamPending
        // flag. Otherwise, do nothing.
        MSHR *mshr = pkt->findNextSenderState<MSHR>();
        if (mshr != nullptr) {
            assert(!mshr->downstreamPending);
            mshr->downstreamPending = true;
        } else {
            // No need to clear downstreamPending later
            markPending = false;
        }
    }

    emplace_back(pkt, readyTime, order, source, markPending, alloc_on_fill);
}


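// Convert a pending upgrade-style request into one that can succeed
// without a valid copy of the block: once an invalidating snoop has
// taken our copy, an UpgradeReq must become a full ReadExReq, and a
// buffered store-conditional can only fail.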
static void
replaceUpgrade(PacketPtr pkt)
{
    // remember if the current packet has data allocated
    bool has_data = pkt->hasData() || pkt->hasRespData();

    if (pkt->cmd == MemCmd::UpgradeReq) {
        pkt->cmd = MemCmd::ReadExReq;
        DPRINTF(Cache, "Replacing UpgradeReq with ReadExReq\n");
    } else if (pkt->cmd == MemCmd::SCUpgradeReq) {
        pkt->cmd = MemCmd::SCUpgradeFailReq;
        DPRINTF(Cache, "Replacing SCUpgradeReq with SCUpgradeFailReq\n");
    } else if (pkt->cmd == MemCmd::StoreCondReq) {
        pkt->cmd = MemCmd::StoreCondFailReq;
        DPRINTF(Cache, "Replacing StoreCondReq with StoreCondFailReq\n");
    }

    if (!has_data) {
        // there is no sensible way of setting the data field if the
        // new command actually would carry data
        assert(!pkt->hasData());

        if (pkt->hasRespData()) {
            // we went from a packet that had no data (neither request,
            // nor response), to one that does, and therefore we need to
            // actually allocate space for the data payload
            pkt->allocate();
        }
    }
}


void
MSHR::TargetList::replaceUpgrades()
{
    if (!hasUpgrade)
        return;

    for (auto& t : *this) {
        replaceUpgrade(t.pkt);
    }

    hasUpgrade = false;
}


void
MSHR::TargetList::clearDownstreamPending()
{
    for (auto& t : *this) {
        if (t.markedPending) {
            // Iterate over the SenderState stack and see if we find
            // an MSHR entry. If we find one, clear the
            // downstreamPending flag by calling
            // clearDownstreamPending(). This recursively clears the
            // downstreamPending flag in all caches this packet has
            // passed through.
            MSHR *mshr = t.pkt->findNextSenderState<MSHR>();
            if (mshr != nullptr) {
                mshr->clearDownstreamPending();
            }
            t.markedPending = false;
        }
    }
}


bool
MSHR::TargetList::checkFunctional(PacketPtr pkt)
{
    for (auto& t : *this) {
        if (pkt->checkFunctional(t.pkt)) {
            return true;
        }
    }

    return false;
}


void
MSHR::TargetList::print(std::ostream &os, int verbosity,
                        const std::string &prefix) const
{
    for (auto& t : *this) {
        const char *s;
        switch (t.source) {
          case Target::FromCPU:
            s = "FromCPU";
            break;
          case Target::FromSnoop:
            s = "FromSnoop";
            break;
          case Target::FromPrefetcher:
            s = "FromPrefetcher";
            break;
          default:
            s = "";
            break;
        }
        ccprintf(os, "%s%s: ", prefix, s);
        t.pkt->print(os, verbosity, "");
        ccprintf(os, "\n");
    }
}


void
MSHR::allocate(Addr blk_addr, unsigned blk_size, PacketPtr target,
               Tick when_ready, Counter _order, bool alloc_on_fill)
{
    // check the target before we dereference it below
    assert(target);
    blkAddr = blk_addr;
    blkSize = blk_size;
    isSecure = target->isSecure();
    readyTime = when_ready;
    order = _order;
    isForward = false;
    _isUncacheable = target->req->isUncacheable();
    inService = false;
    downstreamPending = false;
    assert(targets.isReset());
    // Don't know of a case where we would allocate a new MSHR for a
    // snoop (mem-side request), so set source according to request here
    Target::Source source = (target->cmd == MemCmd::HardPFReq) ?
        Target::FromPrefetcher : Target::FromCPU;
    targets.add(target, when_ready, _order, source, true, alloc_on_fill);
    assert(deferredTargets.isReset());
}


void
MSHR::clearDownstreamPending()
{
    assert(downstreamPending);
    downstreamPending = false;
    // recursively clear flag on any MSHRs we will be forwarding
    // responses to
    targets.clearDownstreamPending();
}

void
MSHR::markInService(bool pending_modified_resp)
{
    assert(!inService);

    inService = true;
    pendingModified = targets.needsWritable || pending_modified_resp;
    postInvalidate = postDowngrade = false;

    if (!downstreamPending) {
        // let upstream caches know that the request has made it to a
        // level where it's going to get a response
        targets.clearDownstreamPending();
    }
}


void
MSHR::deallocate()
{
    assert(targets.empty());
    targets.resetFlags();
    assert(deferredTargets.isReset());
    inService = false;
}

/*
 * Adds a target to an MSHR
 */
void
MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order,
                     bool alloc_on_fill)
{
    // assume we'd never issue a prefetch when we've got an
    // outstanding miss
    assert(pkt->cmd != MemCmd::HardPFReq);

    // uncacheable accesses always allocate a new MSHR, and cacheable
    // accesses ignore any uncacheable MSHRs, thus we should never
    // have targets added if originally allocated uncacheable
    assert(!_isUncacheable);

    // if there's a request already in service for this MSHR, we will
    // have to defer the new target until after the response if any of
    // the following are true:
    // - there are other targets already deferred
    // - there's a pending invalidate to be applied after the response
    //   comes back (but before this target is processed)
    // - the MSHR's first (and only) non-deferred target is a cache
    //   maintenance packet
    // - the new target is a cache maintenance packet (this is probably
    //   overly conservative but certainly safe)
    // - this target requires a writable block and either we're not
    //   getting a writable block back or we have already snooped
    //   another read request that will downgrade our writable block
    //   to non-writable (Shared or Owned)
    PacketPtr tgt_pkt = targets.front().pkt;
    if (pkt->req->isCacheMaintenance() ||
        tgt_pkt->req->isCacheMaintenance() ||
        !deferredTargets.empty() ||
        (inService &&
         (hasPostInvalidate() ||
          (pkt->needsWritable() &&
           (!isPendingModified() || hasPostDowngrade() || isForward))))) {
        // need to put on deferred list
        // an upgrade is no longer useful once our copy is going to be
        // invalidated, so turn it into a full read-for-ownership
        if (inService && hasPostInvalidate())
            replaceUpgrade(pkt);
        deferredTargets.add(pkt, whenReady, _order, Target::FromCPU, true,
                            alloc_on_fill);
    } else {
        // No request outstanding, or still OK to append to
        // outstanding request: append to regular target list. Only
        // mark pending if current request hasn't been issued yet
        // (isn't in service).
        targets.add(pkt, whenReady, _order, Target::FromCPU, !inService,
                    alloc_on_fill);
    }
}

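// Handle a snoop hitting this MSHR: depending on whether the snooped
// packet logically precedes or follows our outstanding request, either
// leave the snoop to the cache, or buffer a copy of it as a deferred
// target to be replayed once the response has arrived.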
bool
MSHR::handleSnoop(PacketPtr pkt, Counter _order)
{
    DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

    // when we snoop packets the needsWritable and isInvalidate flags
    // should always be the same, however, this assumes that we never
    // snoop writes as they are currently not marked as invalidations
    panic_if((pkt->needsWritable() != pkt->isInvalidate()) &&
             !pkt->req->isCacheMaintenance(),
             "%s got snoop %s where needsWritable "
             "does not match isInvalidate", name(), pkt->print());

    if (!inService || (pkt->isExpressSnoop() && downstreamPending)) {
        // Request has not been issued yet, or it's been issued
        // locally but is buffered unissued at some downstream cache
        // which is forwarding us this snoop. Either way, the packet
        // we're snooping logically precedes this MSHR's request, so
        // the snoop has no impact on the MSHR, but must be processed
        // in the standard way by the cache. The only exception is
        // that if we're an L2+ cache buffering an UpgradeReq from a
        // higher-level cache, and the snoop is invalidating, then our
        // buffered upgrades must be converted to read exclusives,
        // since the upper-level cache no longer has a valid copy.
        // That is, even though the upper-level cache got out on its
        // local bus first, some other invalidating transaction
        // reached the global bus before the upgrade did.
        if (pkt->needsWritable() || pkt->req->isCacheInvalidate()) {
            targets.replaceUpgrades();
            deferredTargets.replaceUpgrades();
        }

        return false;
    }

    // From here on down, the request issued by this MSHR logically
    // precedes the request we're snooping.
    if (pkt->needsWritable() || pkt->req->isCacheInvalidate()) {
        // snooped request still precedes the re-request we'll have to
        // issue for deferred targets, if any...
        deferredTargets.replaceUpgrades();
    }

    PacketPtr tgt_pkt = targets.front().pkt;
    if (hasPostInvalidate() || tgt_pkt->req->isCacheInvalidate()) {
        // a prior snoop has already appended an invalidation or a
        // cache invalidation operation is in progress, so logically
        // we don't have the block anymore; no need for further
        // snooping.
        return true;
    }

    if (isPendingModified() || pkt->isInvalidate()) {
        // We need to save and replay the packet in two cases:
        // 1. We're awaiting a writable copy (Modified or Exclusive),
        //    so this MSHR is the ordering point, and we need to respond
        //    after we receive data.
        // 2. It's an invalidation (e.g., UpgradeReq), and we need
        //    to forward the snoop up the hierarchy after the current
        //    transaction completes.

        // Start by determining if we will eventually respond or not,
        // matching the conditions checked in Cache::handleSnoop
        bool will_respond = isPendingModified() && pkt->needsResponse() &&
                            !pkt->isClean();

        // The packet we are snooping may be deleted by the time we
        // actually process the target, and we consequently need to
        // save a copy here. Clear flags and also allocate new data as
        // the original packet data storage may have been deleted by
        // the time we get to process this packet. In the cases where
        // we are not responding after handling the snoop we also need
        // to create a copy of the request to be on the safe side. In
        // the latter case the cache is responsible for deleting both
        // the packet and the request as part of handling the deferred
        // snoop.
        PacketPtr cp_pkt = will_respond ? new Packet(pkt, true, true) :
            new Packet(new Request(*pkt->req), pkt->cmd, blkSize, pkt->id);

        if (will_respond) {
            // we are the ordering point, and will consequently
            // respond, and depending on whether the packet
            // needsWritable or not we either pass a Shared line or a
            // Modified line
            pkt->setCacheResponding();

            // inform the cache hierarchy that this cache had the line
            // in the Modified state, even if the response is passed
            // as Shared (and thus non-writable)
            pkt->setResponderHadWritable();

            // in the case of an uncacheable request there is no need
            // to set the responderHadWritable flag, but since the
            // recipient does not care there is no harm in doing so
        }
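        // buffer the saved snoop as a FromSnoop target; it is replayed
        // once the response to the outstanding request has arrived and
        // the targets of this MSHR are serviced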
        targets.add(cp_pkt, curTick(), _order, Target::FromSnoop,
                    downstreamPending && targets.needsWritable, false);

        if (pkt->needsWritable() || pkt->isInvalidate()) {
            // This transaction will take away our pending copy
            postInvalidate = true;
        }

        if (isPendingModified() && pkt->isClean()) {
            pkt->setSatisfied();
        }
    }

    if (!pkt->needsWritable() && !pkt->req->isUncacheable()) {
        // This transaction will get a read-shared copy, downgrading
        // our copy if we had a writable one
        postDowngrade = true;
        // make sure that any downstream cache does not respond with a
        // writable (and dirty) copy even if it has one, unless it was
        // explicitly asked for one
        pkt->setHasSharers();
    }

    return true;
}

MSHR::TargetList
MSHR::extractServiceableTargets(PacketPtr pkt)
{
    TargetList ready_targets;
    // If the downstream MSHR got an invalidation request then we only
    // service the first of the FromCPU targets and any other
    // non-FromCPU target. This way the remaining FromCPU targets
    // issue a new request and get a fresh copy of the block and we
    // avoid memory consistency violations.
    if (pkt->cmd == MemCmd::ReadRespWithInvalidate) {
        auto it = targets.begin();
        assert((it->source == Target::FromCPU) ||
               (it->source == Target::FromPrefetcher));
        ready_targets.push_back(*it);
        it = targets.erase(it);
        while (it != targets.end()) {
            if (it->source == Target::FromCPU) {
                it++;
            } else {
                assert(it->source == Target::FromSnoop);
                ready_targets.push_back(*it);
                it = targets.erase(it);
            }
        }
        ready_targets.populateFlags();
    } else {
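        // no invalidation in the response: every pending target can be
        // serviced at once, so hand over the entire list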
        std::swap(ready_targets, targets);
    }
    targets.populateFlags();

    return ready_targets;
}

bool
MSHR::promoteDeferredTargets()
{
    if (targets.empty() && deferredTargets.empty()) {
        // nothing to promote
        return false;
    }

    // the deferred targets can be generally promoted unless they
    // contain a cache maintenance request

    // find the first target that is a cache maintenance request
    auto it = std::find_if(deferredTargets.begin(), deferredTargets.end(),
                           [](MSHR::Target &t) {
                               return t.pkt->req->isCacheMaintenance();
                           });
    if (it == deferredTargets.begin()) {
        // if the first deferred target is a cache maintenance packet
        // then we can promote provided the targets list is empty and
        // we can service it on its own
        if (targets.empty()) {
            targets.splice(targets.end(), deferredTargets, it);
        }
    } else {
        // if a cache maintenance operation exists, we promote all the
        // deferred targets that precede it, or all deferred targets
        // otherwise
        targets.splice(targets.end(), deferredTargets,
                       deferredTargets.begin(), it);
    }

    deferredTargets.populateFlags();
    targets.populateFlags();
    order = targets.front().order;
    readyTime = std::max(curTick(), targets.front().readyTime);

    return true;
}


void
MSHR::promoteWritable()
{
    if (deferredTargets.needsWritable &&
        !(hasPostInvalidate() || hasPostDowngrade())) {
        // We got a writable response, but we have deferred targets
        // which are waiting to request a writable copy (not because
        // of a pending invalidate). This can happen if the original
        // request was for a read-only block, but we got a writable
        // response anyway. Since we got the writable copy there's no
        // need to defer the targets, so move them up to the regular
        // target list.
        assert(!targets.needsWritable);
        targets.needsWritable = true;
        // if any of the deferred targets were upper-level cache
        // requests marked downstreamPending, need to clear that
        assert(!downstreamPending);  // not pending here anymore
        deferredTargets.clearDownstreamPending();
        // this clears out deferredTargets too
        targets.splice(targets.end(), deferredTargets);
        deferredTargets.resetFlags();
    }
}


bool
MSHR::checkFunctional(PacketPtr pkt)
{
    // For printing, we treat the MSHR as a single entity. For other
    // requests, we iterate over the individual targets since that's
    // where the actual data lies.
    if (pkt->isPrint()) {
        pkt->checkFunctional(this, blkAddr, isSecure, blkSize, nullptr);
        return false;
    } else {
        return (targets.checkFunctional(pkt) ||
                deferredTargets.checkFunctional(pkt));
    }
}

bool
MSHR::sendPacket(BaseCache &cache)
{
    return cache.sendMSHRQueuePacket(this);
}

void
MSHR::print(std::ostream &os, int verbosity, const std::string &prefix) const
{
    ccprintf(os, "%s[%#llx:%#llx](%s) %s %s %s state: %s %s %s %s %s %s\n",
             prefix, blkAddr, blkAddr + blkSize - 1,
             isSecure ? "s" : "ns",
             isForward ? "Forward" : "",
             allocOnFill() ? "AllocOnFill" : "",
             needsWritable() ? "Wrtbl" : "",
             _isUncacheable ? "Unc" : "",
             inService ? "InSvc" : "",
             downstreamPending ? "DwnPend" : "",
             postInvalidate ? "PostInv" : "",
             postDowngrade ? "PostDowngr" : "",
             hasFromCache() ? "HasFromCache" : "");

    if (!targets.empty()) {
        ccprintf(os, "%s Targets:\n", prefix);
        targets.print(os, verbosity, prefix + " ");
    }
    if (!deferredTargets.empty()) {
        ccprintf(os, "%s Deferred Targets:\n", prefix);
        deferredTargets.print(os, verbosity, prefix + " ");
    }
}

std::string
MSHR::print() const
{
    std::ostringstream str;
    print(str);
    return str.str();
}