mshr.cc (revision 9543:a373b2e664ff, updated from 9086:496304c8017d)
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 */

/**
 * @file
 * Miss Status and Handling Register (MSHR) definitions.
 */

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

#include "base/misc.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "mem/cache/cache.hh"
#include "mem/cache/mshr.hh"
#include "sim/core.hh"

using namespace std;

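// Construct an empty, unallocated MSHR; allocate() fills in the
// request-specific state when the entry is brought into use.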
MSHR::MSHR()
{
    inService = false;
    ntargets = 0;
    threadNum = InvalidThreadID;
    targets = new TargetList();
    deferredTargets = new TargetList();
}


MSHR::TargetList::TargetList()
    : needsExclusive(false), hasUpgrade(false)
{}


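// Append a target to this list. Non-snoop targets update the
// needsExclusive/hasUpgrade summary flags, and when markPending is set
// any upstream MSHR found on the packet's SenderState chain is flagged
// as having a response pending from below.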
inline void
MSHR::TargetList::add(PacketPtr pkt, Tick readyTime,
                      Counter order, Target::Source source, bool markPending)
{
    if (source != Target::FromSnoop) {
        if (pkt->needsExclusive()) {
            needsExclusive = true;
        }

        // StoreCondReq is effectively an upgrade if it's in an MSHR
        // since it would have been failed already if we didn't have a
        // read-only copy
        if (pkt->isUpgrade() || pkt->cmd == MemCmd::StoreCondReq) {
            hasUpgrade = true;
        }
    }

    if (markPending) {
        // Iterate over the SenderState stack and see if we find
        // an MSHR entry. If we do, set the downstreamPending
        // flag. Otherwise, do nothing.
        MSHR *mshr = pkt->findNextSenderState<MSHR>();
        if (mshr != NULL) {
            assert(!mshr->downstreamPending);
            mshr->downstreamPending = true;
        }
    }

    push_back(Target(pkt, readyTime, order, source, markPending));
}


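// Downgrade an upgrade-style request to the equivalent full or failed
// command, used once an invalidation means the requester no longer holds
// the read-only copy the upgrade assumed.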
static void
replaceUpgrade(PacketPtr pkt)
{
    if (pkt->cmd == MemCmd::UpgradeReq) {
        pkt->cmd = MemCmd::ReadExReq;
        DPRINTF(Cache, "Replacing UpgradeReq with ReadExReq\n");
    } else if (pkt->cmd == MemCmd::SCUpgradeReq) {
        pkt->cmd = MemCmd::SCUpgradeFailReq;
        DPRINTF(Cache, "Replacing SCUpgradeReq with SCUpgradeFailReq\n");
    } else if (pkt->cmd == MemCmd::StoreCondReq) {
        pkt->cmd = MemCmd::StoreCondFailReq;
        DPRINTF(Cache, "Replacing StoreCondReq with StoreCondFailReq\n");
    }
}


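// Apply replaceUpgrade() to every queued target; a no-op unless an
// upgrade has actually been recorded in this list.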
void
MSHR::TargetList::replaceUpgrades()
{
    if (!hasUpgrade)
        return;

    Iterator end_i = end();
    for (Iterator i = begin(); i != end_i; ++i) {
        replaceUpgrade(i->pkt);
    }

    hasUpgrade = false;
}


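// Walk the targets that were added with markPending set and recursively
// clear the downstreamPending flag in the upstream MSHRs they point to.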
void
MSHR::TargetList::clearDownstreamPending()
{
    Iterator end_i = end();
    for (Iterator i = begin(); i != end_i; ++i) {
        if (i->markedPending) {
            // Iterate over the SenderState stack and see if we find
            // an MSHR entry. If we find one, clear the
            // downstreamPending flag by calling
            // clearDownstreamPending(). This recursively clears the
            // downstreamPending flag in all caches this packet has
            // passed through.
            MSHR *mshr = i->pkt->findNextSenderState<MSHR>();
            if (mshr != NULL) {
                mshr->clearDownstreamPending();
            }
        }
    }
}


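// Check whether the functional request in pkt can be satisfied by any
// of the packets queued in this list.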
bool
MSHR::TargetList::checkFunctional(PacketPtr pkt)
{
    Iterator end_i = end();
    for (Iterator i = begin(); i != end_i; ++i) {
        if (pkt->checkFunctional(i->pkt)) {
            return true;
        }
    }

    return false;
}


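// Print each queued target, tagged with where it came from (CPU, snoop,
// or prefetcher), for debug output.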
void
MSHR::TargetList::
print(std::ostream &os, int verbosity, const std::string &prefix) const
{
    ConstIterator end_i = end();
    for (ConstIterator i = begin(); i != end_i; ++i) {
        const char *s;
        switch (i->source) {
          case Target::FromCPU:
            s = "FromCPU";
            break;
          case Target::FromSnoop:
            s = "FromSnoop";
            break;
          case Target::FromPrefetcher:
            s = "FromPrefetcher";
            break;
          default:
            s = "";
            break;
        }
        ccprintf(os, "%s%s: ", prefix, s);
        i->pkt->print(os, verbosity, "");
    }
}


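// Put this MSHR into use for the given block address and initial target
// packet, resetting all per-miss state.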
void
MSHR::allocate(Addr _addr, int _size, PacketPtr target,
               Tick whenReady, Counter _order)
{
    addr = _addr;
    size = _size;
    readyTime = whenReady;
    order = _order;
    assert(target);
    isForward = false;
    _isUncacheable = target->req->isUncacheable();
    inService = false;
    downstreamPending = false;
    threadNum = 0;
    ntargets = 1;
    assert(targets->isReset());
    // Don't know of a case where we would allocate a new MSHR for a
    // snoop (mem-side request), so set source according to request here
    Target::Source source = (target->cmd == MemCmd::HardPFReq) ?
        Target::FromPrefetcher : Target::FromCPU;
    targets->add(target, whenReady, _order, source, true);
    assert(deferredTargets->isReset());
    data = NULL;
}


void
MSHR::clearDownstreamPending()
{
    assert(downstreamPending);
    downstreamPending = false;
    // recursively clear flag on any MSHRs we will be forwarding
    // responses to
    targets->clearDownstreamPending();
}

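// Mark this MSHR's request as having been sent downstream. Returns true
// if the MSHR only forwarded its request and expects no response, in
// which case its single target is popped and nothing more will happen
// here.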
bool
MSHR::markInService(PacketPtr pkt)
{
    assert(!inService);
    if (isForwardNoResponse()) {
        // we just forwarded the request packet & don't expect a
        // response, so get rid of it
        assert(getNumTargets() == 1);
        popTarget();
        return true;
    }
    inService = true;
    pendingDirty = (targets->needsExclusive ||
                    (!pkt->sharedAsserted() && pkt->memInhibitAsserted()));
    postInvalidate = postDowngrade = false;

    if (!downstreamPending) {
        // let upstream caches know that the request has made it to a
        // level where it's going to get a response
        targets->clearDownstreamPending();
    }
    return false;
}


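// Return the MSHR to its unallocated state; all targets must already
// have been popped and serviced.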
void
MSHR::deallocate()
{
    assert(targets->empty());
    targets->resetFlags();
    assert(deferredTargets->isReset());
    assert(ntargets == 0);
    inService = false;
}

/*
 * Adds a target to an MSHR
 */
void
MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order)
{
    // if there's a request already in service for this MSHR, we will
    // have to defer the new target until after the response if any of
    // the following are true:
    // - there are other targets already deferred
    // - there's a pending invalidate to be applied after the response
    //   comes back (but before this target is processed)
    // - this target requires an exclusive block and either we're not
    //   getting an exclusive block back or we have already snooped
    //   another read request that will downgrade our exclusive block
    //   to shared

    // assume we'd never issue a prefetch when we've got an
    // outstanding miss
    assert(pkt->cmd != MemCmd::HardPFReq);

    if (inService &&
        (!deferredTargets->empty() || hasPostInvalidate() ||
         (pkt->needsExclusive() &&
          (!isPendingDirty() || hasPostDowngrade() || isForward)))) {
        // need to put on deferred list
        if (hasPostInvalidate())
            replaceUpgrade(pkt);
        deferredTargets->add(pkt, whenReady, _order, Target::FromCPU, true);
    } else {
        // No request outstanding, or still OK to append to
        // outstanding request: append to regular target list. Only
        // mark pending if current request hasn't been issued yet
        // (isn't in service).
        targets->add(pkt, whenReady, _order, Target::FromCPU, !inService);
    }

    ++ntargets;
}

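// Handle a snoop that addresses this MSHR's block. Returns false if the
// snoop logically precedes our request and should be handled by the
// cache as usual; returns true if this MSHR has taken responsibility
// for the snoop's effects (recording it for replay and/or scheduling a
// post-response invalidate or downgrade).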
bool
MSHR::handleSnoop(PacketPtr pkt, Counter _order)
{
    if (!inService || (pkt->isExpressSnoop() && downstreamPending)) {
        // Request has not been issued yet, or it's been issued
        // locally but is buffered unissued at some downstream cache
        // which is forwarding us this snoop.  Either way, the packet
        // we're snooping logically precedes this MSHR's request, so
        // the snoop has no impact on the MSHR, but must be processed
        // in the standard way by the cache.  The only exception is
        // that if we're an L2+ cache buffering an UpgradeReq from a
        // higher-level cache, and the snoop is invalidating, then our
        // buffered upgrades must be converted to read exclusives,
        // since the upper-level cache no longer has a valid copy.
        // That is, even though the upper-level cache got out on its
        // local bus first, some other invalidating transaction
        // reached the global bus before the upgrade did.
        if (pkt->needsExclusive()) {
            targets->replaceUpgrades();
            deferredTargets->replaceUpgrades();
        }

        return false;
    }

    // From here on down, the request issued by this MSHR logically
    // precedes the request we're snooping.
    if (pkt->needsExclusive()) {
        // snooped request still precedes the re-request we'll have to
        // issue for deferred targets, if any...
        deferredTargets->replaceUpgrades();
    }

    if (hasPostInvalidate()) {
        // a prior snoop has already appended an invalidation, so
        // logically we don't have the block anymore; no need for
        // further snooping.
        return true;
    }

    if (isPendingDirty() || pkt->isInvalidate()) {
        // We need to save and replay the packet in two cases:
        // 1. We're awaiting an exclusive copy, so ownership is pending,
        //    and we need to respond after we receive data.
        // 2. It's an invalidation (e.g., UpgradeReq), and we need
        //    to forward the snoop up the hierarchy after the current
        //    transaction completes.

        // Actual target device (typ. a memory) will delete the
        // packet on reception, so we need to save a copy here.
        PacketPtr cp_pkt = new Packet(pkt, true);
        targets->add(cp_pkt, curTick(), _order, Target::FromSnoop,
                     downstreamPending && targets->needsExclusive);
        ++ntargets;

        if (isPendingDirty()) {
            pkt->assertMemInhibit();
            pkt->setSupplyExclusive();
        }

        if (pkt->needsExclusive()) {
            // This transaction will take away our pending copy
            postInvalidate = true;
        }
    }

    if (!pkt->needsExclusive()) {
        // This transaction will get a read-shared copy, downgrading
        // our copy if we had an exclusive one
        postDowngrade = true;
        pkt->assertShared();
    }

    return true;
}


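// Once the current set of targets has been serviced, move any deferred
// targets up to the main list so a fresh request can be issued for
// them. Returns false if there was nothing to promote.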
bool
MSHR::promoteDeferredTargets()
{
    assert(targets->empty());
    if (deferredTargets->empty()) {
        return false;
    }

    // swap targets & deferredTargets lists
    TargetList *tmp = targets;
    targets = deferredTargets;
    deferredTargets = tmp;

    assert(targets->size() == ntargets);

    // clear deferredTargets flags
    deferredTargets->resetFlags();

    order = targets->front().order;
    readyTime = std::max(curTick(), targets->front().readyTime);

    return true;
}


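// Adjust target bookkeeping for an incoming fill response: if the
// response grants exclusivity even though it was not required, deferred
// exclusive targets can be promoted onto the main list right away.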
void
MSHR::handleFill(Packet *pkt, CacheBlk *blk)
{
    if (!pkt->sharedAsserted()
        && !(hasPostInvalidate() || hasPostDowngrade())
        && deferredTargets->needsExclusive) {
        // We got an exclusive response, but we have deferred targets
        // which are waiting to request an exclusive copy (not because
        // of a pending invalidate).  This can happen if the original
        // request was for a read-only (non-exclusive) block, but we
        // got an exclusive copy anyway because of the E part of the
        // MOESI/MESI protocol.  Since we got the exclusive copy
        // there's no need to defer the targets, so move them up to
        // the regular target list.
        assert(!targets->needsExclusive);
        targets->needsExclusive = true;
        // if any of the deferred targets were upper-level cache
        // requests marked downstreamPending, need to clear that
        assert(!downstreamPending);  // not pending here anymore
        deferredTargets->clearDownstreamPending();
        // this clears out deferredTargets too
        targets->splice(targets->end(), *deferredTargets);
        deferredTargets->resetFlags();
    }
}


bool
MSHR::checkFunctional(PacketPtr pkt)
{
    // For printing, we treat the MSHR as a whole as single entity.
    // For other requests, we iterate over the individual targets
    // since that's where the actual data lies.
    if (pkt->isPrint()) {
        pkt->checkFunctional(this, addr, size, NULL);
        return false;
    } else {
        return (targets->checkFunctional(pkt) ||
                deferredTargets->checkFunctional(pkt));
    }
}


void
MSHR::print(std::ostream &os, int verbosity, const std::string &prefix) const
{
    ccprintf(os, "%s[%x:%x] %s %s %s %s state: %s %s %s %s\n",
             prefix, addr, addr+size-1,
             isForward ? "Forward" : "",
             isForwardNoResponse() ? "ForwNoResp" : "",
             needsExclusive() ? "Excl" : "",
             _isUncacheable ? "Unc" : "",
             inService ? "InSvc" : "",
             downstreamPending ? "DwnPend" : "",
             hasPostInvalidate() ? "PostInv" : "",
             hasPostDowngrade() ? "PostDowngr" : "");

    ccprintf(os, "%s Targets:\n", prefix);
    targets->print(os, verbosity, prefix + " ");
    if (!deferredTargets->empty()) {
        ccprintf(os, "%s Deferred Targets:\n", prefix);
        deferredTargets->print(os, verbosity, prefix + " ");
    }
}

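// The target lists are allocated with plain new in the constructor, so
// they are released with plain delete here.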
MSHR::~MSHR()
{
    delete targets;
    delete deferredTargets;
}