mshr.cc (5730:dea5fcd1ead0) mshr.cc (5875:d82be3235ab4)
1/*
2 * Copyright (c) 2002-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Erik Hallnor
29 * Dave Greene
30 */
31
32/**
33 * @file
34 * Miss Status and Handling Register (MSHR) definitions.
35 */
36
37#include <assert.h>
38#include <string>
39#include <vector>
40#include <algorithm>
41
42#include "mem/cache/mshr.hh"
43#include "sim/core.hh" // for curTick
44#include "sim/host.hh"
45#include "base/misc.hh"
46#include "mem/cache/cache.hh"
47
48using namespace std;
49
// NOTE(review): the integer at the start of each line is a diff-viewer
// line number from the review-tool rendering this file was captured
// from; it is not part of the C++ source.
// Default-construct an MSHR in its quiescent (unallocated) state:
// not in service, zero targets, invalid thread id, and freshly
// allocated empty regular and deferred target lists.  The two lists
// live for the lifetime of the MSHR and are swapped by pointer in
// promoteDeferredTargets() rather than reallocated.
50MSHR::MSHR()
51{
52 inService = false;
53 ntargets = 0;
54 threadNum = -1;
55 targets = new TargetList();
56 deferredTargets = new TargetList();
57}
58
59
// A new TargetList starts with both summary flags cleared: no buffered
// target needs an exclusive copy, and no buffered target is an
// UpgradeReq.  Both flags are maintained by add() as targets arrive.
60MSHR::TargetList::TargetList()
61 : needsExclusive(false), hasUpgrade(false)
62{}
63
64
65inline void
66MSHR::TargetList::add(PacketPtr pkt, Tick readyTime,
1/*
2 * Copyright (c) 2002-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Erik Hallnor
29 * Dave Greene
30 */
31
32/**
33 * @file
34 * Miss Status and Handling Register (MSHR) definitions.
35 */
36
37#include <assert.h>
38#include <string>
39#include <vector>
40#include <algorithm>
41
42#include "mem/cache/mshr.hh"
43#include "sim/core.hh" // for curTick
44#include "sim/host.hh"
45#include "base/misc.hh"
46#include "mem/cache/cache.hh"
47
48using namespace std;
49
50MSHR::MSHR()
51{
52 inService = false;
53 ntargets = 0;
54 threadNum = -1;
55 targets = new TargetList();
56 deferredTargets = new TargetList();
57}
58
59
60MSHR::TargetList::TargetList()
61 : needsExclusive(false), hasUpgrade(false)
62{}
63
64
65inline void
66MSHR::TargetList::add(PacketPtr pkt, Tick readyTime,
67 Counter order, bool cpuSide, bool markPending)
67 Counter order, Target::Source source, bool markPending)
68{
68{
69 if (cpuSide) {
69 if (source != Target::FromSnoop) {
70 if (pkt->needsExclusive()) {
71 needsExclusive = true;
72 }
73
74 if (pkt->cmd == MemCmd::UpgradeReq) {
75 hasUpgrade = true;
76 }
77 }
78
79 if (markPending) {
80 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
81 if (mshr != NULL) {
82 assert(!mshr->downstreamPending);
83 mshr->downstreamPending = true;
84 }
85 }
86
70 if (pkt->needsExclusive()) {
71 needsExclusive = true;
72 }
73
74 if (pkt->cmd == MemCmd::UpgradeReq) {
75 hasUpgrade = true;
76 }
77 }
78
79 if (markPending) {
80 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
81 if (mshr != NULL) {
82 assert(!mshr->downstreamPending);
83 mshr->downstreamPending = true;
84 }
85 }
86
87 push_back(Target(pkt, readyTime, order, cpuSide, markPending));
87 push_back(Target(pkt, readyTime, order, source, markPending));
88}
89
90
// Rewrite every buffered UpgradeReq target as a ReadExReq.  Needed when
// an invalidating snoop has beaten a buffered upgrade to the bus: the
// upgrader no longer holds a valid copy, so it must fetch the whole
// block exclusively instead of merely upgrading it (see handleSnoop).
91void
92MSHR::TargetList::replaceUpgrades()
93{
// Fast path: hasUpgrade tracks whether any UpgradeReq was ever added.
94 if (!hasUpgrade)
95 return;
96
97 Iterator end_i = end();
98 for (Iterator i = begin(); i != end_i; ++i) {
99 if (i->pkt->cmd == MemCmd::UpgradeReq) {
// The buffered packet's command is mutated in place.
100 i->pkt->cmd = MemCmd::ReadExReq;
101 DPRINTF(Cache, "Replacing UpgradeReq with ReadExReq\n");
102 }
103 }
104
// Every upgrade has been rewritten, so clear the tracking flag.
105 hasUpgrade = false;
106}
107
108
// Clear the downstreamPending flag on every upstream MSHR recorded in
// this list.  Only targets added with markPending carry such an MSHR
// (see add(), which sets mshr->downstreamPending via pkt->senderState);
// the clear recurses so the whole chain of waiting caches is released.
109void
110MSHR::TargetList::clearDownstreamPending()
111{
112 Iterator end_i = end();
113 for (Iterator i = begin(); i != end_i; ++i) {
114 if (i->markedPending) {
// senderState may hold the upstream MSHR; dynamic_cast yields
// NULL when it is absent or of another type.
115 MSHR *mshr = dynamic_cast<MSHR*>(i->pkt->senderState);
116 if (mshr != NULL) {
117 mshr->clearDownstreamPending();
118 }
119 }
120 }
121}
122
123
// Functional-access probe: return true as soon as pkt->checkFunctional()
// reports a hit against any buffered target packet in this list, false
// if no target matches.  The data comparison itself is delegated to
// Packet::checkFunctional.
124bool
125MSHR::TargetList::checkFunctional(PacketPtr pkt)
126{
127 Iterator end_i = end();
128 for (Iterator i = begin(); i != end_i; ++i) {
129 if (pkt->checkFunctional(i->pkt)) {
130 return true;
131 }
132 }
133
134 return false;
135}
136
137
138void
139MSHR::TargetList::
140print(std::ostream &os, int verbosity, const std::string &prefix) const
141{
142 ConstIterator end_i = end();
143 for (ConstIterator i = begin(); i != end_i; ++i) {
88}
89
90
91void
92MSHR::TargetList::replaceUpgrades()
93{
94 if (!hasUpgrade)
95 return;
96
97 Iterator end_i = end();
98 for (Iterator i = begin(); i != end_i; ++i) {
99 if (i->pkt->cmd == MemCmd::UpgradeReq) {
100 i->pkt->cmd = MemCmd::ReadExReq;
101 DPRINTF(Cache, "Replacing UpgradeReq with ReadExReq\n");
102 }
103 }
104
105 hasUpgrade = false;
106}
107
108
109void
110MSHR::TargetList::clearDownstreamPending()
111{
112 Iterator end_i = end();
113 for (Iterator i = begin(); i != end_i; ++i) {
114 if (i->markedPending) {
115 MSHR *mshr = dynamic_cast<MSHR*>(i->pkt->senderState);
116 if (mshr != NULL) {
117 mshr->clearDownstreamPending();
118 }
119 }
120 }
121}
122
123
124bool
125MSHR::TargetList::checkFunctional(PacketPtr pkt)
126{
127 Iterator end_i = end();
128 for (Iterator i = begin(); i != end_i; ++i) {
129 if (pkt->checkFunctional(i->pkt)) {
130 return true;
131 }
132 }
133
134 return false;
135}
136
137
138void
139MSHR::TargetList::
140print(std::ostream &os, int verbosity, const std::string &prefix) const
141{
142 ConstIterator end_i = end();
143 for (ConstIterator i = begin(); i != end_i; ++i) {
144 ccprintf(os, "%s%s: ", prefix, i->isCpuSide() ? "cpu" : "mem");
144 const char *s;
145 switch (i->source) {
146 case Target::FromCPU: s = "FromCPU";
147 case Target::FromSnoop: s = "FromSnoop";
148 case Target::FromPrefetcher: s = "FromPrefetcher";
149 default: s = "";
150 }
151 ccprintf(os, "%s%s: ", prefix, s);
145 i->pkt->print(os, verbosity, "");
146 }
147}
148
149
150void
151MSHR::allocate(Addr _addr, int _size, PacketPtr target,
152 Tick whenReady, Counter _order)
153{
154 addr = _addr;
155 size = _size;
156 readyTime = whenReady;
157 order = _order;
158 assert(target);
159 isForward = false;
160 _isUncacheable = target->req->isUncacheable();
161 inService = false;
162 downstreamPending = false;
163 threadNum = 0;
164 ntargets = 1;
152 i->pkt->print(os, verbosity, "");
153 }
154}
155
156
157void
158MSHR::allocate(Addr _addr, int _size, PacketPtr target,
159 Tick whenReady, Counter _order)
160{
161 addr = _addr;
162 size = _size;
163 readyTime = whenReady;
164 order = _order;
165 assert(target);
166 isForward = false;
167 _isUncacheable = target->req->isUncacheable();
168 inService = false;
169 downstreamPending = false;
170 threadNum = 0;
171 ntargets = 1;
165 // Don't know of a case where we would allocate a new MSHR for a
166 // snoop (mem-side request), so set cpuSide to true here.
167 assert(targets->isReset());
172 assert(targets->isReset());
168 targets->add(target, whenReady, _order, true, true);
173 // Don't know of a case where we would allocate a new MSHR for a
174 // snoop (mem-side request), so set source according to request here
175 Target::Source source = (target->cmd == MemCmd::HardPFReq) ?
176 Target::FromPrefetcher : Target::FromCPU;
177 targets->add(target, whenReady, _order, source, true);
169 assert(deferredTargets->isReset());
170 pendingInvalidate = false;
171 pendingShared = false;
172 data = NULL;
173}
174
175
// Clear this MSHR's own downstreamPending flag (which must currently be
// set) and propagate the clearing to any upstream MSHRs recorded in the
// regular target list.
176void
177MSHR::clearDownstreamPending()
178{
179 assert(downstreamPending);
180 downstreamPending = false;
181 // recursively clear flag on any MSHRs we will be forwarding
182 // responses to
183 targets->clearDownstreamPending();
184}
185
// Transition this MSHR to the in-service state once its request has
// been sent downstream.  Returns true if the caller may deallocate the
// MSHR immediately (request was forwarded with no response expected),
// false if the MSHR must stay allocated to receive the response.
186bool
187MSHR::markInService()
188{
189 assert(!inService);
190 if (isForwardNoResponse()) {
191 // we just forwarded the request packet & don't expect a
192 // response, so get rid of it
193 assert(getNumTargets() == 1);
194 popTarget();
195 return true;
196 }
197 inService = true;
198 if (!downstreamPending) {
199 // let upstream caches know that the request has made it to a
200 // level where it's going to get a response
201 targets->clearDownstreamPending();
202 }
203 return false;
204}
205
206
// Return the MSHR to its free state.  All targets must already have
// been consumed (both lists empty, ntargets back to zero); only the
// summary flags are reset here -- the list objects themselves are kept
// for reuse by the next allocate().
207void
208MSHR::deallocate()
209{
210 assert(targets->empty());
211 targets->resetFlags();
212 assert(deferredTargets->isReset());
213 assert(ntargets == 0);
214 inService = false;
215 //allocIter = NULL;
216 //readyIter = NULL;
217}
218
219/*
220 * Adds a target to an MSHR
221 */
222void
223MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order)
224{
225 // if there's a request already in service for this MSHR, we will
226 // have to defer the new target until after the response if any of
227 // the following are true:
228 // - there are other targets already deferred
229 // - there's a pending invalidate to be applied after the response
230 // comes back (but before this target is processed)
231 // - the outstanding request is for a non-exclusive block and this
232 // target requires an exclusive block
178 assert(deferredTargets->isReset());
179 pendingInvalidate = false;
180 pendingShared = false;
181 data = NULL;
182}
183
184
185void
186MSHR::clearDownstreamPending()
187{
188 assert(downstreamPending);
189 downstreamPending = false;
190 // recursively clear flag on any MSHRs we will be forwarding
191 // responses to
192 targets->clearDownstreamPending();
193}
194
195bool
196MSHR::markInService()
197{
198 assert(!inService);
199 if (isForwardNoResponse()) {
200 // we just forwarded the request packet & don't expect a
201 // response, so get rid of it
202 assert(getNumTargets() == 1);
203 popTarget();
204 return true;
205 }
206 inService = true;
207 if (!downstreamPending) {
208 // let upstream caches know that the request has made it to a
209 // level where it's going to get a response
210 targets->clearDownstreamPending();
211 }
212 return false;
213}
214
215
216void
217MSHR::deallocate()
218{
219 assert(targets->empty());
220 targets->resetFlags();
221 assert(deferredTargets->isReset());
222 assert(ntargets == 0);
223 inService = false;
224 //allocIter = NULL;
225 //readyIter = NULL;
226}
227
228/*
229 * Adds a target to an MSHR
230 */
231void
232MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order)
233{
234 // if there's a request already in service for this MSHR, we will
235 // have to defer the new target until after the response if any of
236 // the following are true:
237 // - there are other targets already deferred
238 // - there's a pending invalidate to be applied after the response
239 // comes back (but before this target is processed)
240 // - the outstanding request is for a non-exclusive block and this
241 // target requires an exclusive block
242
243 // assume we'd never issue a prefetch when we've got an
244 // outstanding miss
245 assert(pkt->cmd != MemCmd::HardPFReq);
246
233 if (inService &&
234 (!deferredTargets->empty() || pendingInvalidate ||
235 (!targets->needsExclusive && pkt->needsExclusive()))) {
236 // need to put on deferred list
247 if (inService &&
248 (!deferredTargets->empty() || pendingInvalidate ||
249 (!targets->needsExclusive && pkt->needsExclusive()))) {
250 // need to put on deferred list
237 deferredTargets->add(pkt, whenReady, _order, true, true);
251 deferredTargets->add(pkt, whenReady, _order, Target::FromCPU, true);
238 } else {
239 // No request outstanding, or still OK to append to
240 // outstanding request: append to regular target list. Only
241 // mark pending if current request hasn't been issued yet
242 // (isn't in service).
252 } else {
253 // No request outstanding, or still OK to append to
254 // outstanding request: append to regular target list. Only
255 // mark pending if current request hasn't been issued yet
256 // (isn't in service).
243 targets->add(pkt, whenReady, _order, true, !inService);
257 targets->add(pkt, whenReady, _order, Target::FromCPU, !inService);
244 }
245
246 ++ntargets;
247}
248
249bool
250MSHR::handleSnoop(PacketPtr pkt, Counter _order)
251{
252 if (!inService || (pkt->isExpressSnoop() && downstreamPending)) {
253 // Request has not been issued yet, or it's been issued
254 // locally but is buffered unissued at some downstream cache
255 // which is forwarding us this snoop. Either way, the packet
256 // we're snooping logically precedes this MSHR's request, so
257 // the snoop has no impact on the MSHR, but must be processed
258 // in the standard way by the cache. The only exception is
259 // that if we're an L2+ cache buffering an UpgradeReq from a
260 // higher-level cache, and the snoop is invalidating, then our
261 // buffered upgrades must be converted to read exclusives,
262 // since the upper-level cache no longer has a valid copy.
263 // That is, even though the upper-level cache got out on its
264 // local bus first, some other invalidating transaction
265 // reached the global bus before the upgrade did.
266 if (pkt->needsExclusive()) {
267 targets->replaceUpgrades();
268 deferredTargets->replaceUpgrades();
269 }
270
271 return false;
272 }
273
274 // From here on down, the request issued by this MSHR logically
275 // precedes the request we're snooping.
276
277 if (pkt->needsExclusive()) {
278 // snooped request still precedes the re-request we'll have to
279 // issue for deferred targets, if any...
280 deferredTargets->replaceUpgrades();
281 }
282
283 if (pendingInvalidate) {
284 // a prior snoop has already appended an invalidation, so
285 // logically we don't have the block anymore; no need for
286 // further snooping.
287 return true;
288 }
289
290 if (targets->needsExclusive || pkt->needsExclusive()) {
291 // actual target device (typ. PhysicalMemory) will delete the
292 // packet on reception, so we need to save a copy here
293 PacketPtr cp_pkt = new Packet(pkt, true);
258 }
259
260 ++ntargets;
261}
262
263bool
264MSHR::handleSnoop(PacketPtr pkt, Counter _order)
265{
266 if (!inService || (pkt->isExpressSnoop() && downstreamPending)) {
267 // Request has not been issued yet, or it's been issued
268 // locally but is buffered unissued at some downstream cache
269 // which is forwarding us this snoop. Either way, the packet
270 // we're snooping logically precedes this MSHR's request, so
271 // the snoop has no impact on the MSHR, but must be processed
272 // in the standard way by the cache. The only exception is
273 // that if we're an L2+ cache buffering an UpgradeReq from a
274 // higher-level cache, and the snoop is invalidating, then our
275 // buffered upgrades must be converted to read exclusives,
276 // since the upper-level cache no longer has a valid copy.
277 // That is, even though the upper-level cache got out on its
278 // local bus first, some other invalidating transaction
279 // reached the global bus before the upgrade did.
280 if (pkt->needsExclusive()) {
281 targets->replaceUpgrades();
282 deferredTargets->replaceUpgrades();
283 }
284
285 return false;
286 }
287
288 // From here on down, the request issued by this MSHR logically
289 // precedes the request we're snooping.
290
291 if (pkt->needsExclusive()) {
292 // snooped request still precedes the re-request we'll have to
293 // issue for deferred targets, if any...
294 deferredTargets->replaceUpgrades();
295 }
296
297 if (pendingInvalidate) {
298 // a prior snoop has already appended an invalidation, so
299 // logically we don't have the block anymore; no need for
300 // further snooping.
301 return true;
302 }
303
304 if (targets->needsExclusive || pkt->needsExclusive()) {
305 // actual target device (typ. PhysicalMemory) will delete the
306 // packet on reception, so we need to save a copy here
307 PacketPtr cp_pkt = new Packet(pkt, true);
294 targets->add(cp_pkt, curTick, _order, false,
308 targets->add(cp_pkt, curTick, _order, Target::FromSnoop,
295 downstreamPending && targets->needsExclusive);
296 ++ntargets;
297
298 if (targets->needsExclusive) {
299 // We're awaiting an exclusive copy, so ownership is pending.
300 // It's up to us to respond once the data arrives.
301 pkt->assertMemInhibit();
302 pkt->setSupplyExclusive();
303 } else {
304 // Someone else may respond before we get around to
305 // processing this snoop, which means the copied request
306 // pointer will no longer be valid
307 cp_pkt->req = NULL;
308 }
309
310 if (pkt->needsExclusive()) {
311 // This transaction will take away our pending copy
312 pendingInvalidate = true;
313 }
314 } else {
315 // Read to a read: no conflict, so no need to record as
316 // target, but make sure neither reader thinks he's getting an
317 // exclusive copy
318 pendingShared = true;
319 pkt->assertShared();
320 }
321
322 return true;
323}
324
325
// After a response has retired the current target list, promote any
// deferred targets so a new request can be issued for them.  Returns
// false if nothing was deferred; otherwise the two lists are exchanged
// by pointer swap (no element copying) and the MSHR's order/readyTime
// are re-derived from the newly promoted head target.
326bool
327MSHR::promoteDeferredTargets()
328{
329 assert(targets->empty());
330 if (deferredTargets->empty()) {
331 return false;
332 }
333
334 // swap targets & deferredTargets lists
335 TargetList *tmp = targets;
336 targets = deferredTargets;
337 deferredTargets = tmp;
338
339 assert(targets->size() == ntargets);
340
341 // clear deferredTargets flags
342 deferredTargets->resetFlags();
343
344 pendingInvalidate = false;
345 pendingShared = false;
346 order = targets->front().order;
// The promoted request cannot become ready in the past: clamp to now.
347 readyTime = std::max(curTick, targets->front().readyTime);
348
349 return true;
350}
351
352
// Adjust the filling response packet (and our deferred targets) when
// the requested block arrives.  Two cases: a read snooped while this
// read was in service forces the shared line on the response, and an
// unexpectedly exclusive response lets deferred exclusive-needing
// targets be spliced into the regular list immediately instead of
// waiting for a second request.  blk is unused here.
353void
354MSHR::handleFill(Packet *pkt, CacheBlk *blk)
355{
356 if (pendingShared) {
357 // we snooped another read while this read was in
358 // service... assert shared line on its behalf
359 pkt->assertShared();
360 }
361
362 if (!pkt->sharedAsserted() && !pendingInvalidate
363 && deferredTargets->needsExclusive) {
364 // We got an exclusive response, but we have deferred targets
365 // which are waiting to request an exclusive copy (not because
366 // of a pending invalidate). This can happen if the original
367 // request was for a read-only (non-exclusive) block, but we
368 // got an exclusive copy anyway because of the E part of the
369 // MOESI/MESI protocol. Since we got the exclusive copy
370 // there's no need to defer the targets, so move them up to
371 // the regular target list.
372 assert(!targets->needsExclusive);
373 targets->needsExclusive = true;
374 // if any of the deferred targets were upper-level cache
375 // requests marked downstreamPending, need to clear that
376 assert(!downstreamPending); // not pending here anymore
377 deferredTargets->clearDownstreamPending();
378 // this clears out deferredTargets too
379 targets->splice(targets->end(), *deferredTargets);
380 deferredTargets->resetFlags();
381 }
382}
383
384
// Functional-access entry point for the whole MSHR.  Print requests
// treat the MSHR as one opaque entity and always return false; all
// other functional requests are checked against the individual buffered
// targets (regular then deferred), where the data actually lives.
385bool
386MSHR::checkFunctional(PacketPtr pkt)
387{
388 // For printing, we treat the MSHR as a whole as single entity.
389 // For other requests, we iterate over the individual targets
390 // since that's where the actual data lies.
391 if (pkt->isPrint()) {
392 pkt->checkFunctional(this, addr, size, NULL);
393 return false;
394 } else {
395 return (targets->checkFunctional(pkt) ||
396 deferredTargets->checkFunctional(pkt));
397 }
398}
399
400
401void
402MSHR::print(std::ostream &os, int verbosity, const std::string &prefix) const
403{
404 ccprintf(os, "%s[%x:%x] %s %s %s state: %s %s %s %s\n",
405 prefix, addr, addr+size-1,
406 isForward ? "Forward" : "",
407 isForwardNoResponse() ? "ForwNoResp" : "",
408 needsExclusive() ? "Excl" : "",
409 _isUncacheable ? "Unc" : "",
410 inService ? "InSvc" : "",
411 downstreamPending ? "DwnPend" : "",
412 pendingInvalidate ? "PendInv" : "",
413 pendingShared ? "PendShared" : "");
414
415 ccprintf(os, "%s Targets:\n", prefix);
416 targets->print(os, verbosity, prefix + " ");
417 if (!deferredTargets->empty()) {
418 ccprintf(os, "%s Deferred Targets:\n", prefix);
419 deferredTargets->print(os, verbosity, prefix + " ");
420 }
421}
422
423MSHR::~MSHR()
424{
425}
309 downstreamPending && targets->needsExclusive);
310 ++ntargets;
311
312 if (targets->needsExclusive) {
313 // We're awaiting an exclusive copy, so ownership is pending.
314 // It's up to us to respond once the data arrives.
315 pkt->assertMemInhibit();
316 pkt->setSupplyExclusive();
317 } else {
318 // Someone else may respond before we get around to
319 // processing this snoop, which means the copied request
320 // pointer will no longer be valid
321 cp_pkt->req = NULL;
322 }
323
324 if (pkt->needsExclusive()) {
325 // This transaction will take away our pending copy
326 pendingInvalidate = true;
327 }
328 } else {
329 // Read to a read: no conflict, so no need to record as
330 // target, but make sure neither reader thinks he's getting an
331 // exclusive copy
332 pendingShared = true;
333 pkt->assertShared();
334 }
335
336 return true;
337}
338
339
340bool
341MSHR::promoteDeferredTargets()
342{
343 assert(targets->empty());
344 if (deferredTargets->empty()) {
345 return false;
346 }
347
348 // swap targets & deferredTargets lists
349 TargetList *tmp = targets;
350 targets = deferredTargets;
351 deferredTargets = tmp;
352
353 assert(targets->size() == ntargets);
354
355 // clear deferredTargets flags
356 deferredTargets->resetFlags();
357
358 pendingInvalidate = false;
359 pendingShared = false;
360 order = targets->front().order;
361 readyTime = std::max(curTick, targets->front().readyTime);
362
363 return true;
364}
365
366
367void
368MSHR::handleFill(Packet *pkt, CacheBlk *blk)
369{
370 if (pendingShared) {
371 // we snooped another read while this read was in
372 // service... assert shared line on its behalf
373 pkt->assertShared();
374 }
375
376 if (!pkt->sharedAsserted() && !pendingInvalidate
377 && deferredTargets->needsExclusive) {
378 // We got an exclusive response, but we have deferred targets
379 // which are waiting to request an exclusive copy (not because
380 // of a pending invalidate). This can happen if the original
381 // request was for a read-only (non-exclusive) block, but we
382 // got an exclusive copy anyway because of the E part of the
383 // MOESI/MESI protocol. Since we got the exclusive copy
384 // there's no need to defer the targets, so move them up to
385 // the regular target list.
386 assert(!targets->needsExclusive);
387 targets->needsExclusive = true;
388 // if any of the deferred targets were upper-level cache
389 // requests marked downstreamPending, need to clear that
390 assert(!downstreamPending); // not pending here anymore
391 deferredTargets->clearDownstreamPending();
392 // this clears out deferredTargets too
393 targets->splice(targets->end(), *deferredTargets);
394 deferredTargets->resetFlags();
395 }
396}
397
398
399bool
400MSHR::checkFunctional(PacketPtr pkt)
401{
402 // For printing, we treat the MSHR as a whole as single entity.
403 // For other requests, we iterate over the individual targets
404 // since that's where the actual data lies.
405 if (pkt->isPrint()) {
406 pkt->checkFunctional(this, addr, size, NULL);
407 return false;
408 } else {
409 return (targets->checkFunctional(pkt) ||
410 deferredTargets->checkFunctional(pkt));
411 }
412}
413
414
415void
416MSHR::print(std::ostream &os, int verbosity, const std::string &prefix) const
417{
418 ccprintf(os, "%s[%x:%x] %s %s %s state: %s %s %s %s\n",
419 prefix, addr, addr+size-1,
420 isForward ? "Forward" : "",
421 isForwardNoResponse() ? "ForwNoResp" : "",
422 needsExclusive() ? "Excl" : "",
423 _isUncacheable ? "Unc" : "",
424 inService ? "InSvc" : "",
425 downstreamPending ? "DwnPend" : "",
426 pendingInvalidate ? "PendInv" : "",
427 pendingShared ? "PendShared" : "");
428
429 ccprintf(os, "%s Targets:\n", prefix);
430 targets->print(os, verbosity, prefix + " ");
431 if (!deferredTargets->empty()) {
432 ccprintf(os, "%s Deferred Targets:\n", prefix);
433 deferredTargets->print(os, verbosity, prefix + " ");
434 }
435}
436
437MSHR::~MSHR()
438{
439}