mshr.cc revision 5314
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 */

/**
 * @file
 * Miss Status and Handling Register (MSHR) definitions.
 */

#include <assert.h>
#include <string>
#include <vector>
#include <algorithm>

#include "mem/cache/miss/mshr.hh"
#include "sim/core.hh" // for curTick
#include "sim/host.hh"
#include "base/misc.hh"
#include "mem/cache/cache.hh"

using namespace std;

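// Default-construct an MSHR with empty cpu-side and deferred target
// lists; the fields describing a particular miss are filled in later by
// allocate().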
MSHR::MSHR()
{
    inService = false;
    ntargets = 0;
    threadNum = -1;
    targets = new TargetList();
    deferredTargets = new TargetList();
}

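// A freshly constructed target list has no exclusive or upgrade
// requests recorded.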
MSHR::TargetList::TargetList()
    : needsExclusive(false), hasUpgrade(false)
{}

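// Append a target.  For cpu-side targets, note whether the block must be
// obtained exclusively and whether an upgrade is being buffered; if the
// packet carries an upstream MSHR in its senderState, mark that MSHR as
// having a request pending downstream.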
inline void
MSHR::TargetList::add(PacketPtr pkt, Tick readyTime,
                      Counter order, bool cpuSide)
{
    if (cpuSide) {
        if (pkt->needsExclusive()) {
            needsExclusive = true;
        }

        if (pkt->cmd == MemCmd::UpgradeReq) {
            hasUpgrade = true;
        }

        MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
        if (mshr != NULL) {
            assert(!mshr->downstreamPending);
            mshr->downstreamPending = true;
        }
    }

    push_back(Target(pkt, readyTime, order, cpuSide));
}

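// Convert any buffered UpgradeReq targets into ReadExReq targets; used
// when a snooped invalidation means the upgrade can no longer assume the
// block is still valid in the requesting cache.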
void
MSHR::TargetList::replaceUpgrades()
{
    if (!hasUpgrade)
        return;

    Iterator end_i = end();
    for (Iterator i = begin(); i != end_i; ++i) {
        if (i->pkt->cmd == MemCmd::UpgradeReq) {
            i->pkt->cmd = MemCmd::ReadExReq;
            DPRINTF(Cache, "Replacing UpgradeReq with ReadExReq\n");
        }
    }

    hasUpgrade = false;
}

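// Clear the downstreamPending flag on any upstream MSHRs attached to our
// targets: their requests have reached a level that will generate a
// response.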
void
MSHR::TargetList::clearDownstreamPending()
{
    Iterator end_i = end();
    for (Iterator i = begin(); i != end_i; ++i) {
        MSHR *mshr = dynamic_cast<MSHR*>(i->pkt->senderState);
        if (mshr != NULL) {
            assert(mshr->downstreamPending);
            mshr->downstreamPending = false;
        }
    }
}

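// Apply a functional access to each buffered target packet; return true
// if one of them satisfies the access.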
bool
MSHR::TargetList::checkFunctional(PacketPtr pkt)
{
    Iterator end_i = end();
    for (Iterator i = begin(); i != end_i; ++i) {
        if (pkt->checkFunctional(i->pkt)) {
            return true;
        }
    }

    return false;
}

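// Print each buffered target, labeled as a cpu-side or mem-side entry.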
void
MSHR::TargetList::
print(std::ostream &os, int verbosity, const std::string &prefix) const
{
    ConstIterator end_i = end();
    for (ConstIterator i = begin(); i != end_i; ++i) {
        ccprintf(os, "%s%s: ", prefix, i->isCpuSide() ? "cpu" : "mem");
        i->pkt->print(os, verbosity, "");
    }
}

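// (Re)initialize this MSHR for a miss on [_addr, _addr + _size) and
// record the initiating cpu-side packet as the first target.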
void
MSHR::allocate(Addr _addr, int _size, PacketPtr target,
               Tick whenReady, Counter _order)
{
    addr = _addr;
    size = _size;
    readyTime = whenReady;
    order = _order;
    assert(target);
    isCacheFill = false;
    _isUncacheable = target->req->isUncacheable();
    inService = false;
    downstreamPending = false;
    threadNum = 0;
    ntargets = 1;
    // Don't know of a case where we would allocate a new MSHR for a
    // snoop (mem-side request), so set cpuSide to true here.
    assert(targets->isReset());
    targets->add(target, whenReady, _order, true);
    assert(deferredTargets->isReset());
    pendingInvalidate = false;
    pendingShared = false;
    data = NULL;
}

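// Mark this MSHR as in service.  Returns true if the MSHR should be
// deallocated immediately because the request was simply forwarded and
// no response is expected.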
bool
MSHR::markInService()
{
    assert(!inService);
    if (isSimpleForward()) {
        // we just forwarded the request packet & don't expect a
        // response, so get rid of it
        assert(getNumTargets() == 1);
        popTarget();
        return true;
    }
    inService = true;
    if (!downstreamPending) {
        // let upstream caches know that the request has made it to a
        // level where it's going to get a response
        targets->clearDownstreamPending();
    }
    return false;
}

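// Return this MSHR to the unallocated state; all targets must already
// have been serviced.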
void
MSHR::deallocate()
{
    assert(targets->empty());
    targets->resetFlags();
    assert(deferredTargets->isReset());
    assert(ntargets == 0);
    inService = false;
    //allocIter = NULL;
    //readyIter = NULL;
}

/*
 * Adds a target to an MSHR
 */
void
MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order)
{
    // if there's a request already in service for this MSHR, we will
    // have to defer the new target until after the response if any of
    // the following are true:
    // - there are other targets already deferred
    // - there's a pending invalidate to be applied after the response
    //   comes back (but before this target is processed)
    // - the outstanding request is for a non-exclusive block and this
    //   target requires an exclusive block
    if (inService &&
        (!deferredTargets->empty() || pendingInvalidate ||
         (!targets->needsExclusive && pkt->needsExclusive()))) {
        // need to put on deferred list
        deferredTargets->add(pkt, whenReady, _order, true);
    } else {
        // no request outstanding, or still OK to append to
        // outstanding request
        targets->add(pkt, whenReady, _order, true);
    }

    ++ntargets;
}

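// Handle a snoop that hits this MSHR's block.  Returns false if the
// snoop logically precedes our outstanding request and must be handled
// by the cache in the normal way; returns true if the snoop has been
// absorbed by this MSHR.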
bool
MSHR::handleSnoop(PacketPtr pkt, Counter _order)
{
    if (!inService || (pkt->isExpressSnoop() && downstreamPending)) {
        // Request has not been issued yet, or it's been issued
        // locally but is buffered unissued at some downstream cache
        // which is forwarding us this snoop.  Either way, the packet
        // we're snooping logically precedes this MSHR's request, so
        // the snoop has no impact on the MSHR, but must be processed
        // in the standard way by the cache.  The only exception is
        // that if we're an L2+ cache buffering an UpgradeReq from a
        // higher-level cache, and the snoop is invalidating, then our
        // buffered upgrades must be converted to read exclusives,
        // since the upper-level cache no longer has a valid copy.
        // That is, even though the upper-level cache got out on its
        // local bus first, some other invalidating transaction
        // reached the global bus before the upgrade did.
        if (pkt->needsExclusive()) {
            targets->replaceUpgrades();
            deferredTargets->replaceUpgrades();
        }

        return false;
    }

    // From here on down, the request issued by this MSHR logically
    // precedes the request we're snooping.

    if (pkt->needsExclusive()) {
        // snooped request still precedes the re-request we'll have to
        // issue for deferred targets, if any...
        deferredTargets->replaceUpgrades();
    }

    if (pendingInvalidate) {
        // a prior snoop has already appended an invalidation, so
        // logically we don't have the block anymore; no need for
        // further snooping.
        return true;
    }

    if (targets->needsExclusive || pkt->needsExclusive()) {
        // actual target device (typ. PhysicalMemory) will delete the
        // packet on reception, so we need to save a copy here
        PacketPtr cp_pkt = new Packet(pkt, true);
        targets->add(cp_pkt, curTick, _order, false);
        ++ntargets;

        if (targets->needsExclusive) {
            // We're awaiting an exclusive copy, so ownership is pending.
            // It's up to us to respond once the data arrives.
            pkt->assertMemInhibit();
            pkt->setSupplyExclusive();
        } else {
            // Someone else may respond before we get around to
            // processing this snoop, which means the copied request
            // pointer will no longer be valid
            cp_pkt->req = NULL;
        }

        if (pkt->needsExclusive()) {
            // This transaction will take away our pending copy
            pendingInvalidate = true;
        }
    } else {
        // Read to a read: no conflict, so no need to record it as a
        // target, but make sure neither reader thinks it's getting an
        // exclusive copy
        pendingShared = true;
        pkt->assertShared();
    }

    return true;
}

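// If any deferred targets exist, promote them to become the active
// target list (they will form the MSHR's new outstanding request).
// Returns true if targets were promoted.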
bool
MSHR::promoteDeferredTargets()
{
    assert(targets->empty());
    if (deferredTargets->empty()) {
        return false;
    }

    // swap targets & deferredTargets lists
    TargetList *tmp = targets;
    targets = deferredTargets;
    deferredTargets = tmp;

    assert(targets->size() == ntargets);

    // clear deferredTargets flags
    deferredTargets->resetFlags();

    pendingInvalidate = false;
    pendingShared = false;
    order = targets->front().order;
    readyTime = std::max(curTick, targets->front().readyTime);

    return true;
}

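// Adjust state when the fill for this miss arrives: assert the shared
// line on the response if another read was snooped while the miss was
// outstanding, and if an exclusive copy arrived anyway, promote deferred
// targets that were only waiting for exclusivity.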
void
MSHR::handleFill(Packet *pkt, CacheBlk *blk)
{
    if (pendingShared) {
        // we snooped another read while this read was in
        // service... assert shared line on its behalf
        pkt->assertShared();
    }

    if (!pkt->sharedAsserted() && !pendingInvalidate
        && deferredTargets->needsExclusive) {
        // We got an exclusive response, but we have deferred targets
        // which are waiting to request an exclusive copy (not because
        // of a pending invalidate).  This can happen if the original
        // request was for a read-only (non-exclusive) block, but we
        // got an exclusive copy anyway because of the E part of the
        // MOESI/MESI protocol.  Since we got the exclusive copy
        // there's no need to defer the targets, so move them up to
        // the regular target list.
        assert(!targets->needsExclusive);
        targets->needsExclusive = true;
        // this clears out deferredTargets too
        targets->splice(targets->end(), *deferredTargets);
        deferredTargets->resetFlags();
    }
}

bool
MSHR::checkFunctional(PacketPtr pkt)
{
    // For printing, we treat the MSHR as a single entity.  For other
    // requests, we iterate over the individual targets since that's
    // where the actual data lies.
    if (pkt->isPrint()) {
        pkt->checkFunctional(this, addr, size, NULL);
        return false;
    } else {
        return (targets->checkFunctional(pkt) ||
                deferredTargets->checkFunctional(pkt));
    }
}

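// Dump this MSHR's state and its target lists, for debugging.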
void
MSHR::print(std::ostream &os, int verbosity, const std::string &prefix) const
{
    ccprintf(os, "%s[%x:%x] %s %s %s state: %s %s %s %s\n",
             prefix, addr, addr+size-1,
             isCacheFill ? "Fill" : "",
             needsExclusive() ? "Excl" : "",
             _isUncacheable ? "Unc" : "",
             inService ? "InSvc" : "",
             downstreamPending ? "DwnPend" : "",
             pendingInvalidate ? "PendInv" : "",
             pendingShared ? "PendShared" : "");

    ccprintf(os, "%s  Targets:\n", prefix);
    targets->print(os, verbosity, prefix + "    ");
    if (!deferredTargets->empty()) {
        ccprintf(os, "%s  Deferred Targets:\n", prefix);
        deferredTargets->print(os, verbosity, prefix + "      ");
    }
}

MSHR::~MSHR()
{
}