mshr.cc revision 9543
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 */

/**
 * @file
 * Miss Status and Handling Register (MSHR) definitions.
 */

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

#include "base/misc.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "mem/cache/cache.hh"
#include "mem/cache/mshr.hh"
#include "sim/core.hh"

using namespace std;

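// Construct an idle MSHR.  The entry starts out not in service and with
// no targets; allocate() fills in the rest of the state when the MSHR is
// assigned to an outstanding miss.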
MSHR::MSHR()
{
    inService = false;
    ntargets = 0;
    threadNum = InvalidThreadID;
    targets = new TargetList();
    deferredTargets = new TargetList();
}


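// A fresh TargetList requires no exclusive copy and holds no buffered
// upgrade requests; add() raises these flags as targets arrive.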
MSHR::TargetList::TargetList()
    : needsExclusive(false), hasUpgrade(false)
{}


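// Append a target to the list.  For non-snoop sources the summary flags
// (needsExclusive, hasUpgrade) are updated from the packet; if requested,
// the upstream MSHR found through the packet's SenderState stack is also
// marked as having a response pending downstream.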
inline void
MSHR::TargetList::add(PacketPtr pkt, Tick readyTime,
                      Counter order, Target::Source source, bool markPending)
{
    if (source != Target::FromSnoop) {
        if (pkt->needsExclusive()) {
            needsExclusive = true;
        }

        // StoreCondReq is effectively an upgrade if it's in an MSHR
        // since it would have been failed already if we didn't have a
        // read-only copy
        if (pkt->isUpgrade() || pkt->cmd == MemCmd::StoreCondReq) {
            hasUpgrade = true;
        }
    }

    if (markPending) {
        // Iterate over the SenderState stack and see if we find
        // an MSHR entry. If we do, set the downstreamPending
        // flag. Otherwise, do nothing.
        MSHR *mshr = pkt->findNextSenderState<MSHR>();
        if (mshr != NULL) {
            assert(!mshr->downstreamPending);
            mshr->downstreamPending = true;
        }
    }

    push_back(Target(pkt, readyTime, order, source, markPending));
}


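// Convert an upgrade-style command into what must be issued once the
// block can no longer be assumed to be present: UpgradeReq becomes a
// full ReadExReq, and the store-conditional variants become their
// corresponding "fail" commands.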
static void
replaceUpgrade(PacketPtr pkt)
{
    if (pkt->cmd == MemCmd::UpgradeReq) {
        pkt->cmd = MemCmd::ReadExReq;
        DPRINTF(Cache, "Replacing UpgradeReq with ReadExReq\n");
    } else if (pkt->cmd == MemCmd::SCUpgradeReq) {
        pkt->cmd = MemCmd::SCUpgradeFailReq;
        DPRINTF(Cache, "Replacing SCUpgradeReq with SCUpgradeFailReq\n");
    } else if (pkt->cmd == MemCmd::StoreCondReq) {
        pkt->cmd = MemCmd::StoreCondFailReq;
        DPRINTF(Cache, "Replacing StoreCondReq with StoreCondFailReq\n");
    }
}


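// Apply replaceUpgrade() to every queued target.  This is a no-op unless
// an upgrade was recorded when the targets were added.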
void
MSHR::TargetList::replaceUpgrades()
{
    if (!hasUpgrade)
        return;

    Iterator end_i = end();
    for (Iterator i = begin(); i != end_i; ++i) {
        replaceUpgrade(i->pkt);
    }

    hasUpgrade = false;
}


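// Walk the queued targets and clear the downstreamPending flag on any
// upstream MSHR that was marked pending when its packet was added.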
void
MSHR::TargetList::clearDownstreamPending()
{
    Iterator end_i = end();
    for (Iterator i = begin(); i != end_i; ++i) {
        if (i->markedPending) {
            // Iterate over the SenderState stack and see if we find
            // an MSHR entry. If we find one, clear the
            // downstreamPending flag by calling
            // clearDownstreamPending(). This recursively clears the
            // downstreamPending flag in all caches this packet has
            // passed through.
            MSHR *mshr = i->pkt->findNextSenderState<MSHR>();
            if (mshr != NULL) {
                mshr->clearDownstreamPending();
            }
        }
    }
}


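// Let a functional access search the queued target packets; returns true
// as soon as one of them satisfies the access.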
bool
MSHR::TargetList::checkFunctional(PacketPtr pkt)
{
    Iterator end_i = end();
    for (Iterator i = begin(); i != end_i; ++i) {
        if (pkt->checkFunctional(i->pkt)) {
            return true;
        }
    }

    return false;
}


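// Print each queued target, prefixed with where it came from (CPU, snoop,
// or prefetcher), using the packet's own print routine.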
void
MSHR::TargetList::
print(std::ostream &os, int verbosity, const std::string &prefix) const
{
    ConstIterator end_i = end();
    for (ConstIterator i = begin(); i != end_i; ++i) {
        const char *s;
        switch (i->source) {
          case Target::FromCPU:
            s = "FromCPU";
            break;
          case Target::FromSnoop:
            s = "FromSnoop";
            break;
          case Target::FromPrefetcher:
            s = "FromPrefetcher";
            break;
          default:
            s = "";
            break;
        }
        ccprintf(os, "%s%s: ", prefix, s);
        i->pkt->print(os, verbosity, "");
    }
}


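// Set up this MSHR for a new outstanding request: record the block
// address, size and ordering information, and queue the triggering
// packet as the first target.
//
// A rough sketch of the expected call sequence, inferred only from the
// methods in this file (the cache-side code that drives them is not
// shown here and may differ in detail):
//
//     mshr->allocate(blk_addr, blk_size, pkt, when_ready, order);
//     mshr->allocateTarget(pkt2, when_ready2, order2); // later hits on the miss
//     mshr->markInService(bus_pkt);                    // request sent downstream
//     ...                                              // response arrives, targets serviced
//     if (!mshr->promoteDeferredTargets())
//         mshr->deallocate();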
void
MSHR::allocate(Addr _addr, int _size, PacketPtr target,
               Tick whenReady, Counter _order)
{
    addr = _addr;
    size = _size;
    readyTime = whenReady;
    order = _order;
    assert(target);
    isForward = false;
    _isUncacheable = target->req->isUncacheable();
    inService = false;
    downstreamPending = false;
    threadNum = 0;
    ntargets = 1;
    assert(targets->isReset());
    // Don't know of a case where we would allocate a new MSHR for a
    // snoop (mem-side request), so set source according to request here
    Target::Source source = (target->cmd == MemCmd::HardPFReq) ?
        Target::FromPrefetcher : Target::FromCPU;
    targets->add(target, whenReady, _order, source, true);
    assert(deferredTargets->isReset());
    data = NULL;
}


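// Clear this MSHR's downstreamPending flag and recursively do the same
// for any upstream MSHRs recorded in the target list, so the whole chain
// learns that the request is no longer stalled below us.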
void
MSHR::clearDownstreamPending()
{
    assert(downstreamPending);
    downstreamPending = false;
    // recursively clear flag on any MSHRs we will be forwarding
    // responses to
    targets->clearDownstreamPending();
}

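// Mark this MSHR's request as having been sent downstream.  Returns true
// if the entry was only forwarding a request that expects no response
// (its single target is popped so the entry can be retired); returns
// false if the entry must stay allocated to wait for the response, in
// which case the pending-dirty and post-invalidate state is initialised
// from the outgoing packet.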
bool
MSHR::markInService(PacketPtr pkt)
{
    assert(!inService);
    if (isForwardNoResponse()) {
        // we just forwarded the request packet & don't expect a
        // response, so get rid of it
        assert(getNumTargets() == 1);
        popTarget();
        return true;
    }
    inService = true;
    pendingDirty = (targets->needsExclusive ||
                    (!pkt->sharedAsserted() && pkt->memInhibitAsserted()));
    postInvalidate = postDowngrade = false;

    if (!downstreamPending) {
        // let upstream caches know that the request has made it to a
        // level where it's going to get a response
        targets->clearDownstreamPending();
    }
    return false;
}


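// Return the MSHR to its idle state once every target has been serviced;
// the asserts check that nothing is still queued.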
void
MSHR::deallocate()
{
    assert(targets->empty());
    targets->resetFlags();
    assert(deferredTargets->isReset());
    assert(ntargets == 0);
    inService = false;
}

/*
 * Adds a target to an MSHR
 */
void
MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order)
{
    // if there's a request already in service for this MSHR, we will
    // have to defer the new target until after the response if any of
    // the following are true:
    // - there are other targets already deferred
    // - there's a pending invalidate to be applied after the response
    //   comes back (but before this target is processed)
    // - this target requires an exclusive block and either we're not
    //   getting an exclusive block back or we have already snooped
    //   another read request that will downgrade our exclusive block
    //   to shared

    // assume we'd never issue a prefetch when we've got an
    // outstanding miss
    assert(pkt->cmd != MemCmd::HardPFReq);

    if (inService &&
        (!deferredTargets->empty() || hasPostInvalidate() ||
         (pkt->needsExclusive() &&
          (!isPendingDirty() || hasPostDowngrade() || isForward)))) {
        // need to put on deferred list
        if (hasPostInvalidate())
            replaceUpgrade(pkt);
        deferredTargets->add(pkt, whenReady, _order, Target::FromCPU, true);
    } else {
        // No request outstanding, or still OK to append to
        // outstanding request: append to regular target list.  Only
        // mark pending if current request hasn't been issued yet
        // (isn't in service).
        targets->add(pkt, whenReady, _order, Target::FromCPU, !inService);
    }

    ++ntargets;
}

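// Decide how an incoming snoop interacts with this MSHR.  Returns false
// if the snoop logically precedes our request and must be handled by the
// cache in the normal way; returns true if the snoop is ordered after
// our request and has been dealt with here (by buffering a copy of the
// packet to replay and/or recording a pending invalidation or downgrade).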
bool
MSHR::handleSnoop(PacketPtr pkt, Counter _order)
{
    if (!inService || (pkt->isExpressSnoop() && downstreamPending)) {
        // Request has not been issued yet, or it's been issued
        // locally but is buffered unissued at some downstream cache
        // which is forwarding us this snoop.  Either way, the packet
        // we're snooping logically precedes this MSHR's request, so
        // the snoop has no impact on the MSHR, but must be processed
        // in the standard way by the cache.  The only exception is
        // that if we're an L2+ cache buffering an UpgradeReq from a
        // higher-level cache, and the snoop is invalidating, then our
        // buffered upgrades must be converted to read exclusives,
        // since the upper-level cache no longer has a valid copy.
        // That is, even though the upper-level cache got out on its
        // local bus first, some other invalidating transaction
        // reached the global bus before the upgrade did.
        if (pkt->needsExclusive()) {
            targets->replaceUpgrades();
            deferredTargets->replaceUpgrades();
        }

        return false;
    }

    // From here on down, the request issued by this MSHR logically
    // precedes the request we're snooping.
    if (pkt->needsExclusive()) {
        // snooped request still precedes the re-request we'll have to
        // issue for deferred targets, if any...
        deferredTargets->replaceUpgrades();
    }

    if (hasPostInvalidate()) {
        // a prior snoop has already appended an invalidation, so
        // logically we don't have the block anymore; no need for
        // further snooping.
        return true;
    }

    if (isPendingDirty() || pkt->isInvalidate()) {
        // We need to save and replay the packet in two cases:
        // 1. We're awaiting an exclusive copy, so ownership is pending,
        //    and we need to respond after we receive data.
        // 2. It's an invalidation (e.g., UpgradeReq), and we need
        //    to forward the snoop up the hierarchy after the current
        //    transaction completes.

        // Actual target device (typ. a memory) will delete the
        // packet on reception, so we need to save a copy here.
        PacketPtr cp_pkt = new Packet(pkt, true);
        targets->add(cp_pkt, curTick(), _order, Target::FromSnoop,
                     downstreamPending && targets->needsExclusive);
        ++ntargets;

        if (isPendingDirty()) {
            pkt->assertMemInhibit();
            pkt->setSupplyExclusive();
        }

        if (pkt->needsExclusive()) {
            // This transaction will take away our pending copy
            postInvalidate = true;
        }
    }

    if (!pkt->needsExclusive()) {
        // This transaction will get a read-shared copy, downgrading
        // our copy if we had an exclusive one
        postDowngrade = true;
        pkt->assertShared();
    }

    return true;
}


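// If any targets were deferred while the original request was in service,
// swap them into the active target list and refresh the order and ready
// time from the new head target.  Returns false if there is nothing to
// promote.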
bool
MSHR::promoteDeferredTargets()
{
    assert(targets->empty());
    if (deferredTargets->empty()) {
        return false;
    }

    // swap targets & deferredTargets lists
    TargetList *tmp = targets;
    targets = deferredTargets;
    deferredTargets = tmp;

    assert(targets->size() == ntargets);

    // clear deferredTargets flags
    deferredTargets->resetFlags();

    order = targets->front().order;
    readyTime = std::max(curTick(), targets->front().readyTime);

    return true;
}


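// Called when the fill (response) arrives.  If we were granted an
// exclusive copy even though only the deferred targets asked for one,
// those targets no longer need a separate request and are spliced onto
// the active target list.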
void
MSHR::handleFill(Packet *pkt, CacheBlk *blk)
{
    if (!pkt->sharedAsserted()
        && !(hasPostInvalidate() || hasPostDowngrade())
        && deferredTargets->needsExclusive) {
        // We got an exclusive response, but we have deferred targets
        // which are waiting to request an exclusive copy (not because
        // of a pending invalidate).  This can happen if the original
        // request was for a read-only (non-exclusive) block, but we
        // got an exclusive copy anyway because of the E part of the
        // MOESI/MESI protocol.  Since we got the exclusive copy
        // there's no need to defer the targets, so move them up to
        // the regular target list.
        assert(!targets->needsExclusive);
        targets->needsExclusive = true;
        // if any of the deferred targets were upper-level cache
        // requests marked downstreamPending, need to clear that
        assert(!downstreamPending);  // not pending here anymore
        deferredTargets->clearDownstreamPending();
        // this clears out deferredTargets too
        targets->splice(targets->end(), *deferredTargets);
        deferredTargets->resetFlags();
    }
}


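// Functional-access hook: print requests describe the MSHR itself, while
// ordinary functional accesses are checked against both the active and
// the deferred target packets, since that is where any data would be.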
bool
MSHR::checkFunctional(PacketPtr pkt)
{
    // For printing, we treat the MSHR as a whole as a single entity.
    // For other requests, we iterate over the individual targets
    // since that's where the actual data lies.
    if (pkt->isPrint()) {
        pkt->checkFunctional(this, addr, size, NULL);
        return false;
    } else {
        return (targets->checkFunctional(pkt) ||
                deferredTargets->checkFunctional(pkt));
    }
}


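// Dump the MSHR's address range, state flags and both target lists in a
// human-readable form for debugging.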
void
MSHR::print(std::ostream &os, int verbosity, const std::string &prefix) const
{
    ccprintf(os, "%s[%x:%x] %s %s %s state: %s %s %s %s %s\n",
             prefix, addr, addr+size-1,
             isForward ? "Forward" : "",
             isForwardNoResponse() ? "ForwNoResp" : "",
             needsExclusive() ? "Excl" : "",
             _isUncacheable ? "Unc" : "",
             inService ? "InSvc" : "",
             downstreamPending ? "DwnPend" : "",
             hasPostInvalidate() ? "PostInv" : "",
             hasPostDowngrade() ? "PostDowngr" : "");

    ccprintf(os, "%s  Targets:\n", prefix);
    targets->print(os, verbosity, prefix + "    ");
    if (!deferredTargets->empty()) {
        ccprintf(os, "%s  Deferred Targets:\n", prefix);
        deferredTargets->print(os, verbosity, prefix + "      ");
    }
}

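// The target lists were allocated with plain (non-array) new in the
// constructor, so release them with the matching scalar delete.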
MSHR::~MSHR()
{
    delete targets;
    delete deferredTargets;
}