/*
 * Copyright (c) 2012-2013, 2015-2016 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 */

/**
 * @file
 * Miss Status and Handling Register (MSHR) definitions.
 */

#include "mem/cache/mshr.hh"

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

#include "base/misc.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "mem/cache/cache.hh"
#include "sim/core.hh"

using namespace std;

MSHR::MSHR() : downstreamPending(false),
               pendingModified(false),
               postInvalidate(false), postDowngrade(false),
               isForward(false), allocOnFill(false)
{
}

MSHR::TargetList::TargetList()
    : needsWritable(false), hasUpgrade(false)
{}

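// Append a new target to this list, tracking whether any queued
// target needs a writable copy and whether an upgrade is queued. If
// markPending is set and the packet carries an upstream MSHR in its
// SenderState stack, that MSHR is marked as having a response pending
// downstream.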
inline void
MSHR::TargetList::add(PacketPtr pkt, Tick readyTime,
                      Counter order, Target::Source source, bool markPending)
{
    if (source != Target::FromSnoop) {
        if (pkt->needsWritable()) {
            needsWritable = true;
        }

        // StoreCondReq is effectively an upgrade if it's in an MSHR
        // since it would have been failed already if we didn't have a
        // read-only copy
        if (pkt->isUpgrade() || pkt->cmd == MemCmd::StoreCondReq) {
            hasUpgrade = true;
        }
    }

    if (markPending) {
        // Iterate over the SenderState stack and see if we find
        // an MSHR entry. If we do, set the downstreamPending
        // flag. Otherwise, do nothing.
        MSHR *mshr = pkt->findNextSenderState<MSHR>();
        if (mshr != nullptr) {
            assert(!mshr->downstreamPending);
            mshr->downstreamPending = true;
        } else {
            // No need to clear downstreamPending later
            markPending = false;
        }
    }

    emplace_back(pkt, readyTime, order, source, markPending);
}

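// Turn an upgrade-style request into one that fetches the whole
// block: UpgradeReq becomes ReadExReq, while the store-conditional
// variants become their failed counterparts, since the read-only copy
// they depended on has been invalidated.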
static void
replaceUpgrade(PacketPtr pkt)
{
    // remember if the current packet has data allocated
    bool has_data = pkt->hasData() || pkt->hasRespData();

    if (pkt->cmd == MemCmd::UpgradeReq) {
        pkt->cmd = MemCmd::ReadExReq;
        DPRINTF(Cache, "Replacing UpgradeReq with ReadExReq\n");
    } else if (pkt->cmd == MemCmd::SCUpgradeReq) {
        pkt->cmd = MemCmd::SCUpgradeFailReq;
        DPRINTF(Cache, "Replacing SCUpgradeReq with SCUpgradeFailReq\n");
    } else if (pkt->cmd == MemCmd::StoreCondReq) {
        pkt->cmd = MemCmd::StoreCondFailReq;
        DPRINTF(Cache, "Replacing StoreCondReq with StoreCondFailReq\n");
    }

    if (!has_data) {
        // there is no sensible way of setting the data field if the
        // new command actually would carry data
        assert(!pkt->hasData());

        if (pkt->hasRespData()) {
            // we went from a packet that had no data (neither request,
            // nor response), to one that does, and therefore we need to
            // actually allocate space for the data payload
            pkt->allocate();
        }
    }
}


void
MSHR::TargetList::replaceUpgrades()
{
    if (!hasUpgrade)
        return;

    for (auto& t : *this) {
        replaceUpgrade(t.pkt);
    }

    hasUpgrade = false;
}


void
MSHR::TargetList::clearDownstreamPending()
{
    for (auto& t : *this) {
        if (t.markedPending) {
            // Iterate over the SenderState stack and see if we find
            // an MSHR entry. If we find one, clear the
            // downstreamPending flag by calling
            // clearDownstreamPending(). This recursively clears the
            // downstreamPending flag in all caches this packet has
            // passed through.
            MSHR *mshr = t.pkt->findNextSenderState<MSHR>();
            if (mshr != nullptr) {
                mshr->clearDownstreamPending();
            }
        }
    }
}

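// Check whether a functional access can be satisfied by any of the
// packets queued in this target list.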
bool
MSHR::TargetList::checkFunctional(PacketPtr pkt)
{
    for (auto& t : *this) {
        if (pkt->checkFunctional(t.pkt)) {
            return true;
        }
    }

    return false;
}


void
MSHR::TargetList::print(std::ostream &os, int verbosity,
                        const std::string &prefix) const
{
    for (auto& t : *this) {
        const char *s;
        switch (t.source) {
          case Target::FromCPU:
            s = "FromCPU";
            break;
          case Target::FromSnoop:
            s = "FromSnoop";
            break;
          case Target::FromPrefetcher:
            s = "FromPrefetcher";
            break;
          default:
            s = "";
            break;
        }
        ccprintf(os, "%s%s: ", prefix, s);
        t.pkt->print(os, verbosity, "");
    }
}

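// Initialize a freshly allocated MSHR for the given block and queue
// its first target.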
void
MSHR::allocate(Addr blk_addr, unsigned blk_size, PacketPtr target,
               Tick when_ready, Counter _order, bool alloc_on_fill)
{
    assert(target);
    blkAddr = blk_addr;
    blkSize = blk_size;
    isSecure = target->isSecure();
    readyTime = when_ready;
    order = _order;
    isForward = false;
    allocOnFill = alloc_on_fill;
    _isUncacheable = target->req->isUncacheable();
    inService = false;
    downstreamPending = false;
    assert(targets.isReset());
    // Don't know of a case where we would allocate a new MSHR for a
    // snoop (mem-side request), so set source according to request here
    Target::Source source = (target->cmd == MemCmd::HardPFReq) ?
        Target::FromPrefetcher : Target::FromCPU;
    targets.add(target, when_ready, _order, source, true);
    assert(deferredTargets.isReset());
}


void
MSHR::clearDownstreamPending()
{
    assert(downstreamPending);
    downstreamPending = false;
    // recursively clear flag on any MSHRs we will be forwarding
    // responses to
    targets.clearDownstreamPending();
}

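// Mark the MSHR as in service, recording whether the response is
// expected to provide a writable (Modified) block; if no response is
// pending further downstream, clear the downstreamPending flag on any
// upstream MSHRs waiting on this one.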
void
MSHR::markInService(bool pending_modified_resp)
{
    assert(!inService);

    inService = true;
    pendingModified = targets.needsWritable || pending_modified_resp;
    postInvalidate = postDowngrade = false;

    if (!downstreamPending) {
        // let upstream caches know that the request has made it to a
        // level where it's going to get a response
        targets.clearDownstreamPending();
    }
}

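// Return the MSHR to its unallocated state; all targets must already
// have been serviced.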
void
MSHR::deallocate()
{
    assert(targets.empty());
    targets.resetFlags();
    assert(deferredTargets.isReset());
    inService = false;
}

/*
 * Adds a target to an MSHR
 */
void
MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order,
                     bool alloc_on_fill)
{
    // assume we'd never issue a prefetch when we've got an
    // outstanding miss
    assert(pkt->cmd != MemCmd::HardPFReq);

    // uncacheable accesses always allocate a new MSHR, and cacheable
    // accesses ignore any uncacheable MSHRs, thus we should never
    // have targets added if originally allocated uncacheable
    assert(!_isUncacheable);

    // potentially re-evaluate whether we should allocate on a fill or
    // not
    allocOnFill = allocOnFill || alloc_on_fill;

    // if there's a request already in service for this MSHR, we will
    // have to defer the new target until after the response if any of
    // the following are true:
    // - there are other targets already deferred
    // - there's a pending invalidate to be applied after the response
    //   comes back (but before this target is processed)
    // - this target requires a writable block and either we're not
    //   getting a writable block back or we have already snooped
    //   another read request that will downgrade our writable block
    //   to non-writable (Shared or Owned)
    if (inService &&
        (!deferredTargets.empty() || hasPostInvalidate() ||
         (pkt->needsWritable() &&
          (!isPendingModified() || hasPostDowngrade() || isForward)))) {
        // need to put on deferred list
        if (hasPostInvalidate())
            replaceUpgrade(pkt);
        deferredTargets.add(pkt, whenReady, _order, Target::FromCPU, true);
    } else {
        // No request outstanding, or still OK to append to
        // outstanding request: append to regular target list.  Only
        // mark pending if current request hasn't been issued yet
        // (isn't in service).
        targets.add(pkt, whenReady, _order, Target::FromCPU, !inService);
    }
}

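// Handle a snoop hitting this MSHR's block. Returns false if the
// snoop logically precedes our request and must still be handled by
// the cache in the normal way; returns true if our in-service request
// is ordered first and the snoop has been absorbed by this MSHR
// (recorded as a snoop target and/or via the postInvalidate and
// postDowngrade flags).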
bool
MSHR::handleSnoop(PacketPtr pkt, Counter _order)
{
    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    // when we snoop packets the needsWritable and isInvalidate flags
    // should always be the same, however, this assumes that we never
    // snoop writes as they are currently not marked as invalidations
    panic_if(pkt->needsWritable() != pkt->isInvalidate(),
             "%s got snoop %s to addr %#llx where needsWritable "
             "does not match isInvalidate", name(), pkt->cmdString(),
             pkt->getAddr());

    if (!inService || (pkt->isExpressSnoop() && downstreamPending)) {
        // Request has not been issued yet, or it's been issued
        // locally but is buffered unissued at some downstream cache
        // which is forwarding us this snoop.  Either way, the packet
        // we're snooping logically precedes this MSHR's request, so
        // the snoop has no impact on the MSHR, but must be processed
        // in the standard way by the cache.  The only exception is
        // that if we're an L2+ cache buffering an UpgradeReq from a
        // higher-level cache, and the snoop is invalidating, then our
        // buffered upgrades must be converted to read exclusives,
        // since the upper-level cache no longer has a valid copy.
        // That is, even though the upper-level cache got out on its
        // local bus first, some other invalidating transaction
        // reached the global bus before the upgrade did.
        if (pkt->needsWritable()) {
            targets.replaceUpgrades();
            deferredTargets.replaceUpgrades();
        }

        return false;
    }

    // From here on down, the request issued by this MSHR logically
    // precedes the request we're snooping.
    if (pkt->needsWritable()) {
        // snooped request still precedes the re-request we'll have to
        // issue for deferred targets, if any...
        deferredTargets.replaceUpgrades();
    }

    if (hasPostInvalidate()) {
        // a prior snoop has already appended an invalidation, so
        // logically we don't have the block anymore; no need for
        // further snooping.
        return true;
    }

    if (isPendingModified() || pkt->isInvalidate()) {
        // We need to save and replay the packet in two cases:
        // 1. We're awaiting a writable copy (Modified or Exclusive),
        //    so this MSHR is the ordering point, and we need to respond
        //    after we receive data.
        // 2. It's an invalidation (e.g., UpgradeReq), and we need
        //    to forward the snoop up the hierarchy after the current
        //    transaction completes.

        // Start by determining if we will eventually respond or not,
        // matching the conditions checked in Cache::handleSnoop
        bool will_respond = isPendingModified() && pkt->needsResponse() &&
            pkt->cmd != MemCmd::InvalidateReq;

        // The packet we are snooping may be deleted by the time we
        // actually process the target, and we consequently need to
        // save a copy here. Clear flags and also allocate new data as
        // the original packet data storage may have been deleted by
        // the time we get to process this packet. In the cases where
        // we are not responding after handling the snoop we also need
        // to create a copy of the request to be on the safe side. In
        // the latter case the cache is responsible for deleting both
        // the packet and the request as part of handling the deferred
        // snoop.
        PacketPtr cp_pkt = will_respond ? new Packet(pkt, true, true) :
            new Packet(new Request(*pkt->req), pkt->cmd);

        if (isPendingModified()) {
            // we are the ordering point, and will consequently
            // respond, and depending on whether the packet
            // needsWritable or not we either pass a Shared line or a
            // Modified line
            pkt->setCacheResponding();

            // inform the cache hierarchy that this cache had the line
            // in the Modified state, even if the response is passed
            // as Shared (and thus non-writable)
            pkt->setResponderHadWritable();

            // in the case of an uncacheable request there is no need
            // to set the responderHadWritable flag, but since the
            // recipient does not care there is no harm in doing so
        }
        targets.add(cp_pkt, curTick(), _order, Target::FromSnoop,
                    downstreamPending && targets.needsWritable);

        if (pkt->needsWritable()) {
            // This transaction will take away our pending copy
            postInvalidate = true;
        }
    }

    if (!pkt->needsWritable() && !pkt->req->isUncacheable()) {
        // This transaction will get a read-shared copy, downgrading
        // our copy if we had a writable one
        postDowngrade = true;
        // make sure that any downstream cache does not respond with a
        // writable (and dirty) copy even if it has one, unless it was
        // explicitly asked for one
        pkt->setHasSharers();
    }

    return true;
}

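// Promote deferred targets onto the active target list so they can be
// serviced by a new downstream request. Returns false if there is
// nothing to promote.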
bool
MSHR::promoteDeferredTargets()
{
    assert(targets.empty());
    if (deferredTargets.empty()) {
        return false;
    }

    // swap targets & deferredTargets lists
    std::swap(targets, deferredTargets);

    // clear deferredTargets flags
    deferredTargets.resetFlags();

    order = targets.front().order;
    readyTime = std::max(curTick(), targets.front().readyTime);

    return true;
}


void
MSHR::promoteWritable()
{
    if (deferredTargets.needsWritable &&
        !(hasPostInvalidate() || hasPostDowngrade())) {
        // We got a writable response, but we have deferred targets
        // which are waiting to request a writable copy (not because
        // of a pending invalidate).  This can happen if the original
        // request was for a read-only block, but we got a writable
        // response anyway. Since we got the writable copy there's no
        // need to defer the targets, so move them up to the regular
        // target list.
        assert(!targets.needsWritable);
        targets.needsWritable = true;
        // if any of the deferred targets were upper-level cache
        // requests marked downstreamPending, need to clear that
        assert(!downstreamPending);  // not pending here anymore
        deferredTargets.clearDownstreamPending();
        // this clears out deferredTargets too
        targets.splice(targets.end(), deferredTargets);
        deferredTargets.resetFlags();
    }
}


bool
MSHR::checkFunctional(PacketPtr pkt)
{
    // For printing, we treat the whole MSHR as a single entity.
    // For other requests, we iterate over the individual targets
    // since that's where the actual data lies.
    if (pkt->isPrint()) {
        pkt->checkFunctional(this, blkAddr, isSecure, blkSize, nullptr);
        return false;
    } else {
        return (targets.checkFunctional(pkt) ||
                deferredTargets.checkFunctional(pkt));
    }
}

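// Send this MSHR's outstanding request downstream via the owning
// cache.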
bool
MSHR::sendPacket(Cache &cache)
{
    return cache.sendMSHRQueuePacket(this);
}

void
MSHR::print(std::ostream &os, int verbosity, const std::string &prefix) const
{
    ccprintf(os, "%s[%#llx:%#llx](%s) %s %s %s state: %s %s %s %s %s\n",
             prefix, blkAddr, blkAddr + blkSize - 1,
             isSecure ? "s" : "ns",
             isForward ? "Forward" : "",
             allocOnFill ? "AllocOnFill" : "",
             needsWritable() ? "Wrtbl" : "",
             _isUncacheable ? "Unc" : "",
             inService ? "InSvc" : "",
             downstreamPending ? "DwnPend" : "",
             hasPostInvalidate() ? "PostInv" : "",
             hasPostDowngrade() ? "PostDowngr" : "");

    ccprintf(os, "%s  Targets:\n", prefix);
    targets.print(os, verbosity, prefix + "    ");
    if (!deferredTargets.empty()) {
        ccprintf(os, "%s  Deferred Targets:\n", prefix);
        deferredTargets.print(os, verbosity, prefix + "      ");
    }
}

std::string
MSHR::print() const
{
    ostringstream str;
    print(str);
    return str.str();
}