/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 */

/**
 * @file
 * Miss Status and Handling Register (MSHR) definitions.
 */

#include <assert.h>
#include <string>
#include <vector>
#include <algorithm>
#include <cstring>   // for std::memcpy in handleReplacement()
#include <iostream>  // for std::cerr in dump()

#include "mem/cache/miss/mshr.hh"
#include "sim/core.hh" // for curTick
#include "sim/host.hh"
#include "base/misc.hh"
#include "mem/cache/cache.hh"

using namespace std;

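/**
 * Default constructor.  Leaves the MSHR unallocated: not in service,
 * holding no targets, and not yet assigned to a thread.
 */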
MSHR::MSHR()
{
    inService = false;
    ntargets = 0;
    threadNum = -1;
}

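/**
 * Allocate this MSHR for a new outstanding request.  Records the block
 * address, size, ready time, and ordering stamp, notes whether the
 * initiating packet needs an exclusive copy, and installs that packet as
 * the first (cpu-side) target.
 *
 * A minimal caller-side sketch (the names blk_addr, blkSize, pkt, and
 * order are hypothetical and only illustrate the call):
 *
 *     mshr->allocate(blk_addr, blkSize, pkt, curTick, order++);
 */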
void
MSHR::allocate(Addr _addr, int _size, PacketPtr target,
               Tick when, Counter _order)
{
    addr = _addr;
    size = _size;
    readyTick = when;
    order = _order;
    assert(target);
    isCacheFill = false;
    needsExclusive = target->needsExclusive();
    _isUncacheable = target->req->isUncacheable();
    inService = false;
    threadNum = 0;
    ntargets = 1;
    // Don't know of a case where we would allocate a new MSHR for a
    // snoop (mem-side request), so set cpuSide to true here.
    targets.push_back(Target(target, when, _order, true));
    assert(deferredTargets.empty());
    deferredNeedsExclusive = false;
    pendingInvalidate = false;
    pendingShared = false;
    replacedPendingUpgrade = false;
    data = NULL;
}

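/**
 * Return this MSHR to the free state.  All targets (regular and
 * deferred) must already have been drained.
 */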
void
MSHR::deallocate()
{
    assert(targets.empty());
    assert(deferredTargets.empty());
    assert(ntargets == 0);
    inService = false;
    //allocIter = NULL;
    //readyIter = NULL;
}

/*
 * Add a cpu-side target to this MSHR.  If the request is already in
 * service and the new target conflicts with it (there are already
 * deferred targets, an invalidation is pending, or the new target needs
 * an exclusive copy that the outstanding request will not provide), the
 * target is put on the deferred list; otherwise it is appended to the
 * current target list.
 */
void
MSHR::allocateTarget(PacketPtr target, Tick when, Counter _order)
{
    if (inService) {
        if (!deferredTargets.empty() || pendingInvalidate ||
            (!needsExclusive && target->needsExclusive())) {
            // need to put on deferred list
            deferredTargets.push_back(Target(target, when, _order, true));
            if (target->needsExclusive()) {
                deferredNeedsExclusive = true;
            }
        } else {
            // still OK to append to outstanding request
            targets.push_back(Target(target, when, _order, true));
        }
    } else {
        if (target->needsExclusive()) {
            needsExclusive = true;
        }

        targets.push_back(Target(target, when, _order, true));
    }

    ++ntargets;
}

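/**
 * Record a snoop that hits this in-service MSHR.  A conflicting snoop is
 * saved as a mem-side target so it can be answered when the outstanding
 * response arrives; a read snooping another read just marks the pending
 * fill as shared.
 */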
void
MSHR::allocateSnoopTarget(PacketPtr pkt, Tick when, Counter _order)
{
    assert(inService); // don't bother to call otherwise

    if (pendingInvalidate) {
        // a prior snoop has already appended an invalidation, so
        // logically we don't have the block anymore...
        return;
    }

    DPRINTF(Cache, "deferred snoop on %x: %s %s\n", addr,
            needsExclusive ? "needsExclusive" : "",
            pkt->needsExclusive() ? "pkt->needsExclusive()" : "");

    if (needsExclusive || pkt->needsExclusive()) {
        // actual target device (typ. PhysicalMemory) will delete the
        // packet on reception, so we need to save a copy here
        targets.push_back(Target(new Packet(pkt), when, _order, false));
        ++ntargets;

        if (needsExclusive) {
            // We're awaiting an exclusive copy, so ownership is pending.
            // It's up to us to respond once the data arrives.
            pkt->assertMemInhibit();
        }

        if (pkt->needsExclusive()) {
            // This transaction will take away our pending copy
            pendingInvalidate = true;
        }
    } else {
        // Read to a read: no conflict, so no need to record as
        // target, but make sure neither reader thinks it's getting an
        // exclusive copy
        pendingShared = true;
        pkt->assertShared();
    }
}


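/**
 * Promote the deferred targets to the active target list so a new
 * request can be issued for them.  Returns false if there is nothing
 * to promote.
 */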
bool
MSHR::promoteDeferredTargets()
{
    if (deferredTargets.empty()) {
        return false;
    }

    assert(targets.empty());
    targets = deferredTargets;
    deferredTargets.clear();
    assert(targets.size() == ntargets);

    needsExclusive = deferredNeedsExclusive;
    pendingInvalidate = false;
    pendingShared = false;
    deferredNeedsExclusive = false;
    order = targets.front().order;
    readyTick = std::max(curTick, targets.front().time);

    return true;
}


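/**
 * Note that the block targeted by this outstanding upgrade is being
 * replaced.  A dirty block is left to the writeback; a clean block's
 * data is saved here, since it may become the only valid copy if the
 * upgrade succeeds.
 */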
void
MSHR::handleReplacement(CacheBlk *blk, int blkSize)
{
    // must be an outstanding upgrade request on the block we're about
    // to replace...
    assert(!blk->isWritable());
    assert(needsExclusive);
    replacedPendingUpgrade = true;

    // if it's dirty, just remember what happened and allow the
    // writeback to continue.  we'll reissue a ReadEx later whether
    // the upgrade succeeds or not
    if (blk->isDirty()) {
        replacedPendingUpgradeDirty = true;
        return;
    }

    // if not dirty, we need to save it off as it will be the only valid
    // copy in the system if the upgrade is successful (and may need to
    // be written back then, as the current owner, if any, will be
    // invalidating its block)
    replacedPendingUpgradeDirty = false;
    data = new uint8_t[blkSize];
    std::memcpy(data, blk->data, blkSize);
}


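/**
 * Adjust an incoming response for events that occurred while the request
 * was in service.  Returns false if the response cannot be used and the
 * request must be reissued (an upgrade response for a block whose dirty
 * copy was already written back).
 */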
bool
MSHR::handleFill(Packet *pkt, CacheBlk *blk)
{
    if (replacedPendingUpgrade) {
        // block was replaced while upgrade request was in service
        assert(pkt->cmd == MemCmd::UpgradeResp);
        assert(blk == NULL);
        assert(replacedPendingUpgrade);
        replacedPendingUpgrade = false; // reset
        if (replacedPendingUpgradeDirty) {
            // we wrote back the previous copy; just reissue as a ReadEx
            return false;
        }

        // previous copy was not dirty, but we are now the owner...  fake
        // out the cache by taking the saved data and converting the
        // UpgradeResp to a ReadExResp
        assert(data);
        pkt->cmd = MemCmd::ReadExResp;
        pkt->setData(data);
        delete [] data;
        data = NULL;
    } else if (pendingShared) {
        // we snooped another read while this read was in
        // service... assert the shared line on its behalf
        pkt->assertShared();
    }

    return true;
}


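/**
 * Print the state of this MSHR and its targets (for debugging).
 */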
void
MSHR::dump()
{
    ccprintf(cerr,
             "inService: %d thread: %d\n"
             "Addr: %x ntargets %d\n"
             "Targets:\n",
             inService, threadNum, addr, ntargets);

    TargetListIterator tar_it = targets.begin();
    for (int i = 0; i < ntargets; i++) {
        assert(tar_it != targets.end());

        ccprintf(cerr, "\t%d: Addr: %x cmd: %s\n",
                 i, tar_it->pkt->getAddr(), tar_it->pkt->cmdString());

        tar_it++;
    }
    ccprintf(cerr, "\n");
}

MSHR::~MSHR()
{
}