// noncoherent_cache.cc revision 13017:a620da03ab10
/*
 * Copyright (c) 2010-2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Sandberg
 *          Nikos Nikoleris
 */

/**
 * @file
 * Non-coherent cache definitions.
 */

#include "mem/cache/noncoherent_cache.hh"

#include <cassert>

#include "base/logging.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/mshr.hh"
#include "params/NoncoherentCache.hh"

// Construct a non-coherent cache. All geometry, latency and queue
// parameters are handled by BaseCache, sized to the system's cache
// line.
NoncoherentCache::NoncoherentCache(const NoncoherentCacheParams *p)
    : BaseCache(p, p->system->cacheLineSize())
{
}

void
NoncoherentCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool)
{
    // Satisfy a request that hits in this cache. The two unnamed bool
    // parameters exist only to match the BaseCache interface (they
    // carry coherence-related information in the coherent cache) and
    // are deliberately ignored here.

    // As this a non-coherent cache located below the point of
    // coherency, we do not expect requests that are typically used to
    // keep caches coherent (e.g., InvalidateReq or UpdateReq).
    assert(pkt->isRead() || pkt->isWrite());
    BaseCache::satisfyRequest(pkt, blk);
}

82bool
83NoncoherentCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
84                         PacketList &writebacks)
85{
86    bool success = BaseCache::access(pkt, blk, lat, writebacks);
87
88    if (pkt->isWriteback() || pkt->cmd == MemCmd::WriteClean) {
89        assert(blk && blk->isValid());
90        // Writeback and WriteClean can allocate and fill even if the
91        // referenced block was not present or it was invalid. If that
92        // is the case, make sure that the new block is marked as
93        // writable
94        blk->status |= BlkWritable;
95    }
96
97    return success;
98}
99
100void
101NoncoherentCache::doWritebacks(PacketList& writebacks, Tick forward_time)
102{
103    while (!writebacks.empty()) {
104        PacketPtr wb_pkt = writebacks.front();
105        allocateWriteBuffer(wb_pkt, forward_time);
106        writebacks.pop_front();
107    }
108}
109
110void
111NoncoherentCache::doWritebacksAtomic(PacketList& writebacks)
112{
113    while (!writebacks.empty()) {
114        PacketPtr wb_pkt = writebacks.front();
115        memSidePort.sendAtomic(wb_pkt);
116        writebacks.pop_front();
117        delete wb_pkt;
118    }
119}
120
void
NoncoherentCache::handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk,
                                      Tick forward_time, Tick request_time)
{
    // miss: look for an MSHR already tracking this block so the
    // request can be coalesced with an outstanding one
    Addr blk_addr = pkt->getBlockAddr(blkSize);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, pkt->isSecure(), false);

    // We can always write to a non coherent cache if the block is
    // present and therefore if we have reached this point then the
    // block should not be in the cache.
    assert(mshr || !blk || !blk->isValid());

    // Defer to the shared miss path (MSHR allocation/coalescing and
    // scheduling of the downstream request).
    BaseCache::handleTimingReqMiss(pkt, mshr, blk, forward_time, request_time);
}

void
NoncoherentCache::recvTimingReq(PacketPtr pkt)
{
    // Below the point of coherency no other cache can be responding
    // to a request we observe.
    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    // Coherence maintenance commands must never reach this cache.
    panic_if(!(pkt->isRead() || pkt->isWrite()),
             "Should only see read and writes at non-coherent cache\n");

    BaseCache::recvTimingReq(pkt);
}

PacketPtr
NoncoherentCache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                                   bool needs_writable) const
{
    // Build the downstream packet used to service a miss. The only
    // request this cache issues downstream is a full-block ReadReq;
    // needs_writable exists to match the BaseCache interface and is
    // ignored (everything below us is non-coherent).

    // We also fill for writebacks from the coherent caches above us,
    // and they do not need responses
    assert(cpu_pkt->needsResponse());

    // A miss can happen only due to missing block
    assert(!blk || !blk->isValid());

    PacketPtr pkt = new Packet(cpu_pkt->req, MemCmd::ReadReq, blkSize);

    // the packet should be block aligned
    assert(pkt->getAddr() == pkt->getBlockAddr(blkSize));

    pkt->allocate();
    DPRINTF(Cache, "%s created %s from %s\n", __func__, pkt->print(),
            cpu_pkt->print());
    return pkt;
}


Cycles
NoncoherentCache::handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk,
                                      PacketList &writebacks)
{
    // Service a miss in atomic mode: issue a blocking ReadReq
    // downstream, fill this cache with the response, then satisfy the
    // original request from the now-present block. Returns the
    // latency reported by the downstream access.
    PacketPtr bus_pkt = createMissPacket(pkt, blk, true);
    DPRINTF(Cache, "Sending an atomic %s\n", bus_pkt->print());

    Cycles latency = ticksToCycles(memSidePort.sendAtomic(bus_pkt));

    assert(bus_pkt->isResponse());
    // At the moment the only supported downstream requests we issue
    // are ReadReq and therefore here we should only see the
    // corresponding responses
    assert(bus_pkt->isRead());
    assert(pkt->cmd != MemCmd::UpgradeResp);
    assert(!bus_pkt->isInvalidate());
    assert(!bus_pkt->hasSharers());

    // We are now dealing with the response handling
    DPRINTF(Cache, "Receive response: %s\n", bus_pkt->print());

    if (!bus_pkt->isError()) {
        // Any response that does not have an error should be filling,
        // after all it is a read response
        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
                bus_pkt->getAddr());
        blk = handleFill(bus_pkt, blk, writebacks, allocOnFill(bus_pkt->cmd));
        assert(blk);
    }
    // NOTE(review): on an error response blk may still be null or
    // invalid here, yet satisfyRequest is called unconditionally --
    // confirm BaseCache::satisfyRequest tolerates the error path.
    satisfyRequest(pkt, blk);

    // Enforce the clusivity policy after the fill; the first argument
    // is hard-coded true here (presumably "requester is a cache" --
    // TODO confirm against BaseCache::maintainClusivity).
    maintainClusivity(true, blk);

    // Use the separate bus_pkt to generate response to pkt and
    // then delete it.
    if (!pkt->isWriteback() && pkt->cmd != MemCmd::WriteClean) {
        assert(pkt->needsResponse());
        pkt->makeAtomicResponse();
        if (bus_pkt->isError()) {
            pkt->copyError(bus_pkt);
        }
    }

    delete bus_pkt;

    return latency;
}

Tick
NoncoherentCache::recvAtomic(PacketPtr pkt)
{
    // Same sanity checks as recvTimingReq: below the point of
    // coherency there is neither snoop traffic nor any cache that can
    // be responding on our behalf.
    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    panic_if(!(pkt->isRead() || pkt->isWrite()),
             "Should only see read and writes at non-coherent cache\n");

    return BaseCache::recvAtomic(pkt);
}


void
NoncoherentCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
{
    // Functional (debug) accesses may only arrive from the CPU side;
    // a snoop from below would indicate a mis-wired hierarchy.
    panic_if(!from_cpu_side, "Non-coherent cache received functional snoop"
             " request\n");

    BaseCache::functionalAccess(pkt, from_cpu_side);
}

void
NoncoherentCache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt,
                                     CacheBlk *blk, PacketList &writebacks)
{
    // Service every target queued on this MSHR now that the response
    // packet pkt has arrived (and the block, if any, has been filled).
    MSHR::Target *initial_tgt = mshr->getTarget();
    // First offset for critical word first calculations
    const int initial_offset = initial_tgt->pkt->getOffset(blkSize);

    MSHR::TargetList targets = mshr->extractServiceableTargets(pkt);
    for (auto &target: targets) {
        Packet *tgt_pkt = target.pkt;

        switch (target.source) {
          case MSHR::Target::FromCPU:
            // handle deferred requests coming from a cache or core
            // above

            Tick completion_time;
            // Here we charge on completion_time the delay of the xbar if the
            // packet comes from it, charged on headerDelay.
            completion_time = pkt->headerDelay;

            satisfyRequest(tgt_pkt, blk);

            // How many bytes past the first request is this one
            int transfer_offset;
            transfer_offset = tgt_pkt->getOffset(blkSize) - initial_offset;
            if (transfer_offset < 0) {
                transfer_offset += blkSize;
            }
            // If not critical word (offset) return payloadDelay.
            // responseLatency is the latency of the return path
            // from lower level caches/memory to an upper level cache or
            // the core.
            completion_time += clockEdge(responseLatency) +
                (transfer_offset ? pkt->payloadDelay : 0);

            // Account the measured miss latency to this master's stats.
            assert(tgt_pkt->req->masterId() < system->maxMasters());
            missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] +=
                completion_time - target.recvTime;

            tgt_pkt->makeTimingResponse();
            // Propagate a downstream error to the requester.
            if (pkt->isError())
                tgt_pkt->copyError(pkt);

            // Reset the bus additional time as it is now accounted for
            tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
            cpuSidePort.schedTimingResp(tgt_pkt, completion_time, true);
            break;

          case MSHR::Target::FromPrefetcher:
            // handle deferred requests coming from a prefetcher
            // attached to this cache
            assert(tgt_pkt->cmd == MemCmd::HardPFReq);

            if (blk)
                blk->status |= BlkHWPrefetched;

            // We have filled the block and the prefetcher does not
            // require responses.
            delete tgt_pkt;
            break;

          default:
            // we should never see FromSnoop Targets as this is a
            // non-coherent cache
            panic("Illegal target->source enum %d\n", target.source);
        }
    }

    // Responses are filling and bring in writable blocks, therefore
    // there should be no deferred targets and all the non-deferred
    // targets are now serviced.
    assert(mshr->getNumTargets() == 0);
}

void
NoncoherentCache::recvTimingResp(PacketPtr pkt)
{
    // Sanity-check a downstream response before handing it to the
    // generic fill/MSHR-servicing path in BaseCache.
    assert(pkt->isResponse());
    // At the moment the only supported downstream requests we issue
    // are ReadReq and therefore here we should only see the
    // corresponding responses
    assert(pkt->isRead());
    assert(pkt->cmd != MemCmd::UpgradeResp);
    assert(!pkt->isInvalidate());
    // This cache is non-coherent and any memories below are
    // non-coherent too (non-coherent caches or the main memory),
    // therefore the fetched block can be marked as writable.
    assert(!pkt->hasSharers());

    BaseCache::recvTimingResp(pkt);
}

336PacketPtr
337NoncoherentCache::evictBlock(CacheBlk *blk)
338{
339    // A dirty block is always written back.
340
341    // A clean block can we written back, if we turned on writebacks
342    // for clean blocks. This could be useful if there is a cache
343    // below and we want to make sure the block is cached but if the
344    // memory below is the main memory WritebackCleans are
345    // unnecessary.
346
347    // If we clean writebacks are not enabled, we do not take any
348    // further action for evictions of clean blocks (i.e., CleanEvicts
349    // are unnecessary).
350    PacketPtr pkt = (blk->isDirty() || writebackClean) ?
351        writebackBlk(blk) : nullptr;
352
353    invalidateBlock(blk);
354
355    return pkt;
356}
357
358void
359NoncoherentCache::evictBlock(CacheBlk *blk, PacketList &writebacks)
360{
361    PacketPtr pkt = evictBlock(blk);
362    if (pkt) {
363        writebacks.push_back(pkt);
364    }
365}
366
NoncoherentCache*
NoncoherentCacheParams::create()
{
    // Factory hook invoked by the Python-configured parameter object;
    // a tag store and a replacement policy must have been supplied.
    assert(tags);
    assert(replacement_policy);

    return new NoncoherentCache(this);
}
