base.hh revision 8833:2870638642bd
/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 *          Ron Dreslinski
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __BASE_CACHE_HH__
#define __BASE_CACHE_HH__

#include <algorithm>
#include <list>
#include <string>
#include <vector>

#include "base/misc.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "mem/cache/mshr_queue.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/request.hh"
#include "mem/tport.hh"
#include "params/BaseCache.hh"
#include "sim/eventq.hh"
#include "sim/full_system.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

class MSHR;
/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public MemObject
{
    /**
     * Indexes to enumerate the MSHR queues.
     */
    enum MSHRQueueIndex {
        MSHRQueue_MSHRs,
        MSHRQueue_WriteBuffer
    };

  public:
    /**
     * Reasons for caches to be blocked.
     */
    enum BlockedCause {
        Blocked_NoMSHRs = MSHRQueue_MSHRs,
        Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
        Blocked_NoTargets,
        NUM_BLOCKED_CAUSES
    };

    /**
     * Reasons for a cache to request a bus.
     */
    enum RequestCause {
        Request_MSHR = MSHRQueue_MSHRs,
        Request_WB = MSHRQueue_WriteBuffer,
        Request_PF,
        NUM_REQUEST_CAUSES
    };

  protected:

    class CachePort : public SimpleTimingPort
    {
      public:
        BaseCache *cache;

      protected:
        CachePort(const std::string &_name, BaseCache *_cache,
                  const std::string &_label);

        virtual unsigned deviceBlockSize() const;

        bool recvRetryCommon();

        typedef EventWrapper<Port, &Port::sendRetry>
            SendRetryEvent;

        const std::string label;

      public:
        void setBlocked();

        void clearBlocked();

        bool checkFunctional(PacketPtr pkt);

        bool blocked;

        bool mustSendRetry;

        void requestBus(RequestCause cause, Tick time)
        {
            DPRINTF(CachePort, "Asserting bus request for cause %d\n", cause);
            if (!waitingOnRetry) {
                schedSendEvent(time);
            }
        }

        void respond(PacketPtr pkt, Tick time) {
            schedSendTiming(pkt, time);
        }
    };

    CachePort *cpuSidePort;
    CachePort *memSidePort;
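    // Illustrative only (a sketch, not part of this interface): a derived
    // cache would typically use these ports along the lines of
    //
    //     cpuSidePort->respond(pkt, time);              // schedule a timing response
    //     memSidePort->requestBus(Request_MSHR, time);  // ask to send on the memory-side bus
    //
    // assuming a PacketPtr pkt and a Tick time are in scope.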

  protected:

    /** Miss status registers */
    MSHRQueue mshrQueue;

    /** Write/writeback buffer */
    MSHRQueue writeBuffer;

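    /**
     * Allocate an entry in the given MSHR queue for the packet, marking the
     * cache as blocked for the corresponding cause if the queue becomes
     * full, and optionally requesting the memory-side bus.
     * @param mq The MSHR queue to allocate from.
     * @param addr The address of the request.
     * @param size The size of the request in bytes.
     * @param pkt The original request packet.
     * @param time The time the MSHR is allocated (and any bus request made).
     * @param requestBus Whether to also request the memory-side bus.
     * @return A pointer to the newly allocated MSHR.
     */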
    MSHR *allocateBufferInternal(MSHRQueue *mq, Addr addr, int size,
                                 PacketPtr pkt, Tick time, bool requestBus)
    {
        MSHR *mshr = mq->allocate(addr, size, pkt, time, order++);

        if (mq->isFull()) {
            setBlocked((BlockedCause)mq->index);
        }

        if (requestBus) {
            requestMemSideBus((RequestCause)mq->index, time);
        }

        return mshr;
    }

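    /**
     * Mark an MSHR as in service and clear the corresponding blocked cause
     * if its queue is no longer full as a result.
     * @param mshr The MSHR to mark in service.
     * @param pkt The packet being sent for this MSHR.
     */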
    void markInServiceInternal(MSHR *mshr, PacketPtr pkt)
    {
        MSHRQueue *mq = mshr->queue;
        bool wasFull = mq->isFull();
        mq->markInService(mshr, pkt);
        if (wasFull && !mq->isFull()) {
            clearBlocked((BlockedCause)mq->index);
        }
    }

    /** Block size of this cache */
    const unsigned blkSize;

    /**
     * The latency of a hit in this device.
     */
    int hitLatency;

    /** The number of targets for each MSHR. */
    const int numTarget;

    /** Do we forward snoops from mem side port through to cpu side port? */
    bool forwardSnoops;

    /** Is this cache a top-level cache (e.g., an L1 or I/O cache)? If so,
     * we should never try to forward ownership and similar optimizations
     * to the CPU side. */
    bool isTopLevel;

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /** Increasing order number assigned to each incoming request. */
    uint64_t order;

    /** The tick at which the cache became blocked, used for statistics. */
    Tick blockedCycle;

    /** Pointer to the MSHR that has no targets. */
    MSHR *noTargetMSHR;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

    /** The drain event. */
    Event *drainEvent;

    /**
     * The address range to which the cache responds on the CPU side.
     * Normally this is all possible memory addresses. */
    Range<Addr> addrRange;

  public:
    /** System we are currently operating in. */
    System *system;

    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command. @sa Packet::Command */
    Stats::Vector hits[MemCmd::NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command. @sa Packet::Command */
    Stats::Vector misses[MemCmd::NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of fast writes (WH64) performed. */
    Stats::Scalar fastWrites;

    /** The number of cache copies performed. */
    Stats::Scalar cacheCopies;

    /** Number of blocks written back per thread. */
    Stats::Vector writebacks;

    /** Number of misses that hit in the MSHRs per command and thread. */
    Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that hit in the MSHRs. */
    Stats::Formula demandMshrHits;
    /** Total number of misses that hit in the MSHRs. */
    Stats::Formula overallMshrHits;

    /** Number of misses that miss in the MSHRs, per command and thread. */
    Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that miss in the MSHRs. */
    Stats::Formula demandMshrMisses;
    /** Total number of misses that miss in the MSHRs. */
    Stats::Formula overallMshrMisses;

    /** Number of uncacheable accesses handled through the MSHRs, per command and thread. */
    Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
    /** Total number of uncacheable accesses handled through the MSHRs. */
    Stats::Formula overallMshrUncacheable;

    /** Total cycle latency of each MSHR miss, per command and thread. */
    Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of demand MSHR misses. */
    Stats::Formula demandMshrMissLatency;
    /** Total cycle latency of overall MSHR misses. */
    Stats::Formula overallMshrMissLatency;

    /** Total cycle latency of uncacheable MSHR accesses, per command and thread. */
    Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of all uncacheable MSHR accesses. */
    Stats::Formula overallMshrUncacheableLatency;

#if 0
    /** The total number of MSHR accesses per command and thread. */
    Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
    /** The total number of demand MSHR accesses. */
    Stats::Formula demandMshrAccesses;
    /** The total number of MSHR accesses. */
    Stats::Formula overallMshrAccesses;
#endif

    /** The miss rate in the MSHRs per command and thread. */
    Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
    /** The demand miss rate in the MSHRs. */
    Stats::Formula demandMshrMissRate;
    /** The overall miss rate in the MSHRs. */
    Stats::Formula overallMshrMissRate;

    /** The average latency of an MSHR miss, per command and thread. */
    Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average latency of a demand MSHR miss. */
    Stats::Formula demandAvgMshrMissLatency;
    /** The average overall latency of an MSHR miss. */
    Stats::Formula overallAvgMshrMissLatency;

    /** The average latency of an uncacheable MSHR access, per command and thread. */
    Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
    /** The average overall latency of an uncacheable MSHR access. */
    Stats::Formula overallAvgMshrUncacheableLatency;

    /** The number of times a thread hit its MSHR cap. */
    Stats::Vector mshr_cap_events;
    /** The number of times software prefetches caused the MSHR to block. */
    Stats::Vector soft_prefetch_mshr_full;

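    /** The number of misses that were no-allocate. */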
    Stats::Scalar mshr_no_allocate_misses;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    virtual void regStats();

  public:
    typedef BaseCacheParams Params;
    BaseCache(const Params *p);
    ~BaseCache() {}

    virtual void init();

    /**
     * Query block size of a cache.
     * @return  The block size
     */
    unsigned
    getBlockSize() const
    {
        return blkSize;
    }


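    /**
     * Align an address to this cache's block boundary by masking off the
     * block-offset bits; for example, with a 64-byte block size,
     * blockAlign(0x12345) yields 0x12340.
     */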
    Addr blockAlign(Addr addr) const { return (addr & ~(Addr(blkSize - 1))); }


    const Range<Addr> &getAddrRange() const { return addrRange; }

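    /**
     * Allocate an MSHR for a cacheable miss; the address is aligned to the
     * block boundary and a full block is requested.
     */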
    MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool requestBus)
    {
        assert(!pkt->req->isUncacheable());
        return allocateBufferInternal(&mshrQueue,
                                      blockAlign(pkt->getAddr()), blkSize,
                                      pkt, time, requestBus);
    }

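    /**
     * Allocate a write buffer entry for a write or writeback, using the
     * packet's exact address and size.
     */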
    MSHR *allocateWriteBuffer(PacketPtr pkt, Tick time, bool requestBus)
    {
        assert(pkt->isWrite() && !pkt->isRead());
        return allocateBufferInternal(&writeBuffer,
                                      pkt->getAddr(), pkt->getSize(),
                                      pkt, time, requestBus);
    }

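    /**
     * Allocate an MSHR for an uncacheable read, using the packet's exact
     * address and size.
     */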
    MSHR *allocateUncachedReadBuffer(PacketPtr pkt, Tick time, bool requestBus)
    {
        assert(pkt->req->isUncacheable());
        assert(pkt->isRead());
        return allocateBufferInternal(&mshrQueue,
                                      pkt->getAddr(), pkt->getSize(),
                                      pkt, time, requestBus);
    }

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked()
    {
        return blocked != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            blocked_causes[cause]++;
            blockedCycle = curTick();
            cpuSidePort->setBlocked();
        }
        blocked |= flag;
        DPRINTF(Cache,"Blocking for cause %d, mask=%d\n", cause, blocked);
    }

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        blocked &= ~flag;
        DPRINTF(Cache,"Unblocking for cause %d, mask=%d\n", cause, blocked);
        if (blocked == 0) {
            blocked_cycles[cause] += curTick() - blockedCycle;
            cpuSidePort->clearBlocked();
        }
    }
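
    // For example, if the cache blocks first for Blocked_NoMSHRs (bit 0) and
    // then for Blocked_NoTargets (bit 2), the blocked mask is 0x5, and the
    // CPU-side port is only unblocked once both causes have been cleared.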

    /**
     * Request the master bus for the given cause and time.
     * @param cause The reason for the request.
     * @param time The time to make the request.
     */
    void requestMemSideBus(RequestCause cause, Tick time)
    {
        memSidePort->requestBus(cause, time);
    }

    /**
     * Clear the master bus request for the given cause.
     * @param cause The request reason to clear.
     */
    void deassertMemSideBusRequest(RequestCause cause)
    {
        // Obsolete... we no longer signal bus requests explicitly so
        // we can't deassert them.  Leaving this in as a no-op since
        // the prefetcher calls it to indicate that it no longer wants
        // to request a prefetch, and someday that might be
        // interesting again.
    }

    virtual unsigned int drain(Event *de);

    virtual bool inCache(Addr addr) = 0;

    virtual bool inMissQueue(Addr addr) = 0;

    void incMissCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

        if (missCount) {
            --missCount;
            if (missCount == 0)
                exitSimLoop("A cache reached the maximum miss count");
        }
    }
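
    // For example, if missCount is configured to 1000, the 1000th miss
    // counted here ends the simulation via exitSimLoop().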
    void incHitCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
    }

};

#endif //__BASE_CACHE_HH__
