base.hh revision 8786:8be24baf68b8
/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 *          Ron Dreslinski
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __BASE_CACHE_HH__
#define __BASE_CACHE_HH__

#include <algorithm>
#include <list>
#include <string>
#include <vector>

#include "base/misc.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "config/full_system.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "mem/cache/mshr_queue.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/request.hh"
#include "mem/tport.hh"
#include "params/BaseCache.hh"
#include "sim/eventq.hh"
#include "sim/full_system.hh"
#include "sim/sim_exit.hh"

class MSHR;
/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public MemObject
{
    /**
     * Indexes to enumerate the MSHR queues.
     */
    enum MSHRQueueIndex {
        MSHRQueue_MSHRs,
        MSHRQueue_WriteBuffer
    };

    /**
     * Reasons for caches to be blocked.
     */
    enum BlockedCause {
        Blocked_NoMSHRs = MSHRQueue_MSHRs,
        Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
        Blocked_NoTargets,
        NUM_BLOCKED_CAUSES
    };

  public:
    /**
     * Reasons for cache to request a bus.
     */
    enum RequestCause {
        Request_MSHR = MSHRQueue_MSHRs,
        Request_WB = MSHRQueue_WriteBuffer,
        Request_PF,
        NUM_REQUEST_CAUSES
    };
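
    // Note: the first BlockedCause and RequestCause values intentionally
    // share the MSHRQueueIndex values, so a queue's index can be cast
    // directly to a cause, as allocateBufferInternal() and
    // markInServiceInternal() do below. An illustrative (hypothetical) check:
    //
    //     assert((int)Blocked_NoMSHRs == (int)MSHRQueue_MSHRs &&
    //            (int)Request_WB == (int)MSHRQueue_WriteBuffer);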

  private:

    class CachePort : public SimpleTimingPort
    {
      public:
        BaseCache *cache;

      protected:
        CachePort(const std::string &_name, BaseCache *_cache,
                  const std::string &_label);

        virtual void recvStatusChange(Status status);

        virtual unsigned deviceBlockSize() const;

        bool recvRetryCommon();

        typedef EventWrapper<Port, &Port::sendRetry>
            SendRetryEvent;

        const std::string label;

      public:
        void setOtherPort(CachePort *_otherPort) { otherPort = _otherPort; }

        void setBlocked();

        void clearBlocked();

        bool checkFunctional(PacketPtr pkt);

        CachePort *otherPort;

        bool blocked;

        bool mustSendRetry;

        void requestBus(RequestCause cause, Tick time)
        {
            DPRINTF(CachePort, "Asserting bus request for cause %d\n", cause);
            if (!waitingOnRetry) {
                schedSendEvent(time);
            }
        }

        void respond(PacketPtr pkt, Tick time) {
            schedSendTiming(pkt, time);
        }
    };

  public: //Made public so coherence can get at it.
    CachePort *cpuSidePort;
    CachePort *memSidePort;

  protected:

    /** Miss status registers */
    MSHRQueue mshrQueue;

    /** Write/writeback buffer */
    MSHRQueue writeBuffer;

    MSHR *allocateBufferInternal(MSHRQueue *mq, Addr addr, int size,
                                 PacketPtr pkt, Tick time, bool requestBus)
    {
        MSHR *mshr = mq->allocate(addr, size, pkt, time, order++);

        if (mq->isFull()) {
            setBlocked((BlockedCause)mq->index);
        }

        if (requestBus) {
            requestMemSideBus((RequestCause)mq->index, time);
        }

        return mshr;
    }

    void markInServiceInternal(MSHR *mshr, PacketPtr pkt)
    {
        MSHRQueue *mq = mshr->queue;
        bool wasFull = mq->isFull();
        mq->markInService(mshr, pkt);
        if (wasFull && !mq->isFull()) {
            clearBlocked((BlockedCause)mq->index);
        }
    }

    /** Block size of this cache */
    const unsigned blkSize;

    /**
     * The latency of a hit in this device.
     */
    int hitLatency;

    /** The number of targets for each MSHR. */
    const int numTarget;

    /** Do we forward snoops from mem side port through to cpu side port? */
    bool forwardSnoops;

    /** Is this cache a top-level cache (e.g. an L1 or I/O cache)? If so, we
     * should never try to forward ownership and similar optimizations to the
     * CPU side. */
    bool isTopLevel;

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /** Increasing order number assigned to each incoming request. */
    uint64_t order;

    /** Stores time the cache blocked for statistics. */
    Tick blockedCycle;

    /** Pointer to the MSHR that has no targets. */
    MSHR *noTargetMSHR;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

    /** The drain event. */
    Event *drainEvent;

    /**
     * The address range to which the cache responds on the CPU side.
     * Normally this is all possible memory addresses. */
    Range<Addr> addrRange;

    /** number of cpus sharing this cache - from config file */
    int _numCpus;

  public:
    int numCpus() { return _numCpus; }
    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command. @sa Packet::Command */
    Stats::Vector hits[MemCmd::NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command. @sa Packet::Command */
    Stats::Vector misses[MemCmd::NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of fast writes (WH64) performed. */
    Stats::Scalar fastWrites;

    /** The number of cache copies performed. */
    Stats::Scalar cacheCopies;

    /** Number of blocks written back per thread. */
    Stats::Vector writebacks;

    /** Number of misses that hit in the MSHRs per command and thread. */
    Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that hit in the MSHRs. */
    Stats::Formula demandMshrHits;
    /** Total number of misses that hit in the MSHRs. */
    Stats::Formula overallMshrHits;

    /** Number of misses that miss in the MSHRs, per command and thread. */
    Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that miss in the MSHRs. */
    Stats::Formula demandMshrMisses;
    /** Total number of misses that miss in the MSHRs. */
    Stats::Formula overallMshrMisses;

    /** Number of uncacheable misses, per command and thread. */
    Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
    /** Total number of uncacheable misses. */
    Stats::Formula overallMshrUncacheable;

    /** Total cycle latency of each MSHR miss, per command and thread. */
    Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of demand MSHR misses. */
    Stats::Formula demandMshrMissLatency;
    /** Total cycle latency of overall MSHR misses. */
    Stats::Formula overallMshrMissLatency;

    /** Total cycle latency of each uncacheable MSHR miss, per command and thread. */
    Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of overall uncacheable MSHR misses. */
    Stats::Formula overallMshrUncacheableLatency;

#if 0
    /** The total number of MSHR accesses per command and thread. */
    Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
    /** The total number of demand MSHR accesses. */
    Stats::Formula demandMshrAccesses;
    /** The total number of MSHR accesses. */
    Stats::Formula overallMshrAccesses;
#endif

    /** The miss rate in the MSHRs per command and thread. */
    Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
    /** The demand miss rate in the MSHRs. */
    Stats::Formula demandMshrMissRate;
    /** The overall miss rate in the MSHRs. */
    Stats::Formula overallMshrMissRate;

    /** The average latency of an MSHR miss, per command and thread. */
    Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average latency of a demand MSHR miss. */
    Stats::Formula demandAvgMshrMissLatency;
    /** The average overall latency of an MSHR miss. */
    Stats::Formula overallAvgMshrMissLatency;

    /** The average latency of an uncacheable MSHR miss, per command and thread. */
    Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
    /** The average overall latency of an uncacheable MSHR miss. */
    Stats::Formula overallAvgMshrUncacheableLatency;

    /** The number of times a thread hit its MSHR cap. */
    Stats::Vector mshr_cap_events;
    /** The number of times software prefetches caused the MSHR to block. */
    Stats::Vector soft_prefetch_mshr_full;

    Stats::Scalar mshr_no_allocate_misses;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    virtual void regStats();

  public:
    typedef BaseCacheParams Params;
    BaseCache(const Params *p);
    ~BaseCache() {}

    virtual void init();

    /**
     * Query block size of a cache.
     * @return  The block size
     */
    unsigned
    getBlockSize() const
    {
        return blkSize;
    }


    Addr blockAlign(Addr addr) const { return (addr & ~(Addr(blkSize - 1))); }
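    // Worked example of the masking above (values illustrative only, and
    // blkSize is assumed to be a power of two): with blkSize == 64,
    // blkSize - 1 == 0x3f, so ~Addr(0x3f) clears the low six bits and
    // blockAlign(0x1234) == 0x1200, the start of the containing block.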


    const Range<Addr> &getAddrRange() const { return addrRange; }

    MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool requestBus)
    {
        assert(!pkt->req->isUncacheable());
        return allocateBufferInternal(&mshrQueue,
                                      blockAlign(pkt->getAddr()), blkSize,
                                      pkt, time, requestBus);
    }

    MSHR *allocateWriteBuffer(PacketPtr pkt, Tick time, bool requestBus)
    {
        assert(pkt->isWrite() && !pkt->isRead());
        return allocateBufferInternal(&writeBuffer,
                                      pkt->getAddr(), pkt->getSize(),
                                      pkt, time, requestBus);
    }

    MSHR *allocateUncachedReadBuffer(PacketPtr pkt, Tick time, bool requestBus)
    {
        assert(pkt->req->isUncacheable());
        assert(pkt->isRead());
        return allocateBufferInternal(&mshrQueue,
                                      pkt->getAddr(), pkt->getSize(),
                                      pkt, time, requestBus);
    }
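
    /**
     * Illustrative sketch (not part of this interface, and not taken from
     * any particular derived cache) of how a caller might choose among the
     * three allocate helpers above on a miss, assuming it has a packet pkt
     * in hand; the surrounding control flow is hypothetical.
     *
     * @code
     * MSHR *mshr;
     * if (pkt->req->isUncacheable()) {
     *     mshr = pkt->isRead()
     *         ? allocateUncachedReadBuffer(pkt, curTick(), true)
     *         : allocateWriteBuffer(pkt, curTick(), true);
     * } else if (pkt->isWrite() && !pkt->isRead()) {
     *     mshr = allocateWriteBuffer(pkt, curTick(), true);  // e.g. writebacks
     * } else {
     *     mshr = allocateMissBuffer(pkt, curTick(), true);   // cacheable fills
     * }
     * @endcode
     */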

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked()
    {
        return blocked != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            blocked_causes[cause]++;
            blockedCycle = curTick();
            cpuSidePort->setBlocked();
        }
        blocked |= flag;
        DPRINTF(Cache,"Blocking for cause %d, mask=%d\n", cause, blocked);
    }

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        blocked &= ~flag;
        DPRINTF(Cache,"Unblocking for cause %d, mask=%d\n", cause, blocked);
        if (blocked == 0) {
            blocked_cycles[cause] += curTick() - blockedCycle;
            cpuSidePort->clearBlocked();
        }
    }
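
    // Worked example of the blocked bit vector above (illustrative only):
    // if the cache first runs out of MSHRs and then of write buffers,
    // blocked == (1 << Blocked_NoMSHRs) | (1 << Blocked_NoWBBuffers) == 0x3.
    // Clearing Blocked_NoWBBuffers leaves blocked == 0x1, so the CPU-side
    // port is only unblocked once every cause has been cleared and blocked
    // returns to 0.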

    /**
     * Request the master bus for the given cause and time.
     * @param cause The reason for the request.
     * @param time The time to make the request.
     */
    void requestMemSideBus(RequestCause cause, Tick time)
    {
        memSidePort->requestBus(cause, time);
    }

    /**
     * Clear the master bus request for the given cause.
     * @param cause The request reason to clear.
     */
    void deassertMemSideBusRequest(RequestCause cause)
    {
        // Obsolete... we no longer signal bus requests explicitly so
        // we can't deassert them.  Leaving this in as a no-op since
        // the prefetcher calls it to indicate that it no longer wants
        // to request a prefetch, and someday that might be
        // interesting again.
    }

    virtual unsigned int drain(Event *de);

    virtual bool inCache(Addr addr) = 0;

    virtual bool inMissQueue(Addr addr) = 0;

    void incMissCount(PacketPtr pkt, int id)
    {

        if (pkt->cmd == MemCmd::Writeback) {
            assert(id == -1);
            misses[pkt->cmdToIndex()][0]++;
            /* Handle writeback misses the same way as writeback hits: no
             * context id is available, and writeback hit/miss stats are not
             * used in any aggregate hit/miss calculations, so just lump them
             * all in bucket 0. */
        } else if (FullSystem && id == -1) {
            // Device accesses have id -1
            // lump device accesses into their own bucket
            misses[pkt->cmdToIndex()][_numCpus]++;
        } else {
            misses[pkt->cmdToIndex()][id % _numCpus]++;
        }

        if (missCount) {
            --missCount;
            if (missCount == 0)
                exitSimLoop("A cache reached the maximum miss count");
        }
    }
    void incHitCount(PacketPtr pkt, int id)
    {

        /* Writeback requests don't have a context id associated with
         * them, so attributing a hit to a -1 context id is obviously a
         * problem.  I've noticed in the stats that hits are split into
         * demand and non-demand hits - neither of which includes writeback
         * hits, so here, I'll just put the writeback hits into bucket 0
         * since it won't mess with any other stats -hsul */
        if (pkt->cmd == MemCmd::Writeback) {
            assert(id == -1);
            hits[pkt->cmdToIndex()][0]++;
        } else if (FullSystem && id == -1) {
            // Device accesses have id -1
            // lump device accesses into their own bucket
            hits[pkt->cmdToIndex()][_numCpus]++;
        } else {
            /* the % is necessary in case there are switch cpus */
            hits[pkt->cmdToIndex()][id % _numCpus]++;
        }
    }

};

#endif //__BASE_CACHE_HH__