base.hh revision 5338:e75d02a09806
/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 *          Ron Dreslinski
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __BASE_CACHE_HH__
#define __BASE_CACHE_HH__

#include <vector>
#include <string>
#include <list>
#include <algorithm>
#include <inttypes.h>

#include "base/misc.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "mem/cache/mshr_queue.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/tport.hh"
#include "mem/request.hh"
#include "params/BaseCache.hh"
#include "sim/eventq.hh"
#include "sim/sim_exit.hh"

class MSHR;

/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public MemObject
{
    /**
     * Indexes to enumerate the MSHR queues.
     */
    enum MSHRQueueIndex {
        MSHRQueue_MSHRs,
        MSHRQueue_WriteBuffer
    };

    /**
     * Reasons for caches to be blocked.
     */
    enum BlockedCause {
        Blocked_NoMSHRs = MSHRQueue_MSHRs,
        Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
        Blocked_NoTargets,
        NUM_BLOCKED_CAUSES
    };

  public:
    /**
     * Reasons for cache to request a bus.
     */
    enum RequestCause {
        Request_MSHR = MSHRQueue_MSHRs,
        Request_WB = MSHRQueue_WriteBuffer,
        Request_PF,
        NUM_REQUEST_CAUSES
    };
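    // Note: the first two values of BlockedCause and RequestCause
    // deliberately alias MSHRQueueIndex, so a queue index can be cast
    // directly to the matching cause, e.g. setBlocked((BlockedCause)mq->index)
    // or requestMemSideBus((RequestCause)mq->index, time), as done in
    // allocateBufferInternal() below.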

  private:

    class CachePort : public SimpleTimingPort
    {
      public:
        BaseCache *cache;

      protected:
        CachePort(const std::string &_name, BaseCache *_cache,
                  const std::string &_label,
                  std::vector<Range<Addr> > filter_ranges);

        virtual void recvStatusChange(Status status);

        virtual int deviceBlockSize();

        bool recvRetryCommon();

        typedef EventWrapper<Port, &Port::sendRetry>
            SendRetryEvent;

        const std::string label;

      public:
        void setOtherPort(CachePort *_otherPort) { otherPort = _otherPort; }

        void setBlocked();

        void clearBlocked();

        bool checkFunctional(PacketPtr pkt);

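        /** This cache's other port (CPU side vs. memory side); set via setOtherPort(). */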
        CachePort *otherPort;

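        /** True while this port is blocked and not accepting new requests. */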
        bool blocked;

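        /** True if a retry needs to be sent once this port unblocks. */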
        bool mustSendRetry;

        /** filter ranges */
        std::vector<Range<Addr> > filterRanges;

        void requestBus(RequestCause cause, Tick time)
        {
            DPRINTF(CachePort, "Asserting bus request for cause %d\n", cause);
            if (!waitingOnRetry) {
                schedSendEvent(time);
            }
        }

        void respond(PacketPtr pkt, Tick time) {
            schedSendTiming(pkt, time);
        }
    };

  public: // Made public so the coherence code can get at these ports.
    CachePort *cpuSidePort;
    CachePort *memSidePort;

  protected:

    /** The queue of miss status holding registers (MSHRs). */
    MSHRQueue mshrQueue;

    /** Write/writeback buffer */
    MSHRQueue writeBuffer;

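    /**
     * Allocate an entry in the given MSHR queue for the packet.  If the
     * allocation fills the queue, the cache is blocked for the queue's
     * corresponding cause; the memory-side bus is requested if asked for.
     */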
    MSHR *allocateBufferInternal(MSHRQueue *mq, Addr addr, int size,
                                 PacketPtr pkt, Tick time, bool requestBus)
    {
        MSHR *mshr = mq->allocate(addr, size, pkt, time, order++);

        if (mq->isFull()) {
            setBlocked((BlockedCause)mq->index);
        }

        if (requestBus) {
            requestMemSideBus((RequestCause)mq->index, time);
        }

        return mshr;
    }

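    /**
     * Mark the given MSHR as in service.  If its queue was full and the
     * marking frees an entry, the corresponding blocked cause is cleared.
     */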
    void markInServiceInternal(MSHR *mshr)
    {
        MSHRQueue *mq = mshr->queue;
        bool wasFull = mq->isFull();
        mq->markInService(mshr);
        if (wasFull && !mq->isFull()) {
            clearBlocked((BlockedCause)mq->index);
        }
    }

    /** Block size of this cache */
    const int blkSize;

    /**
     * The latency of a hit in this device.
     */
    int hitLatency;

    /** The number of targets for each MSHR. */
    const int numTarget;

    /** Increasing order number assigned to each incoming request. */
    uint64_t order;

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /** Records the tick at which the cache last blocked, for statistics. */
    Tick blockedCycle;

    /** Pointer to the MSHR that has run out of targets, blocking the cache. */
    MSHR *noTargetMSHR;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

    /** The drain event. */
    Event *drainEvent;

  public:
    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command. @sa Packet::Command */
    Stats::Vector<> hits[MemCmd::NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command. @sa Packet::Command */
    Stats::Vector<> misses[MemCmd::NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector<> missLatency[MemCmd::NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector<> blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector<> blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of fast writes (WH64) performed. */
    Stats::Scalar<> fastWrites;

    /** The number of cache copies performed. */
    Stats::Scalar<> cacheCopies;

    /** Number of blocks written back per thread. */
    Stats::Vector<> writebacks;

    /** Number of misses that hit in the MSHRs per command and thread. */
    Stats::Vector<> mshr_hits[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that hit in the MSHRs. */
    Stats::Formula demandMshrHits;
    /** Total number of misses that hit in the MSHRs. */
    Stats::Formula overallMshrHits;

    /** Number of misses that miss in the MSHRs, per command and thread. */
    Stats::Vector<> mshr_misses[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that miss in the MSHRs. */
    Stats::Formula demandMshrMisses;
    /** Total number of misses that miss in the MSHRs. */
    Stats::Formula overallMshrMisses;

    /** Number of uncacheable accesses handled via the MSHRs, per command and thread. */
    Stats::Vector<> mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
    /** Total number of uncacheable accesses handled via the MSHRs. */
    Stats::Formula overallMshrUncacheable;

    /** Total cycle latency of each MSHR miss, per command and thread. */
    Stats::Vector<> mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of demand MSHR misses. */
    Stats::Formula demandMshrMissLatency;
    /** Total cycle latency of overall MSHR misses. */
    Stats::Formula overallMshrMissLatency;

    /** Total cycle latency of uncacheable accesses, per command and thread. */
    Stats::Vector<> mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of all uncacheable accesses. */
    Stats::Formula overallMshrUncacheableLatency;

    /** The total number of MSHR accesses per command and thread. */
    Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
    /** The total number of demand MSHR accesses. */
    Stats::Formula demandMshrAccesses;
    /** The total number of MSHR accesses. */
    Stats::Formula overallMshrAccesses;

    /** The miss rate in the MSHRs, per command and thread. */
    Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
    /** The demand miss rate in the MSHRs. */
    Stats::Formula demandMshrMissRate;
    /** The overall miss rate in the MSHRs. */
    Stats::Formula overallMshrMissRate;

    /** The average latency of an MSHR miss, per command and thread. */
    Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average latency of a demand MSHR miss. */
    Stats::Formula demandAvgMshrMissLatency;
    /** The average overall latency of an MSHR miss. */
    Stats::Formula overallAvgMshrMissLatency;

    /** The average latency of an uncacheable access, per command and thread. */
    Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
    /** The average overall latency of an uncacheable access. */
    Stats::Formula overallAvgMshrUncacheableLatency;

    /** The number of times a thread hit its MSHR cap. */
    Stats::Vector<> mshr_cap_events;
    /** The number of times software prefetches caused the MSHR to block. */
    Stats::Vector<> soft_prefetch_mshr_full;

    Stats::Scalar<> mshr_no_allocate_misses;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    virtual void regStats();

  public:
    typedef BaseCacheParams Params;
    BaseCache(const Params *p);
    ~BaseCache() {}

    virtual void init();

    /**
     * Query block size of a cache.
     * @return  The block size
     */
    int getBlockSize() const
    {
        return blkSize;
    }

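    /**
     * Align an address to the start of its cache block.  Assumes blkSize is
     * a power of two; e.g. with 64-byte blocks, blockAlign(0x1234) == 0x1200.
     */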
    Addr blockAlign(Addr addr) const { return (addr & ~(blkSize - 1)); }

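    /**
     * Allocate an MSHR for a cacheable miss on the block containing the
     * packet's address; the request is aligned to a full block.
     */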
    MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool requestBus)
    {
        assert(!pkt->req->isUncacheable());
        return allocateBufferInternal(&mshrQueue,
                                      blockAlign(pkt->getAddr()), blkSize,
                                      pkt, time, requestBus);
    }

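    /**
     * Allocate a write buffer entry for a write or writeback, using the
     * packet's own address and size rather than a full block.
     */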
    MSHR *allocateWriteBuffer(PacketPtr pkt, Tick time, bool requestBus)
    {
        assert(pkt->isWrite() && !pkt->isRead());
        return allocateBufferInternal(&writeBuffer,
                                      pkt->getAddr(), pkt->getSize(),
                                      pkt, time, requestBus);
    }

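    /**
     * Allocate an MSHR for an uncacheable read, using the packet's own
     * address and size (no block alignment is performed).
     */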
    MSHR *allocateUncachedReadBuffer(PacketPtr pkt, Tick time, bool requestBus)
    {
        assert(pkt->req->isUncacheable());
        assert(pkt->isRead());
        return allocateBufferInternal(&mshrQueue,
                                      pkt->getAddr(), pkt->getSize(),
                                      pkt, time, requestBus);
    }

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked()
    {
        return blocked != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            blocked_causes[cause]++;
            blockedCycle = curTick;
            cpuSidePort->setBlocked();
        }
        blocked |= flag;
        DPRINTF(Cache,"Blocking for cause %d, mask=%d\n", cause, blocked);
    }
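    // Illustrative example of the blocked bit vector: blocking for
    // Blocked_NoMSHRs (bit 0) and then Blocked_NoTargets (bit 2) leaves
    // blocked == 0x5; the CPU-side port is only unblocked again once
    // clearBlocked() has been called for every set cause and blocked == 0.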

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        blocked &= ~flag;
        DPRINTF(Cache,"Unblocking for cause %d, mask=%d\n", cause, blocked);
        if (blocked == 0) {
            blocked_cycles[cause] += curTick - blockedCycle;
            cpuSidePort->clearBlocked();
        }
    }

    Tick nextMSHRReadyTime()
    {
        return std::min(mshrQueue.nextMSHRReadyTime(),
                        writeBuffer.nextMSHRReadyTime());
    }

    /**
     * Request the master bus for the given cause and time.
     * @param cause The reason for the request.
     * @param time The time to make the request.
     */
    void requestMemSideBus(RequestCause cause, Tick time)
    {
        memSidePort->requestBus(cause, time);
    }

    /**
     * Clear the master bus request for the given cause.
     * @param cause The request reason to clear.
     */
    void deassertMemSideBusRequest(RequestCause cause)
    {
        // obsolete!!
        assert(false);
        // memSidePort->deassertBusRequest(cause);
        // checkDrain();
    }

    virtual unsigned int drain(Event *de);

    virtual bool inCache(Addr addr) = 0;

    virtual bool inMissQueue(Addr addr) = 0;

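    /**
     * Record a miss in the per-command statistics and, if a maximum miss
     * count is configured, count down and exit the simulation loop when it
     * reaches zero.
     */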
    void incMissCount(PacketPtr pkt)
    {
        misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;

        if (missCount) {
            --missCount;
            if (missCount == 0)
                exitSimLoop("A cache reached the maximum miss count");
        }
    }

};

#endif //__BASE_CACHE_HH__