base.hh revision 3661
/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Declares a basic cache interface, BaseCache.
 */

#ifndef __BASE_CACHE_HH__
#define __BASE_CACHE_HH__

#include <vector>
#include <string>
#include <list>
#include <inttypes.h>

#include "base/misc.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/port.hh"
#include "mem/request.hh"
#include "sim/eventq.hh"

/**
 * Reasons for caches to be blocked.
 */
enum BlockedCause {
    Blocked_NoMSHRs,
    Blocked_NoTargets,
    Blocked_NoWBBuffers,
    Blocked_Coherence,
    NUM_BLOCKED_CAUSES
};
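
// Note: each BlockedCause doubles as a bit position in the blocked/blockedSnoop
// bit vectors below (setBlocked()/clearBlocked() use 1 << cause), so several
// causes can be outstanding at once and the cache only unblocks when every
// cause has been cleared.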

/**
 * Reasons for a cache to request the bus.
 */
enum RequestCause {
    Request_MSHR,
    Request_WB,
    Request_Coherence,
    Request_PF
};
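
// The RequestCause values are likewise used as bit positions in the
// masterRequests/slaveRequests vectors (see setMasterRequest()/setSlaveRequest()).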

class MSHR;

/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public MemObject
{
    class CachePort : public Port
    {
      public:
        BaseCache *cache;

        CachePort(const std::string &_name, BaseCache *_cache, bool _isCpuSide);

      protected:
        virtual bool recvTiming(PacketPtr pkt);

        virtual Tick recvAtomic(PacketPtr pkt);

        virtual void recvFunctional(PacketPtr pkt);

        virtual void recvStatusChange(Status status);

        virtual void getDeviceAddressRanges(AddrRangeList &resp,
                                            AddrRangeList &snoop);

        virtual int deviceBlockSize();

        virtual void recvRetry();

      public:
        void setBlocked();

        void clearBlocked();

        bool checkFunctional(PacketPtr pkt);

        void checkAndSendFunctional(PacketPtr pkt);

        bool canDrain() { return drainList.empty() && transmitList.empty(); }

        bool blocked;

        bool mustSendRetry;

        bool isCpuSide;

        bool waitingOnRetry;

        std::list<PacketPtr> drainList;

        std::list<std::pair<Tick,PacketPtr> > transmitList;
    };
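
    // Rough intent of the two lists, as used elsewhere in this class:
    // transmitList holds (ready-time, packet) pairs queued by respond() and
    // friends, while drainList appears to hold packets whose timing send must
    // be retried; both must be empty before the port (and hence the cache)
    // can drain.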

    struct CacheEvent : public Event
    {
        CachePort *cachePort;
        PacketPtr pkt;
        bool newResponse;

        CacheEvent(CachePort *_cachePort, bool response);
        void process();
        const char *description();
    };

  public: // Made public so the coherence protocol can get at it.
    CachePort *cpuSidePort;

    CacheEvent *sendEvent;
    CacheEvent *memSendEvent;

  protected:
    CachePort *memSidePort;

  public:
    virtual Port *getPort(const std::string &if_name, int idx = -1);

  private:
    // To be defined in cache_impl.hh, not in the base class.
    virtual bool doTimingAccess(PacketPtr pkt, CachePort *cachePort, bool isCpuSide)
    {
        fatal("No implementation");
    }

    virtual Tick doAtomicAccess(PacketPtr pkt, bool isCpuSide)
    {
        fatal("No implementation");
    }

    virtual void doFunctionalAccess(PacketPtr pkt, bool isCpuSide)
    {
        fatal("No implementation");
    }

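    // Forward a range change seen on one side of the cache to the port on the
    // other side, so peers above and below both see the updated address map.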
    void recvStatusChange(Port::Status status, bool isCpuSide)
    {
        if (status == Port::RangeChange) {
            if (!isCpuSide) {
                cpuSidePort->sendStatusChange(Port::RangeChange);
            } else {
                memSidePort->sendStatusChange(Port::RangeChange);
            }
        }
    }

    virtual PacketPtr getPacket()
    {
        fatal("No implementation");
    }

    virtual PacketPtr getCoherencePacket()
    {
        fatal("No implementation");
    }

    virtual void sendResult(PacketPtr &pkt, MSHR* mshr, bool success)
    {
        fatal("No implementation");
    }

    virtual void sendCoherenceResult(PacketPtr &pkt, MSHR* mshr, bool success)
    {
        fatal("No implementation");
    }

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /**
     * Bit vector of the blocking reasons for the snoop path.
     * @sa #BlockedCause
     */
    uint8_t blockedSnoop;

    /**
     * Bit vector of the outstanding requests for the master interface.
     */
    uint8_t masterRequests;

    /**
     * Bit vector of the outstanding requests for the slave interface.
     */
    uint8_t slaveRequests;

  protected:

    /** Stores the time at which the cache blocked, for statistics. */
    Tick blockedCycle;

    /** Block size of this cache. */
    const int blkSize;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

    /** The drain event. */
    Event *drainEvent;

  public:
    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command. @sa Packet::Command */
    Stats::Vector<> hits[NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command. @sa Packet::Command */
    Stats::Vector<> misses[NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector<> missLatency[NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector<> blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector<> blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of fast writes (WH64) performed. */
    Stats::Scalar<> fastWrites;

    /** The number of cache copies performed. */
    Stats::Scalar<> cacheCopies;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    virtual void regStats();

  public:

    class Params
    {
      public:
        /** List of address ranges of this cache. */
        std::vector<Range<Addr> > addrRange;
        /** The hit latency for this cache. */
        int hitLatency;
        /** The block size of this cache. */
        int blkSize;
        /**
         * The maximum number of misses this cache should handle before
         * ending the simulation.
         */
        Counter maxMisses;

        /**
         * Construct an instance of this parameter class.
         */
        Params(std::vector<Range<Addr> > addr_range,
               int hit_latency, int _blkSize, Counter max_misses)
            : addrRange(addr_range), hitLatency(hit_latency), blkSize(_blkSize),
              maxMisses(max_misses)
        {
        }
    };
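
    // A minimal construction sketch (the values here are arbitrary placeholders):
    //
    //   std::vector<Range<Addr> > ranges;      // address ranges, filled in by the caller
    //   BaseCache::Params params(ranges,
    //                            1,            // hit latency
    //                            64,           // block size in bytes
    //                            0);           // max misses before ending simulation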

    /**
     * Create and initialize a basic cache object.
     * @param name The name of this cache.
     * @param params The parameter object for this BaseCache.
     */
    BaseCache(const std::string &name, Params &params)
        : MemObject(name), blocked(0), blockedSnoop(0), masterRequests(0),
          slaveRequests(0), blkSize(params.blkSize),
          missCount(params.maxMisses), drainEvent(NULL)
    {
        // Start the ports at NULL; if more than one of either is created we
        // should panic.
        cpuSidePort = NULL;
        memSidePort = NULL;
    }

    ~BaseCache()
    {
        delete sendEvent;
        delete memSendEvent;
    }

    virtual void init();

    /**
     * Query the block size of this cache.
     * @return The block size.
     */
    int getBlockSize() const
    {
        return blkSize;
    }

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked()
    {
        return blocked != 0;
    }

    /**
     * Returns true if the cache is blocked for snoops.
     */
    bool isBlockedForSnoop()
    {
        return blockedSnoop != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            blocked_causes[cause]++;
            blockedCycle = curTick;
        }
        int old_state = blocked;
        if (!(blocked & flag)) {
            // Wasn't already blocked for this cause.
            blocked |= flag;
            DPRINTF(Cache, "Blocking for cause %s\n", cause);
            if (!old_state)
                cpuSidePort->setBlocked();
        }
    }
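
    // A rough usage sketch (the caller and condition are hypothetical; derived
    // caches decide when to block):
    //
    //   if (noFreeMSHRs)                       // out of MSHRs, stop accepting
    //       setBlocked(Blocked_NoMSHRs);       // requests on the CPU side
    //   ...
    //   clearBlocked(Blocked_NoMSHRs);         // an MSHR was freed, resume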

    /**
     * Marks the snoop path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the master interface.
     * @param cause The reason to block the snoop path.
     */
    void setBlockedForSnoop(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        uint8_t old_state = blockedSnoop;
        if (!(blockedSnoop & flag)) {
            // Wasn't already blocked for this cause.
            blockedSnoop |= flag;
            if (!old_state)
                memSidePort->setBlocked();
        }
    }

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        DPRINTF(Cache, "Unblocking for cause %s, causes left=%i\n",
                cause, blocked);
        if (blocked & flag) {
            blocked &= ~flag;
            if (!isBlocked()) {
                blocked_cycles[cause] += curTick - blockedCycle;
                DPRINTF(Cache, "Unblocking from all causes\n");
                cpuSidePort->clearBlocked();
            }
        }
        if (blockedSnoop & flag) {
            blockedSnoop &= ~flag;
            if (!isBlockedForSnoop()) {
                memSidePort->clearBlocked();
            }
        }
    }

    /**
     * Return true if the master bus should be requested.
     * @return True if there are outstanding requests for the master bus.
     */
    bool doMasterRequest()
    {
        return masterRequests != 0;
    }

    /**
     * Request the master bus for the given cause and time.
     * @param cause The reason for the request.
     * @param time The time to make the request.
     */
    void setMasterRequest(RequestCause cause, Tick time)
    {
        if (!doMasterRequest() && !memSidePort->waitingOnRetry) {
            BaseCache::CacheEvent *reqCpu =
                new BaseCache::CacheEvent(memSidePort, false);
            reqCpu->schedule(time);
        }
        uint8_t flag = 1 << cause;
        masterRequests |= flag;
    }

    /**
     * Clear the master bus request for the given cause.
     * @param cause The request reason to clear.
     */
    void clearMasterRequest(RequestCause cause)
    {
        uint8_t flag = 1 << cause;
        masterRequests &= ~flag;
        checkDrain();
    }
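
    // Hypothetical usage sketch from a derived cache's miss path (the real call
    // sites live in the derived cache implementation): request the bus when an
    // MSHR has something to send, clear the request once nothing is pending:
    //
    //   setMasterRequest(Request_MSHR, time);
    //   ...
    //   clearMasterRequest(Request_MSHR);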

    /**
     * Return true if the slave bus should be requested.
     * @return True if there are outstanding requests for the slave bus.
     */
    bool doSlaveRequest()
    {
        return slaveRequests != 0;
    }

    /**
     * Request the slave bus for the given reason and time.
     * @param cause The reason for the request.
     * @param time The time to make the request.
     */
    void setSlaveRequest(RequestCause cause, Tick time)
    {
        if (!doSlaveRequest() && !cpuSidePort->waitingOnRetry) {
            BaseCache::CacheEvent *reqCpu =
                new BaseCache::CacheEvent(cpuSidePort, false);
            reqCpu->schedule(time);
        }
        uint8_t flag = 1 << cause;
        slaveRequests |= flag;
    }

    /**
     * Clear the slave bus request for the given reason.
     * @param cause The request reason to clear.
     */
    void clearSlaveRequest(RequestCause cause)
    {
        uint8_t flag = 1 << cause;
        slaveRequests &= ~flag;
        checkDrain();
    }

    /**
     * Send a response to the slave interface.
     * @param pkt The request being responded to.
     * @param time The time the response is ready.
     */
    void respond(PacketPtr pkt, Tick time)
    {
        assert(time >= curTick);
        if (pkt->needsResponse()) {
/*            CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
            reqCpu->schedule(time);
*/
            if (cpuSidePort->transmitList.empty()) {
                assert(!sendEvent->scheduled());
                sendEvent->schedule(time);
                cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
                                                    (time,pkt));
                return;
            }

            // Something is on the list and this belongs at the end.
            if (time >= cpuSidePort->transmitList.back().first) {
                cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
                                                    (time,pkt));
                return;
            }
            // Something is on the list and this belongs somewhere else.
            std::list<std::pair<Tick,PacketPtr> >::iterator i =
                cpuSidePort->transmitList.begin();
            std::list<std::pair<Tick,PacketPtr> >::iterator end =
                cpuSidePort->transmitList.end();
            bool done = false;

            while (i != end && !done) {
                if (time < i->first) {
                    if (i == cpuSidePort->transmitList.begin()) {
                        // Inserting at the beginning, reschedule.
                        sendEvent->reschedule(time);
                    }
                    cpuSidePort->transmitList.insert(i,std::pair<Tick,PacketPtr>
                                                     (time,pkt));
                    done = true;
                }
                i++;
            }
        }
        else {
            if (pkt->cmd != Packet::UpgradeReq) {
                delete pkt->req;
                delete pkt;
            }
        }
    }
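
    // Note: respond(), respondToMiss() and respondToSnoop() all keep the target
    // port's transmitList sorted by ready time, and (re)schedule the matching
    // send event so it always fires for the earliest queued packet.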

    /**
     * Send a response to the slave interface and calculate miss latency.
     * @param pkt The request to respond to.
     * @param time The time the response is ready.
     */
    void respondToMiss(PacketPtr pkt, Tick time)
    {
        assert(time >= curTick);
        if (!pkt->req->isUncacheable()) {
            missLatency[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] +=
                time - pkt->time;
        }
        if (pkt->needsResponse()) {
/*            CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
            reqCpu->schedule(time);
*/
            if (cpuSidePort->transmitList.empty()) {
                assert(!sendEvent->scheduled());
                sendEvent->schedule(time);
                cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
                                                    (time,pkt));
                return;
            }

            // Something is on the list and this belongs at the end.
            if (time >= cpuSidePort->transmitList.back().first) {
                cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
                                                    (time,pkt));
                return;
            }
            // Something is on the list and this belongs somewhere else.
            std::list<std::pair<Tick,PacketPtr> >::iterator i =
                cpuSidePort->transmitList.begin();
            std::list<std::pair<Tick,PacketPtr> >::iterator end =
                cpuSidePort->transmitList.end();
            bool done = false;

            while (i != end && !done) {
                if (time < i->first) {
                    if (i == cpuSidePort->transmitList.begin()) {
                        // Inserting at the beginning, reschedule.
                        sendEvent->reschedule(time);
                    }
                    cpuSidePort->transmitList.insert(i,std::pair<Tick,PacketPtr>
                                                     (time,pkt));
                    done = true;
                }
                i++;
            }
        }
        else {
            if (pkt->cmd != Packet::UpgradeReq) {
                delete pkt->req;
                delete pkt;
            }
        }
    }

    /**
     * Supplies the data if cache-to-cache transfers are enabled.
     * @param pkt The bus transaction to fulfill.
     * @param time The time the response is ready.
     */
    void respondToSnoop(PacketPtr pkt, Tick time)
    {
        assert(time >= curTick);
        assert(pkt->needsResponse());
/*        CacheEvent *reqMem = new CacheEvent(memSidePort, pkt);
        reqMem->schedule(time);
*/
        if (memSidePort->transmitList.empty()) {
            assert(!memSendEvent->scheduled());
            memSendEvent->schedule(time);
            memSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
                                                (time,pkt));
            return;
        }

        // Something is on the list and this belongs at the end.
        if (time >= memSidePort->transmitList.back().first) {
            memSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
                                                (time,pkt));
            return;
        }
        // Something is on the list and this belongs somewhere else.
        std::list<std::pair<Tick,PacketPtr> >::iterator i =
            memSidePort->transmitList.begin();
        std::list<std::pair<Tick,PacketPtr> >::iterator end =
            memSidePort->transmitList.end();
        bool done = false;

        while (i != end && !done) {
            if (time < i->first) {
                if (i == memSidePort->transmitList.begin()) {
                    // Inserting at the beginning, reschedule.
                    memSendEvent->reschedule(time);
                }
                memSidePort->transmitList.insert(i,std::pair<Tick,PacketPtr>
                                                 (time,pkt));
                done = true;
            }
            i++;
        }
    }

    /**
     * Notification from the master interface that an address range changed.
     * Nothing to do for a cache.
     */
    void rangeChange() {}

    void getAddressRanges(AddrRangeList &resp, AddrRangeList &snoop, bool isCpuSide)
    {
        if (isCpuSide) {
            AddrRangeList dummy;
            memSidePort->getPeerAddressRanges(resp, dummy);
        } else {
            // This is where snoops get updated.
            AddrRangeList dummy;
            cpuSidePort->getPeerAddressRanges(dummy, snoop);
        }
    }

    virtual unsigned int drain(Event *de);

    void checkDrain()
    {
        if (drainEvent && canDrain()) {
            drainEvent->process();
            changeState(SimObject::Drained);
            // Clear the drain event.
            drainEvent = NULL;
        }
    }

    bool canDrain()
    {
        if (doMasterRequest() || doSlaveRequest()) {
            return false;
        } else if (memSidePort && !memSidePort->canDrain()) {
            return false;
        } else if (cpuSidePort && !cpuSidePort->canDrain()) {
            return false;
        }
        return true;
    }
};

#endif //__BASE_CACHE_HH__