base.hh revision 4776
/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __BASE_CACHE_HH__
#define __BASE_CACHE_HH__

#include <vector>
#include <string>
#include <list>
#include <inttypes.h>

#include "base/misc.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/port.hh"
#include "mem/request.hh"
#include "sim/eventq.hh"
/**
 * Reasons for Caches to be Blocked.
 */
enum BlockedCause {
    Blocked_NoMSHRs,
    Blocked_NoTargets,
    Blocked_NoWBBuffers,
    Blocked_Coherence,
    NUM_BLOCKED_CAUSES
};
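
// BlockedCause values are used as bit positions in the blocked and
// blockedSnoop bit vectors declared below. For example (illustrative
// only), a cache that has run out of MSHRs would have
// (blocked & (1 << Blocked_NoMSHRs)) set until the matching
// clearBlocked(Blocked_NoMSHRs) call.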

/**
 * Reasons for a cache to request a bus.
 */
enum RequestCause {
    Request_MSHR,
    Request_WB,
    Request_Coherence,
    Request_PF
};
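
// RequestCause values are likewise used as bit positions, in the
// masterRequests and slaveRequests bit vectors (see setMasterRequest()
// and setSlaveRequest() below).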

class MSHR;
/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public MemObject
{
    class CachePort : public Port
    {
      public:
        BaseCache *cache;

      protected:
        CachePort(const std::string &_name, BaseCache *_cache, bool _isCpuSide);
        virtual void recvStatusChange(Status status);

        virtual void getDeviceAddressRanges(AddrRangeList &resp,
                                            bool &snoop);

        virtual int deviceBlockSize();

        virtual void recvRetry();

      public:
        void setBlocked();

        void clearBlocked();

        bool checkFunctional(PacketPtr pkt);

        void checkAndSendFunctional(PacketPtr pkt);

        bool canDrain() { return drainList.empty() && transmitList.empty(); }

        /** True if this port will not accept new requests. */
        bool blocked;

        /** True if a retry must be sent once the port unblocks. */
        bool mustSendRetry;

        /** True if this is the CPU-side port of the cache. */
        bool isCpuSide;

        /** True if this port is waiting for a retry from its peer. */
        bool waitingOnRetry;

        /** Packets that could not be sent and are awaiting a retry. */
        std::list<PacketPtr> drainList;

        /** Responses scheduled for future ticks, kept sorted by time. */
        std::list<std::pair<Tick,PacketPtr> > transmitList;
    };

    struct RequestEvent : public Event
    {
        CachePort *cachePort;

        RequestEvent(CachePort *_cachePort, Tick when);
        void process();
        const char *description();
    };

    struct ResponseEvent : public Event
    {
        CachePort *cachePort;

        ResponseEvent(CachePort *_cachePort);
        void process();
        const char *description();
    };

  public: // Made public so coherence can get at it.
    CachePort *cpuSidePort;
    CachePort *memSidePort;

    /** Event used to send queued responses on the CPU-side port. */
    ResponseEvent *sendEvent;
    /** Event used to send queued responses on the memory-side port. */
    ResponseEvent *memSendEvent;

  private:
    void recvStatusChange(Port::Status status, bool isCpuSide)
    {
        if (status == Port::RangeChange) {
            if (!isCpuSide) {
                cpuSidePort->sendStatusChange(Port::RangeChange);
            }
            else {
                memSidePort->sendStatusChange(Port::RangeChange);
            }
        }
    }

    virtual PacketPtr getPacket() = 0;

    virtual PacketPtr getCoherencePacket() = 0;

    virtual void sendResult(PacketPtr &pkt, MSHR* mshr, bool success) = 0;

    virtual void sendCoherenceResult(PacketPtr &pkt, MSHR* mshr, bool success) = 0;

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /**
     * Bit vector of the blocking reasons for the snoop path.
     * @sa #BlockedCause
     */
    uint8_t blockedSnoop;

    /**
     * Bit vector of the outstanding requests for the master interface.
     */
    uint8_t masterRequests;

    /**
     * Bit vector of the outstanding requests for the slave interface.
     */
    uint8_t slaveRequests;

  protected:

    /** Stores the time the cache blocked, for statistics. */
    Tick blockedCycle;

    /** Block size of this cache. */
    const int blkSize;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

    /** The drain event. */
    Event *drainEvent;

  public:
    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command. @sa Packet::Command */
    Stats::Vector<> hits[MemCmd::NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command. @sa Packet::Command */
    Stats::Vector<> misses[MemCmd::NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector<> missLatency[MemCmd::NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector<> blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector<> blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of fast writes (WH64) performed. */
    Stats::Scalar<> fastWrites;

    /** The number of cache copies performed. */
    Stats::Scalar<> cacheCopies;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    virtual void regStats();

  public:

    class Params
    {
      public:
        /** List of address ranges of this cache. */
        std::vector<Range<Addr> > addrRange;
        /** The hit latency for this cache. */
        int hitLatency;
        /** The block size of this cache. */
        int blkSize;
        /**
         * The maximum number of misses this cache should handle before
         * ending the simulation.
         */
        Counter maxMisses;

        /**
         * Construct an instance of this parameter class.
         */
        Params(std::vector<Range<Addr> > addr_range,
               int hit_latency, int _blkSize, Counter max_misses)
            : addrRange(addr_range), hitLatency(hit_latency), blkSize(_blkSize),
              maxMisses(max_misses)
        {
        }
    };
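
    // A minimal construction sketch (illustrative only; the concrete values
    // are invented, not taken from any real configuration):
    //
    //     std::vector<Range<Addr> > ranges;  // filled in by the config system
    //     BaseCache::Params params(ranges,
    //                              3,    // hit latency in cycles
    //                              64,   // block size in bytes
    //                              0);   // maxMisses; 0 is assumed here to
    //                                    // leave the miss-count exit disabled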

    /**
     * Create and initialize a basic cache object.
     * @param name The name of this cache.
     * @param params The parameter object for this BaseCache.
     */
    BaseCache(const std::string &name, Params &params)
        : MemObject(name), blocked(0), blockedSnoop(0), masterRequests(0),
          slaveRequests(0), blkSize(params.blkSize),
          missCount(params.maxMisses), drainEvent(NULL)
    {
        // Start the ports at NULL; if more than one of either is created,
        // we should panic.
        cpuSidePort = NULL;
        memSidePort = NULL;
    }

    ~BaseCache()
    {
        delete sendEvent;
        delete memSendEvent;
    }

    virtual void init();

    /**
     * Query the block size of the cache.
     * @return The block size.
     */
    int getBlockSize() const
    {
        return blkSize;
    }

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked()
    {
        return blocked != 0;
    }

    /**
     * Returns true if the cache is blocked for snoops.
     */
    bool isBlockedForSnoop()
    {
        return blockedSnoop != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            blocked_causes[cause]++;
            blockedCycle = curTick;
        }
        int old_state = blocked;
        if (!(blocked & flag)) {
            // Wasn't already blocked for this cause
            blocked |= flag;
            DPRINTF(Cache,"Blocking for cause %s\n", cause);
            if (!old_state)
                cpuSidePort->setBlocked();
        }
    }

    /**
     * Marks the snoop path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the master interface.
     * @param cause The reason to block the snoop path.
     */
    void setBlockedForSnoop(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        uint8_t old_state = blockedSnoop;
        if (!(blockedSnoop & flag)) {
            // Wasn't already blocked for this cause
            blockedSnoop |= flag;
            if (!old_state)
                memSidePort->setBlocked();
        }
    }

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        DPRINTF(Cache,"Unblocking for cause %s, causes left=%i\n",
                cause, blocked);
        if (blocked & flag)
        {
            blocked &= ~flag;
            if (!isBlocked()) {
                blocked_cycles[cause] += curTick - blockedCycle;
                DPRINTF(Cache,"Unblocking from all causes\n");
                cpuSidePort->clearBlocked();
            }
        }
        if (blockedSnoop & flag)
        {
            blockedSnoop &= ~flag;
            if (!isBlockedForSnoop()) {
                memSidePort->clearBlocked();
            }
        }
    }
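
    // Illustrative usage, not taken from any particular derived cache: a
    // cache that cannot allocate an MSHR for an incoming miss would
    // typically call
    //
    //     setBlocked(Blocked_NoMSHRs);
    //
    // and, once an MSHR is freed again,
    //
    //     clearBlocked(Blocked_NoMSHRs);
    //
    // at which point the CPU-side port may immediately retry the request it
    // previously rejected (see the warning above).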

    /**
     * True if the master bus should be requested.
     * @return True if there are outstanding requests for the master bus.
     */
    bool doMasterRequest()
    {
        return masterRequests != 0;
    }

    /**
     * Request the master bus for the given cause and time.
     * @param cause The reason for the request.
     * @param time The time to make the request.
     */
    void setMasterRequest(RequestCause cause, Tick time)
    {
        if (!doMasterRequest() && !memSidePort->waitingOnRetry)
        {
            new RequestEvent(memSidePort, time);
        }
        uint8_t flag = 1 << cause;
        masterRequests |= flag;
    }

    /**
     * Clear the master bus request for the given cause.
     * @param cause The request reason to clear.
     */
    void clearMasterRequest(RequestCause cause)
    {
        uint8_t flag = 1 << cause;
        masterRequests &= ~flag;
        checkDrain();
    }

    /**
     * Return true if the slave bus should be requested.
     * @return True if there are outstanding requests for the slave bus.
     */
    bool doSlaveRequest()
    {
        return slaveRequests != 0;
    }

    /**
     * Request the slave bus for the given reason and time.
     * @param cause The reason for the request.
     * @param time The time to make the request.
     */
    void setSlaveRequest(RequestCause cause, Tick time)
    {
        if (!doSlaveRequest() && !cpuSidePort->waitingOnRetry)
        {
            new RequestEvent(cpuSidePort, time);
        }
        uint8_t flag = 1 << cause;
        slaveRequests |= flag;
    }

    /**
     * Clear the slave bus request for the given reason.
     * @param cause The request reason to clear.
     */
    void clearSlaveRequest(RequestCause cause)
    {
        uint8_t flag = 1 << cause;
        slaveRequests &= ~flag;
        checkDrain();
    }

    /**
     * Send a response to the slave interface.
     * @param pkt The request being responded to.
     * @param time The time the response is ready.
     */
    void respond(PacketPtr pkt, Tick time)
    {
        assert(time >= curTick);
        if (pkt->needsResponse()) {
/*            CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
            reqCpu->schedule(time);
*/
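            // Keep transmitList sorted by ready time; sendEvent is always
            // scheduled for the entry at the head of the list.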
            if (cpuSidePort->transmitList.empty()) {
                assert(!sendEvent->scheduled());
                sendEvent->schedule(time);
                cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
                                                    (time,pkt));
                return;
            }

            // something is on the list and this belongs at the end
            if (time >= cpuSidePort->transmitList.back().first) {
                cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
                                                    (time,pkt));
                return;
            }
            // Something is on the list and this belongs somewhere else
            std::list<std::pair<Tick,PacketPtr> >::iterator i =
                cpuSidePort->transmitList.begin();
            std::list<std::pair<Tick,PacketPtr> >::iterator end =
                cpuSidePort->transmitList.end();
            bool done = false;

            while (i != end && !done) {
                if (time < i->first) {
                    if (i == cpuSidePort->transmitList.begin()) {
                        // Inserting at the beginning, reschedule
                        sendEvent->reschedule(time);
                    }
                    cpuSidePort->transmitList.insert(i,std::pair<Tick,PacketPtr>
                                                     (time,pkt));
                    done = true;
                }
                i++;
            }
        }
        else {
            if (pkt->cmd != MemCmd::UpgradeReq)
            {
                delete pkt->req;
                delete pkt;
            }
        }
    }

    /**
     * Send a response to the slave interface and calculate miss latency.
     * @param pkt The request to respond to.
     * @param time The time the response is ready.
     */
    void respondToMiss(PacketPtr pkt, Tick time)
    {
        assert(time >= curTick);
        if (!pkt->req->isUncacheable()) {
            missLatency[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] +=
                time - pkt->time;
        }
        if (pkt->needsResponse()) {
/*            CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
            reqCpu->schedule(time);
*/
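            // Same ordered insertion into transmitList as in respond().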
            if (cpuSidePort->transmitList.empty()) {
                assert(!sendEvent->scheduled());
                sendEvent->schedule(time);
                cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
                                                    (time,pkt));
                return;
            }

            // something is on the list and this belongs at the end
            if (time >= cpuSidePort->transmitList.back().first) {
                cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
                                                    (time,pkt));
                return;
            }
            // Something is on the list and this belongs somewhere else
            std::list<std::pair<Tick,PacketPtr> >::iterator i =
                cpuSidePort->transmitList.begin();
            std::list<std::pair<Tick,PacketPtr> >::iterator end =
                cpuSidePort->transmitList.end();
            bool done = false;

            while (i != end && !done) {
                if (time < i->first) {
                    if (i == cpuSidePort->transmitList.begin()) {
                        // Inserting at the beginning, reschedule
                        sendEvent->reschedule(time);
                    }
                    cpuSidePort->transmitList.insert(i,std::pair<Tick,PacketPtr>
                                                     (time,pkt));
                    done = true;
                }
                i++;
            }
        }
        else {
            if (pkt->cmd != MemCmd::UpgradeReq)
            {
                delete pkt->req;
                delete pkt;
            }
        }
    }

    /**
     * Supplies the data if cache to cache transfers are enabled.
     * @param pkt The bus transaction to fulfill.
     * @param time The time the response is ready.
     */
    void respondToSnoop(PacketPtr pkt, Tick time)
    {
        assert(time >= curTick);
        assert(pkt->needsResponse());
/*        CacheEvent *reqMem = new CacheEvent(memSidePort, pkt);
        reqMem->schedule(time);
*/
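        // Same ordered insertion as in respond(), but using the memory-side
        // port and memSendEvent.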
        if (memSidePort->transmitList.empty()) {
            assert(!memSendEvent->scheduled());
            memSendEvent->schedule(time);
            memSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
                                                (time,pkt));
            return;
        }

        // something is on the list and this belongs at the end
        if (time >= memSidePort->transmitList.back().first) {
            memSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
                                                (time,pkt));
            return;
        }
        // Something is on the list and this belongs somewhere else
        std::list<std::pair<Tick,PacketPtr> >::iterator i =
            memSidePort->transmitList.begin();
        std::list<std::pair<Tick,PacketPtr> >::iterator end =
            memSidePort->transmitList.end();
        bool done = false;

        while (i != end && !done) {
            if (time < i->first) {
                if (i == memSidePort->transmitList.begin()) {
                    // Inserting at the beginning, reschedule
                    memSendEvent->reschedule(time);
                }
                memSidePort->transmitList.insert(i,std::pair<Tick,PacketPtr>(time,pkt));
                done = true;
            }
            i++;
        }
    }

    /**
     * Notification from the master interface that an address range changed.
     * Nothing to do for a cache.
     */
    void rangeChange() {}

    void getAddressRanges(AddrRangeList &resp, bool &snoop, bool isCpuSide)
    {
        if (isCpuSide)
        {
            bool dummy;
            memSidePort->getPeerAddressRanges(resp, dummy);
        }
        else
        {
            // This is where snoops get updated
            AddrRangeList dummy;
            snoop = true;
        }
    }

    virtual unsigned int drain(Event *de);

    void checkDrain()
    {
        if (drainEvent && canDrain()) {
            drainEvent->process();
            changeState(SimObject::Drained);
            // Clear the drain event
            drainEvent = NULL;
        }
    }

    bool canDrain()
    {
        if (doMasterRequest() || doSlaveRequest()) {
            return false;
        } else if (memSidePort && !memSidePort->canDrain()) {
            return false;
        } else if (cpuSidePort && !cpuSidePort->canDrain()) {
            return false;
        }
        return true;
    }

    virtual bool inCache(Addr addr) = 0;

    virtual bool inMissQueue(Addr addr) = 0;
};

#endif // __BASE_CACHE_HH__