base.hh revision 4000
/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __BASE_CACHE_HH__
#define __BASE_CACHE_HH__

#include <vector>
#include <string>
#include <list>
#include <inttypes.h>

#include "base/misc.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/port.hh"
#include "mem/request.hh"
#include "sim/eventq.hh"

/**
 * Reasons for Caches to be Blocked.
 */
enum BlockedCause{
    Blocked_NoMSHRs,
    Blocked_NoTargets,
    Blocked_NoWBBuffers,
    Blocked_Coherence,
    NUM_BLOCKED_CAUSES
};

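/*
 * Note: each cause is used as a bit position in the blocked/blockedSnoop
 * bit vectors maintained by BaseCache below (see setBlocked(), which sets
 * bit (1 << cause)).
 */
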
/**
 * Reasons for cache to request a bus.
 */
enum RequestCause{
    Request_MSHR,
    Request_WB,
    Request_Coherence,
    Request_PF
};

class MSHR;
/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public MemObject
{
    /** A port connecting this cache to a CPU-side or memory-side peer. */
    class CachePort : public Port
    {
      public:
        /** Pointer to the cache that owns this port. */
        BaseCache *cache;

      protected:
        CachePort(const std::string &_name, BaseCache *_cache, bool _isCpuSide);
        virtual void recvStatusChange(Status status);

        virtual void getDeviceAddressRanges(AddrRangeList &resp,
                                            AddrRangeList &snoop);

        virtual int deviceBlockSize();

        virtual void recvRetry();

      public:
        /** Mark this port as blocked; incoming requests will be refused. */
        void setBlocked();

        /** Unblock this port and send a retry if one is pending. */
        void clearBlocked();

        bool checkFunctional(PacketPtr pkt);

        void checkAndSendFunctional(PacketPtr pkt);

        /** Returns true if this port has no packets left to send. */
        bool canDrain() { return drainList.empty() && transmitList.empty(); }

        /** True if this port is currently refusing new requests. */
        bool blocked;

        /** True if a retry must be sent to the peer when unblocking. */
        bool mustSendRetry;

        /** True if this is the CPU-side port of the cache. */
        bool isCpuSide;

        /** True if a send failed and this port is waiting for a peer retry. */
        bool waitingOnRetry;

        /** Packets that could not be sent and are waiting to be retried. */
        std::list<PacketPtr> drainList;

        /** Responses scheduled for transmission, ordered by ready time. */
        std::list<std::pair<Tick,PacketPtr> > transmitList;
    };

    /** Event used to send a queued packet on one of the cache's ports. */
    struct CacheEvent : public Event
    {
        /** The port this event sends on. */
        CachePort *cachePort;
        /** The packet to send, if any. */
        PacketPtr pkt;
        /** True if this event sends queued responses rather than requesting the bus. */
        bool newResponse;

        CacheEvent(CachePort *_cachePort, bool response);
        void process();
        const char *description();
    };

  public: // Made public so coherence can get at it.
    /** The CPU-side (slave) port. */
    CachePort *cpuSidePort;
    /** The memory-side (master) port. */
    CachePort *memSidePort;

    /** Event used to send scheduled responses on the CPU-side port. */
    CacheEvent *sendEvent;
    /** Event used to send scheduled responses on the memory-side port. */
    CacheEvent *memSendEvent;

  private:
    /** Forward a range change from one side of the cache to the other. */
    void recvStatusChange(Port::Status status, bool isCpuSide)
    {
        if (status == Port::RangeChange){
            if (!isCpuSide) {
                cpuSidePort->sendStatusChange(Port::RangeChange);
            }
            else {
                memSidePort->sendStatusChange(Port::RangeChange);
            }
        }
    }

    /** Get the next request packet to send on the memory-side bus. */
    virtual PacketPtr getPacket() = 0;

    /** Get the next coherence packet to send on the memory-side bus. */
    virtual PacketPtr getCoherencePacket() = 0;

    /** Notify the cache of the outcome of sending the packet for mshr. */
    virtual void sendResult(PacketPtr &pkt, MSHR* mshr, bool success) = 0;

    /** Notify the cache of the outcome of sending a coherence packet. */
    virtual void sendCoherenceResult(PacketPtr &pkt, MSHR* mshr, bool success) = 0;

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /**
     * Bit vector for the blocking reasons for the snoop path.
     * @sa #BlockedCause
     */
    uint8_t blockedSnoop;

    /**
     * Bit vector for the outstanding requests for the master interface.
     */
    uint8_t masterRequests;

    /**
     * Bit vector for the outstanding requests for the slave interface.
     */
    uint8_t slaveRequests;

  protected:

    /** The time at which the cache blocked, used for statistics. */
    Tick blockedCycle;

    /** Block size of this cache. */
    const int blkSize;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

    /** The drain event. */
    Event *drainEvent;

  public:
    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command. @sa Packet::Command */
    Stats::Vector<> hits[NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command. @sa Packet::Command */
    Stats::Vector<> misses[NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector<> missLatency[NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector<> blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector<> blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of fast writes (WH64) performed. */
    Stats::Scalar<> fastWrites;

    /** The number of cache copies performed. */
    Stats::Scalar<> cacheCopies;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    virtual void regStats();

  public:

    class Params
    {
      public:
        /** List of address ranges of this cache. */
        std::vector<Range<Addr> > addrRange;
        /** The hit latency for this cache. */
        int hitLatency;
        /** The block size of this cache. */
        int blkSize;
        /**
         * The maximum number of misses this cache should handle before
         * ending the simulation.
         */
        Counter maxMisses;

        /**
         * Construct an instance of this parameter class.
         */
        Params(std::vector<Range<Addr> > addr_range,
               int hit_latency, int _blkSize, Counter max_misses)
            : addrRange(addr_range), hitLatency(hit_latency), blkSize(_blkSize),
              maxMisses(max_misses)
        {
        }
    };

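    /*
     * Illustrative sketch (not part of the original header): code that
     * constructs a cache would typically populate Params along these
     * lines. The 64-byte block size, 1-cycle hit latency, and the range
     * helper shown here are assumptions for the example only.
     *
     *   std::vector<Range<Addr> > ranges;
     *   ranges.push_back(RangeSize(0, MaxAddr));    // assumes base/range.hh helper
     *   BaseCache::Params params(ranges, 1, 64, 0); // hit_latency, blkSize, max_misses
     */
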
    /**
     * Create and initialize a basic cache object.
     * @param name The name of this cache.
     * @param params The parameter object for this BaseCache.
     */
    BaseCache(const std::string &name, Params &params)
        : MemObject(name), blocked(0), blockedSnoop(0), masterRequests(0),
          slaveRequests(0), blkSize(params.blkSize),
          missCount(params.maxMisses), drainEvent(NULL)
    {
        // Start ports at NULL; if more than one is created we should panic.
        cpuSidePort = NULL;
        memSidePort = NULL;
    }

    ~BaseCache()
    {
        delete sendEvent;
        delete memSendEvent;
    }

    virtual void init();

    /**
     * Query block size of a cache.
     * @return The block size.
     */
    int getBlockSize() const
    {
        return blkSize;
    }

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked()
    {
        return blocked != 0;
    }

    /**
     * Returns true if the cache is blocked for snoops.
     */
    bool isBlockedForSnoop()
    {
        return blockedSnoop != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            blocked_causes[cause]++;
            blockedCycle = curTick;
        }
        int old_state = blocked;
        if (!(blocked & flag)) {
            // Wasn't already blocked for this cause
            blocked |= flag;
            DPRINTF(Cache,"Blocking for cause %s\n", cause);
            if (!old_state)
                cpuSidePort->setBlocked();
        }
    }

    /**
     * Marks the snoop path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the master interface.
     * @param cause The reason to block the snoop path.
     */
    void setBlockedForSnoop(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        uint8_t old_state = blockedSnoop;
        if (!(blockedSnoop & flag)) {
            // Wasn't already blocked for this cause
            blockedSnoop |= flag;
            if (!old_state)
                memSidePort->setBlocked();
        }
    }

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        DPRINTF(Cache,"Unblocking for cause %s, causes left=%i\n",
                cause, blocked);
        if (blocked & flag)
        {
            blocked &= ~flag;
            if (!isBlocked()) {
                blocked_cycles[cause] += curTick - blockedCycle;
                DPRINTF(Cache,"Unblocking from all causes\n");
                cpuSidePort->clearBlocked();
            }
        }
        if (blockedSnoop & flag)
        {
            blockedSnoop &= ~flag;
            if (!isBlockedForSnoop()) {
                memSidePort->clearBlocked();
            }
        }
    }

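    /*
     * Illustrative use (not part of the original header): a derived cache
     * typically brackets resource exhaustion with these calls; the
     * mshrQueue member shown here is hypothetical.
     *
     *   if (mshrQueue.isFull())
     *       setBlocked(Blocked_NoMSHRs);
     *   ...
     *   clearBlocked(Blocked_NoMSHRs);  // once an MSHR is freed
     */
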
    /**
     * True if the master bus should be requested.
     * @return True if there are outstanding requests for the master bus.
     */
    bool doMasterRequest()
    {
        return masterRequests != 0;
    }

    /**
     * Request the master bus for the given cause and time.
     * @param cause The reason for the request.
     * @param time The time to make the request.
     */
    void setMasterRequest(RequestCause cause, Tick time)
    {
        if (!doMasterRequest() && !memSidePort->waitingOnRetry)
        {
            BaseCache::CacheEvent * reqCpu =
                new BaseCache::CacheEvent(memSidePort, false);
            reqCpu->schedule(time);
        }
        uint8_t flag = 1<<cause;
        masterRequests |= flag;
    }

    /**
     * Clear the master bus request for the given cause.
     * @param cause The request reason to clear.
     */
    void clearMasterRequest(RequestCause cause)
    {
        uint8_t flag = 1<<cause;
        masterRequests &= ~flag;
        checkDrain();
    }

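    /*
     * Illustrative use (not part of the original header): a miss queue
     * would typically call setMasterRequest(Request_MSHR, time) when a
     * miss is queued and clearMasterRequest(Request_MSHR) once it has no
     * more packets to send.
     */
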
    /**
     * Return true if the slave bus should be requested.
     * @return True if there are outstanding requests for the slave bus.
     */
    bool doSlaveRequest()
    {
        return slaveRequests != 0;
    }

    /**
     * Request the slave bus for the given reason and time.
     * @param cause The reason for the request.
     * @param time The time to make the request.
     */
    void setSlaveRequest(RequestCause cause, Tick time)
    {
        if (!doSlaveRequest() && !cpuSidePort->waitingOnRetry)
        {
            BaseCache::CacheEvent * reqCpu =
                new BaseCache::CacheEvent(cpuSidePort, false);
            reqCpu->schedule(time);
        }
        uint8_t flag = 1<<cause;
        slaveRequests |= flag;
    }

    /**
     * Clear the slave bus request for the given reason.
     * @param cause The request reason to clear.
     */
    void clearSlaveRequest(RequestCause cause)
    {
        uint8_t flag = 1<<cause;
        slaveRequests &= ~flag;
        checkDrain();
    }

    /**
     * Send a response to the slave interface.
     * @param pkt The request being responded to.
     * @param time The time the response is ready.
     */
    void respond(PacketPtr pkt, Tick time)
    {
        assert(time >= curTick);
        if (pkt->needsResponse()) {
/*            CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
            reqCpu->schedule(time);
*/
            if (cpuSidePort->transmitList.empty()) {
                assert(!sendEvent->scheduled());
                sendEvent->schedule(time);
                cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
                                                    (time,pkt));
                return;
            }

            // Something is on the list and this belongs at the end
            if (time >= cpuSidePort->transmitList.back().first) {
                cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
                                                    (time,pkt));
                return;
            }
            // Something is on the list and this belongs somewhere else
            std::list<std::pair<Tick,PacketPtr> >::iterator i =
                cpuSidePort->transmitList.begin();
            std::list<std::pair<Tick,PacketPtr> >::iterator end =
                cpuSidePort->transmitList.end();
            bool done = false;

            while (i != end && !done) {
                if (time < i->first) {
                    if (i == cpuSidePort->transmitList.begin()) {
                        // Inserting at the beginning, reschedule the send event
                        sendEvent->reschedule(time);
                    }
                    cpuSidePort->transmitList.insert(i,std::pair<Tick,PacketPtr>
                                                     (time,pkt));
                    done = true;
                }
                i++;
            }
        }
        else {
            if (pkt->cmd != Packet::UpgradeReq)
            {
                delete pkt->req;
                delete pkt;
            }
        }
    }

    /**
     * Send a response to the slave interface and calculate miss latency.
     * @param pkt The request to respond to.
     * @param time The time the response is ready.
     */
    void respondToMiss(PacketPtr pkt, Tick time)
    {
        assert(time >= curTick);
        if (!pkt->req->isUncacheable()) {
            missLatency[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] +=
                time - pkt->time;
        }
        if (pkt->needsResponse()) {
/*            CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
            reqCpu->schedule(time);
*/
            if (cpuSidePort->transmitList.empty()) {
                assert(!sendEvent->scheduled());
                sendEvent->schedule(time);
                cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
                                                    (time,pkt));
                return;
            }

            // Something is on the list and this belongs at the end
            if (time >= cpuSidePort->transmitList.back().first) {
                cpuSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
                                                    (time,pkt));
                return;
            }
            // Something is on the list and this belongs somewhere else
            std::list<std::pair<Tick,PacketPtr> >::iterator i =
                cpuSidePort->transmitList.begin();
            std::list<std::pair<Tick,PacketPtr> >::iterator end =
                cpuSidePort->transmitList.end();
            bool done = false;

            while (i != end && !done) {
                if (time < i->first) {
                    if (i == cpuSidePort->transmitList.begin()) {
                        // Inserting at the beginning, reschedule the send event
                        sendEvent->reschedule(time);
                    }
                    cpuSidePort->transmitList.insert(i,std::pair<Tick,PacketPtr>
                                                     (time,pkt));
                    done = true;
                }
                i++;
            }
        }
        else {
            if (pkt->cmd != Packet::UpgradeReq)
            {
                delete pkt->req;
                delete pkt;
            }
        }
    }

    /**
     * Supplies the data if cache-to-cache transfers are enabled.
     * @param pkt The bus transaction to fulfill.
     * @param time The time the response is ready.
     */
    void respondToSnoop(PacketPtr pkt, Tick time)
    {
        assert(time >= curTick);
        assert (pkt->needsResponse());
/*        CacheEvent *reqMem = new CacheEvent(memSidePort, pkt);
        reqMem->schedule(time);
*/
        if (memSidePort->transmitList.empty()) {
            assert(!memSendEvent->scheduled());
            memSendEvent->schedule(time);
            memSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
                                                (time,pkt));
            return;
        }

        // Something is on the list and this belongs at the end
        if (time >= memSidePort->transmitList.back().first) {
            memSidePort->transmitList.push_back(std::pair<Tick,PacketPtr>
                                                (time,pkt));
            return;
        }
        // Something is on the list and this belongs somewhere else
        std::list<std::pair<Tick,PacketPtr> >::iterator i =
            memSidePort->transmitList.begin();
        std::list<std::pair<Tick,PacketPtr> >::iterator end =
            memSidePort->transmitList.end();
        bool done = false;

        while (i != end && !done) {
            if (time < i->first) {
                if (i == memSidePort->transmitList.begin()) {
                    // Inserting at the beginning, reschedule the send event
                    memSendEvent->reschedule(time);
                }
                memSidePort->transmitList.insert(i,std::pair<Tick,PacketPtr>(time,pkt));
                done = true;
            }
            i++;
        }
    }

    /**
     * Notification from the master interface that an address range changed.
     * Nothing to do for a cache.
     */
    void rangeChange() {}

    /**
     * Collect this cache's address ranges from the memory-side peer (for the
     * CPU side) or its snoop ranges from the CPU-side peer (for the memory
     * side).
     */
    void getAddressRanges(AddrRangeList &resp, AddrRangeList &snoop, bool isCpuSide)
    {
        if (isCpuSide)
        {
            AddrRangeList dummy;
            memSidePort->getPeerAddressRanges(resp, dummy);
        }
        else
        {
            // This is where snoops get updated
            AddrRangeList dummy;
            cpuSidePort->getPeerAddressRanges(dummy, snoop);
            return;
        }
    }

    /** Drain the cache; de is signaled once all outstanding state has drained. */
    virtual unsigned int drain(Event *de);

    /** Signal and clear the pending drain event once the cache has drained. */
    void checkDrain()
    {
        if (drainEvent && canDrain()) {
            drainEvent->process();
            changeState(SimObject::Drained);
            // Clear the drain event
            drainEvent = NULL;
        }
    }

    /** Returns true if the cache and both of its ports have no work pending. */
    bool canDrain()
    {
        if (doMasterRequest() || doSlaveRequest()) {
            return false;
        } else if (memSidePort && !memSidePort->canDrain()) {
            return false;
        } else if (cpuSidePort && !cpuSidePort->canDrain()) {
            return false;
        }
        return true;
    }

    /** Returns true if the block containing addr is present in the cache. */
    virtual bool inCache(Addr addr) = 0;

    /** Returns true if addr is the target of an outstanding miss. */
    virtual bool inMissQueue(Addr addr) = 0;
};

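/*
 * Illustrative sketch (not part of the original header): a concrete cache
 * derived from BaseCache must supply the pure virtual hooks declared above,
 * roughly along these lines. MyCache, missQueue, cohQueue, and tags are
 * hypothetical names used only for this example.
 *
 *   class MyCache : public BaseCache
 *   {
 *     public:
 *       MyCache(const std::string &name, Params &params)
 *           : BaseCache(name, params) {}
 *
 *       virtual PacketPtr getPacket() { return missQueue.front(); }
 *       virtual PacketPtr getCoherencePacket() { return cohQueue.front(); }
 *       virtual void sendResult(PacketPtr &pkt, MSHR *mshr, bool success) {}
 *       virtual void sendCoherenceResult(PacketPtr &pkt, MSHR *mshr,
 *                                        bool success) {}
 *       virtual bool inCache(Addr addr) { return tags.findBlock(addr); }
 *       virtual bool inMissQueue(Addr addr) { return missQueue.probe(addr); }
 *   };
 */
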
#endif //__BASE_CACHE_HH__
