/*
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 */

/* @file
 * Device model for Intel's 8254x line of gigabit ethernet controllers.
 */

#ifndef __DEV_I8254XGBE_HH__
#define __DEV_I8254XGBE_HH__

#include <cassert>
#include <cstring>
#include <deque>
#include <string>

#include "base/cprintf.hh"
#include "base/inet.hh"
#include "base/misc.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "dev/etherint.hh"
#include "dev/etherpkt.hh"
#include "dev/i8254xGBe_defs.hh"
#include "dev/pcidev.hh"
#include "dev/pktfifo.hh"
#include "params/IGbE.hh"
#include "sim/eventq.hh"
#include "sim/serialize.hh"

5112855Sgabeblack@google.comclass IGbEInt;
5212855Sgabeblack@google.com
5312855Sgabeblack@google.comclass IGbE : public PciDev
5412855Sgabeblack@google.com{
5512855Sgabeblack@google.com  private:
5612855Sgabeblack@google.com    IGbEInt *etherInt;
5712855Sgabeblack@google.com
5812855Sgabeblack@google.com    // device registers
5912855Sgabeblack@google.com    iGbReg::Regs regs;
6012855Sgabeblack@google.com
6112855Sgabeblack@google.com    // eeprom data, status and control bits
6212855Sgabeblack@google.com    int eeOpBits, eeAddrBits, eeDataBits;
6312855Sgabeblack@google.com    uint8_t eeOpcode, eeAddr;
6412855Sgabeblack@google.com    uint16_t flash[iGbReg::EEPROM_SIZE];
6512855Sgabeblack@google.com
6612855Sgabeblack@google.com    // The drain event if we have one
6712855Sgabeblack@google.com    Event *drainEvent;
6812855Sgabeblack@google.com
6912855Sgabeblack@google.com    // cached parameters from params struct
7012855Sgabeblack@google.com    bool useFlowControl;
7112855Sgabeblack@google.com
7212855Sgabeblack@google.com    // packet fifos
7312855Sgabeblack@google.com    PacketFifo rxFifo;
7412855Sgabeblack@google.com    PacketFifo txFifo;
7512855Sgabeblack@google.com
7612855Sgabeblack@google.com    // Packet that we are currently putting into the txFifo
7712855Sgabeblack@google.com    EthPacketPtr txPacket;
78
79    // Should to Rx/Tx State machine tick?
80    bool rxTick;
81    bool txTick;
82    bool txFifoTick;
83
84    bool rxDmaPacket;
85
86    // Event and function to deal with RDTR timer expiring
87    void rdtrProcess() {
88        rxDescCache.writeback(0);
89        DPRINTF(EthernetIntr, "Posting RXT interrupt because RDTR timer expired\n");
90        postInterrupt(iGbReg::IT_RXT, true);
91    }
92
93    //friend class EventWrapper<IGbE, &IGbE::rdtrProcess>;
94    EventWrapper<IGbE, &IGbE::rdtrProcess> rdtrEvent;
95
96    // Event and function to deal with RADV timer expiring
97    void radvProcess() {
98        rxDescCache.writeback(0);
99        DPRINTF(EthernetIntr, "Posting RXT interrupt because RADV timer expired\n");
100        postInterrupt(iGbReg::IT_RXT, true);
101    }
102
103    //friend class EventWrapper<IGbE, &IGbE::radvProcess>;
104    EventWrapper<IGbE, &IGbE::radvProcess> radvEvent;
105
106    // Event and function to deal with TADV timer expiring
107    void tadvProcess() {
108        txDescCache.writeback(0);
109        DPRINTF(EthernetIntr, "Posting TXDW interrupt because TADV timer expired\n");
110        postInterrupt(iGbReg::IT_TXDW, true);
111    }
112
113    //friend class EventWrapper<IGbE, &IGbE::tadvProcess>;
114    EventWrapper<IGbE, &IGbE::tadvProcess> tadvEvent;
115
116    // Event and function to deal with TIDV timer expiring
117    void tidvProcess() {
118        txDescCache.writeback(0);
119        DPRINTF(EthernetIntr, "Posting TXDW interrupt because TIDV timer expired\n");
120        postInterrupt(iGbReg::IT_TXDW, true);
121    }
122    //friend class EventWrapper<IGbE, &IGbE::tidvProcess>;
123    EventWrapper<IGbE, &IGbE::tidvProcess> tidvEvent;
124
125    // Main event to tick the device
126    void tick();
127    //friend class EventWrapper<IGbE, &IGbE::tick>;
128    EventWrapper<IGbE, &IGbE::tick> tickEvent;
129
130
131    void rxStateMachine();
132    void txStateMachine();
133    void txWire();
134
135    /** Write an interrupt into the interrupt pending register and check mask
136     * and interrupt limit timer before sending interrupt to CPU
137     * @param t the type of interrupt we are posting
138     * @param now should we ignore the interrupt limiting timer
139     */
140    void postInterrupt(iGbReg::IntTypes t, bool now = false);
141
142    /** Check and see if changes to the mask register have caused an interrupt
143     * to need to be sent or perhaps removed an interrupt cause.
144     */
145    void chkInterrupt();
146
147    /** Send an interrupt to the cpu
148     */
149    void cpuPostInt();
150    // Event to moderate interrupts
151    EventWrapper<IGbE, &IGbE::cpuPostInt> interEvent;
152
153    /** Clear the interupt line to the cpu
154     */
155    void cpuClearInt();
156
157    Tick intClock() { return Clock::Int::ns * 1024; }
158
159    /** This function is used to restart the clock so it can handle things like
160     * draining and resume in one place. */
161    void restartClock();
162
163    /** Check if all the draining things that need to occur have occured and
164     * handle the drain event if so.
165     */
166    void checkDrain();
167
168    template<class T>
169    class DescCache
170    {
171      protected:
172        virtual Addr descBase() const = 0;
173        virtual long descHead() const = 0;
174        virtual long descTail() const = 0;
175        virtual long descLen() const = 0;
176        virtual void updateHead(long h) = 0;
177        virtual void enableSm() = 0;
178        virtual void intAfterWb() const {}
179
180        std::deque<T*> usedCache;
181        std::deque<T*> unusedCache;
182
183        T *fetchBuf;
184        T *wbBuf;
185
186        // Pointer to the device we cache for
187        IGbE *igbe;
188
189        // Name of this  descriptor cache
190        std::string _name;
191
192        // How far we've cached
193        int cachePnt;
194
195        // The size of the descriptor cache
196        int size;
197
198        // How many descriptors we are currently fetching
199        int curFetching;
200
201        // How many descriptors we are currently writing back
202        int wbOut;
203
204        // if the we wrote back to the end of the descriptor ring and are going
205        // to have to wrap and write more
206        bool moreToWb;
207
208        // What the alignment is of the next descriptor writeback
209        Addr wbAlignment;
210
211       /** The packet that is currently being dmad to memory if any
212         */
213        EthPacketPtr pktPtr;
214
215      public:
216        DescCache(IGbE *i, const std::string n, int s)
217            : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0), wbOut(0),
218              pktPtr(NULL), fetchEvent(this), wbEvent(this)
219        {
220            fetchBuf = new T[size];
221            wbBuf = new T[size];
222        }
223
224        virtual ~DescCache()
225        {
226            reset();
227        }
228
229        std::string name() { return _name; }
230
231        /** If the address/len/head change when we've got descriptors that are
232         * dirty that is very bad. This function checks that we don't and if we
233         * do panics.
234         */
235        void areaChanged()
236        {
237            if (usedCache.size() > 0 || curFetching || wbOut)
238                panic("Descriptor Address, Length or Head changed. Bad\n");
239            reset();
240
241        }
242
243        void writeback(Addr aMask)
244        {
245            int curHead = descHead();
246            int max_to_wb = usedCache.size();
247
248            DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
249                    "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
250                    curHead, descTail(), descLen(), cachePnt, max_to_wb,
251                    descLeft());
252
253            // Check if this writeback is less restrictive that the previous
254            // and if so setup another one immediately following it
255            if (wbOut && (aMask < wbAlignment)) {
256                moreToWb = true;
257                wbAlignment = aMask;
258                DPRINTF(EthernetDesc, "Writing back already in process, returning\n");
259                return;
260            }
261
262
263            moreToWb = false;
264            wbAlignment = aMask;
265
266            if (max_to_wb + curHead >= descLen()) {
267                max_to_wb = descLen() - curHead;
268                moreToWb = true;
269                // this is by definition aligned correctly
270            } else if (aMask != 0) {
271                // align the wb point to the mask
272                max_to_wb = max_to_wb & ~aMask;
273            }
274
275            DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);
276
277            if (max_to_wb <= 0 || wbOut)
278                return;
279
280            wbOut = max_to_wb;
281
282            for (int x = 0; x < wbOut; x++)
283                memcpy(&wbBuf[x], usedCache[x], sizeof(T));
284
285            for (int x = 0; x < wbOut; x++) {
286                assert(usedCache.size());
287                delete usedCache[0];
288                usedCache.pop_front();
289            };
290
291
292            assert(wbOut);
293            igbe->dmaWrite(igbe->platform->pciToDma(descBase() + curHead * sizeof(T)),
294                    wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf);
295        }
296
297        /** Fetch a chunk of descriptors into the descriptor cache.
298         * Calls fetchComplete when the memory system returns the data
299         */
300        void fetchDescriptors()
301        {
302            size_t max_to_fetch;
303
304            if (descTail() >= cachePnt)
305                max_to_fetch = descTail() - cachePnt;
306            else
307                max_to_fetch = descLen() - cachePnt;
308
309
310            max_to_fetch = std::min(max_to_fetch, (size - usedCache.size() -
311                        unusedCache.size()));
312
313            DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
314                    "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
315                    descHead(), descTail(), descLen(), cachePnt,
316                    max_to_fetch, descLeft());
317
318            // Nothing to do
319            if (max_to_fetch == 0 || curFetching)
320                return;
321
322            // So we don't have two descriptor fetches going on at once
323            curFetching = max_to_fetch;
324
325            DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
326                    descBase() + cachePnt * sizeof(T),
327                    igbe->platform->pciToDma(descBase() + cachePnt * sizeof(T)),
328                    curFetching * sizeof(T));
329
330            assert(curFetching);
331            igbe->dmaRead(igbe->platform->pciToDma(descBase() + cachePnt * sizeof(T)),
332                    curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf);
333        }
334
335
336        /** Called by event when dma to read descriptors is completed
337         */
338        void fetchComplete()
339        {
340            T *newDesc;
341            for (int x = 0; x < curFetching; x++) {
342                newDesc = new T;
343                memcpy(newDesc, &fetchBuf[x], sizeof(T));
344                unusedCache.push_back(newDesc);
345            }
346
347#ifndef NDEBUG
348            int oldCp = cachePnt;
349#endif
350
351            cachePnt += curFetching;
352            assert(cachePnt <= descLen());
353            if (cachePnt == descLen())
354                cachePnt = 0;
355
356            curFetching = 0;
357
358            DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
359                    oldCp, cachePnt);
360
361            enableSm();
362            igbe->checkDrain();
363        }
364
365        EventWrapper<DescCache, &DescCache::fetchComplete> fetchEvent;
366
367        /** Called by event when dma to writeback descriptors is completed
368         */
369        void wbComplete()
370        {
371            long  curHead = descHead();
372#ifndef NDEBUG
373            long oldHead = curHead;
374#endif
375
376            curHead += wbOut;
377            wbOut = 0;
378
379            if (curHead >= descLen())
380                curHead -= descLen();
381
382            // Update the head
383            updateHead(curHead);
384
385            DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
386                    oldHead, curHead);
387
388            // If we still have more to wb, call wb now
389            if (moreToWb) {
390                DPRINTF(EthernetDesc, "Writeback has more todo\n");
391                writeback(wbAlignment);
392            }
393            intAfterWb();
394            igbe->checkDrain();
395        }
396
397
398        EventWrapper<DescCache, &DescCache::wbComplete> wbEvent;
399
400        /* Return the number of descriptors left in the ring, so the device has
401         * a way to figure out if it needs to interrupt.
402         */
403        int descLeft() const
404        {
405            int left = unusedCache.size();
406            if (cachePnt - descTail() >= 0)
407                left += (cachePnt - descTail());
408            else
409                left += (descTail() - cachePnt);
410
411            return left;
412        }
413
414        /* Return the number of descriptors used and not written back.
415         */
416        int descUsed() const { return usedCache.size(); }
417
418        /* Return the number of cache unused descriptors we have. */
419        int descUnused() const {return unusedCache.size(); }
420
421        /* Get into a state where the descriptor address/head/etc colud be
422         * changed */
423        void reset()
424        {
425            DPRINTF(EthernetDesc, "Reseting descriptor cache\n");
426            for (int x = 0; x < usedCache.size(); x++)
427                delete usedCache[x];
428            for (int x = 0; x < unusedCache.size(); x++)
429                delete unusedCache[x];
430
431            usedCache.clear();
432            unusedCache.clear();
433
434            cachePnt = 0;
435
436        }
437
438        virtual void serialize(std::ostream &os)
439        {
440            SERIALIZE_SCALAR(cachePnt);
441            SERIALIZE_SCALAR(curFetching);
442            SERIALIZE_SCALAR(wbOut);
443            SERIALIZE_SCALAR(moreToWb);
444            SERIALIZE_SCALAR(wbAlignment);
445
446            int usedCacheSize = usedCache.size();
447            SERIALIZE_SCALAR(usedCacheSize);
448            for(int x = 0; x < usedCacheSize; x++) {
449                arrayParamOut(os, csprintf("usedCache_%d", x),
450                        (uint8_t*)usedCache[x],sizeof(T));
451            }
452
453            int unusedCacheSize = unusedCache.size();
454            SERIALIZE_SCALAR(unusedCacheSize);
455            for(int x = 0; x < unusedCacheSize; x++) {
456                arrayParamOut(os, csprintf("unusedCache_%d", x),
457                        (uint8_t*)unusedCache[x],sizeof(T));
458            }
459        }
460
461        virtual void unserialize(Checkpoint *cp, const std::string &section)
462        {
463            UNSERIALIZE_SCALAR(cachePnt);
464            UNSERIALIZE_SCALAR(curFetching);
465            UNSERIALIZE_SCALAR(wbOut);
466            UNSERIALIZE_SCALAR(moreToWb);
467            UNSERIALIZE_SCALAR(wbAlignment);
468
469            int usedCacheSize;
470            UNSERIALIZE_SCALAR(usedCacheSize);
471            T *temp;
472            for(int x = 0; x < usedCacheSize; x++) {
473                temp = new T;
474                arrayParamIn(cp, section, csprintf("usedCache_%d", x),
475                        (uint8_t*)temp,sizeof(T));
476                usedCache.push_back(temp);
477            }
478
479            int unusedCacheSize;
480            UNSERIALIZE_SCALAR(unusedCacheSize);
481            for(int x = 0; x < unusedCacheSize; x++) {
482                temp = new T;
483                arrayParamIn(cp, section, csprintf("unusedCache_%d", x),
484                        (uint8_t*)temp,sizeof(T));
485                unusedCache.push_back(temp);
486            }
487        }
488        virtual bool hasOutstandingEvents() {
489            return wbEvent.scheduled() || fetchEvent.scheduled();
490        }
491
492     };
493
494
495    class RxDescCache : public DescCache<iGbReg::RxDesc>
496    {
497      protected:
498        virtual Addr descBase() const { return igbe->regs.rdba(); }
499        virtual long descHead() const { return igbe->regs.rdh(); }
500        virtual long descLen() const { return igbe->regs.rdlen() >> 4; }
501        virtual long descTail() const { return igbe->regs.rdt(); }
502        virtual void updateHead(long h) { igbe->regs.rdh(h); }
503        virtual void enableSm();
504
505        bool pktDone;
506
507      public:
508        RxDescCache(IGbE *i, std::string n, int s);
509
510        /** Write the given packet into the buffer(s) pointed to by the
511         * descriptor and update the book keeping. Should only be called when
512         * there are no dma's pending.
513         * @param packet ethernet packet to write
514         * @return if the packet could be written (there was a free descriptor)
515         */
516        bool writePacket(EthPacketPtr packet);
517        /** Called by event when dma to write packet is completed
518         */
519        void pktComplete();
520
521        /** Check if the dma on the packet has completed.
522         */
523
524        bool packetDone();
525
526        EventWrapper<RxDescCache, &RxDescCache::pktComplete> pktEvent;
527
528        virtual bool hasOutstandingEvents();
529
530        virtual void serialize(std::ostream &os);
531        virtual void unserialize(Checkpoint *cp, const std::string &section);
532    };
533    friend class RxDescCache;
534
535    RxDescCache rxDescCache;
536
537    class TxDescCache  : public DescCache<iGbReg::TxDesc>
538    {
539      protected:
540        virtual Addr descBase() const { return igbe->regs.tdba(); }
541        virtual long descHead() const { return igbe->regs.tdh(); }
542        virtual long descTail() const { return igbe->regs.tdt(); }
543        virtual long descLen() const { return igbe->regs.tdlen() >> 4; }
544        virtual void updateHead(long h) { igbe->regs.tdh(h); }
545        virtual void enableSm();
546        virtual void intAfterWb() const { igbe->postInterrupt(iGbReg::IT_TXDW);}
547
548        bool pktDone;
549        bool isTcp;
550        bool pktWaiting;
551
552      public:
553        TxDescCache(IGbE *i, std::string n, int s);
554
555        /** Tell the cache to DMA a packet from main memory into its buffer and
556         * return the size the of the packet to reserve space in tx fifo.
557         * @return size of the packet
558         */
559        int getPacketSize();
560        void getPacketData(EthPacketPtr p);
561
562        /** Ask if the packet has been transfered so the state machine can give
563         * it to the fifo.
564         * @return packet available in descriptor cache
565         */
566        bool packetAvailable();
567
568        /** Ask if we are still waiting for the packet to be transfered.
569         * @return packet still in transit.
570         */
571        bool packetWaiting() { return pktWaiting; }
572
573        /** Called by event when dma to write packet is completed
574         */
575        void pktComplete();
576        EventWrapper<TxDescCache, &TxDescCache::pktComplete> pktEvent;
577
578        virtual bool hasOutstandingEvents();
579
580        virtual void serialize(std::ostream &os);
581        virtual void unserialize(Checkpoint *cp, const std::string &section);
582
583    };
584    friend class TxDescCache;
585
586    TxDescCache txDescCache;
587
588  public:
589    typedef IGbEParams Params;
590    const Params *
591    params() const
592    {
593        return dynamic_cast<const Params *>(_params);
594    }
595    IGbE(Params *params);
596    ~IGbE() {}
597
598    Tick clock;
599    inline Tick cycles(int numCycles) const { return numCycles * clock; }
600
601    virtual Tick read(PacketPtr pkt);
602    virtual Tick write(PacketPtr pkt);
603
604    virtual Tick writeConfig(PacketPtr pkt);
605
606    bool ethRxPkt(EthPacketPtr packet);
607    void ethTxDone();
608
609    void setEthInt(IGbEInt *i) { assert(!etherInt); etherInt = i; }
610
611    virtual void serialize(std::ostream &os);
612    virtual void unserialize(Checkpoint *cp, const std::string &section);
613    virtual unsigned int drain(Event *de);
614    virtual void resume();
615
616};
617
618class IGbEInt : public EtherInt
619{
620  private:
621    IGbE *dev;
622
623  public:
624    IGbEInt(const std::string &name, IGbE *d)
625        : EtherInt(name), dev(d)
626        { dev->setEthInt(this); }
627
628    virtual bool recvPacket(EthPacketPtr pkt) { return dev->ethRxPkt(pkt); }
629    virtual void sendDone() { dev->ethTxDone(); }
630};
631



#endif //__DEV_I8254XGBE_HH__