/*
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 */

/* @file
 * Device model for Intel's 8254x line of gigabit ethernet controllers.
 */
#ifndef __DEV_I8254XGBE_HH__
#define __DEV_I8254XGBE_HH__

#include <deque>
#include <string>

#include "base/inet.hh"
#include "dev/etherdevice.hh"
#include "dev/etherint.hh"
#include "dev/etherpkt.hh"
#include "dev/i8254xGBe_defs.hh"
#include "dev/pcidev.hh"
#include "dev/pktfifo.hh"
#include "params/IGbE.hh"
#include "sim/eventq.hh"

class IGbEInt;
5312855Sgabeblack@google.comclass IGbE : public EtherDevice
5412855Sgabeblack@google.com{
5512855Sgabeblack@google.com  private:
5612855Sgabeblack@google.com    IGbEInt *etherInt;
57
58    // device registers
59    iGbReg::Regs regs;
60
61    // eeprom data, status and control bits
62    int eeOpBits, eeAddrBits, eeDataBits;
63    uint8_t eeOpcode, eeAddr;
64    uint16_t flash[iGbReg::EEPROM_SIZE];
65
66    // The drain event if we have one
67    Event *drainEvent;
68
69    // cached parameters from params struct
70    bool useFlowControl;
71
72    // packet fifos
73    PacketFifo rxFifo;
74    PacketFifo txFifo;
75
76    // Packet that we are currently putting into the txFifo
77    EthPacketPtr txPacket;
78
79    // Should to Rx/Tx State machine tick?
80    bool rxTick;
81    bool txTick;
82    bool txFifoTick;
83
84    bool rxDmaPacket;
85
86    // Delays in managaging descriptors
87    Tick fetchDelay, wbDelay;
88    Tick fetchCompDelay, wbCompDelay;
89    Tick rxWriteDelay, txReadDelay;
90
91    // Event and function to deal with RDTR timer expiring
92    void rdtrProcess() {
93        rxDescCache.writeback(0);
94        DPRINTF(EthernetIntr, "Posting RXT interrupt because RDTR timer expired\n");
95        postInterrupt(iGbReg::IT_RXT);
96    }
97
98    //friend class EventWrapper<IGbE, &IGbE::rdtrProcess>;
99    EventWrapper<IGbE, &IGbE::rdtrProcess> rdtrEvent;
100
101    // Event and function to deal with RADV timer expiring
102    void radvProcess() {
103        rxDescCache.writeback(0);
104        DPRINTF(EthernetIntr, "Posting RXT interrupt because RADV timer expired\n");
105        postInterrupt(iGbReg::IT_RXT);
106    }
107
108    //friend class EventWrapper<IGbE, &IGbE::radvProcess>;
109    EventWrapper<IGbE, &IGbE::radvProcess> radvEvent;
110
111    // Event and function to deal with TADV timer expiring
112    void tadvProcess() {
113        txDescCache.writeback(0);
114        DPRINTF(EthernetIntr, "Posting TXDW interrupt because TADV timer expired\n");
115        postInterrupt(iGbReg::IT_TXDW);
116    }
117
118    //friend class EventWrapper<IGbE, &IGbE::tadvProcess>;
119    EventWrapper<IGbE, &IGbE::tadvProcess> tadvEvent;
120
121    // Event and function to deal with TIDV timer expiring
122    void tidvProcess() {
123        txDescCache.writeback(0);
124        DPRINTF(EthernetIntr, "Posting TXDW interrupt because TIDV timer expired\n");
125        postInterrupt(iGbReg::IT_TXDW);
126    }
127    //friend class EventWrapper<IGbE, &IGbE::tidvProcess>;
128    EventWrapper<IGbE, &IGbE::tidvProcess> tidvEvent;
129
130    // Main event to tick the device
131    void tick();
132    //friend class EventWrapper<IGbE, &IGbE::tick>;
133    EventWrapper<IGbE, &IGbE::tick> tickEvent;
134
135
136    void rxStateMachine();
137    void txStateMachine();
138    void txWire();
139
140    /** Write an interrupt into the interrupt pending register and check mask
141     * and interrupt limit timer before sending interrupt to CPU
142     * @param t the type of interrupt we are posting
143     * @param now should we ignore the interrupt limiting timer
144     */
145    void postInterrupt(iGbReg::IntTypes t, bool now = false);
146
147    /** Check and see if changes to the mask register have caused an interrupt
148     * to need to be sent or perhaps removed an interrupt cause.
149     */
150    void chkInterrupt();
151
152    /** Send an interrupt to the cpu
153     */
154    void delayIntEvent();
155    void cpuPostInt();
156    // Event to moderate interrupts
157    EventWrapper<IGbE, &IGbE::delayIntEvent> interEvent;
158
159    /** Clear the interupt line to the cpu
160     */
161    void cpuClearInt();
162
163    Tick intClock() { return Clock::Int::ns * 1024; }
164
165    /** This function is used to restart the clock so it can handle things like
166     * draining and resume in one place. */
167    void restartClock();
168
169    /** Check if all the draining things that need to occur have occured and
170     * handle the drain event if so.
171     */
172    void checkDrain();
173
174    template<class T>
175    class DescCache
176    {
177      protected:
178        virtual Addr descBase() const = 0;
179        virtual long descHead() const = 0;
180        virtual long descTail() const = 0;
181        virtual long descLen() const = 0;
182        virtual void updateHead(long h) = 0;
183        virtual void enableSm() = 0;
184        virtual void intAfterWb() const {}
185        virtual void fetchAfterWb() = 0;
186
187        std::deque<T*> usedCache;
188        std::deque<T*> unusedCache;
189
190        T *fetchBuf;
191        T *wbBuf;
192
193        // Pointer to the device we cache for
194        IGbE *igbe;
195
196        // Name of this  descriptor cache
197        std::string _name;
198
199        // How far we've cached
200        int cachePnt;
201
202        // The size of the descriptor cache
203        int size;
204
205        // How many descriptors we are currently fetching
206        int curFetching;
207
208        // How many descriptors we are currently writing back
209        int wbOut;
210
211        // if the we wrote back to the end of the descriptor ring and are going
212        // to have to wrap and write more
213        bool moreToWb;
214
215        // What the alignment is of the next descriptor writeback
216        Addr wbAlignment;
217
218       /** The packet that is currently being dmad to memory if any
219         */
220        EthPacketPtr pktPtr;
221
222      public:
223        DescCache(IGbE *i, const std::string n, int s)
224            : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0), wbOut(0),
225              pktPtr(NULL), wbDelayEvent(this), fetchDelayEvent(this),
226              fetchEvent(this), wbEvent(this)
227        {
228            fetchBuf = new T[size];
229            wbBuf = new T[size];
230        }
231
232        virtual ~DescCache()
233        {
234            reset();
235        }
236
237        std::string name() { return _name; }
238
239        /** If the address/len/head change when we've got descriptors that are
240         * dirty that is very bad. This function checks that we don't and if we
241         * do panics.
242         */
243        void areaChanged()
244        {
245            if (usedCache.size() > 0 || curFetching || wbOut)
246                panic("Descriptor Address, Length or Head changed. Bad\n");
247            reset();
248
249        }
250
251        void writeback(Addr aMask)
252        {
253            if (wbOut) {
254                if (aMask < wbAlignment) {
255                    moreToWb = true;
256                    wbAlignment = aMask;
257                }
258                return;
259            }
260
261            wbAlignment = aMask;
262            if (!wbDelayEvent.scheduled())
263                wbDelayEvent.schedule(igbe->wbDelay + curTick);
264        }
265
266        void writeback1()
267        {
268            int curHead = descHead();
269            int max_to_wb = usedCache.size();
270
271            DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
272                    "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
273                    curHead, descTail(), descLen(), cachePnt, max_to_wb,
274                    descLeft());
275
276            if (max_to_wb + curHead >= descLen()) {
277                max_to_wb = descLen() - curHead;
278                moreToWb = true;
279                // this is by definition aligned correctly
280            } else if (wbAlignment != 0) {
281                // align the wb point to the mask
282                max_to_wb = max_to_wb & ~wbAlignment;
283            }
284
285            DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);
286
287            if (max_to_wb <= 0 || wbOut)
288                return;
289
290            wbOut = max_to_wb;
291
292            for (int x = 0; x < wbOut; x++) {
293                assert(usedCache.size());
294                memcpy(&wbBuf[x], usedCache[0], sizeof(T));
295                delete usedCache[0];
296                usedCache.pop_front();
297            }
298
299
300            assert(wbOut);
301            igbe->dmaWrite(igbe->platform->pciToDma(descBase() + curHead * sizeof(T)),
302                    wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf,
303                    igbe->wbCompDelay);
304        }
305        EventWrapper<DescCache, &DescCache::writeback1> wbDelayEvent;
306
307        /** Fetch a chunk of descriptors into the descriptor cache.
308         * Calls fetchComplete when the memory system returns the data
309         */
310
311        void fetchDescriptors()
312        {
313            if (!fetchDelayEvent.scheduled())
314                fetchDelayEvent.schedule(igbe->fetchDelay + curTick);
315        }
316
317        void fetchDescriptors1()
318        {
319            size_t max_to_fetch;
320
321            if (curFetching)
322                return;
323
324            if (descTail() >= cachePnt)
325                max_to_fetch = descTail() - cachePnt;
326            else
327                max_to_fetch = descLen() - cachePnt;
328
329            size_t free_cache = size - usedCache.size() - unusedCache.size();
330
331            max_to_fetch = std::min(max_to_fetch, free_cache);
332
333            DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
334                    "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
335                    descHead(), descTail(), descLen(), cachePnt,
336                    max_to_fetch, descLeft());
337
338            // Nothing to do
339            if (max_to_fetch == 0)
340                return;
341
342            // So we don't have two descriptor fetches going on at once
343            curFetching = max_to_fetch;
344
345            DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
346                    descBase() + cachePnt * sizeof(T),
347                    igbe->platform->pciToDma(descBase() + cachePnt * sizeof(T)),
348                    curFetching * sizeof(T));
349            assert(curFetching);
350            igbe->dmaRead(igbe->platform->pciToDma(descBase() + cachePnt * sizeof(T)),
351                    curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf,
352                    igbe->fetchCompDelay);
353        }
354
355        EventWrapper<DescCache, &DescCache::fetchDescriptors1> fetchDelayEvent;
356
357        /** Called by event when dma to read descriptors is completed
358         */
359        void fetchComplete()
360        {
361            T *newDesc;
362            for (int x = 0; x < curFetching; x++) {
363                newDesc = new T;
364                memcpy(newDesc, &fetchBuf[x], sizeof(T));
365                unusedCache.push_back(newDesc);
366            }
367
368#ifndef NDEBUG
369            int oldCp = cachePnt;
370#endif
371
372            cachePnt += curFetching;
373            assert(cachePnt <= descLen());
374            if (cachePnt == descLen())
375                cachePnt = 0;
376
377            curFetching = 0;
378
379            DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
380                    oldCp, cachePnt);
381
382            enableSm();
383            igbe->checkDrain();
384        }
385
386        EventWrapper<DescCache, &DescCache::fetchComplete> fetchEvent;
387
388        /** Called by event when dma to writeback descriptors is completed
389         */
390        void wbComplete()
391        {
392
393            long  curHead = descHead();
394#ifndef NDEBUG
395            long oldHead = curHead;
396#endif
397
398            curHead += wbOut;
399            wbOut = 0;
400
401            if (curHead >= descLen())
402                curHead -= descLen();
403
404            // Update the head
405            updateHead(curHead);
406
407            DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
408                    oldHead, curHead);
409
410            // If we still have more to wb, call wb now
411            intAfterWb();
412            if (moreToWb) {
413                moreToWb = false;
414                DPRINTF(EthernetDesc, "Writeback has more todo\n");
415                writeback(wbAlignment);
416            }
417
418            if (!wbOut) {
419                igbe->checkDrain();
420            }
421            fetchAfterWb();
422        }
423
424
425        EventWrapper<DescCache, &DescCache::wbComplete> wbEvent;
426
427        /* Return the number of descriptors left in the ring, so the device has
428         * a way to figure out if it needs to interrupt.
429         */
430        int descLeft() const
431        {
432            int left = unusedCache.size();
433            if (cachePnt - descTail() >= 0)
434                left += (cachePnt - descTail());
435            else
436                left += (descTail() - cachePnt);
437
438            return left;
439        }
440
441        /* Return the number of descriptors used and not written back.
442         */
443        int descUsed() const { return usedCache.size(); }
444
445        /* Return the number of cache unused descriptors we have. */
446        int descUnused() const {return unusedCache.size(); }
447
448        /* Get into a state where the descriptor address/head/etc colud be
449         * changed */
450        void reset()
451        {
452            DPRINTF(EthernetDesc, "Reseting descriptor cache\n");
453            for (int x = 0; x < usedCache.size(); x++)
454                delete usedCache[x];
455            for (int x = 0; x < unusedCache.size(); x++)
456                delete unusedCache[x];
457
458            usedCache.clear();
459            unusedCache.clear();
460
461            cachePnt = 0;
462
463        }
464
465        virtual void serialize(std::ostream &os)
466        {
467            SERIALIZE_SCALAR(cachePnt);
468            SERIALIZE_SCALAR(curFetching);
469            SERIALIZE_SCALAR(wbOut);
470            SERIALIZE_SCALAR(moreToWb);
471            SERIALIZE_SCALAR(wbAlignment);
472
473            int usedCacheSize = usedCache.size();
474            SERIALIZE_SCALAR(usedCacheSize);
475            for(int x = 0; x < usedCacheSize; x++) {
476                arrayParamOut(os, csprintf("usedCache_%d", x),
477                        (uint8_t*)usedCache[x],sizeof(T));
478            }
479
480            int unusedCacheSize = unusedCache.size();
481            SERIALIZE_SCALAR(unusedCacheSize);
482            for(int x = 0; x < unusedCacheSize; x++) {
483                arrayParamOut(os, csprintf("unusedCache_%d", x),
484                        (uint8_t*)unusedCache[x],sizeof(T));
485            }
486
487            Tick fetch_delay = 0, wb_delay = 0;
488            if (fetchDelayEvent.scheduled())
489                fetch_delay = fetchDelayEvent.when();
490            SERIALIZE_SCALAR(fetch_delay);
491            if (wbDelayEvent.scheduled())
492                wb_delay = wbDelayEvent.when();
493            SERIALIZE_SCALAR(wb_delay);
494
495
496        }
497
498        virtual void unserialize(Checkpoint *cp, const std::string &section)
499        {
500            UNSERIALIZE_SCALAR(cachePnt);
501            UNSERIALIZE_SCALAR(curFetching);
502            UNSERIALIZE_SCALAR(wbOut);
503            UNSERIALIZE_SCALAR(moreToWb);
504            UNSERIALIZE_SCALAR(wbAlignment);
505
506            int usedCacheSize;
507            UNSERIALIZE_SCALAR(usedCacheSize);
508            T *temp;
509            for(int x = 0; x < usedCacheSize; x++) {
510                temp = new T;
511                arrayParamIn(cp, section, csprintf("usedCache_%d", x),
512                        (uint8_t*)temp,sizeof(T));
513                usedCache.push_back(temp);
514            }
515
516            int unusedCacheSize;
517            UNSERIALIZE_SCALAR(unusedCacheSize);
518            for(int x = 0; x < unusedCacheSize; x++) {
519                temp = new T;
520                arrayParamIn(cp, section, csprintf("unusedCache_%d", x),
521                        (uint8_t*)temp,sizeof(T));
522                unusedCache.push_back(temp);
523            }
524            Tick fetch_delay = 0, wb_delay = 0;
525            UNSERIALIZE_SCALAR(fetch_delay);
526            UNSERIALIZE_SCALAR(wb_delay);
527            if (fetch_delay)
528                fetchDelayEvent.schedule(fetch_delay);
529            if (wb_delay)
530                wbDelayEvent.schedule(wb_delay);
531
532
533        }
534        virtual bool hasOutstandingEvents() {
535            return wbEvent.scheduled() || fetchEvent.scheduled();
536        }
537
538     };
539
540
541    class RxDescCache : public DescCache<iGbReg::RxDesc>
542    {
543      protected:
544        virtual Addr descBase() const { return igbe->regs.rdba(); }
545        virtual long descHead() const { return igbe->regs.rdh(); }
546        virtual long descLen() const { return igbe->regs.rdlen() >> 4; }
547        virtual long descTail() const { return igbe->regs.rdt(); }
548        virtual void updateHead(long h) { igbe->regs.rdh(h); }
549        virtual void enableSm();
550        virtual void fetchAfterWb() {
551            if (!igbe->rxTick && igbe->getState() == SimObject::Running)
552                fetchDescriptors();
553        }
554
555        bool pktDone;
556
557      public:
558        RxDescCache(IGbE *i, std::string n, int s);
559
560        /** Write the given packet into the buffer(s) pointed to by the
561         * descriptor and update the book keeping. Should only be called when
562         * there are no dma's pending.
563         * @param packet ethernet packet to write
564         * @return if the packet could be written (there was a free descriptor)
565         */
566        void writePacket(EthPacketPtr packet);
567        /** Called by event when dma to write packet is completed
568         */
569        void pktComplete();
570
571        /** Check if the dma on the packet has completed.
572         */
573
574        bool packetDone();
575
576        EventWrapper<RxDescCache, &RxDescCache::pktComplete> pktEvent;
577
578        virtual bool hasOutstandingEvents();
579
580        virtual void serialize(std::ostream &os);
581        virtual void unserialize(Checkpoint *cp, const std::string &section);
582    };
583    friend class RxDescCache;
584
585    RxDescCache rxDescCache;
586
587    class TxDescCache  : public DescCache<iGbReg::TxDesc>
588    {
589      protected:
590        virtual Addr descBase() const { return igbe->regs.tdba(); }
591        virtual long descHead() const { return igbe->regs.tdh(); }
592        virtual long descTail() const { return igbe->regs.tdt(); }
593        virtual long descLen() const { return igbe->regs.tdlen() >> 4; }
594        virtual void updateHead(long h) { igbe->regs.tdh(h); }
595        virtual void enableSm();
596        virtual void intAfterWb() const { igbe->postInterrupt(iGbReg::IT_TXDW); }
597        virtual void fetchAfterWb() {
598            if (!igbe->txTick && igbe->getState() == SimObject::Running)
599                fetchDescriptors();
600        }
601
602        bool pktDone;
603        bool isTcp;
604        bool pktWaiting;
605        bool pktMultiDesc;
606
607      public:
608        TxDescCache(IGbE *i, std::string n, int s);
609
610        /** Tell the cache to DMA a packet from main memory into its buffer and
611         * return the size the of the packet to reserve space in tx fifo.
612         * @return size of the packet
613         */
614        int getPacketSize();
615        void getPacketData(EthPacketPtr p);
616
617        /** Ask if the packet has been transfered so the state machine can give
618         * it to the fifo.
619         * @return packet available in descriptor cache
620         */
621        bool packetAvailable();
622
623        /** Ask if we are still waiting for the packet to be transfered.
624         * @return packet still in transit.
625         */
626        bool packetWaiting() { return pktWaiting; }
627
628        /** Ask if this packet is composed of multiple descriptors
629         * so even if we've got data, we need to wait for more before
630         * we can send it out.
631         * @return packet can't be sent out because it's a multi-descriptor
632         * packet
633         */
634        bool packetMultiDesc() { return pktMultiDesc;}
635
636        /** Called by event when dma to write packet is completed
637         */
638        void pktComplete();
639        EventWrapper<TxDescCache, &TxDescCache::pktComplete> pktEvent;
640
641        virtual bool hasOutstandingEvents();
642
643        virtual void serialize(std::ostream &os);
644        virtual void unserialize(Checkpoint *cp, const std::string &section);
645
646    };
647    friend class TxDescCache;
648
649    TxDescCache txDescCache;
650
651  public:
652    typedef IGbEParams Params;
653    const Params *
654    params() const
655    {
656        return dynamic_cast<const Params *>(_params);
657    }
658    IGbE(const Params *params);
659    ~IGbE() {}
660
661    virtual EtherInt *getEthPort(const std::string &if_name, int idx);
662
663    Tick clock;
664    Tick lastInterrupt;
665    inline Tick ticks(int numCycles) const { return numCycles * clock; }
666
667    virtual Tick read(PacketPtr pkt);
668    virtual Tick write(PacketPtr pkt);
669
670    virtual Tick writeConfig(PacketPtr pkt);
671
672    bool ethRxPkt(EthPacketPtr packet);
673    void ethTxDone();
674
675    virtual void serialize(std::ostream &os);
676    virtual void unserialize(Checkpoint *cp, const std::string &section);
677    virtual unsigned int drain(Event *de);
678    virtual void resume();
679
680};
682class IGbEInt : public EtherInt
683{
684  private:
685    IGbE *dev;
686
687  public:
688    IGbEInt(const std::string &name, IGbE *d)
689        : EtherInt(name), dev(d)
690    { }
691
692    virtual bool recvPacket(EthPacketPtr pkt) { return dev->ethRxPkt(pkt); }
693    virtual void sendDone() { dev->ethTxDone(); }
694};




#endif //__DEV_I8254XGBE_HH__
