/*
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 */

/* @file
 * Device model for Intel's 8254x line of gigabit ethernet controllers.
 */

#ifndef __DEV_I8254XGBE_HH__
#define __DEV_I8254XGBE_HH__

#include <deque>
#include <string>

#include "base/inet.hh"
#include "dev/etherdevice.hh"
#include "dev/etherint.hh"
#include "dev/etherpkt.hh"
#include "dev/i8254xGBe_defs.hh"
#include "dev/pcidev.hh"
#include "dev/pktfifo.hh"
#include "params/IGbE.hh"
#include "sim/eventq.hh"
class IGbEInt;

class IGbE : public EtherDevice
{
  private:
    IGbEInt *etherInt;

    // device registers
    iGbReg::Regs regs;

    // eeprom data, status and control bits
    int eeOpBits, eeAddrBits, eeDataBits;
    uint8_t eeOpcode, eeAddr;
    uint16_t flash[iGbReg::EEPROM_SIZE];

    // The drain event if we have one
    Event *drainEvent;

    // cached parameters from params struct
    bool useFlowControl;

    // packet fifos
    PacketFifo rxFifo;
    PacketFifo txFifo;

    // Packet that we are currently putting into the txFifo
    EthPacketPtr txPacket;

    // Should the Rx/Tx state machines tick?
    bool rxTick;
    bool txTick;
    bool txFifoTick;

    bool rxDmaPacket;

    // Number of bytes copied from current RX packet
    int pktOffset;

    // Delays in managing descriptors
    Tick fetchDelay, wbDelay;
    Tick fetchCompDelay, wbCompDelay;
    Tick rxWriteDelay, txReadDelay;

    // Event and function to deal with RDTR timer expiring
    void rdtrProcess() {
        rxDescCache.writeback(0);
        DPRINTF(EthernetIntr, "Posting RXT interrupt because RDTR timer expired\n");
        postInterrupt(iGbReg::IT_RXT);
    }

    //friend class EventWrapper<IGbE, &IGbE::rdtrProcess>;
    EventWrapper<IGbE, &IGbE::rdtrProcess> rdtrEvent;

    // Event and function to deal with RADV timer expiring
    void radvProcess() {
        rxDescCache.writeback(0);
        DPRINTF(EthernetIntr, "Posting RXT interrupt because RADV timer expired\n");
        postInterrupt(iGbReg::IT_RXT);
    }

    //friend class EventWrapper<IGbE, &IGbE::radvProcess>;
    EventWrapper<IGbE, &IGbE::radvProcess> radvEvent;

    // Event and function to deal with TADV timer expiring
    void tadvProcess() {
        txDescCache.writeback(0);
        DPRINTF(EthernetIntr, "Posting TXDW interrupt because TADV timer expired\n");
        postInterrupt(iGbReg::IT_TXDW);
    }

    //friend class EventWrapper<IGbE, &IGbE::tadvProcess>;
    EventWrapper<IGbE, &IGbE::tadvProcess> tadvEvent;

    // Event and function to deal with TIDV timer expiring
    void tidvProcess() {
        txDescCache.writeback(0);
        DPRINTF(EthernetIntr, "Posting TXDW interrupt because TIDV timer expired\n");
        postInterrupt(iGbReg::IT_TXDW);
    }
    //friend class EventWrapper<IGbE, &IGbE::tidvProcess>;
    EventWrapper<IGbE, &IGbE::tidvProcess> tidvEvent;

    // Main event to tick the device
    void tick();
    //friend class EventWrapper<IGbE, &IGbE::tick>;
    EventWrapper<IGbE, &IGbE::tick> tickEvent;


    uint64_t macAddr;

    void rxStateMachine();
    void txStateMachine();
    void txWire();

    /** Write an interrupt into the interrupt pending register and check mask
     * and interrupt limit timer before sending interrupt to CPU
     * @param t the type of interrupt we are posting
     * @param now should we ignore the interrupt limiting timer
     */
    void postInterrupt(iGbReg::IntTypes t, bool now = false);
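
    // Illustrative usage (added note, not part of the original source): the
    // timer handlers above post moderated interrupts, e.g.
    //     postInterrupt(iGbReg::IT_RXT);        // respect the interrupt limit timer
    //     postInterrupt(iGbReg::IT_RXT, true);  // bypass the timer, assert immediately
    // The first form is what rdtrProcess()/radvProcess() use; the second is
    // presumably reserved for causes that must reach the CPU right away.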

    /** Check whether changes to the mask register require an interrupt to be
     * sent to the CPU or remove the cause of a pending interrupt.
     */
    void chkInterrupt();

    /** Send an interrupt to the CPU
     */
    void delayIntEvent();
    void cpuPostInt();
    // Event to moderate interrupts
    EventWrapper<IGbE, &IGbE::delayIntEvent> interEvent;

    /** Clear the interrupt line to the CPU
     */
    void cpuClearInt();

    Tick intClock() { return Clock::Int::ns * 1024; }
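
    // Added note: the 1024 ns interval presumably models the 1.024 us
    // granularity the 8254x uses for its interrupt moderation/delay timers
    // (e.g. RDTR/RADV/TIDV/TADV); delay register values would be scaled by
    // intClock() when the events above are scheduled.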

    /** Restart the clock so that draining and resume can be handled in one
     * place. */
    void restartClock();

    /** Check if everything that must drain has drained and, if so, handle
     * the drain event.
     */
    void checkDrain();

    template<class T>
    class DescCache
    {
      protected:
        virtual Addr descBase() const = 0;
        virtual long descHead() const = 0;
        virtual long descTail() const = 0;
        virtual long descLen() const = 0;
        virtual void updateHead(long h) = 0;
        virtual void enableSm() = 0;
        virtual void actionAfterWb() {}
        virtual void fetchAfterWb() = 0;

        std::deque<T*> usedCache;
        std::deque<T*> unusedCache;

        T *fetchBuf;
        T *wbBuf;

        // Pointer to the device we cache for
        IGbE *igbe;

        // Name of this descriptor cache
        std::string _name;

        // How far we've cached
        int cachePnt;

        // The size of the descriptor cache
        int size;

        // How many descriptors we are currently fetching
        int curFetching;

        // How many descriptors we are currently writing back
        int wbOut;

        // If we wrote back to the end of the descriptor ring and are going
        // to have to wrap and write more
        bool moreToWb;

        // What the alignment is of the next descriptor writeback
        Addr wbAlignment;

        /** The packet that is currently being DMA'd to memory, if any
         */
        EthPacketPtr pktPtr;

      public:
        DescCache(IGbE *i, const std::string n, int s)
            : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0), wbOut(0),
              pktPtr(NULL), wbDelayEvent(this), fetchDelayEvent(this),
              fetchEvent(this), wbEvent(this)
        {
            fetchBuf = new T[size];
            wbBuf = new T[size];
        }

        virtual ~DescCache()
        {
            reset();
        }

        std::string name() { return _name; }

        /** It is very bad if the address, length, or head change while we
         * have dirty descriptors. This function checks for that and panics
         * if it happens.
         */
        void areaChanged()
        {
            if (usedCache.size() > 0 || curFetching || wbOut)
                panic("Descriptor Address, Length or Head changed. Bad\n");
            reset();

        }

        void writeback(Addr aMask)
        {
            int curHead = descHead();
            int max_to_wb = usedCache.size();

            // Check if this writeback is less restrictive than the previous
            // one and, if so, set up another one immediately following it
            if (wbOut) {
                if (aMask < wbAlignment) {
                    moreToWb = true;
                    wbAlignment = aMask;
                }
                DPRINTF(EthernetDesc, "Writing back already in process, returning\n");
                return;
            }

            moreToWb = false;
            wbAlignment = aMask;


            DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
                    "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
                    curHead, descTail(), descLen(), cachePnt, max_to_wb,
                    descLeft());

            if (max_to_wb + curHead >= descLen()) {
                max_to_wb = descLen() - curHead;
                moreToWb = true;
                // this is by definition aligned correctly
            } else if (wbAlignment != 0) {
                // align the wb point to the mask
                max_to_wb = max_to_wb & ~wbAlignment;
            }

            DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);

            if (max_to_wb <= 0) {
                return;
            }

            wbOut = max_to_wb;

            assert(!wbDelayEvent.scheduled());
            igbe->schedule(wbDelayEvent, curTick + igbe->wbDelay);
        }
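
        // Worked example (added, illustrative only): suppose four descriptors
        // are used, no writeback is in flight, and aMask == 0x3.  Then
        // wbAlignment == 0x3 and max_to_wb & ~wbAlignment == 4 & ~0x3 == 4,
        // so all four are written back.  With only three used descriptors,
        // 3 & ~0x3 == 0 and nothing is scheduled until either more
        // descriptors are used or a less restrictive mask (e.g. 0) arrives.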

        void writeback1()
        {
            // If we're draining delay issuing this DMA
            if (igbe->drainEvent) {
                igbe->schedule(wbDelayEvent, curTick + igbe->wbDelay);
                return;
            }

            DPRINTF(EthernetDesc, "Beginning DMA of %d descriptors\n", wbOut);

            for (int x = 0; x < wbOut; x++) {
                assert(usedCache.size());
                memcpy(&wbBuf[x], usedCache[x], sizeof(T));
                //delete usedCache[0];
                //usedCache.pop_front();
            }

            assert(wbOut);
            igbe->dmaWrite(igbe->platform->pciToDma(descBase() + descHead() * sizeof(T)),
                    wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf,
                    igbe->wbCompDelay);
        }
        EventWrapper<DescCache, &DescCache::writeback1> wbDelayEvent;

        /** Fetch a chunk of descriptors into the descriptor cache.
         * Calls fetchComplete when the memory system returns the data
         */
        void fetchDescriptors()
        {
            size_t max_to_fetch;

            if (curFetching) {
                DPRINTF(EthernetDesc, "Currently fetching %d descriptors, returning\n", curFetching);
                return;
            }

            if (descTail() >= cachePnt)
                max_to_fetch = descTail() - cachePnt;
            else
                max_to_fetch = descLen() - cachePnt;

            size_t free_cache = size - usedCache.size() - unusedCache.size();

            max_to_fetch = std::min(max_to_fetch, free_cache);


            DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
                    "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
                    descHead(), descTail(), descLen(), cachePnt,
                    max_to_fetch, descLeft());

            // Nothing to do
            if (max_to_fetch == 0)
                return;

            // So we don't have two descriptor fetches going on at once
            curFetching = max_to_fetch;

            assert(!fetchDelayEvent.scheduled());
            igbe->schedule(fetchDelayEvent, curTick + igbe->fetchDelay);
        }
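
        // Worked example (added, illustrative only): with descLen() == 256,
        // cachePnt == 250 and descTail() == 10, the tail has already wrapped,
        // so only descLen() - cachePnt == 6 descriptors are fetched now; the
        // remaining ten are picked up by a later fetch after fetchComplete()
        // wraps cachePnt back to 0.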

        void fetchDescriptors1()
        {
            // If we're draining delay issuing this DMA
            if (igbe->drainEvent) {
                igbe->schedule(fetchDelayEvent, curTick + igbe->fetchDelay);
                return;
            }

            DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
                    descBase() + cachePnt * sizeof(T),
                    igbe->platform->pciToDma(descBase() + cachePnt * sizeof(T)),
                    curFetching * sizeof(T));
            assert(curFetching);
            igbe->dmaRead(igbe->platform->pciToDma(descBase() + cachePnt * sizeof(T)),
                    curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf,
                    igbe->fetchCompDelay);
        }

        EventWrapper<DescCache, &DescCache::fetchDescriptors1> fetchDelayEvent;

        /** Called by event when dma to read descriptors is completed
         */
        void fetchComplete()
        {
            T *newDesc;
            for (int x = 0; x < curFetching; x++) {
                newDesc = new T;
                memcpy(newDesc, &fetchBuf[x], sizeof(T));
                unusedCache.push_back(newDesc);
            }


#ifndef NDEBUG
            int oldCp = cachePnt;
#endif

            cachePnt += curFetching;
            assert(cachePnt <= descLen());
            if (cachePnt == descLen())
                cachePnt = 0;

            curFetching = 0;

            DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
                    oldCp, cachePnt);

            enableSm();
            igbe->checkDrain();
        }

        EventWrapper<DescCache, &DescCache::fetchComplete> fetchEvent;

        /** Called by event when dma to writeback descriptors is completed
         */
        void wbComplete()
        {

            long curHead = descHead();
#ifndef NDEBUG
            long oldHead = curHead;
#endif

            for (int x = 0; x < wbOut; x++) {
                assert(usedCache.size());
                delete usedCache[0];
                usedCache.pop_front();
            }

            curHead += wbOut;
            wbOut = 0;

            if (curHead >= descLen())
                curHead -= descLen();

            // Update the head
            updateHead(curHead);

            DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
                    oldHead, curHead);

            // If we still have more to wb, call wb now
            actionAfterWb();
            if (moreToWb) {
                moreToWb = false;
                DPRINTF(EthernetDesc, "Writeback has more to do\n");
                writeback(wbAlignment);
            }

            if (!wbOut) {
                igbe->checkDrain();
            }
            fetchAfterWb();
        }


        EventWrapper<DescCache, &DescCache::wbComplete> wbEvent;

        /* Return the number of descriptors left in the ring, so the device has
         * a way to figure out if it needs to interrupt.
         */
        int descLeft() const
        {
            int left = unusedCache.size();
            if (cachePnt >= descTail())
                left += (descLen() - cachePnt + descTail());
            else
                left += (descTail() - cachePnt);

            return left;
        }
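
        // Worked example (added, illustrative only): with descLen() == 256,
        // cachePnt == 100, descTail() == 40 and 8 descriptors cached but
        // unused, cachePnt >= descTail(), so
        // left == 8 + (256 - 100 + 40) == 204.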

        /* Return the number of descriptors used and not written back.
         */
        int descUsed() const { return usedCache.size(); }

        /* Return the number of cached descriptors that are still unused. */
        int descUnused() const { return unusedCache.size(); }

        /* Get into a state where the descriptor address/head/etc could be
         * changed */
        void reset()
        {
            DPRINTF(EthernetDesc, "Resetting descriptor cache\n");
            for (int x = 0; x < usedCache.size(); x++)
                delete usedCache[x];
            for (int x = 0; x < unusedCache.size(); x++)
                delete unusedCache[x];

            usedCache.clear();
            unusedCache.clear();

            cachePnt = 0;

        }

        virtual void serialize(std::ostream &os)
        {
            SERIALIZE_SCALAR(cachePnt);
            SERIALIZE_SCALAR(curFetching);
            SERIALIZE_SCALAR(wbOut);
            SERIALIZE_SCALAR(moreToWb);
            SERIALIZE_SCALAR(wbAlignment);

            int usedCacheSize = usedCache.size();
            SERIALIZE_SCALAR(usedCacheSize);
            for (int x = 0; x < usedCacheSize; x++) {
                arrayParamOut(os, csprintf("usedCache_%d", x),
                        (uint8_t*)usedCache[x], sizeof(T));
            }

            int unusedCacheSize = unusedCache.size();
            SERIALIZE_SCALAR(unusedCacheSize);
            for (int x = 0; x < unusedCacheSize; x++) {
                arrayParamOut(os, csprintf("unusedCache_%d", x),
                        (uint8_t*)unusedCache[x], sizeof(T));
            }

            Tick fetch_delay = 0, wb_delay = 0;
            if (fetchDelayEvent.scheduled())
                fetch_delay = fetchDelayEvent.when();
            SERIALIZE_SCALAR(fetch_delay);
            if (wbDelayEvent.scheduled())
                wb_delay = wbDelayEvent.when();
            SERIALIZE_SCALAR(wb_delay);
        }

        virtual void unserialize(Checkpoint *cp, const std::string &section)
        {
            UNSERIALIZE_SCALAR(cachePnt);
            UNSERIALIZE_SCALAR(curFetching);
            UNSERIALIZE_SCALAR(wbOut);
            UNSERIALIZE_SCALAR(moreToWb);
            UNSERIALIZE_SCALAR(wbAlignment);

            int usedCacheSize;
            UNSERIALIZE_SCALAR(usedCacheSize);
            T *temp;
            for (int x = 0; x < usedCacheSize; x++) {
                temp = new T;
                arrayParamIn(cp, section, csprintf("usedCache_%d", x),
                        (uint8_t*)temp, sizeof(T));
                usedCache.push_back(temp);
            }

            int unusedCacheSize;
            UNSERIALIZE_SCALAR(unusedCacheSize);
            for (int x = 0; x < unusedCacheSize; x++) {
                temp = new T;
                arrayParamIn(cp, section, csprintf("unusedCache_%d", x),
                        (uint8_t*)temp, sizeof(T));
                unusedCache.push_back(temp);
            }

            Tick fetch_delay = 0, wb_delay = 0;
            UNSERIALIZE_SCALAR(fetch_delay);
            UNSERIALIZE_SCALAR(wb_delay);
            if (fetch_delay)
                igbe->schedule(fetchDelayEvent, fetch_delay);
            if (wb_delay)
                igbe->schedule(wbDelayEvent, wb_delay);
        }

        virtual bool hasOutstandingEvents() {
            return wbEvent.scheduled() || fetchEvent.scheduled();
        }

    };


    class RxDescCache : public DescCache<iGbReg::RxDesc>
    {
      protected:
        virtual Addr descBase() const { return igbe->regs.rdba(); }
        virtual long descHead() const { return igbe->regs.rdh(); }
        virtual long descLen() const { return igbe->regs.rdlen() >> 4; }
        virtual long descTail() const { return igbe->regs.rdt(); }
        virtual void updateHead(long h) { igbe->regs.rdh(h); }
        virtual void enableSm();
        virtual void fetchAfterWb() {
            if (!igbe->rxTick && igbe->getState() == SimObject::Running)
                fetchDescriptors();
        }

        bool pktDone;

        /** Count used to coordinate the header/data completion events */
        int splitCount;

        /** Bytes of packet that have been copied, so we know when to set EOP */
        int bytesCopied;

      public:
        RxDescCache(IGbE *i, std::string n, int s);

        /** Write the given packet into the buffer(s) pointed to by the
         * descriptor and update the bookkeeping. Should only be called when
         * there are no DMAs pending.
         * @param packet ethernet packet to write
         * @param pkt_offset bytes already copied from the packet to memory
         * @return pkt_offset + number of bytes copied during this call
         */
        int writePacket(EthPacketPtr packet, int pkt_offset);
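
        // Illustrative call pattern (added, assumed rather than taken from
        // the original header): a packet larger than a single receive buffer
        // is copied over several calls, e.g.
        //     pktOffset = writePacket(pkt, 0);          // fill first buffer
        //     pktOffset = writePacket(pkt, pktOffset);  // continue the copy
        // until pktOffset reaches the packet length, at which point EOP is
        // set in the final descriptor (cf. bytesCopied above).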

        /** Called by event when dma to write packet is completed
         */
        void pktComplete();

        /** Check if the dma on the packet has completed and RX state machine
         * can continue
         */
        bool packetDone();

        EventWrapper<RxDescCache, &RxDescCache::pktComplete> pktEvent;

        // Event to handle issuing header and data write at the same time
        // and only calling pktComplete() when both are completed
        void pktSplitDone();
        EventWrapper<RxDescCache, &RxDescCache::pktSplitDone> pktHdrEvent;
        EventWrapper<RxDescCache, &RxDescCache::pktSplitDone> pktDataEvent;

        virtual bool hasOutstandingEvents();

        virtual void serialize(std::ostream &os);
        virtual void unserialize(Checkpoint *cp, const std::string &section);
    };
    friend class RxDescCache;

    RxDescCache rxDescCache;

    class TxDescCache : public DescCache<iGbReg::TxDesc>
    {
      protected:
        virtual Addr descBase() const { return igbe->regs.tdba(); }
        virtual long descHead() const { return igbe->regs.tdh(); }
        virtual long descTail() const { return igbe->regs.tdt(); }
        virtual long descLen() const { return igbe->regs.tdlen() >> 4; }
        virtual void updateHead(long h) { igbe->regs.tdh(h); }
        virtual void enableSm();
        virtual void actionAfterWb();
        virtual void fetchAfterWb() {
            if (!igbe->txTick && igbe->getState() == SimObject::Running)
                fetchDescriptors();
        }

        bool pktDone;
        bool isTcp;
        bool pktWaiting;
        bool pktMultiDesc;
        Addr completionAddress;
        bool completionEnabled;
        uint32_t descEnd;

        // tso variables
        bool useTso;
        Addr tsoHeaderLen;
        Addr tsoMss;
        Addr tsoTotalLen;
        Addr tsoUsedLen;
        Addr tsoPrevSeq;
        Addr tsoPktPayloadBytes;
        bool tsoLoadedHeader;
        bool tsoPktHasHeader;
        uint8_t tsoHeader[256];
        Addr tsoDescBytesUsed;
        Addr tsoCopyBytes;
        int tsoPkts;

      public:
        TxDescCache(IGbE *i, std::string n, int s);

        /** Tell the cache to DMA a packet from main memory into its buffer and
         * return the size of the packet so space can be reserved in the tx fifo.
         * @return size of the packet
         */
        int getPacketSize(EthPacketPtr p);
        void getPacketData(EthPacketPtr p);
        void processContextDesc();

        /** Return the number of descriptors in a cache block for threshold
         * operations.
         */
        int descInBlock(int num_desc) { return num_desc /
            igbe->cacheBlockSize() / sizeof(iGbReg::TxDesc); }

        /** Ask if the packet has been transferred so the state machine can give
         * it to the fifo.
         * @return packet available in descriptor cache
         */
        bool packetAvailable();

        /** Ask if we are still waiting for the packet to be transferred.
         * @return packet still in transit.
         */
        bool packetWaiting() { return pktWaiting; }

        /** Ask if this packet is composed of multiple descriptors
         * so even if we've got data, we need to wait for more before
         * we can send it out.
         * @return packet can't be sent out because it's a multi-descriptor
         * packet
         */
        bool packetMultiDesc() { return pktMultiDesc; }

        /** Called by event when dma to write packet is completed
         */
        void pktComplete();
        EventWrapper<TxDescCache, &TxDescCache::pktComplete> pktEvent;

        void headerComplete();
        EventWrapper<TxDescCache, &TxDescCache::headerComplete> headerEvent;


        void completionWriteback(Addr a, bool enabled) {
            DPRINTF(EthernetDesc, "Completion writeback Addr: %#x enabled: %d\n",
                    a, enabled);
            completionAddress = a;
            completionEnabled = enabled;
        }

        virtual bool hasOutstandingEvents();

        void nullCallback() { DPRINTF(EthernetDesc, "Completion writeback complete\n"); }
        EventWrapper<TxDescCache, &TxDescCache::nullCallback> nullEvent;

        virtual void serialize(std::ostream &os);
        virtual void unserialize(Checkpoint *cp, const std::string &section);

    };
    friend class TxDescCache;

    TxDescCache txDescCache;

  public:
    typedef IGbEParams Params;
    const Params *
    params() const
    {
        return dynamic_cast<const Params *>(_params);
    }
    IGbE(const Params *params);
    ~IGbE() {}

    virtual EtherInt *getEthPort(const std::string &if_name, int idx);

    Tick clock;
    Tick lastInterrupt;
    inline Tick ticks(int numCycles) const { return numCycles * clock; }

    virtual Tick read(PacketPtr pkt);
    virtual Tick write(PacketPtr pkt);

    virtual Tick writeConfig(PacketPtr pkt);

    bool ethRxPkt(EthPacketPtr packet);
    void ethTxDone();

    virtual void serialize(std::ostream &os);
    virtual void unserialize(Checkpoint *cp, const std::string &section);
    virtual unsigned int drain(Event *de);
    virtual void resume();

};

class IGbEInt : public EtherInt
{
  private:
    IGbE *dev;

  public:
    IGbEInt(const std::string &name, IGbE *d)
        : EtherInt(name), dev(d)
    { }

    virtual bool recvPacket(EthPacketPtr pkt) { return dev->ethRxPkt(pkt); }
    virtual void sendDone() { dev->ethTxDone(); }
};

#endif //__DEV_I8254XGBE_HH__