// i8254xGBe.hh revision 4981
1/*
2 * Copyright (c) 2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Ali Saidi
29 */
30
31/* @file
32 * Device model for Intel's 8254x line of gigabit ethernet controllers.
33 */
34
35#ifndef __DEV_I8254XGBE_HH__
36#define __DEV_I8254XGBE_HH__
37
38#include <deque>
39#include <string>
40
41#include "base/inet.hh"
42#include "base/statistics.hh"
43#include "dev/etherdevice.hh"
44#include "dev/etherint.hh"
45#include "dev/etherpkt.hh"
46#include "dev/i8254xGBe_defs.hh"
47#include "dev/pcidev.hh"
48#include "dev/pktfifo.hh"
49#include "params/IGbE.hh"
50#include "sim/eventq.hh"
51
52class IGbEInt;
53
54class IGbE : public EtherDevice
55{
56  private:
57    IGbEInt *etherInt;
58
59    // device registers
60    iGbReg::Regs regs;
61
62    // eeprom data, status and control bits
63    int eeOpBits, eeAddrBits, eeDataBits;
64    uint8_t eeOpcode, eeAddr;
65    uint16_t flash[iGbReg::EEPROM_SIZE];
66
67    // The drain event if we have one
68    Event *drainEvent;
69
70    // cached parameters from params struct
71    bool useFlowControl;
72
73    // packet fifos
74    PacketFifo rxFifo;
75    PacketFifo txFifo;
76
77    // Packet that we are currently putting into the txFifo
78    EthPacketPtr txPacket;
79
80    // Should to Rx/Tx State machine tick?
81    bool rxTick;
82    bool txTick;
83    bool txFifoTick;
84
85    bool rxDmaPacket;
86
87    // Event and function to deal with RDTR timer expiring
88    void rdtrProcess() {
89        rxDescCache.writeback(0);
90        DPRINTF(EthernetIntr, "Posting RXT interrupt because RDTR timer expired\n");
91        postInterrupt(iGbReg::IT_RXT, true);
92    }
93
94    //friend class EventWrapper<IGbE, &IGbE::rdtrProcess>;
95    EventWrapper<IGbE, &IGbE::rdtrProcess> rdtrEvent;
96
97    // Event and function to deal with RADV timer expiring
98    void radvProcess() {
99        rxDescCache.writeback(0);
100        DPRINTF(EthernetIntr, "Posting RXT interrupt because RADV timer expired\n");
101        postInterrupt(iGbReg::IT_RXT, true);
102    }
103
104    //friend class EventWrapper<IGbE, &IGbE::radvProcess>;
105    EventWrapper<IGbE, &IGbE::radvProcess> radvEvent;
106
107    // Event and function to deal with TADV timer expiring
108    void tadvProcess() {
109        txDescCache.writeback(0);
110        DPRINTF(EthernetIntr, "Posting TXDW interrupt because TADV timer expired\n");
111        postInterrupt(iGbReg::IT_TXDW, true);
112    }
113
114    //friend class EventWrapper<IGbE, &IGbE::tadvProcess>;
115    EventWrapper<IGbE, &IGbE::tadvProcess> tadvEvent;
116
117    // Event and function to deal with TIDV timer expiring
118    void tidvProcess() {
119        txDescCache.writeback(0);
120        DPRINTF(EthernetIntr, "Posting TXDW interrupt because TIDV timer expired\n");
121        postInterrupt(iGbReg::IT_TXDW, true);
122    }
123    //friend class EventWrapper<IGbE, &IGbE::tidvProcess>;
124    EventWrapper<IGbE, &IGbE::tidvProcess> tidvEvent;
125
126    // Main event to tick the device
127    void tick();
128    //friend class EventWrapper<IGbE, &IGbE::tick>;
129    EventWrapper<IGbE, &IGbE::tick> tickEvent;
130
131
132    void rxStateMachine();
133    void txStateMachine();
134    void txWire();
135
136    /** Write an interrupt into the interrupt pending register and check mask
137     * and interrupt limit timer before sending interrupt to CPU
138     * @param t the type of interrupt we are posting
139     * @param now should we ignore the interrupt limiting timer
140     */
141    void postInterrupt(iGbReg::IntTypes t, bool now = false);
142
143    /** Check and see if changes to the mask register have caused an interrupt
144     * to need to be sent or perhaps removed an interrupt cause.
145     */
146    void chkInterrupt();
147
148    /** Send an interrupt to the cpu
149     */
150    void cpuPostInt();
151    // Event to moderate interrupts
152    EventWrapper<IGbE, &IGbE::cpuPostInt> interEvent;
153
154    /** Clear the interupt line to the cpu
155     */
156    void cpuClearInt();
157
158    Tick intClock() { return Clock::Int::ns * 1024; }
159
160    /** This function is used to restart the clock so it can handle things like
161     * draining and resume in one place. */
162    void restartClock();
163
164    /** Check if all the draining things that need to occur have occured and
165     * handle the drain event if so.
166     */
167    void checkDrain();
168
169    template<class T>
170    class DescCache
171    {
172      protected:
173        virtual Addr descBase() const = 0;
174        virtual long descHead() const = 0;
175        virtual long descTail() const = 0;
176        virtual long descLen() const = 0;
177        virtual void updateHead(long h) = 0;
178        virtual void enableSm() = 0;
179        virtual void intAfterWb() const {}
180
181        std::deque<T*> usedCache;
182        std::deque<T*> unusedCache;
183
184        T *fetchBuf;
185        T *wbBuf;
186
187        // Pointer to the device we cache for
188        IGbE *igbe;
189
190        // Name of this  descriptor cache
191        std::string _name;
192
193        // How far we've cached
194        int cachePnt;
195
196        // The size of the descriptor cache
197        int size;
198
199        // How many descriptors we are currently fetching
200        int curFetching;
201
202        // How many descriptors we are currently writing back
203        int wbOut;
204
205        // if the we wrote back to the end of the descriptor ring and are going
206        // to have to wrap and write more
207        bool moreToWb;
208
209        // What the alignment is of the next descriptor writeback
210        Addr wbAlignment;
211
212       /** The packet that is currently being dmad to memory if any
213         */
214        EthPacketPtr pktPtr;
215
216      public:
217        DescCache(IGbE *i, const std::string n, int s)
218            : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0), wbOut(0),
219              pktPtr(NULL), fetchEvent(this), wbEvent(this)
220        {
221            fetchBuf = new T[size];
222            wbBuf = new T[size];
223        }
224
225        virtual ~DescCache()
226        {
227            reset();
228        }
229
230        std::string name() { return _name; }
231
232        /** If the address/len/head change when we've got descriptors that are
233         * dirty that is very bad. This function checks that we don't and if we
234         * do panics.
235         */
236        void areaChanged()
237        {
238            if (usedCache.size() > 0 || curFetching || wbOut)
239                panic("Descriptor Address, Length or Head changed. Bad\n");
240            reset();
241
242        }
243
244        void writeback(Addr aMask)
245        {
246            int curHead = descHead();
247            int max_to_wb = usedCache.size();
248
249            DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
250                    "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
251                    curHead, descTail(), descLen(), cachePnt, max_to_wb,
252                    descLeft());
253
254            // Check if this writeback is less restrictive that the previous
255            // and if so setup another one immediately following it
256            if (wbOut && (aMask < wbAlignment)) {
257                moreToWb = true;
258                wbAlignment = aMask;
259                DPRINTF(EthernetDesc, "Writing back already in process, returning\n");
260                return;
261            }
262
263
264            moreToWb = false;
265            wbAlignment = aMask;
266
267            if (max_to_wb + curHead >= descLen()) {
268                max_to_wb = descLen() - curHead;
269                moreToWb = true;
270                // this is by definition aligned correctly
271            } else if (aMask != 0) {
272                // align the wb point to the mask
273                max_to_wb = max_to_wb & ~aMask;
274            }
275
276            DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);
277
278            if (max_to_wb <= 0 || wbOut)
279                return;
280
281            wbOut = max_to_wb;
282
283            for (int x = 0; x < wbOut; x++)
284                memcpy(&wbBuf[x], usedCache[x], sizeof(T));
285
286            for (int x = 0; x < wbOut; x++) {
287                assert(usedCache.size());
288                delete usedCache[0];
289                usedCache.pop_front();
290            };
291
292
293            assert(wbOut);
294            igbe->dmaWrite(igbe->platform->pciToDma(descBase() + curHead * sizeof(T)),
295                    wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf);
296        }
297
298        /** Fetch a chunk of descriptors into the descriptor cache.
299         * Calls fetchComplete when the memory system returns the data
300         */
301        void fetchDescriptors()
302        {
303            size_t max_to_fetch;
304
305            if (descTail() >= cachePnt)
306                max_to_fetch = descTail() - cachePnt;
307            else
308                max_to_fetch = descLen() - cachePnt;
309
310
311            max_to_fetch = std::min(max_to_fetch, (size - usedCache.size() -
312                        unusedCache.size()));
313
314            DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
315                    "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
316                    descHead(), descTail(), descLen(), cachePnt,
317                    max_to_fetch, descLeft());
318
319            // Nothing to do
320            if (max_to_fetch == 0 || curFetching)
321                return;
322
323            // So we don't have two descriptor fetches going on at once
324            curFetching = max_to_fetch;
325
326            DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
327                    descBase() + cachePnt * sizeof(T),
328                    igbe->platform->pciToDma(descBase() + cachePnt * sizeof(T)),
329                    curFetching * sizeof(T));
330
331            assert(curFetching);
332            igbe->dmaRead(igbe->platform->pciToDma(descBase() + cachePnt * sizeof(T)),
333                    curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf);
334        }
335
336
337        /** Called by event when dma to read descriptors is completed
338         */
339        void fetchComplete()
340        {
341            T *newDesc;
342            for (int x = 0; x < curFetching; x++) {
343                newDesc = new T;
344                memcpy(newDesc, &fetchBuf[x], sizeof(T));
345                unusedCache.push_back(newDesc);
346            }
347
348#ifndef NDEBUG
349            int oldCp = cachePnt;
350#endif
351
352            cachePnt += curFetching;
353            assert(cachePnt <= descLen());
354            if (cachePnt == descLen())
355                cachePnt = 0;
356
357            curFetching = 0;
358
359            DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
360                    oldCp, cachePnt);
361
362            enableSm();
363            igbe->checkDrain();
364        }
365
366        EventWrapper<DescCache, &DescCache::fetchComplete> fetchEvent;
367
368        /** Called by event when dma to writeback descriptors is completed
369         */
370        void wbComplete()
371        {
372            long  curHead = descHead();
373#ifndef NDEBUG
374            long oldHead = curHead;
375#endif
376
377            curHead += wbOut;
378            wbOut = 0;
379
380            if (curHead >= descLen())
381                curHead -= descLen();
382
383            // Update the head
384            updateHead(curHead);
385
386            DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
387                    oldHead, curHead);
388
389            // If we still have more to wb, call wb now
390            if (moreToWb) {
391                DPRINTF(EthernetDesc, "Writeback has more todo\n");
392                writeback(wbAlignment);
393            }
394            intAfterWb();
395            igbe->checkDrain();
396        }
397
398
399        EventWrapper<DescCache, &DescCache::wbComplete> wbEvent;
400
401        /* Return the number of descriptors left in the ring, so the device has
402         * a way to figure out if it needs to interrupt.
403         */
404        int descLeft() const
405        {
406            int left = unusedCache.size();
407            if (cachePnt - descTail() >= 0)
408                left += (cachePnt - descTail());
409            else
410                left += (descTail() - cachePnt);
411
412            return left;
413        }
414
415        /* Return the number of descriptors used and not written back.
416         */
417        int descUsed() const { return usedCache.size(); }
418
419        /* Return the number of cache unused descriptors we have. */
420        int descUnused() const {return unusedCache.size(); }
421
422        /* Get into a state where the descriptor address/head/etc colud be
423         * changed */
424        void reset()
425        {
426            DPRINTF(EthernetDesc, "Reseting descriptor cache\n");
427            for (int x = 0; x < usedCache.size(); x++)
428                delete usedCache[x];
429            for (int x = 0; x < unusedCache.size(); x++)
430                delete unusedCache[x];
431
432            usedCache.clear();
433            unusedCache.clear();
434
435            cachePnt = 0;
436
437        }
438
439        virtual void serialize(std::ostream &os)
440        {
441            SERIALIZE_SCALAR(cachePnt);
442            SERIALIZE_SCALAR(curFetching);
443            SERIALIZE_SCALAR(wbOut);
444            SERIALIZE_SCALAR(moreToWb);
445            SERIALIZE_SCALAR(wbAlignment);
446
447            int usedCacheSize = usedCache.size();
448            SERIALIZE_SCALAR(usedCacheSize);
449            for(int x = 0; x < usedCacheSize; x++) {
450                arrayParamOut(os, csprintf("usedCache_%d", x),
451                        (uint8_t*)usedCache[x],sizeof(T));
452            }
453
454            int unusedCacheSize = unusedCache.size();
455            SERIALIZE_SCALAR(unusedCacheSize);
456            for(int x = 0; x < unusedCacheSize; x++) {
457                arrayParamOut(os, csprintf("unusedCache_%d", x),
458                        (uint8_t*)unusedCache[x],sizeof(T));
459            }
460        }
461
462        virtual void unserialize(Checkpoint *cp, const std::string &section)
463        {
464            UNSERIALIZE_SCALAR(cachePnt);
465            UNSERIALIZE_SCALAR(curFetching);
466            UNSERIALIZE_SCALAR(wbOut);
467            UNSERIALIZE_SCALAR(moreToWb);
468            UNSERIALIZE_SCALAR(wbAlignment);
469
470            int usedCacheSize;
471            UNSERIALIZE_SCALAR(usedCacheSize);
472            T *temp;
473            for(int x = 0; x < usedCacheSize; x++) {
474                temp = new T;
475                arrayParamIn(cp, section, csprintf("usedCache_%d", x),
476                        (uint8_t*)temp,sizeof(T));
477                usedCache.push_back(temp);
478            }
479
480            int unusedCacheSize;
481            UNSERIALIZE_SCALAR(unusedCacheSize);
482            for(int x = 0; x < unusedCacheSize; x++) {
483                temp = new T;
484                arrayParamIn(cp, section, csprintf("unusedCache_%d", x),
485                        (uint8_t*)temp,sizeof(T));
486                unusedCache.push_back(temp);
487            }
488        }
489        virtual bool hasOutstandingEvents() {
490            return wbEvent.scheduled() || fetchEvent.scheduled();
491        }
492
493     };
494
495
496    class RxDescCache : public DescCache<iGbReg::RxDesc>
497    {
498      protected:
499        virtual Addr descBase() const { return igbe->regs.rdba(); }
500        virtual long descHead() const { return igbe->regs.rdh(); }
501        virtual long descLen() const { return igbe->regs.rdlen() >> 4; }
502        virtual long descTail() const { return igbe->regs.rdt(); }
503        virtual void updateHead(long h) { igbe->regs.rdh(h); }
504        virtual void enableSm();
505
506        bool pktDone;
507
508      public:
509        RxDescCache(IGbE *i, std::string n, int s);
510
511        /** Write the given packet into the buffer(s) pointed to by the
512         * descriptor and update the book keeping. Should only be called when
513         * there are no dma's pending.
514         * @param packet ethernet packet to write
515         * @return if the packet could be written (there was a free descriptor)
516         */
517        bool writePacket(EthPacketPtr packet);
518        /** Called by event when dma to write packet is completed
519         */
520        void pktComplete();
521
522        /** Check if the dma on the packet has completed.
523         */
524
525        bool packetDone();
526
527        EventWrapper<RxDescCache, &RxDescCache::pktComplete> pktEvent;
528
529        virtual bool hasOutstandingEvents();
530
531        virtual void serialize(std::ostream &os);
532        virtual void unserialize(Checkpoint *cp, const std::string &section);
533    };
534    friend class RxDescCache;
535
536    RxDescCache rxDescCache;
537
538    class TxDescCache  : public DescCache<iGbReg::TxDesc>
539    {
540      protected:
541        virtual Addr descBase() const { return igbe->regs.tdba(); }
542        virtual long descHead() const { return igbe->regs.tdh(); }
543        virtual long descTail() const { return igbe->regs.tdt(); }
544        virtual long descLen() const { return igbe->regs.tdlen() >> 4; }
545        virtual void updateHead(long h) { igbe->regs.tdh(h); }
546        virtual void enableSm();
547        virtual void intAfterWb() const { igbe->postInterrupt(iGbReg::IT_TXDW);}
548
549        bool pktDone;
550        bool isTcp;
551        bool pktWaiting;
552
553      public:
554        TxDescCache(IGbE *i, std::string n, int s);
555
556        /** Tell the cache to DMA a packet from main memory into its buffer and
557         * return the size the of the packet to reserve space in tx fifo.
558         * @return size of the packet
559         */
560        int getPacketSize();
561        void getPacketData(EthPacketPtr p);
562
563        /** Ask if the packet has been transfered so the state machine can give
564         * it to the fifo.
565         * @return packet available in descriptor cache
566         */
567        bool packetAvailable();
568
569        /** Ask if we are still waiting for the packet to be transfered.
570         * @return packet still in transit.
571         */
572        bool packetWaiting() { return pktWaiting; }
573
574        /** Called by event when dma to write packet is completed
575         */
576        void pktComplete();
577        EventWrapper<TxDescCache, &TxDescCache::pktComplete> pktEvent;
578
579        virtual bool hasOutstandingEvents();
580
581        virtual void serialize(std::ostream &os);
582        virtual void unserialize(Checkpoint *cp, const std::string &section);
583
584    };
585    friend class TxDescCache;
586
587    TxDescCache txDescCache;
588
589  public:
590    typedef IGbEParams Params;
591    const Params *
592    params() const
593    {
594        return dynamic_cast<const Params *>(_params);
595    }
596    IGbE(const Params *params);
597    ~IGbE() {}
598
599    virtual EtherInt *getEthPort(const std::string &if_name, int idx);
600
601    Tick clock;
602    inline Tick cycles(int numCycles) const { return numCycles * clock; }
603
604    virtual Tick read(PacketPtr pkt);
605    virtual Tick write(PacketPtr pkt);
606
607    virtual Tick writeConfig(PacketPtr pkt);
608
609    bool ethRxPkt(EthPacketPtr packet);
610    void ethTxDone();
611
612    virtual void serialize(std::ostream &os);
613    virtual void unserialize(Checkpoint *cp, const std::string &section);
614    virtual unsigned int drain(Event *de);
615    virtual void resume();
616
617};
618
619class IGbEInt : public EtherInt
620{
621  private:
622    IGbE *dev;
623
624  public:
625    IGbEInt(const std::string &name, IGbE *d)
626        : EtherInt(name), dev(d)
627    { }
628
629    virtual bool recvPacket(EthPacketPtr pkt) { return dev->ethRxPkt(pkt); }
630    virtual void sendDone() { dev->ethTxDone(); }
631};
632
633
634
635
636
637#endif //__DEV_I8254XGBE_HH__
638
639