i8254xGBe.hh revision 4291
/*
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 */

/** @file
 * Device model for Intel's 8254x line of gigabit ethernet controllers.
 */

#ifndef __DEV_I8254XGBE_HH__
#define __DEV_I8254XGBE_HH__

#include <deque>
#include <string>

#include "base/inet.hh"
#include "base/statistics.hh"
#include "dev/etherint.hh"
#include "dev/etherpkt.hh"
#include "dev/i8254xGBe_defs.hh"
#include "dev/pcidev.hh"
#include "dev/pktfifo.hh"
#include "sim/eventq.hh"

class IGbEInt;

class IGbE : public PciDev
{
  private:
    IGbEInt *etherInt;

    // device registers
    iGbReg::Regs regs;

    // EEPROM data, status, and control bits
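    // (The model appears to emulate the serial EEPROM interface that is
    // bit-banged through the EECD register; these fields track how many
    // opcode/address/data bits have been shifted in so far and the values
    // assembled from them.)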
    int eeOpBits, eeAddrBits, eeDataBits;
    uint8_t eeOpcode, eeAddr;
    uint16_t flash[iGbReg::EEPROM_SIZE];

    // cached parameters from params struct
    Tick tickRate;
    bool useFlowControl;

    // packet fifos
    PacketFifo rxFifo;
    PacketFifo txFifo;

    // Packet that we are currently putting into the txFifo
    EthPacketPtr txPacket;

    // Should the Rx/Tx state machines tick?
    bool rxTick;
    bool txTick;
    bool txFifoTick;

    // Event and function to deal with RDTR timer expiring
    void rdtrProcess() {
        rxDescCache.writeback(0);
        DPRINTF(EthernetIntr, "Posting RXT interrupt because RDTR timer expired\n");
        postInterrupt(iGbReg::IT_RXT, true);
    }

    //friend class EventWrapper<IGbE, &IGbE::rdtrProcess>;
    EventWrapper<IGbE, &IGbE::rdtrProcess> rdtrEvent;

    // Event and function to deal with RADV timer expiring
    void radvProcess() {
        rxDescCache.writeback(0);
        DPRINTF(EthernetIntr, "Posting RXT interrupt because RADV timer expired\n");
        postInterrupt(iGbReg::IT_RXT, true);
    }

    //friend class EventWrapper<IGbE, &IGbE::radvProcess>;
    EventWrapper<IGbE, &IGbE::radvProcess> radvEvent;

    // Event and function to deal with TADV timer expiring
    void tadvProcess() {
        txDescCache.writeback(0);
        DPRINTF(EthernetIntr, "Posting TXDW interrupt because TADV timer expired\n");
        postInterrupt(iGbReg::IT_TXDW, true);
    }

    //friend class EventWrapper<IGbE, &IGbE::tadvProcess>;
    EventWrapper<IGbE, &IGbE::tadvProcess> tadvEvent;

    // Event and function to deal with TIDV timer expiring
    void tidvProcess() {
        txDescCache.writeback(0);
        DPRINTF(EthernetIntr, "Posting TXDW interrupt because TIDV timer expired\n");
        postInterrupt(iGbReg::IT_TXDW, true);
    }
    //friend class EventWrapper<IGbE, &IGbE::tidvProcess>;
    EventWrapper<IGbE, &IGbE::tidvProcess> tidvEvent;

    // Main event to tick the device
    void tick();
    //friend class EventWrapper<IGbE, &IGbE::tick>;
    EventWrapper<IGbE, &IGbE::tick> tickEvent;


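    // Roughly: the rx state machine moves received packets from the rxFifo
    // into host memory via the rx descriptors, the tx state machine pulls
    // packets described by the tx descriptors into the txFifo, and txWire()
    // drains the head of the txFifo onto the ethernet link.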
    void rxStateMachine();
    void txStateMachine();
    void txWire();

    /** Write an interrupt into the interrupt pending register and check the
     * mask and interrupt limit timer before sending the interrupt to the CPU.
     * @param t the type of interrupt we are posting
     * @param now should we ignore the interrupt limiting timer
     */
    void postInterrupt(iGbReg::IntTypes t, bool now = false);

    /** Check whether changes to the mask register require an interrupt to be
     * sent, or whether they have removed an existing interrupt cause.
     */
    void chkInterrupt();

    /** Send an interrupt to the CPU.
     */
    void cpuPostInt();
    // Event to moderate interrupts
    EventWrapper<IGbE, &IGbE::cpuPostInt> interEvent;

    /** Clear the interrupt line to the CPU.
     */
    void cpuClearInt();

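    // The 8254x interrupt delay timers are specified in 1.024 us increments,
    // which presumably is why the interrupt clock below ticks every 1024 ns.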
    Tick intClock() { return Clock::Int::ns * 1024; }

    void restartClock();

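    /** Caches a contiguous chunk of descriptors out of the host descriptor
     * ring. unusedCache holds descriptors that have been fetched but not yet
     * processed, while usedCache holds descriptors that have been processed
     * and are waiting to be written back to host memory.
     */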
    template<class T>
    class DescCache
    {
      protected:
        virtual Addr descBase() const = 0;
        virtual long descHead() const = 0;
        virtual long descTail() const = 0;
        virtual long descLen() const = 0;
        virtual void updateHead(long h) = 0;
        virtual void enableSm() = 0;
        virtual void intAfterWb() const {}

        std::deque<T*> usedCache;
        std::deque<T*> unusedCache;

        T *fetchBuf;
        T *wbBuf;

        // Pointer to the device we cache for
        IGbE *igbe;

        // Name of this descriptor cache
        std::string _name;

        // How far we've cached
        int cachePnt;

        // The size of the descriptor cache
        int size;

        // How many descriptors we are currently fetching
        int curFetching;

        // How many descriptors we are currently writing back
        int wbOut;

        // If we wrote back to the end of the descriptor ring and are going
        // to have to wrap and write more
        bool moreToWb;

        // What the alignment is of the next descriptor writeback
        Addr wbAlignment;

        /** The packet that is currently being DMA'd to memory, if any
         */
        EthPacketPtr pktPtr;

      public:
        DescCache(IGbE *i, const std::string n, int s)
            : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0), wbOut(0),
              pktPtr(NULL), fetchEvent(this), wbEvent(this)
        {
            fetchBuf = new T[size];
            wbBuf = new T[size];
        }

        virtual ~DescCache()
        {
            reset();
            delete [] fetchBuf;
            delete [] wbBuf;
        }

        std::string name() { return _name; }

        /** If the address/length/head changes while we have descriptors that
         * are dirty, that is very bad. This function checks for that
         * condition and panics if it occurs.
         */
        void areaChanged()
        {
            if (usedCache.size() > 0 || curFetching || wbOut)
                panic("Descriptor Address, Length or Head changed. Bad\n");
            reset();

        }

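        // Note on aMask: the writeback count is rounded down to a multiple of
        // (aMask + 1) descriptors, presumably to keep the writeback DMA
        // aligned to a cache-block boundary; a mask of 0 writes back
        // everything that is pending.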
        void writeback(Addr aMask)
        {
            int curHead = descHead();
            int max_to_wb = usedCache.size();

            DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
                    "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
                    curHead, descTail(), descLen(), cachePnt, max_to_wb,
                    descLeft());

            // Check if this writeback is less restrictive than the previous
            // one, and if so set up another one immediately following it
            if (wbOut && (aMask < wbAlignment)) {
                moreToWb = true;
                wbAlignment = aMask;
                DPRINTF(EthernetDesc, "Writeback already in progress, returning\n");
                return;
            }


            moreToWb = false;
            wbAlignment = aMask;

            if (max_to_wb + curHead >= descLen()) {
                max_to_wb = descLen() - curHead;
                moreToWb = true;
                // this is by definition aligned correctly
            } else if (aMask != 0) {
                // align the wb point to the mask
                max_to_wb = max_to_wb & ~aMask;
            }

            DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);

            if (max_to_wb <= 0 || wbOut)
                return;

            wbOut = max_to_wb;

            for (int x = 0; x < wbOut; x++)
                memcpy(&wbBuf[x], usedCache[x], sizeof(T));

            for (int x = 0; x < wbOut; x++) {
                assert(usedCache.size());
                delete usedCache[0];
                usedCache.pop_front();
            }


            assert(wbOut);
            igbe->dmaWrite(igbe->platform->pciToDma(descBase() + curHead * sizeof(T)),
                    wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf);
        }

        /** Fetch a chunk of descriptors into the descriptor cache.
         * Calls fetchComplete when the memory system returns the data.
         */
        void fetchDescriptors()
        {
            size_t max_to_fetch;

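            // Only fetch up to the tail or to the end of the ring here; if
            // the tail has wrapped around, the remainder is picked up by a
            // later call once fetchComplete() wraps cachePnt back to 0.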
            if (descTail() >= cachePnt)
                max_to_fetch = descTail() - cachePnt;
            else
                max_to_fetch = descLen() - cachePnt;


            max_to_fetch = std::min(max_to_fetch, (size - usedCache.size() -
                        unusedCache.size()));

            DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
                    "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
                    descHead(), descTail(), descLen(), cachePnt,
                    max_to_fetch, descLeft());

            // Nothing to do
            if (max_to_fetch == 0 || curFetching)
                return;

            // So we don't have two descriptor fetches going on at once
            curFetching = max_to_fetch;

            DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
                    descBase() + cachePnt * sizeof(T),
                    igbe->platform->pciToDma(descBase() + cachePnt * sizeof(T)),
                    curFetching * sizeof(T));

            assert(curFetching);
            igbe->dmaRead(igbe->platform->pciToDma(descBase() + cachePnt * sizeof(T)),
                    curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf);
        }


        /** Called by event when the DMA to read descriptors is completed
         */
        void fetchComplete()
        {
            T *newDesc;
            for (int x = 0; x < curFetching; x++) {
                newDesc = new T;
                memcpy(newDesc, &fetchBuf[x], sizeof(T));
                unusedCache.push_back(newDesc);
            }

#ifndef NDEBUG
            int oldCp = cachePnt;
#endif

            cachePnt += curFetching;
            assert(cachePnt <= descLen());
            if (cachePnt == descLen())
                cachePnt = 0;

            curFetching = 0;

            DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
                    oldCp, cachePnt);

            enableSm();

        }

        EventWrapper<DescCache, &DescCache::fetchComplete> fetchEvent;

        /** Called by event when the DMA to write back descriptors is completed
         */
        void wbComplete()
        {
            long curHead = descHead();
#ifndef NDEBUG
            long oldHead = curHead;
#endif

            curHead += wbOut;
            wbOut = 0;

            if (curHead >= descLen())
                curHead -= descLen();

            // Update the head
            updateHead(curHead);

            DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
                    oldHead, curHead);

            // If we still have more to wb, call wb now
            if (moreToWb) {
                DPRINTF(EthernetDesc, "Writeback has more to do\n");
                writeback(wbAlignment);
            }
            intAfterWb();
        }


        EventWrapper<DescCache, &DescCache::wbComplete> wbEvent;

        /** Return the number of descriptors left in the ring, so the device
         * has a way to figure out if it needs to interrupt.
         */
        int descLeft() const
        {
            int left = unusedCache.size();
            if (cachePnt - descTail() >= 0)
                left += (cachePnt - descTail());
            else
                left += (descTail() - cachePnt);

            return left;
        }

        /** Return the number of descriptors used and not written back.
         */
        int descUsed() const { return usedCache.size(); }

        /** Return the number of cached but unused descriptors we have. */
        int descUnused() const { return unusedCache.size(); }

        /** Get into a state where the descriptor address/head/etc could be
         * changed. */
        void reset()
        {
            DPRINTF(EthernetDesc, "Resetting descriptor cache\n");
            for (int x = 0; x < usedCache.size(); x++)
                delete usedCache[x];
            for (int x = 0; x < unusedCache.size(); x++)
                delete unusedCache[x];

            usedCache.clear();
            unusedCache.clear();

            cachePnt = 0;

        }

    };


    class RxDescCache : public DescCache<iGbReg::RxDesc>
    {
      protected:
        virtual Addr descBase() const { return igbe->regs.rdba(); }
        virtual long descHead() const { return igbe->regs.rdh(); }
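        // RDLEN is programmed in bytes and each descriptor is 16 bytes, so
        // shifting right by 4 yields the ring size in descriptors.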
        virtual long descLen() const { return igbe->regs.rdlen() >> 4; }
        virtual long descTail() const { return igbe->regs.rdt(); }
        virtual void updateHead(long h) { igbe->regs.rdh(h); }
        virtual void enableSm();

        bool pktDone;

      public:
        RxDescCache(IGbE *i, std::string n, int s);

        /** Write the given packet into the buffer(s) pointed to by the
         * descriptor and update the bookkeeping. Should only be called when
         * there are no DMAs pending.
         * @param packet ethernet packet to write
         * @return if the packet could be written (there was a free descriptor)
         */
        bool writePacket(EthPacketPtr packet);

        /** Called by event when the DMA to write the packet is completed
         */
        void pktComplete();

        /** Check if the DMA on the packet has completed.
         */
        bool packetDone();

        EventWrapper<RxDescCache, &RxDescCache::pktComplete> pktEvent;

    };
    friend class RxDescCache;

    RxDescCache rxDescCache;

    class TxDescCache : public DescCache<iGbReg::TxDesc>
    {
      protected:
        virtual Addr descBase() const { return igbe->regs.tdba(); }
        virtual long descHead() const { return igbe->regs.tdh(); }
        virtual long descTail() const { return igbe->regs.tdt(); }
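        // As with RDLEN above, TDLEN is in bytes of 16-byte descriptors.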
        virtual long descLen() const { return igbe->regs.tdlen() >> 4; }
        virtual void updateHead(long h) { igbe->regs.tdh(h); }
        virtual void enableSm();
        virtual void intAfterWb() const { igbe->postInterrupt(iGbReg::IT_TXDW); }

        bool pktDone;
        bool isTcp;
        bool pktWaiting;
        int hLen;

      public:
        TxDescCache(IGbE *i, std::string n, int s);

        /** Tell the cache to DMA a packet from main memory into its buffer
         * and return the size of the packet so space can be reserved in the
         * tx fifo.
         * @return size of the packet
         */
        int getPacketSize();
        void getPacketData(EthPacketPtr p);

        /** Ask if the packet has been transferred so the state machine can
         * give it to the fifo.
         * @return packet available in descriptor cache
         */
        bool packetAvailable();

        /** Ask if we are still waiting for the packet to be transferred.
         * @return packet still in transit.
         */
        bool packetWaiting() { return pktWaiting; }

        /** Called by event when the DMA for the packet data is completed
         */
        void pktComplete();
        EventWrapper<TxDescCache, &TxDescCache::pktComplete> pktEvent;

    };
    friend class TxDescCache;

    TxDescCache txDescCache;

  public:
    struct Params : public PciDev::Params
    {
        Net::EthAddr hardware_address;
        bool use_flow_control;
        int rx_fifo_size;
        int tx_fifo_size;
        int rx_desc_cache_size;
        int tx_desc_cache_size;
        Tick clock;
    };

    IGbE(Params *params);
    ~IGbE() {}

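    // 'clock' is the period of one device cycle in ticks; cycles(n) converts
    // a cycle count into simulator ticks.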
    Tick clock;
    inline Tick cycles(int numCycles) const { return numCycles * clock; }

    virtual Tick read(PacketPtr pkt);
    virtual Tick write(PacketPtr pkt);

    virtual Tick writeConfig(PacketPtr pkt);

    bool ethRxPkt(EthPacketPtr packet);
    void ethTxDone();

    void setEthInt(IGbEInt *i) { assert(!etherInt); etherInt = i; }


    const Params *params() const { return (const Params *)_params; }

    virtual void serialize(std::ostream &os);
    virtual void unserialize(Checkpoint *cp, const std::string &section);


};

class IGbEInt : public EtherInt
{
  private:
    IGbE *dev;

  public:
    IGbEInt(const std::string &name, IGbE *d)
        : EtherInt(name), dev(d)
        { dev->setEthInt(this); }

    virtual bool recvPacket(EthPacketPtr pkt) { return dev->ethRxPkt(pkt); }
    virtual void sendDone() { dev->ethTxDone(); }
};

#endif //__DEV_I8254XGBE_HH__