ns_gige.cc revision 1817
1/*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @file
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller.  Does not support priority queueing
32 */
33#include <cstdio>
34#include <deque>
35#include <string>
36
37#include "base/inet.hh"
38#include "cpu/exec_context.hh"
39#include "dev/etherlink.hh"
40#include "dev/ns_gige.hh"
41#include "dev/pciconfigall.hh"
42#include "mem/bus/bus.hh"
43#include "mem/bus/dma_interface.hh"
44#include "mem/bus/pio_interface.hh"
45#include "mem/bus/pio_interface_impl.hh"
46#include "mem/functional/memory_control.hh"
47#include "mem/functional/physical.hh"
48#include "sim/builder.hh"
49#include "sim/debug.hh"
50#include "sim/host.hh"
51#include "sim/stats.hh"
52#include "targetarch/vtophys.hh"
53
54const char *NsRxStateStrings[] =
55{
56    "rxIdle",
57    "rxDescRefr",
58    "rxDescRead",
59    "rxFifoBlock",
60    "rxFragWrite",
61    "rxDescWrite",
62    "rxAdvance"
63};
64
65const char *NsTxStateStrings[] =
66{
67    "txIdle",
68    "txDescRefr",
69    "txDescRead",
70    "txFifoBlock",
71    "txFragRead",
72    "txDescWrite",
73    "txAdvance"
74};
75
76const char *NsDmaState[] =
77{
78    "dmaIdle",
79    "dmaReading",
80    "dmaWriting",
81    "dmaReadWaiting",
82    "dmaWriteWaiting"
83};
84
85using namespace std;
86using namespace Net;
87
88///////////////////////////////////////////////////////////////////////
89//
90// NSGigE PCI Device
91//
92NSGigE::NSGigE(Params *p)
93    : PciDev(p), ioEnable(false),
94      txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
95      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
96      txXferLen(0), rxXferLen(0), clock(p->clock),
97      txState(txIdle), txEnable(false), CTDD(false),
98      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
99      rxEnable(false), CRDD(false), rxPktBytes(0),
100      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
101      rxDmaReadEvent(this), rxDmaWriteEvent(this),
102      txDmaReadEvent(this), txDmaWriteEvent(this),
103      dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
104      txDelay(p->tx_delay), rxDelay(p->rx_delay),
105      rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this),
106      txEvent(this), rxFilterEnable(p->rx_filter), acceptBroadcast(false),
107      acceptMulticast(false), acceptUnicast(false),
108      acceptPerfect(false), acceptArp(false),
109      physmem(p->pmem), intrTick(0), cpuPendingIntr(false),
110      intrEvent(0), interface(0)
111{
112    if (p->header_bus) {
113        pioInterface = newPioInterface(name(), p->hier,
114                                       p->header_bus, this,
115                                       &NSGigE::cacheAccess);
116
117        pioLatency = p->pio_latency * p->header_bus->clockRate;
118
119        if (p->payload_bus)
120            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
121                                                 p->header_bus,
122                                                 p->payload_bus, 1,
123                                                 p->dma_no_allocate);
124        else
125            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
126                                                 p->header_bus,
127                                                 p->header_bus, 1,
128                                                 p->dma_no_allocate);
129    } else if (p->payload_bus) {
130        pioInterface = newPioInterface(name(), p->hier,
131                                       p->payload_bus, this,
132                                       &NSGigE::cacheAccess);
133
134        pioLatency = p->pio_latency * p->payload_bus->clockRate;
135
136        dmaInterface = new DMAInterface<Bus>(name() + ".dma",
137                                             p->payload_bus,
138                                             p->payload_bus, 1,
139                                             p->dma_no_allocate);
140    }
141
142
143    intrDelay = p->intr_delay;
144    dmaReadDelay = p->dma_read_delay;
145    dmaWriteDelay = p->dma_write_delay;
146    dmaReadFactor = p->dma_read_factor;
147    dmaWriteFactor = p->dma_write_factor;
148
149    regsReset();
150    memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN);
151}
152
153NSGigE::~NSGigE()
154{}
155
156void
157NSGigE::regStats()
158{
159    txBytes
160        .name(name() + ".txBytes")
161        .desc("Bytes Transmitted")
162        .prereq(txBytes)
163        ;
164
165    rxBytes
166        .name(name() + ".rxBytes")
167        .desc("Bytes Received")
168        .prereq(rxBytes)
169        ;
170
171    txPackets
172        .name(name() + ".txPackets")
173        .desc("Number of Packets Transmitted")
174        .prereq(txBytes)
175        ;
176
177    rxPackets
178        .name(name() + ".rxPackets")
179        .desc("Number of Packets Received")
180        .prereq(rxBytes)
181        ;
182
183    txIpChecksums
184        .name(name() + ".txIpChecksums")
185        .desc("Number of tx IP Checksums done by device")
186        .precision(0)
187        .prereq(txBytes)
188        ;
189
190    rxIpChecksums
191        .name(name() + ".rxIpChecksums")
192        .desc("Number of rx IP Checksums done by device")
193        .precision(0)
194        .prereq(rxBytes)
195        ;
196
197    txTcpChecksums
198        .name(name() + ".txTcpChecksums")
199        .desc("Number of tx TCP Checksums done by device")
200        .precision(0)
201        .prereq(txBytes)
202        ;
203
204    rxTcpChecksums
205        .name(name() + ".rxTcpChecksums")
206        .desc("Number of rx TCP Checksums done by device")
207        .precision(0)
208        .prereq(rxBytes)
209        ;
210
211    txUdpChecksums
212        .name(name() + ".txUdpChecksums")
213        .desc("Number of tx UDP Checksums done by device")
214        .precision(0)
215        .prereq(txBytes)
216        ;
217
218    rxUdpChecksums
219        .name(name() + ".rxUdpChecksums")
220        .desc("Number of rx UDP Checksums done by device")
221        .precision(0)
222        .prereq(rxBytes)
223        ;
224
225    descDmaReads
226        .name(name() + ".descDMAReads")
227        .desc("Number of descriptors the device read w/ DMA")
228        .precision(0)
229        ;
230
231    descDmaWrites
232        .name(name() + ".descDMAWrites")
233        .desc("Number of descriptors the device wrote w/ DMA")
234        .precision(0)
235        ;
236
237    descDmaRdBytes
238        .name(name() + ".descDmaReadBytes")
239        .desc("number of descriptor bytes read w/ DMA")
240        .precision(0)
241        ;
242
243   descDmaWrBytes
244        .name(name() + ".descDmaWriteBytes")
245        .desc("number of descriptor bytes write w/ DMA")
246        .precision(0)
247        ;
248
249    txBandwidth
250        .name(name() + ".txBandwidth")
251        .desc("Transmit Bandwidth (bits/s)")
252        .precision(0)
253        .prereq(txBytes)
254        ;
255
256    rxBandwidth
257        .name(name() + ".rxBandwidth")
258        .desc("Receive Bandwidth (bits/s)")
259        .precision(0)
260        .prereq(rxBytes)
261        ;
262
263    totBandwidth
264        .name(name() + ".totBandwidth")
265        .desc("Total Bandwidth (bits/s)")
266        .precision(0)
267        .prereq(totBytes)
268        ;
269
270    totPackets
271        .name(name() + ".totPackets")
272        .desc("Total Packets")
273        .precision(0)
274        .prereq(totBytes)
275        ;
276
277    totBytes
278        .name(name() + ".totBytes")
279        .desc("Total Bytes")
280        .precision(0)
281        .prereq(totBytes)
282        ;
283
284    totPacketRate
285        .name(name() + ".totPPS")
286        .desc("Total Tranmission Rate (packets/s)")
287        .precision(0)
288        .prereq(totBytes)
289        ;
290
291    txPacketRate
292        .name(name() + ".txPPS")
293        .desc("Packet Tranmission Rate (packets/s)")
294        .precision(0)
295        .prereq(txBytes)
296        ;
297
298    rxPacketRate
299        .name(name() + ".rxPPS")
300        .desc("Packet Reception Rate (packets/s)")
301        .precision(0)
302        .prereq(rxBytes)
303        ;
304
305    postedSwi
306        .name(name() + ".postedSwi")
307        .desc("number of software interrupts posted to CPU")
308        .precision(0)
309        ;
310
311    totalSwi
312        .name(name() + ".totalSwi")
313        .desc("number of total Swi written to ISR")
314        .precision(0)
315        ;
316
317    coalescedSwi
318        .name(name() + ".coalescedSwi")
319        .desc("average number of Swi's coalesced into each post")
320        .precision(0)
321        ;
322
323    postedRxIdle
324        .name(name() + ".postedRxIdle")
325        .desc("number of rxIdle interrupts posted to CPU")
326        .precision(0)
327        ;
328
329    totalRxIdle
330        .name(name() + ".totalRxIdle")
331        .desc("number of total RxIdle written to ISR")
332        .precision(0)
333        ;
334
335    coalescedRxIdle
336        .name(name() + ".coalescedRxIdle")
337        .desc("average number of RxIdle's coalesced into each post")
338        .precision(0)
339        ;
340
341    postedRxOk
342        .name(name() + ".postedRxOk")
343        .desc("number of RxOk interrupts posted to CPU")
344        .precision(0)
345        ;
346
347    totalRxOk
348        .name(name() + ".totalRxOk")
349        .desc("number of total RxOk written to ISR")
350        .precision(0)
351        ;
352
353    coalescedRxOk
354        .name(name() + ".coalescedRxOk")
355        .desc("average number of RxOk's coalesced into each post")
356        .precision(0)
357        ;
358
359    postedRxDesc
360        .name(name() + ".postedRxDesc")
361        .desc("number of RxDesc interrupts posted to CPU")
362        .precision(0)
363        ;
364
365    totalRxDesc
366        .name(name() + ".totalRxDesc")
367        .desc("number of total RxDesc written to ISR")
368        .precision(0)
369        ;
370
371    coalescedRxDesc
372        .name(name() + ".coalescedRxDesc")
373        .desc("average number of RxDesc's coalesced into each post")
374        .precision(0)
375        ;
376
377    postedTxOk
378        .name(name() + ".postedTxOk")
379        .desc("number of TxOk interrupts posted to CPU")
380        .precision(0)
381        ;
382
383    totalTxOk
384        .name(name() + ".totalTxOk")
385        .desc("number of total TxOk written to ISR")
386        .precision(0)
387        ;
388
389    coalescedTxOk
390        .name(name() + ".coalescedTxOk")
391        .desc("average number of TxOk's coalesced into each post")
392        .precision(0)
393        ;
394
395    postedTxIdle
396        .name(name() + ".postedTxIdle")
397        .desc("number of TxIdle interrupts posted to CPU")
398        .precision(0)
399        ;
400
401    totalTxIdle
402        .name(name() + ".totalTxIdle")
403        .desc("number of total TxIdle written to ISR")
404        .precision(0)
405        ;
406
407    coalescedTxIdle
408        .name(name() + ".coalescedTxIdle")
409        .desc("average number of TxIdle's coalesced into each post")
410        .precision(0)
411        ;
412
413    postedTxDesc
414        .name(name() + ".postedTxDesc")
415        .desc("number of TxDesc interrupts posted to CPU")
416        .precision(0)
417        ;
418
419    totalTxDesc
420        .name(name() + ".totalTxDesc")
421        .desc("number of total TxDesc written to ISR")
422        .precision(0)
423        ;
424
425    coalescedTxDesc
426        .name(name() + ".coalescedTxDesc")
427        .desc("average number of TxDesc's coalesced into each post")
428        .precision(0)
429        ;
430
431    postedRxOrn
432        .name(name() + ".postedRxOrn")
433        .desc("number of RxOrn posted to CPU")
434        .precision(0)
435        ;
436
437    totalRxOrn
438        .name(name() + ".totalRxOrn")
439        .desc("number of total RxOrn written to ISR")
440        .precision(0)
441        ;
442
443    coalescedRxOrn
444        .name(name() + ".coalescedRxOrn")
445        .desc("average number of RxOrn's coalesced into each post")
446        .precision(0)
447        ;
448
449    coalescedTotal
450        .name(name() + ".coalescedTotal")
451        .desc("average number of interrupts coalesced into each post")
452        .precision(0)
453        ;
454
455    postedInterrupts
456        .name(name() + ".postedInterrupts")
457        .desc("number of posts to CPU")
458        .precision(0)
459        ;
460
461    droppedPackets
462        .name(name() + ".droppedPackets")
463        .desc("number of packets dropped")
464        .precision(0)
465        ;
466
467    coalescedSwi = totalSwi / postedInterrupts;
468    coalescedRxIdle = totalRxIdle / postedInterrupts;
469    coalescedRxOk = totalRxOk / postedInterrupts;
470    coalescedRxDesc = totalRxDesc / postedInterrupts;
471    coalescedTxOk = totalTxOk / postedInterrupts;
472    coalescedTxIdle = totalTxIdle / postedInterrupts;
473    coalescedTxDesc = totalTxDesc / postedInterrupts;
474    coalescedRxOrn = totalRxOrn / postedInterrupts;
475
476    coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc +
477                      totalTxOk + totalTxIdle + totalTxDesc +
478                      totalRxOrn) / postedInterrupts;
479
480    txBandwidth = txBytes * Stats::constant(8) / simSeconds;
481    rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
482    totBandwidth = txBandwidth + rxBandwidth;
483    totBytes = txBytes + rxBytes;
484    totPackets = txPackets + rxPackets;
485
486    txPacketRate = txPackets / simSeconds;
487    rxPacketRate = rxPackets / simSeconds;
488}
489
490/**
491 * This is to read the PCI general configuration registers
492 */
493void
494NSGigE::readConfig(int offset, int size, uint8_t *data)
495{
496    if (offset < PCI_DEVICE_SPECIFIC)
497        PciDev::readConfig(offset, size, data);
498    else
499        panic("Device specific PCI config space not implemented!\n");
500}
501
502/**
503 * This is to write to the PCI general configuration registers
504 */
505void
506NSGigE::writeConfig(int offset, int size, const uint8_t* data)
507{
508    if (offset < PCI_DEVICE_SPECIFIC)
509        PciDev::writeConfig(offset, size, data);
510    else
511        panic("Device specific PCI config space not implemented!\n");
512
513    // Need to catch writes to BARs to update the PIO interface
514    switch (offset) {
515        // seems to work fine without all these PCI settings, but i
516        // put in the IO to double check, an assertion will fail if we
517        // need to properly implement it
518      case PCI_COMMAND:
519        if (config.data[offset] & PCI_CMD_IOSE)
520            ioEnable = true;
521        else
522            ioEnable = false;
523
524#if 0
525        if (config.data[offset] & PCI_CMD_BME) {
526            bmEnabled = true;
527        }
528        else {
529            bmEnabled = false;
530        }
531
532        if (config.data[offset] & PCI_CMD_MSE) {
533            memEnable = true;
534        }
535        else {
536            memEnable = false;
537        }
538#endif
539        break;
540
541      case PCI0_BASE_ADDR0:
542        if (BARAddrs[0] != 0) {
543            if (pioInterface)
544                pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
545
546            BARAddrs[0] &= EV5::PAddrUncachedMask;
547        }
548        break;
549      case PCI0_BASE_ADDR1:
550        if (BARAddrs[1] != 0) {
551            if (pioInterface)
552                pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));
553
554            BARAddrs[1] &= EV5::PAddrUncachedMask;
555        }
556        break;
557    }
558}
559
560/**
561 * This reads the device registers, which are detailed in the NS83820
562 * spec sheet
563 */
564Fault
565NSGigE::read(MemReqPtr &req, uint8_t *data)
566{
567    assert(ioEnable);
568
569    //The mask is to give you only the offset into the device register file
570    Addr daddr = req->paddr & 0xfff;
571    DPRINTF(EthernetPIO, "read  da=%#x pa=%#x va=%#x size=%d\n",
572            daddr, req->paddr, req->vaddr, req->size);
573
574
575    // there are some reserved registers, you can see ns_gige_reg.h and
576    // the spec sheet for details
577    if (daddr > LAST && daddr <=  RESERVED) {
578        panic("Accessing reserved register");
579    } else if (daddr > RESERVED && daddr <= 0x3FC) {
580        readConfig(daddr & 0xff, req->size, data);
581        return No_Fault;
582    } else if (daddr >= MIB_START && daddr <= MIB_END) {
583        // don't implement all the MIB's.  hopefully the kernel
584        // doesn't actually DEPEND upon their values
585        // MIB are just hardware stats keepers
586        uint32_t &reg = *(uint32_t *) data;
587        reg = 0;
588        return No_Fault;
589    } else if (daddr > 0x3FC)
590        panic("Something is messed up!\n");
591
592    switch (req->size) {
593      case sizeof(uint32_t):
594        {
595            uint32_t &reg = *(uint32_t *)data;
596
597            switch (daddr) {
598              case CR:
599                reg = regs.command;
600                //these are supposed to be cleared on a read
601                reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
602                break;
603
604              case CFGR:
605                reg = regs.config;
606                break;
607
608              case MEAR:
609                reg = regs.mear;
610                break;
611
612              case PTSCR:
613                reg = regs.ptscr;
614                break;
615
616              case ISR:
617                reg = regs.isr;
618                devIntrClear(ISR_ALL);
619                break;
620
621              case IMR:
622                reg = regs.imr;
623                break;
624
625              case IER:
626                reg = regs.ier;
627                break;
628
629              case IHR:
630                reg = regs.ihr;
631                break;
632
633              case TXDP:
634                reg = regs.txdp;
635                break;
636
637              case TXDP_HI:
638                reg = regs.txdp_hi;
639                break;
640
641              case TX_CFG:
642                reg = regs.txcfg;
643                break;
644
645              case GPIOR:
646                reg = regs.gpior;
647                break;
648
649              case RXDP:
650                reg = regs.rxdp;
651                break;
652
653              case RXDP_HI:
654                reg = regs.rxdp_hi;
655                break;
656
657              case RX_CFG:
658                reg = regs.rxcfg;
659                break;
660
661              case PQCR:
662                reg = regs.pqcr;
663                break;
664
665              case WCSR:
666                reg = regs.wcsr;
667                break;
668
669              case PCR:
670                reg = regs.pcr;
671                break;
672
673                // see the spec sheet for how RFCR and RFDR work
674                // basically, you write to RFCR to tell the machine
675                // what you want to do next, then you act upon RFDR,
676                // and the device will be prepared b/c of what you
677                // wrote to RFCR
678              case RFCR:
679                reg = regs.rfcr;
680                break;
681
682              case RFDR:
683                switch (regs.rfcr & RFCR_RFADDR) {
684                  case 0x000:
685                    reg = rom.perfectMatch[1];
686                    reg = reg << 8;
687                    reg += rom.perfectMatch[0];
688                    break;
689                  case 0x002:
690                    reg = rom.perfectMatch[3] << 8;
691                    reg += rom.perfectMatch[2];
692                    break;
693                  case 0x004:
694                    reg = rom.perfectMatch[5] << 8;
695                    reg += rom.perfectMatch[4];
696                    break;
697                  default:
698                    panic("reading RFDR for something other than PMATCH!\n");
699                    // didn't implement other RFDR functionality b/c
700                    // driver didn't use it
701                }
702                break;
703
704              case SRR:
705                reg = regs.srr;
706                break;
707
708              case MIBC:
709                reg = regs.mibc;
710                reg &= ~(MIBC_MIBS | MIBC_ACLR);
711                break;
712
713              case VRCR:
714                reg = regs.vrcr;
715                break;
716
717              case VTCR:
718                reg = regs.vtcr;
719                break;
720
721              case VDR:
722                reg = regs.vdr;
723                break;
724
725              case CCSR:
726                reg = regs.ccsr;
727                break;
728
729              case TBICR:
730                reg = regs.tbicr;
731                break;
732
733              case TBISR:
734                reg = regs.tbisr;
735                break;
736
737              case TANAR:
738                reg = regs.tanar;
739                break;
740
741              case TANLPAR:
742                reg = regs.tanlpar;
743                break;
744
745              case TANER:
746                reg = regs.taner;
747                break;
748
749              case TESR:
750                reg = regs.tesr;
751                break;
752
753              case M5REG:
754                reg = params()->m5reg;
755                break;
756
757              default:
758                panic("reading unimplemented register: addr=%#x", daddr);
759            }
760
761            DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
762                    daddr, reg, reg);
763        }
764        break;
765
766      default:
767        panic("accessing register with invalid size: addr=%#x, size=%d",
768              daddr, req->size);
769    }
770
771    return No_Fault;
772}
773
774Fault
775NSGigE::write(MemReqPtr &req, const uint8_t *data)
776{
777    assert(ioEnable);
778
779    Addr daddr = req->paddr & 0xfff;
780    DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
781            daddr, req->paddr, req->vaddr, req->size);
782
783    if (daddr > LAST && daddr <=  RESERVED) {
784        panic("Accessing reserved register");
785    } else if (daddr > RESERVED && daddr <= 0x3FC) {
786        writeConfig(daddr & 0xff, req->size, data);
787        return No_Fault;
788    } else if (daddr > 0x3FC)
789        panic("Something is messed up!\n");
790
791    if (req->size == sizeof(uint32_t)) {
792        uint32_t reg = *(uint32_t *)data;
793        DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);
794
795        switch (daddr) {
796          case CR:
797            regs.command = reg;
798            if (reg & CR_TXD) {
799                txEnable = false;
800            } else if (reg & CR_TXE) {
801                txEnable = true;
802
803                // the kernel is enabling the transmit machine
804                if (txState == txIdle)
805                    txKick();
806            }
807
808            if (reg & CR_RXD) {
809                rxEnable = false;
810            } else if (reg & CR_RXE) {
811                rxEnable = true;
812
813                if (rxState == rxIdle)
814                    rxKick();
815            }
816
817            if (reg & CR_TXR)
818                txReset();
819
820            if (reg & CR_RXR)
821                rxReset();
822
823            if (reg & CR_SWI)
824                devIntrPost(ISR_SWI);
825
826            if (reg & CR_RST) {
827                txReset();
828                rxReset();
829
830                regsReset();
831            }
832            break;
833
834          case CFGR:
835            if (reg & CFGR_LNKSTS ||
836                reg & CFGR_SPDSTS ||
837                reg & CFGR_DUPSTS ||
838                reg & CFGR_RESERVED ||
839                reg & CFGR_T64ADDR ||
840                reg & CFGR_PCI64_DET)
841                panic("writing to read-only or reserved CFGR bits!\n");
842
843            regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
844                                   CFGR_RESERVED | CFGR_T64ADDR |
845                                   CFGR_PCI64_DET);
846
847// all these #if 0's are because i don't THINK the kernel needs to
848// have these implemented. if there is a problem relating to one of
849// these, you may need to add functionality in.
850#if 0
851            if (reg & CFGR_TBI_EN) ;
852            if (reg & CFGR_MODE_1000) ;
853#endif
854
855            if (reg & CFGR_AUTO_1000)
856                panic("CFGR_AUTO_1000 not implemented!\n");
857
858#if 0
859            if (reg & CFGR_PINT_DUPSTS ||
860                reg & CFGR_PINT_LNKSTS ||
861                reg & CFGR_PINT_SPDSTS)
862                ;
863
864            if (reg & CFGR_TMRTEST) ;
865            if (reg & CFGR_MRM_DIS) ;
866            if (reg & CFGR_MWI_DIS) ;
867
868            if (reg & CFGR_T64ADDR)
869                panic("CFGR_T64ADDR is read only register!\n");
870
871            if (reg & CFGR_PCI64_DET)
872                panic("CFGR_PCI64_DET is read only register!\n");
873
874            if (reg & CFGR_DATA64_EN) ;
875            if (reg & CFGR_M64ADDR) ;
876            if (reg & CFGR_PHY_RST) ;
877            if (reg & CFGR_PHY_DIS) ;
878#endif
879
880            if (reg & CFGR_EXTSTS_EN)
881                extstsEnable = true;
882            else
883                extstsEnable = false;
884
885#if 0
886              if (reg & CFGR_REQALG) ;
887              if (reg & CFGR_SB) ;
888              if (reg & CFGR_POW) ;
889              if (reg & CFGR_EXD) ;
890              if (reg & CFGR_PESEL) ;
891              if (reg & CFGR_BROM_DIS) ;
892              if (reg & CFGR_EXT_125) ;
893              if (reg & CFGR_BEM) ;
894#endif
895            break;
896
897          case MEAR:
898            regs.mear = reg;
899            // since phy is completely faked, MEAR_MD* don't matter
900            // and since the driver never uses MEAR_EE*, they don't
901            // matter
902#if 0
903            if (reg & MEAR_EEDI) ;
904            if (reg & MEAR_EEDO) ; // this one is read only
905            if (reg & MEAR_EECLK) ;
906            if (reg & MEAR_EESEL) ;
907            if (reg & MEAR_MDIO) ;
908            if (reg & MEAR_MDDIR) ;
909            if (reg & MEAR_MDC) ;
910#endif
911            break;
912
913          case PTSCR:
914            regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
915            // these control BISTs for various parts of chip - we
916            // don't care or do just fake that the BIST is done
917            if (reg & PTSCR_RBIST_EN)
918                regs.ptscr |= PTSCR_RBIST_DONE;
919            if (reg & PTSCR_EEBIST_EN)
920                regs.ptscr &= ~PTSCR_EEBIST_EN;
921            if (reg & PTSCR_EELOAD_EN)
922                regs.ptscr &= ~PTSCR_EELOAD_EN;
923            break;
924
925          case ISR: /* writing to the ISR has no effect */
926            panic("ISR is a read only register!\n");
927
928          case IMR:
929            regs.imr = reg;
930            devIntrChangeMask();
931            break;
932
933          case IER:
934            regs.ier = reg;
935            break;
936
937          case IHR:
938            regs.ihr = reg;
939            /* not going to implement real interrupt holdoff */
940            break;
941
942          case TXDP:
943            regs.txdp = (reg & 0xFFFFFFFC);
944            assert(txState == txIdle);
945            CTDD = false;
946            break;
947
948          case TXDP_HI:
949            regs.txdp_hi = reg;
950            break;
951
952          case TX_CFG:
953            regs.txcfg = reg;
954#if 0
955            if (reg & TX_CFG_CSI) ;
956            if (reg & TX_CFG_HBI) ;
957            if (reg & TX_CFG_MLB) ;
958            if (reg & TX_CFG_ATP) ;
959            if (reg & TX_CFG_ECRETRY) {
960                /*
961                 * this could easily be implemented, but considering
962                 * the network is just a fake pipe, wouldn't make
963                 * sense to do this
964                 */
965            }
966
967            if (reg & TX_CFG_BRST_DIS) ;
968#endif
969
970#if 0
971            /* we handle our own DMA, ignore the kernel's exhortations */
972            if (reg & TX_CFG_MXDMA) ;
973#endif
974
975            // also, we currently don't care about fill/drain
976            // thresholds though this may change in the future with
977            // more realistic networks or a driver which changes it
978            // according to feedback
979
980            break;
981
982          case GPIOR:
983            regs.gpior = reg;
984            /* these just control general purpose i/o pins, don't matter */
985            break;
986
987          case RXDP:
988            regs.rxdp = reg;
989            CRDD = false;
990            break;
991
992          case RXDP_HI:
993            regs.rxdp_hi = reg;
994            break;
995
996          case RX_CFG:
997            regs.rxcfg = reg;
998#if 0
999            if (reg & RX_CFG_AEP) ;
1000            if (reg & RX_CFG_ARP) ;
1001            if (reg & RX_CFG_STRIPCRC) ;
1002            if (reg & RX_CFG_RX_RD) ;
1003            if (reg & RX_CFG_ALP) ;
1004            if (reg & RX_CFG_AIRL) ;
1005
1006            /* we handle our own DMA, ignore what kernel says about it */
1007            if (reg & RX_CFG_MXDMA) ;
1008
1009            //also, we currently don't care about fill/drain thresholds
1010            //though this may change in the future with more realistic
1011            //networks or a driver which changes it according to feedback
1012            if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
1013#endif
1014            break;
1015
1016          case PQCR:
1017            /* there is no priority queueing used in the linux 2.6 driver */
1018            regs.pqcr = reg;
1019            break;
1020
1021          case WCSR:
1022            /* not going to implement wake on LAN */
1023            regs.wcsr = reg;
1024            break;
1025
1026          case PCR:
1027            /* not going to implement pause control */
1028            regs.pcr = reg;
1029            break;
1030
1031          case RFCR:
1032            regs.rfcr = reg;
1033
1034            rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
1035            acceptBroadcast = (reg & RFCR_AAB) ? true : false;
1036            acceptMulticast = (reg & RFCR_AAM) ? true : false;
1037            acceptUnicast = (reg & RFCR_AAU) ? true : false;
1038            acceptPerfect = (reg & RFCR_APM) ? true : false;
1039            acceptArp = (reg & RFCR_AARP) ? true : false;
1040
1041#if 0
1042            if (reg & RFCR_APAT)
1043                panic("RFCR_APAT not implemented!\n");
1044#endif
1045
1046            if (reg & RFCR_MHEN || reg & RFCR_UHEN)
1047                panic("hash filtering not implemented!\n");
1048
1049            if (reg & RFCR_ULM)
1050                panic("RFCR_ULM not implemented!\n");
1051
1052            break;
1053
1054          case RFDR:
1055            panic("the driver never writes to RFDR, something is wrong!\n");
1056
1057          case BRAR:
1058            panic("the driver never uses BRAR, something is wrong!\n");
1059
1060          case BRDR:
1061            panic("the driver never uses BRDR, something is wrong!\n");
1062
1063          case SRR:
1064            panic("SRR is read only register!\n");
1065
1066          case MIBC:
1067            panic("the driver never uses MIBC, something is wrong!\n");
1068
1069          case VRCR:
1070            regs.vrcr = reg;
1071            break;
1072
1073          case VTCR:
1074            regs.vtcr = reg;
1075            break;
1076
1077          case VDR:
1078            panic("the driver never uses VDR, something is wrong!\n");
1079            break;
1080
1081          case CCSR:
1082            /* not going to implement clockrun stuff */
1083            regs.ccsr = reg;
1084            break;
1085
1086          case TBICR:
1087            regs.tbicr = reg;
1088            if (reg & TBICR_MR_LOOPBACK)
1089                panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
1090
1091            if (reg & TBICR_MR_AN_ENABLE) {
1092                regs.tanlpar = regs.tanar;
1093                regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
1094            }
1095
1096#if 0
1097            if (reg & TBICR_MR_RESTART_AN) ;
1098#endif
1099
1100            break;
1101
1102          case TBISR:
1103            panic("TBISR is read only register!\n");
1104
1105          case TANAR:
1106            regs.tanar = reg;
1107            if (reg & TANAR_PS2)
1108                panic("this isn't used in driver, something wrong!\n");
1109
1110            if (reg & TANAR_PS1)
1111                panic("this isn't used in driver, something wrong!\n");
1112            break;
1113
1114          case TANLPAR:
1115            panic("this should only be written to by the fake phy!\n");
1116
1117          case TANER:
1118            panic("TANER is read only register!\n");
1119
1120          case TESR:
1121            regs.tesr = reg;
1122            break;
1123
1124          default:
1125            panic("invalid register access daddr=%#x", daddr);
1126        }
1127    } else {
1128        panic("Invalid Request Size");
1129    }
1130
1131    return No_Fault;
1132}
1133
1134void
1135NSGigE::devIntrPost(uint32_t interrupts)
1136{
1137    if (interrupts & ISR_RESERVE)
1138        panic("Cannot set a reserved interrupt");
1139
1140    if (interrupts & ISR_NOIMPL)
1141        warn("interrupt not implemented %#x\n", interrupts);
1142
1143    interrupts &= ~ISR_NOIMPL;
1144    regs.isr |= interrupts;
1145
1146    if (interrupts & regs.imr) {
1147        if (interrupts & ISR_SWI) {
1148            totalSwi++;
1149        }
1150        if (interrupts & ISR_RXIDLE) {
1151            totalRxIdle++;
1152        }
1153        if (interrupts & ISR_RXOK) {
1154            totalRxOk++;
1155        }
1156        if (interrupts & ISR_RXDESC) {
1157            totalRxDesc++;
1158        }
1159        if (interrupts & ISR_TXOK) {
1160            totalTxOk++;
1161        }
1162        if (interrupts & ISR_TXIDLE) {
1163            totalTxIdle++;
1164        }
1165        if (interrupts & ISR_TXDESC) {
1166            totalTxDesc++;
1167        }
1168        if (interrupts & ISR_RXORN) {
1169            totalRxOrn++;
1170        }
1171    }
1172
1173    DPRINTF(EthernetIntr,
1174            "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
1175            interrupts, regs.isr, regs.imr);
1176
1177    if ((regs.isr & regs.imr)) {
1178        Tick when = curTick;
1179        if (!(regs.isr & regs.imr & ISR_NODELAY))
1180            when += intrDelay;
1181        cpuIntrPost(when);
1182    }
1183}
1184
1185/* writing this interrupt counting stats inside this means that this function
1186   is now limited to being used to clear all interrupts upon the kernel
1187   reading isr and servicing.  just telling you in case you were thinking
1188   of expanding use.
1189*/
1190void
1191NSGigE::devIntrClear(uint32_t interrupts)
1192{
1193    if (interrupts & ISR_RESERVE)
1194        panic("Cannot clear a reserved interrupt");
1195
1196    if (regs.isr & regs.imr & ISR_SWI) {
1197        postedSwi++;
1198    }
1199    if (regs.isr & regs.imr & ISR_RXIDLE) {
1200        postedRxIdle++;
1201    }
1202    if (regs.isr & regs.imr & ISR_RXOK) {
1203        postedRxOk++;
1204    }
1205    if (regs.isr & regs.imr & ISR_RXDESC) {
1206            postedRxDesc++;
1207    }
1208    if (regs.isr & regs.imr & ISR_TXOK) {
1209        postedTxOk++;
1210    }
1211    if (regs.isr & regs.imr & ISR_TXIDLE) {
1212        postedTxIdle++;
1213    }
1214    if (regs.isr & regs.imr & ISR_TXDESC) {
1215        postedTxDesc++;
1216    }
1217    if (regs.isr & regs.imr & ISR_RXORN) {
1218        postedRxOrn++;
1219    }
1220
1221    if (regs.isr & regs.imr & (ISR_SWI | ISR_RXIDLE | ISR_RXOK | ISR_RXDESC |
1222                               ISR_TXOK | ISR_TXIDLE | ISR_TXDESC | ISR_RXORN) )
1223        postedInterrupts++;
1224
1225    interrupts &= ~ISR_NOIMPL;
1226    regs.isr &= ~interrupts;
1227
1228    DPRINTF(EthernetIntr,
1229            "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
1230            interrupts, regs.isr, regs.imr);
1231
1232    if (!(regs.isr & regs.imr))
1233        cpuIntrClear();
1234}
1235
1236void
1237NSGigE::devIntrChangeMask()
1238{
1239    DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1240            regs.isr, regs.imr, regs.isr & regs.imr);
1241
1242    if (regs.isr & regs.imr)
1243        cpuIntrPost(curTick);
1244    else
1245        cpuIntrClear();
1246}
1247
1248void
1249NSGigE::cpuIntrPost(Tick when)
1250{
1251    // If the interrupt you want to post is later than an interrupt
1252    // already scheduled, just let it post in the coming one and don't
1253    // schedule another.
1254    // HOWEVER, must be sure that the scheduled intrTick is in the
1255    // future (this was formerly the source of a bug)
1256    /**
1257     * @todo this warning should be removed and the intrTick code should
1258     * be fixed.
1259     */
1260    assert(when >= curTick);
1261    assert(intrTick >= curTick || intrTick == 0);
1262    if (when > intrTick && intrTick != 0) {
1263        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
1264                intrTick);
1265        return;
1266    }
1267
1268    intrTick = when;
1269    if (intrTick < curTick) {
1270        debug_break();
1271        intrTick = curTick;
1272    }
1273
1274    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
1275            intrTick);
1276
1277    if (intrEvent)
1278        intrEvent->squash();
1279    intrEvent = new IntrEvent(this, true);
1280    intrEvent->schedule(intrTick);
1281}
1282
1283void
1284NSGigE::cpuInterrupt()
1285{
1286    assert(intrTick == curTick);
1287
1288    // Whether or not there's a pending interrupt, we don't care about
1289    // it anymore
1290    intrEvent = 0;
1291    intrTick = 0;
1292
1293    // Don't send an interrupt if there's already one
1294    if (cpuPendingIntr) {
1295        DPRINTF(EthernetIntr,
1296                "would send an interrupt now, but there's already pending\n");
1297    } else {
1298        // Send interrupt
1299        cpuPendingIntr = true;
1300
1301        DPRINTF(EthernetIntr, "posting interrupt\n");
1302        intrPost();
1303    }
1304}
1305
1306void
1307NSGigE::cpuIntrClear()
1308{
1309    if (!cpuPendingIntr)
1310        return;
1311
1312    if (intrEvent) {
1313        intrEvent->squash();
1314        intrEvent = 0;
1315    }
1316
1317    intrTick = 0;
1318
1319    cpuPendingIntr = false;
1320
1321    DPRINTF(EthernetIntr, "clearing interrupt\n");
1322    intrClear();
1323}
1324
1325bool
1326NSGigE::cpuIntrPending() const
1327{ return cpuPendingIntr; }
1328
1329void
1330NSGigE::txReset()
1331{
1332
1333    DPRINTF(Ethernet, "transmit reset\n");
1334
1335    CTDD = false;
1336    txEnable = false;;
1337    txFragPtr = 0;
1338    assert(txDescCnt == 0);
1339    txFifo.clear();
1340    txState = txIdle;
1341    assert(txDmaState == dmaIdle);
1342}
1343
1344void
1345NSGigE::rxReset()
1346{
1347    DPRINTF(Ethernet, "receive reset\n");
1348
1349    CRDD = false;
1350    assert(rxPktBytes == 0);
1351    rxEnable = false;
1352    rxFragPtr = 0;
1353    assert(rxDescCnt == 0);
1354    assert(rxDmaState == dmaIdle);
1355    rxFifo.clear();
1356    rxState = rxIdle;
1357}
1358
1359void
1360NSGigE::regsReset()
1361{
1362    memset(&regs, 0, sizeof(regs));
1363    regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
1364    regs.mear = 0x22;
1365    regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
1366                        // fill threshold to 32 bytes
1367    regs.rxcfg = 0x4;   // set drain threshold to 16 bytes
1368    regs.srr = 0x0103;  // set the silicon revision to rev B or 0x103
1369    regs.mibc = MIBC_FRZ;
1370    regs.vdr = 0x81;    // set the vlan tag type to 802.1q
1371    regs.tesr = 0xc000; // TBI capable of both full and half duplex
1372
1373    extstsEnable = false;
1374    acceptBroadcast = false;
1375    acceptMulticast = false;
1376    acceptUnicast = false;
1377    acceptPerfect = false;
1378    acceptArp = false;
1379}
1380
1381void
1382NSGigE::rxDmaReadCopy()
1383{
1384    assert(rxDmaState == dmaReading);
1385
1386    physmem->dma_read((uint8_t *)rxDmaData, rxDmaAddr, rxDmaLen);
1387    rxDmaState = dmaIdle;
1388
1389    DPRINTF(EthernetDMA, "rx dma read  paddr=%#x len=%d\n",
1390            rxDmaAddr, rxDmaLen);
1391    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1392}
1393
1394bool
1395NSGigE::doRxDmaRead()
1396{
1397    assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1398    rxDmaState = dmaReading;
1399
1400    if (dmaInterface && !rxDmaFree) {
1401        if (dmaInterface->busy())
1402            rxDmaState = dmaReadWaiting;
1403        else
1404            dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
1405                                &rxDmaReadEvent, true);
1406        return true;
1407    }
1408
1409    if (dmaReadDelay == 0 && dmaReadFactor == 0) {
1410        rxDmaReadCopy();
1411        return false;
1412    }
1413
1414    Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1415    Tick start = curTick + dmaReadDelay + factor;
1416    rxDmaReadEvent.schedule(start);
1417    return true;
1418}
1419
1420void
1421NSGigE::rxDmaReadDone()
1422{
1423    assert(rxDmaState == dmaReading);
1424    rxDmaReadCopy();
1425
1426    // If the transmit state machine has a pending DMA, let it go first
1427    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1428        txKick();
1429
1430    rxKick();
1431}
1432
1433void
1434NSGigE::rxDmaWriteCopy()
1435{
1436    assert(rxDmaState == dmaWriting);
1437
1438    physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen);
1439    rxDmaState = dmaIdle;
1440
1441    DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1442            rxDmaAddr, rxDmaLen);
1443    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1444}
1445
1446bool
1447NSGigE::doRxDmaWrite()
1448{
1449    assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1450    rxDmaState = dmaWriting;
1451
1452    if (dmaInterface && !rxDmaFree) {
1453        if (dmaInterface->busy())
1454            rxDmaState = dmaWriteWaiting;
1455        else
1456            dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
1457                                &rxDmaWriteEvent, true);
1458        return true;
1459    }
1460
1461    if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
1462        rxDmaWriteCopy();
1463        return false;
1464    }
1465
1466    Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1467    Tick start = curTick + dmaWriteDelay + factor;
1468    rxDmaWriteEvent.schedule(start);
1469    return true;
1470}
1471
1472void
1473NSGigE::rxDmaWriteDone()
1474{
1475    assert(rxDmaState == dmaWriting);
1476    rxDmaWriteCopy();
1477
1478    // If the transmit state machine has a pending DMA, let it go first
1479    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1480        txKick();
1481
1482    rxKick();
1483}
1484
1485void
1486NSGigE::rxKick()
1487{
1488    DPRINTF(EthernetSM, "receive kick rxState=%s (rxBuf.size=%d)\n",
1489            NsRxStateStrings[rxState], rxFifo.size());
1490
1491  next:
1492    if (clock) {
1493        if (rxKickTick > curTick) {
1494            DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
1495                    rxKickTick);
1496
1497            goto exit;
1498        }
1499
1500        // Go to the next state machine clock tick.
1501        rxKickTick = curTick + cycles(1);
1502    }
1503
1504    switch(rxDmaState) {
1505      case dmaReadWaiting:
1506        if (doRxDmaRead())
1507            goto exit;
1508        break;
1509      case dmaWriteWaiting:
1510        if (doRxDmaWrite())
1511            goto exit;
1512        break;
1513      default:
1514        break;
1515    }
1516
1517    // see state machine from spec for details
1518    // the way this works is, if you finish work on one state and can
1519    // go directly to another, you do that through jumping to the
1520    // label "next".  however, if you have intermediate work, like DMA
1521    // so that you can't go to the next state yet, you go to exit and
1522    // exit the loop.  however, when the DMA is done it will trigger
1523    // an event and come back to this loop.
1524    switch (rxState) {
1525      case rxIdle:
1526        if (!rxEnable) {
1527            DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
1528            goto exit;
1529        }
1530
1531        if (CRDD) {
1532            rxState = rxDescRefr;
1533
1534            rxDmaAddr = regs.rxdp & 0x3fffffff;
1535            rxDmaData = &rxDescCache + offsetof(ns_desc, link);
1536            rxDmaLen = sizeof(rxDescCache.link);
1537            rxDmaFree = dmaDescFree;
1538
1539            descDmaReads++;
1540            descDmaRdBytes += rxDmaLen;
1541
1542            if (doRxDmaRead())
1543                goto exit;
1544        } else {
1545            rxState = rxDescRead;
1546
1547            rxDmaAddr = regs.rxdp & 0x3fffffff;
1548            rxDmaData = &rxDescCache;
1549            rxDmaLen = sizeof(ns_desc);
1550            rxDmaFree = dmaDescFree;
1551
1552            descDmaReads++;
1553            descDmaRdBytes += rxDmaLen;
1554
1555            if (doRxDmaRead())
1556                goto exit;
1557        }
1558        break;
1559
1560      case rxDescRefr:
1561        if (rxDmaState != dmaIdle)
1562            goto exit;
1563
1564        rxState = rxAdvance;
1565        break;
1566
1567     case rxDescRead:
1568        if (rxDmaState != dmaIdle)
1569            goto exit;
1570
1571        DPRINTF(EthernetDesc, "rxDescCache: addr=%08x read descriptor\n",
1572                regs.rxdp & 0x3fffffff);
1573        DPRINTF(EthernetDesc,
1574                "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1575                rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
1576                rxDescCache.extsts);
1577
1578        if (rxDescCache.cmdsts & CMDSTS_OWN) {
1579            devIntrPost(ISR_RXIDLE);
1580            rxState = rxIdle;
1581            goto exit;
1582        } else {
1583            rxState = rxFifoBlock;
1584            rxFragPtr = rxDescCache.bufptr;
1585            rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK;
1586        }
1587        break;
1588
1589      case rxFifoBlock:
1590        if (!rxPacket) {
1591            /**
1592             * @todo in reality, we should be able to start processing
1593             * the packet as it arrives, and not have to wait for the
1594             * full packet ot be in the receive fifo.
1595             */
1596            if (rxFifo.empty())
1597                goto exit;
1598
1599            DPRINTF(EthernetSM, "****processing receive of new packet****\n");
1600
1601            // If we don't have a packet, grab a new one from the fifo.
1602            rxPacket = rxFifo.front();
1603            rxPktBytes = rxPacket->length;
1604            rxPacketBufPtr = rxPacket->data;
1605
1606#if TRACING_ON
1607            if (DTRACE(Ethernet)) {
1608                IpPtr ip(rxPacket);
1609                if (ip) {
1610                    DPRINTF(Ethernet, "ID is %d\n", ip->id());
1611                    TcpPtr tcp(ip);
1612                    if (tcp) {
1613                        DPRINTF(Ethernet,
1614                                "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1615                                tcp->sport(), tcp->dport(), tcp->seq(),
1616                                tcp->ack());
1617                    }
1618                }
1619            }
1620#endif
1621
1622            // sanity check - i think the driver behaves like this
1623            assert(rxDescCnt >= rxPktBytes);
1624            rxFifo.pop();
1625        }
1626
1627
1628        // dont' need the && rxDescCnt > 0 if driver sanity check
1629        // above holds
1630        if (rxPktBytes > 0) {
1631            rxState = rxFragWrite;
1632            // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1633            // check holds
1634            rxXferLen = rxPktBytes;
1635
1636            rxDmaAddr = rxFragPtr & 0x3fffffff;
1637            rxDmaData = rxPacketBufPtr;
1638            rxDmaLen = rxXferLen;
1639            rxDmaFree = dmaDataFree;
1640
1641            if (doRxDmaWrite())
1642                goto exit;
1643
1644        } else {
1645            rxState = rxDescWrite;
1646
1647            //if (rxPktBytes == 0) {  /* packet is done */
1648            assert(rxPktBytes == 0);
1649            DPRINTF(EthernetSM, "done with receiving packet\n");
1650
1651            rxDescCache.cmdsts |= CMDSTS_OWN;
1652            rxDescCache.cmdsts &= ~CMDSTS_MORE;
1653            rxDescCache.cmdsts |= CMDSTS_OK;
1654            rxDescCache.cmdsts &= 0xffff0000;
1655            rxDescCache.cmdsts += rxPacket->length;   //i.e. set CMDSTS_SIZE
1656
1657#if 0
1658            /*
1659             * all the driver uses these are for its own stats keeping
1660             * which we don't care about, aren't necessary for
1661             * functionality and doing this would just slow us down.
1662             * if they end up using this in a later version for
1663             * functional purposes, just undef
1664             */
1665            if (rxFilterEnable) {
1666                rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK;
1667                const EthAddr &dst = rxFifoFront()->dst();
1668                if (dst->unicast())
1669                    rxDescCache.cmdsts |= CMDSTS_DEST_SELF;
1670                if (dst->multicast())
1671                    rxDescCache.cmdsts |= CMDSTS_DEST_MULTI;
1672                if (dst->broadcast())
1673                    rxDescCache.cmdsts |= CMDSTS_DEST_MASK;
1674            }
1675#endif
1676
1677            IpPtr ip(rxPacket);
1678            if (extstsEnable && ip) {
1679                rxDescCache.extsts |= EXTSTS_IPPKT;
1680                rxIpChecksums++;
1681                if (cksum(ip) != 0) {
1682                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
1683                    rxDescCache.extsts |= EXTSTS_IPERR;
1684                }
1685                TcpPtr tcp(ip);
1686                UdpPtr udp(ip);
1687                if (tcp) {
1688                    rxDescCache.extsts |= EXTSTS_TCPPKT;
1689                    rxTcpChecksums++;
1690                    if (cksum(tcp) != 0) {
1691                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
1692                        rxDescCache.extsts |= EXTSTS_TCPERR;
1693
1694                    }
1695                } else if (udp) {
1696                    rxDescCache.extsts |= EXTSTS_UDPPKT;
1697                    rxUdpChecksums++;
1698                    if (cksum(udp) != 0) {
1699                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
1700                        rxDescCache.extsts |= EXTSTS_UDPERR;
1701                    }
1702                }
1703            }
1704            rxPacket = 0;
1705
1706            /*
1707             * the driver seems to always receive into desc buffers
1708             * of size 1514, so you never have a pkt that is split
1709             * into multiple descriptors on the receive side, so
1710             * i don't implement that case, hence the assert above.
1711             */
1712
1713            DPRINTF(EthernetDesc,
1714                    "rxDescCache: addr=%08x writeback cmdsts extsts\n",
1715                    regs.rxdp & 0x3fffffff);
1716            DPRINTF(EthernetDesc,
1717                    "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
1718                    rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
1719                    rxDescCache.extsts);
1720
1721            rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
1722            rxDmaData = &(rxDescCache.cmdsts);
1723            rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts);
1724            rxDmaFree = dmaDescFree;
1725
1726            descDmaWrites++;
1727            descDmaWrBytes += rxDmaLen;
1728
1729            if (doRxDmaWrite())
1730                goto exit;
1731        }
1732        break;
1733
1734      case rxFragWrite:
1735        if (rxDmaState != dmaIdle)
1736            goto exit;
1737
1738        rxPacketBufPtr += rxXferLen;
1739        rxFragPtr += rxXferLen;
1740        rxPktBytes -= rxXferLen;
1741
1742        rxState = rxFifoBlock;
1743        break;
1744
1745      case rxDescWrite:
1746        if (rxDmaState != dmaIdle)
1747            goto exit;
1748
1749        assert(rxDescCache.cmdsts & CMDSTS_OWN);
1750
1751        assert(rxPacket == 0);
1752        devIntrPost(ISR_RXOK);
1753
1754        if (rxDescCache.cmdsts & CMDSTS_INTR)
1755            devIntrPost(ISR_RXDESC);
1756
1757        if (!rxEnable) {
1758            DPRINTF(EthernetSM, "Halting the RX state machine\n");
1759            rxState = rxIdle;
1760            goto exit;
1761        } else
1762            rxState = rxAdvance;
1763        break;
1764
1765      case rxAdvance:
1766        if (rxDescCache.link == 0) {
1767            devIntrPost(ISR_RXIDLE);
1768            rxState = rxIdle;
1769            CRDD = true;
1770            goto exit;
1771        } else {
1772            rxState = rxDescRead;
1773            regs.rxdp = rxDescCache.link;
1774            CRDD = false;
1775
1776            rxDmaAddr = regs.rxdp & 0x3fffffff;
1777            rxDmaData = &rxDescCache;
1778            rxDmaLen = sizeof(ns_desc);
1779            rxDmaFree = dmaDescFree;
1780
1781            if (doRxDmaRead())
1782                goto exit;
1783        }
1784        break;
1785
1786      default:
1787        panic("Invalid rxState!");
1788    }
1789
1790    DPRINTF(EthernetSM, "entering next rxState=%s\n",
1791            NsRxStateStrings[rxState]);
1792    goto next;
1793
1794  exit:
1795    /**
1796     * @todo do we want to schedule a future kick?
1797     */
1798    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
1799            NsRxStateStrings[rxState]);
1800
1801    if (clock && !rxKickEvent.scheduled())
1802        rxKickEvent.schedule(rxKickTick);
1803}
1804
1805void
1806NSGigE::transmit()
1807{
1808    if (txFifo.empty()) {
1809        DPRINTF(Ethernet, "nothing to transmit\n");
1810        return;
1811    }
1812
1813    DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
1814            txFifo.size());
1815    if (interface->sendPacket(txFifo.front())) {
1816#if TRACING_ON
1817        if (DTRACE(Ethernet)) {
1818            IpPtr ip(txFifo.front());
1819            if (ip) {
1820                DPRINTF(Ethernet, "ID is %d\n", ip->id());
1821                TcpPtr tcp(ip);
1822                if (tcp) {
1823                    DPRINTF(Ethernet,
1824                            "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1825                            tcp->sport(), tcp->dport(), tcp->seq(), tcp->ack());
1826                }
1827            }
1828        }
1829#endif
1830
1831        DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
1832        txBytes += txFifo.front()->length;
1833        txPackets++;
1834
1835        DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
1836                txFifo.avail());
1837        txFifo.pop();
1838
1839        /*
1840         * Normally the descriptor writeback would happen here, and
1841         * ONLY after it completed would this interrupt be posted.
1842         * Since transmission never fails in the simulator, we post
1843         * the interrupt here instead; doing it strictly in order
1844         * would only complicate the code and is functionally the same.
1845         */
1846        devIntrPost(ISR_TXOK);
1847    }
1848
1849    if (!txFifo.empty() && !txEvent.scheduled()) {
1850        DPRINTF(Ethernet, "reschedule transmit\n");
1851        txEvent.schedule(curTick + retryTime);
1852    }
1853}
1854
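/*
 * Transmit-side DMA read helpers.  txDmaReadCopy() does the actual
 * functional copy from physical memory into the buffer txDmaData
 * points at.  doTxDmaRead() models the timing: if a dmaInterface is
 * present and this transfer is not marked free, the request goes
 * through the bus model (or waits if the bus is busy); otherwise the
 * copy completes immediately, or after dmaReadDelay plus a
 * per-64-byte dmaReadFactor charge.
 */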
1855void
1856NSGigE::txDmaReadCopy()
1857{
1858    assert(txDmaState == dmaReading);
1859
1860    physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen);
1861    txDmaState = dmaIdle;
1862
1863    DPRINTF(EthernetDMA, "tx dma read  paddr=%#x len=%d\n",
1864            txDmaAddr, txDmaLen);
1865    DDUMP(EthernetDMA, txDmaData, txDmaLen);
1866}
1867
1868bool
1869NSGigE::doTxDmaRead()
1870{
1871    assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1872    txDmaState = dmaReading;
1873
1874    if (dmaInterface && !txDmaFree) {
1875        if (dmaInterface->busy())
1876            txDmaState = dmaReadWaiting;
1877        else
1878            dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
1879                                &txDmaReadEvent, true);
1880        return true;
1881    }
1882
1883    if (dmaReadDelay == 0 && dmaReadFactor == 0.0) {
1884        txDmaReadCopy();
1885        return false;
1886    }
1887
1888    Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1889    Tick start = curTick + dmaReadDelay + factor;
1890    txDmaReadEvent.schedule(start);
1891    return true;
1892}
1893
1894void
1895NSGigE::txDmaReadDone()
1896{
1897    assert(txDmaState == dmaReading);
1898    txDmaReadCopy();
1899
1900    // If the receive state machine has a pending DMA, let it go first
1901    if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1902        rxKick();
1903
1904    txKick();
1905}
1906
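/*
 * Write-side counterparts of the helpers above: txDmaWriteCopy()
 * copies the buffer back to physical memory, and doTxDmaWrite()
 * applies the same timing model using dmaWriteDelay and
 * dmaWriteFactor.
 */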
1907void
1908NSGigE::txDmaWriteCopy()
1909{
1910    assert(txDmaState == dmaWriting);
1911
1912    physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen);
1913    txDmaState = dmaIdle;
1914
1915    DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1916            txDmaAddr, txDmaLen);
1917    DDUMP(EthernetDMA, txDmaData, txDmaLen);
1918}
1919
1920bool
1921NSGigE::doTxDmaWrite()
1922{
1923    assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1924    txDmaState = dmaWriting;
1925
1926    if (dmaInterface && !txDmaFree) {
1927        if (dmaInterface->busy())
1928            txDmaState = dmaWriteWaiting;
1929        else
1930            dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick,
1931                                &txDmaWriteEvent, true);
1932        return true;
1933    }
1934
1935    if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
1936        txDmaWriteCopy();
1937        return false;
1938    }
1939
1940    Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1941    Tick start = curTick + dmaWriteDelay + factor;
1942    txDmaWriteEvent.schedule(start);
1943    return true;
1944}
1945
1946void
1947NSGigE::txDmaWriteDone()
1948{
1949    assert(txDmaState == dmaWriting);
1950    txDmaWriteCopy();
1951
1952    // If the receive state machine has a pending DMA, let it go first
1953    if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1954        rxKick();
1955
1956    txKick();
1957}
1958
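/*
 * Transmit state machine.  Starting from txIdle it walks the
 * descriptor list in memory: fetch a descriptor (txDescRead), DMA
 * the buffer fragments it describes into a staging packet
 * (txFifoBlock / txFragRead), write the completed cmdsts/extsts back
 * (txDescWrite), and follow the link pointer to the next descriptor
 * (txAdvance).  With a non-zero clock the machine advances one state
 * per device cycle and reschedules itself via txKickEvent; with no
 * clock it runs until it stalls on a DMA or goes idle.
 */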
1959void
1960NSGigE::txKick()
1961{
1962    DPRINTF(EthernetSM, "transmit kick txState=%s\n",
1963            NsTxStateStrings[txState]);
1964
1965  next:
1966    if (clock) {
1967        if (txKickTick > curTick) {
1968            DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
1969                    txKickTick);
1970            goto exit;
1971        }
1972
1973        // Go to the next state machine clock tick.
1974        txKickTick = curTick + cycles(1);
1975    }
1976
1977    switch(txDmaState) {
1978      case dmaReadWaiting:
1979        if (doTxDmaRead())
1980            goto exit;
1981        break;
1982      case dmaWriteWaiting:
1983        if (doTxDmaWrite())
1984            goto exit;
1985        break;
1986      default:
1987        break;
1988    }
1989
1990    switch (txState) {
1991      case txIdle:
1992        if (!txEnable) {
1993            DPRINTF(EthernetSM, "Transmit disabled.  Nothing to do.\n");
1994            goto exit;
1995        }
1996
1997        if (CTDD) {
1998            txState = txDescRefr;
1999
2000            txDmaAddr = regs.txdp & 0x3fffffff;
2001            txDmaData = (uint8_t *)&txDescCache + offsetof(ns_desc, link);
2002            txDmaLen = sizeof(txDescCache.link);
2003            txDmaFree = dmaDescFree;
2004
2005            descDmaReads++;
2006            descDmaRdBytes += txDmaLen;
2007
2008            if (doTxDmaRead())
2009                goto exit;
2010
2011        } else {
2012            txState = txDescRead;
2013
2014            txDmaAddr = regs.txdp & 0x3fffffff;
2015            txDmaData = &txDescCache;
2016            txDmaLen = sizeof(ns_desc);
2017            txDmaFree = dmaDescFree;
2018
2019            descDmaReads++;
2020            descDmaRdBytes += txDmaLen;
2021
2022            if (doTxDmaRead())
2023                goto exit;
2024        }
2025        break;
2026
2027      case txDescRefr:
2028        if (txDmaState != dmaIdle)
2029            goto exit;
2030
2031        txState = txAdvance;
2032        break;
2033
2034      case txDescRead:
2035        if (txDmaState != dmaIdle)
2036            goto exit;
2037
2038        DPRINTF(EthernetDesc, "txDescCache: addr=%08x read descriptor\n",
2039                regs.txdp & 0x3fffffff);
2040        DPRINTF(EthernetDesc,
2041                "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
2042                txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts,
2043                txDescCache.extsts);
2044
2045        if (txDescCache.cmdsts & CMDSTS_OWN) {
2046            txState = txFifoBlock;
2047            txFragPtr = txDescCache.bufptr;
2048            txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK;
2049        } else {
2050            devIntrPost(ISR_TXIDLE);
2051            txState = txIdle;
2052            goto exit;
2053        }
2054        break;
2055
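      /*
       * txFifoBlock is the heart of the transmit machine: allocate a
       * staging buffer for a new packet, and either write back the
       * descriptor (more fragments follow in another descriptor),
       * finish the packet (optional checksum offload, push into
       * txFifo, write back cmdsts/extsts, transmit), or DMA the next
       * fragment into the staging buffer if txFifo has room.
       */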
2056      case txFifoBlock:
2057        if (!txPacket) {
2058            DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
2059            txPacket = new PacketData(16384);
2060            txPacketBufPtr = txPacket->data;
2061        }
2062
2063        if (txDescCnt == 0) {
2064            DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
2065            if (txDescCache.cmdsts & CMDSTS_MORE) {
2066                DPRINTF(EthernetSM, "there are more descriptors to come\n");
2067                txState = txDescWrite;
2068
2069                txDescCache.cmdsts &= ~CMDSTS_OWN;
2070
2071                txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts);
2072                txDmaAddr &= 0x3fffffff;
2073                txDmaData = &(txDescCache.cmdsts);
2074                txDmaLen = sizeof(txDescCache.cmdsts);
2075                txDmaFree = dmaDescFree;
2076
2077                if (doTxDmaWrite())
2078                    goto exit;
2079
2080            } else { /* this packet is totally done */
2081                DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
2082                /* deal with the packet that just finished */
2083                if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
2084                    IpPtr ip(txPacket);
2085                    if (txDescCache.extsts & EXTSTS_UDPPKT) {
2086                        UdpPtr udp(ip);
2087                        udp->sum(0);
2088                        udp->sum(cksum(udp));
2089                        txUdpChecksums++;
2090                    } else if (txDescCache.extsts & EXTSTS_TCPPKT) {
2091                        TcpPtr tcp(ip);
2092                        tcp->sum(0);
2093                        tcp->sum(cksum(tcp));
2094                        txTcpChecksums++;
2095                    }
2096                    if (txDescCache.extsts & EXTSTS_IPPKT) {
2097                        ip->sum(0);
2098                        ip->sum(cksum(ip));
2099                        txIpChecksums++;
2100                    }
2101                }
2102
2103                txPacket->length = txPacketBufPtr - txPacket->data;
2104                // the receive path can't handle a packet bigger
2105                // than 1514 bytes, so make sure we never send one
2106                assert(txPacket->length <= 1514);
2107#ifndef NDEBUG
2108                bool success =
2109#endif
2110                    txFifo.push(txPacket);
2111                assert(success);
2112
2113                /*
2114                 * The following section is not to spec, but should
2115                 * be functionally equivalent.  Normally the chip
2116                 * waits until the transmit has actually occurred
2117                 * before writing back the descriptor, because it
2118                 * must see whether the packet was successfully
2119                 * transmitted before deciding to set CMDSTS_OK.
2120                 * In the simulator transmission always succeeds,
2121                 * and writing this exactly to spec would complicate
2122                 * the code, so we just do the writeback here.
2123                 */
2124
2125                txDescCache.cmdsts &= ~CMDSTS_OWN;
2126                txDescCache.cmdsts |= CMDSTS_OK;
2127
2128                DPRINTF(EthernetDesc,
2129                        "txDesc writeback: cmdsts=%08x extsts=%08x\n",
2130                        txDescCache.cmdsts, txDescCache.extsts);
2131
2132                txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts);
2133                txDmaAddr &= 0x3fffffff;
2134                txDmaData = &(txDescCache.cmdsts);
2135                txDmaLen = sizeof(txDescCache.cmdsts) +
2136                    sizeof(txDescCache.extsts);
2137                txDmaFree = dmaDescFree;
2138
2139                descDmaWrites++;
2140                descDmaWrBytes += txDmaLen;
2141
2142                transmit();
2143                txPacket = 0;
2144
2145                if (!txEnable) {
2146                    DPRINTF(EthernetSM, "halting TX state machine\n");
2147                    txState = txIdle;
2148                    goto exit;
2149                } else
2150                    txState = txAdvance;
2151
2152                if (doTxDmaWrite())
2153                    goto exit;
2154            }
2155        } else {
2156            DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
2157            if (!txFifo.full()) {
2158                txState = txFragRead;
2159
2160                /*
2161                 * The number of bytes to transfer is whatever is
2162                 * left in the descriptor (txDescCnt) or, if there
2163                 * is not enough room in the fifo, whatever room is
2164                 * left in the fifo.
2165                 */
2166                txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());
2167
2168                txDmaAddr = txFragPtr & 0x3fffffff;
2169                txDmaData = txPacketBufPtr;
2170                txDmaLen = txXferLen;
2171                txDmaFree = dmaDataFree;
2172
2173                if (doTxDmaRead())
2174                    goto exit;
2175            } else {
2176                txState = txFifoBlock;
2177                transmit();
2178
2179                goto exit;
2180            }
2181
2182        }
2183        break;
2184
2185      case txFragRead:
2186        if (txDmaState != dmaIdle)
2187            goto exit;
2188
2189        txPacketBufPtr += txXferLen;
2190        txFragPtr += txXferLen;
2191        txDescCnt -= txXferLen;
2192        txFifo.reserve(txXferLen);
2193
2194        txState = txFifoBlock;
2195        break;
2196
2197      case txDescWrite:
2198        if (txDmaState != dmaIdle)
2199            goto exit;
2200
2201        if (txDescCache.cmdsts & CMDSTS_INTR)
2202            devIntrPost(ISR_TXDESC);
2203
2204        if (!txEnable) {
2205            DPRINTF(EthernetSM, "halting TX state machine\n");
2206            txState = txIdle;
2207            goto exit;
2208        } else
2209            txState = txAdvance;
2210        break;
2211
2212      case txAdvance:
2213        if (txDescCache.link == 0) {
2214            devIntrPost(ISR_TXIDLE);
2215            txState = txIdle;
2216            goto exit;
2217        } else {
2218            txState = txDescRead;
2219            regs.txdp = txDescCache.link;
2220            CTDD = false;
2221
2222            txDmaAddr = txDescCache.link & 0x3fffffff;
2223            txDmaData = &txDescCache;
2224            txDmaLen = sizeof(ns_desc);
2225            txDmaFree = dmaDescFree;
2226
2227            if (doTxDmaRead())
2228                goto exit;
2229        }
2230        break;
2231
2232      default:
2233        panic("invalid state");
2234    }
2235
2236    DPRINTF(EthernetSM, "entering next txState=%s\n",
2237            NsTxStateStrings[txState]);
2238    goto next;
2239
2240  exit:
2241    /**
2242     * @todo do we want to schedule a future kick?
2243     */
2244    DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
2245            NsTxStateStrings[txState]);
2246
2247    if (clock && !txKickEvent.scheduled())
2248        txKickEvent.schedule(txKickTick);
2249}
2250
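/*
 * Called when the outgoing packet transfer completes.  If more data
 * is waiting in txFifo, (re)schedule txEvent one cycle from now so
 * transmit() runs again.
 */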
2251void
2252NSGigE::transferDone()
2253{
2254    if (txFifo.empty()) {
2255        DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2256        return;
2257    }
2258
2259    DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2260
2261    if (txEvent.scheduled())
2262        txEvent.reschedule(curTick + cycles(1));
2263    else
2264        txEvent.schedule(curTick + cycles(1));
2265}
2266
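/*
 * Receive address filter; returns true if the packet should be
 * dropped.  Unicast frames are kept when acceptUnicast is set, when
 * the destination matches the perfect-match address from the ROM
 * (acceptPerfect), or when acceptArp is set and the frame is ARP;
 * broadcast and multicast frames are kept according to
 * acceptBroadcast and acceptMulticast.
 */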
2267bool
2268NSGigE::rxFilter(const PacketPtr &packet)
2269{
2270    EthPtr eth = packet;
2271    bool drop = true;
2272    string type;
2273
2274    const EthAddr &dst = eth->dst();
2275    if (dst.unicast()) {
2276        // If we're accepting all unicast addresses
2277        if (acceptUnicast)
2278            drop = false;
2279
2280        // If the destination matches the perfect-match address in the ROM
2281        if (acceptPerfect && dst == rom.perfectMatch)
2282            drop = false;
2283
2284        if (acceptArp && eth->type() == ETH_TYPE_ARP)
2285            drop = false;
2286
2287    } else if (dst.broadcast()) {
2288        // if we're accepting broadcasts
2289        if (acceptBroadcast)
2290            drop = false;
2291
2292    } else if (dst.multicast()) {
2293        // if we're accepting all multicasts
2294        if (acceptMulticast)
2295            drop = false;
2296
2297    }
2298
2299    if (drop) {
2300        DPRINTF(Ethernet, "rxFilter drop\n");
2301        DDUMP(EthernetData, packet->data, packet->length);
2302    }
2303
2304    return drop;
2305}
2306
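/*
 * Entry point for packets arriving from the link.  Updates receive
 * statistics, drops the packet if receive is disabled or the filter
 * rejects it, posts ISR_RXORN and refuses the packet if it will not
 * fit in rxFifo, and otherwise queues it and kicks the receive state
 * machine.
 */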
2307bool
2308NSGigE::recvPacket(PacketPtr packet)
2309{
2310    rxBytes += packet->length;
2311    rxPackets++;
2312
2313    DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
2314            rxFifo.avail());
2315
2316    if (!rxEnable) {
2317        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
2318        interface->recvDone();
2319        return true;
2320    }
2321
2322    if (rxFilterEnable && rxFilter(packet)) {
2323        DPRINTF(Ethernet, "packet filtered...dropped\n");
2324        interface->recvDone();
2325        return true;
2326    }
2327
2328    if (rxFifo.avail() < packet->length) {
2329#if TRACING_ON
2330        IpPtr ip(packet);
2331        TcpPtr tcp(ip);
2332        if (ip) {
2333            DPRINTF(Ethernet,
2334                    "packet won't fit in receive buffer...pkt ID %d dropped\n",
2335                    ip->id());
2336            if (tcp) {
2337                DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
2338            }
2339        }
2340#endif
2341        droppedPackets++;
2342        devIntrPost(ISR_RXORN);
2343        return false;
2344    }
2345
2346    rxFifo.push(packet);
2347    interface->recvDone();
2348
2349    rxKick();
2350    return true;
2351}
2352
2353//=====================================================================
2354// Serialization (checkpointing) support
2355//
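/*
 * Any DMA still in flight is completed functionally (via the
 * *DmaReadCopy/*DmaWriteCopy helpers) at the top of serialize(), so
 * no partially finished DMA has to be represented in the checkpoint.
 * Pending events are saved as ticks and rescheduled in unserialize().
 */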
2356void
2357NSGigE::serialize(ostream &os)
2358{
2359    // Serialize the PciDev base class
2360    PciDev::serialize(os);
2361
2362    /*
2363     * Finalize any DMA events now.
2364     */
2365    if (rxDmaReadEvent.scheduled())
2366        rxDmaReadCopy();
2367    if (rxDmaWriteEvent.scheduled())
2368        rxDmaWriteCopy();
2369    if (txDmaReadEvent.scheduled())
2370        txDmaReadCopy();
2371    if (txDmaWriteEvent.scheduled())
2372        txDmaWriteCopy();
2373
2374    /*
2375     * Serialize the device registers
2376     */
2377    SERIALIZE_SCALAR(regs.command);
2378    SERIALIZE_SCALAR(regs.config);
2379    SERIALIZE_SCALAR(regs.mear);
2380    SERIALIZE_SCALAR(regs.ptscr);
2381    SERIALIZE_SCALAR(regs.isr);
2382    SERIALIZE_SCALAR(regs.imr);
2383    SERIALIZE_SCALAR(regs.ier);
2384    SERIALIZE_SCALAR(regs.ihr);
2385    SERIALIZE_SCALAR(regs.txdp);
2386    SERIALIZE_SCALAR(regs.txdp_hi);
2387    SERIALIZE_SCALAR(regs.txcfg);
2388    SERIALIZE_SCALAR(regs.gpior);
2389    SERIALIZE_SCALAR(regs.rxdp);
2390    SERIALIZE_SCALAR(regs.rxdp_hi);
2391    SERIALIZE_SCALAR(regs.rxcfg);
2392    SERIALIZE_SCALAR(regs.pqcr);
2393    SERIALIZE_SCALAR(regs.wcsr);
2394    SERIALIZE_SCALAR(regs.pcr);
2395    SERIALIZE_SCALAR(regs.rfcr);
2396    SERIALIZE_SCALAR(regs.rfdr);
2397    SERIALIZE_SCALAR(regs.srr);
2398    SERIALIZE_SCALAR(regs.mibc);
2399    SERIALIZE_SCALAR(regs.vrcr);
2400    SERIALIZE_SCALAR(regs.vtcr);
2401    SERIALIZE_SCALAR(regs.vdr);
2402    SERIALIZE_SCALAR(regs.ccsr);
2403    SERIALIZE_SCALAR(regs.tbicr);
2404    SERIALIZE_SCALAR(regs.tbisr);
2405    SERIALIZE_SCALAR(regs.tanar);
2406    SERIALIZE_SCALAR(regs.tanlpar);
2407    SERIALIZE_SCALAR(regs.taner);
2408    SERIALIZE_SCALAR(regs.tesr);
2409
2410    SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2411
2412    SERIALIZE_SCALAR(ioEnable);
2413
2414    /*
2415     * Serialize the data Fifos
2416     */
2417    rxFifo.serialize("rxFifo", os);
2418    txFifo.serialize("txFifo", os);
2419
2420    /*
2421     * Serialize the various helper variables
2422     */
2423    bool txPacketExists = txPacket;
2424    SERIALIZE_SCALAR(txPacketExists);
2425    if (txPacketExists) {
2426        txPacket->length = txPacketBufPtr - txPacket->data;
2427        txPacket->serialize("txPacket", os);
2428        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
2429        SERIALIZE_SCALAR(txPktBufPtr);
2430    }
2431
2432    bool rxPacketExists = rxPacket;
2433    SERIALIZE_SCALAR(rxPacketExists);
2434    if (rxPacketExists) {
2435        rxPacket->serialize("rxPacket", os);
2436        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
2437        SERIALIZE_SCALAR(rxPktBufPtr);
2438    }
2439
2440    SERIALIZE_SCALAR(txXferLen);
2441    SERIALIZE_SCALAR(rxXferLen);
2442
2443    /*
2444     * Serialize DescCaches
2445     */
2446    SERIALIZE_SCALAR(txDescCache.link);
2447    SERIALIZE_SCALAR(txDescCache.bufptr);
2448    SERIALIZE_SCALAR(txDescCache.cmdsts);
2449    SERIALIZE_SCALAR(txDescCache.extsts);
2450    SERIALIZE_SCALAR(rxDescCache.link);
2451    SERIALIZE_SCALAR(rxDescCache.bufptr);
2452    SERIALIZE_SCALAR(rxDescCache.cmdsts);
2453    SERIALIZE_SCALAR(rxDescCache.extsts);
2454    SERIALIZE_SCALAR(extstsEnable);
2455
2456    /*
2457     * Serialize tx state machine
2458     */
2459    int txState = this->txState;
2460    SERIALIZE_SCALAR(txState);
2461    SERIALIZE_SCALAR(txEnable);
2462    SERIALIZE_SCALAR(CTDD);
2463    SERIALIZE_SCALAR(txFragPtr);
2464    SERIALIZE_SCALAR(txDescCnt);
2465    int txDmaState = this->txDmaState;
2466    SERIALIZE_SCALAR(txDmaState);
2467    SERIALIZE_SCALAR(txKickTick);
2468
2469    /*
2470     * Serialize rx state machine
2471     */
2472    int rxState = this->rxState;
2473    SERIALIZE_SCALAR(rxState);
2474    SERIALIZE_SCALAR(rxEnable);
2475    SERIALIZE_SCALAR(CRDD);
2476    SERIALIZE_SCALAR(rxPktBytes);
2477    SERIALIZE_SCALAR(rxFragPtr);
2478    SERIALIZE_SCALAR(rxDescCnt);
2479    int rxDmaState = this->rxDmaState;
2480    SERIALIZE_SCALAR(rxDmaState);
2481    SERIALIZE_SCALAR(rxKickTick);
2482
2483    /*
2484     * If there's a pending transmit, store the time so we can
2485     * reschedule it later
2486     */
2487    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
2488    SERIALIZE_SCALAR(transmitTick);
2489
2490    /*
2491     * receive address filter settings
2492     */
2493    SERIALIZE_SCALAR(rxFilterEnable);
2494    SERIALIZE_SCALAR(acceptBroadcast);
2495    SERIALIZE_SCALAR(acceptMulticast);
2496    SERIALIZE_SCALAR(acceptUnicast);
2497    SERIALIZE_SCALAR(acceptPerfect);
2498    SERIALIZE_SCALAR(acceptArp);
2499
2500    /*
2501     * Keep track of pending interrupt status.
2502     */
2503    SERIALIZE_SCALAR(intrTick);
2504    SERIALIZE_SCALAR(cpuPendingIntr);
2505    Tick intrEventTick = 0;
2506    if (intrEvent)
2507        intrEventTick = intrEvent->when();
2508    SERIALIZE_SCALAR(intrEventTick);
2509
2510}
2511
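/*
 * Restore the device from a checkpoint: registers, fifos, cached
 * descriptors, both state machines, and any kick, transmit, or
 * interrupt events that were pending when the checkpoint was taken.
 */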
2512void
2513NSGigE::unserialize(Checkpoint *cp, const std::string &section)
2514{
2515    // Unserialize the PciDev base class
2516    PciDev::unserialize(cp, section);
2517
2518    UNSERIALIZE_SCALAR(regs.command);
2519    UNSERIALIZE_SCALAR(regs.config);
2520    UNSERIALIZE_SCALAR(regs.mear);
2521    UNSERIALIZE_SCALAR(regs.ptscr);
2522    UNSERIALIZE_SCALAR(regs.isr);
2523    UNSERIALIZE_SCALAR(regs.imr);
2524    UNSERIALIZE_SCALAR(regs.ier);
2525    UNSERIALIZE_SCALAR(regs.ihr);
2526    UNSERIALIZE_SCALAR(regs.txdp);
2527    UNSERIALIZE_SCALAR(regs.txdp_hi);
2528    UNSERIALIZE_SCALAR(regs.txcfg);
2529    UNSERIALIZE_SCALAR(regs.gpior);
2530    UNSERIALIZE_SCALAR(regs.rxdp);
2531    UNSERIALIZE_SCALAR(regs.rxdp_hi);
2532    UNSERIALIZE_SCALAR(regs.rxcfg);
2533    UNSERIALIZE_SCALAR(regs.pqcr);
2534    UNSERIALIZE_SCALAR(regs.wcsr);
2535    UNSERIALIZE_SCALAR(regs.pcr);
2536    UNSERIALIZE_SCALAR(regs.rfcr);
2537    UNSERIALIZE_SCALAR(regs.rfdr);
2538    UNSERIALIZE_SCALAR(regs.srr);
2539    UNSERIALIZE_SCALAR(regs.mibc);
2540    UNSERIALIZE_SCALAR(regs.vrcr);
2541    UNSERIALIZE_SCALAR(regs.vtcr);
2542    UNSERIALIZE_SCALAR(regs.vdr);
2543    UNSERIALIZE_SCALAR(regs.ccsr);
2544    UNSERIALIZE_SCALAR(regs.tbicr);
2545    UNSERIALIZE_SCALAR(regs.tbisr);
2546    UNSERIALIZE_SCALAR(regs.tanar);
2547    UNSERIALIZE_SCALAR(regs.tanlpar);
2548    UNSERIALIZE_SCALAR(regs.taner);
2549    UNSERIALIZE_SCALAR(regs.tesr);
2550
2551    UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2552
2553    UNSERIALIZE_SCALAR(ioEnable);
2554
2555    /*
2556     * unserialize the data fifos
2557     */
2558    rxFifo.unserialize("rxFifo", cp, section);
2559    txFifo.unserialize("txFifo", cp, section);
2560
2561    /*
2562     * unserialize the various helper variables
2563     */
2564    bool txPacketExists;
2565    UNSERIALIZE_SCALAR(txPacketExists);
2566    if (txPacketExists) {
2567        txPacket = new PacketData(16384);
2568        txPacket->unserialize("txPacket", cp, section);
2569        uint32_t txPktBufPtr;
2570        UNSERIALIZE_SCALAR(txPktBufPtr);
2571        txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
2572    } else
2573        txPacket = 0;
2574
2575    bool rxPacketExists;
2576    UNSERIALIZE_SCALAR(rxPacketExists);
2578    if (rxPacketExists) {
2579        rxPacket = new PacketData(16384);
2580        rxPacket->unserialize("rxPacket", cp, section);
2581        uint32_t rxPktBufPtr;
2582        UNSERIALIZE_SCALAR(rxPktBufPtr);
2583        rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
2584    } else
2585        rxPacket = 0;
2586
2587    UNSERIALIZE_SCALAR(txXferLen);
2588    UNSERIALIZE_SCALAR(rxXferLen);
2589
2590    /*
2591     * Unserialize DescCaches
2592     */
2593    UNSERIALIZE_SCALAR(txDescCache.link);
2594    UNSERIALIZE_SCALAR(txDescCache.bufptr);
2595    UNSERIALIZE_SCALAR(txDescCache.cmdsts);
2596    UNSERIALIZE_SCALAR(txDescCache.extsts);
2597    UNSERIALIZE_SCALAR(rxDescCache.link);
2598    UNSERIALIZE_SCALAR(rxDescCache.bufptr);
2599    UNSERIALIZE_SCALAR(rxDescCache.cmdsts);
2600    UNSERIALIZE_SCALAR(rxDescCache.extsts);
2601    UNSERIALIZE_SCALAR(extstsEnable);
2602
2603    /*
2604     * unserialize tx state machine
2605     */
2606    int txState;
2607    UNSERIALIZE_SCALAR(txState);
2608    this->txState = (TxState) txState;
2609    UNSERIALIZE_SCALAR(txEnable);
2610    UNSERIALIZE_SCALAR(CTDD);
2611    UNSERIALIZE_SCALAR(txFragPtr);
2612    UNSERIALIZE_SCALAR(txDescCnt);
2613    int txDmaState;
2614    UNSERIALIZE_SCALAR(txDmaState);
2615    this->txDmaState = (DmaState) txDmaState;
2616    UNSERIALIZE_SCALAR(txKickTick);
2617    if (txKickTick)
2618        txKickEvent.schedule(txKickTick);
2619
2620    /*
2621     * unserialize rx state machine
2622     */
2623    int rxState;
2624    UNSERIALIZE_SCALAR(rxState);
2625    this->rxState = (RxState) rxState;
2626    UNSERIALIZE_SCALAR(rxEnable);
2627    UNSERIALIZE_SCALAR(CRDD);
2628    UNSERIALIZE_SCALAR(rxPktBytes);
2629    UNSERIALIZE_SCALAR(rxFragPtr);
2630    UNSERIALIZE_SCALAR(rxDescCnt);
2631    int rxDmaState;
2632    UNSERIALIZE_SCALAR(rxDmaState);
2633    this->rxDmaState = (DmaState) rxDmaState;
2634    UNSERIALIZE_SCALAR(rxKickTick);
2635    if (rxKickTick)
2636        rxKickEvent.schedule(rxKickTick);
2637
2638    /*
2639     * If there's a pending transmit, reschedule it now
2640     */
2641    Tick transmitTick;
2642    UNSERIALIZE_SCALAR(transmitTick);
2643    if (transmitTick)
2644        txEvent.schedule(curTick + transmitTick);
2645
2646    /*
2647     * unserialize receive address filter settings
2648     */
2649    UNSERIALIZE_SCALAR(rxFilterEnable);
2650    UNSERIALIZE_SCALAR(acceptBroadcast);
2651    UNSERIALIZE_SCALAR(acceptMulticast);
2652    UNSERIALIZE_SCALAR(acceptUnicast);
2653    UNSERIALIZE_SCALAR(acceptPerfect);
2654    UNSERIALIZE_SCALAR(acceptArp);
2655
2656    /*
2657     * Keep track of pending interrupt status.
2658     */
2659    UNSERIALIZE_SCALAR(intrTick);
2660    UNSERIALIZE_SCALAR(cpuPendingIntr);
2661    Tick intrEventTick;
2662    UNSERIALIZE_SCALAR(intrEventTick);
2663    if (intrEventTick) {
2664        intrEvent = new IntrEvent(this, true);
2665        intrEvent->schedule(intrEventTick);
2666    }
2667
2668    /*
2669     * re-add addrRanges to bus bridges
2670     */
2671    if (pioInterface) {
2672        pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
2673        pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));
2674    }
2675}
2676
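/*
 * Completion-time callback for timing-mode programmed I/O: every
 * access to the device's registers finishes pioLatency ticks after
 * it is issued.
 */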
2677Tick
2678NSGigE::cacheAccess(MemReqPtr &req)
2679{
2680    DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
2681            req->paddr, req->paddr - addr);
2682    return curTick + pioLatency;
2683}
2684
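/*
 * SimObject glue: the macro blocks below declare the configuration
 * parameters exposed to the simulation scripts and define the
 * factories that build NSGigEInt and NSGigE objects from them.
 */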
2685BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)
2686
2687    SimObjectParam<EtherInt *> peer;
2688    SimObjectParam<NSGigE *> device;
2689
2690END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)
2691
2692BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2693
2694    INIT_PARAM_DFLT(peer, "peer interface", NULL),
2695    INIT_PARAM(device, "Ethernet device of this interface")
2696
2697END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2698
2699CREATE_SIM_OBJECT(NSGigEInt)
2700{
2701    NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);
2702
2703    EtherInt *p = (EtherInt *)peer;
2704    if (p) {
2705        dev_int->setPeer(p);
2706        p->setPeer(dev_int);
2707    }
2708
2709    return dev_int;
2710}
2711
2712REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
2713
2714
2715BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2716
2717    Param<Addr> addr;
2718    Param<Tick> clock;
2719    Param<Tick> tx_delay;
2720    Param<Tick> rx_delay;
2721    Param<Tick> intr_delay;
2722    SimObjectParam<MemoryController *> mmu;
2723    SimObjectParam<PhysicalMemory *> physmem;
2724    Param<bool> rx_filter;
2725    Param<string> hardware_address;
2726    SimObjectParam<Bus*> io_bus;
2727    SimObjectParam<Bus*> payload_bus;
2728    SimObjectParam<HierParams *> hier;
2729    Param<Tick> pio_latency;
2730    Param<bool> dma_desc_free;
2731    Param<bool> dma_data_free;
2732    Param<Tick> dma_read_delay;
2733    Param<Tick> dma_write_delay;
2734    Param<Tick> dma_read_factor;
2735    Param<Tick> dma_write_factor;
2736    SimObjectParam<PciConfigAll *> configspace;
2737    SimObjectParam<PciConfigData *> configdata;
2738    SimObjectParam<Platform *> platform;
2739    Param<uint32_t> pci_bus;
2740    Param<uint32_t> pci_dev;
2741    Param<uint32_t> pci_func;
2742    Param<uint32_t> tx_fifo_size;
2743    Param<uint32_t> rx_fifo_size;
2744    Param<uint32_t> m5reg;
2745    Param<bool> dma_no_allocate;
2746
2747END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2748
2749BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)
2750
2751    INIT_PARAM(addr, "Device Address"),
2752    INIT_PARAM(clock, "State machine processor frequency"),
2753    INIT_PARAM(tx_delay, "Transmit Delay"),
2754    INIT_PARAM(rx_delay, "Receive Delay"),
2755    INIT_PARAM(intr_delay, "Interrupt Delay in microseconds"),
2756    INIT_PARAM(mmu, "Memory Controller"),
2757    INIT_PARAM(physmem, "Physical Memory"),
2758    INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true),
2759    INIT_PARAM_DFLT(hardware_address, "Ethernet Hardware Address",
2760                    "00:99:00:00:00:01"),
2761    INIT_PARAM_DFLT(io_bus, "The IO Bus to attach to for headers", NULL),
2762    INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL),
2763    INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams),
2764    INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
2765    INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false),
2766    INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false),
2767    INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0),
2768    INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0),
2769    INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0),
2770    INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0),
2771    INIT_PARAM(configspace, "PCI Configspace"),
2772    INIT_PARAM(configdata, "PCI Config data"),
2773    INIT_PARAM(platform, "Platform"),
2774    INIT_PARAM(pci_bus, "PCI bus"),
2775    INIT_PARAM(pci_dev, "PCI device number"),
2776    INIT_PARAM(pci_func, "PCI function code"),
2777    INIT_PARAM_DFLT(tx_fifo_size, "max size in bytes of txFifo", 131072),
2778    INIT_PARAM_DFLT(rx_fifo_size, "max size in bytes of rxFifo", 131072),
2779    INIT_PARAM(m5reg, "m5 register"),
2780    INIT_PARAM_DFLT(dma_no_allocate, "If true, DMA reads do not allocate cache lines", true)
2781
2782END_INIT_SIM_OBJECT_PARAMS(NSGigE)
2783
2784
2785CREATE_SIM_OBJECT(NSGigE)
2786{
2787    NSGigE::Params *params = new NSGigE::Params;
2788
2789    params->name = getInstanceName();
2790    params->mmu = mmu;
2791    params->configSpace = configspace;
2792    params->configData = configdata;
2793    params->plat = platform;
2794    params->busNum = pci_bus;
2795    params->deviceNum = pci_dev;
2796    params->functionNum = pci_func;
2797
2798    params->clock = clock;
2799    params->intr_delay = intr_delay;
2800    params->pmem = physmem;
2801    params->tx_delay = tx_delay;
2802    params->rx_delay = rx_delay;
2803    params->hier = hier;
2804    params->header_bus = io_bus;
2805    params->payload_bus = payload_bus;
2806    params->pio_latency = pio_latency;
2807    params->dma_desc_free = dma_desc_free;
2808    params->dma_data_free = dma_data_free;
2809    params->dma_read_delay = dma_read_delay;
2810    params->dma_write_delay = dma_write_delay;
2811    params->dma_read_factor = dma_read_factor;
2812    params->dma_write_factor = dma_write_factor;
2813    params->rx_filter = rx_filter;
2814    params->eaddr = hardware_address;
2815    params->tx_fifo_size = tx_fifo_size;
2816    params->rx_fifo_size = rx_fifo_size;
2817    params->m5reg = m5reg;
2818    params->dma_no_allocate = dma_no_allocate;
2819    return new NSGigE(params);
2820}
2821
2822REGISTER_SIM_OBJECT("NSGigE", NSGigE)
2823