ns_gige.cc (12087:0e082672ac6b) ns_gige.cc (12561:7227dc3a0715)
1/*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Nathan Binkert
29 * Lisa Hsu
30 */
31
32/** @file
33 * Device module for modelling the National Semiconductor
34 * DP83820 ethernet controller. Does not support priority queueing
35 */
36
37#include "dev/net/ns_gige.hh"
38
39#include <deque>
40#include <memory>
41#include <string>
42
43#include "base/debug.hh"
44#include "base/inet.hh"
45#include "base/types.hh"
46#include "config/the_isa.hh"
47#include "debug/EthernetAll.hh"
48#include "dev/net/etherlink.hh"
49#include "mem/packet.hh"
50#include "mem/packet_access.hh"
51#include "params/NSGigE.hh"
52#include "sim/system.hh"
53
54// clang complains about std::set being overloaded with Packet::set if
55// we open up the entire namespace std
56using std::make_shared;
57using std::min;
58using std::ostream;
59using std::string;
60
61const char *NsRxStateStrings[] =
62{
63 "rxIdle",
64 "rxDescRefr",
65 "rxDescRead",
66 "rxFifoBlock",
67 "rxFragWrite",
68 "rxDescWrite",
69 "rxAdvance"
70};
71
72const char *NsTxStateStrings[] =
73{
74 "txIdle",
75 "txDescRefr",
76 "txDescRead",
77 "txFifoBlock",
78 "txFragRead",
79 "txDescWrite",
80 "txAdvance"
81};
82
83const char *NsDmaState[] =
84{
85 "dmaIdle",
86 "dmaReading",
87 "dmaWriting",
88 "dmaReadWaiting",
89 "dmaWriteWaiting"
90};
91
92using namespace Net;
93using namespace TheISA;
94
95///////////////////////////////////////////////////////////////////////
96//
97// NSGigE PCI Device
98//
99NSGigE::NSGigE(Params *p)
100 : EtherDevBase(p), ioEnable(false),
101 txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
102 txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
103 txXferLen(0), rxXferLen(0), rxDmaFree(false), txDmaFree(false),
104 txState(txIdle), txEnable(false), CTDD(false), txHalt(false),
105 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
106 rxEnable(false), CRDD(false), rxPktBytes(0), rxHalt(false),
107 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
108 eepromState(eepromStart), eepromClk(false), eepromBitsToRx(0),
109 eepromOpcode(0), eepromAddress(0), eepromData(0),
110 dmaReadDelay(p->dma_read_delay), dmaWriteDelay(p->dma_write_delay),
111 dmaReadFactor(p->dma_read_factor), dmaWriteFactor(p->dma_write_factor),
112 rxDmaData(NULL), rxDmaAddr(0), rxDmaLen(0),
113 txDmaData(NULL), txDmaAddr(0), txDmaLen(0),
114 rxDmaReadEvent([this]{ rxDmaReadDone(); }, name()),
115 rxDmaWriteEvent([this]{ rxDmaWriteDone(); }, name()),
116 txDmaReadEvent([this]{ txDmaReadDone(); }, name()),
117 txDmaWriteEvent([this]{ txDmaWriteDone(); }, name()),
118 dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
119 txDelay(p->tx_delay), rxDelay(p->rx_delay),
120 rxKickTick(0),
121 rxKickEvent([this]{ rxKick(); }, name()),
122 txKickTick(0),
123 txKickEvent([this]{ txKick(); }, name()),
124 txEvent([this]{ txEventTransmit(); }, name()),
125 rxFilterEnable(p->rx_filter),
126 acceptBroadcast(false), acceptMulticast(false), acceptUnicast(false),
127 acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
128 intrDelay(p->intr_delay), intrTick(0), cpuPendingIntr(false),
129 intrEvent(0), interface(0)
130{
131
132
133 interface = new NSGigEInt(name() + ".int0", this);
134
135 regsReset();
136 memcpy(&rom.perfectMatch, p->hardware_address.bytes(), ETH_ADDR_LEN);
137
138 memset(&rxDesc32, 0, sizeof(rxDesc32));
139 memset(&txDesc32, 0, sizeof(txDesc32));
140 memset(&rxDesc64, 0, sizeof(rxDesc64));
141 memset(&txDesc64, 0, sizeof(txDesc64));
142}
143
144NSGigE::~NSGigE()
145{
146 delete interface;
147}
148
149/**
150 * This is to write to the PCI general configuration registers
151 */
152Tick
153NSGigE::writeConfig(PacketPtr pkt)
154{
155 int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
156 if (offset < PCI_DEVICE_SPECIFIC)
157 PciDevice::writeConfig(pkt);
158 else
159 panic("Device specific PCI config space not implemented!\n");
160
161 switch (offset) {
 162 // it seems to work fine without all these PCI settings, but the
 163 // IO enable bit is tracked here as a double check; an assertion
 164 // will fail if we ever need to implement it properly
165 case PCI_COMMAND:
166 if (config.data[offset] & PCI_CMD_IOSE)
167 ioEnable = true;
168 else
169 ioEnable = false;
170 break;
171 }
172
173 return configDelay;
174}
175
176EtherInt*
177NSGigE::getEthPort(const std::string &if_name, int idx)
178{
179 if (if_name == "interface") {
180 if (interface->getPeer())
181 panic("interface already connected to\n");
182 return interface;
183 }
184 return NULL;
185}
186
187/**
188 * This reads the device registers, which are detailed in the NS83820
189 * spec sheet
190 */
191Tick
192NSGigE::read(PacketPtr pkt)
193{
194 assert(ioEnable);
195
196 //The mask is to give you only the offset into the device register file
197 Addr daddr = pkt->getAddr() & 0xfff;
198 DPRINTF(EthernetPIO, "read da=%#x pa=%#x size=%d\n",
199 daddr, pkt->getAddr(), pkt->getSize());
200
201
202 // there are some reserved registers, you can see ns_gige_reg.h and
203 // the spec sheet for details
204 if (daddr > LAST && daddr <= RESERVED) {
205 panic("Accessing reserved register");
206 } else if (daddr > RESERVED && daddr <= 0x3FC) {
207 return readConfig(pkt);
208 } else if (daddr >= MIB_START && daddr <= MIB_END) {
 209 // we don't implement all the MIBs. hopefully the kernel
 210 // doesn't actually DEPEND upon their values
 211 // MIBs are just hardware stats keepers
212 pkt->set<uint32_t>(0);
213 pkt->makeAtomicResponse();
214 return pioDelay;
215 } else if (daddr > 0x3FC)
216 panic("Something is messed up!\n");
217
218 assert(pkt->getSize() == sizeof(uint32_t));
219 uint32_t &reg = *pkt->getPtr<uint32_t>();
220 uint16_t rfaddr;
221
222 switch (daddr) {
223 case CR:
224 reg = regs.command;
225 //these are supposed to be cleared on a read
226 reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
227 break;
228
229 case CFGR:
230 reg = regs.config;
231 break;
232
233 case MEAR:
234 reg = regs.mear;
235 break;
236
237 case PTSCR:
238 reg = regs.ptscr;
239 break;
240
241 case ISR:
242 reg = regs.isr;
243 devIntrClear(ISR_ALL);
244 break;
245
246 case IMR:
247 reg = regs.imr;
248 break;
249
250 case IER:
251 reg = regs.ier;
252 break;
253
254 case IHR:
255 reg = regs.ihr;
256 break;
257
258 case TXDP:
259 reg = regs.txdp;
260 break;
261
262 case TXDP_HI:
263 reg = regs.txdp_hi;
264 break;
265
266 case TX_CFG:
267 reg = regs.txcfg;
268 break;
269
270 case GPIOR:
271 reg = regs.gpior;
272 break;
273
274 case RXDP:
275 reg = regs.rxdp;
276 break;
277
278 case RXDP_HI:
279 reg = regs.rxdp_hi;
280 break;
281
282 case RX_CFG:
283 reg = regs.rxcfg;
284 break;
285
286 case PQCR:
287 reg = regs.pqcr;
288 break;
289
290 case WCSR:
291 reg = regs.wcsr;
292 break;
293
294 case PCR:
295 reg = regs.pcr;
296 break;
297
 298 // see the spec sheet for how RFCR and RFDR work:
 299 // you first write to RFCR to tell the machine what the
 300 // next RFDR access should operate on, then you read or
 301 // write RFDR, and the device responds based on what was
 302 // written to RFCR (an illustrative sketch follows this function)
303 case RFCR:
304 reg = regs.rfcr;
305 break;
306
307 case RFDR:
308 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
309 switch (rfaddr) {
310 // Read from perfect match ROM octets
311 case 0x000:
312 reg = rom.perfectMatch[1];
313 reg = reg << 8;
314 reg += rom.perfectMatch[0];
315 break;
316 case 0x002:
317 reg = rom.perfectMatch[3] << 8;
318 reg += rom.perfectMatch[2];
319 break;
320 case 0x004:
321 reg = rom.perfectMatch[5] << 8;
322 reg += rom.perfectMatch[4];
323 break;
324 default:
325 // Read filter hash table
326 if (rfaddr >= FHASH_ADDR &&
327 rfaddr < FHASH_ADDR + FHASH_SIZE) {
328
329 // Only word-aligned reads supported
330 if (rfaddr % 2)
331 panic("unaligned read from filter hash table!");
332
333 reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8;
334 reg += rom.filterHash[rfaddr - FHASH_ADDR];
335 break;
336 }
337
338 panic("reading RFDR for something other than pattern"
339 " matching or hashing! %#x\n", rfaddr);
340 }
341 break;
342
343 case SRR:
344 reg = regs.srr;
345 break;
346
347 case MIBC:
348 reg = regs.mibc;
349 reg &= ~(MIBC_MIBS | MIBC_ACLR);
350 break;
351
352 case VRCR:
353 reg = regs.vrcr;
354 break;
355
356 case VTCR:
357 reg = regs.vtcr;
358 break;
359
360 case VDR:
361 reg = regs.vdr;
362 break;
363
364 case CCSR:
365 reg = regs.ccsr;
366 break;
367
368 case TBICR:
369 reg = regs.tbicr;
370 break;
371
372 case TBISR:
373 reg = regs.tbisr;
374 break;
375
376 case TANAR:
377 reg = regs.tanar;
378 break;
379
380 case TANLPAR:
381 reg = regs.tanlpar;
382 break;
383
384 case TANER:
385 reg = regs.taner;
386 break;
387
388 case TESR:
389 reg = regs.tesr;
390 break;
391
392 case M5REG:
393 reg = 0;
394 if (params()->rx_thread)
395 reg |= M5REG_RX_THREAD;
396 if (params()->tx_thread)
397 reg |= M5REG_TX_THREAD;
398 if (params()->rss)
399 reg |= M5REG_RSS;
400 break;
401
402 default:
403 panic("reading unimplemented register: addr=%#x", daddr);
404 }
405
406 DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
407 daddr, reg, reg);
408
409 pkt->makeAtomicResponse();
410 return pioDelay;
411}
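
// Illustrative sketch, not part of the original source: the RFCR/RFDR access
// pattern handled in read() above, written as a hypothetical driver-side
// helper.  The driver first writes the receive-filter address into RFCR, then
// reads RFDR to get the selected 16-bit word.  writeReg32()/readReg32() are
// assumed PIO helpers that land in NSGigE::write()/NSGigE::read(); a real
// driver would also preserve the RFCR control bits (RFEN, AAB, ...).
#if 0
static uint16_t
exampleReadPerfectMatchWord(unsigned word)      // word = 0, 1 or 2 of the MAC
{
    writeReg32(RFCR, 2 * word);                 // select PMATCH octets 2w, 2w+1
    return readReg32(RFDR) & 0xffff;            // low octet in bits 7:0,
                                                // high octet in bits 15:8
}
#endif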
412
413Tick
414NSGigE::write(PacketPtr pkt)
415{
416 assert(ioEnable);
417
418 Addr daddr = pkt->getAddr() & 0xfff;
419 DPRINTF(EthernetPIO, "write da=%#x pa=%#x size=%d\n",
420 daddr, pkt->getAddr(), pkt->getSize());
421
422 if (daddr > LAST && daddr <= RESERVED) {
423 panic("Accessing reserved register");
424 } else if (daddr > RESERVED && daddr <= 0x3FC) {
425 return writeConfig(pkt);
426 } else if (daddr > 0x3FC)
427 panic("Something is messed up!\n");
428
429 if (pkt->getSize() == sizeof(uint32_t)) {
430 uint32_t reg = pkt->get<uint32_t>();
431 uint16_t rfaddr;
432
433 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);
434
435 switch (daddr) {
436 case CR:
437 regs.command = reg;
438 if (reg & CR_TXD) {
439 txEnable = false;
440 } else if (reg & CR_TXE) {
441 txEnable = true;
442
443 // the kernel is enabling the transmit machine
444 if (txState == txIdle)
445 txKick();
446 }
447
448 if (reg & CR_RXD) {
449 rxEnable = false;
450 } else if (reg & CR_RXE) {
451 rxEnable = true;
452
453 if (rxState == rxIdle)
454 rxKick();
455 }
456
457 if (reg & CR_TXR)
458 txReset();
459
460 if (reg & CR_RXR)
461 rxReset();
462
463 if (reg & CR_SWI)
464 devIntrPost(ISR_SWI);
465
466 if (reg & CR_RST) {
467 txReset();
468 rxReset();
469
470 regsReset();
471 }
472 break;
473
474 case CFGR:
475 if (reg & CFGR_LNKSTS ||
476 reg & CFGR_SPDSTS ||
477 reg & CFGR_DUPSTS ||
478 reg & CFGR_RESERVED ||
479 reg & CFGR_T64ADDR ||
480 reg & CFGR_PCI64_DET) {
481 // First clear all writable bits
482 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
483 CFGR_RESERVED | CFGR_T64ADDR |
484 CFGR_PCI64_DET;
485 // Now set the appropriate writable bits
486 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
487 CFGR_RESERVED | CFGR_T64ADDR |
488 CFGR_PCI64_DET);
489 }
490
 491// all these #if 0's are here because we don't THINK the kernel needs
 492// these implemented. if a problem relating to one of them comes up,
 493// the functionality may need to be added.
494
495// grouped together and #if 0'ed to avoid empty if body and make clang happy
496#if 0
497 if (reg & CFGR_TBI_EN) ;
498 if (reg & CFGR_MODE_1000) ;
499
500 if (reg & CFGR_PINT_DUPSTS ||
501 reg & CFGR_PINT_LNKSTS ||
502 reg & CFGR_PINT_SPDSTS)
503 ;
504
505 if (reg & CFGR_TMRTEST) ;
506 if (reg & CFGR_MRM_DIS) ;
507 if (reg & CFGR_MWI_DIS) ;
508
509 if (reg & CFGR_DATA64_EN) ;
510 if (reg & CFGR_M64ADDR) ;
511 if (reg & CFGR_PHY_RST) ;
512 if (reg & CFGR_PHY_DIS) ;
513
514 if (reg & CFGR_REQALG) ;
515 if (reg & CFGR_SB) ;
516 if (reg & CFGR_POW) ;
517 if (reg & CFGR_EXD) ;
518 if (reg & CFGR_PESEL) ;
519 if (reg & CFGR_BROM_DIS) ;
520 if (reg & CFGR_EXT_125) ;
521 if (reg & CFGR_BEM) ;
522
523 if (reg & CFGR_T64ADDR) ;
524 // panic("CFGR_T64ADDR is read only register!\n");
525#endif
526 if (reg & CFGR_AUTO_1000)
527 panic("CFGR_AUTO_1000 not implemented!\n");
528
529 if (reg & CFGR_PCI64_DET)
530 panic("CFGR_PCI64_DET is read only register!\n");
531
532 if (reg & CFGR_EXTSTS_EN)
533 extstsEnable = true;
534 else
535 extstsEnable = false;
536 break;
537
538 case MEAR:
539 // Clear writable bits
540 regs.mear &= MEAR_EEDO;
541 // Set appropriate writable bits
542 regs.mear |= reg & ~MEAR_EEDO;
543
544 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
545 // even though it could get it through RFDR
546 if (reg & MEAR_EESEL) {
547 // Rising edge of clock
548 if (reg & MEAR_EECLK && !eepromClk)
549 eepromKick();
550 }
551 else {
552 eepromState = eepromStart;
553 regs.mear &= ~MEAR_EEDI;
554 }
555
556 eepromClk = reg & MEAR_EECLK;
557
558 // since phy is completely faked, MEAR_MD* don't matter
559
560// grouped together and #if 0'ed to avoid empty if body and make clang happy
561#if 0
562 if (reg & MEAR_MDIO) ;
563 if (reg & MEAR_MDDIR) ;
564 if (reg & MEAR_MDC) ;
565#endif
566 break;
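
// Illustrative sketch, not part of the original source: how a driver
// bit-bangs the serial EEPROM through MEAR as modelled by the case above.
// With MEAR_EESEL held high, every 0->1 transition of MEAR_EECLK advances
// the EEPROM state machine (eepromKick()); command/address bits are shifted
// in via MEAR_EEDI and data bits are shifted out on the read-only MEAR_EEDO.
// writeReg32()/readReg32() are assumed PIO helpers, not defined here.
#if 0
uint32_t mear = MEAR_EESEL;                  // select the EEPROM
writeReg32(MEAR, mear);
writeReg32(MEAR, mear | MEAR_EECLK);         // rising edge: device samples EEDI
writeReg32(MEAR, mear);                      // falling edge: set up next bit
bool out_bit = readReg32(MEAR) & MEAR_EEDO;  // data bit shifted out
#endif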
567
568 case PTSCR:
569 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
 570 // these control BISTs for various parts of the chip - we
 571 // don't model them, so we just fake that the BIST is done
572 if (reg & PTSCR_RBIST_EN)
573 regs.ptscr |= PTSCR_RBIST_DONE;
574 if (reg & PTSCR_EEBIST_EN)
575 regs.ptscr &= ~PTSCR_EEBIST_EN;
576 if (reg & PTSCR_EELOAD_EN)
577 regs.ptscr &= ~PTSCR_EELOAD_EN;
578 break;
579
580 case ISR: /* writing to the ISR has no effect */
581 panic("ISR is a read only register!\n");
582
583 case IMR:
584 regs.imr = reg;
585 devIntrChangeMask();
586 break;
587
588 case IER:
589 regs.ier = reg;
590 break;
591
592 case IHR:
593 regs.ihr = reg;
594 /* not going to implement real interrupt holdoff */
595 break;
596
597 case TXDP:
598 regs.txdp = (reg & 0xFFFFFFFC);
599 assert(txState == txIdle);
600 CTDD = false;
601 break;
602
603 case TXDP_HI:
604 regs.txdp_hi = reg;
605 break;
606
607 case TX_CFG:
608 regs.txcfg = reg;
609#if 0
610 if (reg & TX_CFG_CSI) ;
611 if (reg & TX_CFG_HBI) ;
612 if (reg & TX_CFG_MLB) ;
613 if (reg & TX_CFG_ATP) ;
614 if (reg & TX_CFG_ECRETRY) {
615 /*
616 * this could easily be implemented, but considering
617 * the network is just a fake pipe, wouldn't make
618 * sense to do this
619 */
620 }
621
622 if (reg & TX_CFG_BRST_DIS) ;
623#endif
624
625#if 0
626 /* we handle our own DMA, ignore the kernel's exhortations */
627 if (reg & TX_CFG_MXDMA) ;
628#endif
629
630 // also, we currently don't care about fill/drain
631 // thresholds though this may change in the future with
632 // more realistic networks or a driver which changes it
633 // according to feedback
634
635 break;
636
637 case GPIOR:
638 // Only write writable bits
639 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
640 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN;
641 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
642 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN);
643 /* these just control general purpose i/o pins, don't matter */
644 break;
645
646 case RXDP:
647 regs.rxdp = reg;
648 CRDD = false;
649 break;
650
651 case RXDP_HI:
652 regs.rxdp_hi = reg;
653 break;
654
655 case RX_CFG:
656 regs.rxcfg = reg;
657#if 0
658 if (reg & RX_CFG_AEP) ;
659 if (reg & RX_CFG_ARP) ;
660 if (reg & RX_CFG_STRIPCRC) ;
661 if (reg & RX_CFG_RX_RD) ;
662 if (reg & RX_CFG_ALP) ;
663 if (reg & RX_CFG_AIRL) ;
664
665 /* we handle our own DMA, ignore what kernel says about it */
666 if (reg & RX_CFG_MXDMA) ;
667
668 //also, we currently don't care about fill/drain thresholds
669 //though this may change in the future with more realistic
670 //networks or a driver which changes it according to feedback
671 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
672#endif
673 break;
674
675 case PQCR:
676 /* there is no priority queueing used in the linux 2.6 driver */
677 regs.pqcr = reg;
678 break;
679
680 case WCSR:
681 /* not going to implement wake on LAN */
682 regs.wcsr = reg;
683 break;
684
685 case PCR:
686 /* not going to implement pause control */
687 regs.pcr = reg;
688 break;
689
690 case RFCR:
691 regs.rfcr = reg;
692
693 rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
694 acceptBroadcast = (reg & RFCR_AAB) ? true : false;
695 acceptMulticast = (reg & RFCR_AAM) ? true : false;
696 acceptUnicast = (reg & RFCR_AAU) ? true : false;
697 acceptPerfect = (reg & RFCR_APM) ? true : false;
698 acceptArp = (reg & RFCR_AARP) ? true : false;
699 multicastHashEnable = (reg & RFCR_MHEN) ? true : false;
700
701#if 0
702 if (reg & RFCR_APAT)
703 panic("RFCR_APAT not implemented!\n");
704#endif
705 if (reg & RFCR_UHEN)
706 panic("Unicast hash filtering not used by drivers!\n");
707
708 if (reg & RFCR_ULM)
709 panic("RFCR_ULM not implemented!\n");
710
711 break;
712
713 case RFDR:
714 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
715 switch (rfaddr) {
716 case 0x000:
717 rom.perfectMatch[0] = (uint8_t)reg;
718 rom.perfectMatch[1] = (uint8_t)(reg >> 8);
719 break;
720 case 0x002:
721 rom.perfectMatch[2] = (uint8_t)reg;
722 rom.perfectMatch[3] = (uint8_t)(reg >> 8);
723 break;
724 case 0x004:
725 rom.perfectMatch[4] = (uint8_t)reg;
726 rom.perfectMatch[5] = (uint8_t)(reg >> 8);
727 break;
728 default:
729
730 if (rfaddr >= FHASH_ADDR &&
731 rfaddr < FHASH_ADDR + FHASH_SIZE) {
732
733 // Only word-aligned writes supported
734 if (rfaddr % 2)
735 panic("unaligned write to filter hash table!");
736
737 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg;
738 rom.filterHash[rfaddr - FHASH_ADDR + 1]
739 = (uint8_t)(reg >> 8);
740 break;
741 }
742 panic("writing RFDR for something other than pattern matching "
743 "or hashing! %#x\n", rfaddr);
744 }
745 break;
746 case BRAR:
747 regs.brar = reg;
748 break;
749
750 case BRDR:
751 panic("the driver never uses BRDR, something is wrong!\n");
752
753 case SRR:
754 panic("SRR is read only register!\n");
755
756 case MIBC:
757 panic("the driver never uses MIBC, something is wrong!\n");
758
759 case VRCR:
760 regs.vrcr = reg;
761 break;
762
763 case VTCR:
764 regs.vtcr = reg;
765 break;
766
767 case VDR:
768 panic("the driver never uses VDR, something is wrong!\n");
769
770 case CCSR:
771 /* not going to implement clockrun stuff */
772 regs.ccsr = reg;
773 break;
774
775 case TBICR:
776 regs.tbicr = reg;
777 if (reg & TBICR_MR_LOOPBACK)
778 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
779
780 if (reg & TBICR_MR_AN_ENABLE) {
781 regs.tanlpar = regs.tanar;
782 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
783 }
784
785#if 0
786 if (reg & TBICR_MR_RESTART_AN) ;
787#endif
788
789 break;
790
791 case TBISR:
792 panic("TBISR is read only register!\n");
793
794 case TANAR:
795 // Only write the writable bits
796 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED;
797 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED);
798
799 // Pause capability unimplemented
800#if 0
801 if (reg & TANAR_PS2) ;
802 if (reg & TANAR_PS1) ;
803#endif
804
805 break;
806
807 case TANLPAR:
808 panic("this should only be written to by the fake phy!\n");
809
810 case TANER:
811 panic("TANER is read only register!\n");
812
813 case TESR:
814 regs.tesr = reg;
815 break;
816
817 default:
818 panic("invalid register access daddr=%#x", daddr);
819 }
820 } else {
821 panic("Invalid Request Size");
822 }
823 pkt->makeAtomicResponse();
824 return pioDelay;
825}
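
// Illustrative sketch, not part of the original source: the read-modify-write
// pattern used in write() above for registers that mix read-only and writable
// bits (CFGR, MEAR, GPIOR, TANAR).  Read-only bits are kept from the current
// register value and only the remaining bits are taken from the new value.
#if 0
static inline uint32_t
maskedRegWrite(uint32_t cur, uint32_t new_val, uint32_t ro_mask)
{
    // keep read-only bits from cur, take the writable bits from new_val
    return (cur & ro_mask) | (new_val & ~ro_mask);
}

// e.g. the CFGR case above is equivalent to:
//   regs.config = maskedRegWrite(regs.config, reg,
//                                CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
//                                CFGR_RESERVED | CFGR_T64ADDR | CFGR_PCI64_DET);
#endif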
826
827void
828NSGigE::devIntrPost(uint32_t interrupts)
829{
830 if (interrupts & ISR_RESERVE)
831 panic("Cannot set a reserved interrupt");
832
833 if (interrupts & ISR_NOIMPL)
834 warn("interrupt not implemented %#x\n", interrupts);
835
836 interrupts &= ISR_IMPL;
837 regs.isr |= interrupts;
838
839 if (interrupts & regs.imr) {
840 if (interrupts & ISR_SWI) {
841 totalSwi++;
842 }
843 if (interrupts & ISR_RXIDLE) {
844 totalRxIdle++;
845 }
846 if (interrupts & ISR_RXOK) {
847 totalRxOk++;
848 }
849 if (interrupts & ISR_RXDESC) {
850 totalRxDesc++;
851 }
852 if (interrupts & ISR_TXOK) {
853 totalTxOk++;
854 }
855 if (interrupts & ISR_TXIDLE) {
856 totalTxIdle++;
857 }
858 if (interrupts & ISR_TXDESC) {
859 totalTxDesc++;
860 }
861 if (interrupts & ISR_RXORN) {
862 totalRxOrn++;
863 }
864 }
865
866 DPRINTF(EthernetIntr,
867 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
868 interrupts, regs.isr, regs.imr);
869
870 if ((regs.isr & regs.imr)) {
871 Tick when = curTick();
872 if ((regs.isr & regs.imr & ISR_NODELAY) == 0)
873 when += intrDelay;
874 postedInterrupts++;
875 cpuIntrPost(when);
876 }
877}
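
// Illustrative sketch, not part of the original source: what a call to
// devIntrPost() does with an enabled interrupt source.  Bits accumulate in
// regs.isr, but only sources also set in regs.imr reach the CPU, and the
// delivery time is curTick() + intrDelay unless one of the pending enabled
// sources is in ISR_NODELAY, in which case the CPU interrupt goes out now.
#if 0
// hypothetical fragment, assuming the driver has unmasked SWI in the IMR
regs.imr |= ISR_SWI;
devIntrPost(ISR_SWI);   // sets ISR_SWI in regs.isr; since it is enabled in
                        // regs.imr, this calls cpuIntrPost(curTick() +
                        // intrDelay), or cpuIntrPost(curTick()) if a
                        // pending enabled source is in ISR_NODELAY
#endif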
878
 879/* keeping the interrupt counting stats inside this function means that it
 880 is now limited to being used to clear all interrupts when the kernel
 881 reads the ISR and services them. keep that in mind if you are thinking
 882 of expanding its use.
 883*/
884void
885NSGigE::devIntrClear(uint32_t interrupts)
886{
887 if (interrupts & ISR_RESERVE)
888 panic("Cannot clear a reserved interrupt");
889
890 if (regs.isr & regs.imr & ISR_SWI) {
891 postedSwi++;
892 }
893 if (regs.isr & regs.imr & ISR_RXIDLE) {
894 postedRxIdle++;
895 }
896 if (regs.isr & regs.imr & ISR_RXOK) {
897 postedRxOk++;
898 }
899 if (regs.isr & regs.imr & ISR_RXDESC) {
900 postedRxDesc++;
901 }
902 if (regs.isr & regs.imr & ISR_TXOK) {
903 postedTxOk++;
904 }
905 if (regs.isr & regs.imr & ISR_TXIDLE) {
906 postedTxIdle++;
907 }
908 if (regs.isr & regs.imr & ISR_TXDESC) {
909 postedTxDesc++;
910 }
911 if (regs.isr & regs.imr & ISR_RXORN) {
912 postedRxOrn++;
913 }
914
915 interrupts &= ~ISR_NOIMPL;
916 regs.isr &= ~interrupts;
917
918 DPRINTF(EthernetIntr,
919 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
920 interrupts, regs.isr, regs.imr);
921
922 if (!(regs.isr & regs.imr))
923 cpuIntrClear();
924}
925
926void
927NSGigE::devIntrChangeMask()
928{
929 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
930 regs.isr, regs.imr, regs.isr & regs.imr);
931
932 if (regs.isr & regs.imr)
933 cpuIntrPost(curTick());
934 else
935 cpuIntrClear();
936}
937
938void
939NSGigE::cpuIntrPost(Tick when)
940{
941 // If the interrupt you want to post is later than an interrupt
942 // already scheduled, just let it post in the coming one and don't
943 // schedule another.
944 // HOWEVER, must be sure that the scheduled intrTick is in the
945 // future (this was formerly the source of a bug)
946 /**
947 * @todo this warning should be removed and the intrTick code should
948 * be fixed.
949 */
950 assert(when >= curTick());
951 assert(intrTick >= curTick() || intrTick == 0);
952 if (when > intrTick && intrTick != 0) {
953 DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
954 intrTick);
955 return;
956 }
957
958 intrTick = when;
959 if (intrTick < curTick()) {
960 intrTick = curTick();
961 }
962
963 DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
964 intrTick);
965
966 if (intrEvent)
967 intrEvent->squash();
968
969 intrEvent = new EventFunctionWrapper([this]{ cpuInterrupt(); },
970 name(), true);
971 schedule(intrEvent, intrTick);
972}
973
974void
975NSGigE::cpuInterrupt()
976{
977 assert(intrTick == curTick());
978
979 // Whether or not there's a pending interrupt, we don't care about
980 // it anymore
981 intrEvent = 0;
982 intrTick = 0;
983
984 // Don't send an interrupt if there's already one
985 if (cpuPendingIntr) {
986 DPRINTF(EthernetIntr,
 987 "would send an interrupt now, but there's already one pending\n");
988 } else {
989 // Send interrupt
990 cpuPendingIntr = true;
991
992 DPRINTF(EthernetIntr, "posting interrupt\n");
993 intrPost();
994 }
995}
996
997void
998NSGigE::cpuIntrClear()
999{
1000 if (!cpuPendingIntr)
1001 return;
1002
1003 if (intrEvent) {
1004 intrEvent->squash();
1005 intrEvent = 0;
1006 }
1007
1008 intrTick = 0;
1009
1010 cpuPendingIntr = false;
1011
1012 DPRINTF(EthernetIntr, "clearing interrupt\n");
1013 intrClear();
1014}
1015
1016bool
1017NSGigE::cpuIntrPending() const
1018{ return cpuPendingIntr; }
1019
1020void
1021NSGigE::txReset()
1022{
1023
1024 DPRINTF(Ethernet, "transmit reset\n");
1025
1026 CTDD = false;
 1027 txEnable = false;
1028 txFragPtr = 0;
1029 assert(txDescCnt == 0);
1030 txFifo.clear();
1031 txState = txIdle;
1032 assert(txDmaState == dmaIdle);
1033}
1034
1035void
1036NSGigE::rxReset()
1037{
1038 DPRINTF(Ethernet, "receive reset\n");
1039
1040 CRDD = false;
1041 assert(rxPktBytes == 0);
1042 rxEnable = false;
1043 rxFragPtr = 0;
1044 assert(rxDescCnt == 0);
1045 assert(rxDmaState == dmaIdle);
1046 rxFifo.clear();
1047 rxState = rxIdle;
1048}
1049
1050void
1051NSGigE::regsReset()
1052{
1053 memset(&regs, 0, sizeof(regs));
1054 regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
1055 regs.mear = 0x12;
1056 regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
1057 // fill threshold to 32 bytes
1058 regs.rxcfg = 0x4; // set drain threshold to 16 bytes
1059 regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
1060 regs.mibc = MIBC_FRZ;
1061 regs.vdr = 0x81; // set the vlan tag type to 802.1q
1062 regs.tesr = 0xc000; // TBI capable of both full and half duplex
1063 regs.brar = 0xffffffff;
1064
1065 extstsEnable = false;
1066 acceptBroadcast = false;
1067 acceptMulticast = false;
1068 acceptUnicast = false;
1069 acceptPerfect = false;
1070 acceptArp = false;
1071}
1072
1073bool
1074NSGigE::doRxDmaRead()
1075{
1076 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1077 rxDmaState = dmaReading;
1078
1079 if (dmaPending() || drainState() != DrainState::Running)
1080 rxDmaState = dmaReadWaiting;
1081 else
1082 dmaRead(rxDmaAddr, rxDmaLen, &rxDmaReadEvent, (uint8_t*)rxDmaData);
1083
1084 return true;
1085}
1086
1087void
1088NSGigE::rxDmaReadDone()
1089{
1090 assert(rxDmaState == dmaReading);
1091 rxDmaState = dmaIdle;
1092
1093 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
1094 rxDmaAddr, rxDmaLen);
1095 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1096
1097 // If the transmit state machine has a pending DMA, let it go first
1098 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1099 txKick();
1100
1101 rxKick();
1102}
1103
1104bool
1105NSGigE::doRxDmaWrite()
1106{
1107 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1108 rxDmaState = dmaWriting;
1109
1110 if (dmaPending() || drainState() != DrainState::Running)
1111 rxDmaState = dmaWriteWaiting;
1112 else
1113 dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaWriteEvent, (uint8_t*)rxDmaData);
1114 return true;
1115}
1116
1117void
1118NSGigE::rxDmaWriteDone()
1119{
1120 assert(rxDmaState == dmaWriting);
1121 rxDmaState = dmaIdle;
1122
1123 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1124 rxDmaAddr, rxDmaLen);
1125 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1126
1127 // If the transmit state machine has a pending DMA, let it go first
1128 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1129 txKick();
1130
1131 rxKick();
1132}
1133
1134void
1135NSGigE::rxKick()
1136{
1137 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1138
1139 DPRINTF(EthernetSM,
1140 "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
1141 NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32);
1142
1143 Addr link, bufptr;
1144 uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts;
1145 uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts;
1146
1147 next:
1148 if (rxKickTick > curTick()) {
1149 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
1150 rxKickTick);
1151
1152 goto exit;
1153 }
1154
1155 // Go to the next state machine clock tick.
1156 rxKickTick = clockEdge(Cycles(1));
1157
1158 switch(rxDmaState) {
1159 case dmaReadWaiting:
1160 if (doRxDmaRead())
1161 goto exit;
1162 break;
1163 case dmaWriteWaiting:
1164 if (doRxDmaWrite())
1165 goto exit;
1166 break;
1167 default:
1168 break;
1169 }
1170
1171 link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link;
1172 bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr;
1173
 1174 // see the state machine in the spec for details
 1175 // the way this works: if you finish the work in one state and can
 1176 // go directly to another, you do that by jumping to the label
 1177 // "next". if there is intermediate work, such as a DMA, that keeps
 1178 // you from moving to the next state yet, you go to "exit" and leave
 1179 // the loop; when the DMA is done it triggers an event that comes
 1180 // back into this loop. (a stripped-down skeleton follows below.)
1181 switch (rxState) {
1182 case rxIdle:
1183 if (!rxEnable) {
1184 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
1185 goto exit;
1186 }
1187
1188 if (CRDD) {
1189 rxState = rxDescRefr;
1190
1191 rxDmaAddr = regs.rxdp & 0x3fffffff;
1192 rxDmaData =
1193 is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link;
1194 rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link);
1195 rxDmaFree = dmaDescFree;
1196
1197 descDmaReads++;
1198 descDmaRdBytes += rxDmaLen;
1199
1200 if (doRxDmaRead())
1201 goto exit;
1202 } else {
1203 rxState = rxDescRead;
1204
1205 rxDmaAddr = regs.rxdp & 0x3fffffff;
1206 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1207 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1208 rxDmaFree = dmaDescFree;
1209
1210 descDmaReads++;
1211 descDmaRdBytes += rxDmaLen;
1212
1213 if (doRxDmaRead())
1214 goto exit;
1215 }
1216 break;
1217
1218 case rxDescRefr:
1219 if (rxDmaState != dmaIdle)
1220 goto exit;
1221
1222 rxState = rxAdvance;
1223 break;
1224
1225 case rxDescRead:
1226 if (rxDmaState != dmaIdle)
1227 goto exit;
1228
1229 DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n",
1230 regs.rxdp & 0x3fffffff);
1231 DPRINTF(EthernetDesc,
1232 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1233 link, bufptr, cmdsts, extsts);
1234
1235 if (cmdsts & CMDSTS_OWN) {
1236 devIntrPost(ISR_RXIDLE);
1237 rxState = rxIdle;
1238 goto exit;
1239 } else {
1240 rxState = rxFifoBlock;
1241 rxFragPtr = bufptr;
1242 rxDescCnt = cmdsts & CMDSTS_LEN_MASK;
1243 }
1244 break;
1245
1246 case rxFifoBlock:
1247 if (!rxPacket) {
1248 /**
1249 * @todo in reality, we should be able to start processing
1250 * the packet as it arrives, and not have to wait for the
 1251 * full packet to be in the receive fifo.
1252 */
1253 if (rxFifo.empty())
1254 goto exit;
1255
1256 DPRINTF(EthernetSM, "****processing receive of new packet****\n");
1257
1258 // If we don't have a packet, grab a new one from the fifo.
1259 rxPacket = rxFifo.front();
1260 rxPktBytes = rxPacket->length;
1261 rxPacketBufPtr = rxPacket->data;
1262
1263#if TRACING_ON
1264 if (DTRACE(Ethernet)) {
1265 IpPtr ip(rxPacket);
1266 if (ip) {
1267 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1268 TcpPtr tcp(ip);
1269 if (tcp) {
1270 DPRINTF(Ethernet,
1271 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1272 tcp->sport(), tcp->dport(), tcp->seq(),
1273 tcp->ack());
1274 }
1275 }
1276 }
1277#endif
1278
1279 // sanity check - i think the driver behaves like this
1280 assert(rxDescCnt >= rxPktBytes);
1281 rxFifo.pop();
1282 }
1283
1284
 1285 // don't need the && rxDescCnt > 0 if driver sanity check
1286 // above holds
1287 if (rxPktBytes > 0) {
1288 rxState = rxFragWrite;
1289 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1290 // check holds
1291 rxXferLen = rxPktBytes;
1292
1293 rxDmaAddr = rxFragPtr & 0x3fffffff;
1294 rxDmaData = rxPacketBufPtr;
1295 rxDmaLen = rxXferLen;
1296 rxDmaFree = dmaDataFree;
1297
1298 if (doRxDmaWrite())
1299 goto exit;
1300
1301 } else {
1302 rxState = rxDescWrite;
1303
1304 //if (rxPktBytes == 0) { /* packet is done */
1305 assert(rxPktBytes == 0);
1306 DPRINTF(EthernetSM, "done with receiving packet\n");
1307
1308 cmdsts |= CMDSTS_OWN;
1309 cmdsts &= ~CMDSTS_MORE;
1310 cmdsts |= CMDSTS_OK;
1311 cmdsts &= 0xffff0000;
1312 cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE
1313
1314#if 0
1315 /*
 1316 * the driver only uses these for its own stats keeping,
 1317 * which we don't care about; they aren't necessary for
 1318 * functionality and doing this would just slow us down.
 1319 * if a later driver version uses them for functional
 1320 * purposes, just re-enable this block.
1321 */
1322 if (rxFilterEnable) {
1323 cmdsts &= ~CMDSTS_DEST_MASK;
1324 const EthAddr &dst = rxFifoFront()->dst();
1325 if (dst->unicast())
1326 cmdsts |= CMDSTS_DEST_SELF;
1327 if (dst->multicast())
1328 cmdsts |= CMDSTS_DEST_MULTI;
1329 if (dst->broadcast())
1330 cmdsts |= CMDSTS_DEST_MASK;
1331 }
1332#endif
1333
1334 IpPtr ip(rxPacket);
1335 if (extstsEnable && ip) {
1336 extsts |= EXTSTS_IPPKT;
1337 rxIpChecksums++;
1338 if (cksum(ip) != 0) {
1339 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
1340 extsts |= EXTSTS_IPERR;
1341 }
1342 TcpPtr tcp(ip);
1343 UdpPtr udp(ip);
1344 if (tcp) {
1345 extsts |= EXTSTS_TCPPKT;
1346 rxTcpChecksums++;
1347 if (cksum(tcp) != 0) {
1348 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
1349 extsts |= EXTSTS_TCPERR;
1350
1351 }
1352 } else if (udp) {
1353 extsts |= EXTSTS_UDPPKT;
1354 rxUdpChecksums++;
1355 if (cksum(udp) != 0) {
1356 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
1357 extsts |= EXTSTS_UDPERR;
1358 }
1359 }
1360 }
1361 rxPacket = 0;
1362
1363 /*
1364 * the driver seems to always receive into desc buffers
1365 * of size 1514, so you never have a pkt that is split
 1366 * into multiple descriptors on the receive side; we
 1367 * therefore don't implement that case, hence the assert above.
1368 */
1369
1370 DPRINTF(EthernetDesc,
1371 "rxDesc: addr=%08x writeback cmdsts extsts\n",
1372 regs.rxdp & 0x3fffffff);
1373 DPRINTF(EthernetDesc,
1374 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1375 link, bufptr, cmdsts, extsts);
1376
1377 rxDmaAddr = regs.rxdp & 0x3fffffff;
1378 rxDmaData = &cmdsts;
1379 if (is64bit) {
1380 rxDmaAddr += offsetof(ns_desc64, cmdsts);
1381 rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
1382 } else {
1383 rxDmaAddr += offsetof(ns_desc32, cmdsts);
1384 rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
1385 }
1386 rxDmaFree = dmaDescFree;
1387
1388 descDmaWrites++;
1389 descDmaWrBytes += rxDmaLen;
1390
1391 if (doRxDmaWrite())
1392 goto exit;
1393 }
1394 break;
1395
1396 case rxFragWrite:
1397 if (rxDmaState != dmaIdle)
1398 goto exit;
1399
1400 rxPacketBufPtr += rxXferLen;
1401 rxFragPtr += rxXferLen;
1402 rxPktBytes -= rxXferLen;
1403
1404 rxState = rxFifoBlock;
1405 break;
1406
1407 case rxDescWrite:
1408 if (rxDmaState != dmaIdle)
1409 goto exit;
1410
1411 assert(cmdsts & CMDSTS_OWN);
1412
1413 assert(rxPacket == 0);
1414 devIntrPost(ISR_RXOK);
1415
1416 if (cmdsts & CMDSTS_INTR)
1417 devIntrPost(ISR_RXDESC);
1418
1419 if (!rxEnable) {
1420 DPRINTF(EthernetSM, "Halting the RX state machine\n");
1421 rxState = rxIdle;
1422 goto exit;
1423 } else
1424 rxState = rxAdvance;
1425 break;
1426
1427 case rxAdvance:
1428 if (link == 0) {
1429 devIntrPost(ISR_RXIDLE);
1430 rxState = rxIdle;
1431 CRDD = true;
1432 goto exit;
1433 } else {
1434 if (rxDmaState != dmaIdle)
1435 goto exit;
1436 rxState = rxDescRead;
1437 regs.rxdp = link;
1438 CRDD = false;
1439
1440 rxDmaAddr = regs.rxdp & 0x3fffffff;
1441 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1442 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1443 rxDmaFree = dmaDescFree;
1444
1445 if (doRxDmaRead())
1446 goto exit;
1447 }
1448 break;
1449
1450 default:
1451 panic("Invalid rxState!");
1452 }
1453
1454 DPRINTF(EthernetSM, "entering next rxState=%s\n",
1455 NsRxStateStrings[rxState]);
1456 goto next;
1457
1458 exit:
1459 /**
1460 * @todo do we want to schedule a future kick?
1461 */
1462 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
1463 NsRxStateStrings[rxState]);
1464
1465 if (!rxKickEvent.scheduled())
1466 schedule(rxKickEvent, rxKickTick);
1467}
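
// Illustrative sketch, not part of the original source: a stripped-down
// skeleton of the kick-loop pattern used by rxKick() above and txKick()
// below.  Work that can continue immediately jumps back to "next"; work that
// must wait for a DMA jumps to "exit", and the DMA completion event calls the
// kick function again so the state machine resumes where it left off.
// exampleKickSkeleton() is hypothetical and only for illustration.
#if 0
void
NSGigE::exampleKickSkeleton()
{
  next:
    if (rxKickTick > curTick())
        goto exit;                      // throttled: one state per clock edge
    rxKickTick = clockEdge(Cycles(1));

    switch (rxState) {
      case rxIdle:
        // ... set up a descriptor DMA read ...
        if (doRxDmaRead())
            goto exit;                  // resumes via rxDmaReadDone() -> rxKick()
        break;
      // ... other states ...
    }
    goto next;                          // go straight on to the next state

  exit:
    if (!rxKickEvent.scheduled())
        schedule(rxKickEvent, rxKickTick);
}
#endif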
1468
1469void
1470NSGigE::transmit()
1471{
1472 if (txFifo.empty()) {
1473 DPRINTF(Ethernet, "nothing to transmit\n");
1474 return;
1475 }
1476
1477 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
1478 txFifo.size());
1479 if (interface->sendPacket(txFifo.front())) {
1480#if TRACING_ON
1481 if (DTRACE(Ethernet)) {
1482 IpPtr ip(txFifo.front());
1483 if (ip) {
1484 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1485 TcpPtr tcp(ip);
1486 if (tcp) {
1487 DPRINTF(Ethernet,
1488 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1489 tcp->sport(), tcp->dport(), tcp->seq(),
1490 tcp->ack());
1491 }
1492 }
1493 }
1494#endif
1495
1496 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
1497 txBytes += txFifo.front()->length;
1498 txPackets++;
1499
1500 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
1501 txFifo.avail());
1502 txFifo.pop();
1503
1504 /*
1505 * normally do a writeback of the descriptor here, and ONLY
1506 * after that is done, send this interrupt. but since our
1507 * stuff never actually fails, just do this interrupt here,
1508 * otherwise the code has to stray from this nice format.
1509 * besides, it's functionally the same.
1510 */
1511 devIntrPost(ISR_TXOK);
1512 }
1513
1514 if (!txFifo.empty() && !txEvent.scheduled()) {
1515 DPRINTF(Ethernet, "reschedule transmit\n");
1516 schedule(txEvent, curTick() + retryTime);
1517 }
1518}
1519
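/*
 * Start a DMA read for the transmit engine, or park it in
 * dmaReadWaiting if another DMA is pending or the device is draining.
 * Always returns true, so callers exit their state machine; progress
 * resumes from txDmaReadDone() or the next txKick().
 */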
1520bool
1521NSGigE::doTxDmaRead()
1522{
1523 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1524 txDmaState = dmaReading;
1525
1526 if (dmaPending() || drainState() != DrainState::Running)
1527 txDmaState = dmaReadWaiting;
1528 else
1529 dmaRead(txDmaAddr, txDmaLen, &txDmaReadEvent, (uint8_t*)txDmaData);
1530
1531 return true;
1532}
1533
1534void
1535NSGigE::txDmaReadDone()
1536{
1537 assert(txDmaState == dmaReading);
1538 txDmaState = dmaIdle;
1539
1540 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1541 txDmaAddr, txDmaLen);
1542 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1543
1544 // If the receive state machine has a pending DMA, let it go first
1545 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1546 rxKick();
1547
1548 txKick();
1549}
1550
1551bool
1552NSGigE::doTxDmaWrite()
1553{
1554 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1555 txDmaState = dmaWriting;
1556
1557 if (dmaPending() || drainState() != DrainState::Running)
1558 txDmaState = dmaWriteWaiting;
1559 else
1560 dmaWrite(txDmaAddr, txDmaLen, &txDmaWriteEvent, (uint8_t*)txDmaData);
1561 return true;
1562}
1563
1564void
1565NSGigE::txDmaWriteDone()
1566{
1567 assert(txDmaState == dmaWriting);
1568 txDmaState = dmaIdle;
1569
1570 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1571 txDmaAddr, txDmaLen);
1572 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1573
1574 // If the receive state machine has a pending DMA, let it go first
1575 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1576 rxKick();
1577
1578 txKick();
1579}
1580
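/*
 * Transmit descriptor state machine.  Starting from regs.txdp it reads
 * descriptors, DMAs each fragment into txPacket, pushes the finished
 * packet onto txFifo, writes completion status back into the
 * descriptor, and follows the link field until the ring is exhausted
 * or transmit is disabled.
 */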
1581void
1582NSGigE::txKick()
1583{
1584 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1585
1586 DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n",
1587 NsTxStateStrings[txState], is64bit ? 64 : 32);
1588
1589 Addr link, bufptr;
1590 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts;
1591 uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts;
1592
1593 next:
1594 if (txKickTick > curTick()) {
1595 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
1596 txKickTick);
1597 goto exit;
1598 }
1599
1600 // Go to the next state machine clock tick.
1601 txKickTick = clockEdge(Cycles(1));
1602
1603 switch(txDmaState) {
1604 case dmaReadWaiting:
1605 if (doTxDmaRead())
1606 goto exit;
1607 break;
1608 case dmaWriteWaiting:
1609 if (doTxDmaWrite())
1610 goto exit;
1611 break;
1612 default:
1613 break;
1614 }
1615
1616 link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link;
1617 bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr;
1618 switch (txState) {
1619 case txIdle:
1620 if (!txEnable) {
1621 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
1622 goto exit;
1623 }
1624
1625 if (CTDD) {
1626 txState = txDescRefr;
1627
1628 txDmaAddr = regs.txdp & 0x3fffffff;
1629 txDmaData =
1630 is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link;
1631 txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link);
1632 txDmaFree = dmaDescFree;
1633
1634 descDmaReads++;
1635 descDmaRdBytes += txDmaLen;
1636
1637 if (doTxDmaRead())
1638 goto exit;
1639
1640 } else {
1641 txState = txDescRead;
1642
1643 txDmaAddr = regs.txdp & 0x3fffffff;
1644 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
1645 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
1646 txDmaFree = dmaDescFree;
1647
1648 descDmaReads++;
1649 descDmaRdBytes += txDmaLen;
1650
1651 if (doTxDmaRead())
1652 goto exit;
1653 }
1654 break;
1655
1656 case txDescRefr:
1657 if (txDmaState != dmaIdle)
1658 goto exit;
1659
1660 txState = txAdvance;
1661 break;
1662
1663 case txDescRead:
1664 if (txDmaState != dmaIdle)
1665 goto exit;
1666
1667 DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n",
1668 regs.txdp & 0x3fffffff);
1669 DPRINTF(EthernetDesc,
1670 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
1671 link, bufptr, cmdsts, extsts);
1672
1673 if (cmdsts & CMDSTS_OWN) {
1674 txState = txFifoBlock;
1675 txFragPtr = bufptr;
1676 txDescCnt = cmdsts & CMDSTS_LEN_MASK;
1677 } else {
1678 devIntrPost(ISR_TXIDLE);
1679 txState = txIdle;
1680 goto exit;
1681 }
1682 break;
1683
1684 case txFifoBlock:
1685 if (!txPacket) {
1686 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
1687 txPacket = make_shared<EthPacketData>(16384);
1688 txPacketBufPtr = txPacket->data;
1689 }
1690
1691 if (txDescCnt == 0) {
1692 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
1693 if (cmdsts & CMDSTS_MORE) {
1694 DPRINTF(EthernetSM, "there are more descriptors to come\n");
1695 txState = txDescWrite;
1696
1697 cmdsts &= ~CMDSTS_OWN;
1698
1699 txDmaAddr = regs.txdp & 0x3fffffff;
1700 txDmaData = &cmdsts;
1701 if (is64bit) {
1702 txDmaAddr += offsetof(ns_desc64, cmdsts);
1703 txDmaLen = sizeof(txDesc64.cmdsts);
1704 } else {
1705 txDmaAddr += offsetof(ns_desc32, cmdsts);
1706 txDmaLen = sizeof(txDesc32.cmdsts);
1707 }
1708 txDmaFree = dmaDescFree;
1709
1710 if (doTxDmaWrite())
1711 goto exit;
1712
1713 } else { /* this packet is totally done */
1714 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
1715                /* deal with the packet that just finished */
1716 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
1717 IpPtr ip(txPacket);
1718 if (extsts & EXTSTS_UDPPKT) {
1719 UdpPtr udp(ip);
1720 if (udp) {
1721 udp->sum(0);
1722 udp->sum(cksum(udp));
1723 txUdpChecksums++;
1724 } else {
1725 Debug::breakpoint();
1726 warn_once("UDPPKT set, but not UDP!\n");
1727 }
1728 } else if (extsts & EXTSTS_TCPPKT) {
1729 TcpPtr tcp(ip);
1730 if (tcp) {
1731 tcp->sum(0);
1732 tcp->sum(cksum(tcp));
1733 txTcpChecksums++;
1734 } else {
1735                            warn_once("TCPPKT set, but not TCP!\n");
1736 }
1737 }
1738 if (extsts & EXTSTS_IPPKT) {
1739 if (ip) {
1740 ip->sum(0);
1741 ip->sum(cksum(ip));
1742 txIpChecksums++;
1743 } else {
1744                            warn_once("IPPKT set, but not IP!\n");
1745 }
1746 }
1747 }
1748
1749 txPacket->simLength = txPacketBufPtr - txPacket->data;
1750 txPacket->length = txPacketBufPtr - txPacket->data;
1751                // this is just because the receive side can't handle a
1752                // packet bigger than 1514; want to make sure
1753 if (txPacket->length > 1514)
1754                    panic("transmit packet too large, %d > 1514\n",
1755 txPacket->length);
1756
1757#ifndef NDEBUG
1758 bool success =
1759#endif
1760 txFifo.push(txPacket);
1761 assert(success);
1762
1763 /*
1764                 * the following section is not to spec, but
1765                 * functionally shouldn't be any different. normally,
1766                 * the chip will wait until the transmit has occurred
1767 * before writing back the descriptor because it has
1768 * to wait to see that it was successfully transmitted
1769 * to decide whether to set CMDSTS_OK or not.
1770 * however, in the simulator since it is always
1771 * successfully transmitted, and writing it exactly to
1772 * spec would complicate the code, we just do it here
1773 */
1774
1775 cmdsts &= ~CMDSTS_OWN;
1776 cmdsts |= CMDSTS_OK;
1777
1778 DPRINTF(EthernetDesc,
1779 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
1780 cmdsts, extsts);
1781
1782 txDmaFree = dmaDescFree;
1783 txDmaAddr = regs.txdp & 0x3fffffff;
1784 txDmaData = &cmdsts;
1785 if (is64bit) {
1786 txDmaAddr += offsetof(ns_desc64, cmdsts);
1787 txDmaLen =
1788 sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts);
1789 } else {
1790 txDmaAddr += offsetof(ns_desc32, cmdsts);
1791 txDmaLen =
1792 sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts);
1793 }
1794
1795 descDmaWrites++;
1796 descDmaWrBytes += txDmaLen;
1797
1798 transmit();
1799 txPacket = 0;
1800
1801 if (!txEnable) {
1802 DPRINTF(EthernetSM, "halting TX state machine\n");
1803 txState = txIdle;
1804 goto exit;
1805 } else
1806 txState = txAdvance;
1807
1808 if (doTxDmaWrite())
1809 goto exit;
1810 }
1811 } else {
1812 DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
1813 if (!txFifo.full()) {
1814 txState = txFragRead;
1815
1816 /*
1817 * The number of bytes transferred is either whatever
1818 * is left in the descriptor (txDescCnt), or if there
1819 * is not enough room in the fifo, just whatever room
1820 * is left in the fifo
1821 */
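                 /*
                  * For example (illustrative numbers only): if txDescCnt
                  * is 1514 but txFifo.avail() is 600, only 600 bytes are
                  * read now; the rest is picked up on a later pass
                  * through txFragRead -> txFifoBlock.
                  */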
1822 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());
1823
1824 txDmaAddr = txFragPtr & 0x3fffffff;
1825 txDmaData = txPacketBufPtr;
1826 txDmaLen = txXferLen;
1827 txDmaFree = dmaDataFree;
1828
1829 if (doTxDmaRead())
1830 goto exit;
1831 } else {
1832 txState = txFifoBlock;
1833 transmit();
1834
1835 goto exit;
1836 }
1837
1838 }
1839 break;
1840
1841 case txFragRead:
1842 if (txDmaState != dmaIdle)
1843 goto exit;
1844
1845 txPacketBufPtr += txXferLen;
1846 txFragPtr += txXferLen;
1847 txDescCnt -= txXferLen;
1848 txFifo.reserve(txXferLen);
1849
1850 txState = txFifoBlock;
1851 break;
1852
1853 case txDescWrite:
1854 if (txDmaState != dmaIdle)
1855 goto exit;
1856
1857 if (cmdsts & CMDSTS_INTR)
1858 devIntrPost(ISR_TXDESC);
1859
1860 if (!txEnable) {
1861 DPRINTF(EthernetSM, "halting TX state machine\n");
1862 txState = txIdle;
1863 goto exit;
1864 } else
1865 txState = txAdvance;
1866 break;
1867
1868 case txAdvance:
1869 if (link == 0) {
1870 devIntrPost(ISR_TXIDLE);
1871 txState = txIdle;
1872 goto exit;
1873 } else {
1874 if (txDmaState != dmaIdle)
1875 goto exit;
1876 txState = txDescRead;
1877 regs.txdp = link;
1878 CTDD = false;
1879
1880 txDmaAddr = link & 0x3fffffff;
1881 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
1882 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
1883 txDmaFree = dmaDescFree;
1884
1885 if (doTxDmaRead())
1886 goto exit;
1887 }
1888 break;
1889
1890 default:
1891 panic("invalid state");
1892 }
1893
1894 DPRINTF(EthernetSM, "entering next txState=%s\n",
1895 NsTxStateStrings[txState]);
1896 goto next;
1897
1898 exit:
1899 /**
1900 * @todo do we want to schedule a future kick?
1901 */
1902 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
1903 NsTxStateStrings[txState]);
1904
1905 if (!txKickEvent.scheduled())
1906 schedule(txKickEvent, txKickTick);
1907}
1908
1909/**
1910 * Advance the EEPROM state machine
1911 * Called on rising edge of EEPROM clock bit in MEAR
1912 */
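/*
 * Bit-serial protocol as modelled here (not the full device spec): a
 * start bit on EEDI, a 2-bit opcode (only EEPROM_READ is handled), a
 * 6-bit address, and then 16 data bits shifted out MSB-first on EEDO.
 */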
1913void
1914NSGigE::eepromKick()
1915{
1916 switch (eepromState) {
1917
1918 case eepromStart:
1919
1920 // Wait for start bit
1921 if (regs.mear & MEAR_EEDI) {
1922 // Set up to get 2 opcode bits
1923 eepromState = eepromGetOpcode;
1924 eepromBitsToRx = 2;
1925 eepromOpcode = 0;
1926 }
1927 break;
1928
1929 case eepromGetOpcode:
1930 eepromOpcode <<= 1;
1931 eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
1932 --eepromBitsToRx;
1933
1934 // Done getting opcode
1935 if (eepromBitsToRx == 0) {
1936 if (eepromOpcode != EEPROM_READ)
1937 panic("only EEPROM reads are implemented!");
1938
1939 // Set up to get address
1940 eepromState = eepromGetAddress;
1941 eepromBitsToRx = 6;
1942 eepromAddress = 0;
1943 }
1944 break;
1945
1946 case eepromGetAddress:
1947 eepromAddress <<= 1;
1948 eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
1949 --eepromBitsToRx;
1950
1951 // Done getting address
1952 if (eepromBitsToRx == 0) {
1953
1954 if (eepromAddress >= EEPROM_SIZE)
1955 panic("EEPROM read access out of range!");
1956
1957 switch (eepromAddress) {
1958
1959 case EEPROM_PMATCH2_ADDR:
1960 eepromData = rom.perfectMatch[5];
1961 eepromData <<= 8;
1962 eepromData += rom.perfectMatch[4];
1963 break;
1964
1965 case EEPROM_PMATCH1_ADDR:
1966 eepromData = rom.perfectMatch[3];
1967 eepromData <<= 8;
1968 eepromData += rom.perfectMatch[2];
1969 break;
1970
1971 case EEPROM_PMATCH0_ADDR:
1972 eepromData = rom.perfectMatch[1];
1973 eepromData <<= 8;
1974 eepromData += rom.perfectMatch[0];
1975 break;
1976
1977 default:
1978 panic("FreeBSD driver only uses EEPROM to read PMATCH!");
1979 }
1980 // Set up to read data
1981 eepromState = eepromRead;
1982 eepromBitsToRx = 16;
1983
1984 // Clear data in bit
1985 regs.mear &= ~MEAR_EEDI;
1986 }
1987 break;
1988
1989 case eepromRead:
1990 // Clear Data Out bit
1991 regs.mear &= ~MEAR_EEDO;
1992 // Set bit to value of current EEPROM bit
1993 regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;
1994
1995 eepromData <<= 1;
1996 --eepromBitsToRx;
1997
1998 // All done
1999 if (eepromBitsToRx == 0) {
2000 eepromState = eepromStart;
2001 }
2002 break;
2003
2004 default:
2005 panic("invalid EEPROM state");
2006 }
2007
2008}
2009
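/*
 * Presumably called by the interface once the outgoing packet has left
 * the device; if more data is waiting in txFifo, schedule the next
 * transmit attempt on the following clock edge.
 */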
2010void
2011NSGigE::transferDone()
2012{
2013 if (txFifo.empty()) {
2014 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2015 return;
2016 }
2017
2018 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2019
2020 reschedule(txEvent, clockEdge(Cycles(1)), true);
2021}
2022
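/*
 * Apply the receive address filter.  Returns true if the packet should
 * be dropped, false if it passes the unicast/broadcast/multicast/ARP
 * acceptance checks below.
 */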
2023bool
2024NSGigE::rxFilter(const EthPacketPtr &packet)
2025{
2026 EthPtr eth = packet;
2027 bool drop = true;
2028 string type;
2029
2030 const EthAddr &dst = eth->dst();
2031 if (dst.unicast()) {
2032 // If we're accepting all unicast addresses
2033 if (acceptUnicast)
2034 drop = false;
2035
2036 // If we make a perfect match
2037 if (acceptPerfect && dst == rom.perfectMatch)
2038 drop = false;
2039
2040 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2041 drop = false;
2042
2043 } else if (dst.broadcast()) {
2044 // if we're accepting broadcasts
2045 if (acceptBroadcast)
2046 drop = false;
2047
2048 } else if (dst.multicast()) {
2049 // if we're accepting all multicasts
2050 if (acceptMulticast)
2051 drop = false;
2052
2053 // Multicast hashing faked - all packets accepted
2054 if (multicastHashEnable)
2055 drop = false;
2056 }
2057
2058 if (drop) {
2059 DPRINTF(Ethernet, "rxFilter drop\n");
2060 DDUMP(EthernetData, packet->data, packet->length);
2061 }
2062
2063 return drop;
2064}
2065
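/*
 * Entry point for a packet arriving from the wire.  Returns true when
 * the packet is consumed (or silently dropped by the filter); returns
 * false only when rxFifo has no room, after counting the drop and
 * posting ISR_RXORN.
 */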
2066bool
2067NSGigE::recvPacket(EthPacketPtr packet)
2068{
2069 rxBytes += packet->length;
2070 rxPackets++;
2071
2072 DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
2073 rxFifo.avail());
2074
2075 if (!rxEnable) {
2076 DPRINTF(Ethernet, "receive disabled...packet dropped\n");
2077 return true;
2078 }
2079
2080 if (!rxFilterEnable) {
2081 DPRINTF(Ethernet,
2082 "receive packet filtering disabled . . . packet dropped\n");
2083 return true;
2084 }
2085
2086 if (rxFilter(packet)) {
2087 DPRINTF(Ethernet, "packet filtered...dropped\n");
2088 return true;
2089 }
2090
2091 if (rxFifo.avail() < packet->length) {
2092#if TRACING_ON
2093 IpPtr ip(packet);
2094 TcpPtr tcp(ip);
2095 if (ip) {
2096 DPRINTF(Ethernet,
2097 "packet won't fit in receive buffer...pkt ID %d dropped\n",
2098 ip->id());
2099 if (tcp) {
2100 DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
2101 }
2102 }
2103#endif
2104 droppedPackets++;
2105 devIntrPost(ISR_RXORN);
2106 return false;
2107 }
2108
2109 rxFifo.push(packet);
2110
2111 rxKick();
2112 return true;
2113}
2114
2115
2116void
2117NSGigE::drainResume()
2118{
2119 Drainable::drainResume();
2120
2121 // During drain we could have left the state machines in a waiting state and
2122    // they wouldn't get out until some other event occurred to kick them.
2123 // This way they'll get out immediately
2124 txKick();
2125 rxKick();
2126}
2127
2128
2129//=====================================================================
2130//
2131// Serialization
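//
// Checkpointing: the code below saves the device registers, both FIFOs,
// the cached rx/tx descriptors and the rx/tx/EEPROM state machine state;
// pending event times are also recorded so unserialize() can reschedule
// the corresponding events.
//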
2132void
2133NSGigE::serialize(CheckpointOut &cp) const
2134{
2135 // Serialize the PciDevice base class
2136 PciDevice::serialize(cp);
2137
2138 /*
2139 * Finalize any DMA events now.
2140 */
2141 // @todo will mem system save pending dma?
2142
2143 /*
2144 * Serialize the device registers
2145 */
2146 SERIALIZE_SCALAR(regs.command);
2147 SERIALIZE_SCALAR(regs.config);
2148 SERIALIZE_SCALAR(regs.mear);
2149 SERIALIZE_SCALAR(regs.ptscr);
2150 SERIALIZE_SCALAR(regs.isr);
2151 SERIALIZE_SCALAR(regs.imr);
2152 SERIALIZE_SCALAR(regs.ier);
2153 SERIALIZE_SCALAR(regs.ihr);
2154 SERIALIZE_SCALAR(regs.txdp);
2155 SERIALIZE_SCALAR(regs.txdp_hi);
2156 SERIALIZE_SCALAR(regs.txcfg);
2157 SERIALIZE_SCALAR(regs.gpior);
2158 SERIALIZE_SCALAR(regs.rxdp);
2159 SERIALIZE_SCALAR(regs.rxdp_hi);
2160 SERIALIZE_SCALAR(regs.rxcfg);
2161 SERIALIZE_SCALAR(regs.pqcr);
2162 SERIALIZE_SCALAR(regs.wcsr);
2163 SERIALIZE_SCALAR(regs.pcr);
2164 SERIALIZE_SCALAR(regs.rfcr);
2165 SERIALIZE_SCALAR(regs.rfdr);
2166 SERIALIZE_SCALAR(regs.brar);
2167 SERIALIZE_SCALAR(regs.brdr);
2168 SERIALIZE_SCALAR(regs.srr);
2169 SERIALIZE_SCALAR(regs.mibc);
2170 SERIALIZE_SCALAR(regs.vrcr);
2171 SERIALIZE_SCALAR(regs.vtcr);
2172 SERIALIZE_SCALAR(regs.vdr);
2173 SERIALIZE_SCALAR(regs.ccsr);
2174 SERIALIZE_SCALAR(regs.tbicr);
2175 SERIALIZE_SCALAR(regs.tbisr);
2176 SERIALIZE_SCALAR(regs.tanar);
2177 SERIALIZE_SCALAR(regs.tanlpar);
2178 SERIALIZE_SCALAR(regs.taner);
2179 SERIALIZE_SCALAR(regs.tesr);
2180
2181 SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2182 SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2183
2184 SERIALIZE_SCALAR(ioEnable);
2185
2186 /*
2187 * Serialize the data Fifos
2188 */
2189 rxFifo.serialize("rxFifo", cp);
2190 txFifo.serialize("txFifo", cp);
2191
2192 /*
2193 * Serialize the various helper variables
2194 */
2195 bool txPacketExists = txPacket != nullptr;
2196 SERIALIZE_SCALAR(txPacketExists);
2197 if (txPacketExists) {
2198 txPacket->simLength = txPacketBufPtr - txPacket->data;
2199 txPacket->length = txPacketBufPtr - txPacket->data;
2200 txPacket->serialize("txPacket", cp);
2201 uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
2202 SERIALIZE_SCALAR(txPktBufPtr);
2203 }
2204
2205 bool rxPacketExists = rxPacket != nullptr;
2206 SERIALIZE_SCALAR(rxPacketExists);
2207 if (rxPacketExists) {
2208 rxPacket->serialize("rxPacket", cp);
2209 uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
2210 SERIALIZE_SCALAR(rxPktBufPtr);
2211 }
2212
2213 SERIALIZE_SCALAR(txXferLen);
2214 SERIALIZE_SCALAR(rxXferLen);
2215
2216 /*
2217 * Serialize Cached Descriptors
2218 */
2219 SERIALIZE_SCALAR(rxDesc64.link);
2220 SERIALIZE_SCALAR(rxDesc64.bufptr);
2221 SERIALIZE_SCALAR(rxDesc64.cmdsts);
2222 SERIALIZE_SCALAR(rxDesc64.extsts);
2223 SERIALIZE_SCALAR(txDesc64.link);
2224 SERIALIZE_SCALAR(txDesc64.bufptr);
2225 SERIALIZE_SCALAR(txDesc64.cmdsts);
2226 SERIALIZE_SCALAR(txDesc64.extsts);
2227 SERIALIZE_SCALAR(rxDesc32.link);
2228 SERIALIZE_SCALAR(rxDesc32.bufptr);
2229 SERIALIZE_SCALAR(rxDesc32.cmdsts);
2230 SERIALIZE_SCALAR(rxDesc32.extsts);
2231 SERIALIZE_SCALAR(txDesc32.link);
2232 SERIALIZE_SCALAR(txDesc32.bufptr);
2233 SERIALIZE_SCALAR(txDesc32.cmdsts);
2234 SERIALIZE_SCALAR(txDesc32.extsts);
2235 SERIALIZE_SCALAR(extstsEnable);
2236
2237 /*
2238 * Serialize tx state machine
2239 */
2240 int txState = this->txState;
2241 SERIALIZE_SCALAR(txState);
2242 SERIALIZE_SCALAR(txEnable);
2243 SERIALIZE_SCALAR(CTDD);
2244 SERIALIZE_SCALAR(txFragPtr);
2245 SERIALIZE_SCALAR(txDescCnt);
2246 int txDmaState = this->txDmaState;
2247 SERIALIZE_SCALAR(txDmaState);
2248 SERIALIZE_SCALAR(txKickTick);
2249
2250 /*
2251 * Serialize rx state machine
2252 */
2253 int rxState = this->rxState;
2254 SERIALIZE_SCALAR(rxState);
2255 SERIALIZE_SCALAR(rxEnable);
2256 SERIALIZE_SCALAR(CRDD);
2257 SERIALIZE_SCALAR(rxPktBytes);
2258 SERIALIZE_SCALAR(rxFragPtr);
2259 SERIALIZE_SCALAR(rxDescCnt);
2260 int rxDmaState = this->rxDmaState;
2261 SERIALIZE_SCALAR(rxDmaState);
2262 SERIALIZE_SCALAR(rxKickTick);
2263
2264 /*
2265 * Serialize EEPROM state machine
2266 */
2267 int eepromState = this->eepromState;
2268 SERIALIZE_SCALAR(eepromState);
2269 SERIALIZE_SCALAR(eepromClk);
2270 SERIALIZE_SCALAR(eepromBitsToRx);
2271 SERIALIZE_SCALAR(eepromOpcode);
2272 SERIALIZE_SCALAR(eepromAddress);
2273 SERIALIZE_SCALAR(eepromData);
2274
2275 /*
2276 * If there's a pending transmit, store the time so we can
2277 * reschedule it later
2278 */
2279 Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick() : 0;
2280 SERIALIZE_SCALAR(transmitTick);
2281
2282 /*
2283 * receive address filter settings
2284 */
2285 SERIALIZE_SCALAR(rxFilterEnable);
2286 SERIALIZE_SCALAR(acceptBroadcast);
2287 SERIALIZE_SCALAR(acceptMulticast);
2288 SERIALIZE_SCALAR(acceptUnicast);
2289 SERIALIZE_SCALAR(acceptPerfect);
2290 SERIALIZE_SCALAR(acceptArp);
2291 SERIALIZE_SCALAR(multicastHashEnable);
2292
2293 /*
2294 * Keep track of pending interrupt status.
2295 */
2296 SERIALIZE_SCALAR(intrTick);
2297 SERIALIZE_SCALAR(cpuPendingIntr);
2298 Tick intrEventTick = 0;
2299 if (intrEvent)
2300 intrEventTick = intrEvent->when();
2301 SERIALIZE_SCALAR(intrEventTick);
2302
2303}
2304
2305void
2306NSGigE::unserialize(CheckpointIn &cp)
2307{
2308 // Unserialize the PciDevice base class
2309 PciDevice::unserialize(cp);
2310
2311 UNSERIALIZE_SCALAR(regs.command);
2312 UNSERIALIZE_SCALAR(regs.config);
2313 UNSERIALIZE_SCALAR(regs.mear);
2314 UNSERIALIZE_SCALAR(regs.ptscr);
2315 UNSERIALIZE_SCALAR(regs.isr);
2316 UNSERIALIZE_SCALAR(regs.imr);
2317 UNSERIALIZE_SCALAR(regs.ier);
2318 UNSERIALIZE_SCALAR(regs.ihr);
2319 UNSERIALIZE_SCALAR(regs.txdp);
2320 UNSERIALIZE_SCALAR(regs.txdp_hi);
2321 UNSERIALIZE_SCALAR(regs.txcfg);
2322 UNSERIALIZE_SCALAR(regs.gpior);
2323 UNSERIALIZE_SCALAR(regs.rxdp);
2324 UNSERIALIZE_SCALAR(regs.rxdp_hi);
2325 UNSERIALIZE_SCALAR(regs.rxcfg);
2326 UNSERIALIZE_SCALAR(regs.pqcr);
2327 UNSERIALIZE_SCALAR(regs.wcsr);
2328 UNSERIALIZE_SCALAR(regs.pcr);
2329 UNSERIALIZE_SCALAR(regs.rfcr);
2330 UNSERIALIZE_SCALAR(regs.rfdr);
2331 UNSERIALIZE_SCALAR(regs.brar);
2332 UNSERIALIZE_SCALAR(regs.brdr);
2333 UNSERIALIZE_SCALAR(regs.srr);
2334 UNSERIALIZE_SCALAR(regs.mibc);
2335 UNSERIALIZE_SCALAR(regs.vrcr);
2336 UNSERIALIZE_SCALAR(regs.vtcr);
2337 UNSERIALIZE_SCALAR(regs.vdr);
2338 UNSERIALIZE_SCALAR(regs.ccsr);
2339 UNSERIALIZE_SCALAR(regs.tbicr);
2340 UNSERIALIZE_SCALAR(regs.tbisr);
2341 UNSERIALIZE_SCALAR(regs.tanar);
2342 UNSERIALIZE_SCALAR(regs.tanlpar);
2343 UNSERIALIZE_SCALAR(regs.taner);
2344 UNSERIALIZE_SCALAR(regs.tesr);
2345
2346 UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2347 UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2348
2349 UNSERIALIZE_SCALAR(ioEnable);
2350
2351 /*
2352 * unserialize the data fifos
2353 */
2354 rxFifo.unserialize("rxFifo", cp);
2355 txFifo.unserialize("txFifo", cp);
2356
2357 /*
2358 * unserialize the various helper variables
2359 */
2360 bool txPacketExists;
2361 UNSERIALIZE_SCALAR(txPacketExists);
2362 if (txPacketExists) {
2363 txPacket = make_shared<EthPacketData>(16384);
2364 txPacket->unserialize("txPacket", cp);
2365 uint32_t txPktBufPtr;
2366 UNSERIALIZE_SCALAR(txPktBufPtr);
2367 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
2368 } else
2369 txPacket = 0;
2370
2371 bool rxPacketExists;
2372 UNSERIALIZE_SCALAR(rxPacketExists);
2373 rxPacket = 0;
2374 if (rxPacketExists) {
2375 rxPacket = make_shared<EthPacketData>();
2376 rxPacket->unserialize("rxPacket", cp);
2377 uint32_t rxPktBufPtr;
2378 UNSERIALIZE_SCALAR(rxPktBufPtr);
2379 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
2380 } else
2381 rxPacket = 0;
2382
2383 UNSERIALIZE_SCALAR(txXferLen);
2384 UNSERIALIZE_SCALAR(rxXferLen);
2385
2386 /*
2387 * Unserialize Cached Descriptors
2388 */
2389 UNSERIALIZE_SCALAR(rxDesc64.link);
2390 UNSERIALIZE_SCALAR(rxDesc64.bufptr);
2391 UNSERIALIZE_SCALAR(rxDesc64.cmdsts);
2392 UNSERIALIZE_SCALAR(rxDesc64.extsts);
2393 UNSERIALIZE_SCALAR(txDesc64.link);
2394 UNSERIALIZE_SCALAR(txDesc64.bufptr);
2395 UNSERIALIZE_SCALAR(txDesc64.cmdsts);
2396 UNSERIALIZE_SCALAR(txDesc64.extsts);
2397 UNSERIALIZE_SCALAR(rxDesc32.link);
2398 UNSERIALIZE_SCALAR(rxDesc32.bufptr);
2399 UNSERIALIZE_SCALAR(rxDesc32.cmdsts);
2400 UNSERIALIZE_SCALAR(rxDesc32.extsts);
2401 UNSERIALIZE_SCALAR(txDesc32.link);
2402 UNSERIALIZE_SCALAR(txDesc32.bufptr);
2403 UNSERIALIZE_SCALAR(txDesc32.cmdsts);
2404 UNSERIALIZE_SCALAR(txDesc32.extsts);
2405 UNSERIALIZE_SCALAR(extstsEnable);
2406
2407 /*
2408 * unserialize tx state machine
2409 */
2410 int txState;
2411 UNSERIALIZE_SCALAR(txState);
2412 this->txState = (TxState) txState;
2413 UNSERIALIZE_SCALAR(txEnable);
2414 UNSERIALIZE_SCALAR(CTDD);
2415 UNSERIALIZE_SCALAR(txFragPtr);
2416 UNSERIALIZE_SCALAR(txDescCnt);
2417 int txDmaState;
2418 UNSERIALIZE_SCALAR(txDmaState);
2419 this->txDmaState = (DmaState) txDmaState;
2420 UNSERIALIZE_SCALAR(txKickTick);
2421 if (txKickTick)
2422 schedule(txKickEvent, txKickTick);
2423
2424 /*
2425 * unserialize rx state machine
2426 */
2427 int rxState;
2428 UNSERIALIZE_SCALAR(rxState);
2429 this->rxState = (RxState) rxState;
2430 UNSERIALIZE_SCALAR(rxEnable);
2431 UNSERIALIZE_SCALAR(CRDD);
2432 UNSERIALIZE_SCALAR(rxPktBytes);
2433 UNSERIALIZE_SCALAR(rxFragPtr);
2434 UNSERIALIZE_SCALAR(rxDescCnt);
2435 int rxDmaState;
2436 UNSERIALIZE_SCALAR(rxDmaState);
2437 this->rxDmaState = (DmaState) rxDmaState;
2438 UNSERIALIZE_SCALAR(rxKickTick);
2439 if (rxKickTick)
2440 schedule(rxKickEvent, rxKickTick);
2441
2442 /*
2443 * Unserialize EEPROM state machine
2444 */
2445 int eepromState;
2446 UNSERIALIZE_SCALAR(eepromState);
2447 this->eepromState = (EEPROMState) eepromState;
2448 UNSERIALIZE_SCALAR(eepromClk);
2449 UNSERIALIZE_SCALAR(eepromBitsToRx);
2450 UNSERIALIZE_SCALAR(eepromOpcode);
2451 UNSERIALIZE_SCALAR(eepromAddress);
2452 UNSERIALIZE_SCALAR(eepromData);
2453
2454 /*
2455 * If there's a pending transmit, reschedule it now
2456 */
2457 Tick transmitTick;
2458 UNSERIALIZE_SCALAR(transmitTick);
2459 if (transmitTick)
2460 schedule(txEvent, curTick() + transmitTick);
2461
2462 /*
2463 * unserialize receive address filter settings
2464 */
2465 UNSERIALIZE_SCALAR(rxFilterEnable);
2466 UNSERIALIZE_SCALAR(acceptBroadcast);
2467 UNSERIALIZE_SCALAR(acceptMulticast);
2468 UNSERIALIZE_SCALAR(acceptUnicast);
2469 UNSERIALIZE_SCALAR(acceptPerfect);
2470 UNSERIALIZE_SCALAR(acceptArp);
2471 UNSERIALIZE_SCALAR(multicastHashEnable);
2472
2473 /*
2474 * Keep track of pending interrupt status.
2475 */
2476 UNSERIALIZE_SCALAR(intrTick);
2477 UNSERIALIZE_SCALAR(cpuPendingIntr);
2478 Tick intrEventTick;
2479 UNSERIALIZE_SCALAR(intrEventTick);
2480 if (intrEventTick) {
2481 intrEvent = new EventFunctionWrapper([this]{ cpuInterrupt(); },
2482 name(), true);
2483 schedule(intrEvent, intrEventTick);
2484 }
2485}
2486
2487NSGigE *
2488NSGigEParams::create()
2489{
2490 return new NSGigE(this);
2491}
1357 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
1358 extsts |= EXTSTS_UDPERR;
1359 }
1360 }
1361 }
1362 rxPacket = 0;
1363
1364 /*
1365 * the driver seems to always receive into desc buffers
1366 * of size 1514, so you never have a pkt that is split
1367 * into multiple descriptors on the receive side, so
1368 * i don't implement that case, hence the assert above.
1369 */
1370
1371 DPRINTF(EthernetDesc,
1372 "rxDesc: addr=%08x writeback cmdsts extsts\n",
1373 regs.rxdp & 0x3fffffff);
1374 DPRINTF(EthernetDesc,
1375 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1376 link, bufptr, cmdsts, extsts);
1377
1378 rxDmaAddr = regs.rxdp & 0x3fffffff;
1379 rxDmaData = &cmdsts;
1380 if (is64bit) {
1381 rxDmaAddr += offsetof(ns_desc64, cmdsts);
1382 rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
1383 } else {
1384 rxDmaAddr += offsetof(ns_desc32, cmdsts);
1385 rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
1386 }
1387 rxDmaFree = dmaDescFree;
1388
1389 descDmaWrites++;
1390 descDmaWrBytes += rxDmaLen;
1391
1392 if (doRxDmaWrite())
1393 goto exit;
1394 }
1395 break;
1396
1397 case rxFragWrite:
1398 if (rxDmaState != dmaIdle)
1399 goto exit;
1400
1401 rxPacketBufPtr += rxXferLen;
1402 rxFragPtr += rxXferLen;
1403 rxPktBytes -= rxXferLen;
1404
1405 rxState = rxFifoBlock;
1406 break;
1407
1408 case rxDescWrite:
1409 if (rxDmaState != dmaIdle)
1410 goto exit;
1411
1412 assert(cmdsts & CMDSTS_OWN);
1413
1414 assert(rxPacket == 0);
1415 devIntrPost(ISR_RXOK);
1416
1417 if (cmdsts & CMDSTS_INTR)
1418 devIntrPost(ISR_RXDESC);
1419
1420 if (!rxEnable) {
1421 DPRINTF(EthernetSM, "Halting the RX state machine\n");
1422 rxState = rxIdle;
1423 goto exit;
1424 } else
1425 rxState = rxAdvance;
1426 break;
1427
1428 case rxAdvance:
1429 if (link == 0) {
1430 devIntrPost(ISR_RXIDLE);
1431 rxState = rxIdle;
1432 CRDD = true;
1433 goto exit;
1434 } else {
1435 if (rxDmaState != dmaIdle)
1436 goto exit;
1437 rxState = rxDescRead;
1438 regs.rxdp = link;
1439 CRDD = false;
1440
1441 rxDmaAddr = regs.rxdp & 0x3fffffff;
1442 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1443 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1444 rxDmaFree = dmaDescFree;
1445
1446 if (doRxDmaRead())
1447 goto exit;
1448 }
1449 break;
1450
1451 default:
1452 panic("Invalid rxState!");
1453 }
1454
1455 DPRINTF(EthernetSM, "entering next rxState=%s\n",
1456 NsRxStateStrings[rxState]);
1457 goto next;
1458
1459 exit:
1460 /**
1461 * @todo do we want to schedule a future kick?
1462 */
1463 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
1464 NsRxStateStrings[rxState]);
1465
1466 if (!rxKickEvent.scheduled())
1467 schedule(rxKickEvent, rxKickTick);
1468}
1469
1470void
1471NSGigE::transmit()
1472{
1473 if (txFifo.empty()) {
1474 DPRINTF(Ethernet, "nothing to transmit\n");
1475 return;
1476 }
1477
1478 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
1479 txFifo.size());
1480 if (interface->sendPacket(txFifo.front())) {
1481#if TRACING_ON
1482 if (DTRACE(Ethernet)) {
1483 IpPtr ip(txFifo.front());
1484 if (ip) {
1485 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1486 TcpPtr tcp(ip);
1487 if (tcp) {
1488 DPRINTF(Ethernet,
1489 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1490 tcp->sport(), tcp->dport(), tcp->seq(),
1491 tcp->ack());
1492 }
1493 }
1494 }
1495#endif
1496
1497 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
1498 txBytes += txFifo.front()->length;
1499 txPackets++;
1500
1501 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
1502 txFifo.avail());
1503 txFifo.pop();
1504
1505 /*
1506 * normally do a writeback of the descriptor here, and ONLY
1507 * after that is done, send this interrupt. but since our
1508 * stuff never actually fails, just do this interrupt here,
1509 * otherwise the code has to stray from this nice format.
1510 * besides, it's functionally the same.
1511 */
1512 devIntrPost(ISR_TXOK);
1513 }
1514
1515 if (!txFifo.empty() && !txEvent.scheduled()) {
1516 DPRINTF(Ethernet, "reschedule transmit\n");
1517 schedule(txEvent, curTick() + retryTime);
1518 }
1519}
1520
1521bool
1522NSGigE::doTxDmaRead()
1523{
1524 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1525 txDmaState = dmaReading;
1526
1527 if (dmaPending() || drainState() != DrainState::Running)
1528 txDmaState = dmaReadWaiting;
1529 else
1530 dmaRead(txDmaAddr, txDmaLen, &txDmaReadEvent, (uint8_t*)txDmaData);
1531
1532 return true;
1533}
1534
1535void
1536NSGigE::txDmaReadDone()
1537{
1538 assert(txDmaState == dmaReading);
1539 txDmaState = dmaIdle;
1540
1541 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1542 txDmaAddr, txDmaLen);
1543 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1544
1545 // If the receive state machine has a pending DMA, let it go first
1546 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1547 rxKick();
1548
1549 txKick();
1550}
1551
1552bool
1553NSGigE::doTxDmaWrite()
1554{
1555 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1556 txDmaState = dmaWriting;
1557
1558 if (dmaPending() || drainState() != DrainState::Running)
1559 txDmaState = dmaWriteWaiting;
1560 else
1561 dmaWrite(txDmaAddr, txDmaLen, &txDmaWriteEvent, (uint8_t*)txDmaData);
1562 return true;
1563}
1564
1565void
1566NSGigE::txDmaWriteDone()
1567{
1568 assert(txDmaState == dmaWriting);
1569 txDmaState = dmaIdle;
1570
1571 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1572 txDmaAddr, txDmaLen);
1573 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1574
1575 // If the receive state machine has a pending DMA, let it go first
1576 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1577 rxKick();
1578
1579 txKick();
1580}
1581
1582void
1583NSGigE::txKick()
1584{
1585 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1586
1587 DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n",
1588 NsTxStateStrings[txState], is64bit ? 64 : 32);
1589
1590 Addr link, bufptr;
1591 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts;
1592 uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts;
1593
1594 next:
1595 if (txKickTick > curTick()) {
1596 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
1597 txKickTick);
1598 goto exit;
1599 }
1600
1601 // Go to the next state machine clock tick.
1602 txKickTick = clockEdge(Cycles(1));
1603
1604 switch(txDmaState) {
1605 case dmaReadWaiting:
1606 if (doTxDmaRead())
1607 goto exit;
1608 break;
1609 case dmaWriteWaiting:
1610 if (doTxDmaWrite())
1611 goto exit;
1612 break;
1613 default:
1614 break;
1615 }
1616
1617 link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link;
1618 bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr;
1619 switch (txState) {
1620 case txIdle:
1621 if (!txEnable) {
1622 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
1623 goto exit;
1624 }
1625
1626 if (CTDD) {
1627 txState = txDescRefr;
1628
1629 txDmaAddr = regs.txdp & 0x3fffffff;
1630 txDmaData =
1631 is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link;
1632 txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link);
1633 txDmaFree = dmaDescFree;
1634
1635 descDmaReads++;
1636 descDmaRdBytes += txDmaLen;
1637
1638 if (doTxDmaRead())
1639 goto exit;
1640
1641 } else {
1642 txState = txDescRead;
1643
1644 txDmaAddr = regs.txdp & 0x3fffffff;
1645 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
1646 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
1647 txDmaFree = dmaDescFree;
1648
1649 descDmaReads++;
1650 descDmaRdBytes += txDmaLen;
1651
1652 if (doTxDmaRead())
1653 goto exit;
1654 }
1655 break;
1656
1657 case txDescRefr:
1658 if (txDmaState != dmaIdle)
1659 goto exit;
1660
1661 txState = txAdvance;
1662 break;
1663
1664 case txDescRead:
1665 if (txDmaState != dmaIdle)
1666 goto exit;
1667
1668 DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n",
1669 regs.txdp & 0x3fffffff);
1670 DPRINTF(EthernetDesc,
1671 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
1672 link, bufptr, cmdsts, extsts);
1673
1674 if (cmdsts & CMDSTS_OWN) {
1675 txState = txFifoBlock;
1676 txFragPtr = bufptr;
1677 txDescCnt = cmdsts & CMDSTS_LEN_MASK;
1678 } else {
1679 devIntrPost(ISR_TXIDLE);
1680 txState = txIdle;
1681 goto exit;
1682 }
1683 break;
1684
1685 case txFifoBlock:
1686 if (!txPacket) {
1687 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
1688 txPacket = make_shared<EthPacketData>(16384);
1689 txPacketBufPtr = txPacket->data;
1690 }
1691
1692 if (txDescCnt == 0) {
1693 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
1694 if (cmdsts & CMDSTS_MORE) {
1695 DPRINTF(EthernetSM, "there are more descriptors to come\n");
1696 txState = txDescWrite;
1697
1698 cmdsts &= ~CMDSTS_OWN;
1699
1700 txDmaAddr = regs.txdp & 0x3fffffff;
1701 txDmaData = &cmdsts;
1702 if (is64bit) {
1703 txDmaAddr += offsetof(ns_desc64, cmdsts);
1704 txDmaLen = sizeof(txDesc64.cmdsts);
1705 } else {
1706 txDmaAddr += offsetof(ns_desc32, cmdsts);
1707 txDmaLen = sizeof(txDesc32.cmdsts);
1708 }
1709 txDmaFree = dmaDescFree;
1710
1711 if (doTxDmaWrite())
1712 goto exit;
1713
1714 } else { /* this packet is totally done */
1715 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
1716 /* deal with the the packet that just finished */
1717 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
1718 IpPtr ip(txPacket);
1719 if (extsts & EXTSTS_UDPPKT) {
1720 UdpPtr udp(ip);
1721 if (udp) {
1722 udp->sum(0);
1723 udp->sum(cksum(udp));
1724 txUdpChecksums++;
1725 } else {
1726 Debug::breakpoint();
1727 warn_once("UDPPKT set, but not UDP!\n");
1728 }
1729 } else if (extsts & EXTSTS_TCPPKT) {
1730 TcpPtr tcp(ip);
1731 if (tcp) {
1732 tcp->sum(0);
1733 tcp->sum(cksum(tcp));
1734 txTcpChecksums++;
1735 } else {
1736 warn_once("TCPPKT set, but not UDP!\n");
1737 }
1738 }
1739 if (extsts & EXTSTS_IPPKT) {
1740 if (ip) {
1741 ip->sum(0);
1742 ip->sum(cksum(ip));
1743 txIpChecksums++;
1744 } else {
1745 warn_once("IPPKT set, but not UDP!\n");
1746 }
1747 }
1748 }
1749
1750 txPacket->simLength = txPacketBufPtr - txPacket->data;
1751 txPacket->length = txPacketBufPtr - txPacket->data;
1752 // this is just because the receive can't handle a
1753 // packet bigger want to make sure
1754 if (txPacket->length > 1514)
1755 panic("transmit packet too large, %s > 1514\n",
1756 txPacket->length);
1757
1758#ifndef NDEBUG
1759 bool success =
1760#endif
1761 txFifo.push(txPacket);
1762 assert(success);
1763
1764 /*
1765 * this following section is not tqo spec, but
1766 * functionally shouldn't be any different. normally,
1767 * the chip will wait til the transmit has occurred
1768 * before writing back the descriptor because it has
1769 * to wait to see that it was successfully transmitted
1770 * to decide whether to set CMDSTS_OK or not.
1771 * however, in the simulator since it is always
1772 * successfully transmitted, and writing it exactly to
1773 * spec would complicate the code, we just do it here
1774 */
1775
1776 cmdsts &= ~CMDSTS_OWN;
1777 cmdsts |= CMDSTS_OK;
1778
1779 DPRINTF(EthernetDesc,
1780 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
1781 cmdsts, extsts);
1782
1783 txDmaFree = dmaDescFree;
1784 txDmaAddr = regs.txdp & 0x3fffffff;
1785 txDmaData = &cmdsts;
1786 if (is64bit) {
1787 txDmaAddr += offsetof(ns_desc64, cmdsts);
1788 txDmaLen =
1789 sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts);
1790 } else {
1791 txDmaAddr += offsetof(ns_desc32, cmdsts);
1792 txDmaLen =
1793 sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts);
1794 }
1795
1796 descDmaWrites++;
1797 descDmaWrBytes += txDmaLen;
1798
1799 transmit();
1800 txPacket = 0;
1801
1802 if (!txEnable) {
1803 DPRINTF(EthernetSM, "halting TX state machine\n");
1804 txState = txIdle;
1805 goto exit;
1806 } else
1807 txState = txAdvance;
1808
1809 if (doTxDmaWrite())
1810 goto exit;
1811 }
1812 } else {
1813 DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
1814 if (!txFifo.full()) {
1815 txState = txFragRead;
1816
1817 /*
1818 * The number of bytes transferred is either whatever
1819 * is left in the descriptor (txDescCnt), or if there
1820 * is not enough room in the fifo, just whatever room
1821 * is left in the fifo
1822 */
1823 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());
1824
1825 txDmaAddr = txFragPtr & 0x3fffffff;
1826 txDmaData = txPacketBufPtr;
1827 txDmaLen = txXferLen;
1828 txDmaFree = dmaDataFree;
1829
1830 if (doTxDmaRead())
1831 goto exit;
1832 } else {
1833 txState = txFifoBlock;
1834 transmit();
1835
1836 goto exit;
1837 }
1838
1839 }
1840 break;
1841
1842 case txFragRead:
1843 if (txDmaState != dmaIdle)
1844 goto exit;
1845
1846 txPacketBufPtr += txXferLen;
1847 txFragPtr += txXferLen;
1848 txDescCnt -= txXferLen;
1849 txFifo.reserve(txXferLen);
1850
1851 txState = txFifoBlock;
1852 break;
1853
1854 case txDescWrite:
1855 if (txDmaState != dmaIdle)
1856 goto exit;
1857
1858 if (cmdsts & CMDSTS_INTR)
1859 devIntrPost(ISR_TXDESC);
1860
1861 if (!txEnable) {
1862 DPRINTF(EthernetSM, "halting TX state machine\n");
1863 txState = txIdle;
1864 goto exit;
1865 } else
1866 txState = txAdvance;
1867 break;
1868
1869 case txAdvance:
1870 if (link == 0) {
1871 devIntrPost(ISR_TXIDLE);
1872 txState = txIdle;
1873 goto exit;
1874 } else {
1875 if (txDmaState != dmaIdle)
1876 goto exit;
1877 txState = txDescRead;
1878 regs.txdp = link;
1879 CTDD = false;
1880
1881 txDmaAddr = link & 0x3fffffff;
1882 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
1883 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
1884 txDmaFree = dmaDescFree;
1885
1886 if (doTxDmaRead())
1887 goto exit;
1888 }
1889 break;
1890
1891 default:
1892 panic("invalid state");
1893 }
1894
1895 DPRINTF(EthernetSM, "entering next txState=%s\n",
1896 NsTxStateStrings[txState]);
1897 goto next;
1898
1899 exit:
1900 /**
1901 * @todo do we want to schedule a future kick?
1902 */
1903 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
1904 NsTxStateStrings[txState]);
1905
1906 if (!txKickEvent.scheduled())
1907 schedule(txKickEvent, txKickTick);
1908}
1909
1910/**
1911 * Advance the EEPROM state machine
1912 * Called on rising edge of EEPROM clock bit in MEAR
1913 */
1914void
1915NSGigE::eepromKick()
1916{
1917 switch (eepromState) {
1918
1919 case eepromStart:
1920
1921 // Wait for start bit
1922 if (regs.mear & MEAR_EEDI) {
1923 // Set up to get 2 opcode bits
1924 eepromState = eepromGetOpcode;
1925 eepromBitsToRx = 2;
1926 eepromOpcode = 0;
1927 }
1928 break;
1929
1930 case eepromGetOpcode:
1931 eepromOpcode <<= 1;
1932 eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
1933 --eepromBitsToRx;
1934
1935 // Done getting opcode
1936 if (eepromBitsToRx == 0) {
1937 if (eepromOpcode != EEPROM_READ)
1938 panic("only EEPROM reads are implemented!");
1939
1940 // Set up to get address
1941 eepromState = eepromGetAddress;
1942 eepromBitsToRx = 6;
1943 eepromAddress = 0;
1944 }
1945 break;
1946
1947 case eepromGetAddress:
1948 eepromAddress <<= 1;
1949 eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
1950 --eepromBitsToRx;
1951
1952 // Done getting address
1953 if (eepromBitsToRx == 0) {
1954
1955 if (eepromAddress >= EEPROM_SIZE)
1956 panic("EEPROM read access out of range!");
1957
1958 switch (eepromAddress) {
1959
1960 case EEPROM_PMATCH2_ADDR:
1961 eepromData = rom.perfectMatch[5];
1962 eepromData <<= 8;
1963 eepromData += rom.perfectMatch[4];
1964 break;
1965
1966 case EEPROM_PMATCH1_ADDR:
1967 eepromData = rom.perfectMatch[3];
1968 eepromData <<= 8;
1969 eepromData += rom.perfectMatch[2];
1970 break;
1971
1972 case EEPROM_PMATCH0_ADDR:
1973 eepromData = rom.perfectMatch[1];
1974 eepromData <<= 8;
1975 eepromData += rom.perfectMatch[0];
1976 break;
1977
1978 default:
1979 panic("FreeBSD driver only uses EEPROM to read PMATCH!");
1980 }
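// Each 16-bit EEPROM word above packs two consecutive bytes of the
// perfect-match (station) address, lower-numbered byte in the low half:
// e.g. the PMATCH0 word reads back as
// (perfectMatch[1] << 8) | perfectMatch[0].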
1981 // Set up to read data
1982 eepromState = eepromRead;
1983 eepromBitsToRx = 16;
1984
1985 // Clear data in bit
1986 regs.mear &= ~MEAR_EEDI;
1987 }
1988 break;
1989
1990 case eepromRead:
1991 // Clear Data Out bit
1992 regs.mear &= ~MEAR_EEDO;
1993 // Set bit to value of current EEPROM bit
1994 regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;
1995
1996 eepromData <<= 1;
1997 --eepromBitsToRx;
1998
1999 // All done
2000 if (eepromBitsToRx == 0) {
2001 eepromState = eepromStart;
2002 }
2003 break;
2004
2005 default:
2006 panic("invalid EEPROM state");
2007 }
2008
2009}
2010
2011void
2012NSGigE::transferDone()
2013{
2014 if (txFifo.empty()) {
2015 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2016 return;
2017 }
2018
2019 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2020
2021 reschedule(txEvent, clockEdge(Cycles(1)), true);
2022}
2023
2024bool
2025NSGigE::rxFilter(const EthPacketPtr &packet)
2026{
2027 EthPtr eth = packet;
2028 bool drop = true;
2029 string type;
2030
2031 const EthAddr &dst = eth->dst();
2032 if (dst.unicast()) {
2033 // If we're accepting all unicast addresses
2034 if (acceptUnicast)
2035 drop = false;
2036
2037 // If we make a perfect match
2038 if (acceptPerfect && dst == rom.perfectMatch)
2039 drop = false;
2040
2041 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2042 drop = false;
2043
2044 } else if (dst.broadcast()) {
2045 // if we're accepting broadcasts
2046 if (acceptBroadcast)
2047 drop = false;
2048
2049 } else if (dst.multicast()) {
2050 // if we're accepting all multicasts
2051 if (acceptMulticast)
2052 drop = false;
2053
2054 // Multicast hashing faked - all packets accepted
2055 if (multicastHashEnable)
2056 drop = false;
2057 }
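// Illustrative outcome (no real addresses implied): with acceptPerfect
// set and acceptUnicast clear, a unicast frame whose destination equals
// rom.perfectMatch is kept (drop == false), while other unicast frames
// are dropped unless acceptArp is set and the frame is ARP.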
2058
2059 if (drop) {
2060 DPRINTF(Ethernet, "rxFilter drop\n");
2061 DDUMP(EthernetData, packet->data, packet->length);
2062 }
2063
2064 return drop;
2065}
2066
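// Return value note: true covers both packets that are queued and
// packets that are intentionally dropped (rx disabled, filtering
// disabled, or filtered out); false is returned only when the rx fifo
// cannot hold the packet, in which case ISR_RXORN is posted.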
2067bool
2068NSGigE::recvPacket(EthPacketPtr packet)
2069{
2070 rxBytes += packet->length;
2071 rxPackets++;
2072
2073 DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
2074 rxFifo.avail());
2075
2076 if (!rxEnable) {
2077 DPRINTF(Ethernet, "receive disabled...packet dropped\n");
2078 return true;
2079 }
2080
2081 if (!rxFilterEnable) {
2082 DPRINTF(Ethernet,
2083 "receive packet filtering disabled...packet dropped\n");
2084 return true;
2085 }
2086
2087 if (rxFilter(packet)) {
2088 DPRINTF(Ethernet, "packet filtered...dropped\n");
2089 return true;
2090 }
2091
2092 if (rxFifo.avail() < packet->length) {
2093#if TRACING_ON
2094 IpPtr ip(packet);
2095 TcpPtr tcp(ip);
2096 if (ip) {
2097 DPRINTF(Ethernet,
2098 "packet won't fit in receive buffer...pkt ID %d dropped\n",
2099 ip->id());
2100 if (tcp) {
2101 DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
2102 }
2103 }
2104#endif
2105 droppedPackets++;
2106 devIntrPost(ISR_RXORN);
2107 return false;
2108 }
2109
2110 rxFifo.push(packet);
2111
2112 rxKick();
2113 return true;
2114}
2115
2116
2117void
2118NSGigE::drainResume()
2119{
2120 Drainable::drainResume();
2121
2122 // During a drain we could have left the state machines in a waiting state,
2123 // and they wouldn't get going again until some other event occurred to kick
2124 // them. Kicking them here gets them out immediately.
2125 txKick();
2126 rxKick();
2127}
2128
2129
2130//=====================================================================
2131//
2132//
2133void
2134NSGigE::serialize(CheckpointOut &cp) const
2135{
2136 // Serialize the PciDevice base class
2137 PciDevice::serialize(cp);
2138
2139 /*
2140 * Finalize any DMA events now.
2141 */
2142 // @todo will mem system save pending dma?
2143
2144 /*
2145 * Serialize the device registers
2146 */
2147 SERIALIZE_SCALAR(regs.command);
2148 SERIALIZE_SCALAR(regs.config);
2149 SERIALIZE_SCALAR(regs.mear);
2150 SERIALIZE_SCALAR(regs.ptscr);
2151 SERIALIZE_SCALAR(regs.isr);
2152 SERIALIZE_SCALAR(regs.imr);
2153 SERIALIZE_SCALAR(regs.ier);
2154 SERIALIZE_SCALAR(regs.ihr);
2155 SERIALIZE_SCALAR(regs.txdp);
2156 SERIALIZE_SCALAR(regs.txdp_hi);
2157 SERIALIZE_SCALAR(regs.txcfg);
2158 SERIALIZE_SCALAR(regs.gpior);
2159 SERIALIZE_SCALAR(regs.rxdp);
2160 SERIALIZE_SCALAR(regs.rxdp_hi);
2161 SERIALIZE_SCALAR(regs.rxcfg);
2162 SERIALIZE_SCALAR(regs.pqcr);
2163 SERIALIZE_SCALAR(regs.wcsr);
2164 SERIALIZE_SCALAR(regs.pcr);
2165 SERIALIZE_SCALAR(regs.rfcr);
2166 SERIALIZE_SCALAR(regs.rfdr);
2167 SERIALIZE_SCALAR(regs.brar);
2168 SERIALIZE_SCALAR(regs.brdr);
2169 SERIALIZE_SCALAR(regs.srr);
2170 SERIALIZE_SCALAR(regs.mibc);
2171 SERIALIZE_SCALAR(regs.vrcr);
2172 SERIALIZE_SCALAR(regs.vtcr);
2173 SERIALIZE_SCALAR(regs.vdr);
2174 SERIALIZE_SCALAR(regs.ccsr);
2175 SERIALIZE_SCALAR(regs.tbicr);
2176 SERIALIZE_SCALAR(regs.tbisr);
2177 SERIALIZE_SCALAR(regs.tanar);
2178 SERIALIZE_SCALAR(regs.tanlpar);
2179 SERIALIZE_SCALAR(regs.taner);
2180 SERIALIZE_SCALAR(regs.tesr);
2181
2182 SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2183 SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2184
2185 SERIALIZE_SCALAR(ioEnable);
2186
2187 /*
2188 * Serialize the data Fifos
2189 */
2190 rxFifo.serialize("rxFifo", cp);
2191 txFifo.serialize("txFifo", cp);
2192
2193 /*
2194 * Serialize the various helper variables
2195 */
2196 bool txPacketExists = txPacket != nullptr;
2197 SERIALIZE_SCALAR(txPacketExists);
2198 if (txPacketExists) {
2199 txPacket->simLength = txPacketBufPtr - txPacket->data;
2200 txPacket->length = txPacketBufPtr - txPacket->data;
2201 txPacket->serialize("txPacket", cp);
2202 uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
2203 SERIALIZE_SCALAR(txPktBufPtr);
2204 }
2205
2206 bool rxPacketExists = rxPacket != nullptr;
2207 SERIALIZE_SCALAR(rxPacketExists);
2208 if (rxPacketExists) {
2209 rxPacket->serialize("rxPacket", cp);
2210 uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
2211 SERIALIZE_SCALAR(rxPktBufPtr);
2212 }
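// The buffer cursors are checkpointed as byte offsets from the start of
// the packet data rather than as raw pointers, since the heap addresses
// will differ once the checkpoint is restored; unserialize below rebuilds
// txPacketBufPtr/rxPacketBufPtr from these offsets.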
2213
2214 SERIALIZE_SCALAR(txXferLen);
2215 SERIALIZE_SCALAR(rxXferLen);
2216
2217 /*
2218 * Serialize Cached Descriptors
2219 */
2220 SERIALIZE_SCALAR(rxDesc64.link);
2221 SERIALIZE_SCALAR(rxDesc64.bufptr);
2222 SERIALIZE_SCALAR(rxDesc64.cmdsts);
2223 SERIALIZE_SCALAR(rxDesc64.extsts);
2224 SERIALIZE_SCALAR(txDesc64.link);
2225 SERIALIZE_SCALAR(txDesc64.bufptr);
2226 SERIALIZE_SCALAR(txDesc64.cmdsts);
2227 SERIALIZE_SCALAR(txDesc64.extsts);
2228 SERIALIZE_SCALAR(rxDesc32.link);
2229 SERIALIZE_SCALAR(rxDesc32.bufptr);
2230 SERIALIZE_SCALAR(rxDesc32.cmdsts);
2231 SERIALIZE_SCALAR(rxDesc32.extsts);
2232 SERIALIZE_SCALAR(txDesc32.link);
2233 SERIALIZE_SCALAR(txDesc32.bufptr);
2234 SERIALIZE_SCALAR(txDesc32.cmdsts);
2235 SERIALIZE_SCALAR(txDesc32.extsts);
2236 SERIALIZE_SCALAR(extstsEnable);
2237
2238 /*
2239 * Serialize tx state machine
2240 */
2241 int txState = this->txState;
2242 SERIALIZE_SCALAR(txState);
2243 SERIALIZE_SCALAR(txEnable);
2244 SERIALIZE_SCALAR(CTDD);
2245 SERIALIZE_SCALAR(txFragPtr);
2246 SERIALIZE_SCALAR(txDescCnt);
2247 int txDmaState = this->txDmaState;
2248 SERIALIZE_SCALAR(txDmaState);
2249 SERIALIZE_SCALAR(txKickTick);
2250
2251 /*
2252 * Serialize rx state machine
2253 */
2254 int rxState = this->rxState;
2255 SERIALIZE_SCALAR(rxState);
2256 SERIALIZE_SCALAR(rxEnable);
2257 SERIALIZE_SCALAR(CRDD);
2258 SERIALIZE_SCALAR(rxPktBytes);
2259 SERIALIZE_SCALAR(rxFragPtr);
2260 SERIALIZE_SCALAR(rxDescCnt);
2261 int rxDmaState = this->rxDmaState;
2262 SERIALIZE_SCALAR(rxDmaState);
2263 SERIALIZE_SCALAR(rxKickTick);
2264
2265 /*
2266 * Serialize EEPROM state machine
2267 */
2268 int eepromState = this->eepromState;
2269 SERIALIZE_SCALAR(eepromState);
2270 SERIALIZE_SCALAR(eepromClk);
2271 SERIALIZE_SCALAR(eepromBitsToRx);
2272 SERIALIZE_SCALAR(eepromOpcode);
2273 SERIALIZE_SCALAR(eepromAddress);
2274 SERIALIZE_SCALAR(eepromData);
2275
2276 /*
2277 * If there's a pending transmit, store the time so we can
2278 * reschedule it later
2279 */
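// The tick is stored as a delta from curTick() so that the transmit can
// be rescheduled relative to whatever tick the simulation has reached
// when the checkpoint is restored (see unserialize below).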
2280 Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick() : 0;
2281 SERIALIZE_SCALAR(transmitTick);
2282
2283 /*
2284 * receive address filter settings
2285 */
2286 SERIALIZE_SCALAR(rxFilterEnable);
2287 SERIALIZE_SCALAR(acceptBroadcast);
2288 SERIALIZE_SCALAR(acceptMulticast);
2289 SERIALIZE_SCALAR(acceptUnicast);
2290 SERIALIZE_SCALAR(acceptPerfect);
2291 SERIALIZE_SCALAR(acceptArp);
2292 SERIALIZE_SCALAR(multicastHashEnable);
2293
2294 /*
2295 * Keep track of pending interrupt status.
2296 */
2297 SERIALIZE_SCALAR(intrTick);
2298 SERIALIZE_SCALAR(cpuPendingIntr);
2299 Tick intrEventTick = 0;
2300 if (intrEvent)
2301 intrEventTick = intrEvent->when();
2302 SERIALIZE_SCALAR(intrEventTick);
2303
2304}
2305
2306void
2307NSGigE::unserialize(CheckpointIn &cp)
2308{
2309 // Unserialize the PciDevice base class
2310 PciDevice::unserialize(cp);
2311
2312 UNSERIALIZE_SCALAR(regs.command);
2313 UNSERIALIZE_SCALAR(regs.config);
2314 UNSERIALIZE_SCALAR(regs.mear);
2315 UNSERIALIZE_SCALAR(regs.ptscr);
2316 UNSERIALIZE_SCALAR(regs.isr);
2317 UNSERIALIZE_SCALAR(regs.imr);
2318 UNSERIALIZE_SCALAR(regs.ier);
2319 UNSERIALIZE_SCALAR(regs.ihr);
2320 UNSERIALIZE_SCALAR(regs.txdp);
2321 UNSERIALIZE_SCALAR(regs.txdp_hi);
2322 UNSERIALIZE_SCALAR(regs.txcfg);
2323 UNSERIALIZE_SCALAR(regs.gpior);
2324 UNSERIALIZE_SCALAR(regs.rxdp);
2325 UNSERIALIZE_SCALAR(regs.rxdp_hi);
2326 UNSERIALIZE_SCALAR(regs.rxcfg);
2327 UNSERIALIZE_SCALAR(regs.pqcr);
2328 UNSERIALIZE_SCALAR(regs.wcsr);
2329 UNSERIALIZE_SCALAR(regs.pcr);
2330 UNSERIALIZE_SCALAR(regs.rfcr);
2331 UNSERIALIZE_SCALAR(regs.rfdr);
2332 UNSERIALIZE_SCALAR(regs.brar);
2333 UNSERIALIZE_SCALAR(regs.brdr);
2334 UNSERIALIZE_SCALAR(regs.srr);
2335 UNSERIALIZE_SCALAR(regs.mibc);
2336 UNSERIALIZE_SCALAR(regs.vrcr);
2337 UNSERIALIZE_SCALAR(regs.vtcr);
2338 UNSERIALIZE_SCALAR(regs.vdr);
2339 UNSERIALIZE_SCALAR(regs.ccsr);
2340 UNSERIALIZE_SCALAR(regs.tbicr);
2341 UNSERIALIZE_SCALAR(regs.tbisr);
2342 UNSERIALIZE_SCALAR(regs.tanar);
2343 UNSERIALIZE_SCALAR(regs.tanlpar);
2344 UNSERIALIZE_SCALAR(regs.taner);
2345 UNSERIALIZE_SCALAR(regs.tesr);
2346
2347 UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2348 UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2349
2350 UNSERIALIZE_SCALAR(ioEnable);
2351
2352 /*
2353 * unserialize the data fifos
2354 */
2355 rxFifo.unserialize("rxFifo", cp);
2356 txFifo.unserialize("txFifo", cp);
2357
2358 /*
2359 * unserialize the various helper variables
2360 */
2361 bool txPacketExists;
2362 UNSERIALIZE_SCALAR(txPacketExists);
2363 if (txPacketExists) {
2364 txPacket = make_shared<EthPacketData>(16384);
2365 txPacket->unserialize("txPacket", cp);
2366 uint32_t txPktBufPtr;
2367 UNSERIALIZE_SCALAR(txPktBufPtr);
2368 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
2369 } else
2370 txPacket = 0;
2371
2372 bool rxPacketExists;
2373 UNSERIALIZE_SCALAR(rxPacketExists);
2375 if (rxPacketExists) {
2376 rxPacket = make_shared<EthPacketData>();
2377 rxPacket->unserialize("rxPacket", cp);
2378 uint32_t rxPktBufPtr;
2379 UNSERIALIZE_SCALAR(rxPktBufPtr);
2380 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
2381 } else
2382 rxPacket = 0;
2383
2384 UNSERIALIZE_SCALAR(txXferLen);
2385 UNSERIALIZE_SCALAR(rxXferLen);
2386
2387 /*
2388 * Unserialize Cached Descriptors
2389 */
2390 UNSERIALIZE_SCALAR(rxDesc64.link);
2391 UNSERIALIZE_SCALAR(rxDesc64.bufptr);
2392 UNSERIALIZE_SCALAR(rxDesc64.cmdsts);
2393 UNSERIALIZE_SCALAR(rxDesc64.extsts);
2394 UNSERIALIZE_SCALAR(txDesc64.link);
2395 UNSERIALIZE_SCALAR(txDesc64.bufptr);
2396 UNSERIALIZE_SCALAR(txDesc64.cmdsts);
2397 UNSERIALIZE_SCALAR(txDesc64.extsts);
2398 UNSERIALIZE_SCALAR(rxDesc32.link);
2399 UNSERIALIZE_SCALAR(rxDesc32.bufptr);
2400 UNSERIALIZE_SCALAR(rxDesc32.cmdsts);
2401 UNSERIALIZE_SCALAR(rxDesc32.extsts);
2402 UNSERIALIZE_SCALAR(txDesc32.link);
2403 UNSERIALIZE_SCALAR(txDesc32.bufptr);
2404 UNSERIALIZE_SCALAR(txDesc32.cmdsts);
2405 UNSERIALIZE_SCALAR(txDesc32.extsts);
2406 UNSERIALIZE_SCALAR(extstsEnable);
2407
2408 /*
2409 * unserialize tx state machine
2410 */
2411 int txState;
2412 UNSERIALIZE_SCALAR(txState);
2413 this->txState = (TxState) txState;
2414 UNSERIALIZE_SCALAR(txEnable);
2415 UNSERIALIZE_SCALAR(CTDD);
2416 UNSERIALIZE_SCALAR(txFragPtr);
2417 UNSERIALIZE_SCALAR(txDescCnt);
2418 int txDmaState;
2419 UNSERIALIZE_SCALAR(txDmaState);
2420 this->txDmaState = (DmaState) txDmaState;
2421 UNSERIALIZE_SCALAR(txKickTick);
2422 if (txKickTick)
2423 schedule(txKickEvent, txKickTick);
2424
2425 /*
2426 * unserialize rx state machine
2427 */
2428 int rxState;
2429 UNSERIALIZE_SCALAR(rxState);
2430 this->rxState = (RxState) rxState;
2431 UNSERIALIZE_SCALAR(rxEnable);
2432 UNSERIALIZE_SCALAR(CRDD);
2433 UNSERIALIZE_SCALAR(rxPktBytes);
2434 UNSERIALIZE_SCALAR(rxFragPtr);
2435 UNSERIALIZE_SCALAR(rxDescCnt);
2436 int rxDmaState;
2437 UNSERIALIZE_SCALAR(rxDmaState);
2438 this->rxDmaState = (DmaState) rxDmaState;
2439 UNSERIALIZE_SCALAR(rxKickTick);
2440 if (rxKickTick)
2441 schedule(rxKickEvent, rxKickTick);
2442
2443 /*
2444 * Unserialize EEPROM state machine
2445 */
2446 int eepromState;
2447 UNSERIALIZE_SCALAR(eepromState);
2448 this->eepromState = (EEPROMState) eepromState;
2449 UNSERIALIZE_SCALAR(eepromClk);
2450 UNSERIALIZE_SCALAR(eepromBitsToRx);
2451 UNSERIALIZE_SCALAR(eepromOpcode);
2452 UNSERIALIZE_SCALAR(eepromAddress);
2453 UNSERIALIZE_SCALAR(eepromData);
2454
2455 /*
2456 * If there's a pending transmit, reschedule it now
2457 */
2458 Tick transmitTick;
2459 UNSERIALIZE_SCALAR(transmitTick);
2460 if (transmitTick)
2461 schedule(txEvent, curTick() + transmitTick);
2462
2463 /*
2464 * unserialize receive address filter settings
2465 */
2466 UNSERIALIZE_SCALAR(rxFilterEnable);
2467 UNSERIALIZE_SCALAR(acceptBroadcast);
2468 UNSERIALIZE_SCALAR(acceptMulticast);
2469 UNSERIALIZE_SCALAR(acceptUnicast);
2470 UNSERIALIZE_SCALAR(acceptPerfect);
2471 UNSERIALIZE_SCALAR(acceptArp);
2472 UNSERIALIZE_SCALAR(multicastHashEnable);
2473
2474 /*
2475 * Keep track of pending interrupt status.
2476 */
2477 UNSERIALIZE_SCALAR(intrTick);
2478 UNSERIALIZE_SCALAR(cpuPendingIntr);
2479 Tick intrEventTick;
2480 UNSERIALIZE_SCALAR(intrEventTick);
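// A non-zero tick means an interrupt-post event was pending at
// checkpoint time. The event object itself is not checkpointed, so a
// fresh EventFunctionWrapper is allocated here and scheduled at the
// saved absolute tick.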
2481 if (intrEventTick) {
2482 intrEvent = new EventFunctionWrapper([this]{ cpuInterrupt(); },
2483 name(), true);
2484 schedule(intrEvent, intrEventTick);
2485 }
2486}
2487
2488NSGigE *
2489NSGigEParams::create()
2490{
2491 return new NSGigE(this);
2492}