ns_gige.cc revision 1036
/*
 * Copyright (c) 2004 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* @file
 * Device module for modelling the National Semiconductor
 * DP83820 ethernet controller.  Does not support priority queueing
 */
#include <cstdio>
#include <deque>
#include <string>

#include "base/inet.hh"
#include "cpu/exec_context.hh"
#include "cpu/intr_control.hh"
#include "dev/dma.hh"
#include "dev/etherlink.hh"
#include "dev/ns_gige.hh"
#include "dev/pciconfigall.hh"
#include "dev/tsunami_cchip.hh"
#include "mem/bus/bus.hh"
#include "mem/bus/dma_interface.hh"
#include "mem/bus/pio_interface.hh"
#include "mem/bus/pio_interface_impl.hh"
#include "mem/functional_mem/memory_control.hh"
#include "mem/functional_mem/physical_memory.hh"
#include "sim/builder.hh"
#include "sim/debug.hh"
#include "sim/host.hh"
#include "sim/sim_stats.hh"
#include "targetarch/vtophys.hh"

const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};

const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};

const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};

using namespace std;

// helper function declarations
// These functions reverse endianness so we can evaluate network data
// correctly
uint16_t reverseEnd16(uint16_t);
uint32_t reverseEnd32(uint32_t);

///////////////////////////////////////////////////////////////////////
//
// NSGigE PCI Device
//
NSGigE::NSGigE(const std::string &name, IntrControl *i, Tick intr_delay,
               PhysicalMemory *pmem, Tick tx_delay, Tick rx_delay,
               MemoryController *mmu, HierParams *hier, Bus *header_bus,
               Bus *payload_bus, Tick pio_latency, bool dma_desc_free,
               bool dma_data_free, Tick dma_read_delay,
               Tick dma_write_delay,
               Tick dma_read_factor, Tick dma_write_factor, PciConfigAll *cf,
               PciConfigData *cd, Tsunami *t, uint32_t bus, uint32_t dev,
               uint32_t func, bool rx_filter, const int eaddr[6],
               uint32_t tx_fifo_size, uint32_t rx_fifo_size)
    : PciDev(name, mmu, cf, cd, bus, dev, func), tsunami(t), ioEnable(false),
      maxTxFifoSize(tx_fifo_size), maxRxFifoSize(rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), txState(txIdle), txEnable(false),
      CTDD(false), txFifoAvail(tx_fifo_size),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      rxEnable(false), CRDD(false), rxPktBytes(0), rxFifoCnt(0),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(dma_desc_free), dmaDataFree(dma_data_free),
      txDelay(tx_delay), rxDelay(rx_delay), rxKickTick(0), txKickTick(0),
      txEvent(this), rxFilterEnable(rx_filter), acceptBroadcast(false),
      acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false),
      physmem(pmem), intctrl(i), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{
    tsunami->ethernet = this;

    if (header_bus) {
        pioInterface = newPioInterface(name, hier, header_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = pio_latency * header_bus->clockRatio;

        if (payload_bus)
            dmaInterface = new DMAInterface<Bus>(name + ".dma",
                                                 header_bus, payload_bus, 1);
        else
            dmaInterface = new DMAInterface<Bus>(name + ".dma",
                                                 header_bus, header_bus, 1);
    } else if (payload_bus) {
        pioInterface = newPioInterface(name, hier, payload_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = pio_latency * payload_bus->clockRatio;

        dmaInterface = new DMAInterface<Bus>(name + ".dma", payload_bus,
                                             payload_bus, 1);
    }

    intrDelay = US2Ticks(intr_delay);
    dmaReadDelay = dma_read_delay;
    dmaWriteDelay = dma_write_delay;
    dmaReadFactor = dma_read_factor;
    dmaWriteFactor = dma_write_factor;

    regsReset();
    rom.perfectMatch[0] = eaddr[0];
    rom.perfectMatch[1] = eaddr[1];
    rom.perfectMatch[2] = eaddr[2];
    rom.perfectMatch[3] = eaddr[3];
    rom.perfectMatch[4] = eaddr[4];
    rom.perfectMatch[5] = eaddr[5];
}

NSGigE::~NSGigE()
{}

void
NSGigE::regStats()
{
    txBytes
        .name(name() + ".txBytes")
        .desc("Bytes Transmitted")
        .prereq(txBytes)
        ;

    rxBytes
        .name(name() + ".rxBytes")
        .desc("Bytes Received")
        .prereq(rxBytes)
        ;

    txPackets
        .name(name() + ".txPackets")
        .desc("Number of Packets Transmitted")
        .prereq(txBytes)
        ;

    rxPackets
        .name(name() + ".rxPackets")
        .desc("Number of Packets Received")
        .prereq(rxBytes)
        ;

    txIPChecksums
        .name(name() + ".txIPChecksums")
        .desc("Number of tx IP Checksums done by device")
        .precision(0)
        .prereq(txBytes)
        ;

    rxIPChecksums
        .name(name() + ".rxIPChecksums")
        .desc("Number of rx IP Checksums done by device")
        .precision(0)
        .prereq(rxBytes)
        ;

    txTCPChecksums
        .name(name() + ".txTCPChecksums")
        .desc("Number of tx TCP Checksums done by device")
        .precision(0)
        .prereq(txBytes)
        ;

    rxTCPChecksums
        .name(name() + ".rxTCPChecksums")
        .desc("Number of rx TCP Checksums done by device")
        .precision(0)
        .prereq(rxBytes)
        ;

    descDmaReads
        .name(name() + ".descDMAReads")
        .desc("Number of descriptors the device read w/ DMA")
        .precision(0)
        ;

    descDmaWrites
        .name(name() + ".descDMAWrites")
        .desc("Number of descriptors the device wrote w/ DMA")
        .precision(0)
        ;

    descDmaRdBytes
        .name(name() + ".descDmaReadBytes")
        .desc("number of descriptor bytes read w/ DMA")
        .precision(0)
        ;

    descDmaWrBytes
        .name(name() + ".descDmaWriteBytes")
        .desc("number of descriptor bytes written w/ DMA")
        .precision(0)
        ;

    txBandwidth
        .name(name() + ".txBandwidth")
        .desc("Transmit Bandwidth (bits/s)")
        .precision(0)
        .prereq(txBytes)
        ;

    rxBandwidth
        .name(name() + ".rxBandwidth")
        .desc("Receive Bandwidth (bits/s)")
        .precision(0)
        .prereq(rxBytes)
        ;

    txPacketRate
        .name(name() + ".txPPS")
        .desc("Packet Transmission Rate (packets/s)")
        .precision(0)
        .prereq(txBytes)
        ;

    rxPacketRate
        .name(name() + ".rxPPS")
        .desc("Packet Reception Rate (packets/s)")
        .precision(0)
        .prereq(rxBytes)
        ;

    txBandwidth = txBytes * Stats::constant(8) / simSeconds;
    rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
    txPacketRate = txPackets / simSeconds;
    rxPacketRate = rxPackets / simSeconds;
}

/**
 * This is to read the PCI general configuration registers
 */
void
NSGigE::ReadConfig(int offset, int size, uint8_t *data)
{
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::ReadConfig(offset, size, data);
    else
        panic("Device specific PCI config space not implemented!\n");
}

/**
 * This is to write to the PCI general configuration registers
 */
void
NSGigE::WriteConfig(int offset, int size, uint32_t data)
{
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::WriteConfig(offset, size, data);
    else
        panic("Device specific PCI config space not implemented!\n");

    // Need to catch writes to BARs to update the PIO interface
    switch (offset) {
        // Things seem to work fine without most of these PCI settings being
        // implemented, but we track the I/O enable bit so an assertion will
        // fail if we ever need to implement them properly.
      case PCI_COMMAND:
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;

#if 0
        if (config.data[offset] & PCI_CMD_BME) {
            bmEnabled = true;
        }
        else {
            bmEnabled = false;
        }

        if (config.data[offset] & PCI_CMD_MSE) {
            memEnable = true;
        }
        else {
            memEnable = false;
        }
#endif
        break;

      case PCI0_BASE_ADDR0:
        if (BARAddrs[0] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(BARAddrs[0],
                                           BARAddrs[0] + BARSize[0] - 1);

            BARAddrs[0] &= PA_UNCACHED_MASK;
        }
        break;
      case PCI0_BASE_ADDR1:
        if (BARAddrs[1] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(BARAddrs[1],
                                           BARAddrs[1] + BARSize[1] - 1);

            BARAddrs[1] &= PA_UNCACHED_MASK;
        }
        break;
    }
}

/**
 * This reads the device registers, which are detailed in the NS83820
 * spec sheet
 */
Fault
NSGigE::read(MemReqPtr &req, uint8_t *data)
{
    assert(ioEnable);

    // The mask gives you only the offset into the device register file
    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);

    // there are some reserved registers, you
can see ns_gige_reg.h and 372 // the spec sheet for details 373 if (daddr > LAST && daddr <= RESERVED) { 374 panic("Accessing reserved register"); 375 } else if (daddr > RESERVED && daddr <= 0x3FC) { 376 ReadConfig(daddr & 0xff, req->size, data); 377 return No_Fault; 378 } else if (daddr >= MIB_START && daddr <= MIB_END) { 379 // don't implement all the MIB's. hopefully the kernel 380 // doesn't actually DEPEND upon their values 381 // MIB are just hardware stats keepers 382 uint32_t ® = *(uint32_t *) data; 383 reg = 0; 384 return No_Fault; 385 } else if (daddr > 0x3FC) 386 panic("Something is messed up!\n"); 387 388 switch (req->size) { 389 case sizeof(uint32_t): 390 { 391 uint32_t ® = *(uint32_t *)data; 392 393 switch (daddr) { 394 case CR: 395 reg = regs.command; 396 //these are supposed to be cleared on a read 397 reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR); 398 break; 399 400 case CFG: 401 reg = regs.config; 402 break; 403 404 case MEAR: 405 reg = regs.mear; 406 break; 407 408 case PTSCR: 409 reg = regs.ptscr; 410 break; 411 412 case ISR: 413 reg = regs.isr; 414 devIntrClear(ISR_ALL); 415 break; 416 417 case IMR: 418 reg = regs.imr; 419 break; 420 421 case IER: 422 reg = regs.ier; 423 break; 424 425 case IHR: 426 reg = regs.ihr; 427 break; 428 429 case TXDP: 430 reg = regs.txdp; 431 break; 432 433 case TXDP_HI: 434 reg = regs.txdp_hi; 435 break; 436 437 case TXCFG: 438 reg = regs.txcfg; 439 break; 440 441 case GPIOR: 442 reg = regs.gpior; 443 break; 444 445 case RXDP: 446 reg = regs.rxdp; 447 break; 448 449 case RXDP_HI: 450 reg = regs.rxdp_hi; 451 break; 452 453 case RXCFG: 454 reg = regs.rxcfg; 455 break; 456 457 case PQCR: 458 reg = regs.pqcr; 459 break; 460 461 case WCSR: 462 reg = regs.wcsr; 463 break; 464 465 case PCR: 466 reg = regs.pcr; 467 break; 468 469 // see the spec sheet for how RFCR and RFDR work 470 // basically, you write to RFCR to tell the machine 471 // what you want to do next, then you act upon RFDR, 472 // and the device will be prepared b/c of what you 473 // wrote to RFCR 474 case RFCR: 475 reg = regs.rfcr; 476 break; 477 478 case RFDR: 479 switch (regs.rfcr & RFCR_RFADDR) { 480 case 0x000: 481 reg = rom.perfectMatch[1]; 482 reg = reg << 8; 483 reg += rom.perfectMatch[0]; 484 break; 485 case 0x002: 486 reg = rom.perfectMatch[3] << 8; 487 reg += rom.perfectMatch[2]; 488 break; 489 case 0x004: 490 reg = rom.perfectMatch[5] << 8; 491 reg += rom.perfectMatch[4]; 492 break; 493 default: 494 panic("reading RFDR for something other than PMATCH!\n"); 495 // didn't implement other RFDR functionality b/c 496 // driver didn't use it 497 } 498 break; 499 500 case SRR: 501 reg = regs.srr; 502 break; 503 504 case MIBC: 505 reg = regs.mibc; 506 reg &= ~(MIBC_MIBS | MIBC_ACLR); 507 break; 508 509 case VRCR: 510 reg = regs.vrcr; 511 break; 512 513 case VTCR: 514 reg = regs.vtcr; 515 break; 516 517 case VDR: 518 reg = regs.vdr; 519 break; 520 521 case CCSR: 522 reg = regs.ccsr; 523 break; 524 525 case TBICR: 526 reg = regs.tbicr; 527 break; 528 529 case TBISR: 530 reg = regs.tbisr; 531 break; 532 533 case TANAR: 534 reg = regs.tanar; 535 break; 536 537 case TANLPAR: 538 reg = regs.tanlpar; 539 break; 540 541 case TANER: 542 reg = regs.taner; 543 break; 544 545 case TESR: 546 reg = regs.tesr; 547 break; 548 549 default: 550 panic("reading unimplemented register: addr=%#x", daddr); 551 } 552 553 DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n", 554 daddr, reg, reg); 555 } 556 break; 557 558 default: 559 panic("accessing register with invalid size: addr=%#x, size=%d", 560 
daddr, req->size); 561 } 562 563 return No_Fault; 564} 565 566Fault 567NSGigE::write(MemReqPtr &req, const uint8_t *data) 568{ 569 assert(ioEnable); 570 571 Addr daddr = req->paddr & 0xfff; 572 DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n", 573 daddr, req->paddr, req->vaddr, req->size); 574 575 if (daddr > LAST && daddr <= RESERVED) { 576 panic("Accessing reserved register"); 577 } else if (daddr > RESERVED && daddr <= 0x3FC) { 578 WriteConfig(daddr & 0xff, req->size, *(uint32_t *)data); 579 return No_Fault; 580 } else if (daddr > 0x3FC) 581 panic("Something is messed up!\n"); 582 583 if (req->size == sizeof(uint32_t)) { 584 uint32_t reg = *(uint32_t *)data; 585 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg); 586 587 switch (daddr) { 588 case CR: 589 regs.command = reg; 590 if (reg & CR_TXD) { 591 txEnable = false; 592 } else if (reg & CR_TXE) { 593 txEnable = true; 594 595 // the kernel is enabling the transmit machine 596 if (txState == txIdle) 597 txKick(); 598 } 599 600 if (reg & CR_RXD) { 601 rxEnable = false; 602 } else if (reg & CR_RXE) { 603 rxEnable = true; 604 605 if (rxState == rxIdle) 606 rxKick(); 607 } 608 609 if (reg & CR_TXR) 610 txReset(); 611 612 if (reg & CR_RXR) 613 rxReset(); 614 615 if (reg & CR_SWI) 616 devIntrPost(ISR_SWI); 617 618 if (reg & CR_RST) { 619 txReset(); 620 rxReset(); 621 622 regsReset(); 623 } 624 break; 625 626 case CFG: 627 if (reg & CFG_LNKSTS || 628 reg & CFG_SPDSTS || 629 reg & CFG_DUPSTS || 630 reg & CFG_RESERVED || 631 reg & CFG_T64ADDR || 632 reg & CFG_PCI64_DET) 633 panic("writing to read-only or reserved CFG bits!\n"); 634 635 regs.config |= reg & ~(CFG_LNKSTS | CFG_SPDSTS | CFG_DUPSTS | 636 CFG_RESERVED | CFG_T64ADDR | CFG_PCI64_DET); 637 638// all these #if 0's are because i don't THINK the kernel needs to 639// have these implemented. if there is a problem relating to one of 640// these, you may need to add functionality in. 
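// Note on the CFG update above: only the writable CFG bits are OR'd into
// regs.config; the read-only status/strap bits (LNKSTS, SPDSTS, DUPSTS,
// T64ADDR, PCI64_DET) are masked out, and an attempt to set any of them
// panics earlier in this case.  Because the update uses |=, this path can
// set writable bits but never clears them.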
641#if 0 642 if (reg & CFG_TBI_EN) ; 643 if (reg & CFG_MODE_1000) ; 644#endif 645 646 if (reg & CFG_AUTO_1000) 647 panic("CFG_AUTO_1000 not implemented!\n"); 648 649#if 0 650 if (reg & CFG_PINT_DUPSTS || 651 reg & CFG_PINT_LNKSTS || 652 reg & CFG_PINT_SPDSTS) 653 ; 654 655 if (reg & CFG_TMRTEST) ; 656 if (reg & CFG_MRM_DIS) ; 657 if (reg & CFG_MWI_DIS) ; 658 659 if (reg & CFG_T64ADDR) 660 panic("CFG_T64ADDR is read only register!\n"); 661 662 if (reg & CFG_PCI64_DET) 663 panic("CFG_PCI64_DET is read only register!\n"); 664 665 if (reg & CFG_DATA64_EN) ; 666 if (reg & CFG_M64ADDR) ; 667 if (reg & CFG_PHY_RST) ; 668 if (reg & CFG_PHY_DIS) ; 669#endif 670 671 if (reg & CFG_EXTSTS_EN) 672 extstsEnable = true; 673 else 674 extstsEnable = false; 675 676#if 0 677 if (reg & CFG_REQALG) ; 678 if (reg & CFG_SB) ; 679 if (reg & CFG_POW) ; 680 if (reg & CFG_EXD) ; 681 if (reg & CFG_PESEL) ; 682 if (reg & CFG_BROM_DIS) ; 683 if (reg & CFG_EXT_125) ; 684 if (reg & CFG_BEM) ; 685#endif 686 break; 687 688 case MEAR: 689 regs.mear = reg; 690 // since phy is completely faked, MEAR_MD* don't matter 691 // and since the driver never uses MEAR_EE*, they don't 692 // matter 693#if 0 694 if (reg & MEAR_EEDI) ; 695 if (reg & MEAR_EEDO) ; // this one is read only 696 if (reg & MEAR_EECLK) ; 697 if (reg & MEAR_EESEL) ; 698 if (reg & MEAR_MDIO) ; 699 if (reg & MEAR_MDDIR) ; 700 if (reg & MEAR_MDC) ; 701#endif 702 break; 703 704 case PTSCR: 705 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY); 706 // these control BISTs for various parts of chip - we 707 // don't care or do just fake that the BIST is done 708 if (reg & PTSCR_RBIST_EN) 709 regs.ptscr |= PTSCR_RBIST_DONE; 710 if (reg & PTSCR_EEBIST_EN) 711 regs.ptscr &= ~PTSCR_EEBIST_EN; 712 if (reg & PTSCR_EELOAD_EN) 713 regs.ptscr &= ~PTSCR_EELOAD_EN; 714 break; 715 716 case ISR: /* writing to the ISR has no effect */ 717 panic("ISR is a read only register!\n"); 718 719 case IMR: 720 regs.imr = reg; 721 devIntrChangeMask(); 722 break; 723 724 case IER: 725 regs.ier = reg; 726 break; 727 728 case IHR: 729 regs.ihr = reg; 730 /* not going to implement real interrupt holdoff */ 731 break; 732 733 case TXDP: 734 regs.txdp = (reg & 0xFFFFFFFC); 735 assert(txState == txIdle); 736 CTDD = false; 737 break; 738 739 case TXDP_HI: 740 regs.txdp_hi = reg; 741 break; 742 743 case TXCFG: 744 regs.txcfg = reg; 745#if 0 746 if (reg & TXCFG_CSI) ; 747 if (reg & TXCFG_HBI) ; 748 if (reg & TXCFG_MLB) ; 749 if (reg & TXCFG_ATP) ; 750 if (reg & TXCFG_ECRETRY) { 751 /* 752 * this could easily be implemented, but considering 753 * the network is just a fake pipe, wouldn't make 754 * sense to do this 755 */ 756 } 757 758 if (reg & TXCFG_BRST_DIS) ; 759#endif 760 761#if 0 762 /* we handle our own DMA, ignore the kernel's exhortations */ 763 if (reg & TXCFG_MXDMA) ; 764#endif 765 766 // also, we currently don't care about fill/drain 767 // thresholds though this may change in the future with 768 // more realistic networks or a driver which changes it 769 // according to feedback 770 771 break; 772 773 case GPIOR: 774 regs.gpior = reg; 775 /* these just control general purpose i/o pins, don't matter */ 776 break; 777 778 case RXDP: 779 regs.rxdp = reg; 780 CRDD = false; 781 break; 782 783 case RXDP_HI: 784 regs.rxdp_hi = reg; 785 break; 786 787 case RXCFG: 788 regs.rxcfg = reg; 789#if 0 790 if (reg & RXCFG_AEP) ; 791 if (reg & RXCFG_ARP) ; 792 if (reg & RXCFG_STRIPCRC) ; 793 if (reg & RXCFG_RX_RD) ; 794 if (reg & RXCFG_ALP) ; 795 if (reg & RXCFG_AIRL) ; 796 797 /* we handle our own DMA, ignore what 
kernel says about it */ 798 if (reg & RXCFG_MXDMA) ; 799 800 //also, we currently don't care about fill/drain thresholds 801 //though this may change in the future with more realistic 802 //networks or a driver which changes it according to feedback 803 if (reg & (RXCFG_DRTH | RXCFG_DRTH0)) ; 804#endif 805 break; 806 807 case PQCR: 808 /* there is no priority queueing used in the linux 2.6 driver */ 809 regs.pqcr = reg; 810 break; 811 812 case WCSR: 813 /* not going to implement wake on LAN */ 814 regs.wcsr = reg; 815 break; 816 817 case PCR: 818 /* not going to implement pause control */ 819 regs.pcr = reg; 820 break; 821 822 case RFCR: 823 regs.rfcr = reg; 824 825 rxFilterEnable = (reg & RFCR_RFEN) ? true : false; 826 acceptBroadcast = (reg & RFCR_AAB) ? true : false; 827 acceptMulticast = (reg & RFCR_AAM) ? true : false; 828 acceptUnicast = (reg & RFCR_AAU) ? true : false; 829 acceptPerfect = (reg & RFCR_APM) ? true : false; 830 acceptArp = (reg & RFCR_AARP) ? true : false; 831 832#if 0 833 if (reg & RFCR_APAT) 834 panic("RFCR_APAT not implemented!\n"); 835#endif 836 837 if (reg & RFCR_MHEN || reg & RFCR_UHEN) 838 panic("hash filtering not implemented!\n"); 839 840 if (reg & RFCR_ULM) 841 panic("RFCR_ULM not implemented!\n"); 842 843 break; 844 845 case RFDR: 846 panic("the driver never writes to RFDR, something is wrong!\n"); 847 848 case BRAR: 849 panic("the driver never uses BRAR, something is wrong!\n"); 850 851 case BRDR: 852 panic("the driver never uses BRDR, something is wrong!\n"); 853 854 case SRR: 855 panic("SRR is read only register!\n"); 856 857 case MIBC: 858 panic("the driver never uses MIBC, something is wrong!\n"); 859 860 case VRCR: 861 regs.vrcr = reg; 862 break; 863 864 case VTCR: 865 regs.vtcr = reg; 866 break; 867 868 case VDR: 869 panic("the driver never uses VDR, something is wrong!\n"); 870 break; 871 872 case CCSR: 873 /* not going to implement clockrun stuff */ 874 regs.ccsr = reg; 875 break; 876 877 case TBICR: 878 regs.tbicr = reg; 879 if (reg & TBICR_MR_LOOPBACK) 880 panic("TBICR_MR_LOOPBACK never used, something wrong!\n"); 881 882 if (reg & TBICR_MR_AN_ENABLE) { 883 regs.tanlpar = regs.tanar; 884 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS); 885 } 886 887#if 0 888 if (reg & TBICR_MR_RESTART_AN) ; 889#endif 890 891 break; 892 893 case TBISR: 894 panic("TBISR is read only register!\n"); 895 896 case TANAR: 897 regs.tanar = reg; 898 if (reg & TANAR_PS2) 899 panic("this isn't used in driver, something wrong!\n"); 900 901 if (reg & TANAR_PS1) 902 panic("this isn't used in driver, something wrong!\n"); 903 break; 904 905 case TANLPAR: 906 panic("this should only be written to by the fake phy!\n"); 907 908 case TANER: 909 panic("TANER is read only register!\n"); 910 911 case TESR: 912 regs.tesr = reg; 913 break; 914 915 default: 916 panic("invalid register access daddr=%#x", daddr); 917 } 918 } else { 919 panic("Invalid Request Size"); 920 } 921 922 return No_Fault; 923} 924 925void 926NSGigE::devIntrPost(uint32_t interrupts) 927{ 928 bool delay = false; 929 930 if (interrupts & ISR_RESERVE) 931 panic("Cannot set a reserved interrupt"); 932 933 if (interrupts & ISR_TXRCMP) 934 regs.isr |= ISR_TXRCMP; 935 936 if (interrupts & ISR_RXRCMP) 937 regs.isr |= ISR_RXRCMP; 938 939//ISR_DPERR not implemented 940//ISR_SSERR not implemented 941//ISR_RMABT not implemented 942//ISR_RXSOVR not implemented 943//ISR_HIBINT not implemented 944//ISR_PHY not implemented 945//ISR_PME not implemented 946 947 if (interrupts & ISR_SWI) 948 regs.isr |= ISR_SWI; 949 
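    // Interrupt posting model: each requested source bit is OR'd into the
    // ISR; once all bits are folded in, any overlap between regs.isr and
    // regs.imr schedules a CPU interrupt via cpuIntrPost().  TXOK and RXOK
    // are the only sources charged the configured intrDelay; everything
    // else posts at curTick.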
//ISR_MIB not implemented
//ISR_TXURN not implemented

    if (interrupts & ISR_TXIDLE)
        regs.isr |= ISR_TXIDLE;

    if (interrupts & ISR_TXERR)
        regs.isr |= ISR_TXERR;

    if (interrupts & ISR_TXDESC)
        regs.isr |= ISR_TXDESC;

    if (interrupts & ISR_TXOK) {
        regs.isr |= ISR_TXOK;
        delay = true;
    }

    if (interrupts & ISR_RXORN)
        regs.isr |= ISR_RXORN;

    if (interrupts & ISR_RXIDLE)
        regs.isr |= ISR_RXIDLE;

//ISR_RXEARLY not implemented

    if (interrupts & ISR_RXERR)
        regs.isr |= ISR_RXERR;

    if (interrupts & ISR_RXDESC)
        regs.isr |= ISR_RXDESC;

    if (interrupts & ISR_RXOK) {
        delay = true;
        regs.isr |= ISR_RXOK;
    }

    if ((regs.isr & regs.imr)) {
        Tick when = curTick;
        if (delay)
            when += intrDelay;
        cpuIntrPost(when);
    }

    DPRINTF(EthernetIntr,
            "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
            interrupts, regs.isr, regs.imr);
}

void
NSGigE::devIntrClear(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot clear a reserved interrupt");

    if (interrupts & ISR_TXRCMP)
        regs.isr &= ~ISR_TXRCMP;

    if (interrupts & ISR_RXRCMP)
        regs.isr &= ~ISR_RXRCMP;

//ISR_DPERR not implemented
//ISR_SSERR not implemented
//ISR_RMABT not implemented
//ISR_RXSOVR not implemented
//ISR_HIBINT not implemented
//ISR_PHY not implemented
//ISR_PME not implemented

    if (interrupts & ISR_SWI)
        regs.isr &= ~ISR_SWI;

//ISR_MIB not implemented
//ISR_TXURN not implemented

    if (interrupts & ISR_TXIDLE)
        regs.isr &= ~ISR_TXIDLE;

    if (interrupts & ISR_TXERR)
        regs.isr &= ~ISR_TXERR;

    if (interrupts & ISR_TXDESC)
        regs.isr &= ~ISR_TXDESC;

    if (interrupts & ISR_TXOK)
        regs.isr &= ~ISR_TXOK;

    if (interrupts & ISR_RXORN)
        regs.isr &= ~ISR_RXORN;

    if (interrupts & ISR_RXIDLE)
        regs.isr &= ~ISR_RXIDLE;

//ISR_RXEARLY not implemented

    if (interrupts & ISR_RXERR)
        regs.isr &= ~ISR_RXERR;

    if (interrupts & ISR_RXDESC)
        regs.isr &= ~ISR_RXDESC;

    if (interrupts & ISR_RXOK)
        regs.isr &= ~ISR_RXOK;

    DPRINTF(EthernetIntr,
            "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
            interrupts, regs.isr, regs.imr);

    if (!(regs.isr & regs.imr))
        cpuIntrClear();
}

void
NSGigE::devIntrChangeMask()
{
    DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
            regs.isr, regs.imr, regs.isr & regs.imr);

    if (regs.isr & regs.imr)
        cpuIntrPost(curTick);
    else
        cpuIntrClear();
}

void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, we must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug).
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    if (intrTick < curTick && intrTick != 0) {
        warn("intrTick < curTick !!! intrTick=%d curTick=%d\n",
             intrTick, curTick);
        intrTick = 0;
    }
    assert((intrTick >= curTick) || (intrTick == 0));
    if (when > intrTick && intrTick != 0)
        return;

    intrTick = when;

    if (intrEvent) {
        intrEvent->squash();
        intrEvent = 0;
    }

    if (when < curTick) {
        cpuInterrupt();
    } else {
        DPRINTF(EthernetIntr,
                "going to schedule an interrupt for intrTick=%d\n",
                intrTick);
        intrEvent = new IntrEvent(this, true);
        intrEvent->schedule(intrTick);
    }
}

void
NSGigE::cpuInterrupt()
{
    // Don't send an interrupt if there's already one pending
    if (cpuPendingIntr) {
        DPRINTF(EthernetIntr,
                "would send an interrupt now, but there's already one pending\n");
        intrTick = 0;
        return;
    }
    // Don't send an interrupt if it's supposed to be delayed
    if (intrTick > curTick) {
        DPRINTF(EthernetIntr,
                "an interrupt is scheduled for %d, wait til then\n",
                intrTick);
        return;
    }

    // Whether or not there's a pending interrupt event, we don't care
    // about it anymore
    intrEvent = 0;
    intrTick = 0;

    // Send interrupt
    cpuPendingIntr = true;
    /** @todo rework the intctrl to be tsunami ok */
    //intctrl->post(TheISA::INTLEVEL_IRQ1, TheISA::INTINDEX_ETHERNET);
    DPRINTF(EthernetIntr, "Posting interrupts to cchip!\n");
    tsunami->cchip->postDRIR(configData->config.hdr.pci0.interruptLine);
}

void
NSGigE::cpuIntrClear()
{
    if (!cpuPendingIntr)
        return;

    cpuPendingIntr = false;
    /** @todo rework the intctrl to be tsunami ok */
    //intctrl->clear(TheISA::INTLEVEL_IRQ1, TheISA::INTINDEX_ETHERNET);
    DPRINTF(EthernetIntr, "clearing all interrupts from cchip\n");
    tsunami->cchip->clearDRIR(configData->config.hdr.pci0.interruptLine);
}

bool
NSGigE::cpuIntrPending() const
{ return cpuPendingIntr; }

void
NSGigE::txReset()
{
    DPRINTF(Ethernet, "transmit reset\n");

    CTDD = false;
    txFifoAvail = maxTxFifoSize;
    txEnable = false;
    txFragPtr = 0;
    assert(txDescCnt == 0);
    txFifo.clear();
    txState = txIdle;
    assert(txDmaState == dmaIdle);
}

void
NSGigE::rxReset()
{
    DPRINTF(Ethernet, "receive reset\n");

    CRDD = false;
    assert(rxPktBytes == 0);
    rxFifoCnt = 0;
    rxEnable = false;
    rxFragPtr = 0;
    assert(rxDescCnt == 0);
    assert(rxDmaState == dmaIdle);
    rxFifo.clear();
    rxState = rxIdle;
}

void
NSGigE::regsReset()
{
    memset(&regs, 0, sizeof(regs));
    regs.config = 0x80000000;
    regs.mear = 0x12;
    regs.isr = 0x00608000;
    regs.txcfg = 0x120;
    regs.rxcfg = 0x4;
    regs.srr = 0x0103;
    regs.mibc = 0x2;
    regs.vdr = 0x81;
    regs.tesr = 0xc000;

    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}

void
NSGigE::rxDmaReadCopy()
{
    assert(rxDmaState == dmaReading);

    physmem->dma_read((uint8_t *)rxDmaData, rxDmaAddr, rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}

bool
NSGigE::doRxDmaRead()
{
    assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
    rxDmaState = dmaReading;
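    // Three timing paths, mirrored in the tx versions below: (1) if a DMA
    // interface is modeled and this transfer isn't marked "free", hand it
    // to the bus model (or wait if the interface is busy); (2) if both the
    // fixed delay and the per-64-byte factor are zero, copy immediately;
    // (3) otherwise model the latency as
    //     dmaReadDelay + ceil(len / 64) * dmaReadFactor
    // e.g. a 1514-byte transfer is charged dmaReadDelay + 24 * dmaReadFactor
    // ticks before the copy completes.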
    if (dmaInterface && !rxDmaFree) {
        if (dmaInterface->busy())
            rxDmaState = dmaReadWaiting;
        else
            dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
                                &rxDmaReadEvent, true);
        return true;
    }

    if (dmaReadDelay == 0 && dmaReadFactor == 0) {
        rxDmaReadCopy();
        return false;
    }

    Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
    Tick start = curTick + dmaReadDelay + factor;
    rxDmaReadEvent.schedule(start);
    return true;
}

void
NSGigE::rxDmaReadDone()
{
    assert(rxDmaState == dmaReading);
    rxDmaReadCopy();

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}

void
NSGigE::rxDmaWriteCopy()
{
    assert(rxDmaState == dmaWriting);

    physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}

bool
NSGigE::doRxDmaWrite()
{
    assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
    rxDmaState = dmaWriting;

    if (dmaInterface && !rxDmaFree) {
        if (dmaInterface->busy())
            rxDmaState = dmaWriteWaiting;
        else
            dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
                                &rxDmaWriteEvent, true);
        return true;
    }

    if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
        rxDmaWriteCopy();
        return false;
    }

    Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
    Tick start = curTick + dmaWriteDelay + factor;
    rxDmaWriteEvent.schedule(start);
    return true;
}

void
NSGigE::rxDmaWriteDone()
{
    assert(rxDmaState == dmaWriting);
    rxDmaWriteCopy();

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}

void
NSGigE::rxKick()
{
    DPRINTF(EthernetSM, "receive kick rxState=%s (rxBuf.size=%d)\n",
            NsRxStateStrings[rxState], rxFifo.size());

    if (rxKickTick > curTick) {
        DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
                rxKickTick);
        return;
    }

  next:
    switch(rxDmaState) {
      case dmaReadWaiting:
        if (doRxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doRxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    // See the state machine in the spec sheet for details.  The way this
    // works: if you finish the work for one state and can go directly to
    // another, you do that by jumping to the label "next".  If there is
    // intermediate work, such as a DMA, that keeps you from moving to the
    // next state yet, you go to "exit" and leave the loop; when the DMA is
    // done it will trigger an event and re-enter this loop.
    switch (rxState) {
      case rxIdle:
        if (!rxEnable) {
            DPRINTF(EthernetSM, "Receive Disabled!
Nothing to do.\n"); 1355 goto exit; 1356 } 1357 1358 if (CRDD) { 1359 rxState = rxDescRefr; 1360 1361 rxDmaAddr = regs.rxdp & 0x3fffffff; 1362 rxDmaData = &rxDescCache + offsetof(ns_desc, link); 1363 rxDmaLen = sizeof(rxDescCache.link); 1364 rxDmaFree = dmaDescFree; 1365 1366 descDmaReads++; 1367 descDmaRdBytes += rxDmaLen; 1368 1369 if (doRxDmaRead()) 1370 goto exit; 1371 } else { 1372 rxState = rxDescRead; 1373 1374 rxDmaAddr = regs.rxdp & 0x3fffffff; 1375 rxDmaData = &rxDescCache; 1376 rxDmaLen = sizeof(ns_desc); 1377 rxDmaFree = dmaDescFree; 1378 1379 descDmaReads++; 1380 descDmaRdBytes += rxDmaLen; 1381 1382 if (doRxDmaRead()) 1383 goto exit; 1384 } 1385 break; 1386 1387 case rxDescRefr: 1388 if (rxDmaState != dmaIdle) 1389 goto exit; 1390 1391 rxState = rxAdvance; 1392 break; 1393 1394 case rxDescRead: 1395 if (rxDmaState != dmaIdle) 1396 goto exit; 1397 1398 DPRINTF(EthernetDesc, 1399 "rxDescCache: addr=%08x read descriptor\n", 1400 regs.rxdp & 0x3fffffff); 1401 DPRINTF(EthernetDesc, 1402 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n", 1403 rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts, 1404 rxDescCache.extsts); 1405 1406 if (rxDescCache.cmdsts & CMDSTS_OWN) { 1407 devIntrPost(ISR_RXIDLE); 1408 rxState = rxIdle; 1409 goto exit; 1410 } else { 1411 rxState = rxFifoBlock; 1412 rxFragPtr = rxDescCache.bufptr; 1413 rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK; 1414 } 1415 break; 1416 1417 case rxFifoBlock: 1418 if (!rxPacket) { 1419 /** 1420 * @todo in reality, we should be able to start processing 1421 * the packet as it arrives, and not have to wait for the 1422 * full packet ot be in the receive fifo. 1423 */ 1424 if (rxFifo.empty()) 1425 goto exit; 1426 1427 DPRINTF(EthernetSM, "****processing receive of new packet****\n"); 1428 1429 // If we don't have a packet, grab a new one from the fifo. 1430 rxPacket = rxFifo.front(); 1431 rxPktBytes = rxPacket->length; 1432 rxPacketBufPtr = rxPacket->data; 1433 1434#if TRACING_ON 1435 if (DTRACE(Ethernet)) { 1436 if (rxPacket->isIpPkt()) { 1437 ip_header *ip = rxPacket->getIpHdr(); 1438 DPRINTF(Ethernet, "ID is %d\n", reverseEnd16(ip->ID)); 1439 if (rxPacket->isTcpPkt()) { 1440 tcp_header *tcp = rxPacket->getTcpHdr(ip); 1441 DPRINTF(Ethernet, "Src Port=%d, Dest Port=%d\n", 1442 reverseEnd16(tcp->src_port_num), 1443 reverseEnd16(tcp->dest_port_num)); 1444 } 1445 } 1446 } 1447#endif 1448 1449 // sanity check - i think the driver behaves like this 1450 assert(rxDescCnt >= rxPktBytes); 1451 1452 // Must clear the value before popping to decrement the 1453 // reference count 1454 rxFifo.front() = NULL; 1455 rxFifo.pop_front(); 1456 rxFifoCnt -= rxPacket->length; 1457 } 1458 1459 1460 // dont' need the && rxDescCnt > 0 if driver sanity check 1461 // above holds 1462 if (rxPktBytes > 0) { 1463 rxState = rxFragWrite; 1464 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity 1465 // check holds 1466 rxXferLen = rxPktBytes; 1467 1468 rxDmaAddr = rxFragPtr & 0x3fffffff; 1469 rxDmaData = rxPacketBufPtr; 1470 rxDmaLen = rxXferLen; 1471 rxDmaFree = dmaDataFree; 1472 1473 if (doRxDmaWrite()) 1474 goto exit; 1475 1476 } else { 1477 rxState = rxDescWrite; 1478 1479 //if (rxPktBytes == 0) { /* packet is done */ 1480 assert(rxPktBytes == 0); 1481 DPRINTF(EthernetSM, "done with receiving packet\n"); 1482 1483 rxDescCache.cmdsts |= CMDSTS_OWN; 1484 rxDescCache.cmdsts &= ~CMDSTS_MORE; 1485 rxDescCache.cmdsts |= CMDSTS_OK; 1486 rxDescCache.cmdsts &= 0xffff0000; 1487 rxDescCache.cmdsts += rxPacket->length; //i.e. 
set CMDSTS_SIZE 1488 1489#if 0 1490 /* 1491 * all the driver uses these are for its own stats keeping 1492 * which we don't care about, aren't necessary for 1493 * functionality and doing this would just slow us down. 1494 * if they end up using this in a later version for 1495 * functional purposes, just undef 1496 */ 1497 if (rxFilterEnable) { 1498 rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK; 1499 if (rxFifo.front()->IsUnicast()) 1500 rxDescCache.cmdsts |= CMDSTS_DEST_SELF; 1501 if (rxFifo.front()->IsMulticast()) 1502 rxDescCache.cmdsts |= CMDSTS_DEST_MULTI; 1503 if (rxFifo.front()->IsBroadcast()) 1504 rxDescCache.cmdsts |= CMDSTS_DEST_MASK; 1505 } 1506#endif 1507 1508 if (rxPacket->isIpPkt() && extstsEnable) { 1509 rxDescCache.extsts |= EXTSTS_IPPKT; 1510 rxIPChecksums++; 1511 if (!ipChecksum(rxPacket, false)) { 1512 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n"); 1513 rxDescCache.extsts |= EXTSTS_IPERR; 1514 } 1515 if (rxPacket->isTcpPkt()) { 1516 rxDescCache.extsts |= EXTSTS_TCPPKT; 1517 rxTCPChecksums++; 1518 if (!tcpChecksum(rxPacket, false)) { 1519 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n"); 1520 rxDescCache.extsts |= EXTSTS_TCPERR; 1521 1522 } 1523 } else if (rxPacket->isUdpPkt()) { 1524 rxDescCache.extsts |= EXTSTS_UDPPKT; 1525 if (!udpChecksum(rxPacket, false)) { 1526 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n"); 1527 rxDescCache.extsts |= EXTSTS_UDPERR; 1528 } 1529 } 1530 } 1531 rxPacket = 0; 1532 1533 /* 1534 * the driver seems to always receive into desc buffers 1535 * of size 1514, so you never have a pkt that is split 1536 * into multiple descriptors on the receive side, so 1537 * i don't implement that case, hence the assert above. 1538 */ 1539 1540 DPRINTF(EthernetDesc, 1541 "rxDescCache: addr=%08x writeback cmdsts extsts\n", 1542 regs.rxdp & 0x3fffffff); 1543 DPRINTF(EthernetDesc, 1544 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n", 1545 rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts, 1546 rxDescCache.extsts); 1547 1548 rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff; 1549 rxDmaData = &(rxDescCache.cmdsts); 1550 rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts); 1551 rxDmaFree = dmaDescFree; 1552 1553 descDmaWrites++; 1554 descDmaWrBytes += rxDmaLen; 1555 1556 if (doRxDmaWrite()) 1557 goto exit; 1558 } 1559 break; 1560 1561 case rxFragWrite: 1562 if (rxDmaState != dmaIdle) 1563 goto exit; 1564 1565 rxPacketBufPtr += rxXferLen; 1566 rxFragPtr += rxXferLen; 1567 rxPktBytes -= rxXferLen; 1568 1569 rxState = rxFifoBlock; 1570 break; 1571 1572 case rxDescWrite: 1573 if (rxDmaState != dmaIdle) 1574 goto exit; 1575 1576 assert(rxDescCache.cmdsts & CMDSTS_OWN); 1577 1578 assert(rxPacket == 0); 1579 devIntrPost(ISR_RXOK); 1580 1581 if (rxDescCache.cmdsts & CMDSTS_INTR) 1582 devIntrPost(ISR_RXDESC); 1583 1584 if (!rxEnable) { 1585 DPRINTF(EthernetSM, "Halting the RX state machine\n"); 1586 rxState = rxIdle; 1587 goto exit; 1588 } else 1589 rxState = rxAdvance; 1590 break; 1591 1592 case rxAdvance: 1593 if (rxDescCache.link == 0) { 1594 devIntrPost(ISR_RXIDLE); 1595 rxState = rxIdle; 1596 CRDD = true; 1597 goto exit; 1598 } else { 1599 rxState = rxDescRead; 1600 regs.rxdp = rxDescCache.link; 1601 CRDD = false; 1602 1603 rxDmaAddr = regs.rxdp & 0x3fffffff; 1604 rxDmaData = &rxDescCache; 1605 rxDmaLen = sizeof(ns_desc); 1606 rxDmaFree = dmaDescFree; 1607 1608 if (doRxDmaRead()) 1609 goto exit; 1610 } 1611 break; 1612 1613 default: 1614 panic("Invalid rxState!"); 1615 } 1616 1617 DPRINTF(EthernetSM, "entering next 
rxState=%s\n",
            NsRxStateStrings[rxState]);

    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            NsRxStateStrings[rxState]);
}

void
NSGigE::transmit()
{
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "nothing to transmit\n");
        return;
    }

    DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
            maxTxFifoSize - txFifoAvail);
    if (interface->sendPacket(txFifo.front())) {
#if TRACING_ON
        if (DTRACE(Ethernet)) {
            if (txFifo.front()->isIpPkt()) {
                ip_header *ip = txFifo.front()->getIpHdr();
                DPRINTF(Ethernet, "ID is %d\n", reverseEnd16(ip->ID));
                if (txFifo.front()->isTcpPkt()) {
                    tcp_header *tcp = txFifo.front()->getTcpHdr(ip);
                    DPRINTF(Ethernet, "Src Port=%d, Dest Port=%d\n",
                            reverseEnd16(tcp->src_port_num),
                            reverseEnd16(tcp->dest_port_num));
                }
            }
        }
#endif

        DDUMP(Ethernet, txFifo.front()->data, txFifo.front()->length);
        txBytes += txFifo.front()->length;
        txPackets++;

        txFifoAvail += txFifo.front()->length;

        DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
                txFifoAvail);
        txFifo.front() = NULL;
        txFifo.pop_front();

        /*
         * Normally the device would write back the descriptor here and
         * ONLY then post this interrupt.  Since transmission in the
         * simulator never actually fails, we post the interrupt here
         * instead; it keeps the code in this nice format and is
         * functionally the same.
         */
        devIntrPost(ISR_TXOK);
    } else {
        DPRINTF(Ethernet,
                "May need to rethink always sending the descriptors back?\n");
    }

    if (!txFifo.empty() && !txEvent.scheduled()) {
        DPRINTF(Ethernet, "reschedule transmit\n");
        txEvent.schedule(curTick + 1000);
    }
}

void
NSGigE::txDmaReadCopy()
{
    assert(txDmaState == dmaReading);

    physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen);
    txDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
            txDmaAddr, txDmaLen);
    DDUMP(EthernetDMA, txDmaData, txDmaLen);
}

bool
NSGigE::doTxDmaRead()
{
    assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
    txDmaState = dmaReading;

    if (dmaInterface && !txDmaFree) {
        if (dmaInterface->busy())
            txDmaState = dmaReadWaiting;
        else
            dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
                                &txDmaReadEvent, true);
        return true;
    }

    if (dmaReadDelay == 0 && dmaReadFactor == 0) {
        txDmaReadCopy();
        return false;
    }

    Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
    Tick start = curTick + dmaReadDelay + factor;
    txDmaReadEvent.schedule(start);
    return true;
}

void
NSGigE::txDmaReadDone()
{
    assert(txDmaState == dmaReading);
    txDmaReadCopy();

    // If the receive state machine has a pending DMA, let it go first
    if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
        rxKick();

    txKick();
}

void
NSGigE::txDmaWriteCopy()
{
    assert(txDmaState == dmaWriting);

    physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen);
    txDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
            txDmaAddr, txDmaLen);
    DDUMP(EthernetDMA,
txDmaData, txDmaLen); 1749} 1750 1751bool 1752NSGigE::doTxDmaWrite() 1753{ 1754 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting); 1755 txDmaState = dmaWriting; 1756 1757 if (dmaInterface && !txDmaFree) { 1758 if (dmaInterface->busy()) 1759 txDmaState = dmaWriteWaiting; 1760 else 1761 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick, 1762 &txDmaWriteEvent, true); 1763 return true; 1764 } 1765 1766 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) { 1767 txDmaWriteCopy(); 1768 return false; 1769 } 1770 1771 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor; 1772 Tick start = curTick + dmaWriteDelay + factor; 1773 txDmaWriteEvent.schedule(start); 1774 return true; 1775} 1776 1777void 1778NSGigE::txDmaWriteDone() 1779{ 1780 assert(txDmaState == dmaWriting); 1781 txDmaWriteCopy(); 1782 1783 // If the receive state machine has a pending DMA, let it go first 1784 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting) 1785 rxKick(); 1786 1787 txKick(); 1788} 1789 1790void 1791NSGigE::txKick() 1792{ 1793 DPRINTF(EthernetSM, "transmit kick txState=%s\n", 1794 NsTxStateStrings[txState]); 1795 1796 if (txKickTick > curTick) { 1797 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n", 1798 txKickTick); 1799 1800 return; 1801 } 1802 1803 next: 1804 switch(txDmaState) { 1805 case dmaReadWaiting: 1806 if (doTxDmaRead()) 1807 goto exit; 1808 break; 1809 case dmaWriteWaiting: 1810 if (doTxDmaWrite()) 1811 goto exit; 1812 break; 1813 default: 1814 break; 1815 } 1816 1817 switch (txState) { 1818 case txIdle: 1819 if (!txEnable) { 1820 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n"); 1821 goto exit; 1822 } 1823 1824 if (CTDD) { 1825 txState = txDescRefr; 1826 1827 txDmaAddr = regs.txdp & 0x3fffffff; 1828 txDmaData = &txDescCache + offsetof(ns_desc, link); 1829 txDmaLen = sizeof(txDescCache.link); 1830 txDmaFree = dmaDescFree; 1831 1832 descDmaReads++; 1833 descDmaRdBytes += txDmaLen; 1834 1835 if (doTxDmaRead()) 1836 goto exit; 1837 1838 } else { 1839 txState = txDescRead; 1840 1841 txDmaAddr = regs.txdp & 0x3fffffff; 1842 txDmaData = &txDescCache; 1843 txDmaLen = sizeof(ns_desc); 1844 txDmaFree = dmaDescFree; 1845 1846 descDmaReads++; 1847 descDmaRdBytes += txDmaLen; 1848 1849 if (doTxDmaRead()) 1850 goto exit; 1851 } 1852 break; 1853 1854 case txDescRefr: 1855 if (txDmaState != dmaIdle) 1856 goto exit; 1857 1858 txState = txAdvance; 1859 break; 1860 1861 case txDescRead: 1862 if (txDmaState != dmaIdle) 1863 goto exit; 1864 1865 DPRINTF(EthernetDesc, 1866 "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n", 1867 txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts, 1868 txDescCache.extsts); 1869 1870 if (txDescCache.cmdsts & CMDSTS_OWN) { 1871 txState = txFifoBlock; 1872 txFragPtr = txDescCache.bufptr; 1873 txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK; 1874 } else { 1875 devIntrPost(ISR_TXIDLE); 1876 txState = txIdle; 1877 goto exit; 1878 } 1879 break; 1880 1881 case txFifoBlock: 1882 if (!txPacket) { 1883 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n"); 1884 txPacket = new EtherPacket; 1885 txPacket->data = new uint8_t[16384]; 1886 txPacketBufPtr = txPacket->data; 1887 } 1888 1889 if (txDescCnt == 0) { 1890 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n"); 1891 if (txDescCache.cmdsts & CMDSTS_MORE) { 1892 DPRINTF(EthernetSM, "there are more descriptors to come\n"); 1893 txState = txDescWrite; 1894 1895 txDescCache.cmdsts &= ~CMDSTS_OWN; 1896 1897 txDmaAddr = regs.txdp + 
offsetof(ns_desc, cmdsts); 1898 txDmaAddr &= 0x3fffffff; 1899 txDmaData = &(txDescCache.cmdsts); 1900 txDmaLen = sizeof(txDescCache.cmdsts); 1901 txDmaFree = dmaDescFree; 1902 1903 if (doTxDmaWrite()) 1904 goto exit; 1905 1906 } else { /* this packet is totally done */ 1907 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n"); 1908 /* deal with the the packet that just finished */ 1909 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) { 1910 if (txDescCache.extsts & EXTSTS_UDPPKT) { 1911 udpChecksum(txPacket, true); 1912 } else if (txDescCache.extsts & EXTSTS_TCPPKT) { 1913 tcpChecksum(txPacket, true); 1914 txTCPChecksums++; 1915 } 1916 if (txDescCache.extsts & EXTSTS_IPPKT) { 1917 ipChecksum(txPacket, true); 1918 txIPChecksums++; 1919 } 1920 } 1921 1922 txPacket->length = txPacketBufPtr - txPacket->data; 1923 // this is just because the receive can't handle a 1924 // packet bigger want to make sure 1925 assert(txPacket->length <= 1514); 1926 txFifo.push_back(txPacket); 1927 1928 /* 1929 * this following section is not tqo spec, but 1930 * functionally shouldn't be any different. normally, 1931 * the chip will wait til the transmit has occurred 1932 * before writing back the descriptor because it has 1933 * to wait to see that it was successfully transmitted 1934 * to decide whether to set CMDSTS_OK or not. 1935 * however, in the simulator since it is always 1936 * successfully transmitted, and writing it exactly to 1937 * spec would complicate the code, we just do it here 1938 */ 1939 1940 txDescCache.cmdsts &= ~CMDSTS_OWN; 1941 txDescCache.cmdsts |= CMDSTS_OK; 1942 1943 DPRINTF(EthernetDesc, 1944 "txDesc writeback: cmdsts=%08x extsts=%08x\n", 1945 txDescCache.cmdsts, txDescCache.extsts); 1946 1947 txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts); 1948 txDmaAddr &= 0x3fffffff; 1949 txDmaData = &(txDescCache.cmdsts); 1950 txDmaLen = sizeof(txDescCache.cmdsts) + 1951 sizeof(txDescCache.extsts); 1952 txDmaFree = dmaDescFree; 1953 1954 descDmaWrites++; 1955 descDmaWrBytes += txDmaLen; 1956 1957 transmit(); 1958 txPacket = 0; 1959 1960 if (!txEnable) { 1961 DPRINTF(EthernetSM, "halting TX state machine\n"); 1962 txState = txIdle; 1963 goto exit; 1964 } else 1965 txState = txAdvance; 1966 1967 if (doTxDmaWrite()) 1968 goto exit; 1969 } 1970 } else { 1971 DPRINTF(EthernetSM, "this descriptor isn't done yet\n"); 1972 if (txFifoAvail) { 1973 txState = txFragRead; 1974 1975 /* 1976 * The number of bytes transferred is either whatever 1977 * is left in the descriptor (txDescCnt), or if there 1978 * is not enough room in the fifo, just whatever room 1979 * is left in the fifo 1980 */ 1981 txXferLen = min<uint32_t>(txDescCnt, txFifoAvail); 1982 1983 txDmaAddr = txFragPtr & 0x3fffffff; 1984 txDmaData = txPacketBufPtr; 1985 txDmaLen = txXferLen; 1986 txDmaFree = dmaDataFree; 1987 1988 if (doTxDmaRead()) 1989 goto exit; 1990 } else { 1991 txState = txFifoBlock; 1992 transmit(); 1993 1994 goto exit; 1995 } 1996 1997 } 1998 break; 1999 2000 case txFragRead: 2001 if (txDmaState != dmaIdle) 2002 goto exit; 2003 2004 txPacketBufPtr += txXferLen; 2005 txFragPtr += txXferLen; 2006 txDescCnt -= txXferLen; 2007 txFifoAvail -= txXferLen; 2008 2009 txState = txFifoBlock; 2010 break; 2011 2012 case txDescWrite: 2013 if (txDmaState != dmaIdle) 2014 goto exit; 2015 2016 if (txDescCache.cmdsts & CMDSTS_INTR) 2017 devIntrPost(ISR_TXDESC); 2018 2019 txState = txAdvance; 2020 break; 2021 2022 case txAdvance: 2023 if (txDescCache.link == 0) { 2024 devIntrPost(ISR_TXIDLE); 2025 txState = txIdle; 2026 goto exit; 2027 
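            // (A null link pointer marks the end of the descriptor list,
            // so above we report ISR_TXIDLE and park in txIdle until the
            // driver reloads TXDP and re-enables transmit.)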
        } else {
            txState = txDescRead;
            regs.txdp = txDescCache.link;
            CTDD = false;

            txDmaAddr = txDescCache.link & 0x3fffffff;
            txDmaData = &txDescCache;
            txDmaLen = sizeof(ns_desc);
            txDmaFree = dmaDescFree;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("invalid state");
    }

    DPRINTF(EthernetSM, "entering next txState=%s\n",
            NsTxStateStrings[txState]);

    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
            NsTxStateStrings[txState]);
}

void
NSGigE::transferDone()
{
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
        return;
    }

    DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");

    if (txEvent.scheduled())
        txEvent.reschedule(curTick + 1);
    else
        txEvent.schedule(curTick + 1);
}

bool
NSGigE::rxFilter(PacketPtr packet)
{
    bool drop = true;
    string type;

    if (packet->IsUnicast()) {
        type = "unicast";

        // If we're accepting all unicast addresses
        if (acceptUnicast)
            drop = false;

        // If we make a perfect match
        if (acceptPerfect &&
            memcmp(rom.perfectMatch, packet->data, EADDR_LEN) == 0)
            drop = false;

        eth_header *eth = (eth_header *) packet->data;
        // 0x608 is the ARP ethertype (0x0806) without the network-order
        // byte swap
        if ((acceptArp) && (eth->type == 0x608))
            drop = false;

    } else if (packet->IsBroadcast()) {
        type = "broadcast";

        // if we're accepting broadcasts
        if (acceptBroadcast)
            drop = false;

    } else if (packet->IsMulticast()) {
        type = "multicast";

        // if we're accepting all multicasts
        if (acceptMulticast)
            drop = false;

    } else {
        type = "unknown";

        // oh well, punt on this one
    }

    if (drop) {
        DPRINTF(Ethernet, "rxFilter drop\n");
        DDUMP(EthernetData, packet->data, packet->length);
    }

    return drop;
}

bool
NSGigE::recvPacket(PacketPtr packet)
{
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
            maxRxFifoSize - rxFifoCnt);

    if (!rxEnable) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        debug_break();
        interface->recvDone();
        return true;
    }

    if (rxFilterEnable && rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        interface->recvDone();
        return true;
    }

    if ((rxFifoCnt + packet->length) >= maxRxFifoSize) {
        DPRINTF(Ethernet,
                "packet will not fit in receive buffer...packet dropped\n");
        devIntrPost(ISR_RXORN);
        return false;
    }

    rxFifo.push_back(packet);
    rxFifoCnt += packet->length;
    interface->recvDone();

    rxKick();
    return true;
}

/**
 * Does a UDP checksum.
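 *
 * The UDP/TCP checksum is the 16-bit one's complement of the one's
 * complement sum of a pseudo header (source/destination IP, protocol,
 * and length) followed by the transport header and payload.  Verifying
 * a received packet therefore just means recomputing the sum over
 * everything, stored checksum included, and checking that the result
 * folds to zero.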
 * If gen is true, the checksum is generated and stored in the packet
 * header; otherwise the computed value is checked against the checksum
 * already present in the header.
 */
bool
NSGigE::udpChecksum(PacketPtr packet, bool gen)
{
    ip_header *ip = packet->getIpHdr();
    udp_header *hdr = packet->getUdpHdr(ip);

    pseudo_header *pseudo = new pseudo_header;

    pseudo->src_ip_addr = ip->src_ip_addr;
    pseudo->dest_ip_addr = ip->dest_ip_addr;
    pseudo->protocol = ip->protocol;
    pseudo->len = hdr->len;

    uint16_t cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr,
                                  (uint32_t) hdr->len);

    delete pseudo;
    if (gen)
        hdr->chksum = cksum;
    else
        if (cksum != 0)
            return false;

    return true;
}

bool
NSGigE::tcpChecksum(PacketPtr packet, bool gen)
{
    ip_header *ip = packet->getIpHdr();
    tcp_header *hdr = packet->getTcpHdr(ip);

    uint16_t cksum;
    pseudo_header *pseudo = new pseudo_header;
    if (!gen) {
        pseudo->src_ip_addr = ip->src_ip_addr;
        pseudo->dest_ip_addr = ip->dest_ip_addr;
        pseudo->protocol = reverseEnd16(ip->protocol);
        pseudo->len = reverseEnd16(reverseEnd16(ip->dgram_len) -
                                   (ip->vers_len & 0xf)*4);

        cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr,
                             (uint32_t) reverseEnd16(pseudo->len));
    } else {
        pseudo->src_ip_addr = 0;
        pseudo->dest_ip_addr = 0;
        pseudo->protocol = hdr->chksum;
        pseudo->len = 0;
        hdr->chksum = 0;
        cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr,
                             (uint32_t) (reverseEnd16(ip->dgram_len) -
                                         (ip->vers_len & 0xf)*4));
    }

    delete pseudo;
    if (gen)
        hdr->chksum = cksum;
    else
        if (cksum != 0)
            return false;

    return true;
}

bool
NSGigE::ipChecksum(PacketPtr packet, bool gen)
{
    ip_header *hdr = packet->getIpHdr();

    uint16_t cksum = checksumCalc(NULL, (uint16_t *) hdr,
                                  (hdr->vers_len & 0xf)*4);

    if (gen) {
        DPRINTF(EthernetCksum, "generated checksum: %#x\n", cksum);
        hdr->hdr_chksum = cksum;
    }
    else
        if (cksum != 0)
            return false;

    return true;
}

uint16_t
NSGigE::checksumCalc(uint16_t *pseudo, uint16_t *buf, uint32_t len)
{
    uint32_t sum = 0;

    uint16_t last_pad = 0;
    if (len & 1) {
        last_pad = buf[len/2] & 0xff;
        len--;
        sum += last_pad;
    }

    if (pseudo) {
        sum = pseudo[0] + pseudo[1] + pseudo[2] +
            pseudo[3] + pseudo[4] + pseudo[5];
    }

    for (int i = 0; i < (len/2); ++i) {
        sum += buf[i];
    }

    while (sum >> 16)
        sum = (sum >> 16) + (sum & 0xffff);

    return ~sum;
}

//=====================================================================
//
//
void
NSGigE::serialize(ostream &os)
{
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
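     * If a DMA completion event is still scheduled, perform the copy
     * immediately so the checkpoint never has to describe an in-flight
     * DMA transfer.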
//=====================================================================
//
//
void
NSGigE::serialize(ostream &os)
{
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
     */
    if (rxDmaReadEvent.scheduled())
        rxDmaReadCopy();
    if (rxDmaWriteEvent.scheduled())
        rxDmaWriteCopy();
    if (txDmaReadEvent.scheduled())
        txDmaReadCopy();
    if (txDmaWriteEvent.scheduled())
        txDmaWriteCopy();

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    SERIALIZE_ARRAY(rom.perfectMatch, EADDR_LEN);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    int txNumPkts = txFifo.size();
    SERIALIZE_SCALAR(txNumPkts);
    int i = 0;
    pktiter_t end = txFifo.end();
    for (pktiter_t p = txFifo.begin(); p != end; ++p) {
        nameOut(os, csprintf("%s.txFifo%d", name(), i++));
        (*p)->serialize(os);
    }

    int rxNumPkts = rxFifo.size();
    SERIALIZE_SCALAR(rxNumPkts);
    i = 0;
    end = rxFifo.end();
    for (pktiter_t p = rxFifo.begin(); p != end; ++p) {
        nameOut(os, csprintf("%s.rxFifo%d", name(), i++));
        (*p)->serialize(os);
    }

    /*
     * Serialize the various helper variables
     */
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        nameOut(os, csprintf("%s.txPacket", name()));
        txPacket->serialize(os);
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        nameOut(os, csprintf("%s.rxPacket", name()));
        rxPacket->serialize(os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize DescCaches
     */
    SERIALIZE_SCALAR(txDescCache.link);
    SERIALIZE_SCALAR(txDescCache.bufptr);
    SERIALIZE_SCALAR(txDescCache.cmdsts);
    SERIALIZE_SCALAR(txDescCache.extsts);
    SERIALIZE_SCALAR(rxDescCache.link);
    SERIALIZE_SCALAR(rxDescCache.bufptr);
    SERIALIZE_SCALAR(rxDescCache.cmdsts);
    SERIALIZE_SCALAR(rxDescCache.extsts);

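    /*
     * The state-machine and DMA-state enums below are copied into plain
     * ints before being passed to SERIALIZE_SCALAR; presumably this is
     * so the macros see an ordinary integral lvalue of known width
     * rather than an enum type.
     */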
    /*
     * Serialize tx state machine
     */
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFifoAvail);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFifoCnt);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);

    SERIALIZE_SCALAR(extstsEnable);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);

    /*
     * Keep track of pending interrupt status.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);
}

void
NSGigE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Unserialize the PciDev base class
    PciDev::unserialize(cp, section);

    UNSERIALIZE_SCALAR(regs.command);
    UNSERIALIZE_SCALAR(regs.config);
    UNSERIALIZE_SCALAR(regs.mear);
    UNSERIALIZE_SCALAR(regs.ptscr);
    UNSERIALIZE_SCALAR(regs.isr);
    UNSERIALIZE_SCALAR(regs.imr);
    UNSERIALIZE_SCALAR(regs.ier);
    UNSERIALIZE_SCALAR(regs.ihr);
    UNSERIALIZE_SCALAR(regs.txdp);
    UNSERIALIZE_SCALAR(regs.txdp_hi);
    UNSERIALIZE_SCALAR(regs.txcfg);
    UNSERIALIZE_SCALAR(regs.gpior);
    UNSERIALIZE_SCALAR(regs.rxdp);
    UNSERIALIZE_SCALAR(regs.rxdp_hi);
    UNSERIALIZE_SCALAR(regs.rxcfg);
    UNSERIALIZE_SCALAR(regs.pqcr);
    UNSERIALIZE_SCALAR(regs.wcsr);
    UNSERIALIZE_SCALAR(regs.pcr);
    UNSERIALIZE_SCALAR(regs.rfcr);
    UNSERIALIZE_SCALAR(regs.rfdr);
    UNSERIALIZE_SCALAR(regs.srr);
    UNSERIALIZE_SCALAR(regs.mibc);
    UNSERIALIZE_SCALAR(regs.vrcr);
    UNSERIALIZE_SCALAR(regs.vtcr);
    UNSERIALIZE_SCALAR(regs.vdr);
    UNSERIALIZE_SCALAR(regs.ccsr);
    UNSERIALIZE_SCALAR(regs.tbicr);
    UNSERIALIZE_SCALAR(regs.tbisr);
    UNSERIALIZE_SCALAR(regs.tanar);
    UNSERIALIZE_SCALAR(regs.tanlpar);
    UNSERIALIZE_SCALAR(regs.taner);
    UNSERIALIZE_SCALAR(regs.tesr);

    UNSERIALIZE_ARRAY(rom.perfectMatch, EADDR_LEN);

    UNSERIALIZE_SCALAR(ioEnable);

    /*
     * unserialize the data fifos
     */
    int txNumPkts;
    UNSERIALIZE_SCALAR(txNumPkts);
    int i;
    for (i = 0; i < txNumPkts; ++i) {
        PacketPtr p = new EtherPacket;
        // must match the %s.txFifo%d names written by serialize()
        p->unserialize(cp, csprintf("%s.txFifo%d", section, i));
        txFifo.push_back(p);
    }

    int rxNumPkts;
    UNSERIALIZE_SCALAR(rxNumPkts);
    for (i = 0; i < rxNumPkts; ++i) {
        PacketPtr p = new EtherPacket;
        p->unserialize(cp, csprintf("%s.rxFifo%d", section, i));
        rxFifo.push_back(p);
    }

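    /*
     * txPacketBufPtr and rxPacketBufPtr are raw pointers into packet
     * data, so the checkpoint stores them as byte offsets (txPktBufPtr,
     * rxPktBufPtr); they are rebuilt below against the freshly
     * unserialized packets.
     */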
    /*
     * unserialize the various helper variables
     */
    bool txPacketExists;
    UNSERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket = new EtherPacket;
        txPacket->unserialize(cp, csprintf("%s.txPacket", section));
        uint32_t txPktBufPtr;
        UNSERIALIZE_SCALAR(txPktBufPtr);
        txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
    } else
        txPacket = 0;

    bool rxPacketExists;
    UNSERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        rxPacket = new EtherPacket;
        rxPacket->unserialize(cp, csprintf("%s.rxPacket", section));
        uint32_t rxPktBufPtr;
        UNSERIALIZE_SCALAR(rxPktBufPtr);
        rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
    } else
        rxPacket = 0;

    UNSERIALIZE_SCALAR(txXferLen);
    UNSERIALIZE_SCALAR(rxXferLen);

    /*
     * Unserialize DescCaches
     */
    UNSERIALIZE_SCALAR(txDescCache.link);
    UNSERIALIZE_SCALAR(txDescCache.bufptr);
    UNSERIALIZE_SCALAR(txDescCache.cmdsts);
    UNSERIALIZE_SCALAR(txDescCache.extsts);
    UNSERIALIZE_SCALAR(rxDescCache.link);
    UNSERIALIZE_SCALAR(rxDescCache.bufptr);
    UNSERIALIZE_SCALAR(rxDescCache.cmdsts);
    UNSERIALIZE_SCALAR(rxDescCache.extsts);

    /*
     * unserialize tx state machine
     */
    int txState;
    UNSERIALIZE_SCALAR(txState);
    this->txState = (TxState) txState;
    UNSERIALIZE_SCALAR(txEnable);
    UNSERIALIZE_SCALAR(CTDD);
    UNSERIALIZE_SCALAR(txFifoAvail);
    UNSERIALIZE_SCALAR(txFragPtr);
    UNSERIALIZE_SCALAR(txDescCnt);
    int txDmaState;
    UNSERIALIZE_SCALAR(txDmaState);
    this->txDmaState = (DmaState) txDmaState;

    /*
     * unserialize rx state machine
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    this->rxState = (RxState) rxState;
    UNSERIALIZE_SCALAR(rxEnable);
    UNSERIALIZE_SCALAR(CRDD);
    UNSERIALIZE_SCALAR(rxPktBytes);
    UNSERIALIZE_SCALAR(rxFifoCnt);
    UNSERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState;
    UNSERIALIZE_SCALAR(rxDmaState);
    this->rxDmaState = (DmaState) rxDmaState;

    UNSERIALIZE_SCALAR(extstsEnable);

    /*
     * If there's a pending transmit, reschedule it now
     */
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        txEvent.schedule(curTick + transmitTick);

    /*
     * unserialize receive address filter settings
     */
    UNSERIALIZE_SCALAR(rxFilterEnable);
    UNSERIALIZE_SCALAR(acceptBroadcast);
    UNSERIALIZE_SCALAR(acceptMulticast);
    UNSERIALIZE_SCALAR(acceptUnicast);
    UNSERIALIZE_SCALAR(acceptPerfect);
    UNSERIALIZE_SCALAR(acceptArp);

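    /*
     * Note the asymmetry: transmitTick above is stored relative to
     * curTick and rescheduled as curTick + transmitTick, while
     * intrEventTick below is the absolute tick recorded by serialize()
     * and is rescheduled as-is.
     */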
    /*
     * Keep track of pending interrupt status.
     */
    UNSERIALIZE_SCALAR(intrTick);
    UNSERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick;
    UNSERIALIZE_SCALAR(intrEventTick);
    if (intrEventTick) {
        intrEvent = new IntrEvent(this, true);
        intrEvent->schedule(intrEventTick);
    }

    /*
     * re-add addrRanges to bus bridges
     */
    if (pioInterface) {
        pioInterface->addAddrRange(BARAddrs[0], BARAddrs[0] + BARSize[0] - 1);
        pioInterface->addAddrRange(BARAddrs[1], BARAddrs[1] + BARSize[1] - 1);
    }
}

Tick
NSGigE::cacheAccess(MemReqPtr &req)
{
    DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
            req->paddr, req->paddr - addr);
    return curTick + pioLatency;
}
//=====================================================================


//********** helper functions******************************************

uint16_t reverseEnd16(uint16_t num)
{
    uint16_t reverse = (num & 0xff)<<8;
    reverse += ((num & 0xff00) >> 8);
    return reverse;
}

uint32_t reverseEnd32(uint32_t num)
{
    uint32_t reverse = (reverseEnd16(num & 0xffff)) << 16;
    reverse += reverseEnd16((uint16_t) ((num & 0xffff0000) >> 16));
    return reverse;
}


//=====================================================================

BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    SimObjectParam<EtherInt *> peer;
    SimObjectParam<NSGigE *> device;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

CREATE_SIM_OBJECT(NSGigEInt)
{
    NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);

    EtherInt *p = (EtherInt *)peer;
    if (p) {
        // link both directions so either endpoint can hand packets to
        // its peer
        dev_int->setPeer(p);
        p->setPeer(dev_int);
    }

    return dev_int;
}

REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)

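/*
 * NSGigE SimObject parameters.  The DECLARE block lists the parameter
 * names and types; the INIT block pairs each name with a description
 * and, for INIT_PARAM_DFLT entries, the default used when the
 * configuration script does not supply a value.
 */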
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    Param<Tick> tx_delay;
    Param<Tick> rx_delay;
    SimObjectParam<IntrControl *> intr_ctrl;
    Param<Tick> intr_delay;
    SimObjectParam<MemoryController *> mmu;
    SimObjectParam<PhysicalMemory *> physmem;
    Param<bool> rx_filter;
    Param<string> hardware_address;
    SimObjectParam<Bus*> header_bus;
    SimObjectParam<Bus*> payload_bus;
    SimObjectParam<HierParams *> hier;
    Param<Tick> pio_latency;
    Param<bool> dma_desc_free;
    Param<bool> dma_data_free;
    Param<Tick> dma_read_delay;
    Param<Tick> dma_write_delay;
    Param<Tick> dma_read_factor;
    Param<Tick> dma_write_factor;
    SimObjectParam<PciConfigAll *> configspace;
    SimObjectParam<PciConfigData *> configdata;
    SimObjectParam<Tsunami *> tsunami;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;
    Param<uint32_t> tx_fifo_size;
    Param<uint32_t> rx_fifo_size;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)

    INIT_PARAM_DFLT(tx_delay, "Transmit Delay", 1000),
    INIT_PARAM_DFLT(rx_delay, "Receive Delay", 1000),
    INIT_PARAM(intr_ctrl, "Interrupt Controller"),
    INIT_PARAM_DFLT(intr_delay, "Interrupt Delay in microseconds", 0),
    INIT_PARAM(mmu, "Memory Controller"),
    INIT_PARAM(physmem, "Physical Memory"),
    INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true),
    INIT_PARAM_DFLT(hardware_address, "Ethernet Hardware Address",
                    "00:99:00:00:00:01"),
    INIT_PARAM_DFLT(header_bus, "The IO Bus to attach to for headers", NULL),
    INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL),
    INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams),
    INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
    INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false),
    INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false),
    INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0),
    INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0),
    INIT_PARAM(configspace, "PCI Configspace"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(tsunami, "Tsunami"),
    INIT_PARAM(pci_bus, "PCI bus"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),
    INIT_PARAM_DFLT(tx_fifo_size, "max size in bytes of txFifo", 131072),
    INIT_PARAM_DFLT(rx_fifo_size, "max size in bytes of rxFifo", 131072)

END_INIT_SIM_OBJECT_PARAMS(NSGigE)


CREATE_SIM_OBJECT(NSGigE)
{
    int eaddr[6];
    sscanf(((string)hardware_address).c_str(), "%x:%x:%x:%x:%x:%x",
           &eaddr[0], &eaddr[1], &eaddr[2], &eaddr[3], &eaddr[4], &eaddr[5]);

    return new NSGigE(getInstanceName(), intr_ctrl, intr_delay,
                      physmem, tx_delay, rx_delay, mmu, hier, header_bus,
                      payload_bus, pio_latency, dma_desc_free, dma_data_free,
                      dma_read_delay, dma_write_delay, dma_read_factor,
                      dma_write_factor, configspace, configdata,
                      tsunami, pci_bus, pci_dev, pci_func, rx_filter, eaddr,
                      tx_fifo_size, rx_fifo_size);
}

REGISTER_SIM_OBJECT("NSGigE", NSGigE)