// ns_gige.cc revision 3348
1/* 2 * Copyright (c) 2004-2005 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 * 28 * Authors: Nathan Binkert 29 * Lisa Hsu 30 */ 31 32/** @file 33 * Device module for modelling the National Semiconductor 34 * DP83820 ethernet controller. 
Does not support priority queueing 35 */ 36#include <deque> 37#include <string> 38 39#include "arch/alpha/ev5.hh" 40#include "base/inet.hh" 41#include "cpu/thread_context.hh" 42#include "dev/etherlink.hh" 43#include "dev/ns_gige.hh" 44#include "dev/pciconfigall.hh" 45#include "mem/packet.hh" 46#include "mem/packet_access.hh" 47#include "sim/builder.hh" 48#include "sim/debug.hh" 49#include "sim/host.hh" 50#include "sim/stats.hh" 51#include "sim/system.hh" 52 53const char *NsRxStateStrings[] = 54{ 55 "rxIdle", 56 "rxDescRefr", 57 "rxDescRead", 58 "rxFifoBlock", 59 "rxFragWrite", 60 "rxDescWrite", 61 "rxAdvance" 62}; 63 64const char *NsTxStateStrings[] = 65{ 66 "txIdle", 67 "txDescRefr", 68 "txDescRead", 69 "txFifoBlock", 70 "txFragRead", 71 "txDescWrite", 72 "txAdvance" 73}; 74 75const char *NsDmaState[] = 76{ 77 "dmaIdle", 78 "dmaReading", 79 "dmaWriting", 80 "dmaReadWaiting", 81 "dmaWriteWaiting" 82}; 83 84using namespace std; 85using namespace Net; 86using namespace TheISA; 87 88/////////////////////////////////////////////////////////////////////// 89// 90// NSGigE PCI Device 91// 92NSGigE::NSGigE(Params *p) 93 : PciDev(p), ioEnable(false), 94 txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size), 95 txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL), 96 txXferLen(0), rxXferLen(0), clock(p->clock), 97 txState(txIdle), txEnable(false), CTDD(false), 98 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle), 99 rxEnable(false), CRDD(false), rxPktBytes(0), 100 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false), 101 eepromState(eepromStart), rxDmaReadEvent(this), rxDmaWriteEvent(this), 102 txDmaReadEvent(this), txDmaWriteEvent(this), 103 dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free), 104 txDelay(p->tx_delay), rxDelay(p->rx_delay), 105 rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this), 106 txEvent(this), rxFilterEnable(p->rx_filter), acceptBroadcast(false), 107 acceptMulticast(false), 
acceptUnicast(false), 108 acceptPerfect(false), acceptArp(false), multicastHashEnable(false), 109 intrTick(0), cpuPendingIntr(false), 110 intrEvent(0), interface(0) 111{ 112 113 intrDelay = p->intr_delay; 114 dmaReadDelay = p->dma_read_delay; 115 dmaWriteDelay = p->dma_write_delay; 116 dmaReadFactor = p->dma_read_factor; 117 dmaWriteFactor = p->dma_write_factor; 118 119 regsReset(); 120 memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN); 121 122 memset(&rxDesc32, 0, sizeof(rxDesc32)); 123 memset(&txDesc32, 0, sizeof(txDesc32)); 124 memset(&rxDesc64, 0, sizeof(rxDesc64)); 125 memset(&txDesc64, 0, sizeof(txDesc64)); 126} 127 128NSGigE::~NSGigE() 129{} 130 131void 132NSGigE::regStats() 133{ 134 txBytes 135 .name(name() + ".txBytes") 136 .desc("Bytes Transmitted") 137 .prereq(txBytes) 138 ; 139 140 rxBytes 141 .name(name() + ".rxBytes") 142 .desc("Bytes Received") 143 .prereq(rxBytes) 144 ; 145 146 txPackets 147 .name(name() + ".txPackets") 148 .desc("Number of Packets Transmitted") 149 .prereq(txBytes) 150 ; 151 152 rxPackets 153 .name(name() + ".rxPackets") 154 .desc("Number of Packets Received") 155 .prereq(rxBytes) 156 ; 157 158 txIpChecksums 159 .name(name() + ".txIpChecksums") 160 .desc("Number of tx IP Checksums done by device") 161 .precision(0) 162 .prereq(txBytes) 163 ; 164 165 rxIpChecksums 166 .name(name() + ".rxIpChecksums") 167 .desc("Number of rx IP Checksums done by device") 168 .precision(0) 169 .prereq(rxBytes) 170 ; 171 172 txTcpChecksums 173 .name(name() + ".txTcpChecksums") 174 .desc("Number of tx TCP Checksums done by device") 175 .precision(0) 176 .prereq(txBytes) 177 ; 178 179 rxTcpChecksums 180 .name(name() + ".rxTcpChecksums") 181 .desc("Number of rx TCP Checksums done by device") 182 .precision(0) 183 .prereq(rxBytes) 184 ; 185 186 txUdpChecksums 187 .name(name() + ".txUdpChecksums") 188 .desc("Number of tx UDP Checksums done by device") 189 .precision(0) 190 .prereq(txBytes) 191 ; 192 193 rxUdpChecksums 194 .name(name() + 
".rxUdpChecksums") 195 .desc("Number of rx UDP Checksums done by device") 196 .precision(0) 197 .prereq(rxBytes) 198 ; 199 200 descDmaReads 201 .name(name() + ".descDMAReads") 202 .desc("Number of descriptors the device read w/ DMA") 203 .precision(0) 204 ; 205 206 descDmaWrites 207 .name(name() + ".descDMAWrites") 208 .desc("Number of descriptors the device wrote w/ DMA") 209 .precision(0) 210 ; 211 212 descDmaRdBytes 213 .name(name() + ".descDmaReadBytes") 214 .desc("number of descriptor bytes read w/ DMA") 215 .precision(0) 216 ; 217 218 descDmaWrBytes 219 .name(name() + ".descDmaWriteBytes") 220 .desc("number of descriptor bytes write w/ DMA") 221 .precision(0) 222 ; 223 224 txBandwidth 225 .name(name() + ".txBandwidth") 226 .desc("Transmit Bandwidth (bits/s)") 227 .precision(0) 228 .prereq(txBytes) 229 ; 230 231 rxBandwidth 232 .name(name() + ".rxBandwidth") 233 .desc("Receive Bandwidth (bits/s)") 234 .precision(0) 235 .prereq(rxBytes) 236 ; 237 238 totBandwidth 239 .name(name() + ".totBandwidth") 240 .desc("Total Bandwidth (bits/s)") 241 .precision(0) 242 .prereq(totBytes) 243 ; 244 245 totPackets 246 .name(name() + ".totPackets") 247 .desc("Total Packets") 248 .precision(0) 249 .prereq(totBytes) 250 ; 251 252 totBytes 253 .name(name() + ".totBytes") 254 .desc("Total Bytes") 255 .precision(0) 256 .prereq(totBytes) 257 ; 258 259 totPacketRate 260 .name(name() + ".totPPS") 261 .desc("Total Tranmission Rate (packets/s)") 262 .precision(0) 263 .prereq(totBytes) 264 ; 265 266 txPacketRate 267 .name(name() + ".txPPS") 268 .desc("Packet Tranmission Rate (packets/s)") 269 .precision(0) 270 .prereq(txBytes) 271 ; 272 273 rxPacketRate 274 .name(name() + ".rxPPS") 275 .desc("Packet Reception Rate (packets/s)") 276 .precision(0) 277 .prereq(rxBytes) 278 ; 279 280 postedSwi 281 .name(name() + ".postedSwi") 282 .desc("number of software interrupts posted to CPU") 283 .precision(0) 284 ; 285 286 totalSwi 287 .name(name() + ".totalSwi") 288 .desc("total number of Swi written 
to ISR") 289 .precision(0) 290 ; 291 292 coalescedSwi 293 .name(name() + ".coalescedSwi") 294 .desc("average number of Swi's coalesced into each post") 295 .precision(0) 296 ; 297 298 postedRxIdle 299 .name(name() + ".postedRxIdle") 300 .desc("number of rxIdle interrupts posted to CPU") 301 .precision(0) 302 ; 303 304 totalRxIdle 305 .name(name() + ".totalRxIdle") 306 .desc("total number of RxIdle written to ISR") 307 .precision(0) 308 ; 309 310 coalescedRxIdle 311 .name(name() + ".coalescedRxIdle") 312 .desc("average number of RxIdle's coalesced into each post") 313 .precision(0) 314 ; 315 316 postedRxOk 317 .name(name() + ".postedRxOk") 318 .desc("number of RxOk interrupts posted to CPU") 319 .precision(0) 320 ; 321 322 totalRxOk 323 .name(name() + ".totalRxOk") 324 .desc("total number of RxOk written to ISR") 325 .precision(0) 326 ; 327 328 coalescedRxOk 329 .name(name() + ".coalescedRxOk") 330 .desc("average number of RxOk's coalesced into each post") 331 .precision(0) 332 ; 333 334 postedRxDesc 335 .name(name() + ".postedRxDesc") 336 .desc("number of RxDesc interrupts posted to CPU") 337 .precision(0) 338 ; 339 340 totalRxDesc 341 .name(name() + ".totalRxDesc") 342 .desc("total number of RxDesc written to ISR") 343 .precision(0) 344 ; 345 346 coalescedRxDesc 347 .name(name() + ".coalescedRxDesc") 348 .desc("average number of RxDesc's coalesced into each post") 349 .precision(0) 350 ; 351 352 postedTxOk 353 .name(name() + ".postedTxOk") 354 .desc("number of TxOk interrupts posted to CPU") 355 .precision(0) 356 ; 357 358 totalTxOk 359 .name(name() + ".totalTxOk") 360 .desc("total number of TxOk written to ISR") 361 .precision(0) 362 ; 363 364 coalescedTxOk 365 .name(name() + ".coalescedTxOk") 366 .desc("average number of TxOk's coalesced into each post") 367 .precision(0) 368 ; 369 370 postedTxIdle 371 .name(name() + ".postedTxIdle") 372 .desc("number of TxIdle interrupts posted to CPU") 373 .precision(0) 374 ; 375 376 totalTxIdle 377 .name(name() + 
".totalTxIdle") 378 .desc("total number of TxIdle written to ISR") 379 .precision(0) 380 ; 381 382 coalescedTxIdle 383 .name(name() + ".coalescedTxIdle") 384 .desc("average number of TxIdle's coalesced into each post") 385 .precision(0) 386 ; 387 388 postedTxDesc 389 .name(name() + ".postedTxDesc") 390 .desc("number of TxDesc interrupts posted to CPU") 391 .precision(0) 392 ; 393 394 totalTxDesc 395 .name(name() + ".totalTxDesc") 396 .desc("total number of TxDesc written to ISR") 397 .precision(0) 398 ; 399 400 coalescedTxDesc 401 .name(name() + ".coalescedTxDesc") 402 .desc("average number of TxDesc's coalesced into each post") 403 .precision(0) 404 ; 405 406 postedRxOrn 407 .name(name() + ".postedRxOrn") 408 .desc("number of RxOrn posted to CPU") 409 .precision(0) 410 ; 411 412 totalRxOrn 413 .name(name() + ".totalRxOrn") 414 .desc("total number of RxOrn written to ISR") 415 .precision(0) 416 ; 417 418 coalescedRxOrn 419 .name(name() + ".coalescedRxOrn") 420 .desc("average number of RxOrn's coalesced into each post") 421 .precision(0) 422 ; 423 424 coalescedTotal 425 .name(name() + ".coalescedTotal") 426 .desc("average number of interrupts coalesced into each post") 427 .precision(0) 428 ; 429 430 postedInterrupts 431 .name(name() + ".postedInterrupts") 432 .desc("number of posts to CPU") 433 .precision(0) 434 ; 435 436 droppedPackets 437 .name(name() + ".droppedPackets") 438 .desc("number of packets dropped") 439 .precision(0) 440 ; 441 442 coalescedSwi = totalSwi / postedInterrupts; 443 coalescedRxIdle = totalRxIdle / postedInterrupts; 444 coalescedRxOk = totalRxOk / postedInterrupts; 445 coalescedRxDesc = totalRxDesc / postedInterrupts; 446 coalescedTxOk = totalTxOk / postedInterrupts; 447 coalescedTxIdle = totalTxIdle / postedInterrupts; 448 coalescedTxDesc = totalTxDesc / postedInterrupts; 449 coalescedRxOrn = totalRxOrn / postedInterrupts; 450 451 coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc + 452 totalTxOk + totalTxIdle + totalTxDesc 
+ 453 totalRxOrn) / postedInterrupts; 454 455 txBandwidth = txBytes * Stats::constant(8) / simSeconds; 456 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds; 457 totBandwidth = txBandwidth + rxBandwidth; 458 totBytes = txBytes + rxBytes; 459 totPackets = txPackets + rxPackets; 460 461 txPacketRate = txPackets / simSeconds; 462 rxPacketRate = rxPackets / simSeconds; 463} 464 465 466/** 467 * This is to write to the PCI general configuration registers 468 */ 469Tick 470NSGigE::writeConfig(Packet *pkt) 471{ 472 int offset = pkt->getAddr() & PCI_CONFIG_SIZE; 473 if (offset < PCI_DEVICE_SPECIFIC) 474 PciDev::writeConfig(pkt); 475 else 476 panic("Device specific PCI config space not implemented!\n"); 477 478 switch (offset) { 479 // seems to work fine without all these PCI settings, but i 480 // put in the IO to double check, an assertion will fail if we 481 // need to properly implement it 482 case PCI_COMMAND: 483 if (config.data[offset] & PCI_CMD_IOSE) 484 ioEnable = true; 485 else 486 ioEnable = false; 487 break; 488 } 489 pkt->result = Packet::Success; 490 return configDelay; 491} 492 493/** 494 * This reads the device registers, which are detailed in the NS83820 495 * spec sheet 496 */ 497Tick 498NSGigE::read(Packet *pkt) 499{ 500 assert(ioEnable); 501 502 pkt->allocate(); 503 504 //The mask is to give you only the offset into the device register file 505 Addr daddr = pkt->getAddr() & 0xfff; 506 DPRINTF(EthernetPIO, "read da=%#x pa=%#x size=%d\n", 507 daddr, pkt->getAddr(), pkt->getSize()); 508 509 510 // there are some reserved registers, you can see ns_gige_reg.h and 511 // the spec sheet for details 512 if (daddr > LAST && daddr <= RESERVED) { 513 panic("Accessing reserved register"); 514 } else if (daddr > RESERVED && daddr <= 0x3FC) { 515 return readConfig(pkt); 516 } else if (daddr >= MIB_START && daddr <= MIB_END) { 517 // don't implement all the MIB's. 
hopefully the kernel 518 // doesn't actually DEPEND upon their values 519 // MIB are just hardware stats keepers 520 pkt->set<uint32_t>(0); 521 pkt->result = Packet::Success; 522 return pioDelay; 523 } else if (daddr > 0x3FC) 524 panic("Something is messed up!\n"); 525 526 assert(pkt->getSize() == sizeof(uint32_t)); 527 uint32_t ® = *pkt->getPtr<uint32_t>(); 528 uint16_t rfaddr; 529 530 switch (daddr) { 531 case CR: 532 reg = regs.command; 533 //these are supposed to be cleared on a read 534 reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR); 535 break; 536 537 case CFGR: 538 reg = regs.config; 539 break; 540 541 case MEAR: 542 reg = regs.mear; 543 break; 544 545 case PTSCR: 546 reg = regs.ptscr; 547 break; 548 549 case ISR: 550 reg = regs.isr; 551 devIntrClear(ISR_ALL); 552 break; 553 554 case IMR: 555 reg = regs.imr; 556 break; 557 558 case IER: 559 reg = regs.ier; 560 break; 561 562 case IHR: 563 reg = regs.ihr; 564 break; 565 566 case TXDP: 567 reg = regs.txdp; 568 break; 569 570 case TXDP_HI: 571 reg = regs.txdp_hi; 572 break; 573 574 case TX_CFG: 575 reg = regs.txcfg; 576 break; 577 578 case GPIOR: 579 reg = regs.gpior; 580 break; 581 582 case RXDP: 583 reg = regs.rxdp; 584 break; 585 586 case RXDP_HI: 587 reg = regs.rxdp_hi; 588 break; 589 590 case RX_CFG: 591 reg = regs.rxcfg; 592 break; 593 594 case PQCR: 595 reg = regs.pqcr; 596 break; 597 598 case WCSR: 599 reg = regs.wcsr; 600 break; 601 602 case PCR: 603 reg = regs.pcr; 604 break; 605 606 // see the spec sheet for how RFCR and RFDR work 607 // basically, you write to RFCR to tell the machine 608 // what you want to do next, then you act upon RFDR, 609 // and the device will be prepared b/c of what you 610 // wrote to RFCR 611 case RFCR: 612 reg = regs.rfcr; 613 break; 614 615 case RFDR: 616 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR); 617 switch (rfaddr) { 618 // Read from perfect match ROM octets 619 case 0x000: 620 reg = rom.perfectMatch[1]; 621 reg = reg << 8; 622 reg += rom.perfectMatch[0]; 623 break; 
624 case 0x002: 625 reg = rom.perfectMatch[3] << 8; 626 reg += rom.perfectMatch[2]; 627 break; 628 case 0x004: 629 reg = rom.perfectMatch[5] << 8; 630 reg += rom.perfectMatch[4]; 631 break; 632 default: 633 // Read filter hash table 634 if (rfaddr >= FHASH_ADDR && 635 rfaddr < FHASH_ADDR + FHASH_SIZE) { 636 637 // Only word-aligned reads supported 638 if (rfaddr % 2) 639 panic("unaligned read from filter hash table!"); 640 641 reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8; 642 reg += rom.filterHash[rfaddr - FHASH_ADDR]; 643 break; 644 } 645 646 panic("reading RFDR for something other than pattern" 647 " matching or hashing! %#x\n", rfaddr); 648 } 649 break; 650 651 case SRR: 652 reg = regs.srr; 653 break; 654 655 case MIBC: 656 reg = regs.mibc; 657 reg &= ~(MIBC_MIBS | MIBC_ACLR); 658 break; 659 660 case VRCR: 661 reg = regs.vrcr; 662 break; 663 664 case VTCR: 665 reg = regs.vtcr; 666 break; 667 668 case VDR: 669 reg = regs.vdr; 670 break; 671 672 case CCSR: 673 reg = regs.ccsr; 674 break; 675 676 case TBICR: 677 reg = regs.tbicr; 678 break; 679 680 case TBISR: 681 reg = regs.tbisr; 682 break; 683 684 case TANAR: 685 reg = regs.tanar; 686 break; 687 688 case TANLPAR: 689 reg = regs.tanlpar; 690 break; 691 692 case TANER: 693 reg = regs.taner; 694 break; 695 696 case TESR: 697 reg = regs.tesr; 698 break; 699 700 case M5REG: 701 reg = 0; 702 if (params()->rx_thread) 703 reg |= M5REG_RX_THREAD; 704 if (params()->tx_thread) 705 reg |= M5REG_TX_THREAD; 706 if (params()->rss) 707 reg |= M5REG_RSS; 708 break; 709 710 default: 711 panic("reading unimplemented register: addr=%#x", daddr); 712 } 713 714 DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n", 715 daddr, reg, reg); 716 717 pkt->result = Packet::Success; 718 return pioDelay; 719} 720 721Tick 722NSGigE::write(Packet *pkt) 723{ 724 assert(ioEnable); 725 726 Addr daddr = pkt->getAddr() & 0xfff; 727 DPRINTF(EthernetPIO, "write da=%#x pa=%#x size=%d\n", 728 daddr, pkt->getAddr(), pkt->getSize()); 729 730 if 
(daddr > LAST && daddr <= RESERVED) { 731 panic("Accessing reserved register"); 732 } else if (daddr > RESERVED && daddr <= 0x3FC) { 733 return writeConfig(pkt); 734 } else if (daddr > 0x3FC) 735 panic("Something is messed up!\n"); 736 737 if (pkt->getSize() == sizeof(uint32_t)) { 738 uint32_t reg = pkt->get<uint32_t>(); 739 uint16_t rfaddr; 740 741 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg); 742 743 switch (daddr) { 744 case CR: 745 regs.command = reg; 746 if (reg & CR_TXD) { 747 txEnable = false; 748 } else if (reg & CR_TXE) { 749 txEnable = true; 750 751 // the kernel is enabling the transmit machine 752 if (txState == txIdle) 753 txKick(); 754 } 755 756 if (reg & CR_RXD) { 757 rxEnable = false; 758 } else if (reg & CR_RXE) { 759 rxEnable = true; 760 761 if (rxState == rxIdle) 762 rxKick(); 763 } 764 765 if (reg & CR_TXR) 766 txReset(); 767 768 if (reg & CR_RXR) 769 rxReset(); 770 771 if (reg & CR_SWI) 772 devIntrPost(ISR_SWI); 773 774 if (reg & CR_RST) { 775 txReset(); 776 rxReset(); 777 778 regsReset(); 779 } 780 break; 781 782 case CFGR: 783 if (reg & CFGR_LNKSTS || 784 reg & CFGR_SPDSTS || 785 reg & CFGR_DUPSTS || 786 reg & CFGR_RESERVED || 787 reg & CFGR_T64ADDR || 788 reg & CFGR_PCI64_DET) 789 790 // First clear all writable bits 791 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS | 792 CFGR_RESERVED | CFGR_T64ADDR | 793 CFGR_PCI64_DET; 794 // Now set the appropriate writable bits 795 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS | 796 CFGR_RESERVED | CFGR_T64ADDR | 797 CFGR_PCI64_DET); 798 799// all these #if 0's are because i don't THINK the kernel needs to 800// have these implemented. if there is a problem relating to one of 801// these, you may need to add functionality in. 
802 if (reg & CFGR_TBI_EN) ; 803 if (reg & CFGR_MODE_1000) ; 804 805 if (reg & CFGR_AUTO_1000) 806 panic("CFGR_AUTO_1000 not implemented!\n"); 807 808 if (reg & CFGR_PINT_DUPSTS || 809 reg & CFGR_PINT_LNKSTS || 810 reg & CFGR_PINT_SPDSTS) 811 ; 812 813 if (reg & CFGR_TMRTEST) ; 814 if (reg & CFGR_MRM_DIS) ; 815 if (reg & CFGR_MWI_DIS) ; 816 817 if (reg & CFGR_T64ADDR) ; 818 // panic("CFGR_T64ADDR is read only register!\n"); 819 820 if (reg & CFGR_PCI64_DET) 821 panic("CFGR_PCI64_DET is read only register!\n"); 822 823 if (reg & CFGR_DATA64_EN) ; 824 if (reg & CFGR_M64ADDR) ; 825 if (reg & CFGR_PHY_RST) ; 826 if (reg & CFGR_PHY_DIS) ; 827 828 if (reg & CFGR_EXTSTS_EN) 829 extstsEnable = true; 830 else 831 extstsEnable = false; 832 833 if (reg & CFGR_REQALG) ; 834 if (reg & CFGR_SB) ; 835 if (reg & CFGR_POW) ; 836 if (reg & CFGR_EXD) ; 837 if (reg & CFGR_PESEL) ; 838 if (reg & CFGR_BROM_DIS) ; 839 if (reg & CFGR_EXT_125) ; 840 if (reg & CFGR_BEM) ; 841 break; 842 843 case MEAR: 844 // Clear writable bits 845 regs.mear &= MEAR_EEDO; 846 // Set appropriate writable bits 847 regs.mear |= reg & ~MEAR_EEDO; 848 849 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address) 850 // even though it could get it through RFDR 851 if (reg & MEAR_EESEL) { 852 // Rising edge of clock 853 if (reg & MEAR_EECLK && !eepromClk) 854 eepromKick(); 855 } 856 else { 857 eepromState = eepromStart; 858 regs.mear &= ~MEAR_EEDI; 859 } 860 861 eepromClk = reg & MEAR_EECLK; 862 863 // since phy is completely faked, MEAR_MD* don't matter 864 if (reg & MEAR_MDIO) ; 865 if (reg & MEAR_MDDIR) ; 866 if (reg & MEAR_MDC) ; 867 break; 868 869 case PTSCR: 870 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY); 871 // these control BISTs for various parts of chip - we 872 // don't care or do just fake that the BIST is done 873 if (reg & PTSCR_RBIST_EN) 874 regs.ptscr |= PTSCR_RBIST_DONE; 875 if (reg & PTSCR_EEBIST_EN) 876 regs.ptscr &= ~PTSCR_EEBIST_EN; 877 if (reg & PTSCR_EELOAD_EN) 878 regs.ptscr &= 
~PTSCR_EELOAD_EN; 879 break; 880 881 case ISR: /* writing to the ISR has no effect */ 882 panic("ISR is a read only register!\n"); 883 884 case IMR: 885 regs.imr = reg; 886 devIntrChangeMask(); 887 break; 888 889 case IER: 890 regs.ier = reg; 891 break; 892 893 case IHR: 894 regs.ihr = reg; 895 /* not going to implement real interrupt holdoff */ 896 break; 897 898 case TXDP: 899 regs.txdp = (reg & 0xFFFFFFFC); 900 assert(txState == txIdle); 901 CTDD = false; 902 break; 903 904 case TXDP_HI: 905 regs.txdp_hi = reg; 906 break; 907 908 case TX_CFG: 909 regs.txcfg = reg; 910#if 0 911 if (reg & TX_CFG_CSI) ; 912 if (reg & TX_CFG_HBI) ; 913 if (reg & TX_CFG_MLB) ; 914 if (reg & TX_CFG_ATP) ; 915 if (reg & TX_CFG_ECRETRY) { 916 /* 917 * this could easily be implemented, but considering 918 * the network is just a fake pipe, wouldn't make 919 * sense to do this 920 */ 921 } 922 923 if (reg & TX_CFG_BRST_DIS) ; 924#endif 925 926#if 0 927 /* we handle our own DMA, ignore the kernel's exhortations */ 928 if (reg & TX_CFG_MXDMA) ; 929#endif 930 931 // also, we currently don't care about fill/drain 932 // thresholds though this may change in the future with 933 // more realistic networks or a driver which changes it 934 // according to feedback 935 936 break; 937 938 case GPIOR: 939 // Only write writable bits 940 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN 941 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN; 942 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN 943 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN); 944 /* these just control general purpose i/o pins, don't matter */ 945 break; 946 947 case RXDP: 948 regs.rxdp = reg; 949 CRDD = false; 950 break; 951 952 case RXDP_HI: 953 regs.rxdp_hi = reg; 954 break; 955 956 case RX_CFG: 957 regs.rxcfg = reg; 958#if 0 959 if (reg & RX_CFG_AEP) ; 960 if (reg & RX_CFG_ARP) ; 961 if (reg & RX_CFG_STRIPCRC) ; 962 if (reg & RX_CFG_RX_RD) ; 963 if (reg & RX_CFG_ALP) ; 964 if (reg & RX_CFG_AIRL) ; 965 966 /* we 
handle our own DMA, ignore what kernel says about it */ 967 if (reg & RX_CFG_MXDMA) ; 968 969 //also, we currently don't care about fill/drain thresholds 970 //though this may change in the future with more realistic 971 //networks or a driver which changes it according to feedback 972 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ; 973#endif 974 break; 975 976 case PQCR: 977 /* there is no priority queueing used in the linux 2.6 driver */ 978 regs.pqcr = reg; 979 break; 980 981 case WCSR: 982 /* not going to implement wake on LAN */ 983 regs.wcsr = reg; 984 break; 985 986 case PCR: 987 /* not going to implement pause control */ 988 regs.pcr = reg; 989 break; 990 991 case RFCR: 992 regs.rfcr = reg; 993 994 rxFilterEnable = (reg & RFCR_RFEN) ? true : false; 995 acceptBroadcast = (reg & RFCR_AAB) ? true : false; 996 acceptMulticast = (reg & RFCR_AAM) ? true : false; 997 acceptUnicast = (reg & RFCR_AAU) ? true : false; 998 acceptPerfect = (reg & RFCR_APM) ? true : false; 999 acceptArp = (reg & RFCR_AARP) ? true : false; 1000 multicastHashEnable = (reg & RFCR_MHEN) ? 
true : false; 1001 1002#if 0 1003 if (reg & RFCR_APAT) 1004 panic("RFCR_APAT not implemented!\n"); 1005#endif 1006 if (reg & RFCR_UHEN) 1007 panic("Unicast hash filtering not used by drivers!\n"); 1008 1009 if (reg & RFCR_ULM) 1010 panic("RFCR_ULM not implemented!\n"); 1011 1012 break; 1013 1014 case RFDR: 1015 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR); 1016 switch (rfaddr) { 1017 case 0x000: 1018 rom.perfectMatch[0] = (uint8_t)reg; 1019 rom.perfectMatch[1] = (uint8_t)(reg >> 8); 1020 break; 1021 case 0x002: 1022 rom.perfectMatch[2] = (uint8_t)reg; 1023 rom.perfectMatch[3] = (uint8_t)(reg >> 8); 1024 break; 1025 case 0x004: 1026 rom.perfectMatch[4] = (uint8_t)reg; 1027 rom.perfectMatch[5] = (uint8_t)(reg >> 8); 1028 break; 1029 default: 1030 1031 if (rfaddr >= FHASH_ADDR && 1032 rfaddr < FHASH_ADDR + FHASH_SIZE) { 1033 1034 // Only word-aligned writes supported 1035 if (rfaddr % 2) 1036 panic("unaligned write to filter hash table!"); 1037 1038 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg; 1039 rom.filterHash[rfaddr - FHASH_ADDR + 1] 1040 = (uint8_t)(reg >> 8); 1041 break; 1042 } 1043 panic("writing RFDR for something other than pattern matching\ 1044 or hashing! 
%#x\n", rfaddr); 1045 } 1046 1047 case BRAR: 1048 regs.brar = reg; 1049 break; 1050 1051 case BRDR: 1052 panic("the driver never uses BRDR, something is wrong!\n"); 1053 1054 case SRR: 1055 panic("SRR is read only register!\n"); 1056 1057 case MIBC: 1058 panic("the driver never uses MIBC, something is wrong!\n"); 1059 1060 case VRCR: 1061 regs.vrcr = reg; 1062 break; 1063 1064 case VTCR: 1065 regs.vtcr = reg; 1066 break; 1067 1068 case VDR: 1069 panic("the driver never uses VDR, something is wrong!\n"); 1070 1071 case CCSR: 1072 /* not going to implement clockrun stuff */ 1073 regs.ccsr = reg; 1074 break; 1075 1076 case TBICR: 1077 regs.tbicr = reg; 1078 if (reg & TBICR_MR_LOOPBACK) 1079 panic("TBICR_MR_LOOPBACK never used, something wrong!\n"); 1080 1081 if (reg & TBICR_MR_AN_ENABLE) { 1082 regs.tanlpar = regs.tanar; 1083 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS); 1084 } 1085 1086#if 0 1087 if (reg & TBICR_MR_RESTART_AN) ; 1088#endif 1089 1090 break; 1091 1092 case TBISR: 1093 panic("TBISR is read only register!\n"); 1094 1095 case TANAR: 1096 // Only write the writable bits 1097 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED; 1098 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED); 1099 1100 // Pause capability unimplemented 1101#if 0 1102 if (reg & TANAR_PS2) ; 1103 if (reg & TANAR_PS1) ; 1104#endif 1105 1106 break; 1107 1108 case TANLPAR: 1109 panic("this should only be written to by the fake phy!\n"); 1110 1111 case TANER: 1112 panic("TANER is read only register!\n"); 1113 1114 case TESR: 1115 regs.tesr = reg; 1116 break; 1117 1118 default: 1119 panic("invalid register access daddr=%#x", daddr); 1120 } 1121 } else { 1122 panic("Invalid Request Size"); 1123 } 1124 pkt->result = Packet::Success; 1125 return pioDelay; 1126} 1127 1128void 1129NSGigE::devIntrPost(uint32_t interrupts) 1130{ 1131 if (interrupts & ISR_RESERVE) 1132 panic("Cannot set a reserved interrupt"); 1133 1134 if (interrupts & ISR_NOIMPL) 1135 warn("interrupt not 
implemented %#x\n", interrupts); 1136 1137 interrupts &= ISR_IMPL; 1138 regs.isr |= interrupts; 1139 1140 if (interrupts & regs.imr) { 1141 if (interrupts & ISR_SWI) { 1142 totalSwi++; 1143 } 1144 if (interrupts & ISR_RXIDLE) { 1145 totalRxIdle++; 1146 } 1147 if (interrupts & ISR_RXOK) { 1148 totalRxOk++; 1149 } 1150 if (interrupts & ISR_RXDESC) { 1151 totalRxDesc++; 1152 } 1153 if (interrupts & ISR_TXOK) { 1154 totalTxOk++; 1155 } 1156 if (interrupts & ISR_TXIDLE) { 1157 totalTxIdle++; 1158 } 1159 if (interrupts & ISR_TXDESC) { 1160 totalTxDesc++; 1161 } 1162 if (interrupts & ISR_RXORN) { 1163 totalRxOrn++; 1164 } 1165 } 1166 1167 DPRINTF(EthernetIntr, 1168 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n", 1169 interrupts, regs.isr, regs.imr); 1170 1171 if ((regs.isr & regs.imr)) { 1172 Tick when = curTick; 1173 if ((regs.isr & regs.imr & ISR_NODELAY) == 0) 1174 when += intrDelay; 1175 cpuIntrPost(when); 1176 } 1177} 1178 1179/* writing this interrupt counting stats inside this means that this function 1180 is now limited to being used to clear all interrupts upon the kernel 1181 reading isr and servicing. just telling you in case you were thinking 1182 of expanding use. 
*/
/**
 * Acknowledge (clear) device interrupts.  Because the "posted" stat
 * counters live here, this must only be called to clear all interrupts
 * when the kernel reads the ISR (see comment above).
 *
 * @param interrupts Mask of ISR bits to clear.
 */
void
NSGigE::devIntrClear(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot clear a reserved interrupt");

    // Count each pending-and-unmasked interrupt as delivered to the CPU.
    if (regs.isr & regs.imr & ISR_SWI) {
        postedSwi++;
    }
    if (regs.isr & regs.imr & ISR_RXIDLE) {
        postedRxIdle++;
    }
    if (regs.isr & regs.imr & ISR_RXOK) {
        postedRxOk++;
    }
    if (regs.isr & regs.imr & ISR_RXDESC) {
        postedRxDesc++;
    }
    if (regs.isr & regs.imr & ISR_TXOK) {
        postedTxOk++;
    }
    if (regs.isr & regs.imr & ISR_TXIDLE) {
        postedTxIdle++;
    }
    if (regs.isr & regs.imr & ISR_TXDESC) {
        postedTxDesc++;
    }
    if (regs.isr & regs.imr & ISR_RXORN) {
        postedRxOrn++;
    }

    // One CPU post may cover several ISR bits; count the post itself once.
    if (regs.isr & regs.imr & ISR_IMPL)
        postedInterrupts++;

    interrupts &= ~ISR_NOIMPL;
    regs.isr &= ~interrupts;

    DPRINTF(EthernetIntr,
            "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
            interrupts, regs.isr, regs.imr);

    // If nothing unmasked remains pending, deassert the CPU interrupt.
    if (!(regs.isr & regs.imr))
        cpuIntrClear();
}

/**
 * React to a change of the interrupt mask (IMR write): assert or
 * deassert the CPU interrupt according to the newly masked ISR state.
 */
void
NSGigE::devIntrChangeMask()
{
    DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
            regs.isr, regs.imr, regs.isr & regs.imr);

    if (regs.isr & regs.imr)
        cpuIntrPost(curTick);
    else
        cpuIntrClear();
}

/**
 * Schedule delivery of an interrupt to the CPU at tick @p when,
 * coalescing with an already-scheduled (earlier) delivery if possible.
 */
void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick);
    assert(intrTick >= curTick || intrTick == 0);
    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    // defensive clamp; should be unreachable given the asserts above
    if (intrTick < curTick) {
        debug_break();
        intrTick = curTick;
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    // Replace any previously scheduled delivery with the new, earlier one.
    if (intrEvent)
        intrEvent->squash();
    intrEvent = new IntrEvent(this, true);
    intrEvent->schedule(intrTick);
}

/**
 * Event callback: actually raise the interrupt line to the CPU,
 * unless one is already pending.
 */
void
NSGigE::cpuInterrupt()
{
    assert(intrTick == curTick);

    // Whether or not there's a pending interrupt, we don't care about
    // it anymore
    intrEvent = 0;
    intrTick = 0;

    // Don't send an interrupt if there's already one
    if (cpuPendingIntr) {
        DPRINTF(EthernetIntr,
                "would send an interrupt now, but there's already pending\n");
    } else {
        // Send interrupt
        cpuPendingIntr = true;

        DPRINTF(EthernetIntr, "posting interrupt\n");
        intrPost();
    }
}

/**
 * Deassert the CPU interrupt line and cancel any scheduled delivery.
 * No-op if no interrupt is currently pending.
 */
void
NSGigE::cpuIntrClear()
{
    if (!cpuPendingIntr)
        return;

    if (intrEvent) {
        intrEvent->squash();
        intrEvent = 0;
    }

    intrTick = 0;

    cpuPendingIntr = false;

    DPRINTF(EthernetIntr, "clearing interrupt\n");
    intrClear();
}

/// @return true if an interrupt is currently asserted to the CPU.
bool
NSGigE::cpuIntrPending() const
{ return cpuPendingIntr; }

/**
 * Reset the transmit side: drop descriptor state, drain the FIFO and
 * return the tx state machine to idle.  Asserts that no tx DMA or
 * descriptor processing is in flight.
 */
void
NSGigE::txReset()
{

    DPRINTF(Ethernet, "transmit reset\n");

    CTDD = false;
    txEnable = false;; // (stray extra ';' — harmless)
    txFragPtr = 0;
    assert(txDescCnt == 0);
    txFifo.clear();
    txState = txIdle;
    assert(txDmaState == dmaIdle);
}

/**
 * Reset the receive side: drop descriptor state, drain the FIFO and
 * return the rx state machine to idle.  Asserts that no rx DMA, packet
 * or descriptor processing is in flight.
 */
void
NSGigE::rxReset()
{
    DPRINTF(Ethernet, "receive reset\n");

    CRDD = false;
    assert(rxPktBytes == 0);
    rxEnable = false;
    rxFragPtr = 0;
    assert(rxDescCnt == 0);
    assert(rxDmaState == dmaIdle);
    rxFifo.clear();
    rxState = rxIdle;
}

/**
 * Reset the device register file to its power-on values (per the
 * DP83820 spec sheet) and clear the cached receive-filter flags.
 */
void
NSGigE::regsReset()
{
    memset(&regs, 0, sizeof(regs));
    regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
    regs.mear = 0x12;
    regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
                        // fill threshold to 32 bytes
    regs.rxcfg = 0x4; // set drain threshold to 16 bytes
    regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
    regs.mibc = MIBC_FRZ;
    regs.vdr = 0x81; // set the vlan tag type to 802.1q
    regs.tesr = 0xc000; // TBI capable of both full and half duplex
    regs.brar = 0xffffffff;

    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}

/**
 * Start (or defer) an rx-side DMA read of rxDmaLen bytes from rxDmaAddr
 * into rxDmaData.  If another DMA is pending or the device isn't
 * Running, the request parks in dmaReadWaiting to be retried later.
 *
 * @return Always true (a read was started or queued).
 */
bool
NSGigE::doRxDmaRead()
{
    assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
    rxDmaState = dmaReading;

    if (dmaPending() || getState() != Running)
        rxDmaState = dmaReadWaiting;
    else
        dmaRead(rxDmaAddr, rxDmaLen, &rxDmaReadEvent, (uint8_t*)rxDmaData);

    return true;
}

/**
 * Completion callback for an rx DMA read: return the DMA engine to
 * idle and advance the state machines.
 */
void
NSGigE::rxDmaReadDone()
{
    assert(rxDmaState == dmaReading);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}

/**
 * Start (or defer) an rx-side DMA write of rxDmaLen bytes from
 * rxDmaData to rxDmaAddr; parks in dmaWriteWaiting if the engine is
 * busy or the device isn't Running.
 *
 * @return Always true (a write was started or queued).
 */
bool
NSGigE::doRxDmaWrite()
{
    assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
    rxDmaState = dmaWriting;

    if (dmaPending() || getState() != Running)
        rxDmaState = dmaWriteWaiting;
    else
        dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaWriteEvent, (uint8_t*)rxDmaData);
    return true;
}
1419void 1420NSGigE::rxDmaWriteDone() 1421{ 1422 assert(rxDmaState == dmaWriting); 1423 rxDmaState = dmaIdle; 1424 1425 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n", 1426 rxDmaAddr, rxDmaLen); 1427 DDUMP(EthernetDMA, rxDmaData, rxDmaLen); 1428 1429 // If the transmit state machine has a pending DMA, let it go first 1430 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting) 1431 txKick(); 1432 1433 rxKick(); 1434} 1435 1436void 1437NSGigE::rxKick() 1438{ 1439 bool is64bit = (bool)(regs.config & CFGR_M64ADDR); 1440 1441 DPRINTF(EthernetSM, 1442 "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n", 1443 NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32); 1444 1445 Addr link, bufptr; 1446 uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts; 1447 uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts; 1448 1449 next: 1450 if (clock) { 1451 if (rxKickTick > curTick) { 1452 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n", 1453 rxKickTick); 1454 1455 goto exit; 1456 } 1457 1458 // Go to the next state machine clock tick. 1459 rxKickTick = curTick + cycles(1); 1460 } 1461 1462 switch(rxDmaState) { 1463 case dmaReadWaiting: 1464 if (doRxDmaRead()) 1465 goto exit; 1466 break; 1467 case dmaWriteWaiting: 1468 if (doRxDmaWrite()) 1469 goto exit; 1470 break; 1471 default: 1472 break; 1473 } 1474 1475 link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link; 1476 bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr; 1477 1478 // see state machine from spec for details 1479 // the way this works is, if you finish work on one state and can 1480 // go directly to another, you do that through jumping to the 1481 // label "next". however, if you have intermediate work, like DMA 1482 // so that you can't go to the next state yet, you go to exit and 1483 // exit the loop. however, when the DMA is done it will trigger 1484 // an event and come back to this loop. 
1485 switch (rxState) { 1486 case rxIdle: 1487 if (!rxEnable) { 1488 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n"); 1489 goto exit; 1490 } 1491 1492 if (CRDD) { 1493 rxState = rxDescRefr; 1494 1495 rxDmaAddr = regs.rxdp & 0x3fffffff; 1496 rxDmaData = 1497 is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link; 1498 rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link); 1499 rxDmaFree = dmaDescFree; 1500 1501 descDmaReads++; 1502 descDmaRdBytes += rxDmaLen; 1503 1504 if (doRxDmaRead()) 1505 goto exit; 1506 } else { 1507 rxState = rxDescRead; 1508 1509 rxDmaAddr = regs.rxdp & 0x3fffffff; 1510 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32; 1511 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32); 1512 rxDmaFree = dmaDescFree; 1513 1514 descDmaReads++; 1515 descDmaRdBytes += rxDmaLen; 1516 1517 if (doRxDmaRead()) 1518 goto exit; 1519 } 1520 break; 1521 1522 case rxDescRefr: 1523 if (rxDmaState != dmaIdle) 1524 goto exit; 1525 1526 rxState = rxAdvance; 1527 break; 1528 1529 case rxDescRead: 1530 if (rxDmaState != dmaIdle) 1531 goto exit; 1532 1533 DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n", 1534 regs.rxdp & 0x3fffffff); 1535 DPRINTF(EthernetDesc, 1536 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n", 1537 link, bufptr, cmdsts, extsts); 1538 1539 if (cmdsts & CMDSTS_OWN) { 1540 devIntrPost(ISR_RXIDLE); 1541 rxState = rxIdle; 1542 goto exit; 1543 } else { 1544 rxState = rxFifoBlock; 1545 rxFragPtr = bufptr; 1546 rxDescCnt = cmdsts & CMDSTS_LEN_MASK; 1547 } 1548 break; 1549 1550 case rxFifoBlock: 1551 if (!rxPacket) { 1552 /** 1553 * @todo in reality, we should be able to start processing 1554 * the packet as it arrives, and not have to wait for the 1555 * full packet ot be in the receive fifo. 1556 */ 1557 if (rxFifo.empty()) 1558 goto exit; 1559 1560 DPRINTF(EthernetSM, "****processing receive of new packet****\n"); 1561 1562 // If we don't have a packet, grab a new one from the fifo. 
1563 rxPacket = rxFifo.front(); 1564 rxPktBytes = rxPacket->length; 1565 rxPacketBufPtr = rxPacket->data; 1566 1567#if TRACING_ON 1568 if (DTRACE(Ethernet)) { 1569 IpPtr ip(rxPacket); 1570 if (ip) { 1571 DPRINTF(Ethernet, "ID is %d\n", ip->id()); 1572 TcpPtr tcp(ip); 1573 if (tcp) { 1574 DPRINTF(Ethernet, 1575 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n", 1576 tcp->sport(), tcp->dport(), tcp->seq(), 1577 tcp->ack()); 1578 } 1579 } 1580 } 1581#endif 1582 1583 // sanity check - i think the driver behaves like this 1584 assert(rxDescCnt >= rxPktBytes); 1585 rxFifo.pop(); 1586 } 1587 1588 1589 // dont' need the && rxDescCnt > 0 if driver sanity check 1590 // above holds 1591 if (rxPktBytes > 0) { 1592 rxState = rxFragWrite; 1593 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity 1594 // check holds 1595 rxXferLen = rxPktBytes; 1596 1597 rxDmaAddr = rxFragPtr & 0x3fffffff; 1598 rxDmaData = rxPacketBufPtr; 1599 rxDmaLen = rxXferLen; 1600 rxDmaFree = dmaDataFree; 1601 1602 if (doRxDmaWrite()) 1603 goto exit; 1604 1605 } else { 1606 rxState = rxDescWrite; 1607 1608 //if (rxPktBytes == 0) { /* packet is done */ 1609 assert(rxPktBytes == 0); 1610 DPRINTF(EthernetSM, "done with receiving packet\n"); 1611 1612 cmdsts |= CMDSTS_OWN; 1613 cmdsts &= ~CMDSTS_MORE; 1614 cmdsts |= CMDSTS_OK; 1615 cmdsts &= 0xffff0000; 1616 cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE 1617 1618#if 0 1619 /* 1620 * all the driver uses these are for its own stats keeping 1621 * which we don't care about, aren't necessary for 1622 * functionality and doing this would just slow us down. 
1623 * if they end up using this in a later version for 1624 * functional purposes, just undef 1625 */ 1626 if (rxFilterEnable) { 1627 cmdsts &= ~CMDSTS_DEST_MASK; 1628 const EthAddr &dst = rxFifoFront()->dst(); 1629 if (dst->unicast()) 1630 cmdsts |= CMDSTS_DEST_SELF; 1631 if (dst->multicast()) 1632 cmdsts |= CMDSTS_DEST_MULTI; 1633 if (dst->broadcast()) 1634 cmdsts |= CMDSTS_DEST_MASK; 1635 } 1636#endif 1637 1638 IpPtr ip(rxPacket); 1639 if (extstsEnable && ip) { 1640 extsts |= EXTSTS_IPPKT; 1641 rxIpChecksums++; 1642 if (cksum(ip) != 0) { 1643 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n"); 1644 extsts |= EXTSTS_IPERR; 1645 } 1646 TcpPtr tcp(ip); 1647 UdpPtr udp(ip); 1648 if (tcp) { 1649 extsts |= EXTSTS_TCPPKT; 1650 rxTcpChecksums++; 1651 if (cksum(tcp) != 0) { 1652 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n"); 1653 extsts |= EXTSTS_TCPERR; 1654 1655 } 1656 } else if (udp) { 1657 extsts |= EXTSTS_UDPPKT; 1658 rxUdpChecksums++; 1659 if (cksum(udp) != 0) { 1660 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n"); 1661 extsts |= EXTSTS_UDPERR; 1662 } 1663 } 1664 } 1665 rxPacket = 0; 1666 1667 /* 1668 * the driver seems to always receive into desc buffers 1669 * of size 1514, so you never have a pkt that is split 1670 * into multiple descriptors on the receive side, so 1671 * i don't implement that case, hence the assert above. 
1672 */ 1673 1674 DPRINTF(EthernetDesc, 1675 "rxDesc: addr=%08x writeback cmdsts extsts\n", 1676 regs.rxdp & 0x3fffffff); 1677 DPRINTF(EthernetDesc, 1678 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n", 1679 link, bufptr, cmdsts, extsts); 1680 1681 rxDmaAddr = regs.rxdp & 0x3fffffff; 1682 rxDmaData = &cmdsts; 1683 if (is64bit) { 1684 rxDmaAddr += offsetof(ns_desc64, cmdsts); 1685 rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts); 1686 } else { 1687 rxDmaAddr += offsetof(ns_desc32, cmdsts); 1688 rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts); 1689 } 1690 rxDmaFree = dmaDescFree; 1691 1692 descDmaWrites++; 1693 descDmaWrBytes += rxDmaLen; 1694 1695 if (doRxDmaWrite()) 1696 goto exit; 1697 } 1698 break; 1699 1700 case rxFragWrite: 1701 if (rxDmaState != dmaIdle) 1702 goto exit; 1703 1704 rxPacketBufPtr += rxXferLen; 1705 rxFragPtr += rxXferLen; 1706 rxPktBytes -= rxXferLen; 1707 1708 rxState = rxFifoBlock; 1709 break; 1710 1711 case rxDescWrite: 1712 if (rxDmaState != dmaIdle) 1713 goto exit; 1714 1715 assert(cmdsts & CMDSTS_OWN); 1716 1717 assert(rxPacket == 0); 1718 devIntrPost(ISR_RXOK); 1719 1720 if (cmdsts & CMDSTS_INTR) 1721 devIntrPost(ISR_RXDESC); 1722 1723 if (!rxEnable) { 1724 DPRINTF(EthernetSM, "Halting the RX state machine\n"); 1725 rxState = rxIdle; 1726 goto exit; 1727 } else 1728 rxState = rxAdvance; 1729 break; 1730 1731 case rxAdvance: 1732 if (link == 0) { 1733 devIntrPost(ISR_RXIDLE); 1734 rxState = rxIdle; 1735 CRDD = true; 1736 goto exit; 1737 } else { 1738 if (rxDmaState != dmaIdle) 1739 goto exit; 1740 rxState = rxDescRead; 1741 regs.rxdp = link; 1742 CRDD = false; 1743 1744 rxDmaAddr = regs.rxdp & 0x3fffffff; 1745 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32; 1746 rxDmaLen = is64bit ? 
sizeof(rxDesc64) : sizeof(rxDesc32); 1747 rxDmaFree = dmaDescFree; 1748 1749 if (doRxDmaRead()) 1750 goto exit; 1751 } 1752 break; 1753 1754 default: 1755 panic("Invalid rxState!"); 1756 } 1757 1758 DPRINTF(EthernetSM, "entering next rxState=%s\n", 1759 NsRxStateStrings[rxState]); 1760 goto next; 1761 1762 exit: 1763 /** 1764 * @todo do we want to schedule a future kick? 1765 */ 1766 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n", 1767 NsRxStateStrings[rxState]); 1768 1769 if (clock && !rxKickEvent.scheduled()) 1770 rxKickEvent.schedule(rxKickTick); 1771} 1772 1773void 1774NSGigE::transmit() 1775{ 1776 if (txFifo.empty()) { 1777 DPRINTF(Ethernet, "nothing to transmit\n"); 1778 return; 1779 } 1780 1781 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n", 1782 txFifo.size()); 1783 if (interface->sendPacket(txFifo.front())) { 1784#if TRACING_ON 1785 if (DTRACE(Ethernet)) { 1786 IpPtr ip(txFifo.front()); 1787 if (ip) { 1788 DPRINTF(Ethernet, "ID is %d\n", ip->id()); 1789 TcpPtr tcp(ip); 1790 if (tcp) { 1791 DPRINTF(Ethernet, 1792 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n", 1793 tcp->sport(), tcp->dport(), tcp->seq(), 1794 tcp->ack()); 1795 } 1796 } 1797 } 1798#endif 1799 1800 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length); 1801 txBytes += txFifo.front()->length; 1802 txPackets++; 1803 1804 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n", 1805 txFifo.avail()); 1806 txFifo.pop(); 1807 1808 /* 1809 * normally do a writeback of the descriptor here, and ONLY 1810 * after that is done, send this interrupt. but since our 1811 * stuff never actually fails, just do this interrupt here, 1812 * otherwise the code has to stray from this nice format. 1813 * besides, it's functionally the same. 
1814 */ 1815 devIntrPost(ISR_TXOK); 1816 } 1817 1818 if (!txFifo.empty() && !txEvent.scheduled()) { 1819 DPRINTF(Ethernet, "reschedule transmit\n"); 1820 txEvent.schedule(curTick + retryTime); 1821 } 1822} 1823 1824bool 1825NSGigE::doTxDmaRead() 1826{ 1827 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting); 1828 txDmaState = dmaReading; 1829 1830 if (dmaPending() || getState() != Running) 1831 txDmaState = dmaReadWaiting; 1832 else 1833 dmaRead(txDmaAddr, txDmaLen, &txDmaReadEvent, (uint8_t*)txDmaData); 1834 1835 return true; 1836} 1837 1838void 1839NSGigE::txDmaReadDone() 1840{ 1841 assert(txDmaState == dmaReading); 1842 txDmaState = dmaIdle; 1843 1844 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n", 1845 txDmaAddr, txDmaLen); 1846 DDUMP(EthernetDMA, txDmaData, txDmaLen); 1847 1848 // If the receive state machine has a pending DMA, let it go first 1849 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting) 1850 rxKick(); 1851 1852 txKick(); 1853} 1854 1855bool 1856NSGigE::doTxDmaWrite() 1857{ 1858 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting); 1859 txDmaState = dmaWriting; 1860 1861 if (dmaPending() || getState() != Running) 1862 txDmaState = dmaWriteWaiting; 1863 else 1864 dmaWrite(txDmaAddr, txDmaLen, &txDmaWriteEvent, (uint8_t*)txDmaData); 1865 return true; 1866} 1867 1868void 1869NSGigE::txDmaWriteDone() 1870{ 1871 assert(txDmaState == dmaWriting); 1872 txDmaState = dmaIdle; 1873 1874 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n", 1875 txDmaAddr, txDmaLen); 1876 DDUMP(EthernetDMA, txDmaData, txDmaLen); 1877 1878 // If the receive state machine has a pending DMA, let it go first 1879 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting) 1880 rxKick(); 1881 1882 txKick(); 1883} 1884 1885void 1886NSGigE::txKick() 1887{ 1888 bool is64bit = (bool)(regs.config & CFGR_M64ADDR); 1889 1890 DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n", 1891 NsTxStateStrings[txState], is64bit ? 
64 : 32); 1892 1893 Addr link, bufptr; 1894 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts; 1895 uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts; 1896 1897 next: 1898 if (clock) { 1899 if (txKickTick > curTick) { 1900 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n", 1901 txKickTick); 1902 goto exit; 1903 } 1904 1905 // Go to the next state machine clock tick. 1906 txKickTick = curTick + cycles(1); 1907 } 1908 1909 switch(txDmaState) { 1910 case dmaReadWaiting: 1911 if (doTxDmaRead()) 1912 goto exit; 1913 break; 1914 case dmaWriteWaiting: 1915 if (doTxDmaWrite()) 1916 goto exit; 1917 break; 1918 default: 1919 break; 1920 } 1921 1922 link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link; 1923 bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr; 1924 switch (txState) { 1925 case txIdle: 1926 if (!txEnable) { 1927 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n"); 1928 goto exit; 1929 } 1930 1931 if (CTDD) { 1932 txState = txDescRefr; 1933 1934 txDmaAddr = regs.txdp & 0x3fffffff; 1935 txDmaData = 1936 is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link; 1937 txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link); 1938 txDmaFree = dmaDescFree; 1939 1940 descDmaReads++; 1941 descDmaRdBytes += txDmaLen; 1942 1943 if (doTxDmaRead()) 1944 goto exit; 1945 1946 } else { 1947 txState = txDescRead; 1948 1949 txDmaAddr = regs.txdp & 0x3fffffff; 1950 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32; 1951 txDmaLen = is64bit ? 
sizeof(txDesc64) : sizeof(txDesc32); 1952 txDmaFree = dmaDescFree; 1953 1954 descDmaReads++; 1955 descDmaRdBytes += txDmaLen; 1956 1957 if (doTxDmaRead()) 1958 goto exit; 1959 } 1960 break; 1961 1962 case txDescRefr: 1963 if (txDmaState != dmaIdle) 1964 goto exit; 1965 1966 txState = txAdvance; 1967 break; 1968 1969 case txDescRead: 1970 if (txDmaState != dmaIdle) 1971 goto exit; 1972 1973 DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n", 1974 regs.txdp & 0x3fffffff); 1975 DPRINTF(EthernetDesc, 1976 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n", 1977 link, bufptr, cmdsts, extsts); 1978 1979 if (cmdsts & CMDSTS_OWN) { 1980 txState = txFifoBlock; 1981 txFragPtr = bufptr; 1982 txDescCnt = cmdsts & CMDSTS_LEN_MASK; 1983 } else { 1984 devIntrPost(ISR_TXIDLE); 1985 txState = txIdle; 1986 goto exit; 1987 } 1988 break; 1989 1990 case txFifoBlock: 1991 if (!txPacket) { 1992 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n"); 1993 txPacket = new EthPacketData(16384); 1994 txPacketBufPtr = txPacket->data; 1995 } 1996 1997 if (txDescCnt == 0) { 1998 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n"); 1999 if (cmdsts & CMDSTS_MORE) { 2000 DPRINTF(EthernetSM, "there are more descriptors to come\n"); 2001 txState = txDescWrite; 2002 2003 cmdsts &= ~CMDSTS_OWN; 2004 2005 txDmaAddr = regs.txdp & 0x3fffffff; 2006 txDmaData = &cmdsts; 2007 if (is64bit) { 2008 txDmaAddr += offsetof(ns_desc64, cmdsts); 2009 txDmaLen = sizeof(txDesc64.cmdsts); 2010 } else { 2011 txDmaAddr += offsetof(ns_desc32, cmdsts); 2012 txDmaLen = sizeof(txDesc32.cmdsts); 2013 } 2014 txDmaFree = dmaDescFree; 2015 2016 if (doTxDmaWrite()) 2017 goto exit; 2018 2019 } else { /* this packet is totally done */ 2020 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n"); 2021 /* deal with the the packet that just finished */ 2022 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) { 2023 IpPtr ip(txPacket); 2024 if (extsts & EXTSTS_UDPPKT) { 2025 UdpPtr udp(ip); 2026 
udp->sum(0); 2027 udp->sum(cksum(udp)); 2028 txUdpChecksums++; 2029 } else if (extsts & EXTSTS_TCPPKT) { 2030 TcpPtr tcp(ip); 2031 tcp->sum(0); 2032 tcp->sum(cksum(tcp)); 2033 txTcpChecksums++; 2034 } 2035 if (extsts & EXTSTS_IPPKT) { 2036 ip->sum(0); 2037 ip->sum(cksum(ip)); 2038 txIpChecksums++; 2039 } 2040 } 2041 2042 txPacket->length = txPacketBufPtr - txPacket->data; 2043 // this is just because the receive can't handle a 2044 // packet bigger want to make sure 2045 if (txPacket->length > 1514) 2046 panic("transmit packet too large, %s > 1514\n", 2047 txPacket->length); 2048 2049#ifndef NDEBUG 2050 bool success = 2051#endif 2052 txFifo.push(txPacket); 2053 assert(success); 2054 2055 /* 2056 * this following section is not tqo spec, but 2057 * functionally shouldn't be any different. normally, 2058 * the chip will wait til the transmit has occurred 2059 * before writing back the descriptor because it has 2060 * to wait to see that it was successfully transmitted 2061 * to decide whether to set CMDSTS_OK or not. 
2062 * however, in the simulator since it is always 2063 * successfully transmitted, and writing it exactly to 2064 * spec would complicate the code, we just do it here 2065 */ 2066 2067 cmdsts &= ~CMDSTS_OWN; 2068 cmdsts |= CMDSTS_OK; 2069 2070 DPRINTF(EthernetDesc, 2071 "txDesc writeback: cmdsts=%08x extsts=%08x\n", 2072 cmdsts, extsts); 2073 2074 txDmaFree = dmaDescFree; 2075 txDmaAddr = regs.txdp & 0x3fffffff; 2076 txDmaData = &cmdsts; 2077 if (is64bit) { 2078 txDmaAddr += offsetof(ns_desc64, cmdsts); 2079 txDmaLen = 2080 sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts); 2081 } else { 2082 txDmaAddr += offsetof(ns_desc32, cmdsts); 2083 txDmaLen = 2084 sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts); 2085 } 2086 2087 descDmaWrites++; 2088 descDmaWrBytes += txDmaLen; 2089 2090 transmit(); 2091 txPacket = 0; 2092 2093 if (!txEnable) { 2094 DPRINTF(EthernetSM, "halting TX state machine\n"); 2095 txState = txIdle; 2096 goto exit; 2097 } else 2098 txState = txAdvance; 2099 2100 if (doTxDmaWrite()) 2101 goto exit; 2102 } 2103 } else { 2104 DPRINTF(EthernetSM, "this descriptor isn't done yet\n"); 2105 if (!txFifo.full()) { 2106 txState = txFragRead; 2107 2108 /* 2109 * The number of bytes transferred is either whatever 2110 * is left in the descriptor (txDescCnt), or if there 2111 * is not enough room in the fifo, just whatever room 2112 * is left in the fifo 2113 */ 2114 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail()); 2115 2116 txDmaAddr = txFragPtr & 0x3fffffff; 2117 txDmaData = txPacketBufPtr; 2118 txDmaLen = txXferLen; 2119 txDmaFree = dmaDataFree; 2120 2121 if (doTxDmaRead()) 2122 goto exit; 2123 } else { 2124 txState = txFifoBlock; 2125 transmit(); 2126 2127 goto exit; 2128 } 2129 2130 } 2131 break; 2132 2133 case txFragRead: 2134 if (txDmaState != dmaIdle) 2135 goto exit; 2136 2137 txPacketBufPtr += txXferLen; 2138 txFragPtr += txXferLen; 2139 txDescCnt -= txXferLen; 2140 txFifo.reserve(txXferLen); 2141 2142 txState = txFifoBlock; 2143 break; 2144 2145 
case txDescWrite: 2146 if (txDmaState != dmaIdle) 2147 goto exit; 2148 2149 if (cmdsts & CMDSTS_INTR) 2150 devIntrPost(ISR_TXDESC); 2151 2152 if (!txEnable) { 2153 DPRINTF(EthernetSM, "halting TX state machine\n"); 2154 txState = txIdle; 2155 goto exit; 2156 } else 2157 txState = txAdvance; 2158 break; 2159 2160 case txAdvance: 2161 if (link == 0) { 2162 devIntrPost(ISR_TXIDLE); 2163 txState = txIdle; 2164 goto exit; 2165 } else { 2166 if (txDmaState != dmaIdle) 2167 goto exit; 2168 txState = txDescRead; 2169 regs.txdp = link; 2170 CTDD = false; 2171 2172 txDmaAddr = link & 0x3fffffff; 2173 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32; 2174 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32); 2175 txDmaFree = dmaDescFree; 2176 2177 if (doTxDmaRead()) 2178 goto exit; 2179 } 2180 break; 2181 2182 default: 2183 panic("invalid state"); 2184 } 2185 2186 DPRINTF(EthernetSM, "entering next txState=%s\n", 2187 NsTxStateStrings[txState]); 2188 goto next; 2189 2190 exit: 2191 /** 2192 * @todo do we want to schedule a future kick? 2193 */ 2194 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n", 2195 NsTxStateStrings[txState]); 2196 2197 if (clock && !txKickEvent.scheduled()) 2198 txKickEvent.schedule(txKickTick); 2199} 2200 2201/** 2202 * Advance the EEPROM state machine 2203 * Called on rising edge of EEPROM clock bit in MEAR 2204 */ 2205void 2206NSGigE::eepromKick() 2207{ 2208 switch (eepromState) { 2209 2210 case eepromStart: 2211 2212 // Wait for start bit 2213 if (regs.mear & MEAR_EEDI) { 2214 // Set up to get 2 opcode bits 2215 eepromState = eepromGetOpcode; 2216 eepromBitsToRx = 2; 2217 eepromOpcode = 0; 2218 } 2219 break; 2220 2221 case eepromGetOpcode: 2222 eepromOpcode <<= 1; 2223 eepromOpcode += (regs.mear & MEAR_EEDI) ? 
1 : 0; 2224 --eepromBitsToRx; 2225 2226 // Done getting opcode 2227 if (eepromBitsToRx == 0) { 2228 if (eepromOpcode != EEPROM_READ) 2229 panic("only EEPROM reads are implemented!"); 2230 2231 // Set up to get address 2232 eepromState = eepromGetAddress; 2233 eepromBitsToRx = 6; 2234 eepromAddress = 0; 2235 } 2236 break; 2237 2238 case eepromGetAddress: 2239 eepromAddress <<= 1; 2240 eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0; 2241 --eepromBitsToRx; 2242 2243 // Done getting address 2244 if (eepromBitsToRx == 0) { 2245 2246 if (eepromAddress >= EEPROM_SIZE) 2247 panic("EEPROM read access out of range!"); 2248 2249 switch (eepromAddress) { 2250 2251 case EEPROM_PMATCH2_ADDR: 2252 eepromData = rom.perfectMatch[5]; 2253 eepromData <<= 8; 2254 eepromData += rom.perfectMatch[4]; 2255 break; 2256 2257 case EEPROM_PMATCH1_ADDR: 2258 eepromData = rom.perfectMatch[3]; 2259 eepromData <<= 8; 2260 eepromData += rom.perfectMatch[2]; 2261 break; 2262 2263 case EEPROM_PMATCH0_ADDR: 2264 eepromData = rom.perfectMatch[1]; 2265 eepromData <<= 8; 2266 eepromData += rom.perfectMatch[0]; 2267 break; 2268 2269 default: 2270 panic("FreeBSD driver only uses EEPROM to read PMATCH!"); 2271 } 2272 // Set up to read data 2273 eepromState = eepromRead; 2274 eepromBitsToRx = 16; 2275 2276 // Clear data in bit 2277 regs.mear &= ~MEAR_EEDI; 2278 } 2279 break; 2280 2281 case eepromRead: 2282 // Clear Data Out bit 2283 regs.mear &= ~MEAR_EEDO; 2284 // Set bit to value of current EEPROM bit 2285 regs.mear |= (eepromData & 0x8000) ? 
MEAR_EEDO : 0x0; 2286 2287 eepromData <<= 1; 2288 --eepromBitsToRx; 2289 2290 // All done 2291 if (eepromBitsToRx == 0) { 2292 eepromState = eepromStart; 2293 } 2294 break; 2295 2296 default: 2297 panic("invalid EEPROM state"); 2298 } 2299 2300} 2301 2302void 2303NSGigE::transferDone() 2304{ 2305 if (txFifo.empty()) { 2306 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n"); 2307 return; 2308 } 2309 2310 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n"); 2311 2312 if (txEvent.scheduled()) 2313 txEvent.reschedule(curTick + cycles(1)); 2314 else 2315 txEvent.schedule(curTick + cycles(1)); 2316} 2317 2318bool 2319NSGigE::rxFilter(const EthPacketPtr &packet) 2320{ 2321 EthPtr eth = packet; 2322 bool drop = true; 2323 string type; 2324 2325 const EthAddr &dst = eth->dst(); 2326 if (dst.unicast()) { 2327 // If we're accepting all unicast addresses 2328 if (acceptUnicast) 2329 drop = false; 2330 2331 // If we make a perfect match 2332 if (acceptPerfect && dst == rom.perfectMatch) 2333 drop = false; 2334 2335 if (acceptArp && eth->type() == ETH_TYPE_ARP) 2336 drop = false; 2337 2338 } else if (dst.broadcast()) { 2339 // if we're accepting broadcasts 2340 if (acceptBroadcast) 2341 drop = false; 2342 2343 } else if (dst.multicast()) { 2344 // if we're accepting all multicasts 2345 if (acceptMulticast) 2346 drop = false; 2347 2348 // Multicast hashing faked - all packets accepted 2349 if (multicastHashEnable) 2350 drop = false; 2351 } 2352 2353 if (drop) { 2354 DPRINTF(Ethernet, "rxFilter drop\n"); 2355 DDUMP(EthernetData, packet->data, packet->length); 2356 } 2357 2358 return drop; 2359} 2360 2361bool 2362NSGigE::recvPacket(EthPacketPtr packet) 2363{ 2364 rxBytes += packet->length; 2365 rxPackets++; 2366 2367 DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n", 2368 rxFifo.avail()); 2369 2370 if (!rxEnable) { 2371 DPRINTF(Ethernet, "receive disabled...packet dropped\n"); 2372 return true; 2373 } 2374 2375 if 
(!rxFilterEnable) { 2376 DPRINTF(Ethernet, 2377 "receive packet filtering disabled . . . packet dropped\n"); 2378 return true; 2379 } 2380 2381 if (rxFilter(packet)) { 2382 DPRINTF(Ethernet, "packet filtered...dropped\n"); 2383 return true; 2384 } 2385 2386 if (rxFifo.avail() < packet->length) { 2387#if TRACING_ON 2388 IpPtr ip(packet); 2389 TcpPtr tcp(ip); 2390 if (ip) { 2391 DPRINTF(Ethernet, 2392 "packet won't fit in receive buffer...pkt ID %d dropped\n", 2393 ip->id()); 2394 if (tcp) { 2395 DPRINTF(Ethernet, "Seq=%d\n", tcp->seq()); 2396 } 2397 } 2398#endif 2399 droppedPackets++; 2400 devIntrPost(ISR_RXORN); 2401 return false; 2402 } 2403 2404 rxFifo.push(packet); 2405 2406 rxKick(); 2407 return true; 2408} 2409 2410 2411void 2412NSGigE::resume() 2413{ 2414 SimObject::resume(); 2415 2416 // During drain we could have left the state machines in a waiting state and 2417 // they wouldn't get out until some other event occured to kick them. 2418 // This way they'll get out immediately 2419 txKick(); 2420 rxKick(); 2421} 2422 2423 2424//===================================================================== 2425// 2426// 2427void 2428NSGigE::serialize(ostream &os) 2429{ 2430 // Serialize the PciDev base class 2431 PciDev::serialize(os); 2432 2433 /* 2434 * Finalize any DMA events now. 2435 */ 2436 // @todo will mem system save pending dma? 
2437 2438 /* 2439 * Serialize the device registers 2440 */ 2441 SERIALIZE_SCALAR(regs.command); 2442 SERIALIZE_SCALAR(regs.config); 2443 SERIALIZE_SCALAR(regs.mear); 2444 SERIALIZE_SCALAR(regs.ptscr); 2445 SERIALIZE_SCALAR(regs.isr); 2446 SERIALIZE_SCALAR(regs.imr); 2447 SERIALIZE_SCALAR(regs.ier); 2448 SERIALIZE_SCALAR(regs.ihr); 2449 SERIALIZE_SCALAR(regs.txdp); 2450 SERIALIZE_SCALAR(regs.txdp_hi); 2451 SERIALIZE_SCALAR(regs.txcfg); 2452 SERIALIZE_SCALAR(regs.gpior); 2453 SERIALIZE_SCALAR(regs.rxdp); 2454 SERIALIZE_SCALAR(regs.rxdp_hi); 2455 SERIALIZE_SCALAR(regs.rxcfg); 2456 SERIALIZE_SCALAR(regs.pqcr); 2457 SERIALIZE_SCALAR(regs.wcsr); 2458 SERIALIZE_SCALAR(regs.pcr); 2459 SERIALIZE_SCALAR(regs.rfcr); 2460 SERIALIZE_SCALAR(regs.rfdr); 2461 SERIALIZE_SCALAR(regs.brar); 2462 SERIALIZE_SCALAR(regs.brdr); 2463 SERIALIZE_SCALAR(regs.srr); 2464 SERIALIZE_SCALAR(regs.mibc); 2465 SERIALIZE_SCALAR(regs.vrcr); 2466 SERIALIZE_SCALAR(regs.vtcr); 2467 SERIALIZE_SCALAR(regs.vdr); 2468 SERIALIZE_SCALAR(regs.ccsr); 2469 SERIALIZE_SCALAR(regs.tbicr); 2470 SERIALIZE_SCALAR(regs.tbisr); 2471 SERIALIZE_SCALAR(regs.tanar); 2472 SERIALIZE_SCALAR(regs.tanlpar); 2473 SERIALIZE_SCALAR(regs.taner); 2474 SERIALIZE_SCALAR(regs.tesr); 2475 2476 SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN); 2477 SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE); 2478 2479 SERIALIZE_SCALAR(ioEnable); 2480 2481 /* 2482 * Serialize the data Fifos 2483 */ 2484 rxFifo.serialize("rxFifo", os); 2485 txFifo.serialize("txFifo", os); 2486 2487 /* 2488 * Serialize the various helper variables 2489 */ 2490 bool txPacketExists = txPacket; 2491 SERIALIZE_SCALAR(txPacketExists); 2492 if (txPacketExists) { 2493 txPacket->length = txPacketBufPtr - txPacket->data; 2494 txPacket->serialize("txPacket", os); 2495 uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data); 2496 SERIALIZE_SCALAR(txPktBufPtr); 2497 } 2498 2499 bool rxPacketExists = rxPacket; 2500 SERIALIZE_SCALAR(rxPacketExists); 2501 if (rxPacketExists) { 
2502 rxPacket->serialize("rxPacket", os); 2503 uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data); 2504 SERIALIZE_SCALAR(rxPktBufPtr); 2505 } 2506 2507 SERIALIZE_SCALAR(txXferLen); 2508 SERIALIZE_SCALAR(rxXferLen); 2509 2510 /* 2511 * Serialize Cached Descriptors 2512 */ 2513 SERIALIZE_SCALAR(rxDesc64.link); 2514 SERIALIZE_SCALAR(rxDesc64.bufptr); 2515 SERIALIZE_SCALAR(rxDesc64.cmdsts); 2516 SERIALIZE_SCALAR(rxDesc64.extsts); 2517 SERIALIZE_SCALAR(txDesc64.link); 2518 SERIALIZE_SCALAR(txDesc64.bufptr); 2519 SERIALIZE_SCALAR(txDesc64.cmdsts); 2520 SERIALIZE_SCALAR(txDesc64.extsts); 2521 SERIALIZE_SCALAR(rxDesc32.link); 2522 SERIALIZE_SCALAR(rxDesc32.bufptr); 2523 SERIALIZE_SCALAR(rxDesc32.cmdsts); 2524 SERIALIZE_SCALAR(rxDesc32.extsts); 2525 SERIALIZE_SCALAR(txDesc32.link); 2526 SERIALIZE_SCALAR(txDesc32.bufptr); 2527 SERIALIZE_SCALAR(txDesc32.cmdsts); 2528 SERIALIZE_SCALAR(txDesc32.extsts); 2529 SERIALIZE_SCALAR(extstsEnable); 2530 2531 /* 2532 * Serialize tx state machine 2533 */ 2534 int txState = this->txState; 2535 SERIALIZE_SCALAR(txState); 2536 SERIALIZE_SCALAR(txEnable); 2537 SERIALIZE_SCALAR(CTDD); 2538 SERIALIZE_SCALAR(txFragPtr); 2539 SERIALIZE_SCALAR(txDescCnt); 2540 int txDmaState = this->txDmaState; 2541 SERIALIZE_SCALAR(txDmaState); 2542 SERIALIZE_SCALAR(txKickTick); 2543 2544 /* 2545 * Serialize rx state machine 2546 */ 2547 int rxState = this->rxState; 2548 SERIALIZE_SCALAR(rxState); 2549 SERIALIZE_SCALAR(rxEnable); 2550 SERIALIZE_SCALAR(CRDD); 2551 SERIALIZE_SCALAR(rxPktBytes); 2552 SERIALIZE_SCALAR(rxFragPtr); 2553 SERIALIZE_SCALAR(rxDescCnt); 2554 int rxDmaState = this->rxDmaState; 2555 SERIALIZE_SCALAR(rxDmaState); 2556 SERIALIZE_SCALAR(rxKickTick); 2557 2558 /* 2559 * Serialize EEPROM state machine 2560 */ 2561 int eepromState = this->eepromState; 2562 SERIALIZE_SCALAR(eepromState); 2563 SERIALIZE_SCALAR(eepromClk); 2564 SERIALIZE_SCALAR(eepromBitsToRx); 2565 SERIALIZE_SCALAR(eepromOpcode); 2566 SERIALIZE_SCALAR(eepromAddress); 
2567 SERIALIZE_SCALAR(eepromData); 2568 2569 /* 2570 * If there's a pending transmit, store the time so we can 2571 * reschedule it later 2572 */ 2573 Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0; 2574 SERIALIZE_SCALAR(transmitTick); 2575 2576 /* 2577 * receive address filter settings 2578 */ 2579 SERIALIZE_SCALAR(rxFilterEnable); 2580 SERIALIZE_SCALAR(acceptBroadcast); 2581 SERIALIZE_SCALAR(acceptMulticast); 2582 SERIALIZE_SCALAR(acceptUnicast); 2583 SERIALIZE_SCALAR(acceptPerfect); 2584 SERIALIZE_SCALAR(acceptArp); 2585 SERIALIZE_SCALAR(multicastHashEnable); 2586 2587 /* 2588 * Keep track of pending interrupt status. 2589 */ 2590 SERIALIZE_SCALAR(intrTick); 2591 SERIALIZE_SCALAR(cpuPendingIntr); 2592 Tick intrEventTick = 0; 2593 if (intrEvent) 2594 intrEventTick = intrEvent->when(); 2595 SERIALIZE_SCALAR(intrEventTick); 2596 2597} 2598 2599void 2600NSGigE::unserialize(Checkpoint *cp, const std::string §ion) 2601{ 2602 // Unserialize the PciDev base class 2603 PciDev::unserialize(cp, section); 2604 2605 UNSERIALIZE_SCALAR(regs.command); 2606 UNSERIALIZE_SCALAR(regs.config); 2607 UNSERIALIZE_SCALAR(regs.mear); 2608 UNSERIALIZE_SCALAR(regs.ptscr); 2609 UNSERIALIZE_SCALAR(regs.isr); 2610 UNSERIALIZE_SCALAR(regs.imr); 2611 UNSERIALIZE_SCALAR(regs.ier); 2612 UNSERIALIZE_SCALAR(regs.ihr); 2613 UNSERIALIZE_SCALAR(regs.txdp); 2614 UNSERIALIZE_SCALAR(regs.txdp_hi); 2615 UNSERIALIZE_SCALAR(regs.txcfg); 2616 UNSERIALIZE_SCALAR(regs.gpior); 2617 UNSERIALIZE_SCALAR(regs.rxdp); 2618 UNSERIALIZE_SCALAR(regs.rxdp_hi); 2619 UNSERIALIZE_SCALAR(regs.rxcfg); 2620 UNSERIALIZE_SCALAR(regs.pqcr); 2621 UNSERIALIZE_SCALAR(regs.wcsr); 2622 UNSERIALIZE_SCALAR(regs.pcr); 2623 UNSERIALIZE_SCALAR(regs.rfcr); 2624 UNSERIALIZE_SCALAR(regs.rfdr); 2625 UNSERIALIZE_SCALAR(regs.brar); 2626 UNSERIALIZE_SCALAR(regs.brdr); 2627 UNSERIALIZE_SCALAR(regs.srr); 2628 UNSERIALIZE_SCALAR(regs.mibc); 2629 UNSERIALIZE_SCALAR(regs.vrcr); 2630 UNSERIALIZE_SCALAR(regs.vtcr); 2631 
UNSERIALIZE_SCALAR(regs.vdr); 2632 UNSERIALIZE_SCALAR(regs.ccsr); 2633 UNSERIALIZE_SCALAR(regs.tbicr); 2634 UNSERIALIZE_SCALAR(regs.tbisr); 2635 UNSERIALIZE_SCALAR(regs.tanar); 2636 UNSERIALIZE_SCALAR(regs.tanlpar); 2637 UNSERIALIZE_SCALAR(regs.taner); 2638 UNSERIALIZE_SCALAR(regs.tesr); 2639 2640 UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN); 2641 UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE); 2642 2643 UNSERIALIZE_SCALAR(ioEnable); 2644 2645 /* 2646 * unserialize the data fifos 2647 */ 2648 rxFifo.unserialize("rxFifo", cp, section); 2649 txFifo.unserialize("txFifo", cp, section); 2650 2651 /* 2652 * unserialize the various helper variables 2653 */ 2654 bool txPacketExists; 2655 UNSERIALIZE_SCALAR(txPacketExists); 2656 if (txPacketExists) { 2657 txPacket = new EthPacketData(16384); 2658 txPacket->unserialize("txPacket", cp, section); 2659 uint32_t txPktBufPtr; 2660 UNSERIALIZE_SCALAR(txPktBufPtr); 2661 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr; 2662 } else 2663 txPacket = 0; 2664 2665 bool rxPacketExists; 2666 UNSERIALIZE_SCALAR(rxPacketExists); 2667 rxPacket = 0; 2668 if (rxPacketExists) { 2669 rxPacket = new EthPacketData(16384); 2670 rxPacket->unserialize("rxPacket", cp, section); 2671 uint32_t rxPktBufPtr; 2672 UNSERIALIZE_SCALAR(rxPktBufPtr); 2673 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr; 2674 } else 2675 rxPacket = 0; 2676 2677 UNSERIALIZE_SCALAR(txXferLen); 2678 UNSERIALIZE_SCALAR(rxXferLen); 2679 2680 /* 2681 * Unserialize Cached Descriptors 2682 */ 2683 UNSERIALIZE_SCALAR(rxDesc64.link); 2684 UNSERIALIZE_SCALAR(rxDesc64.bufptr); 2685 UNSERIALIZE_SCALAR(rxDesc64.cmdsts); 2686 UNSERIALIZE_SCALAR(rxDesc64.extsts); 2687 UNSERIALIZE_SCALAR(txDesc64.link); 2688 UNSERIALIZE_SCALAR(txDesc64.bufptr); 2689 UNSERIALIZE_SCALAR(txDesc64.cmdsts); 2690 UNSERIALIZE_SCALAR(txDesc64.extsts); 2691 UNSERIALIZE_SCALAR(rxDesc32.link); 2692 UNSERIALIZE_SCALAR(rxDesc32.bufptr); 2693 UNSERIALIZE_SCALAR(rxDesc32.cmdsts); 2694 
    UNSERIALIZE_SCALAR(rxDesc32.extsts);
    UNSERIALIZE_SCALAR(txDesc32.link);
    UNSERIALIZE_SCALAR(txDesc32.bufptr);
    UNSERIALIZE_SCALAR(txDesc32.cmdsts);
    UNSERIALIZE_SCALAR(txDesc32.extsts);
    UNSERIALIZE_SCALAR(extstsEnable);

    /*
     * unserialize tx state machine
     */
    // State machine enums were written as plain ints (see serialize);
    // read them back into a named int and cast to the enum type.
    int txState;
    UNSERIALIZE_SCALAR(txState);
    this->txState = (TxState) txState;
    UNSERIALIZE_SCALAR(txEnable);
    UNSERIALIZE_SCALAR(CTDD);
    UNSERIALIZE_SCALAR(txFragPtr);
    UNSERIALIZE_SCALAR(txDescCnt);
    int txDmaState;
    UNSERIALIZE_SCALAR(txDmaState);
    this->txDmaState = (DmaState) txDmaState;
    UNSERIALIZE_SCALAR(txKickTick);
    // A zero kick tick means the kick event was not pending at
    // checkpoint time, so nothing is rescheduled.
    if (txKickTick)
        txKickEvent.schedule(txKickTick);

    /*
     * unserialize rx state machine
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    this->rxState = (RxState) rxState;
    UNSERIALIZE_SCALAR(rxEnable);
    UNSERIALIZE_SCALAR(CRDD);
    UNSERIALIZE_SCALAR(rxPktBytes);
    UNSERIALIZE_SCALAR(rxFragPtr);
    UNSERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState;
    UNSERIALIZE_SCALAR(rxDmaState);
    this->rxDmaState = (DmaState) rxDmaState;
    UNSERIALIZE_SCALAR(rxKickTick);
    if (rxKickTick)
        rxKickEvent.schedule(rxKickTick);

    /*
     * Unserialize EEPROM state machine
     */
    int eepromState;
    UNSERIALIZE_SCALAR(eepromState);
    this->eepromState = (EEPROMState) eepromState;
    UNSERIALIZE_SCALAR(eepromClk);
    UNSERIALIZE_SCALAR(eepromBitsToRx);
    UNSERIALIZE_SCALAR(eepromOpcode);
    UNSERIALIZE_SCALAR(eepromAddress);
    UNSERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, reschedule it now
     */
    // transmitTick was saved as a delta from the checkpoint's curTick,
    // so re-anchor it against the current tick (0 == not pending).
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        txEvent.schedule(curTick + transmitTick);

    /*
     * unserialize receive address filter settings
     */
    UNSERIALIZE_SCALAR(rxFilterEnable);
    UNSERIALIZE_SCALAR(acceptBroadcast);
    UNSERIALIZE_SCALAR(acceptMulticast);
    UNSERIALIZE_SCALAR(acceptUnicast);
    UNSERIALIZE_SCALAR(acceptPerfect);
    UNSERIALIZE_SCALAR(acceptArp);
    UNSERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    // intrEventTick is an absolute tick; nonzero means an interrupt
    // event was in flight at checkpoint time, so recreate and
    // reschedule it.
    UNSERIALIZE_SCALAR(intrTick);
    UNSERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick;
    UNSERIALIZE_SCALAR(intrEventTick);
    if (intrEventTick) {
        intrEvent = new IntrEvent(this, true);
        intrEvent->schedule(intrEventTick);
    }
}

//
// Simulator-object glue for the NSGigE Ethernet interface object:
// parameter declaration, parameter initialization, factory, and
// registration (macros from sim/builder.hh).
//
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    SimObjectParam<EtherInt *> peer;
    SimObjectParam<NSGigE *> device;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

CREATE_SIM_OBJECT(NSGigEInt)
{
    NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);

    // If a peer interface was configured, hook the two ends of the
    // link to each other.
    EtherInt *p = (EtherInt *)peer;
    if (p) {
        dev_int->setPeer(p);
        p->setPeer(dev_int);
    }

    return dev_int;
}

REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)


//
// Parameter declarations for the NSGigE device itself.
//
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    SimObjectParam<System *> system;
    SimObjectParam<Platform *> platform;
    SimObjectParam<PciConfigData *> configdata;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;
    Param<Tick> pio_latency;
    Param<Tick> config_latency;

    Param<Tick> clock;
    Param<bool> dma_desc_free;
    Param<bool> dma_data_free;
    Param<Tick> dma_read_delay;
    Param<Tick> dma_write_delay;
    Param<Tick> dma_read_factor;
    Param<Tick> dma_write_factor;
    Param<bool> dma_no_allocate;
    Param<Tick> intr_delay;

    Param<Tick> rx_delay;
    Param<Tick> tx_delay;
    Param<uint32_t> rx_fifo_size;
    Param<uint32_t>
tx_fifo_size; 2835 2836 Param<bool> rx_filter; 2837 Param<string> hardware_address; 2838 Param<bool> rx_thread; 2839 Param<bool> tx_thread; 2840 Param<bool> rss; 2841 2842END_DECLARE_SIM_OBJECT_PARAMS(NSGigE) 2843 2844BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE) 2845 2846 INIT_PARAM(system, "System pointer"), 2847 INIT_PARAM(platform, "Platform pointer"), 2848 INIT_PARAM(configdata, "PCI Config data"), 2849 INIT_PARAM(pci_bus, "PCI bus ID"), 2850 INIT_PARAM(pci_dev, "PCI device number"), 2851 INIT_PARAM(pci_func, "PCI function code"), 2852 INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1), 2853 INIT_PARAM(config_latency, "Number of cycles for a config read or write"), 2854 INIT_PARAM(clock, "State machine cycle time"), 2855 2856 INIT_PARAM(dma_desc_free, "DMA of Descriptors is free"), 2857 INIT_PARAM(dma_data_free, "DMA of Data is free"), 2858 INIT_PARAM(dma_read_delay, "fixed delay for dma reads"), 2859 INIT_PARAM(dma_write_delay, "fixed delay for dma writes"), 2860 INIT_PARAM(dma_read_factor, "multiplier for dma reads"), 2861 INIT_PARAM(dma_write_factor, "multiplier for dma writes"), 2862 INIT_PARAM(dma_no_allocate, "Should DMA reads allocate cache lines"), 2863 INIT_PARAM(intr_delay, "Interrupt Delay in microseconds"), 2864 2865 INIT_PARAM(rx_delay, "Receive Delay"), 2866 INIT_PARAM(tx_delay, "Transmit Delay"), 2867 INIT_PARAM(rx_fifo_size, "max size in bytes of rxFifo"), 2868 INIT_PARAM(tx_fifo_size, "max size in bytes of txFifo"), 2869 2870 INIT_PARAM(rx_filter, "Enable Receive Filter"), 2871 INIT_PARAM(hardware_address, "Ethernet Hardware Address"), 2872 INIT_PARAM(rx_thread, ""), 2873 INIT_PARAM(tx_thread, ""), 2874 INIT_PARAM(rss, "") 2875 2876END_INIT_SIM_OBJECT_PARAMS(NSGigE) 2877 2878 2879CREATE_SIM_OBJECT(NSGigE) 2880{ 2881 NSGigE::Params *params = new NSGigE::Params; 2882 2883 params->name = getInstanceName(); 2884 params->platform = platform; 2885 params->system = system; 2886 params->configData = configdata; 2887 params->busNum = 
pci_bus; 2888 params->deviceNum = pci_dev; 2889 params->functionNum = pci_func; 2890 params->pio_delay = pio_latency; 2891 params->config_delay = config_latency; 2892 2893 params->clock = clock; 2894 params->dma_desc_free = dma_desc_free; 2895 params->dma_data_free = dma_data_free; 2896 params->dma_read_delay = dma_read_delay; 2897 params->dma_write_delay = dma_write_delay; 2898 params->dma_read_factor = dma_read_factor; 2899 params->dma_write_factor = dma_write_factor; 2900 params->dma_no_allocate = dma_no_allocate; 2901 params->pio_delay = pio_latency; 2902 params->intr_delay = intr_delay; 2903 2904 params->rx_delay = rx_delay; 2905 params->tx_delay = tx_delay; 2906 params->rx_fifo_size = rx_fifo_size; 2907 params->tx_fifo_size = tx_fifo_size; 2908 2909 params->rx_filter = rx_filter; 2910 params->eaddr = hardware_address; 2911 params->rx_thread = rx_thread; 2912 params->tx_thread = tx_thread; 2913 params->rss = rss; 2914 2915 return new NSGigE(params); 2916} 2917 2918REGISTER_SIM_OBJECT("NSGigE", NSGigE) 2919