// ns_gige.cc, revision 5484
1/* 2 * Copyright (c) 2004-2005 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 * 28 * Authors: Nathan Binkert 29 * Lisa Hsu 30 */ 31 32/** @file 33 * Device module for modelling the National Semiconductor 34 * DP83820 ethernet controller. 
 * Does not support priority queueing
 */

#include <deque>
#include <string>

#include "base/inet.hh"
#include "cpu/thread_context.hh"
#include "dev/etherlink.hh"
#include "dev/ns_gige.hh"
#include "dev/pciconfigall.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/NSGigE.hh"
#include "sim/debug.hh"
#include "sim/host.hh"
#include "sim/stats.hh"
#include "sim/system.hh"

// Printable names for the receive state machine states (for DPRINTF output).
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};

// Printable names for the transmit state machine states (for DPRINTF output).
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};

// Printable names for the DMA engine states (for DPRINTF output).
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};

using namespace std;
using namespace Net;
using namespace TheISA;

///////////////////////////////////////////////////////////////////////
//
// NSGigE PCI Device
//

/**
 * Construct the device model.  Most state is simply zeroed / disabled
 * here; the real register defaults are established by regsReset().
 */
NSGigE::NSGigE(Params *p)
    : EtherDevice(p), ioEnable(false),
      txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), rxDmaFree(false), txDmaFree(false),
      clock(p->clock),
      txState(txIdle), txEnable(false), CTDD(false), txHalt(false),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      rxEnable(false), CRDD(false), rxPktBytes(0), rxHalt(false),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      eepromState(eepromStart), eepromClk(false), eepromBitsToRx(0),
      eepromOpcode(0), eepromAddress(0), eepromData(0),
      dmaReadDelay(p->dma_read_delay), dmaWriteDelay(p->dma_write_delay),
      dmaReadFactor(p->dma_read_factor), dmaWriteFactor(p->dma_write_factor),
      rxDmaData(NULL), rxDmaAddr(0), rxDmaLen(0),
      txDmaData(NULL), txDmaAddr(0), txDmaLen(0),
      rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
      txDelay(p->tx_delay), rxDelay(p->rx_delay),
      rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this),
      txEvent(this), rxFilterEnable(p->rx_filter),
      acceptBroadcast(false), acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
      intrDelay(p->intr_delay), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{
    interface = new NSGigEInt(name() + ".int0", this);

    regsReset();
    // Seed the perfect-match filter ROM with the configured MAC address.
    memcpy(&rom.perfectMatch, p->hardware_address.bytes(), ETH_ADDR_LEN);

    memset(&rxDesc32, 0, sizeof(rxDesc32));
    memset(&txDesc32, 0, sizeof(txDesc32));
    memset(&rxDesc64, 0, sizeof(rxDesc64));
    memset(&txDesc64, 0, sizeof(txDesc64));
}

NSGigE::~NSGigE()
{}

/**
 * Register all device statistics with the gem5 Stats package.  A stat
 * with .prereq(X) is only printed when X is nonzero; derived formula
 * stats (rates, averages) are bound at the end of this function.
 */
void
NSGigE::regStats()
{
    txBytes.name(name() + ".txBytes").desc("Bytes Transmitted")
        .prereq(txBytes);

    rxBytes.name(name() + ".rxBytes").desc("Bytes Received")
        .prereq(rxBytes);

    txPackets.name(name() + ".txPackets")
        .desc("Number of Packets Transmitted").prereq(txBytes);

    rxPackets.name(name() + ".rxPackets")
        .desc("Number of Packets Received").prereq(rxBytes);

    txIpChecksums.name(name() + ".txIpChecksums")
        .desc("Number of tx IP Checksums done by device")
        .precision(0).prereq(txBytes);

    rxIpChecksums.name(name() + ".rxIpChecksums")
        .desc("Number of rx IP Checksums done by device")
        .precision(0).prereq(rxBytes);

    txTcpChecksums.name(name() + ".txTcpChecksums")
        .desc("Number of tx TCP Checksums done by device")
        .precision(0).prereq(txBytes);

    rxTcpChecksums.name(name() + ".rxTcpChecksums")
        .desc("Number of rx TCP Checksums done by device")
        .precision(0).prereq(rxBytes);

    txUdpChecksums.name(name() + ".txUdpChecksums")
        .desc("Number of tx UDP Checksums done by device")
        .precision(0).prereq(txBytes);

    rxUdpChecksums.name(name() + ".rxUdpChecksums")
        .desc("Number of rx UDP Checksums done by device")
        .precision(0).prereq(rxBytes);

    descDmaReads.name(name() + ".descDMAReads")
        .desc("Number of descriptors the device read w/ DMA")
        .precision(0);

    descDmaWrites.name(name() + ".descDMAWrites")
        .desc("Number of descriptors the device wrote w/ DMA")
        .precision(0);

    descDmaRdBytes.name(name() + ".descDmaReadBytes")
        .desc("number of descriptor bytes read w/ DMA")
        .precision(0);

    descDmaWrBytes.name(name() + ".descDmaWriteBytes")
        .desc("number of descriptor bytes write w/ DMA")
        .precision(0);

    txBandwidth.name(name() + ".txBandwidth")
        .desc("Transmit Bandwidth (bits/s)")
        .precision(0).prereq(txBytes);

    rxBandwidth.name(name() + ".rxBandwidth")
        .desc("Receive Bandwidth (bits/s)")
        .precision(0).prereq(rxBytes);

    totBandwidth.name(name() + ".totBandwidth")
        .desc("Total Bandwidth (bits/s)")
        .precision(0).prereq(totBytes);

    totPackets.name(name() + ".totPackets")
        .desc("Total Packets")
        .precision(0).prereq(totBytes);

    totBytes.name(name() + ".totBytes")
        .desc("Total Bytes")
        .precision(0).prereq(totBytes);

    totPacketRate.name(name() + ".totPPS")
        .desc("Total Tranmission Rate (packets/s)")
        .precision(0).prereq(totBytes);

    txPacketRate.name(name() + ".txPPS")
        .desc("Packet Tranmission Rate (packets/s)")
        .precision(0).prereq(txBytes);

    rxPacketRate.name(name() + ".rxPPS")
        .desc("Packet Reception Rate (packets/s)")
        .precision(0).prereq(rxBytes);

    // Interrupt coalescing bookkeeping: "total*" counts writes to the
    // ISR, "posted*" counts interrupts actually delivered to the CPU,
    // and "coalesced*" is the ratio (bound as a formula below).
    postedSwi.name(name() + ".postedSwi")
        .desc("number of software interrupts posted to CPU")
        .precision(0);

    totalSwi.name(name() + ".totalSwi")
        .desc("total number of Swi written to ISR")
        .precision(0);

    coalescedSwi.name(name() + ".coalescedSwi")
        .desc("average number of Swi's coalesced into each post")
        .precision(0);

    postedRxIdle.name(name() + ".postedRxIdle")
        .desc("number of rxIdle interrupts posted to CPU")
        .precision(0);

    totalRxIdle.name(name() + ".totalRxIdle")
        .desc("total number of RxIdle written to ISR")
        .precision(0);

    coalescedRxIdle.name(name() + ".coalescedRxIdle")
        .desc("average number of RxIdle's coalesced into each post")
        .precision(0);

    postedRxOk.name(name() + ".postedRxOk")
        .desc("number of RxOk interrupts posted to CPU")
        .precision(0);

    totalRxOk.name(name() + ".totalRxOk")
        .desc("total number of RxOk written to ISR")
        .precision(0);

    coalescedRxOk.name(name() + ".coalescedRxOk")
        .desc("average number of RxOk's coalesced into each post")
        .precision(0);

    postedRxDesc.name(name() + ".postedRxDesc")
        .desc("number of RxDesc interrupts posted to CPU")
        .precision(0);

    totalRxDesc.name(name() + ".totalRxDesc")
        .desc("total number of RxDesc written to ISR")
        .precision(0);

    coalescedRxDesc.name(name() + ".coalescedRxDesc")
        .desc("average number of RxDesc's coalesced into each post")
        .precision(0);

    postedTxOk.name(name() + ".postedTxOk")
        .desc("number of TxOk interrupts posted to CPU")
        .precision(0);

    totalTxOk.name(name() + ".totalTxOk")
        .desc("total number of TxOk written to ISR")
        .precision(0);

    coalescedTxOk.name(name() + ".coalescedTxOk")
        .desc("average number of TxOk's coalesced into each post")
        .precision(0);

    postedTxIdle.name(name() + ".postedTxIdle")
        .desc("number of TxIdle interrupts posted to CPU")
        .precision(0);

    totalTxIdle.name(name() + ".totalTxIdle")
        .desc("total number of TxIdle written to ISR")
        .precision(0);

    coalescedTxIdle.name(name() + ".coalescedTxIdle")
        .desc("average number of TxIdle's coalesced into each post")
        .precision(0);

    postedTxDesc.name(name() + ".postedTxDesc")
        .desc("number of TxDesc interrupts posted to CPU")
        .precision(0);

    totalTxDesc.name(name() + ".totalTxDesc")
        .desc("total number of TxDesc written to ISR")
        .precision(0);

    coalescedTxDesc.name(name() + ".coalescedTxDesc")
        .desc("average number of TxDesc's coalesced into each post")
        .precision(0);

    postedRxOrn.name(name() + ".postedRxOrn")
        .desc("number of RxOrn posted to CPU")
        .precision(0);

    totalRxOrn.name(name() + ".totalRxOrn")
        .desc("total number of RxOrn written to ISR")
        .precision(0);

    coalescedRxOrn.name(name() + ".coalescedRxOrn")
        .desc("average number of RxOrn's coalesced into each post")
        .precision(0);

    coalescedTotal.name(name() + ".coalescedTotal")
        .desc("average number of interrupts coalesced into each post")
        .precision(0);

    postedInterrupts.name(name() + ".postedInterrupts")
        .desc("number of posts to CPU")
        .precision(0);

    droppedPackets.name(name() + ".droppedPackets")
        .desc("number of packets dropped")
        .precision(0);

    // Derived formula stats: evaluated at dump time from the counters above.
    coalescedSwi = totalSwi / postedInterrupts;
    coalescedRxIdle = totalRxIdle / postedInterrupts;
    coalescedRxOk = totalRxOk / postedInterrupts;
    coalescedRxDesc = totalRxDesc / postedInterrupts;
    coalescedTxOk = totalTxOk / postedInterrupts;
    coalescedTxIdle = totalTxIdle / postedInterrupts;
    coalescedTxDesc = totalTxDesc / postedInterrupts;
    coalescedRxOrn = totalRxOrn / postedInterrupts;

    coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc +
                      totalTxOk + totalTxIdle + totalTxDesc +
                      totalRxOrn) / postedInterrupts;

    txBandwidth = txBytes * Stats::constant(8) / simSeconds;
    rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
    totBandwidth = txBandwidth + rxBandwidth;
    totBytes = txBytes + rxBytes;
    totPackets = txPackets + rxPackets;

    txPacketRate = txPackets / simSeconds;
    rxPacketRate = rxPackets / simSeconds;
}


/**
 * This is to write to the PCI general configuration registers
 */
Tick
NSGigE::writeConfig(PacketPtr pkt)
{
    int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::writeConfig(pkt);
    else
        panic("Device specific PCI config space not implemented!\n");

    switch (offset) {
        // seems to work fine without all these PCI settings, but i
        // put in the IO to double check, an assertion will fail if we
        // need to properly implement it
      case PCI_COMMAND:
        // Track the I/O-space-enable bit so read()/write() can assert on it.
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;
        break;
    }

    return configDelay;
}

/**
 * Return the device's single ethernet port; NULL for any other name.
 * Panics if the port has already been connected to a peer.
 */
EtherInt*
NSGigE::getEthPort(const std::string &if_name, int idx)
{
    if (if_name == "interface") {
        if (interface->getPeer())
            panic("interface already connected to\n");
        return interface;
    }
    return NULL;
}

/**
 * This reads the device registers, which are detailed in the NS83820
 * spec sheet
 */
Tick
NSGigE::read(PacketPtr pkt)
{
    assert(ioEnable);

    pkt->allocate();

    //The mask is to give you only the offset into the device register file
    Addr daddr = pkt->getAddr() & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x size=%d\n",
            daddr, pkt->getAddr(), pkt->getSize());

    // there are some reserved registers, you can see ns_gige_reg.h and
    // the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        return readConfig(pkt);
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's.  hopefully the kernel
        // doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        pkt->set<uint32_t>(0);
        pkt->makeAtomicResponse();
        return pioDelay;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    assert(pkt->getSize() == sizeof(uint32_t));
    // Write the result directly into the response packet's payload.
    uint32_t &reg = *pkt->getPtr<uint32_t>();
    uint16_t rfaddr;

    switch (daddr) {
      case CR:
        reg = regs.command;
        //these are supposed to be cleared on a read
        reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
        break;

      case CFGR:
        reg = regs.config;
        break;

      case MEAR:
        reg = regs.mear;
        break;

      case PTSCR:
        reg = regs.ptscr;
        break;

      case ISR:
        reg = regs.isr;
        // reading the ISR acknowledges (clears) all implemented interrupts
        devIntrClear(ISR_ALL);
        break;

      case IMR:
        reg = regs.imr;
        break;

      case IER:
        reg = regs.ier;
        break;

      case IHR:
        reg = regs.ihr;
        break;

      case TXDP:
        reg = regs.txdp;
        break;

      case TXDP_HI:
        reg = regs.txdp_hi;
        break;

      case TX_CFG:
        reg = regs.txcfg;
        break;

      case GPIOR:
        reg = regs.gpior;
        break;

      case RXDP:
        reg = regs.rxdp;
        break;

      case RXDP_HI:
        reg = regs.rxdp_hi;
        break;

      case RX_CFG:
        reg = regs.rxcfg;
        break;

      case PQCR:
        reg = regs.pqcr;
        break;

      case WCSR:
        reg = regs.wcsr;
        break;

      case PCR:
        reg = regs.pcr;
        break;

        // see the spec sheet for how RFCR and RFDR work
        // basically, you write to RFCR to tell the machine
        // what you want to do next, then you act upon RFDR,
        // and the device will be prepared b/c of what you
        // wrote to RFCR
      case RFCR:
        reg = regs.rfcr;
        break;

      case RFDR:
        rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
        switch (rfaddr) {
            // Read from perfect match ROM octets
          case 0x000:
            reg = rom.perfectMatch[1];
            reg = reg << 8;
            reg += rom.perfectMatch[0];
            break;
          case 0x002:
            reg = rom.perfectMatch[3] << 8;
            reg += rom.perfectMatch[2];
            break;
          case 0x004:
            reg = rom.perfectMatch[5] << 8;
            reg += rom.perfectMatch[4];
            break;
          default:
            // Read filter hash table
            if (rfaddr >= FHASH_ADDR &&
                rfaddr < FHASH_ADDR + FHASH_SIZE) {

                // Only word-aligned reads supported
                if (rfaddr % 2)
                    panic("unaligned read from filter hash table!");

                reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8;
                reg += rom.filterHash[rfaddr - FHASH_ADDR];
                break;
            }

            panic("reading RFDR for something other than pattern"
                  " matching or hashing! %#x\n", rfaddr);
        }
        break;

      case SRR:
        reg = regs.srr;
        break;

      case MIBC:
        reg = regs.mibc;
        reg &= ~(MIBC_MIBS | MIBC_ACLR);
        break;

      case VRCR:
        reg = regs.vrcr;
        break;

      case VTCR:
        reg = regs.vtcr;
        break;

      case VDR:
        reg = regs.vdr;
        break;

      case CCSR:
        reg = regs.ccsr;
        break;

      case TBICR:
        reg = regs.tbicr;
        break;

      case TBISR:
        reg = regs.tbisr;
        break;

      case TANAR:
        reg = regs.tanar;
        break;

      case TANLPAR:
        reg = regs.tanlpar;
        break;

      case TANER:
        reg = regs.taner;
        break;

      case TESR:
        reg = regs.tesr;
        break;

      case M5REG:
        // Simulator-private register advertising model configuration.
        reg = 0;
        if (params()->rx_thread)
            reg |= M5REG_RX_THREAD;
        if (params()->tx_thread)
            reg |= M5REG_TX_THREAD;
        if (params()->rss)
            reg |= M5REG_RSS;
        break;

      default:
        panic("reading unimplemented register: addr=%#x", daddr);
    }

    DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
            daddr, reg, reg);

    pkt->makeAtomicResponse();
    return pioDelay;
}

Tick
NSGigE::write(PacketPtr pkt)
{
    assert(ioEnable);

    Addr daddr = pkt->getAddr() & 0xfff;
    DPRINTF(EthernetPIO, "write da=%#x pa=%#x size=%d\n",
            daddr, pkt->getAddr(), pkt->getSize());

    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        return writeConfig(pkt);
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    if (pkt->getSize() == sizeof(uint32_t)) {
        uint32_t reg = pkt->get<uint32_t>();
        uint16_t rfaddr;

        DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);

        switch (daddr) {
          case CR:
            regs.command = reg;
            if (reg & CR_TXD) {
                txEnable = false;
            } else if (reg & CR_TXE) {
                txEnable = true;

                // the kernel is enabling the transmit machine
                if
                   (txState == txIdle)
                    txKick();
            }

            if (reg & CR_RXD) {
                rxEnable = false;
            } else if (reg & CR_RXE) {
                rxEnable = true;

                if (rxState == rxIdle)
                    rxKick();
            }

            if (reg & CR_TXR)
                txReset();

            if (reg & CR_RXR)
                rxReset();

            if (reg & CR_SWI)
                devIntrPost(ISR_SWI);

            if (reg & CR_RST) {
                txReset();
                rxReset();

                regsReset();
            }
            break;

          case CFGR:
            // NOTE(review): this if has no braces, so it guards ONLY the
            // "clear writable bits" statement below; the "set" statement
            // always runs -- looks intentional but worth confirming.
            if (reg & CFGR_LNKSTS ||
                reg & CFGR_SPDSTS ||
                reg & CFGR_DUPSTS ||
                reg & CFGR_RESERVED ||
                reg & CFGR_T64ADDR ||
                reg & CFGR_PCI64_DET)

                // First clear all writable bits
                regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
                    CFGR_RESERVED | CFGR_T64ADDR |
                    CFGR_PCI64_DET;
            // Now set the appropriate writable bits
            regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
                                   CFGR_RESERVED | CFGR_T64ADDR |
                                   CFGR_PCI64_DET);

// all these #if 0's are because i don't THINK the kernel needs to
// have these implemented. if there is a problem relating to one of
// these, you may need to add functionality in.
            if (reg & CFGR_TBI_EN) ;
            if (reg & CFGR_MODE_1000) ;

            if (reg & CFGR_AUTO_1000)
                panic("CFGR_AUTO_1000 not implemented!\n");

            if (reg & CFGR_PINT_DUPSTS ||
                reg & CFGR_PINT_LNKSTS ||
                reg & CFGR_PINT_SPDSTS)
                ;

            if (reg & CFGR_TMRTEST) ;
            if (reg & CFGR_MRM_DIS) ;
            if (reg & CFGR_MWI_DIS) ;

            if (reg & CFGR_T64ADDR) ;
            // panic("CFGR_T64ADDR is read only register!\n");

            if (reg & CFGR_PCI64_DET)
                panic("CFGR_PCI64_DET is read only register!\n");

            if (reg & CFGR_DATA64_EN) ;
            if (reg & CFGR_M64ADDR) ;
            if (reg & CFGR_PHY_RST) ;
            if (reg & CFGR_PHY_DIS) ;

            if (reg & CFGR_EXTSTS_EN)
                extstsEnable = true;
            else
                extstsEnable = false;

            if (reg & CFGR_REQALG) ;
            if (reg & CFGR_SB) ;
            if (reg & CFGR_POW) ;
            if (reg & CFGR_EXD) ;
            if (reg & CFGR_PESEL) ;
            if (reg & CFGR_BROM_DIS) ;
            if (reg & CFGR_EXT_125) ;
            if (reg & CFGR_BEM) ;
            break;

          case MEAR:
            // Clear writable bits
            regs.mear &= MEAR_EEDO;
            // Set appropriate writable bits
            regs.mear |= reg & ~MEAR_EEDO;

            // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
            // even though it could get it through RFDR
            if (reg & MEAR_EESEL) {
                // Rising edge of clock
                if (reg & MEAR_EECLK && !eepromClk)
                    eepromKick();
            }
            else {
                eepromState = eepromStart;
                regs.mear &= ~MEAR_EEDI;
            }

            eepromClk = reg & MEAR_EECLK;

            // since phy is completely faked, MEAR_MD* don't matter
            if (reg & MEAR_MDIO) ;
            if (reg & MEAR_MDDIR) ;
            if (reg & MEAR_MDC) ;
            break;

          case PTSCR:
            regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
            // these control BISTs for various parts of chip - we
            // don't care or do just fake that the BIST is done
            if (reg & PTSCR_RBIST_EN)
                regs.ptscr |= PTSCR_RBIST_DONE;
            if (reg & PTSCR_EEBIST_EN)
                regs.ptscr &= ~PTSCR_EEBIST_EN;
            if (reg & PTSCR_EELOAD_EN)
                regs.ptscr &= ~PTSCR_EELOAD_EN;
            break;

          case ISR: /* writing to the ISR has no effect */
            panic("ISR is a read only register!\n");

          case IMR:
            regs.imr = reg;
            devIntrChangeMask();
            break;

          case IER:
            regs.ier = reg;
            break;

          case IHR:
            regs.ihr = reg;
            /* not going to implement real interrupt holdoff */
            break;

          case TXDP:
            // low two bits are reserved; descriptors are dword aligned
            regs.txdp = (reg & 0xFFFFFFFC);
            assert(txState == txIdle);
            CTDD = false;
            break;

          case TXDP_HI:
            regs.txdp_hi = reg;
            break;

          case TX_CFG:
            regs.txcfg = reg;
#if 0
            if (reg & TX_CFG_CSI) ;
            if (reg & TX_CFG_HBI) ;
            if (reg & TX_CFG_MLB) ;
            if (reg & TX_CFG_ATP) ;
            if (reg & TX_CFG_ECRETRY) {
                /*
                 * this could easily be implemented, but considering
                 * the network is just a fake pipe, wouldn't make
                 * sense to do this
                 */
            }

            if (reg & TX_CFG_BRST_DIS) ;
#endif

#if 0
            /* we handle our own DMA, ignore the kernel's exhortations */
            if (reg & TX_CFG_MXDMA) ;
#endif

            // also, we currently don't care about fill/drain
            // thresholds though this may change in the future with
            // more realistic networks or a driver which changes it
            // according to feedback

            break;

          case GPIOR:
            // Only write writable bits
            regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
                | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN;
            regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
                                  | GPIOR_GP3_IN | GPIOR_GP2_IN
                                  | GPIOR_GP1_IN);
            /* these just control general purpose i/o pins, don't matter */
            break;

          case RXDP:
            regs.rxdp = reg;
            CRDD = false;
            break;

          case RXDP_HI:
            regs.rxdp_hi = reg;
            break;

          case RX_CFG:
            regs.rxcfg = reg;
#if 0
            if (reg & RX_CFG_AEP) ;
            if (reg & RX_CFG_ARP) ;
            if (reg & RX_CFG_STRIPCRC) ;
            if (reg & RX_CFG_RX_RD) ;
            if (reg & RX_CFG_ALP) ;
            if (reg & RX_CFG_AIRL) ;

            /* we handle our own DMA, ignore what kernel says about it */
            if (reg & RX_CFG_MXDMA) ;

            //also, we currently don't care about fill/drain thresholds
            //though this may change in the future with more realistic
            //networks or a driver which changes it according to feedback
            if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
#endif
            break;

          case PQCR:
            /* there is no priority queueing used in the linux 2.6 driver */
            regs.pqcr = reg;
            break;

          case WCSR:
            /* not going to implement wake on LAN */
            regs.wcsr = reg;
            break;

          case PCR:
            /* not going to implement pause control */
            regs.pcr = reg;
            break;

          case RFCR:
            regs.rfcr = reg;

            // mirror the filter-control bits into booleans used by
            // the receive filter
            rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
            acceptBroadcast = (reg & RFCR_AAB) ? true : false;
            acceptMulticast = (reg & RFCR_AAM) ? true : false;
            acceptUnicast = (reg & RFCR_AAU) ? true : false;
            acceptPerfect = (reg & RFCR_APM) ? true : false;
            acceptArp = (reg & RFCR_AARP) ? true : false;
            multicastHashEnable = (reg & RFCR_MHEN) ?
true : false; 1015 1016#if 0 1017 if (reg & RFCR_APAT) 1018 panic("RFCR_APAT not implemented!\n"); 1019#endif 1020 if (reg & RFCR_UHEN) 1021 panic("Unicast hash filtering not used by drivers!\n"); 1022 1023 if (reg & RFCR_ULM) 1024 panic("RFCR_ULM not implemented!\n"); 1025 1026 break; 1027 1028 case RFDR: 1029 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR); 1030 switch (rfaddr) { 1031 case 0x000: 1032 rom.perfectMatch[0] = (uint8_t)reg; 1033 rom.perfectMatch[1] = (uint8_t)(reg >> 8); 1034 break; 1035 case 0x002: 1036 rom.perfectMatch[2] = (uint8_t)reg; 1037 rom.perfectMatch[3] = (uint8_t)(reg >> 8); 1038 break; 1039 case 0x004: 1040 rom.perfectMatch[4] = (uint8_t)reg; 1041 rom.perfectMatch[5] = (uint8_t)(reg >> 8); 1042 break; 1043 default: 1044 1045 if (rfaddr >= FHASH_ADDR && 1046 rfaddr < FHASH_ADDR + FHASH_SIZE) { 1047 1048 // Only word-aligned writes supported 1049 if (rfaddr % 2) 1050 panic("unaligned write to filter hash table!"); 1051 1052 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg; 1053 rom.filterHash[rfaddr - FHASH_ADDR + 1] 1054 = (uint8_t)(reg >> 8); 1055 break; 1056 } 1057 panic("writing RFDR for something other than pattern matching\ 1058 or hashing! 
%#x\n", rfaddr); 1059 } 1060 1061 case BRAR: 1062 regs.brar = reg; 1063 break; 1064 1065 case BRDR: 1066 panic("the driver never uses BRDR, something is wrong!\n"); 1067 1068 case SRR: 1069 panic("SRR is read only register!\n"); 1070 1071 case MIBC: 1072 panic("the driver never uses MIBC, something is wrong!\n"); 1073 1074 case VRCR: 1075 regs.vrcr = reg; 1076 break; 1077 1078 case VTCR: 1079 regs.vtcr = reg; 1080 break; 1081 1082 case VDR: 1083 panic("the driver never uses VDR, something is wrong!\n"); 1084 1085 case CCSR: 1086 /* not going to implement clockrun stuff */ 1087 regs.ccsr = reg; 1088 break; 1089 1090 case TBICR: 1091 regs.tbicr = reg; 1092 if (reg & TBICR_MR_LOOPBACK) 1093 panic("TBICR_MR_LOOPBACK never used, something wrong!\n"); 1094 1095 if (reg & TBICR_MR_AN_ENABLE) { 1096 regs.tanlpar = regs.tanar; 1097 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS); 1098 } 1099 1100#if 0 1101 if (reg & TBICR_MR_RESTART_AN) ; 1102#endif 1103 1104 break; 1105 1106 case TBISR: 1107 panic("TBISR is read only register!\n"); 1108 1109 case TANAR: 1110 // Only write the writable bits 1111 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED; 1112 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED); 1113 1114 // Pause capability unimplemented 1115#if 0 1116 if (reg & TANAR_PS2) ; 1117 if (reg & TANAR_PS1) ; 1118#endif 1119 1120 break; 1121 1122 case TANLPAR: 1123 panic("this should only be written to by the fake phy!\n"); 1124 1125 case TANER: 1126 panic("TANER is read only register!\n"); 1127 1128 case TESR: 1129 regs.tesr = reg; 1130 break; 1131 1132 default: 1133 panic("invalid register access daddr=%#x", daddr); 1134 } 1135 } else { 1136 panic("Invalid Request Size"); 1137 } 1138 pkt->makeAtomicResponse(); 1139 return pioDelay; 1140} 1141 1142void 1143NSGigE::devIntrPost(uint32_t interrupts) 1144{ 1145 if (interrupts & ISR_RESERVE) 1146 panic("Cannot set a reserved interrupt"); 1147 1148 if (interrupts & ISR_NOIMPL) 1149 warn("interrupt not 
implemented %#x\n", interrupts); 1150 1151 interrupts &= ISR_IMPL; 1152 regs.isr |= interrupts; 1153 1154 if (interrupts & regs.imr) { 1155 if (interrupts & ISR_SWI) { 1156 totalSwi++; 1157 } 1158 if (interrupts & ISR_RXIDLE) { 1159 totalRxIdle++; 1160 } 1161 if (interrupts & ISR_RXOK) { 1162 totalRxOk++; 1163 } 1164 if (interrupts & ISR_RXDESC) { 1165 totalRxDesc++; 1166 } 1167 if (interrupts & ISR_TXOK) { 1168 totalTxOk++; 1169 } 1170 if (interrupts & ISR_TXIDLE) { 1171 totalTxIdle++; 1172 } 1173 if (interrupts & ISR_TXDESC) { 1174 totalTxDesc++; 1175 } 1176 if (interrupts & ISR_RXORN) { 1177 totalRxOrn++; 1178 } 1179 } 1180 1181 DPRINTF(EthernetIntr, 1182 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n", 1183 interrupts, regs.isr, regs.imr); 1184 1185 if ((regs.isr & regs.imr)) { 1186 Tick when = curTick; 1187 if ((regs.isr & regs.imr & ISR_NODELAY) == 0) 1188 when += intrDelay; 1189 cpuIntrPost(when); 1190 } 1191} 1192 1193/* writing this interrupt counting stats inside this means that this function 1194 is now limited to being used to clear all interrupts upon the kernel 1195 reading isr and servicing. just telling you in case you were thinking 1196 of expanding use. 
 */
/*
 * Clear the requested interrupt bits in the ISR.  Before clearing,
 * bump the "posted" statistics counters for every source that was
 * both raised and unmasked, then deassert the CPU interrupt line if
 * nothing unmasked remains pending.
 */
void
NSGigE::devIntrClear(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot clear a reserved interrupt");

    // Count each interrupt source that was actually visible to the
    // driver (raised in ISR and enabled in IMR) at the time of the clear.
    if (regs.isr & regs.imr & ISR_SWI) {
        postedSwi++;
    }
    if (regs.isr & regs.imr & ISR_RXIDLE) {
        postedRxIdle++;
    }
    if (regs.isr & regs.imr & ISR_RXOK) {
        postedRxOk++;
    }
    if (regs.isr & regs.imr & ISR_RXDESC) {
        postedRxDesc++;
    }
    if (regs.isr & regs.imr & ISR_TXOK) {
        postedTxOk++;
    }
    if (regs.isr & regs.imr & ISR_TXIDLE) {
        postedTxIdle++;
    }
    if (regs.isr & regs.imr & ISR_TXDESC) {
        postedTxDesc++;
    }
    if (regs.isr & regs.imr & ISR_RXORN) {
        postedRxOrn++;
    }

    if (regs.isr & regs.imr & ISR_IMPL)
        postedInterrupts++;

    // Only implemented bits may actually be cleared.
    interrupts &= ~ISR_NOIMPL;
    regs.isr &= ~interrupts;

    DPRINTF(EthernetIntr,
            "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
            interrupts, regs.isr, regs.imr);

    // Nothing unmasked left pending -> drop the CPU interrupt line.
    if (!(regs.isr & regs.imr))
        cpuIntrClear();
}

/*
 * React to a change of the interrupt mask (IMR): re-post or clear the
 * CPU interrupt depending on whether any unmasked source is pending.
 */
void
NSGigE::devIntrChangeMask()
{
    DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
            regs.isr, regs.imr, regs.isr & regs.imr);

    if (regs.isr & regs.imr)
        cpuIntrPost(curTick);
    else
        cpuIntrClear();
}

/*
 * Schedule delivery of a CPU interrupt at tick 'when'.  If an earlier
 * (or equal) interrupt is already scheduled, the new request is folded
 * into it; otherwise the pending event is squashed and rescheduled.
 */
void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick);
    assert(intrTick >= curTick || intrTick == 0);
    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    if (intrTick < curTick) {
        debug_break();
        intrTick = curTick;
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    if (intrEvent)
        intrEvent->squash();
    // IntrEvent with autodelete=true; fires cpuInterrupt() at intrTick.
    intrEvent = new IntrEvent(this, intrTick, true);
}

/*
 * Event handler: actually raise the interrupt to the CPU (via
 * intrPost()), unless one is already pending.
 */
void
NSGigE::cpuInterrupt()
{
    assert(intrTick == curTick);

    // Whether or not there's a pending interrupt, we don't care about
    // it anymore
    intrEvent = 0;
    intrTick = 0;

    // Don't send an interrupt if there's already one
    if (cpuPendingIntr) {
        DPRINTF(EthernetIntr,
                "would send an interrupt now, but there's already pending\n");
    } else {
        // Send interrupt
        cpuPendingIntr = true;

        DPRINTF(EthernetIntr, "posting interrupt\n");
        intrPost();
    }
}

/*
 * Deassert the CPU interrupt line and cancel any not-yet-fired
 * interrupt event.  No-op if no interrupt is pending.
 */
void
NSGigE::cpuIntrClear()
{
    if (!cpuPendingIntr)
        return;

    if (intrEvent) {
        intrEvent->squash();
        intrEvent = 0;
    }

    intrTick = 0;

    cpuPendingIntr = false;

    DPRINTF(EthernetIntr, "clearing interrupt\n");
    intrClear();
}

// True while an interrupt has been posted to the CPU and not cleared.
bool
NSGigE::cpuIntrPending() const
{ return cpuPendingIntr; }

/*
 * Reset the transmit side: disable TX, drop any buffered data and
 * return the TX state machine to idle.  Asserts that no descriptor or
 * DMA work is in flight.
 */
void
NSGigE::txReset()
{

    DPRINTF(Ethernet, "transmit reset\n");

    CTDD = false;
    txEnable = false;;
    txFragPtr = 0;
    assert(txDescCnt == 0);
    txFifo.clear();
    txState = txIdle;
    assert(txDmaState == dmaIdle);
}

/*
 * Reset the receive side: disable RX, drop any buffered data and
 * return the RX state machine to idle.  Asserts that no packet or
 * DMA work is in flight.
 */
void
NSGigE::rxReset()
{
    DPRINTF(Ethernet, "receive reset\n");

    CRDD = false;
    assert(rxPktBytes == 0);
    rxEnable = false;
    rxFragPtr = 0;
    assert(rxDescCnt == 0);
    assert(rxDmaState == dmaIdle);
    rxFifo.clear();
    rxState = rxIdle;
}

/*
 * Restore the device register file and the receive filter flags to
 * their power-on defaults (values per the DP83820 register spec;
 * see the inline notes).
 */
void
NSGigE::regsReset()
{
    memset(&regs, 0, sizeof(regs));
    regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
    regs.mear = 0x12;
    regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
                        // fill threshold to 32 bytes
    regs.rxcfg = 0x4; // set drain threshold to 16 bytes
    regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
    regs.mibc = MIBC_FRZ;
    regs.vdr = 0x81; // set the vlan tag type to 802.1q
    regs.tesr = 0xc000; // TBI capable of both full and half duplex
    regs.brar = 0xffffffff;

    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}

/*
 * Start (or defer) the DMA read described by rxDmaAddr/rxDmaLen into
 * rxDmaData.  If another DMA is pending or the system isn't Running,
 * park in dmaReadWaiting to be retried from rxKick().
 * Returns true (caller always exits its state machine to wait for
 * the completion event).
 */
bool
NSGigE::doRxDmaRead()
{
    assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
    rxDmaState = dmaReading;

    if (dmaPending() || getState() != Running)
        rxDmaState = dmaReadWaiting;
    else
        dmaRead(rxDmaAddr, rxDmaLen, &rxDmaReadEvent, (uint8_t*)rxDmaData);

    return true;
}

/*
 * Completion handler for an RX descriptor/link DMA read; re-kicks
 * both state machines (TX first if it was stalled waiting on DMA).
 */
void
NSGigE::rxDmaReadDone()
{
    assert(rxDmaState == dmaReading);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}

/*
 * Start (or defer) the DMA write described by rxDmaAddr/rxDmaLen from
 * rxDmaData.  Mirrors doRxDmaRead(); always returns true.
 */
bool
NSGigE::doRxDmaWrite()
{
    assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
    rxDmaState = dmaWriting;

    if (dmaPending() || getState() != Running)
        rxDmaState = dmaWriteWaiting;
    else
        dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaWriteEvent, (uint8_t*)rxDmaData);
    return true;
}

void
NSGigE::rxDmaWriteDone()
{
    // Completion handler for an RX data/descriptor DMA write; re-kicks
    // both state machines (TX first if it was stalled waiting on DMA).
    assert(rxDmaState == dmaWriting);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}

/*
 * Run the receive state machine (states in NsRxStateStrings) until it
 * must block on a DMA transfer, an empty FIFO, or a disabled receiver.
 * Descriptor layout (32- vs 64-bit) is selected by CFGR_M64ADDR.
 */
void
NSGigE::rxKick()
{
    bool is64bit = (bool)(regs.config & CFGR_M64ADDR);

    DPRINTF(EthernetSM,
            "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
            NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32);

    Addr link, bufptr;
    // References into whichever cached descriptor image is in use.
    uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts;
    uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts;

  next:
    if (clock) {
        // Honor the model's clocking: only one state transition per tick.
        if (rxKickTick > curTick) {
            DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
                    rxKickTick);

            goto exit;
        }

        // Go to the next state machine clock tick.
        rxKickTick = curTick + ticks(1);
    }

    // Retry a DMA that was deferred because another DMA was pending.
    switch(rxDmaState) {
      case dmaReadWaiting:
        if (doRxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doRxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link;
    bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr;

    // see state machine from spec for details
    // the way this works is, if you finish work on one state and can
    // go directly to another, you do that through jumping to the
    // label "next".  however, if you have intermediate work, like DMA
    // so that you can't go to the next state yet, you go to exit and
    // exit the loop.  however, when the DMA is done it will trigger
    // an event and come back to this loop.
    switch (rxState) {
      case rxIdle:
        if (!rxEnable) {
            DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
            goto exit;
        }

        if (CRDD) {
            // Current descriptor was already consumed: refresh only its
            // link field to find the next one.
            rxState = rxDescRefr;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData =
                is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link;
            rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        } else {
            // Fetch the whole descriptor at rxdp.
            rxState = rxDescRead;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
            rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      case rxDescRefr:
        // Wait for the link-refresh DMA, then advance to the next desc.
        if (rxDmaState != dmaIdle)
            goto exit;

        rxState = rxAdvance;
        break;

      case rxDescRead:
        if (rxDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n",
                regs.rxdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                link, bufptr, cmdsts, extsts);

        if (cmdsts & CMDSTS_OWN) {
            // Descriptor still owned by the driver: nothing to fill.
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            goto exit;
        } else {
            rxState = rxFifoBlock;
            rxFragPtr = bufptr;
            rxDescCnt = cmdsts & CMDSTS_LEN_MASK;
        }
        break;

      case rxFifoBlock:
        if (!rxPacket) {
            /**
             * @todo in reality, we should be able to start processing
             * the packet as it arrives, and not have to wait for the
             * full packet to be in the receive fifo.
             */
            if (rxFifo.empty())
                goto exit;

            DPRINTF(EthernetSM, "****processing receive of new packet****\n");

            // If we don't have a packet, grab a new one from the fifo.
            rxPacket = rxFifo.front();
            rxPktBytes = rxPacket->length;
            rxPacketBufPtr = rxPacket->data;

#if TRACING_ON
            if (DTRACE(Ethernet)) {
                IpPtr ip(rxPacket);
                if (ip) {
                    DPRINTF(Ethernet, "ID is %d\n", ip->id());
                    TcpPtr tcp(ip);
                    if (tcp) {
                        DPRINTF(Ethernet,
                                "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                                tcp->sport(), tcp->dport(), tcp->seq(),
                                tcp->ack());
                    }
                }
            }
#endif

            // sanity check - i think the driver behaves like this
            assert(rxDescCnt >= rxPktBytes);
            rxFifo.pop();
        }


        // don't need the && rxDescCnt > 0 if driver sanity check
        // above holds
        if (rxPktBytes > 0) {
            // More packet data to copy into the descriptor's buffer.
            rxState = rxFragWrite;
            // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
            // check holds
            rxXferLen = rxPktBytes;

            rxDmaAddr = rxFragPtr & 0x3fffffff;
            rxDmaData = rxPacketBufPtr;
            rxDmaLen = rxXferLen;
            rxDmaFree = dmaDataFree;

            if (doRxDmaWrite())
                goto exit;

        } else {
            // Packet fully copied: write back cmdsts/extsts status.
            rxState = rxDescWrite;

            //if (rxPktBytes == 0) {  /* packet is done */
            assert(rxPktBytes == 0);
            DPRINTF(EthernetSM, "done with receiving packet\n");

            cmdsts |= CMDSTS_OWN;
            cmdsts &= ~CMDSTS_MORE;
            cmdsts |= CMDSTS_OK;
            cmdsts &= 0xffff0000;
            cmdsts += rxPacket->length;   //i.e. set CMDSTS_SIZE

#if 0
            /*
             * all the driver uses these are for its own stats keeping
             * which we don't care about, aren't necessary for
             * functionality and doing this would just slow us down.
             * if they end up using this in a later version for
             * functional purposes, just undef
             */
            if (rxFilterEnable) {
                cmdsts &= ~CMDSTS_DEST_MASK;
                const EthAddr &dst = rxFifoFront()->dst();
                if (dst->unicast())
                    cmdsts |= CMDSTS_DEST_SELF;
                if (dst->multicast())
                    cmdsts |= CMDSTS_DEST_MULTI;
                if (dst->broadcast())
                    cmdsts |= CMDSTS_DEST_MASK;
            }
#endif

            // RX checksum offload: verify IP/TCP/UDP checksums and
            // report results in the descriptor's extsts field.
            IpPtr ip(rxPacket);
            if (extstsEnable && ip) {
                extsts |= EXTSTS_IPPKT;
                rxIpChecksums++;
                if (cksum(ip) != 0) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    extsts |= EXTSTS_IPERR;
                }
                TcpPtr tcp(ip);
                UdpPtr udp(ip);
                if (tcp) {
                    extsts |= EXTSTS_TCPPKT;
                    rxTcpChecksums++;
                    if (cksum(tcp) != 0) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        extsts |= EXTSTS_TCPERR;

                    }
                } else if (udp) {
                    extsts |= EXTSTS_UDPPKT;
                    rxUdpChecksums++;
                    if (cksum(udp) != 0) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        extsts |= EXTSTS_UDPERR;
                    }
                }
            }
            rxPacket = 0;

            /*
             * the driver seems to always receive into desc buffers
             * of size 1514, so you never have a pkt that is split
             * into multiple descriptors on the receive side, so
             * i don't implement that case, hence the assert above.
             */

            DPRINTF(EthernetDesc,
                    "rxDesc: addr=%08x writeback cmdsts extsts\n",
                    regs.rxdp & 0x3fffffff);
            DPRINTF(EthernetDesc,
                    "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                    link, bufptr, cmdsts, extsts);

            // Write cmdsts and extsts back in one contiguous DMA;
            // relies on extsts immediately following cmdsts in ns_desc*.
            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &cmdsts;
            if (is64bit) {
                rxDmaAddr += offsetof(ns_desc64, cmdsts);
                rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
            } else {
                rxDmaAddr += offsetof(ns_desc32, cmdsts);
                rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
            }
            rxDmaFree = dmaDescFree;

            descDmaWrites++;
            descDmaWrBytes += rxDmaLen;

            if (doRxDmaWrite())
                goto exit;
        }
        break;

      case rxFragWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        // Account for the fragment just written and loop back for more.
        rxPacketBufPtr += rxXferLen;
        rxFragPtr += rxXferLen;
        rxPktBytes -= rxXferLen;

        rxState = rxFifoBlock;
        break;

      case rxDescWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        assert(cmdsts & CMDSTS_OWN);

        assert(rxPacket == 0);
        devIntrPost(ISR_RXOK);

        if (cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_RXDESC);

        if (!rxEnable) {
            DPRINTF(EthernetSM, "Halting the RX state machine\n");
            rxState = rxIdle;
            goto exit;
        } else
            rxState = rxAdvance;
        break;

      case rxAdvance:
        if (link == 0) {
            // End of the descriptor ring; remember that the current
            // descriptor's data is already consumed (CRDD).
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            CRDD = true;
            goto exit;
        } else {
            if (rxDmaState != dmaIdle)
                goto exit;
            rxState = rxDescRead;
            regs.rxdp = link;
            CRDD = false;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
            rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            NsRxStateStrings[rxState]);
    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            NsRxStateStrings[rxState]);

    if (clock && !rxKickEvent.scheduled())
        rxKickEvent.schedule(rxKickTick);
}

/*
 * Try to hand the packet at the head of txFifo to the ethernet link.
 * On success, update TX statistics, pop the packet and post ISR_TXOK;
 * if data remains, reschedule another attempt after retryTime.
 */
void
NSGigE::transmit()
{
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "nothing to transmit\n");
        return;
    }

    DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
            txFifo.size());
    if (interface->sendPacket(txFifo.front())) {
#if TRACING_ON
        if (DTRACE(Ethernet)) {
            IpPtr ip(txFifo.front());
            if (ip) {
                DPRINTF(Ethernet, "ID is %d\n", ip->id());
                TcpPtr tcp(ip);
                if (tcp) {
                    DPRINTF(Ethernet,
                            "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                            tcp->sport(), tcp->dport(), tcp->seq(),
                            tcp->ack());
                }
            }
        }
#endif

        DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
        txBytes += txFifo.front()->length;
        txPackets++;

        DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
                txFifo.avail());
        txFifo.pop();

        /*
         * normally do a writeback of the descriptor here, and ONLY
         * after that is done, send this interrupt.  but since our
         * stuff never actually fails, just do this interrupt here,
         * otherwise the code has to stray from this nice format.
         * besides, it's functionally the same.
1827 */ 1828 devIntrPost(ISR_TXOK); 1829 } 1830 1831 if (!txFifo.empty() && !txEvent.scheduled()) { 1832 DPRINTF(Ethernet, "reschedule transmit\n"); 1833 txEvent.schedule(curTick + retryTime); 1834 } 1835} 1836 1837bool 1838NSGigE::doTxDmaRead() 1839{ 1840 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting); 1841 txDmaState = dmaReading; 1842 1843 if (dmaPending() || getState() != Running) 1844 txDmaState = dmaReadWaiting; 1845 else 1846 dmaRead(txDmaAddr, txDmaLen, &txDmaReadEvent, (uint8_t*)txDmaData); 1847 1848 return true; 1849} 1850 1851void 1852NSGigE::txDmaReadDone() 1853{ 1854 assert(txDmaState == dmaReading); 1855 txDmaState = dmaIdle; 1856 1857 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n", 1858 txDmaAddr, txDmaLen); 1859 DDUMP(EthernetDMA, txDmaData, txDmaLen); 1860 1861 // If the receive state machine has a pending DMA, let it go first 1862 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting) 1863 rxKick(); 1864 1865 txKick(); 1866} 1867 1868bool 1869NSGigE::doTxDmaWrite() 1870{ 1871 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting); 1872 txDmaState = dmaWriting; 1873 1874 if (dmaPending() || getState() != Running) 1875 txDmaState = dmaWriteWaiting; 1876 else 1877 dmaWrite(txDmaAddr, txDmaLen, &txDmaWriteEvent, (uint8_t*)txDmaData); 1878 return true; 1879} 1880 1881void 1882NSGigE::txDmaWriteDone() 1883{ 1884 assert(txDmaState == dmaWriting); 1885 txDmaState = dmaIdle; 1886 1887 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n", 1888 txDmaAddr, txDmaLen); 1889 DDUMP(EthernetDMA, txDmaData, txDmaLen); 1890 1891 // If the receive state machine has a pending DMA, let it go first 1892 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting) 1893 rxKick(); 1894 1895 txKick(); 1896} 1897 1898void 1899NSGigE::txKick() 1900{ 1901 bool is64bit = (bool)(regs.config & CFGR_M64ADDR); 1902 1903 DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n", 1904 NsTxStateStrings[txState], is64bit ? 
64 : 32); 1905 1906 Addr link, bufptr; 1907 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts; 1908 uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts; 1909 1910 next: 1911 if (clock) { 1912 if (txKickTick > curTick) { 1913 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n", 1914 txKickTick); 1915 goto exit; 1916 } 1917 1918 // Go to the next state machine clock tick. 1919 txKickTick = curTick + ticks(1); 1920 } 1921 1922 switch(txDmaState) { 1923 case dmaReadWaiting: 1924 if (doTxDmaRead()) 1925 goto exit; 1926 break; 1927 case dmaWriteWaiting: 1928 if (doTxDmaWrite()) 1929 goto exit; 1930 break; 1931 default: 1932 break; 1933 } 1934 1935 link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link; 1936 bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr; 1937 switch (txState) { 1938 case txIdle: 1939 if (!txEnable) { 1940 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n"); 1941 goto exit; 1942 } 1943 1944 if (CTDD) { 1945 txState = txDescRefr; 1946 1947 txDmaAddr = regs.txdp & 0x3fffffff; 1948 txDmaData = 1949 is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link; 1950 txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link); 1951 txDmaFree = dmaDescFree; 1952 1953 descDmaReads++; 1954 descDmaRdBytes += txDmaLen; 1955 1956 if (doTxDmaRead()) 1957 goto exit; 1958 1959 } else { 1960 txState = txDescRead; 1961 1962 txDmaAddr = regs.txdp & 0x3fffffff; 1963 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32; 1964 txDmaLen = is64bit ? 
sizeof(txDesc64) : sizeof(txDesc32); 1965 txDmaFree = dmaDescFree; 1966 1967 descDmaReads++; 1968 descDmaRdBytes += txDmaLen; 1969 1970 if (doTxDmaRead()) 1971 goto exit; 1972 } 1973 break; 1974 1975 case txDescRefr: 1976 if (txDmaState != dmaIdle) 1977 goto exit; 1978 1979 txState = txAdvance; 1980 break; 1981 1982 case txDescRead: 1983 if (txDmaState != dmaIdle) 1984 goto exit; 1985 1986 DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n", 1987 regs.txdp & 0x3fffffff); 1988 DPRINTF(EthernetDesc, 1989 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n", 1990 link, bufptr, cmdsts, extsts); 1991 1992 if (cmdsts & CMDSTS_OWN) { 1993 txState = txFifoBlock; 1994 txFragPtr = bufptr; 1995 txDescCnt = cmdsts & CMDSTS_LEN_MASK; 1996 } else { 1997 devIntrPost(ISR_TXIDLE); 1998 txState = txIdle; 1999 goto exit; 2000 } 2001 break; 2002 2003 case txFifoBlock: 2004 if (!txPacket) { 2005 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n"); 2006 txPacket = new EthPacketData(16384); 2007 txPacketBufPtr = txPacket->data; 2008 } 2009 2010 if (txDescCnt == 0) { 2011 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n"); 2012 if (cmdsts & CMDSTS_MORE) { 2013 DPRINTF(EthernetSM, "there are more descriptors to come\n"); 2014 txState = txDescWrite; 2015 2016 cmdsts &= ~CMDSTS_OWN; 2017 2018 txDmaAddr = regs.txdp & 0x3fffffff; 2019 txDmaData = &cmdsts; 2020 if (is64bit) { 2021 txDmaAddr += offsetof(ns_desc64, cmdsts); 2022 txDmaLen = sizeof(txDesc64.cmdsts); 2023 } else { 2024 txDmaAddr += offsetof(ns_desc32, cmdsts); 2025 txDmaLen = sizeof(txDesc32.cmdsts); 2026 } 2027 txDmaFree = dmaDescFree; 2028 2029 if (doTxDmaWrite()) 2030 goto exit; 2031 2032 } else { /* this packet is totally done */ 2033 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n"); 2034 /* deal with the the packet that just finished */ 2035 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) { 2036 IpPtr ip(txPacket); 2037 if (extsts & EXTSTS_UDPPKT) { 2038 UdpPtr udp(ip); 2039 
if (udp) { 2040 udp->sum(0); 2041 udp->sum(cksum(udp)); 2042 txUdpChecksums++; 2043 } else { 2044 debug_break(); 2045 warn_once("UDPPKT set, but not UDP!\n"); 2046 } 2047 } else if (extsts & EXTSTS_TCPPKT) { 2048 TcpPtr tcp(ip); 2049 if (tcp) { 2050 tcp->sum(0); 2051 tcp->sum(cksum(tcp)); 2052 txTcpChecksums++; 2053 } else { 2054 debug_break(); 2055 warn_once("TCPPKT set, but not UDP!\n"); 2056 } 2057 } 2058 if (extsts & EXTSTS_IPPKT) { 2059 if (ip) { 2060 ip->sum(0); 2061 ip->sum(cksum(ip)); 2062 txIpChecksums++; 2063 } else { 2064 debug_break(); 2065 warn_once("IPPKT set, but not UDP!\n"); 2066 } 2067 } 2068 } 2069 2070 txPacket->length = txPacketBufPtr - txPacket->data; 2071 // this is just because the receive can't handle a 2072 // packet bigger want to make sure 2073 if (txPacket->length > 1514) 2074 panic("transmit packet too large, %s > 1514\n", 2075 txPacket->length); 2076 2077#ifndef NDEBUG 2078 bool success = 2079#endif 2080 txFifo.push(txPacket); 2081 assert(success); 2082 2083 /* 2084 * this following section is not tqo spec, but 2085 * functionally shouldn't be any different. normally, 2086 * the chip will wait til the transmit has occurred 2087 * before writing back the descriptor because it has 2088 * to wait to see that it was successfully transmitted 2089 * to decide whether to set CMDSTS_OK or not. 
2090 * however, in the simulator since it is always 2091 * successfully transmitted, and writing it exactly to 2092 * spec would complicate the code, we just do it here 2093 */ 2094 2095 cmdsts &= ~CMDSTS_OWN; 2096 cmdsts |= CMDSTS_OK; 2097 2098 DPRINTF(EthernetDesc, 2099 "txDesc writeback: cmdsts=%08x extsts=%08x\n", 2100 cmdsts, extsts); 2101 2102 txDmaFree = dmaDescFree; 2103 txDmaAddr = regs.txdp & 0x3fffffff; 2104 txDmaData = &cmdsts; 2105 if (is64bit) { 2106 txDmaAddr += offsetof(ns_desc64, cmdsts); 2107 txDmaLen = 2108 sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts); 2109 } else { 2110 txDmaAddr += offsetof(ns_desc32, cmdsts); 2111 txDmaLen = 2112 sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts); 2113 } 2114 2115 descDmaWrites++; 2116 descDmaWrBytes += txDmaLen; 2117 2118 transmit(); 2119 txPacket = 0; 2120 2121 if (!txEnable) { 2122 DPRINTF(EthernetSM, "halting TX state machine\n"); 2123 txState = txIdle; 2124 goto exit; 2125 } else 2126 txState = txAdvance; 2127 2128 if (doTxDmaWrite()) 2129 goto exit; 2130 } 2131 } else { 2132 DPRINTF(EthernetSM, "this descriptor isn't done yet\n"); 2133 if (!txFifo.full()) { 2134 txState = txFragRead; 2135 2136 /* 2137 * The number of bytes transferred is either whatever 2138 * is left in the descriptor (txDescCnt), or if there 2139 * is not enough room in the fifo, just whatever room 2140 * is left in the fifo 2141 */ 2142 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail()); 2143 2144 txDmaAddr = txFragPtr & 0x3fffffff; 2145 txDmaData = txPacketBufPtr; 2146 txDmaLen = txXferLen; 2147 txDmaFree = dmaDataFree; 2148 2149 if (doTxDmaRead()) 2150 goto exit; 2151 } else { 2152 txState = txFifoBlock; 2153 transmit(); 2154 2155 goto exit; 2156 } 2157 2158 } 2159 break; 2160 2161 case txFragRead: 2162 if (txDmaState != dmaIdle) 2163 goto exit; 2164 2165 txPacketBufPtr += txXferLen; 2166 txFragPtr += txXferLen; 2167 txDescCnt -= txXferLen; 2168 txFifo.reserve(txXferLen); 2169 2170 txState = txFifoBlock; 2171 break; 2172 2173 
case txDescWrite: 2174 if (txDmaState != dmaIdle) 2175 goto exit; 2176 2177 if (cmdsts & CMDSTS_INTR) 2178 devIntrPost(ISR_TXDESC); 2179 2180 if (!txEnable) { 2181 DPRINTF(EthernetSM, "halting TX state machine\n"); 2182 txState = txIdle; 2183 goto exit; 2184 } else 2185 txState = txAdvance; 2186 break; 2187 2188 case txAdvance: 2189 if (link == 0) { 2190 devIntrPost(ISR_TXIDLE); 2191 txState = txIdle; 2192 goto exit; 2193 } else { 2194 if (txDmaState != dmaIdle) 2195 goto exit; 2196 txState = txDescRead; 2197 regs.txdp = link; 2198 CTDD = false; 2199 2200 txDmaAddr = link & 0x3fffffff; 2201 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32; 2202 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32); 2203 txDmaFree = dmaDescFree; 2204 2205 if (doTxDmaRead()) 2206 goto exit; 2207 } 2208 break; 2209 2210 default: 2211 panic("invalid state"); 2212 } 2213 2214 DPRINTF(EthernetSM, "entering next txState=%s\n", 2215 NsTxStateStrings[txState]); 2216 goto next; 2217 2218 exit: 2219 /** 2220 * @todo do we want to schedule a future kick? 2221 */ 2222 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n", 2223 NsTxStateStrings[txState]); 2224 2225 if (clock && !txKickEvent.scheduled()) 2226 txKickEvent.schedule(txKickTick); 2227} 2228 2229/** 2230 * Advance the EEPROM state machine 2231 * Called on rising edge of EEPROM clock bit in MEAR 2232 */ 2233void 2234NSGigE::eepromKick() 2235{ 2236 switch (eepromState) { 2237 2238 case eepromStart: 2239 2240 // Wait for start bit 2241 if (regs.mear & MEAR_EEDI) { 2242 // Set up to get 2 opcode bits 2243 eepromState = eepromGetOpcode; 2244 eepromBitsToRx = 2; 2245 eepromOpcode = 0; 2246 } 2247 break; 2248 2249 case eepromGetOpcode: 2250 eepromOpcode <<= 1; 2251 eepromOpcode += (regs.mear & MEAR_EEDI) ? 
            1 : 0;
        --eepromBitsToRx;

        // Done getting opcode
        if (eepromBitsToRx == 0) {
            if (eepromOpcode != EEPROM_READ)
                panic("only EEPROM reads are implemented!");

            // Set up to get address
            eepromState = eepromGetAddress;
            eepromBitsToRx = 6;
            eepromAddress = 0;
        }
        break;

      case eepromGetAddress:
        // Shift in the next address bit from the EEDI pin.
        eepromAddress <<= 1;
        eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting address
        if (eepromBitsToRx == 0) {

            if (eepromAddress >= EEPROM_SIZE)
                panic("EEPROM read access out of range!");

            switch (eepromAddress) {

              // Each PMATCH word returns two bytes of the perfect-match
              // (station) MAC address, high byte first.
              case EEPROM_PMATCH2_ADDR:
                eepromData = rom.perfectMatch[5];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[4];
                break;

              case EEPROM_PMATCH1_ADDR:
                eepromData = rom.perfectMatch[3];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[2];
                break;

              case EEPROM_PMATCH0_ADDR:
                eepromData = rom.perfectMatch[1];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[0];
                break;

              default:
                panic("FreeBSD driver only uses EEPROM to read PMATCH!");
            }
            // Set up to read data
            eepromState = eepromRead;
            eepromBitsToRx = 16;

            // Clear data in bit
            regs.mear &= ~MEAR_EEDI;
        }
        break;

      case eepromRead:
        // Clear Data Out bit
        regs.mear &= ~MEAR_EEDO;
        // Set bit to value of current EEPROM bit (MSB first)
        regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;

        eepromData <<= 1;
        --eepromBitsToRx;

        // All done
        if (eepromBitsToRx == 0) {
            eepromState = eepromStart;
        }
        break;

      default:
        panic("invalid EEPROM state");
    }

}

/*
 * Called when a transmit completes on the wire: if more data is
 * queued in txFifo, schedule the next transmit attempt one tick out.
 */
void
NSGigE::transferDone()
{
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
        return;
    }

    DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");

    txEvent.reschedule(curTick + ticks(1), true);
}

/*
 * Receive address filter.  Returns true if the packet should be
 * DROPPED, false if it should be accepted, based on the accept*
 * flags and the destination MAC address class (unicast / broadcast /
 * multicast).  Multicast hashing is faked: the hash accepts all.
 */
bool
NSGigE::rxFilter(const EthPacketPtr &packet)
{
    EthPtr eth = packet;
    bool drop = true;
    string type;  // unused; kept for interface/ABI stability

    const EthAddr &dst = eth->dst();
    if (dst.unicast()) {
        // If we're accepting all unicast addresses
        if (acceptUnicast)
            drop = false;

        // If we make a perfect match
        if (acceptPerfect && dst == rom.perfectMatch)
            drop = false;

        if (acceptArp && eth->type() == ETH_TYPE_ARP)
            drop = false;

    } else if (dst.broadcast()) {
        // if we're accepting broadcasts
        if (acceptBroadcast)
            drop = false;

    } else if (dst.multicast()) {
        // if we're accepting all multicasts
        if (acceptMulticast)
            drop = false;

        // Multicast hashing faked - all packets accepted
        if (multicastHashEnable)
            drop = false;
    }

    if (drop) {
        DPRINTF(Ethernet, "rxFilter drop\n");
        DDUMP(EthernetData, packet->data, packet->length);
    }

    return drop;
}

/*
 * Entry point for packets arriving from the ethernet link.  Returns
 * true if the packet was consumed (accepted or intentionally dropped),
 * false only on receive-FIFO overrun, which also posts ISR_RXORN so
 * the link will retry.
 */
bool
NSGigE::recvPacket(EthPacketPtr packet)
{
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
            rxFifo.avail());

    if (!rxEnable) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        return true;
    }

    if (!rxFilterEnable) {
        DPRINTF(Ethernet,
                "receive packet filtering disabled . . . packet dropped\n");
        return true;
    }

    if (rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        return true;
    }

    if (rxFifo.avail() < packet->length) {
#if TRACING_ON
        IpPtr ip(packet);
        TcpPtr tcp(ip);
        if (ip) {
            DPRINTF(Ethernet,
                    "packet won't fit in receive buffer...pkt ID %d dropped\n",
                    ip->id());
            if (tcp) {
                DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
            }
        }
#endif
        droppedPackets++;
        devIntrPost(ISR_RXORN);
        return false;
    }

    rxFifo.push(packet);

    rxKick();
    return true;
}


/*
 * Resume after a drain: kick both state machines, since drain may
 * have left them parked in a dma*Waiting state with no event pending.
 */
void
NSGigE::resume()
{
    SimObject::resume();

    // During drain we could have left the state machines in a waiting state and
    // they wouldn't get out until some other event occured to kick them.
    // This way they'll get out immediately
    txKick();
    rxKick();
}


//=====================================================================
//
//
/*
 * Checkpoint the device: registers, ROM, FIFOs, in-flight packets,
 * cached descriptors, the three state machines, and pending events.
 * Must be kept field-for-field in sync with unserialize().
 */
void
NSGigE::serialize(ostream &os)
{
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
     */
    // @todo will mem system save pending dma?

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.brar);
    SERIALIZE_SCALAR(regs.brdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    rxFifo.serialize("rxFifo", os);
    txFifo.serialize("txFifo", os);

    /*
     * Serialize the various helper variables
     */
    // In-flight packets are optional; a bool flag records presence,
    // and buffer pointers are stored as offsets into the packet data.
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket->length = txPacketBufPtr - txPacket->data;
        txPacket->serialize("txPacket", os);
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        rxPacket->serialize("rxPacket", os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize Cached Descriptors
     */
    SERIALIZE_SCALAR(rxDesc64.link);
    SERIALIZE_SCALAR(rxDesc64.bufptr);
    SERIALIZE_SCALAR(rxDesc64.cmdsts);
    SERIALIZE_SCALAR(rxDesc64.extsts);
    SERIALIZE_SCALAR(txDesc64.link);
    SERIALIZE_SCALAR(txDesc64.bufptr);
    SERIALIZE_SCALAR(txDesc64.cmdsts);
    SERIALIZE_SCALAR(txDesc64.extsts);
    SERIALIZE_SCALAR(rxDesc32.link);
    SERIALIZE_SCALAR(rxDesc32.bufptr);
    SERIALIZE_SCALAR(rxDesc32.cmdsts);
    SERIALIZE_SCALAR(rxDesc32.extsts);
    SERIALIZE_SCALAR(txDesc32.link);
    SERIALIZE_SCALAR(txDesc32.bufptr);
    SERIALIZE_SCALAR(txDesc32.cmdsts);
    SERIALIZE_SCALAR(txDesc32.extsts);
    SERIALIZE_SCALAR(extstsEnable);

    /*
     * Serialize tx state machine
     */
    // Enums are stored through int locals so the checkpoint format is
    // a plain integer.
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);
    SERIALIZE_SCALAR(txKickTick);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFragPtr);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);
    SERIALIZE_SCALAR(rxKickTick);

    /*
     * Serialize EEPROM state machine
     */
    int eepromState = this->eepromState;
    SERIALIZE_SCALAR(eepromState);
    SERIALIZE_SCALAR(eepromClk);
    SERIALIZE_SCALAR(eepromBitsToRx);
    SERIALIZE_SCALAR(eepromOpcode);
    SERIALIZE_SCALAR(eepromAddress);
    SERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    // Stored as a delta from curTick; 0 means "not scheduled".
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);
    SERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}

/*
 * Restore the device from a checkpoint.  Field order must match
 * serialize() exactly.
 */
void
NSGigE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Unserialize the PciDev base class
    PciDev::unserialize(cp, section);

    UNSERIALIZE_SCALAR(regs.command);
    UNSERIALIZE_SCALAR(regs.config);
    UNSERIALIZE_SCALAR(regs.mear);
    UNSERIALIZE_SCALAR(regs.ptscr);
    UNSERIALIZE_SCALAR(regs.isr);
    UNSERIALIZE_SCALAR(regs.imr);
    UNSERIALIZE_SCALAR(regs.ier);
    UNSERIALIZE_SCALAR(regs.ihr);
    UNSERIALIZE_SCALAR(regs.txdp);
    UNSERIALIZE_SCALAR(regs.txdp_hi);
    UNSERIALIZE_SCALAR(regs.txcfg);
    UNSERIALIZE_SCALAR(regs.gpior);
    UNSERIALIZE_SCALAR(regs.rxdp);
    UNSERIALIZE_SCALAR(regs.rxdp_hi);
    UNSERIALIZE_SCALAR(regs.rxcfg);
    UNSERIALIZE_SCALAR(regs.pqcr);
    UNSERIALIZE_SCALAR(regs.wcsr);
    UNSERIALIZE_SCALAR(regs.pcr);
    UNSERIALIZE_SCALAR(regs.rfcr);
    UNSERIALIZE_SCALAR(regs.rfdr);
    UNSERIALIZE_SCALAR(regs.brar);
    UNSERIALIZE_SCALAR(regs.brdr);
    UNSERIALIZE_SCALAR(regs.srr);
    UNSERIALIZE_SCALAR(regs.mibc);
    UNSERIALIZE_SCALAR(regs.vrcr);
    UNSERIALIZE_SCALAR(regs.vtcr);
UNSERIALIZE_SCALAR(regs.vdr); 2657 UNSERIALIZE_SCALAR(regs.ccsr); 2658 UNSERIALIZE_SCALAR(regs.tbicr); 2659 UNSERIALIZE_SCALAR(regs.tbisr); 2660 UNSERIALIZE_SCALAR(regs.tanar); 2661 UNSERIALIZE_SCALAR(regs.tanlpar); 2662 UNSERIALIZE_SCALAR(regs.taner); 2663 UNSERIALIZE_SCALAR(regs.tesr); 2664 2665 UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN); 2666 UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE); 2667 2668 UNSERIALIZE_SCALAR(ioEnable); 2669 2670 /* 2671 * unserialize the data fifos 2672 */ 2673 rxFifo.unserialize("rxFifo", cp, section); 2674 txFifo.unserialize("txFifo", cp, section); 2675 2676 /* 2677 * unserialize the various helper variables 2678 */ 2679 bool txPacketExists; 2680 UNSERIALIZE_SCALAR(txPacketExists); 2681 if (txPacketExists) { 2682 txPacket = new EthPacketData(16384); 2683 txPacket->unserialize("txPacket", cp, section); 2684 uint32_t txPktBufPtr; 2685 UNSERIALIZE_SCALAR(txPktBufPtr); 2686 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr; 2687 } else 2688 txPacket = 0; 2689 2690 bool rxPacketExists; 2691 UNSERIALIZE_SCALAR(rxPacketExists); 2692 rxPacket = 0; 2693 if (rxPacketExists) { 2694 rxPacket = new EthPacketData(16384); 2695 rxPacket->unserialize("rxPacket", cp, section); 2696 uint32_t rxPktBufPtr; 2697 UNSERIALIZE_SCALAR(rxPktBufPtr); 2698 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr; 2699 } else 2700 rxPacket = 0; 2701 2702 UNSERIALIZE_SCALAR(txXferLen); 2703 UNSERIALIZE_SCALAR(rxXferLen); 2704 2705 /* 2706 * Unserialize Cached Descriptors 2707 */ 2708 UNSERIALIZE_SCALAR(rxDesc64.link); 2709 UNSERIALIZE_SCALAR(rxDesc64.bufptr); 2710 UNSERIALIZE_SCALAR(rxDesc64.cmdsts); 2711 UNSERIALIZE_SCALAR(rxDesc64.extsts); 2712 UNSERIALIZE_SCALAR(txDesc64.link); 2713 UNSERIALIZE_SCALAR(txDesc64.bufptr); 2714 UNSERIALIZE_SCALAR(txDesc64.cmdsts); 2715 UNSERIALIZE_SCALAR(txDesc64.extsts); 2716 UNSERIALIZE_SCALAR(rxDesc32.link); 2717 UNSERIALIZE_SCALAR(rxDesc32.bufptr); 2718 UNSERIALIZE_SCALAR(rxDesc32.cmdsts); 2719 
UNSERIALIZE_SCALAR(rxDesc32.extsts); 2720 UNSERIALIZE_SCALAR(txDesc32.link); 2721 UNSERIALIZE_SCALAR(txDesc32.bufptr); 2722 UNSERIALIZE_SCALAR(txDesc32.cmdsts); 2723 UNSERIALIZE_SCALAR(txDesc32.extsts); 2724 UNSERIALIZE_SCALAR(extstsEnable); 2725 2726 /* 2727 * unserialize tx state machine 2728 */ 2729 int txState; 2730 UNSERIALIZE_SCALAR(txState); 2731 this->txState = (TxState) txState; 2732 UNSERIALIZE_SCALAR(txEnable); 2733 UNSERIALIZE_SCALAR(CTDD); 2734 UNSERIALIZE_SCALAR(txFragPtr); 2735 UNSERIALIZE_SCALAR(txDescCnt); 2736 int txDmaState; 2737 UNSERIALIZE_SCALAR(txDmaState); 2738 this->txDmaState = (DmaState) txDmaState; 2739 UNSERIALIZE_SCALAR(txKickTick); 2740 if (txKickTick) 2741 txKickEvent.schedule(txKickTick); 2742 2743 /* 2744 * unserialize rx state machine 2745 */ 2746 int rxState; 2747 UNSERIALIZE_SCALAR(rxState); 2748 this->rxState = (RxState) rxState; 2749 UNSERIALIZE_SCALAR(rxEnable); 2750 UNSERIALIZE_SCALAR(CRDD); 2751 UNSERIALIZE_SCALAR(rxPktBytes); 2752 UNSERIALIZE_SCALAR(rxFragPtr); 2753 UNSERIALIZE_SCALAR(rxDescCnt); 2754 int rxDmaState; 2755 UNSERIALIZE_SCALAR(rxDmaState); 2756 this->rxDmaState = (DmaState) rxDmaState; 2757 UNSERIALIZE_SCALAR(rxKickTick); 2758 if (rxKickTick) 2759 rxKickEvent.schedule(rxKickTick); 2760 2761 /* 2762 * Unserialize EEPROM state machine 2763 */ 2764 int eepromState; 2765 UNSERIALIZE_SCALAR(eepromState); 2766 this->eepromState = (EEPROMState) eepromState; 2767 UNSERIALIZE_SCALAR(eepromClk); 2768 UNSERIALIZE_SCALAR(eepromBitsToRx); 2769 UNSERIALIZE_SCALAR(eepromOpcode); 2770 UNSERIALIZE_SCALAR(eepromAddress); 2771 UNSERIALIZE_SCALAR(eepromData); 2772 2773 /* 2774 * If there's a pending transmit, reschedule it now 2775 */ 2776 Tick transmitTick; 2777 UNSERIALIZE_SCALAR(transmitTick); 2778 if (transmitTick) 2779 txEvent.schedule(curTick + transmitTick); 2780 2781 /* 2782 * unserialize receive address filter settings 2783 */ 2784 UNSERIALIZE_SCALAR(rxFilterEnable); 2785 UNSERIALIZE_SCALAR(acceptBroadcast); 2786 
UNSERIALIZE_SCALAR(acceptMulticast); 2787 UNSERIALIZE_SCALAR(acceptUnicast); 2788 UNSERIALIZE_SCALAR(acceptPerfect); 2789 UNSERIALIZE_SCALAR(acceptArp); 2790 UNSERIALIZE_SCALAR(multicastHashEnable); 2791 2792 /* 2793 * Keep track of pending interrupt status. 2794 */ 2795 UNSERIALIZE_SCALAR(intrTick); 2796 UNSERIALIZE_SCALAR(cpuPendingIntr); 2797 Tick intrEventTick; 2798 UNSERIALIZE_SCALAR(intrEventTick); 2799 if (intrEventTick) { 2800 intrEvent = new IntrEvent(this, intrEventTick, true); 2801 } 2802} 2803 2804NSGigE * 2805NSGigEParams::create() 2806{ 2807 return new NSGigE(this); 2808} 2809