// ns_gige.cc, revision 1561
1/* 2 * Copyright (c) 2004 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29/* @file 30 * Device module for modelling the National Semiconductor 31 * DP83820 ethernet controller. 
 * Does not support priority queueing
 */
#include <cstdio>
#include <deque>
#include <string>

#include "base/inet.hh"
#include "cpu/exec_context.hh"
#include "dev/dma.hh"
#include "dev/etherlink.hh"
#include "dev/ns_gige.hh"
#include "dev/pciconfigall.hh"
#include "mem/bus/bus.hh"
#include "mem/bus/dma_interface.hh"
#include "mem/bus/pio_interface.hh"
#include "mem/bus/pio_interface_impl.hh"
#include "mem/functional_mem/memory_control.hh"
#include "mem/functional_mem/physical_memory.hh"
#include "sim/builder.hh"
#include "sim/debug.hh"
#include "sim/host.hh"
#include "sim/stats.hh"
#include "targetarch/vtophys.hh"

// Human-readable names for the receive-side state machine states.
// The entries mirror the rx state enumerators (rxIdle, rxDescRefr, ...);
// presumably used to pretty-print state in trace output — TODO confirm.
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};

// Human-readable names for the transmit-side state machine states.
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};

// Human-readable names for the DMA engine states.
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};

using namespace std;
using namespace Net;

///////////////////////////////////////////////////////////////////////
//
// NSGigE PCI Device
//
// Constructor: seeds the tx/rx FIFOs from the configured sizes, puts
// both state machines in their idle states, and zeroes all packet /
// DMA / interrupt bookkeeping.  (Initializer list continues on the
// next source line; body follows.)
NSGigE::NSGigE(Params *p)
    : PciDev(p), ioEnable(false),
      txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), txState(txIdle), txEnable(false),
      CTDD(false),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      rxEnable(false), CRDD(false), rxPktBytes(0),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
      txDelay(p->tx_delay), rxDelay(p->rx_delay),
      rxKickTick(0), txKickTick(0),
      txEvent(this), rxFilterEnable(p->rx_filter), acceptBroadcast(false),
      acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false),
      physmem(p->pmem), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{
    // Hook the device up to the memory system.  If a header bus is
    // configured, PIO goes over it; DMA pairs the header bus with the
    // payload bus when one exists, otherwise the header bus with
    // itself.  With only a payload bus, both PIO and DMA use it.
    if (p->header_bus) {
        pioInterface = newPioInterface(name(), p->hier,
                                       p->header_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = p->pio_latency * p->header_bus->clockRatio;

        if (p->payload_bus)
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->payload_bus, 1);
        else
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->header_bus, 1);
    } else if (p->payload_bus) {
        pioInterface = newPioInterface(name(), p->hier,
                                       p->payload_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = p->pio_latency * p->payload_bus->clockRatio;

        dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                             p->payload_bus,
                                             p->payload_bus, 1);
    }


    // Timing parameters for interrupt posting and DMA, straight from
    // the configuration parameters.
    intrDelay = US2Ticks(p->intr_delay);
    dmaReadDelay = p->dma_read_delay;
    dmaWriteDelay = p->dma_write_delay;
    dmaReadFactor = p->dma_read_factor;
    dmaWriteFactor = p->dma_write_factor;

    // Put the device registers in their power-on state and load the
    // station (MAC) address into the perfect-match filter ROM.
    regsReset();
    memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN);
}

NSGigE::~NSGigE()
{}

/**
 * Register this device's statistics with the stats package.  Pure
 * registration (name/desc/precision/prereq) first; the formula stats
 * are defined in terms of the counters at the end of this function.
 */
void
NSGigE::regStats()
{
    txBytes
        .name(name() + ".txBytes")
        .desc("Bytes Transmitted")
        .prereq(txBytes)
        ;

    rxBytes
        .name(name() + ".rxBytes")
        .desc("Bytes Received")
        .prereq(rxBytes)
        ;

    txPackets
        .name(name() + ".txPackets")
        .desc("Number of Packets Transmitted")
        .prereq(txBytes)
        ;

    rxPackets
        .name(name() + ".rxPackets")
        .desc("Number of Packets Received")
        .prereq(rxBytes)
        ;

    txIpChecksums
        .name(name() + ".txIpChecksums")
        .desc("Number of tx IP Checksums done by device")
        .precision(0)
        .prereq(txBytes)
        ;

    rxIpChecksums
        .name(name() + ".rxIpChecksums")
        .desc("Number of rx IP Checksums done by device")
        .precision(0)
        .prereq(rxBytes)
        ;

    // (continued) checksum-offload, descriptor-DMA, and aggregate
    // bandwidth/packet statistics.
    txTcpChecksums
        .name(name() + ".txTcpChecksums")
        .desc("Number of tx TCP Checksums done by device")
        .precision(0)
        .prereq(txBytes)
        ;

    rxTcpChecksums
        .name(name() + ".rxTcpChecksums")
        .desc("Number of rx TCP Checksums done by device")
        .precision(0)
        .prereq(rxBytes)
        ;

    txUdpChecksums
        .name(name() + ".txUdpChecksums")
        .desc("Number of tx UDP Checksums done by device")
        .precision(0)
        .prereq(txBytes)
        ;

    rxUdpChecksums
        .name(name() + ".rxUdpChecksums")
        .desc("Number of rx UDP Checksums done by device")
        .precision(0)
        .prereq(rxBytes)
        ;

    descDmaReads
        .name(name() + ".descDMAReads")
        .desc("Number of descriptors the device read w/ DMA")
        .precision(0)
        ;

    descDmaWrites
        .name(name() + ".descDMAWrites")
        .desc("Number of descriptors the device wrote w/ DMA")
        .precision(0)
        ;

    descDmaRdBytes
        .name(name() + ".descDmaReadBytes")
        .desc("number of descriptor bytes read w/ DMA")
        .precision(0)
        ;

    descDmaWrBytes
        .name(name() + ".descDmaWriteBytes")
        .desc("number of descriptor bytes write w/ DMA")
        .precision(0)
        ;

    txBandwidth
        .name(name() + ".txBandwidth")
        .desc("Transmit Bandwidth (bits/s)")
        .precision(0)
        .prereq(txBytes)
        ;

    rxBandwidth
        .name(name() + ".rxBandwidth")
        .desc("Receive Bandwidth (bits/s)")
        .precision(0)
        .prereq(rxBytes)
        ;

    totBandwidth
        .name(name() + ".totBandwidth")
        .desc("Total Bandwidth (bits/s)")
        .precision(0)
        .prereq(totBytes)
        ;

    totPackets
        .name(name() + ".totPackets")
        .desc("Total Packets")
        .precision(0)
        .prereq(totBytes)
        ;

    totBytes
        .name(name() + ".totBytes")
        .desc("Total Bytes")
        .precision(0)
        .prereq(totBytes)
280 ; 281 282 totPacketRate 283 .name(name() + ".totPPS") 284 .desc("Total Tranmission Rate (packets/s)") 285 .precision(0) 286 .prereq(totBytes) 287 ; 288 289 txPacketRate 290 .name(name() + ".txPPS") 291 .desc("Packet Tranmission Rate (packets/s)") 292 .precision(0) 293 .prereq(txBytes) 294 ; 295 296 rxPacketRate 297 .name(name() + ".rxPPS") 298 .desc("Packet Reception Rate (packets/s)") 299 .precision(0) 300 .prereq(rxBytes) 301 ; 302 303 postedSwi 304 .name(name() + ".postedSwi") 305 .desc("number of software interrupts posted to CPU") 306 .precision(0) 307 ; 308 309 totalSwi 310 .name(name() + ".totalSwi") 311 .desc("number of total Swi written to ISR") 312 .precision(0) 313 ; 314 315 coalescedSwi 316 .name(name() + ".coalescedSwi") 317 .desc("average number of Swi's coalesced into each post") 318 .precision(0) 319 ; 320 321 postedRxIdle 322 .name(name() + ".postedRxIdle") 323 .desc("number of rxIdle interrupts posted to CPU") 324 .precision(0) 325 ; 326 327 totalRxIdle 328 .name(name() + ".totalRxIdle") 329 .desc("number of total RxIdle written to ISR") 330 .precision(0) 331 ; 332 333 coalescedRxIdle 334 .name(name() + ".coalescedRxIdle") 335 .desc("average number of RxIdle's coalesced into each post") 336 .precision(0) 337 ; 338 339 postedRxOk 340 .name(name() + ".postedRxOk") 341 .desc("number of RxOk interrupts posted to CPU") 342 .precision(0) 343 ; 344 345 totalRxOk 346 .name(name() + ".totalRxOk") 347 .desc("number of total RxOk written to ISR") 348 .precision(0) 349 ; 350 351 coalescedRxOk 352 .name(name() + ".coalescedRxOk") 353 .desc("average number of RxOk's coalesced into each post") 354 .precision(0) 355 ; 356 357 postedRxDesc 358 .name(name() + ".postedRxDesc") 359 .desc("number of RxDesc interrupts posted to CPU") 360 .precision(0) 361 ; 362 363 totalRxDesc 364 .name(name() + ".totalRxDesc") 365 .desc("number of total RxDesc written to ISR") 366 .precision(0) 367 ; 368 369 coalescedRxDesc 370 .name(name() + ".coalescedRxDesc") 371 .desc("average 
number of RxDesc's coalesced into each post") 372 .precision(0) 373 ; 374 375 postedTxOk 376 .name(name() + ".postedTxOk") 377 .desc("number of TxOk interrupts posted to CPU") 378 .precision(0) 379 ; 380 381 totalTxOk 382 .name(name() + ".totalTxOk") 383 .desc("number of total TxOk written to ISR") 384 .precision(0) 385 ; 386 387 coalescedTxOk 388 .name(name() + ".coalescedTxOk") 389 .desc("average number of TxOk's coalesced into each post") 390 .precision(0) 391 ; 392 393 postedTxIdle 394 .name(name() + ".postedTxIdle") 395 .desc("number of TxIdle interrupts posted to CPU") 396 .precision(0) 397 ; 398 399 totalTxIdle 400 .name(name() + ".totalTxIdle") 401 .desc("number of total TxIdle written to ISR") 402 .precision(0) 403 ; 404 405 coalescedTxIdle 406 .name(name() + ".coalescedTxIdle") 407 .desc("average number of TxIdle's coalesced into each post") 408 .precision(0) 409 ; 410 411 postedTxDesc 412 .name(name() + ".postedTxDesc") 413 .desc("number of TxDesc interrupts posted to CPU") 414 .precision(0) 415 ; 416 417 totalTxDesc 418 .name(name() + ".totalTxDesc") 419 .desc("number of total TxDesc written to ISR") 420 .precision(0) 421 ; 422 423 coalescedTxDesc 424 .name(name() + ".coalescedTxDesc") 425 .desc("average number of TxDesc's coalesced into each post") 426 .precision(0) 427 ; 428 429 postedRxOrn 430 .name(name() + ".postedRxOrn") 431 .desc("number of RxOrn posted to CPU") 432 .precision(0) 433 ; 434 435 totalRxOrn 436 .name(name() + ".totalRxOrn") 437 .desc("number of total RxOrn written to ISR") 438 .precision(0) 439 ; 440 441 coalescedRxOrn 442 .name(name() + ".coalescedRxOrn") 443 .desc("average number of RxOrn's coalesced into each post") 444 .precision(0) 445 ; 446 447 coalescedTotal 448 .name(name() + ".coalescedTotal") 449 .desc("average number of interrupts coalesced into each post") 450 .precision(0) 451 ; 452 453 postedInterrupts 454 .name(name() + ".postedInterrupts") 455 .desc("number of posts to CPU") 456 .precision(0) 457 ; 458 459 
    droppedPackets
        .name(name() + ".droppedPackets")
        .desc("number of packets dropped")
        .precision(0)
        ;

    // Formula stats: each coalesced* average is the corresponding
    // total count divided by the number of actual posts to the CPU.
    coalescedSwi = totalSwi / postedInterrupts;
    coalescedRxIdle = totalRxIdle / postedInterrupts;
    coalescedRxOk = totalRxOk / postedInterrupts;
    coalescedRxDesc = totalRxDesc / postedInterrupts;
    coalescedTxOk = totalTxOk / postedInterrupts;
    coalescedTxIdle = totalTxIdle / postedInterrupts;
    coalescedTxDesc = totalTxDesc / postedInterrupts;
    coalescedRxOrn = totalRxOrn / postedInterrupts;

    coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc + totalTxOk
                      + totalTxIdle + totalTxDesc + totalRxOrn) / postedInterrupts;

    // Bandwidth in bits/s (hence the * 8) and aggregate totals/rates.
    txBandwidth = txBytes * Stats::constant(8) / simSeconds;
    rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
    totBandwidth = txBandwidth + rxBandwidth;
    totBytes = txBytes + rxBytes;
    totPackets = txPackets + rxPackets;

    txPacketRate = txPackets / simSeconds;
    rxPacketRate = rxPackets / simSeconds;
}

/**
 * This is to read the PCI general configuration registers.
 * Device-specific config space is not modeled and panics.
 */
void
NSGigE::ReadConfig(int offset, int size, uint8_t *data)
{
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::ReadConfig(offset, size, data);
    else
        panic("Device specific PCI config space not implemented!\n");
}

/**
 * This is to write to the PCI general configuration registers.
 * After delegating to PciDev, watches COMMAND and the BARs so the
 * model's PIO ranges and I/O-enable state track the kernel's setup.
 */
void
NSGigE::WriteConfig(int offset, int size, uint32_t data)
{
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::WriteConfig(offset, size, data);
    else
        panic("Device specific PCI config space not implemented!\n");

    // Need to catch writes to BARs to update the PIO interface
    switch (offset) {
        // seems to work fine without all these PCI settings, but i
        // put in the IO to double check, an assertion will fail if we
        // need to properly implement it
      case PCI_COMMAND:
        // mirror the I/O Space Enable bit; read()/write() assert on it
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;

#if 0
        if (config.data[offset] & PCI_CMD_BME) {
            bmEnabled = true;
        }
        else {
            bmEnabled = false;
        }

        if (config.data[offset] & PCI_CMD_MSE) {
            memEnable = true;
        }
        else {
            memEnable = false;
        }
#endif
        break;

      case PCI0_BASE_ADDR0:
        if (BARAddrs[0] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));

            // strip the Alpha EV5 uncachable address bits — TODO confirm
            // this masking is still wanted for non-EV5 targets
            BARAddrs[0] &= EV5::PAddrUncachedMask;
        }
        break;
      case PCI0_BASE_ADDR1:
        if (BARAddrs[1] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));

            BARAddrs[1] &= EV5::PAddrUncachedMask;
        }
        break;
    }
}

/**
 * This reads the device registers, which are detailed in the NS83820
 * spec sheet
 */
Fault
NSGigE::read(MemReqPtr &req, uint8_t *data)
{
    assert(ioEnable);

    //The mask is to give you only the offset into the device register file
    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);


    // there are some reserved registers, you can see ns_gige_reg.h and
    // the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // this window aliases the PCI config space
        ReadConfig(daddr & 0xff, req->size, data);
        return No_Fault;
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's.
hopefully the kernel 581 // doesn't actually DEPEND upon their values 582 // MIB are just hardware stats keepers 583 uint32_t ® = *(uint32_t *) data; 584 reg = 0; 585 return No_Fault; 586 } else if (daddr > 0x3FC) 587 panic("Something is messed up!\n"); 588 589 switch (req->size) { 590 case sizeof(uint32_t): 591 { 592 uint32_t ® = *(uint32_t *)data; 593 594 switch (daddr) { 595 case CR: 596 reg = regs.command; 597 //these are supposed to be cleared on a read 598 reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR); 599 break; 600 601 case CFG: 602 reg = regs.config; 603 break; 604 605 case MEAR: 606 reg = regs.mear; 607 break; 608 609 case PTSCR: 610 reg = regs.ptscr; 611 break; 612 613 case ISR: 614 reg = regs.isr; 615 devIntrClear(ISR_ALL); 616 break; 617 618 case IMR: 619 reg = regs.imr; 620 break; 621 622 case IER: 623 reg = regs.ier; 624 break; 625 626 case IHR: 627 reg = regs.ihr; 628 break; 629 630 case TXDP: 631 reg = regs.txdp; 632 break; 633 634 case TXDP_HI: 635 reg = regs.txdp_hi; 636 break; 637 638 case TXCFG: 639 reg = regs.txcfg; 640 break; 641 642 case GPIOR: 643 reg = regs.gpior; 644 break; 645 646 case RXDP: 647 reg = regs.rxdp; 648 break; 649 650 case RXDP_HI: 651 reg = regs.rxdp_hi; 652 break; 653 654 case RXCFG: 655 reg = regs.rxcfg; 656 break; 657 658 case PQCR: 659 reg = regs.pqcr; 660 break; 661 662 case WCSR: 663 reg = regs.wcsr; 664 break; 665 666 case PCR: 667 reg = regs.pcr; 668 break; 669 670 // see the spec sheet for how RFCR and RFDR work 671 // basically, you write to RFCR to tell the machine 672 // what you want to do next, then you act upon RFDR, 673 // and the device will be prepared b/c of what you 674 // wrote to RFCR 675 case RFCR: 676 reg = regs.rfcr; 677 break; 678 679 case RFDR: 680 switch (regs.rfcr & RFCR_RFADDR) { 681 case 0x000: 682 reg = rom.perfectMatch[1]; 683 reg = reg << 8; 684 reg += rom.perfectMatch[0]; 685 break; 686 case 0x002: 687 reg = rom.perfectMatch[3] << 8; 688 reg += rom.perfectMatch[2]; 689 break; 690 case 0x004: 
                    reg = rom.perfectMatch[5] << 8;
                    reg += rom.perfectMatch[4];
                    break;
                  default:
                    panic("reading RFDR for something other than PMATCH!\n");
                    // didn't implement other RFDR functionality b/c
                    // driver didn't use it
                }
                break;

              case SRR:
                reg = regs.srr;
                break;

              case MIBC:
                reg = regs.mibc;
                // these status bits read back as clear
                reg &= ~(MIBC_MIBS | MIBC_ACLR);
                break;

              case VRCR:
                reg = regs.vrcr;
                break;

              case VTCR:
                reg = regs.vtcr;
                break;

              case VDR:
                reg = regs.vdr;
                break;

              case CCSR:
                reg = regs.ccsr;
                break;

              case TBICR:
                reg = regs.tbicr;
                break;

              case TBISR:
                reg = regs.tbisr;
                break;

              case TANAR:
                reg = regs.tanar;
                break;

              case TANLPAR:
                reg = regs.tanlpar;
                break;

              case TANER:
                reg = regs.taner;
                break;

              case TESR:
                reg = regs.tesr;
                break;

              default:
                panic("reading unimplemented register: addr=%#x", daddr);
            }

            DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
                    daddr, reg, reg);
        }
        break;

      default:
        // only 32-bit register accesses are modeled
        panic("accessing register with invalid size: addr=%#x, size=%d",
              daddr, req->size);
    }

    return No_Fault;
}

/**
 * Write a device register; decoding mirrors read() above.  Only
 * 32-bit accesses are accepted.
 */
Fault
NSGigE::write(MemReqPtr &req, const uint8_t *data)
{
    assert(ioEnable);

    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);

    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // this window aliases the PCI config space
        WriteConfig(daddr & 0xff, req->size, *(uint32_t *)data);
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    if (req->size == sizeof(uint32_t)) {
        uint32_t reg = *(uint32_t *)data;
        DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);

        switch (daddr) {
          case CR:
            regs.command = reg;
            // CR: disable bits win over enable bits; enabling an idle
            // state machine kicks it into motion.
            if (reg & CR_TXD) {
                txEnable = false;
            } else if (reg & CR_TXE) {
                txEnable = true;

                // the kernel is enabling the transmit machine
                if (txState == txIdle)
                    txKick();
            }

            if (reg & CR_RXD) {
                rxEnable = false;
            } else if (reg & CR_RXE) {
                rxEnable = true;

                if (rxState == rxIdle)
                    rxKick();
            }

            if (reg & CR_TXR)
                txReset();

            if (reg & CR_RXR)
                rxReset();

            if (reg & CR_SWI)
                // software-requested interrupt
                devIntrPost(ISR_SWI);

            if (reg & CR_RST) {
                // full chip reset: both machines plus the registers
                txReset();
                rxReset();

                regsReset();
            }
            break;

          case CFG:
            if (reg & CFG_LNKSTS ||
                reg & CFG_SPDSTS ||
                reg & CFG_DUPSTS ||
                reg & CFG_RESERVED ||
                reg & CFG_T64ADDR ||
                reg & CFG_PCI64_DET)
                panic("writing to read-only or reserved CFG bits!\n");

            // accumulate only the writable bits into the config register
            regs.config |= reg & ~(CFG_LNKSTS | CFG_SPDSTS | CFG_DUPSTS |
                                   CFG_RESERVED | CFG_T64ADDR | CFG_PCI64_DET);

// all these #if 0's are because i don't THINK the kernel needs to
// have these implemented. if there is a problem relating to one of
// these, you may need to add functionality in.
#if 0
            if (reg & CFG_TBI_EN) ;
            if (reg & CFG_MODE_1000) ;
#endif

            if (reg & CFG_AUTO_1000)
                panic("CFG_AUTO_1000 not implemented!\n");

#if 0
            if (reg & CFG_PINT_DUPSTS ||
                reg & CFG_PINT_LNKSTS ||
                reg & CFG_PINT_SPDSTS)
                ;

            if (reg & CFG_TMRTEST) ;
            if (reg & CFG_MRM_DIS) ;
            if (reg & CFG_MWI_DIS) ;

            if (reg & CFG_T64ADDR)
                panic("CFG_T64ADDR is read only register!\n");

            if (reg & CFG_PCI64_DET)
                panic("CFG_PCI64_DET is read only register!\n");

            if (reg & CFG_DATA64_EN) ;
            if (reg & CFG_M64ADDR) ;
            if (reg & CFG_PHY_RST) ;
            if (reg & CFG_PHY_DIS) ;
#endif

            // extended status words in descriptors (needed for the
            // checksum-offload paths)
            if (reg & CFG_EXTSTS_EN)
                extstsEnable = true;
            else
                extstsEnable = false;

#if 0
            if (reg & CFG_REQALG) ;
            if (reg & CFG_SB) ;
            if (reg & CFG_POW) ;
            if (reg & CFG_EXD) ;
            if (reg & CFG_PESEL) ;
            if (reg & CFG_BROM_DIS) ;
            if (reg & CFG_EXT_125) ;
            if (reg & CFG_BEM) ;
#endif
            break;

          case MEAR:
            regs.mear = reg;
            // since phy is completely faked, MEAR_MD* don't matter
            // and since the driver never uses MEAR_EE*, they don't
            // matter
#if 0
            if (reg & MEAR_EEDI) ;
            if (reg & MEAR_EEDO) ; // this one is read only
            if (reg & MEAR_EECLK) ;
            if (reg & MEAR_EESEL) ;
            if (reg & MEAR_MDIO) ;
            if (reg & MEAR_MDDIR) ;
            if (reg & MEAR_MDC) ;
#endif
            break;

          case PTSCR:
            regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
            // these control BISTs for various parts of chip - we
            // don't care or do just fake that the BIST is done
            if (reg & PTSCR_RBIST_EN)
                regs.ptscr |= PTSCR_RBIST_DONE;
            if (reg & PTSCR_EEBIST_EN)
                regs.ptscr &= ~PTSCR_EEBIST_EN;
            if (reg & PTSCR_EELOAD_EN)
                regs.ptscr &= ~PTSCR_EELOAD_EN;
            break;

          case ISR: /* writing to the ISR has no effect */
            panic("ISR is a read only register!\n");

          case IMR:
            regs.imr = reg;
            // new mask may unmask (or mask) a pending interrupt
            devIntrChangeMask();
            break;

          case IER:
            regs.ier = reg;
            break;

          case IHR:
            regs.ihr = reg;
            /* not going to implement real interrupt holdoff */
            break;

          case TXDP:
            // descriptor pointers are dword-aligned; low bits ignored
            regs.txdp = (reg & 0xFFFFFFFC);
            assert(txState == txIdle);
            CTDD = false;
            break;

          case TXDP_HI:
            regs.txdp_hi = reg;
            break;

          case TXCFG:
            regs.txcfg = reg;
#if 0
            if (reg & TXCFG_CSI) ;
            if (reg & TXCFG_HBI) ;
            if (reg & TXCFG_MLB) ;
            if (reg & TXCFG_ATP) ;
            if (reg & TXCFG_ECRETRY) {
                /*
                 * this could easily be implemented, but considering
                 * the network is just a fake pipe, wouldn't make
                 * sense to do this
                 */
            }

            if (reg & TXCFG_BRST_DIS) ;
#endif

#if 0
            /* we handle our own DMA, ignore the kernel's exhortations */
            if (reg & TXCFG_MXDMA) ;
#endif

            // also, we currently don't care about fill/drain
            // thresholds though this may change in the future with
            // more realistic networks or a driver which changes it
            // according to feedback

            break;

          case GPIOR:
            regs.gpior = reg;
            /* these just control general purpose i/o pins, don't matter */
            break;

          case RXDP:
            regs.rxdp = reg;
            CRDD = false;
            break;

          case RXDP_HI:
            regs.rxdp_hi = reg;
            break;

          case RXCFG:
            regs.rxcfg = reg;
#if 0
            if (reg & RXCFG_AEP) ;
            if (reg & RXCFG_ARP) ;
            if (reg & RXCFG_STRIPCRC) ;
            if (reg & RXCFG_RX_RD) ;
            if (reg & RXCFG_ALP) ;
            if (reg & RXCFG_AIRL) ;

            /* we handle our own DMA, ignore what kernel says about it */
            if (reg & RXCFG_MXDMA) ;

            //also, we currently don't care about fill/drain thresholds
            //though this may change in the future with more realistic
            //networks or a driver which changes it according to feedback
            if (reg & (RXCFG_DRTH | RXCFG_DRTH0)) ;
#endif
            break;

          case PQCR:
            /* there is no priority queueing used in the linux 2.6 driver */
            regs.pqcr = reg;
            break;

          case WCSR:
            /* not going to implement wake on LAN */
            regs.wcsr = reg;
            break;

          case PCR:
            /* not going to implement pause control */
            regs.pcr = reg;
            break;

          case RFCR:
            regs.rfcr = reg;

            // latch the receive-filter accept flags from the control bits
            rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
            acceptBroadcast = (reg & RFCR_AAB) ? true : false;
            acceptMulticast = (reg & RFCR_AAM) ? true : false;
            acceptUnicast = (reg & RFCR_AAU) ? true : false;
            acceptPerfect = (reg & RFCR_APM) ? true : false;
            acceptArp = (reg & RFCR_AARP) ? true : false;

#if 0
            if (reg & RFCR_APAT)
                panic("RFCR_APAT not implemented!\n");
#endif

            if (reg & RFCR_MHEN || reg & RFCR_UHEN)
                panic("hash filtering not implemented!\n");

            if (reg & RFCR_ULM)
                panic("RFCR_ULM not implemented!\n");

            break;

          case RFDR:
            panic("the driver never writes to RFDR, something is wrong!\n");

          case BRAR:
            panic("the driver never uses BRAR, something is wrong!\n");

          case BRDR:
            panic("the driver never uses BRDR, something is wrong!\n");

          case SRR:
            panic("SRR is read only register!\n");

          case MIBC:
            panic("the driver never uses MIBC, something is wrong!\n");

          case VRCR:
            regs.vrcr = reg;
            break;

          case VTCR:
            regs.vtcr = reg;
            break;

          case VDR:
            panic("the driver never uses VDR, something is wrong!\n");
            break;

          case CCSR:
            /* not going to implement clockrun stuff */
            regs.ccsr = reg;
            break;

          case TBICR:
            regs.tbicr = reg;
            if (reg & TBICR_MR_LOOPBACK)
                panic("TBICR_MR_LOOPBACK never used, something wrong!\n");

            if (reg & TBICR_MR_AN_ENABLE) {
                // fake instant auto-negotiation success: echo our
                // advertised abilities as the link partner's and flag
                // completion + link-up in the TBI status register
                regs.tanlpar = regs.tanar;
                regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
            }

#if 0
            if (reg & TBICR_MR_RESTART_AN) ;
#endif

            break;

          case TBISR:
            panic("TBISR is read only register!\n");

          case TANAR:
            regs.tanar = reg;
            if (reg & TANAR_PS2)
                panic("this isn't used in driver, something wrong!\n");

            if (reg & TANAR_PS1)
                panic("this isn't used in driver, something wrong!\n");
            break;

          case TANLPAR:
            panic("this should only be written to by the fake phy!\n");

          case TANER:
            panic("TANER is read only register!\n");

          case TESR:
            regs.tesr = reg;
            break;

          default:
            panic("invalid register access daddr=%#x", daddr);
        }
    } else {
        // only 32-bit register accesses are modeled
        panic("Invalid Request Size");
    }

    return No_Fault;
}

/**
 * Set bits in the device ISR.  Counts each newly-set, unmasked source
 * in the total* stats, then (if anything is pending and unmasked)
 * schedules delivery to the CPU — immediately for ISR_NODELAY sources,
 * otherwise after intrDelay ticks.
 */
void
NSGigE::devIntrPost(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot set a reserved interrupt");

    if (interrupts & ISR_NOIMPL)
        warn("interrupt not implemented %#x\n", interrupts);

    interrupts &= ~ISR_NOIMPL;
    regs.isr |= interrupts;

    if (interrupts & regs.imr) {
        if (interrupts & ISR_SWI) {
            totalSwi++;
        }
        if (interrupts & ISR_RXIDLE) {
            totalRxIdle++;
        }
        if (interrupts & ISR_RXOK) {
            totalRxOk++;
        }
        if (interrupts & ISR_RXDESC) {
            totalRxDesc++;
        }
        if (interrupts & ISR_TXOK) {
            totalTxOk++;
        }
        if (interrupts & ISR_TXIDLE) {
            totalTxIdle++;
        }
        if (interrupts & ISR_TXDESC) {
            totalTxDesc++;
        }
        if (interrupts & ISR_RXORN) {
            totalRxOrn++;
        }
    }

    DPRINTF(EthernetIntr,
            "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
            interrupts, regs.isr, regs.imr);

    if ((regs.isr & regs.imr)) {
        Tick when = curTick;
        if (!(regs.isr & regs.imr & ISR_NODELAY))
            when += intrDelay;
        cpuIntrPost(when);
    }
}

/* writing this interrupt counting stats inside this means that this function
   is now limited to being used to clear all interrupts upon the kernel
   reading isr and
   servicing. just telling you in case you were thinking
   of expanding use.
*/
void
NSGigE::devIntrClear(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot clear a reserved interrupt");

    // Each pending, unmasked source being cleared counts as a
    // delivered ("posted") interrupt for the coalescing stats.
    if (regs.isr & regs.imr & ISR_SWI) {
        postedSwi++;
    }
    if (regs.isr & regs.imr & ISR_RXIDLE) {
        postedRxIdle++;
    }
    if (regs.isr & regs.imr & ISR_RXOK) {
        postedRxOk++;
    }
    if (regs.isr & regs.imr & ISR_RXDESC) {
        postedRxDesc++;
    }
    if (regs.isr & regs.imr & ISR_TXOK) {
        postedTxOk++;
    }
    if (regs.isr & regs.imr & ISR_TXIDLE) {
        postedTxIdle++;
    }
    if (regs.isr & regs.imr & ISR_TXDESC) {
        postedTxDesc++;
    }
    if (regs.isr & regs.imr & ISR_RXORN) {
        postedRxOrn++;
    }

    if (regs.isr & regs.imr & (ISR_SWI | ISR_RXIDLE | ISR_RXOK | ISR_RXDESC |
                               ISR_TXOK | ISR_TXIDLE | ISR_TXDESC | ISR_RXORN) )
        postedInterrupts++;

    interrupts &= ~ISR_NOIMPL;
    regs.isr &= ~interrupts;

    DPRINTF(EthernetIntr,
            "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
            interrupts, regs.isr, regs.imr);

    // nothing pending and unmasked anymore -> deassert the CPU line
    if (!(regs.isr & regs.imr))
        cpuIntrClear();
}

/**
 * Re-evaluate the CPU interrupt line after the interrupt mask (IMR)
 * changed: assert immediately if something became unmasked, deassert
 * if everything pending is now masked.
 */
void
NSGigE::devIntrChangeMask()
{
    DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
            regs.isr, regs.imr, regs.isr & regs.imr);

    if (regs.isr & regs.imr)
        cpuIntrPost(curTick);
    else
        cpuIntrClear();
}

void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
1251 */ 1252 assert(when >= curTick); 1253 assert(intrTick >= curTick || intrTick == 0); 1254 if (when > intrTick && intrTick != 0) { 1255 DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n", 1256 intrTick); 1257 return; 1258 } 1259 1260 intrTick = when; 1261 if (intrTick < curTick) { 1262 debug_break(); 1263 intrTick = curTick; 1264 } 1265 1266 DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n", 1267 intrTick); 1268 1269 if (intrEvent) 1270 intrEvent->squash(); 1271 intrEvent = new IntrEvent(this, true); 1272 intrEvent->schedule(intrTick); 1273} 1274 1275void 1276NSGigE::cpuInterrupt() 1277{ 1278 assert(intrTick == curTick); 1279 1280 // Whether or not there's a pending interrupt, we don't care about 1281 // it anymore 1282 intrEvent = 0; 1283 intrTick = 0; 1284 1285 // Don't send an interrupt if there's already one 1286 if (cpuPendingIntr) { 1287 DPRINTF(EthernetIntr, 1288 "would send an interrupt now, but there's already pending\n"); 1289 } else { 1290 // Send interrupt 1291 cpuPendingIntr = true; 1292 1293 DPRINTF(EthernetIntr, "posting interrupt\n"); 1294 intrPost(); 1295 } 1296} 1297 1298void 1299NSGigE::cpuIntrClear() 1300{ 1301 if (!cpuPendingIntr) 1302 return; 1303 1304 if (intrEvent) { 1305 intrEvent->squash(); 1306 intrEvent = 0; 1307 } 1308 1309 intrTick = 0; 1310 1311 cpuPendingIntr = false; 1312 1313 DPRINTF(EthernetIntr, "clearing interrupt\n"); 1314 intrClear(); 1315} 1316 1317bool 1318NSGigE::cpuIntrPending() const 1319{ return cpuPendingIntr; } 1320 1321void 1322NSGigE::txReset() 1323{ 1324 1325 DPRINTF(Ethernet, "transmit reset\n"); 1326 1327 CTDD = false; 1328 txEnable = false;; 1329 txFragPtr = 0; 1330 assert(txDescCnt == 0); 1331 txFifo.clear(); 1332 txState = txIdle; 1333 assert(txDmaState == dmaIdle); 1334} 1335 1336void 1337NSGigE::rxReset() 1338{ 1339 DPRINTF(Ethernet, "receive reset\n"); 1340 1341 CRDD = false; 1342 assert(rxPktBytes == 0); 1343 rxEnable = false; 1344 rxFragPtr = 0; 1345 
/**
 * Reset the device register file to its power-on defaults.
 * Values below follow the DP83820 model's expected reset state.
 */
void
NSGigE::regsReset()
{
    memset(&regs, 0, sizeof(regs));
    regs.config = CFG_LNKSTS;
    regs.mear = MEAR_MDDIR | MEAR_EEDO;
    regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
                        // fill threshold to 32 bytes
    regs.rxcfg = 0x4;   // set drain threshold to 16 bytes
    regs.srr = 0x0103;  // set the silicon revision to rev B or 0x103
    regs.mibc = MIBC_FRZ;
    regs.vdr = 0x81;    // set the vlan tag type to 802.1q
    regs.tesr = 0xc000; // TBI capable of both full and half duplex

    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}

/**
 * Perform the actual functional copy for an rx DMA read
 * (memory -> device buffer) and return the DMA engine to idle.
 */
void
NSGigE::rxDmaReadCopy()
{
    assert(rxDmaState == dmaReading);

    physmem->dma_read((uint8_t *)rxDmaData, rxDmaAddr, rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}

/**
 * Start the rx DMA read described by rxDmaAddr/rxDmaData/rxDmaLen.
 *
 * @return true if the caller must wait for a completion event (DMA queued
 *         on the bus interface, or delay modelling scheduled an event);
 *         false if the copy completed synchronously.
 */
bool
NSGigE::doRxDmaRead()
{
    assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
    rxDmaState = dmaReading;

    // With a timing DMA interface, either queue the transfer or stall
    // in dmaReadWaiting until the bus frees up.
    if (dmaInterface && !rxDmaFree) {
        if (dmaInterface->busy())
            rxDmaState = dmaReadWaiting;
        else
            dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
                                &rxDmaReadEvent, true);
        return true;
    }

    // No delay modelling: copy immediately.
    if (dmaReadDelay == 0 && dmaReadFactor == 0) {
        rxDmaReadCopy();
        return false;
    }

    // Latency scales with the number of 64-byte chunks transferred.
    Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
    Tick start = curTick + dmaReadDelay + factor;
    rxDmaReadEvent.schedule(start);
    return true;
}

/**
 * Completion handler for a delayed rx DMA read: do the copy, then kick
 * both state machines (tx first if it was stalled on the bus).
 */
void
NSGigE::rxDmaReadDone()
{
    assert(rxDmaState == dmaReading);
    rxDmaReadCopy();

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}

/**
 * Perform the actual functional copy for an rx DMA write
 * (device buffer -> memory) and return the DMA engine to idle.
 */
void
NSGigE::rxDmaWriteCopy()
{
    assert(rxDmaState == dmaWriting);

    physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}

/**
 * Start the rx DMA write described by rxDmaAddr/rxDmaData/rxDmaLen.
 *
 * @return true if the caller must wait for a completion event;
 *         false if the copy completed synchronously.
 */
bool
NSGigE::doRxDmaWrite()
{
    assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
    rxDmaState = dmaWriting;

    if (dmaInterface && !rxDmaFree) {
        if (dmaInterface->busy())
            rxDmaState = dmaWriteWaiting;
        else
            dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
                                &rxDmaWriteEvent, true);
        return true;
    }

    if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
        rxDmaWriteCopy();
        return false;
    }

    // Latency scales with the number of 64-byte chunks transferred.
    Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
    Tick start = curTick + dmaWriteDelay + factor;
    rxDmaWriteEvent.schedule(start);
    return true;
}

/**
 * Completion handler for a delayed rx DMA write: do the copy, then kick
 * both state machines (tx first if it was stalled on the bus).
 */
void
NSGigE::rxDmaWriteDone()
{
    assert(rxDmaState == dmaWriting);
    rxDmaWriteCopy();

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}
state machine from spec for details 1504 // the way this works is, if you finish work on one state and can 1505 // go directly to another, you do that through jumping to the 1506 // label "next". however, if you have intermediate work, like DMA 1507 // so that you can't go to the next state yet, you go to exit and 1508 // exit the loop. however, when the DMA is done it will trigger 1509 // an event and come back to this loop. 1510 switch (rxState) { 1511 case rxIdle: 1512 if (!rxEnable) { 1513 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n"); 1514 goto exit; 1515 } 1516 1517 if (CRDD) { 1518 rxState = rxDescRefr; 1519 1520 rxDmaAddr = regs.rxdp & 0x3fffffff; 1521 rxDmaData = &rxDescCache + offsetof(ns_desc, link); 1522 rxDmaLen = sizeof(rxDescCache.link); 1523 rxDmaFree = dmaDescFree; 1524 1525 descDmaReads++; 1526 descDmaRdBytes += rxDmaLen; 1527 1528 if (doRxDmaRead()) 1529 goto exit; 1530 } else { 1531 rxState = rxDescRead; 1532 1533 rxDmaAddr = regs.rxdp & 0x3fffffff; 1534 rxDmaData = &rxDescCache; 1535 rxDmaLen = sizeof(ns_desc); 1536 rxDmaFree = dmaDescFree; 1537 1538 descDmaReads++; 1539 descDmaRdBytes += rxDmaLen; 1540 1541 if (doRxDmaRead()) 1542 goto exit; 1543 } 1544 break; 1545 1546 case rxDescRefr: 1547 if (rxDmaState != dmaIdle) 1548 goto exit; 1549 1550 rxState = rxAdvance; 1551 break; 1552 1553 case rxDescRead: 1554 if (rxDmaState != dmaIdle) 1555 goto exit; 1556 1557 DPRINTF(EthernetDesc, 1558 "rxDescCache: addr=%08x read descriptor\n", 1559 regs.rxdp & 0x3fffffff); 1560 DPRINTF(EthernetDesc, 1561 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n", 1562 rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts, 1563 rxDescCache.extsts); 1564 1565 if (rxDescCache.cmdsts & CMDSTS_OWN) { 1566 devIntrPost(ISR_RXIDLE); 1567 rxState = rxIdle; 1568 goto exit; 1569 } else { 1570 rxState = rxFifoBlock; 1571 rxFragPtr = rxDescCache.bufptr; 1572 rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK; 1573 } 1574 break; 1575 1576 case 
rxFifoBlock: 1577 if (!rxPacket) { 1578 /** 1579 * @todo in reality, we should be able to start processing 1580 * the packet as it arrives, and not have to wait for the 1581 * full packet ot be in the receive fifo. 1582 */ 1583 if (rxFifo.empty()) 1584 goto exit; 1585 1586 DPRINTF(EthernetSM, "****processing receive of new packet****\n"); 1587 1588 // If we don't have a packet, grab a new one from the fifo. 1589 rxPacket = rxFifo.front(); 1590 rxPktBytes = rxPacket->length; 1591 rxPacketBufPtr = rxPacket->data; 1592 1593#if TRACING_ON 1594 if (DTRACE(Ethernet)) { 1595 IpPtr ip(rxPacket); 1596 if (ip) { 1597 DPRINTF(Ethernet, "ID is %d\n", ip->id()); 1598 TcpPtr tcp(ip); 1599 if (tcp) { 1600 DPRINTF(Ethernet, 1601 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n", 1602 tcp->sport(), tcp->dport(), tcp->seq(), 1603 tcp->ack()); 1604 } 1605 } 1606 } 1607#endif 1608 1609 // sanity check - i think the driver behaves like this 1610 assert(rxDescCnt >= rxPktBytes); 1611 rxFifo.pop(); 1612 } 1613 1614 1615 // dont' need the && rxDescCnt > 0 if driver sanity check 1616 // above holds 1617 if (rxPktBytes > 0) { 1618 rxState = rxFragWrite; 1619 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity 1620 // check holds 1621 rxXferLen = rxPktBytes; 1622 1623 rxDmaAddr = rxFragPtr & 0x3fffffff; 1624 rxDmaData = rxPacketBufPtr; 1625 rxDmaLen = rxXferLen; 1626 rxDmaFree = dmaDataFree; 1627 1628 if (doRxDmaWrite()) 1629 goto exit; 1630 1631 } else { 1632 rxState = rxDescWrite; 1633 1634 //if (rxPktBytes == 0) { /* packet is done */ 1635 assert(rxPktBytes == 0); 1636 DPRINTF(EthernetSM, "done with receiving packet\n"); 1637 1638 rxDescCache.cmdsts |= CMDSTS_OWN; 1639 rxDescCache.cmdsts &= ~CMDSTS_MORE; 1640 rxDescCache.cmdsts |= CMDSTS_OK; 1641 rxDescCache.cmdsts &= 0xffff0000; 1642 rxDescCache.cmdsts += rxPacket->length; //i.e. 
set CMDSTS_SIZE 1643 1644#if 0 1645 /* 1646 * all the driver uses these are for its own stats keeping 1647 * which we don't care about, aren't necessary for 1648 * functionality and doing this would just slow us down. 1649 * if they end up using this in a later version for 1650 * functional purposes, just undef 1651 */ 1652 if (rxFilterEnable) { 1653 rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK; 1654 const EthAddr &dst = rxFifoFront()->dst(); 1655 if (dst->unicast()) 1656 rxDescCache.cmdsts |= CMDSTS_DEST_SELF; 1657 if (dst->multicast()) 1658 rxDescCache.cmdsts |= CMDSTS_DEST_MULTI; 1659 if (dst->broadcast()) 1660 rxDescCache.cmdsts |= CMDSTS_DEST_MASK; 1661 } 1662#endif 1663 1664 IpPtr ip(rxPacket); 1665 if (extstsEnable && ip) { 1666 rxDescCache.extsts |= EXTSTS_IPPKT; 1667 rxIpChecksums++; 1668 if (cksum(ip) != 0) { 1669 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n"); 1670 rxDescCache.extsts |= EXTSTS_IPERR; 1671 } 1672 TcpPtr tcp(ip); 1673 UdpPtr udp(ip); 1674 if (tcp) { 1675 rxDescCache.extsts |= EXTSTS_TCPPKT; 1676 rxTcpChecksums++; 1677 if (cksum(tcp) != 0) { 1678 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n"); 1679 rxDescCache.extsts |= EXTSTS_TCPERR; 1680 1681 } 1682 } else if (udp) { 1683 rxDescCache.extsts |= EXTSTS_UDPPKT; 1684 rxUdpChecksums++; 1685 if (cksum(udp) != 0) { 1686 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n"); 1687 rxDescCache.extsts |= EXTSTS_UDPERR; 1688 } 1689 } 1690 } 1691 rxPacket = 0; 1692 1693 /* 1694 * the driver seems to always receive into desc buffers 1695 * of size 1514, so you never have a pkt that is split 1696 * into multiple descriptors on the receive side, so 1697 * i don't implement that case, hence the assert above. 
1698 */ 1699 1700 DPRINTF(EthernetDesc, 1701 "rxDescCache: addr=%08x writeback cmdsts extsts\n", 1702 regs.rxdp & 0x3fffffff); 1703 DPRINTF(EthernetDesc, 1704 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n", 1705 rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts, 1706 rxDescCache.extsts); 1707 1708 rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff; 1709 rxDmaData = &(rxDescCache.cmdsts); 1710 rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts); 1711 rxDmaFree = dmaDescFree; 1712 1713 descDmaWrites++; 1714 descDmaWrBytes += rxDmaLen; 1715 1716 if (doRxDmaWrite()) 1717 goto exit; 1718 } 1719 break; 1720 1721 case rxFragWrite: 1722 if (rxDmaState != dmaIdle) 1723 goto exit; 1724 1725 rxPacketBufPtr += rxXferLen; 1726 rxFragPtr += rxXferLen; 1727 rxPktBytes -= rxXferLen; 1728 1729 rxState = rxFifoBlock; 1730 break; 1731 1732 case rxDescWrite: 1733 if (rxDmaState != dmaIdle) 1734 goto exit; 1735 1736 assert(rxDescCache.cmdsts & CMDSTS_OWN); 1737 1738 assert(rxPacket == 0); 1739 devIntrPost(ISR_RXOK); 1740 1741 if (rxDescCache.cmdsts & CMDSTS_INTR) 1742 devIntrPost(ISR_RXDESC); 1743 1744 if (!rxEnable) { 1745 DPRINTF(EthernetSM, "Halting the RX state machine\n"); 1746 rxState = rxIdle; 1747 goto exit; 1748 } else 1749 rxState = rxAdvance; 1750 break; 1751 1752 case rxAdvance: 1753 if (rxDescCache.link == 0) { 1754 devIntrPost(ISR_RXIDLE); 1755 rxState = rxIdle; 1756 CRDD = true; 1757 goto exit; 1758 } else { 1759 rxState = rxDescRead; 1760 regs.rxdp = rxDescCache.link; 1761 CRDD = false; 1762 1763 rxDmaAddr = regs.rxdp & 0x3fffffff; 1764 rxDmaData = &rxDescCache; 1765 rxDmaLen = sizeof(ns_desc); 1766 rxDmaFree = dmaDescFree; 1767 1768 if (doRxDmaRead()) 1769 goto exit; 1770 } 1771 break; 1772 1773 default: 1774 panic("Invalid rxState!"); 1775 } 1776 1777 DPRINTF(EthernetSM, "entering next rxState=%s\n", 1778 NsRxStateStrings[rxState]); 1779 1780 goto next; 1781 1782 exit: 1783 /** 1784 * @todo do we want to schedule a 
future kick? 1785 */ 1786 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n", 1787 NsRxStateStrings[rxState]); 1788} 1789 1790void 1791NSGigE::transmit() 1792{ 1793 if (txFifo.empty()) { 1794 DPRINTF(Ethernet, "nothing to transmit\n"); 1795 return; 1796 } 1797 1798 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n", 1799 txFifo.size()); 1800 if (interface->sendPacket(txFifo.front())) { 1801#if TRACING_ON 1802 if (DTRACE(Ethernet)) { 1803 IpPtr ip(txFifo.front()); 1804 if (ip) { 1805 DPRINTF(Ethernet, "ID is %d\n", ip->id()); 1806 TcpPtr tcp(ip); 1807 if (tcp) { 1808 DPRINTF(Ethernet, 1809 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n", 1810 tcp->sport(), tcp->dport(), tcp->seq(), tcp->ack()); 1811 } 1812 } 1813 } 1814#endif 1815 1816 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length); 1817 txBytes += txFifo.front()->length; 1818 txPackets++; 1819 1820 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n", 1821 txFifo.avail()); 1822 txFifo.pop(); 1823 1824 /* 1825 * normally do a writeback of the descriptor here, and ONLY 1826 * after that is done, send this interrupt. but since our 1827 * stuff never actually fails, just do this interrupt here, 1828 * otherwise the code has to stray from this nice format. 1829 * besides, it's functionally the same. 
1830 */ 1831 devIntrPost(ISR_TXOK); 1832 } 1833 1834 if (!txFifo.empty() && !txEvent.scheduled()) { 1835 DPRINTF(Ethernet, "reschedule transmit\n"); 1836 txEvent.schedule(curTick + 1000); 1837 } 1838} 1839 1840void 1841NSGigE::txDmaReadCopy() 1842{ 1843 assert(txDmaState == dmaReading); 1844 1845 physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen); 1846 txDmaState = dmaIdle; 1847 1848 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n", 1849 txDmaAddr, txDmaLen); 1850 DDUMP(EthernetDMA, txDmaData, txDmaLen); 1851} 1852 1853bool 1854NSGigE::doTxDmaRead() 1855{ 1856 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting); 1857 txDmaState = dmaReading; 1858 1859 if (dmaInterface && !txDmaFree) { 1860 if (dmaInterface->busy()) 1861 txDmaState = dmaReadWaiting; 1862 else 1863 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick, 1864 &txDmaReadEvent, true); 1865 return true; 1866 } 1867 1868 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) { 1869 txDmaReadCopy(); 1870 return false; 1871 } 1872 1873 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor; 1874 Tick start = curTick + dmaReadDelay + factor; 1875 txDmaReadEvent.schedule(start); 1876 return true; 1877} 1878 1879void 1880NSGigE::txDmaReadDone() 1881{ 1882 assert(txDmaState == dmaReading); 1883 txDmaReadCopy(); 1884 1885 // If the receive state machine has a pending DMA, let it go first 1886 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting) 1887 rxKick(); 1888 1889 txKick(); 1890} 1891 1892void 1893NSGigE::txDmaWriteCopy() 1894{ 1895 assert(txDmaState == dmaWriting); 1896 1897 physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen); 1898 txDmaState = dmaIdle; 1899 1900 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n", 1901 txDmaAddr, txDmaLen); 1902 DDUMP(EthernetDMA, txDmaData, txDmaLen); 1903} 1904 1905bool 1906NSGigE::doTxDmaWrite() 1907{ 1908 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting); 1909 txDmaState = dmaWriting; 1910 1911 if 
(dmaInterface && !txDmaFree) { 1912 if (dmaInterface->busy()) 1913 txDmaState = dmaWriteWaiting; 1914 else 1915 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick, 1916 &txDmaWriteEvent, true); 1917 return true; 1918 } 1919 1920 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) { 1921 txDmaWriteCopy(); 1922 return false; 1923 } 1924 1925 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor; 1926 Tick start = curTick + dmaWriteDelay + factor; 1927 txDmaWriteEvent.schedule(start); 1928 return true; 1929} 1930 1931void 1932NSGigE::txDmaWriteDone() 1933{ 1934 assert(txDmaState == dmaWriting); 1935 txDmaWriteCopy(); 1936 1937 // If the receive state machine has a pending DMA, let it go first 1938 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting) 1939 rxKick(); 1940 1941 txKick(); 1942} 1943 1944void 1945NSGigE::txKick() 1946{ 1947 DPRINTF(EthernetSM, "transmit kick txState=%s\n", 1948 NsTxStateStrings[txState]); 1949 1950 if (txKickTick > curTick) { 1951 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n", 1952 txKickTick); 1953 1954 return; 1955 } 1956 1957 next: 1958 switch(txDmaState) { 1959 case dmaReadWaiting: 1960 if (doTxDmaRead()) 1961 goto exit; 1962 break; 1963 case dmaWriteWaiting: 1964 if (doTxDmaWrite()) 1965 goto exit; 1966 break; 1967 default: 1968 break; 1969 } 1970 1971 switch (txState) { 1972 case txIdle: 1973 if (!txEnable) { 1974 DPRINTF(EthernetSM, "Transmit disabled. 
Nothing to do.\n"); 1975 goto exit; 1976 } 1977 1978 if (CTDD) { 1979 txState = txDescRefr; 1980 1981 txDmaAddr = regs.txdp & 0x3fffffff; 1982 txDmaData = &txDescCache + offsetof(ns_desc, link); 1983 txDmaLen = sizeof(txDescCache.link); 1984 txDmaFree = dmaDescFree; 1985 1986 descDmaReads++; 1987 descDmaRdBytes += txDmaLen; 1988 1989 if (doTxDmaRead()) 1990 goto exit; 1991 1992 } else { 1993 txState = txDescRead; 1994 1995 txDmaAddr = regs.txdp & 0x3fffffff; 1996 txDmaData = &txDescCache; 1997 txDmaLen = sizeof(ns_desc); 1998 txDmaFree = dmaDescFree; 1999 2000 descDmaReads++; 2001 descDmaRdBytes += txDmaLen; 2002 2003 if (doTxDmaRead()) 2004 goto exit; 2005 } 2006 break; 2007 2008 case txDescRefr: 2009 if (txDmaState != dmaIdle) 2010 goto exit; 2011 2012 txState = txAdvance; 2013 break; 2014 2015 case txDescRead: 2016 if (txDmaState != dmaIdle) 2017 goto exit; 2018 2019 DPRINTF(EthernetDesc, 2020 "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n", 2021 txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts, 2022 txDescCache.extsts); 2023 2024 if (txDescCache.cmdsts & CMDSTS_OWN) { 2025 txState = txFifoBlock; 2026 txFragPtr = txDescCache.bufptr; 2027 txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK; 2028 } else { 2029 devIntrPost(ISR_TXIDLE); 2030 txState = txIdle; 2031 goto exit; 2032 } 2033 break; 2034 2035 case txFifoBlock: 2036 if (!txPacket) { 2037 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n"); 2038 txPacket = new PacketData(16384); 2039 txPacketBufPtr = txPacket->data; 2040 } 2041 2042 if (txDescCnt == 0) { 2043 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n"); 2044 if (txDescCache.cmdsts & CMDSTS_MORE) { 2045 DPRINTF(EthernetSM, "there are more descriptors to come\n"); 2046 txState = txDescWrite; 2047 2048 txDescCache.cmdsts &= ~CMDSTS_OWN; 2049 2050 txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts); 2051 txDmaAddr &= 0x3fffffff; 2052 txDmaData = &(txDescCache.cmdsts); 2053 txDmaLen = 
sizeof(txDescCache.cmdsts); 2054 txDmaFree = dmaDescFree; 2055 2056 if (doTxDmaWrite()) 2057 goto exit; 2058 2059 } else { /* this packet is totally done */ 2060 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n"); 2061 /* deal with the the packet that just finished */ 2062 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) { 2063 IpPtr ip(txPacket); 2064 if (txDescCache.extsts & EXTSTS_UDPPKT) { 2065 UdpPtr udp(ip); 2066 udp->sum(0); 2067 udp->sum(cksum(udp)); 2068 txUdpChecksums++; 2069 } else if (txDescCache.extsts & EXTSTS_TCPPKT) { 2070 TcpPtr tcp(ip); 2071 tcp->sum(0); 2072 tcp->sum(cksum(tcp)); 2073 txTcpChecksums++; 2074 } 2075 if (txDescCache.extsts & EXTSTS_IPPKT) { 2076 ip->sum(0); 2077 ip->sum(cksum(ip)); 2078 txIpChecksums++; 2079 } 2080 } 2081 2082 txPacket->length = txPacketBufPtr - txPacket->data; 2083 // this is just because the receive can't handle a 2084 // packet bigger want to make sure 2085 assert(txPacket->length <= 1514); 2086#ifndef NDEBUG 2087 bool success = 2088#endif 2089 txFifo.push(txPacket); 2090 assert(success); 2091 2092 /* 2093 * this following section is not tqo spec, but 2094 * functionally shouldn't be any different. normally, 2095 * the chip will wait til the transmit has occurred 2096 * before writing back the descriptor because it has 2097 * to wait to see that it was successfully transmitted 2098 * to decide whether to set CMDSTS_OK or not. 
2099 * however, in the simulator since it is always 2100 * successfully transmitted, and writing it exactly to 2101 * spec would complicate the code, we just do it here 2102 */ 2103 2104 txDescCache.cmdsts &= ~CMDSTS_OWN; 2105 txDescCache.cmdsts |= CMDSTS_OK; 2106 2107 DPRINTF(EthernetDesc, 2108 "txDesc writeback: cmdsts=%08x extsts=%08x\n", 2109 txDescCache.cmdsts, txDescCache.extsts); 2110 2111 txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts); 2112 txDmaAddr &= 0x3fffffff; 2113 txDmaData = &(txDescCache.cmdsts); 2114 txDmaLen = sizeof(txDescCache.cmdsts) + 2115 sizeof(txDescCache.extsts); 2116 txDmaFree = dmaDescFree; 2117 2118 descDmaWrites++; 2119 descDmaWrBytes += txDmaLen; 2120 2121 transmit(); 2122 txPacket = 0; 2123 2124 if (!txEnable) { 2125 DPRINTF(EthernetSM, "halting TX state machine\n"); 2126 txState = txIdle; 2127 goto exit; 2128 } else 2129 txState = txAdvance; 2130 2131 if (doTxDmaWrite()) 2132 goto exit; 2133 } 2134 } else { 2135 DPRINTF(EthernetSM, "this descriptor isn't done yet\n"); 2136 if (!txFifo.full()) { 2137 txState = txFragRead; 2138 2139 /* 2140 * The number of bytes transferred is either whatever 2141 * is left in the descriptor (txDescCnt), or if there 2142 * is not enough room in the fifo, just whatever room 2143 * is left in the fifo 2144 */ 2145 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail()); 2146 2147 txDmaAddr = txFragPtr & 0x3fffffff; 2148 txDmaData = txPacketBufPtr; 2149 txDmaLen = txXferLen; 2150 txDmaFree = dmaDataFree; 2151 2152 if (doTxDmaRead()) 2153 goto exit; 2154 } else { 2155 txState = txFifoBlock; 2156 transmit(); 2157 2158 goto exit; 2159 } 2160 2161 } 2162 break; 2163 2164 case txFragRead: 2165 if (txDmaState != dmaIdle) 2166 goto exit; 2167 2168 txPacketBufPtr += txXferLen; 2169 txFragPtr += txXferLen; 2170 txDescCnt -= txXferLen; 2171 txFifo.reserve(txXferLen); 2172 2173 txState = txFifoBlock; 2174 break; 2175 2176 case txDescWrite: 2177 if (txDmaState != dmaIdle) 2178 goto exit; 2179 2180 if 
(txDescCache.cmdsts & CMDSTS_INTR) 2181 devIntrPost(ISR_TXDESC); 2182 2183 txState = txAdvance; 2184 break; 2185 2186 case txAdvance: 2187 if (txDescCache.link == 0) { 2188 devIntrPost(ISR_TXIDLE); 2189 txState = txIdle; 2190 goto exit; 2191 } else { 2192 txState = txDescRead; 2193 regs.txdp = txDescCache.link; 2194 CTDD = false; 2195 2196 txDmaAddr = txDescCache.link & 0x3fffffff; 2197 txDmaData = &txDescCache; 2198 txDmaLen = sizeof(ns_desc); 2199 txDmaFree = dmaDescFree; 2200 2201 if (doTxDmaRead()) 2202 goto exit; 2203 } 2204 break; 2205 2206 default: 2207 panic("invalid state"); 2208 } 2209 2210 DPRINTF(EthernetSM, "entering next txState=%s\n", 2211 NsTxStateStrings[txState]); 2212 2213 goto next; 2214 2215 exit: 2216 /** 2217 * @todo do we want to schedule a future kick? 2218 */ 2219 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n", 2220 NsTxStateStrings[txState]); 2221} 2222 2223void 2224NSGigE::transferDone() 2225{ 2226 if (txFifo.empty()) { 2227 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n"); 2228 return; 2229 } 2230 2231 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n"); 2232 2233 if (txEvent.scheduled()) 2234 txEvent.reschedule(curTick + 1); 2235 else 2236 txEvent.schedule(curTick + 1); 2237} 2238 2239bool 2240NSGigE::rxFilter(const PacketPtr &packet) 2241{ 2242 EthPtr eth = packet; 2243 bool drop = true; 2244 string type; 2245 2246 const EthAddr &dst = eth->dst(); 2247 if (dst.unicast()) { 2248 // If we're accepting all unicast addresses 2249 if (acceptUnicast) 2250 drop = false; 2251 2252 // If we make a perfect match 2253 if (acceptPerfect && dst == rom.perfectMatch) 2254 drop = false; 2255 2256 if (acceptArp && eth->type() == ETH_TYPE_ARP) 2257 drop = false; 2258 2259 } else if (dst.broadcast()) { 2260 // if we're accepting broadcasts 2261 if (acceptBroadcast) 2262 drop = false; 2263 2264 } else if (dst.multicast()) { 2265 // if we're accepting all multicasts 2266 if (acceptMulticast) 2267 drop 
= false; 2268 2269 } 2270 2271 if (drop) { 2272 DPRINTF(Ethernet, "rxFilter drop\n"); 2273 DDUMP(EthernetData, packet->data, packet->length); 2274 } 2275 2276 return drop; 2277} 2278 2279bool 2280NSGigE::recvPacket(PacketPtr packet) 2281{ 2282 rxBytes += packet->length; 2283 rxPackets++; 2284 2285 DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n", 2286 rxFifo.avail()); 2287 2288 if (!rxEnable) { 2289 DPRINTF(Ethernet, "receive disabled...packet dropped\n"); 2290 debug_break(); 2291 interface->recvDone(); 2292 return true; 2293 } 2294 2295 if (rxFilterEnable && rxFilter(packet)) { 2296 DPRINTF(Ethernet, "packet filtered...dropped\n"); 2297 interface->recvDone(); 2298 return true; 2299 } 2300 2301 if (rxFifo.avail() < packet->length) { 2302#if TRACING_ON 2303 IpPtr ip(packet); 2304 TcpPtr tcp(ip); 2305 if (ip) { 2306 DPRINTF(Ethernet, 2307 "packet won't fit in receive buffer...pkt ID %d dropped\n", 2308 ip->id()); 2309 if (tcp) { 2310 DPRINTF(Ethernet, "Seq=%d\n", tcp->seq()); 2311 } 2312 } 2313#endif 2314 droppedPackets++; 2315 devIntrPost(ISR_RXORN); 2316 return false; 2317 } 2318 2319 rxFifo.push(packet); 2320 interface->recvDone(); 2321 2322 rxKick(); 2323 return true; 2324} 2325 2326//===================================================================== 2327// 2328// 2329void 2330NSGigE::serialize(ostream &os) 2331{ 2332 // Serialize the PciDev base class 2333 PciDev::serialize(os); 2334 2335 /* 2336 * Finalize any DMA events now. 
2337 */ 2338 if (rxDmaReadEvent.scheduled()) 2339 rxDmaReadCopy(); 2340 if (rxDmaWriteEvent.scheduled()) 2341 rxDmaWriteCopy(); 2342 if (txDmaReadEvent.scheduled()) 2343 txDmaReadCopy(); 2344 if (txDmaWriteEvent.scheduled()) 2345 txDmaWriteCopy(); 2346 2347 /* 2348 * Serialize the device registers 2349 */ 2350 SERIALIZE_SCALAR(regs.command); 2351 SERIALIZE_SCALAR(regs.config); 2352 SERIALIZE_SCALAR(regs.mear); 2353 SERIALIZE_SCALAR(regs.ptscr); 2354 SERIALIZE_SCALAR(regs.isr); 2355 SERIALIZE_SCALAR(regs.imr); 2356 SERIALIZE_SCALAR(regs.ier); 2357 SERIALIZE_SCALAR(regs.ihr); 2358 SERIALIZE_SCALAR(regs.txdp); 2359 SERIALIZE_SCALAR(regs.txdp_hi); 2360 SERIALIZE_SCALAR(regs.txcfg); 2361 SERIALIZE_SCALAR(regs.gpior); 2362 SERIALIZE_SCALAR(regs.rxdp); 2363 SERIALIZE_SCALAR(regs.rxdp_hi); 2364 SERIALIZE_SCALAR(regs.rxcfg); 2365 SERIALIZE_SCALAR(regs.pqcr); 2366 SERIALIZE_SCALAR(regs.wcsr); 2367 SERIALIZE_SCALAR(regs.pcr); 2368 SERIALIZE_SCALAR(regs.rfcr); 2369 SERIALIZE_SCALAR(regs.rfdr); 2370 SERIALIZE_SCALAR(regs.srr); 2371 SERIALIZE_SCALAR(regs.mibc); 2372 SERIALIZE_SCALAR(regs.vrcr); 2373 SERIALIZE_SCALAR(regs.vtcr); 2374 SERIALIZE_SCALAR(regs.vdr); 2375 SERIALIZE_SCALAR(regs.ccsr); 2376 SERIALIZE_SCALAR(regs.tbicr); 2377 SERIALIZE_SCALAR(regs.tbisr); 2378 SERIALIZE_SCALAR(regs.tanar); 2379 SERIALIZE_SCALAR(regs.tanlpar); 2380 SERIALIZE_SCALAR(regs.taner); 2381 SERIALIZE_SCALAR(regs.tesr); 2382 2383 SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN); 2384 2385 SERIALIZE_SCALAR(ioEnable); 2386 2387 /* 2388 * Serialize the data Fifos 2389 */ 2390 rxFifo.serialize("rxFifo", os); 2391 txFifo.serialize("txFifo", os); 2392 2393 /* 2394 * Serialize the various helper variables 2395 */ 2396 bool txPacketExists = txPacket; 2397 SERIALIZE_SCALAR(txPacketExists); 2398 if (txPacketExists) { 2399 txPacket->length = txPacketBufPtr - txPacket->data; 2400 txPacket->serialize("txPacket", os); 2401 uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data); 2402 
SERIALIZE_SCALAR(txPktBufPtr); 2403 } 2404 2405 bool rxPacketExists = rxPacket; 2406 SERIALIZE_SCALAR(rxPacketExists); 2407 if (rxPacketExists) { 2408 rxPacket->serialize("rxPacket", os); 2409 uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data); 2410 SERIALIZE_SCALAR(rxPktBufPtr); 2411 } 2412 2413 SERIALIZE_SCALAR(txXferLen); 2414 SERIALIZE_SCALAR(rxXferLen); 2415 2416 /* 2417 * Serialize DescCaches 2418 */ 2419 SERIALIZE_SCALAR(txDescCache.link); 2420 SERIALIZE_SCALAR(txDescCache.bufptr); 2421 SERIALIZE_SCALAR(txDescCache.cmdsts); 2422 SERIALIZE_SCALAR(txDescCache.extsts); 2423 SERIALIZE_SCALAR(rxDescCache.link); 2424 SERIALIZE_SCALAR(rxDescCache.bufptr); 2425 SERIALIZE_SCALAR(rxDescCache.cmdsts); 2426 SERIALIZE_SCALAR(rxDescCache.extsts); 2427 2428 /* 2429 * Serialize tx state machine 2430 */ 2431 int txState = this->txState; 2432 SERIALIZE_SCALAR(txState); 2433 SERIALIZE_SCALAR(txEnable); 2434 SERIALIZE_SCALAR(CTDD); 2435 SERIALIZE_SCALAR(txFragPtr); 2436 SERIALIZE_SCALAR(txDescCnt); 2437 int txDmaState = this->txDmaState; 2438 SERIALIZE_SCALAR(txDmaState); 2439 2440 /* 2441 * Serialize rx state machine 2442 */ 2443 int rxState = this->rxState; 2444 SERIALIZE_SCALAR(rxState); 2445 SERIALIZE_SCALAR(rxEnable); 2446 SERIALIZE_SCALAR(CRDD); 2447 SERIALIZE_SCALAR(rxPktBytes); 2448 SERIALIZE_SCALAR(rxFragPtr); 2449 SERIALIZE_SCALAR(rxDescCnt); 2450 int rxDmaState = this->rxDmaState; 2451 SERIALIZE_SCALAR(rxDmaState); 2452 2453 SERIALIZE_SCALAR(extstsEnable); 2454 2455 /* 2456 * If there's a pending transmit, store the time so we can 2457 * reschedule it later 2458 */ 2459 Tick transmitTick = txEvent.scheduled() ? 
txEvent.when() - curTick : 0; 2460 SERIALIZE_SCALAR(transmitTick); 2461 2462 /* 2463 * receive address filter settings 2464 */ 2465 SERIALIZE_SCALAR(rxFilterEnable); 2466 SERIALIZE_SCALAR(acceptBroadcast); 2467 SERIALIZE_SCALAR(acceptMulticast); 2468 SERIALIZE_SCALAR(acceptUnicast); 2469 SERIALIZE_SCALAR(acceptPerfect); 2470 SERIALIZE_SCALAR(acceptArp); 2471 2472 /* 2473 * Keep track of pending interrupt status. 2474 */ 2475 SERIALIZE_SCALAR(intrTick); 2476 SERIALIZE_SCALAR(cpuPendingIntr); 2477 Tick intrEventTick = 0; 2478 if (intrEvent) 2479 intrEventTick = intrEvent->when(); 2480 SERIALIZE_SCALAR(intrEventTick); 2481 2482} 2483 2484void 2485NSGigE::unserialize(Checkpoint *cp, const std::string §ion) 2486{ 2487 // Unserialize the PciDev base class 2488 PciDev::unserialize(cp, section); 2489 2490 UNSERIALIZE_SCALAR(regs.command); 2491 UNSERIALIZE_SCALAR(regs.config); 2492 UNSERIALIZE_SCALAR(regs.mear); 2493 UNSERIALIZE_SCALAR(regs.ptscr); 2494 UNSERIALIZE_SCALAR(regs.isr); 2495 UNSERIALIZE_SCALAR(regs.imr); 2496 UNSERIALIZE_SCALAR(regs.ier); 2497 UNSERIALIZE_SCALAR(regs.ihr); 2498 UNSERIALIZE_SCALAR(regs.txdp); 2499 UNSERIALIZE_SCALAR(regs.txdp_hi); 2500 UNSERIALIZE_SCALAR(regs.txcfg); 2501 UNSERIALIZE_SCALAR(regs.gpior); 2502 UNSERIALIZE_SCALAR(regs.rxdp); 2503 UNSERIALIZE_SCALAR(regs.rxdp_hi); 2504 UNSERIALIZE_SCALAR(regs.rxcfg); 2505 UNSERIALIZE_SCALAR(regs.pqcr); 2506 UNSERIALIZE_SCALAR(regs.wcsr); 2507 UNSERIALIZE_SCALAR(regs.pcr); 2508 UNSERIALIZE_SCALAR(regs.rfcr); 2509 UNSERIALIZE_SCALAR(regs.rfdr); 2510 UNSERIALIZE_SCALAR(regs.srr); 2511 UNSERIALIZE_SCALAR(regs.mibc); 2512 UNSERIALIZE_SCALAR(regs.vrcr); 2513 UNSERIALIZE_SCALAR(regs.vtcr); 2514 UNSERIALIZE_SCALAR(regs.vdr); 2515 UNSERIALIZE_SCALAR(regs.ccsr); 2516 UNSERIALIZE_SCALAR(regs.tbicr); 2517 UNSERIALIZE_SCALAR(regs.tbisr); 2518 UNSERIALIZE_SCALAR(regs.tanar); 2519 UNSERIALIZE_SCALAR(regs.tanlpar); 2520 UNSERIALIZE_SCALAR(regs.taner); 2521 UNSERIALIZE_SCALAR(regs.tesr); 2522 2523 
/**
 * Restore device state from a checkpoint.  Field order must stay in
 * lockstep with NSGigE::serialize.
 */
void
NSGigE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Unserialize the PciDev base class
    PciDev::unserialize(cp, section);

    UNSERIALIZE_SCALAR(regs.command);
    UNSERIALIZE_SCALAR(regs.config);
    UNSERIALIZE_SCALAR(regs.mear);
    UNSERIALIZE_SCALAR(regs.ptscr);
    UNSERIALIZE_SCALAR(regs.isr);
    UNSERIALIZE_SCALAR(regs.imr);
    UNSERIALIZE_SCALAR(regs.ier);
    UNSERIALIZE_SCALAR(regs.ihr);
    UNSERIALIZE_SCALAR(regs.txdp);
    UNSERIALIZE_SCALAR(regs.txdp_hi);
    UNSERIALIZE_SCALAR(regs.txcfg);
    UNSERIALIZE_SCALAR(regs.gpior);
    UNSERIALIZE_SCALAR(regs.rxdp);
    UNSERIALIZE_SCALAR(regs.rxdp_hi);
    UNSERIALIZE_SCALAR(regs.rxcfg);
    UNSERIALIZE_SCALAR(regs.pqcr);
    UNSERIALIZE_SCALAR(regs.wcsr);
    UNSERIALIZE_SCALAR(regs.pcr);
    UNSERIALIZE_SCALAR(regs.rfcr);
    UNSERIALIZE_SCALAR(regs.rfdr);
    UNSERIALIZE_SCALAR(regs.srr);
    UNSERIALIZE_SCALAR(regs.mibc);
    UNSERIALIZE_SCALAR(regs.vrcr);
    UNSERIALIZE_SCALAR(regs.vtcr);
    UNSERIALIZE_SCALAR(regs.vdr);
    UNSERIALIZE_SCALAR(regs.ccsr);
    UNSERIALIZE_SCALAR(regs.tbicr);
    UNSERIALIZE_SCALAR(regs.tbisr);
    UNSERIALIZE_SCALAR(regs.tanar);
    UNSERIALIZE_SCALAR(regs.tanlpar);
    UNSERIALIZE_SCALAR(regs.taner);
    UNSERIALIZE_SCALAR(regs.tesr);

    UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);

    UNSERIALIZE_SCALAR(ioEnable);

    /*
     * unserialize the data fifos
     */
    rxFifo.unserialize("rxFifo", cp, section);
    txFifo.unserialize("txFifo", cp, section);

    /*
     * unserialize the various helper variables
     */
    // Rebuild in-flight packet buffers and restore the write cursor
    // as an offset into the freshly allocated data buffer.
    bool txPacketExists;
    UNSERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket = new PacketData(16384);
        txPacket->unserialize("txPacket", cp, section);
        uint32_t txPktBufPtr;
        UNSERIALIZE_SCALAR(txPktBufPtr);
        txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
    } else
        txPacket = 0;

    bool rxPacketExists;
    UNSERIALIZE_SCALAR(rxPacketExists);
    rxPacket = 0;   // redundant with the else branch below, but harmless
    if (rxPacketExists) {
        rxPacket = new PacketData(16384);
        rxPacket->unserialize("rxPacket", cp, section);
        uint32_t rxPktBufPtr;
        UNSERIALIZE_SCALAR(rxPktBufPtr);
        rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
    } else
        rxPacket = 0;

    UNSERIALIZE_SCALAR(txXferLen);
    UNSERIALIZE_SCALAR(rxXferLen);

    /*
     * Unserialize DescCaches
     */
    UNSERIALIZE_SCALAR(txDescCache.link);
    UNSERIALIZE_SCALAR(txDescCache.bufptr);
    UNSERIALIZE_SCALAR(txDescCache.cmdsts);
    UNSERIALIZE_SCALAR(txDescCache.extsts);
    UNSERIALIZE_SCALAR(rxDescCache.link);
    UNSERIALIZE_SCALAR(rxDescCache.bufptr);
    UNSERIALIZE_SCALAR(rxDescCache.cmdsts);
    UNSERIALIZE_SCALAR(rxDescCache.extsts);

    /*
     * unserialize tx state machine
     */
    // Enums were checkpointed as plain ints; cast them back.
    int txState;
    UNSERIALIZE_SCALAR(txState);
    this->txState = (TxState) txState;
    UNSERIALIZE_SCALAR(txEnable);
    UNSERIALIZE_SCALAR(CTDD);
    UNSERIALIZE_SCALAR(txFragPtr);
    UNSERIALIZE_SCALAR(txDescCnt);
    int txDmaState;
    UNSERIALIZE_SCALAR(txDmaState);
    this->txDmaState = (DmaState) txDmaState;

    /*
     * unserialize rx state machine
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    this->rxState = (RxState) rxState;
    UNSERIALIZE_SCALAR(rxEnable);
    UNSERIALIZE_SCALAR(CRDD);
    UNSERIALIZE_SCALAR(rxPktBytes);
    UNSERIALIZE_SCALAR(rxFragPtr);
    UNSERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState;
    UNSERIALIZE_SCALAR(rxDmaState);
    this->rxDmaState = (DmaState) rxDmaState;

    UNSERIALIZE_SCALAR(extstsEnable);

    /*
     * If there's a pending transmit, reschedule it now
     */
    // transmitTick was stored relative to curTick at checkpoint time.
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        txEvent.schedule(curTick + transmitTick);

    /*
     * unserialize receive address filter settings
     */
    UNSERIALIZE_SCALAR(rxFilterEnable);
    UNSERIALIZE_SCALAR(acceptBroadcast);
    UNSERIALIZE_SCALAR(acceptMulticast);
    UNSERIALIZE_SCALAR(acceptUnicast);
    UNSERIALIZE_SCALAR(acceptPerfect);
    UNSERIALIZE_SCALAR(acceptArp);

    /*
     * Keep track of pending interrupt status.
     */
    // intrEventTick was stored as an absolute tick (see serialize).
    UNSERIALIZE_SCALAR(intrTick);
    UNSERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick;
    UNSERIALIZE_SCALAR(intrEventTick);
    if (intrEventTick) {
        intrEvent = new IntrEvent(this, true);
        intrEvent->schedule(intrEventTick);
    }

    /*
     * re-add addrRanges to bus bridges
     */
    if (pioInterface) {
        pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
        pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));
    }
}

/**
 * Timing hook for programmed-I/O accesses: charge a fixed PIO latency.
 */
Tick
NSGigE::cacheAccess(MemReqPtr &req)
{
    DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
            req->paddr, req->paddr - addr);
    return curTick + pioLatency;
}

BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    SimObjectParam<EtherInt *> peer;
    SimObjectParam<NSGigE *> device;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device,
"Ethernet device of this interface") 2663 2664END_INIT_SIM_OBJECT_PARAMS(NSGigEInt) 2665 2666CREATE_SIM_OBJECT(NSGigEInt) 2667{ 2668 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device); 2669 2670 EtherInt *p = (EtherInt *)peer; 2671 if (p) { 2672 dev_int->setPeer(p); 2673 p->setPeer(dev_int); 2674 } 2675 2676 return dev_int; 2677} 2678 2679REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt) 2680 2681 2682BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE) 2683 2684 Param<Addr> addr; 2685 Param<Tick> tx_delay; 2686 Param<Tick> rx_delay; 2687 Param<Tick> intr_delay; 2688 SimObjectParam<MemoryController *> mmu; 2689 SimObjectParam<PhysicalMemory *> physmem; 2690 Param<bool> rx_filter; 2691 Param<string> hardware_address; 2692 SimObjectParam<Bus*> io_bus; 2693 SimObjectParam<Bus*> payload_bus; 2694 SimObjectParam<HierParams *> hier; 2695 Param<Tick> pio_latency; 2696 Param<bool> dma_desc_free; 2697 Param<bool> dma_data_free; 2698 Param<Tick> dma_read_delay; 2699 Param<Tick> dma_write_delay; 2700 Param<Tick> dma_read_factor; 2701 Param<Tick> dma_write_factor; 2702 SimObjectParam<PciConfigAll *> configspace; 2703 SimObjectParam<PciConfigData *> configdata; 2704 SimObjectParam<Platform *> platform; 2705 Param<uint32_t> pci_bus; 2706 Param<uint32_t> pci_dev; 2707 Param<uint32_t> pci_func; 2708 Param<uint32_t> tx_fifo_size; 2709 Param<uint32_t> rx_fifo_size; 2710 2711END_DECLARE_SIM_OBJECT_PARAMS(NSGigE) 2712 2713BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE) 2714 2715 INIT_PARAM(addr, "Device Address"), 2716 INIT_PARAM_DFLT(tx_delay, "Transmit Delay", 1000), 2717 INIT_PARAM_DFLT(rx_delay, "Receive Delay", 1000), 2718 INIT_PARAM_DFLT(intr_delay, "Interrupt Delay in microseconds", 0), 2719 INIT_PARAM(mmu, "Memory Controller"), 2720 INIT_PARAM(physmem, "Physical Memory"), 2721 INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true), 2722 INIT_PARAM_DFLT(hardware_address, "Ethernet Hardware Address", 2723 "00:99:00:00:00:01"), 2724 INIT_PARAM_DFLT(io_bus, "The IO Bus to attach to for headers", 
NULL), 2725 INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL), 2726 INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams), 2727 INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1), 2728 INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false), 2729 INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false), 2730 INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0), 2731 INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0), 2732 INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0), 2733 INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0), 2734 INIT_PARAM(configspace, "PCI Configspace"), 2735 INIT_PARAM(configdata, "PCI Config data"), 2736 INIT_PARAM(platform, "Platform"), 2737 INIT_PARAM(pci_bus, "PCI bus"), 2738 INIT_PARAM(pci_dev, "PCI device number"), 2739 INIT_PARAM(pci_func, "PCI function code"), 2740 INIT_PARAM_DFLT(tx_fifo_size, "max size in bytes of txFifo", 131072), 2741 INIT_PARAM_DFLT(rx_fifo_size, "max size in bytes of rxFifo", 131072) 2742 2743END_INIT_SIM_OBJECT_PARAMS(NSGigE) 2744 2745 2746CREATE_SIM_OBJECT(NSGigE) 2747{ 2748 NSGigE::Params *params = new NSGigE::Params; 2749 2750 params->name = getInstanceName(); 2751 params->mmu = mmu; 2752 params->configSpace = configspace; 2753 params->configData = configdata; 2754 params->plat = platform; 2755 params->busNum = pci_bus; 2756 params->deviceNum = pci_dev; 2757 params->functionNum = pci_func; 2758 2759 params->intr_delay = intr_delay; 2760 params->pmem = physmem; 2761 params->tx_delay = tx_delay; 2762 params->rx_delay = rx_delay; 2763 params->hier = hier; 2764 params->header_bus = io_bus; 2765 params->payload_bus = payload_bus; 2766 params->pio_latency = pio_latency; 2767 params->dma_desc_free = dma_desc_free; 2768 params->dma_data_free = dma_data_free; 2769 params->dma_read_delay = dma_read_delay; 2770 params->dma_write_delay = dma_write_delay; 2771 params->dma_read_factor = 
dma_read_factor; 2772 params->dma_write_factor = dma_write_factor; 2773 params->rx_filter = rx_filter; 2774 params->eaddr = hardware_address; 2775 params->tx_fifo_size = tx_fifo_size; 2776 params->rx_fifo_size = rx_fifo_size; 2777 return new NSGigE(params); 2778} 2779 2780REGISTER_SIM_OBJECT("NSGigE", NSGigE) 2781