ns_gige.cc revision 1893
1/* 2 * Copyright (c) 2004-2005 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29/** @file 30 * Device module for modelling the National Semiconductor 31 * DP83820 ethernet controller. Does not support priority queueing 32 */ 33#include <cstdio> 34#include <deque> 35#include <string> 36 37#include "base/inet.hh" 38#include "cpu/exec_context.hh" 39#include "dev/etherlink.hh" 40#include "dev/ns_gige.hh" 41#include "dev/pciconfigall.hh" 42#include "mem/bus/bus.hh" 43#include "mem/bus/dma_interface.hh" 44#include "mem/bus/pio_interface.hh" 45#include "mem/bus/pio_interface_impl.hh" 46#include "mem/functional/memory_control.hh" 47#include "mem/functional/physical.hh" 48#include "sim/builder.hh" 49#include "sim/debug.hh" 50#include "sim/host.hh" 51#include "sim/stats.hh" 52#include "targetarch/vtophys.hh" 53 54const char *NsRxStateStrings[] = 55{ 56 "rxIdle", 57 "rxDescRefr", 58 "rxDescRead", 59 "rxFifoBlock", 60 "rxFragWrite", 61 "rxDescWrite", 62 "rxAdvance" 63}; 64 65const char *NsTxStateStrings[] = 66{ 67 "txIdle", 68 "txDescRefr", 69 "txDescRead", 70 "txFifoBlock", 71 "txFragRead", 72 "txDescWrite", 73 "txAdvance" 74}; 75 76const char *NsDmaState[] = 77{ 78 "dmaIdle", 79 "dmaReading", 80 "dmaWriting", 81 "dmaReadWaiting", 82 "dmaWriteWaiting" 83}; 84 85using namespace std; 86using namespace Net; 87 88/////////////////////////////////////////////////////////////////////// 89// 90// NSGigE PCI Device 91// 92NSGigE::NSGigE(Params *p) 93 : PciDev(p), ioEnable(false), 94 txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size), 95 txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL), 96 txXferLen(0), rxXferLen(0), clock(p->clock), 97 txState(txIdle), txEnable(false), CTDD(false), 98 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle), 99 rxEnable(false), CRDD(false), rxPktBytes(0), 100 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false), 101 eepromState(eepromStart), rxDmaReadEvent(this), rxDmaWriteEvent(this), 102 txDmaReadEvent(this), txDmaWriteEvent(this), 103 dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free), 104 txDelay(p->tx_delay), rxDelay(p->rx_delay), 105 rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this), 106 txEvent(this), rxFilterEnable(p->rx_filter), acceptBroadcast(false), 107 acceptMulticast(false), acceptUnicast(false), 108 acceptPerfect(false), acceptArp(false), multicastHashEnable(false), 109 physmem(p->pmem), intrTick(0), cpuPendingIntr(false), 110 intrEvent(0), interface(0) 111{ 112 if (p->header_bus) { 113 pioInterface = newPioInterface(name() + ".pio", p->hier, 114 p->header_bus, this, 115 &NSGigE::cacheAccess); 116 117 pioLatency = p->pio_latency * p->header_bus->clockRate; 118 119 if (p->payload_bus) 120 dmaInterface = new DMAInterface<Bus>(name() + ".dma", 121 p->header_bus, 122 p->payload_bus, 1, 123 p->dma_no_allocate); 124 else 125 dmaInterface = new DMAInterface<Bus>(name() + ".dma", 126 p->header_bus, 127 p->header_bus, 1, 128 p->dma_no_allocate); 129 } else if (p->payload_bus) { 130 pioInterface = newPioInterface(name() + ".pio2", p->hier, 131 p->payload_bus, this, 132 &NSGigE::cacheAccess); 133 134 pioLatency = p->pio_latency * p->payload_bus->clockRate; 135 136 dmaInterface = new DMAInterface<Bus>(name() + ".dma", 137 p->payload_bus, 138 p->payload_bus, 1, 139 p->dma_no_allocate); 140 } 141 142 143 intrDelay = p->intr_delay; 144 dmaReadDelay = p->dma_read_delay; 145 dmaWriteDelay = p->dma_write_delay; 146 dmaReadFactor = p->dma_read_factor; 147 dmaWriteFactor = p->dma_write_factor; 148 149 regsReset(); 150 memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN); 151} 152 153NSGigE::~NSGigE() 154{} 155 156void 157NSGigE::regStats() 158{ 159 txBytes 160 .name(name() + ".txBytes") 161 .desc("Bytes Transmitted") 162 .prereq(txBytes) 163 ; 164 165 rxBytes 166 .name(name() + ".rxBytes") 167 .desc("Bytes Received") 168 .prereq(rxBytes) 169 ; 170 171 txPackets 172 .name(name() + ".txPackets") 173 .desc("Number of Packets Transmitted") 174 .prereq(txBytes) 175 ; 176 177 rxPackets 178 .name(name() + ".rxPackets") 179 .desc("Number of Packets Received") 180 .prereq(rxBytes) 181 ; 182 183 txIpChecksums 184 .name(name() + ".txIpChecksums") 185 .desc("Number of tx IP Checksums done by device") 186 .precision(0) 187 .prereq(txBytes) 188 ; 189 190 rxIpChecksums 191 .name(name() + ".rxIpChecksums") 192 .desc("Number of rx IP Checksums done by device") 193 .precision(0) 194 .prereq(rxBytes) 195 ; 196 197 txTcpChecksums 198 .name(name() + ".txTcpChecksums") 199 .desc("Number of tx TCP Checksums done by device") 200 .precision(0) 201 .prereq(txBytes) 202 ; 203 204 rxTcpChecksums 205 .name(name() + ".rxTcpChecksums") 206 .desc("Number of rx TCP Checksums done by device") 207 .precision(0) 208 .prereq(rxBytes) 209 ; 210 211 txUdpChecksums 212 .name(name() + ".txUdpChecksums") 213 .desc("Number of tx UDP Checksums done by device") 214 .precision(0) 215 .prereq(txBytes) 216 ; 217 218 rxUdpChecksums 219 .name(name() + ".rxUdpChecksums") 220 .desc("Number of rx UDP Checksums done by device") 221 .precision(0) 222 .prereq(rxBytes) 223 ; 224 225 descDmaReads 226 .name(name() + ".descDMAReads") 227 .desc("Number of descriptors the device read w/ DMA") 228 .precision(0) 229 ; 230 231 descDmaWrites 232 .name(name() + ".descDMAWrites") 233 .desc("Number of descriptors the device wrote w/ DMA") 234 .precision(0) 235 ; 236 237 descDmaRdBytes 238 .name(name() + ".descDmaReadBytes") 239 .desc("number of descriptor bytes read w/ DMA") 240 .precision(0) 241 ; 242 243 descDmaWrBytes 244 .name(name() + ".descDmaWriteBytes") 245 .desc("number of descriptor bytes write w/ DMA") 246 .precision(0) 247 ; 248 249 txBandwidth 250 .name(name() + ".txBandwidth") 251 .desc("Transmit Bandwidth (bits/s)") 252 .precision(0) 253 .prereq(txBytes) 254 ; 255 256 rxBandwidth 257 .name(name() + ".rxBandwidth") 258 .desc("Receive Bandwidth (bits/s)") 259 .precision(0) 260 .prereq(rxBytes) 261 ; 262 263 totBandwidth 264 .name(name() + ".totBandwidth") 265 .desc("Total Bandwidth (bits/s)") 266 .precision(0) 267 .prereq(totBytes) 268 ; 269 270 totPackets 271 .name(name() + ".totPackets") 272 .desc("Total Packets") 273 .precision(0) 274 .prereq(totBytes) 275 ; 276 277 totBytes 278 .name(name() + ".totBytes") 279 .desc("Total Bytes") 280 .precision(0) 281 .prereq(totBytes) 282 ; 283 284 totPacketRate 285 .name(name() + ".totPPS") 286 .desc("Total Tranmission Rate (packets/s)") 287 .precision(0) 288 .prereq(totBytes) 289 ; 290 291 txPacketRate 292 .name(name() + ".txPPS") 293 .desc("Packet Tranmission Rate (packets/s)") 294 .precision(0) 295 .prereq(txBytes) 296 ; 297 298 rxPacketRate 299 .name(name() + ".rxPPS") 300 .desc("Packet Reception Rate (packets/s)") 301 .precision(0) 302 .prereq(rxBytes) 303 ; 304 305 postedSwi 306 .name(name() + ".postedSwi") 307 .desc("number of software interrupts posted to CPU") 308 .precision(0) 309 ; 310 311 totalSwi 312 .name(name() + ".totalSwi") 313 .desc("number of total Swi written to ISR") 314 .precision(0) 315 ; 316 317 coalescedSwi 318 .name(name() + ".coalescedSwi") 319 .desc("average number of Swi's coalesced into each post") 320 .precision(0) 321 ; 322 323 postedRxIdle 324 .name(name() + ".postedRxIdle") 325 .desc("number of rxIdle interrupts posted to CPU") 326 .precision(0) 327 ; 328 329 totalRxIdle 330 .name(name() + ".totalRxIdle") 331 .desc("number of total RxIdle written to ISR") 332 .precision(0) 333 ; 334 335 coalescedRxIdle 336 .name(name() + ".coalescedRxIdle") 337 .desc("average number of RxIdle's coalesced into each post") 338 .precision(0) 339 ; 340 341 postedRxOk 342 .name(name() + ".postedRxOk") 343 .desc("number of RxOk interrupts posted to CPU") 344 .precision(0) 345 ; 346 347 totalRxOk 348 .name(name() + ".totalRxOk") 349 .desc("number of total RxOk written to ISR") 350 .precision(0) 351 ; 352 353 coalescedRxOk 354 .name(name() + ".coalescedRxOk") 355 .desc("average number of RxOk's coalesced into each post") 356 .precision(0) 357 ; 358 359 postedRxDesc 360 .name(name() + ".postedRxDesc") 361 .desc("number of RxDesc interrupts posted to CPU") 362 .precision(0) 363 ; 364 365 totalRxDesc 366 .name(name() + ".totalRxDesc") 367 .desc("number of total RxDesc written to ISR") 368 .precision(0) 369 ; 370 371 coalescedRxDesc 372 .name(name() + ".coalescedRxDesc") 373 .desc("average number of RxDesc's coalesced into each post") 374 .precision(0) 375 ; 376 377 postedTxOk 378 .name(name() + ".postedTxOk") 379 .desc("number of TxOk interrupts posted to CPU") 380 .precision(0) 381 ; 382 383 totalTxOk 384 .name(name() + ".totalTxOk") 385 .desc("number of total TxOk written to ISR") 386 .precision(0) 387 ; 388 389 coalescedTxOk 390 .name(name() + ".coalescedTxOk") 391 .desc("average number of TxOk's coalesced into each post") 392 .precision(0) 393 ; 394 395 postedTxIdle 396 .name(name() + ".postedTxIdle") 397 .desc("number of TxIdle interrupts posted to CPU") 398 .precision(0) 399 ; 400 401 totalTxIdle 402 .name(name() + ".totalTxIdle") 403 .desc("number of total TxIdle written to ISR") 404 .precision(0) 405 ; 406 407 coalescedTxIdle 408 .name(name() + ".coalescedTxIdle") 409 .desc("average number of TxIdle's coalesced into each post") 410 .precision(0) 411 ; 412 413 postedTxDesc 414 .name(name() + ".postedTxDesc") 415 .desc("number of TxDesc interrupts posted to CPU") 416 .precision(0) 417 ; 418 419 totalTxDesc 420 .name(name() + ".totalTxDesc") 421 .desc("number of total TxDesc written to ISR") 422 .precision(0) 423 ; 424 425 coalescedTxDesc 426 .name(name() + ".coalescedTxDesc") 427 .desc("average number of TxDesc's coalesced into each post") 428 .precision(0) 429 ; 430 431 postedRxOrn 432 .name(name() + ".postedRxOrn") 433 .desc("number of RxOrn posted to CPU") 434 .precision(0) 435 ; 436 437 totalRxOrn 438 .name(name() + ".totalRxOrn") 439 .desc("number of total RxOrn written to ISR") 440 .precision(0) 441 ; 442 443 coalescedRxOrn 444 .name(name() + ".coalescedRxOrn") 445 .desc("average number of RxOrn's coalesced into each post") 446 .precision(0) 447 ; 448 449 coalescedTotal 450 .name(name() + ".coalescedTotal") 451 .desc("average number of interrupts coalesced into each post") 452 .precision(0) 453 ; 454 455 postedInterrupts 456 .name(name() + ".postedInterrupts") 457 .desc("number of posts to CPU") 458 .precision(0) 459 ; 460 461 droppedPackets 462 .name(name() + ".droppedPackets") 463 .desc("number of packets dropped") 464 .precision(0) 465 ; 466 467 coalescedSwi = totalSwi / postedInterrupts; 468 coalescedRxIdle = totalRxIdle / postedInterrupts; 469 coalescedRxOk = totalRxOk / postedInterrupts; 470 coalescedRxDesc = totalRxDesc / postedInterrupts; 471 coalescedTxOk = totalTxOk / postedInterrupts; 472 coalescedTxIdle = totalTxIdle / postedInterrupts; 473 coalescedTxDesc = totalTxDesc / postedInterrupts; 474 coalescedRxOrn = totalRxOrn / postedInterrupts; 475 476 coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc + 477 totalTxOk + totalTxIdle + totalTxDesc + 478 totalRxOrn) / postedInterrupts; 479 480 txBandwidth = txBytes * Stats::constant(8) / simSeconds; 481 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds; 482 totBandwidth = txBandwidth + rxBandwidth; 483 totBytes = txBytes + rxBytes; 484 totPackets = txPackets + rxPackets; 485 486 txPacketRate = txPackets / simSeconds; 487 rxPacketRate = rxPackets / simSeconds; 488} 489 490/** 491 * This is to read the PCI general configuration registers 492 */ 493void 494NSGigE::readConfig(int offset, int size, uint8_t *data) 495{ 496 if (offset < PCI_DEVICE_SPECIFIC) 497 PciDev::readConfig(offset, size, data); 498 else 499 panic("Device specific PCI config space not implemented!\n"); 500} 501 502/** 503 * This is to write to the PCI general configuration registers 504 */ 505void 506NSGigE::writeConfig(int offset, int size, const uint8_t* data) 507{ 508 if (offset < PCI_DEVICE_SPECIFIC) 509 PciDev::writeConfig(offset, size, data); 510 else 511 panic("Device specific PCI config space not implemented!\n"); 512 513 // Need to catch writes to BARs to update the PIO interface 514 switch (offset) { 515 // seems to work fine without all these PCI settings, but i 516 // put in the IO to double check, an assertion will fail if we 517 // need to properly implement it 518 case PCI_COMMAND: 519 if (config.data[offset] & PCI_CMD_IOSE) 520 ioEnable = true; 521 else 522 ioEnable = false; 523 524#if 0 525 if (config.data[offset] & PCI_CMD_BME) { 526 bmEnabled = true; 527 } 528 else { 529 bmEnabled = false; 530 } 531 532 if (config.data[offset] & PCI_CMD_MSE) { 533 memEnable = true; 534 } 535 else { 536 memEnable = false; 537 } 538#endif 539 break; 540 541 case PCI0_BASE_ADDR0: 542 if (BARAddrs[0] != 0) { 543 if (pioInterface) 544 pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0])); 545 546 BARAddrs[0] &= EV5::PAddrUncachedMask; 547 } 548 break; 549 case PCI0_BASE_ADDR1: 550 if (BARAddrs[1] != 0) { 551 if (pioInterface) 552 pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1])); 553 554 BARAddrs[1] &= EV5::PAddrUncachedMask; 555 } 556 break; 557 } 558} 559 560/** 561 * This reads the device registers, which are detailed in the NS83820 562 * spec sheet 563 */ 564Fault 565NSGigE::read(MemReqPtr &req, uint8_t *data) 566{ 567 assert(ioEnable); 568 569 //The mask is to give you only the offset into the device register file 570 Addr daddr = req->paddr & 0xfff; 571 DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n", 572 daddr, req->paddr, req->vaddr, req->size); 573 574 575 // there are some reserved registers, you can see ns_gige_reg.h and 576 // the spec sheet for details 577 if (daddr > LAST && daddr <= RESERVED) { 578 panic("Accessing reserved register"); 579 } else if (daddr > RESERVED && daddr <= 0x3FC) { 580 readConfig(daddr & 0xff, req->size, data); 581 return No_Fault; 582 } else if (daddr >= MIB_START && daddr <= MIB_END) { 583 // don't implement all the MIB's. hopefully the kernel 584 // doesn't actually DEPEND upon their values 585 // MIB are just hardware stats keepers 586 uint32_t ® = *(uint32_t *) data; 587 reg = 0; 588 return No_Fault; 589 } else if (daddr > 0x3FC) 590 panic("Something is messed up!\n"); 591 592 switch (req->size) { 593 case sizeof(uint32_t): 594 { 595 uint32_t ® = *(uint32_t *)data; 596 uint16_t rfaddr; 597 598 switch (daddr) { 599 case CR: 600 reg = regs.command; 601 //these are supposed to be cleared on a read 602 reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR); 603 break; 604 605 case CFGR: 606 reg = regs.config; 607 break; 608 609 case MEAR: 610 reg = regs.mear; 611 break; 612 613 case PTSCR: 614 reg = regs.ptscr; 615 break; 616 617 case ISR: 618 reg = regs.isr; 619 devIntrClear(ISR_ALL); 620 break; 621 622 case IMR: 623 reg = regs.imr; 624 break; 625 626 case IER: 627 reg = regs.ier; 628 break; 629 630 case IHR: 631 reg = regs.ihr; 632 break; 633 634 case TXDP: 635 reg = regs.txdp; 636 break; 637 638 case TXDP_HI: 639 reg = regs.txdp_hi; 640 break; 641 642 case TX_CFG: 643 reg = regs.txcfg; 644 break; 645 646 case GPIOR: 647 reg = regs.gpior; 648 break; 649 650 case RXDP: 651 reg = regs.rxdp; 652 break; 653 654 case RXDP_HI: 655 reg = regs.rxdp_hi; 656 break; 657 658 case RX_CFG: 659 reg = regs.rxcfg; 660 break; 661 662 case PQCR: 663 reg = regs.pqcr; 664 break; 665 666 case WCSR: 667 reg = regs.wcsr; 668 break; 669 670 case PCR: 671 reg = regs.pcr; 672 break; 673 674 // see the spec sheet for how RFCR and RFDR work 675 // basically, you write to RFCR to tell the machine 676 // what you want to do next, then you act upon RFDR, 677 // and the device will be prepared b/c of what you 678 // wrote to RFCR 679 case RFCR: 680 reg = regs.rfcr; 681 break; 682 683 case RFDR: 684 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR); 685 switch (rfaddr) { 686 // Read from perfect match ROM octets 687 case 0x000: 688 reg = rom.perfectMatch[1]; 689 reg = reg << 8; 690 reg += rom.perfectMatch[0]; 691 break; 692 case 0x002: 693 reg = rom.perfectMatch[3] << 8; 694 reg += rom.perfectMatch[2]; 695 break; 696 case 0x004: 697 reg = rom.perfectMatch[5] << 8; 698 reg += rom.perfectMatch[4]; 699 break; 700 default: 701 // Read filter hash table 702 if (rfaddr >= FHASH_ADDR && 703 rfaddr < FHASH_ADDR + FHASH_SIZE) { 704 705 // Only word-aligned reads supported 706 if (rfaddr % 2) 707 panic("unaligned read from filter hash table!"); 708 709 reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8; 710 reg += rom.filterHash[rfaddr - FHASH_ADDR]; 711 break; 712 } 713 714 panic("reading RFDR for something other than pattern" 715 " matching or hashing! %#x\n", rfaddr); 716 } 717 break; 718 719 case SRR: 720 reg = regs.srr; 721 break; 722 723 case MIBC: 724 reg = regs.mibc; 725 reg &= ~(MIBC_MIBS | MIBC_ACLR); 726 break; 727 728 case VRCR: 729 reg = regs.vrcr; 730 break; 731 732 case VTCR: 733 reg = regs.vtcr; 734 break; 735 736 case VDR: 737 reg = regs.vdr; 738 break; 739 740 case CCSR: 741 reg = regs.ccsr; 742 break; 743 744 case TBICR: 745 reg = regs.tbicr; 746 break; 747 748 case TBISR: 749 reg = regs.tbisr; 750 break; 751 752 case TANAR: 753 reg = regs.tanar; 754 break; 755 756 case TANLPAR: 757 reg = regs.tanlpar; 758 break; 759 760 case TANER: 761 reg = regs.taner; 762 break; 763 764 case TESR: 765 reg = regs.tesr; 766 break; 767 768 case M5REG: 769 reg = params()->m5reg; 770 break; 771 772 default: 773 panic("reading unimplemented register: addr=%#x", daddr); 774 } 775 776 DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n", 777 daddr, reg, reg); 778 } 779 break; 780 781 default: 782 panic("accessing register with invalid size: addr=%#x, size=%d", 783 daddr, req->size); 784 } 785 786 return No_Fault; 787} 788 789Fault 790NSGigE::write(MemReqPtr &req, const uint8_t *data) 791{ 792 assert(ioEnable); 793 794 Addr daddr = req->paddr & 0xfff; 795 DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n", 796 daddr, req->paddr, req->vaddr, req->size); 797 798 if (daddr > LAST && daddr <= RESERVED) { 799 panic("Accessing reserved register"); 800 } else if (daddr > RESERVED && daddr <= 0x3FC) { 801 writeConfig(daddr & 0xff, req->size, data); 802 return No_Fault; 803 } else if (daddr > 0x3FC) 804 panic("Something is messed up!\n"); 805 806 if (req->size == sizeof(uint32_t)) { 807 uint32_t reg = *(uint32_t *)data; 808 uint16_t rfaddr; 809 810 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg); 811 812 switch (daddr) { 813 case CR: 814 regs.command = reg; 815 if (reg & CR_TXD) { 816 txEnable = false; 817 } else if (reg & CR_TXE) { 818 txEnable = true; 819 820 // the kernel is enabling the transmit machine 821 if (txState == txIdle) 822 txKick(); 823 } 824 825 if (reg & CR_RXD) { 826 rxEnable = false; 827 } else if (reg & CR_RXE) { 828 rxEnable = true; 829 830 if (rxState == rxIdle) 831 rxKick(); 832 } 833 834 if (reg & CR_TXR) 835 txReset(); 836 837 if (reg & CR_RXR) 838 rxReset(); 839 840 if (reg & CR_SWI) 841 devIntrPost(ISR_SWI); 842 843 if (reg & CR_RST) { 844 txReset(); 845 rxReset(); 846 847 regsReset(); 848 } 849 break; 850 851 case CFGR: 852 if (reg & CFGR_LNKSTS || 853 reg & CFGR_SPDSTS || 854 reg & CFGR_DUPSTS || 855 reg & CFGR_RESERVED || 856 reg & CFGR_T64ADDR || 857 reg & CFGR_PCI64_DET) 858 859 // First clear all writable bits 860 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS | 861 CFGR_RESERVED | CFGR_T64ADDR | 862 CFGR_PCI64_DET; 863 // Now set the appropriate writable bits 864 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS | 865 CFGR_RESERVED | CFGR_T64ADDR | 866 CFGR_PCI64_DET); 867 868// all these #if 0's are because i don't THINK the kernel needs to 869// have these implemented. if there is a problem relating to one of 870// these, you may need to add functionality in. 871#if 0 872 if (reg & CFGR_TBI_EN) ; 873 if (reg & CFGR_MODE_1000) ; 874#endif 875 876 if (reg & CFGR_AUTO_1000) 877 panic("CFGR_AUTO_1000 not implemented!\n"); 878 879#if 0 880 if (reg & CFGR_PINT_DUPSTS || 881 reg & CFGR_PINT_LNKSTS || 882 reg & CFGR_PINT_SPDSTS) 883 ; 884 885 if (reg & CFGR_TMRTEST) ; 886 if (reg & CFGR_MRM_DIS) ; 887 if (reg & CFGR_MWI_DIS) ; 888 889 if (reg & CFGR_T64ADDR) 890 panic("CFGR_T64ADDR is read only register!\n"); 891 892 if (reg & CFGR_PCI64_DET) 893 panic("CFGR_PCI64_DET is read only register!\n"); 894 895 if (reg & CFGR_DATA64_EN) ; 896 if (reg & CFGR_M64ADDR) ; 897 if (reg & CFGR_PHY_RST) ; 898 if (reg & CFGR_PHY_DIS) ; 899#endif 900 901 if (reg & CFGR_EXTSTS_EN) 902 extstsEnable = true; 903 else 904 extstsEnable = false; 905 906#if 0 907 if (reg & CFGR_REQALG) ; 908 if (reg & CFGR_SB) ; 909 if (reg & CFGR_POW) ; 910 if (reg & CFGR_EXD) ; 911 if (reg & CFGR_PESEL) ; 912 if (reg & CFGR_BROM_DIS) ; 913 if (reg & CFGR_EXT_125) ; 914 if (reg & CFGR_BEM) ; 915#endif 916 break; 917 918 case MEAR: 919 // Clear writable bits 920 regs.mear &= MEAR_EEDO; 921 // Set appropriate writable bits 922 regs.mear |= reg & ~MEAR_EEDO; 923 924 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address) 925 // even though it could get it through RFDR 926 if (reg & MEAR_EESEL) { 927 // Rising edge of clock 928 if (reg & MEAR_EECLK && !eepromClk) 929 eepromKick(); 930 } 931 else { 932 eepromState = eepromStart; 933 regs.mear &= ~MEAR_EEDI; 934 } 935 936 eepromClk = reg & MEAR_EECLK; 937 938 // since phy is completely faked, MEAR_MD* don't matter 939#if 0 940 if (reg & MEAR_MDIO) ; 941 if (reg & MEAR_MDDIR) ; 942 if (reg & MEAR_MDC) ; 943#endif 944 break; 945 946 case PTSCR: 947 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY); 948 // these control BISTs for various parts of chip - we 949 // don't care or do just fake that the BIST is done 950 if (reg & PTSCR_RBIST_EN) 951 regs.ptscr |= PTSCR_RBIST_DONE; 952 if (reg & PTSCR_EEBIST_EN) 953 regs.ptscr &= ~PTSCR_EEBIST_EN; 954 if (reg & PTSCR_EELOAD_EN) 955 regs.ptscr &= ~PTSCR_EELOAD_EN; 956 break; 957 958 case ISR: /* writing to the ISR has no effect */ 959 panic("ISR is a read only register!\n"); 960 961 case IMR: 962 regs.imr = reg; 963 devIntrChangeMask(); 964 break; 965 966 case IER: 967 regs.ier = reg; 968 break; 969 970 case IHR: 971 regs.ihr = reg; 972 /* not going to implement real interrupt holdoff */ 973 break; 974 975 case TXDP: 976 regs.txdp = (reg & 0xFFFFFFFC); 977 assert(txState == txIdle); 978 CTDD = false; 979 break; 980 981 case TXDP_HI: 982 regs.txdp_hi = reg; 983 break; 984 985 case TX_CFG: 986 regs.txcfg = reg; 987#if 0 988 if (reg & TX_CFG_CSI) ; 989 if (reg & TX_CFG_HBI) ; 990 if (reg & TX_CFG_MLB) ; 991 if (reg & TX_CFG_ATP) ; 992 if (reg & TX_CFG_ECRETRY) { 993 /* 994 * this could easily be implemented, but considering 995 * the network is just a fake pipe, wouldn't make 996 * sense to do this 997 */ 998 } 999 1000 if (reg & TX_CFG_BRST_DIS) ; 1001#endif 1002 1003#if 0 1004 /* we handle our own DMA, ignore the kernel's exhortations */ 1005 if (reg & TX_CFG_MXDMA) ; 1006#endif 1007 1008 // also, we currently don't care about fill/drain 1009 // thresholds though this may change in the future with 1010 // more realistic networks or a driver which changes it 1011 // according to feedback 1012 1013 break; 1014 1015 case GPIOR: 1016 // Only write writable bits 1017 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN 1018 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN; 1019 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN 1020 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN); 1021 /* these just control general purpose i/o pins, don't matter */ 1022 break; 1023 1024 case RXDP: 1025 regs.rxdp = reg; 1026 CRDD = false; 1027 break; 1028 1029 case RXDP_HI: 1030 regs.rxdp_hi = reg; 1031 break; 1032 1033 case RX_CFG: 1034 regs.rxcfg = reg; 1035#if 0 1036 if (reg & RX_CFG_AEP) ; 1037 if (reg & RX_CFG_ARP) ; 1038 if (reg & RX_CFG_STRIPCRC) ; 1039 if (reg & RX_CFG_RX_RD) ; 1040 if (reg & RX_CFG_ALP) ; 1041 if (reg & RX_CFG_AIRL) ; 1042 1043 /* we handle our own DMA, ignore what kernel says about it */ 1044 if (reg & RX_CFG_MXDMA) ; 1045 1046 //also, we currently don't care about fill/drain thresholds 1047 //though this may change in the future with more realistic 1048 //networks or a driver which changes it according to feedback 1049 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ; 1050#endif 1051 break; 1052 1053 case PQCR: 1054 /* there is no priority queueing used in the linux 2.6 driver */ 1055 regs.pqcr = reg; 1056 break; 1057 1058 case WCSR: 1059 /* not going to implement wake on LAN */ 1060 regs.wcsr = reg; 1061 break; 1062 1063 case PCR: 1064 /* not going to implement pause control */ 1065 regs.pcr = reg; 1066 break; 1067 1068 case RFCR: 1069 regs.rfcr = reg; 1070 1071 rxFilterEnable = (reg & RFCR_RFEN) ? true : false; 1072 acceptBroadcast = (reg & RFCR_AAB) ? true : false; 1073 acceptMulticast = (reg & RFCR_AAM) ? true : false; 1074 acceptUnicast = (reg & RFCR_AAU) ? true : false; 1075 acceptPerfect = (reg & RFCR_APM) ? true : false; 1076 acceptArp = (reg & RFCR_AARP) ? true : false; 1077 multicastHashEnable = (reg & RFCR_MHEN) ? true : false; 1078 1079#if 0 1080 if (reg & RFCR_APAT) 1081 panic("RFCR_APAT not implemented!\n"); 1082#endif 1083 if (reg & RFCR_UHEN) 1084 panic("Unicast hash filtering not used by drivers!\n"); 1085 1086 if (reg & RFCR_ULM) 1087 panic("RFCR_ULM not implemented!\n"); 1088 1089 break; 1090 1091 case RFDR: 1092 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR); 1093 switch (rfaddr) { 1094 case 0x000: 1095 rom.perfectMatch[0] = (uint8_t)reg; 1096 rom.perfectMatch[1] = (uint8_t)(reg >> 8); 1097 break; 1098 case 0x002: 1099 rom.perfectMatch[2] = (uint8_t)reg; 1100 rom.perfectMatch[3] = (uint8_t)(reg >> 8); 1101 break; 1102 case 0x004: 1103 rom.perfectMatch[4] = (uint8_t)reg; 1104 rom.perfectMatch[5] = (uint8_t)(reg >> 8); 1105 break; 1106 default: 1107 1108 if (rfaddr >= FHASH_ADDR && 1109 rfaddr < FHASH_ADDR + FHASH_SIZE) { 1110 1111 // Only word-aligned writes supported 1112 if (rfaddr % 2) 1113 panic("unaligned write to filter hash table!"); 1114 1115 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg; 1116 rom.filterHash[rfaddr - FHASH_ADDR + 1] 1117 = (uint8_t)(reg >> 8); 1118 break; 1119 } 1120 panic("writing RFDR for something other than pattern matching\ 1121 or hashing! %#x\n", rfaddr); 1122 } 1123 1124 case BRAR: 1125 regs.brar = reg; 1126 break; 1127 1128 case BRDR: 1129 panic("the driver never uses BRDR, something is wrong!\n"); 1130 1131 case SRR: 1132 panic("SRR is read only register!\n"); 1133 1134 case MIBC: 1135 panic("the driver never uses MIBC, something is wrong!\n"); 1136 1137 case VRCR: 1138 regs.vrcr = reg; 1139 break; 1140 1141 case VTCR: 1142 regs.vtcr = reg; 1143 break; 1144 1145 case VDR: 1146 panic("the driver never uses VDR, something is wrong!\n"); 1147 1148 case CCSR: 1149 /* not going to implement clockrun stuff */ 1150 regs.ccsr = reg; 1151 break; 1152 1153 case TBICR: 1154 regs.tbicr = reg; 1155 if (reg & TBICR_MR_LOOPBACK) 1156 panic("TBICR_MR_LOOPBACK never used, something wrong!\n"); 1157 1158 if (reg & TBICR_MR_AN_ENABLE) { 1159 regs.tanlpar = regs.tanar; 1160 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS); 1161 } 1162 1163#if 0 1164 if (reg & TBICR_MR_RESTART_AN) ; 1165#endif 1166 1167 break; 1168 1169 case TBISR: 1170 panic("TBISR is read only register!\n"); 1171 1172 case TANAR: 1173 // Only write the writable bits 1174 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED; 1175 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED); 1176 1177 // Pause capability unimplemented 1178#if 0 1179 if (reg & TANAR_PS2) ; 1180 if (reg & TANAR_PS1) ; 1181#endif 1182 1183 break; 1184 1185 case TANLPAR: 1186 panic("this should only be written to by the fake phy!\n"); 1187 1188 case TANER: 1189 panic("TANER is read only register!\n"); 1190 1191 case TESR: 1192 regs.tesr = reg; 1193 break; 1194 1195 default: 1196 panic("invalid register access daddr=%#x", daddr); 1197 } 1198 } else { 1199 panic("Invalid Request Size"); 1200 } 1201 1202 return No_Fault; 1203} 1204 1205void 1206NSGigE::devIntrPost(uint32_t interrupts) 1207{ 1208 if (interrupts & ISR_RESERVE) 1209 panic("Cannot set a reserved interrupt"); 1210 1211 if (interrupts & ISR_NOIMPL) 1212 warn("interrupt not implemented %#x\n", interrupts); 1213 1214 interrupts &= ISR_IMPL; 1215 regs.isr |= interrupts; 1216 1217 if (interrupts & regs.imr) { 1218 if (interrupts & ISR_SWI) { 1219 totalSwi++; 1220 } 1221 if (interrupts & ISR_RXIDLE) { 1222 totalRxIdle++; 1223 } 1224 if (interrupts & ISR_RXOK) { 1225 totalRxOk++; 1226 } 1227 if (interrupts & ISR_RXDESC) { 1228 totalRxDesc++; 1229 } 1230 if (interrupts & ISR_TXOK) { 1231 totalTxOk++; 1232 } 1233 if (interrupts & ISR_TXIDLE) { 1234 totalTxIdle++; 1235 } 1236 if (interrupts & ISR_TXDESC) { 1237 totalTxDesc++; 1238 } 1239 if (interrupts & ISR_RXORN) { 1240 totalRxOrn++; 1241 } 1242 } 1243 1244 DPRINTF(EthernetIntr, 1245 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n", 1246 interrupts, regs.isr, regs.imr); 1247 1248 if ((regs.isr & regs.imr)) { 1249 Tick when = curTick; 1250 if ((regs.isr & regs.imr & ISR_NODELAY) == 0) 1251 when += intrDelay; 1252 cpuIntrPost(when); 1253 } 1254} 1255 1256/* writing this interrupt counting stats inside this means that this function 1257 is now limited to being used to clear all interrupts upon the kernel 1258 reading isr and servicing. just telling you in case you were thinking 1259 of expanding use. 1260*/ 1261void 1262NSGigE::devIntrClear(uint32_t interrupts) 1263{ 1264 if (interrupts & ISR_RESERVE) 1265 panic("Cannot clear a reserved interrupt"); 1266 1267 if (regs.isr & regs.imr & ISR_SWI) { 1268 postedSwi++; 1269 } 1270 if (regs.isr & regs.imr & ISR_RXIDLE) { 1271 postedRxIdle++; 1272 } 1273 if (regs.isr & regs.imr & ISR_RXOK) { 1274 postedRxOk++; 1275 } 1276 if (regs.isr & regs.imr & ISR_RXDESC) { 1277 postedRxDesc++; 1278 } 1279 if (regs.isr & regs.imr & ISR_TXOK) { 1280 postedTxOk++; 1281 } 1282 if (regs.isr & regs.imr & ISR_TXIDLE) { 1283 postedTxIdle++; 1284 } 1285 if (regs.isr & regs.imr & ISR_TXDESC) { 1286 postedTxDesc++; 1287 } 1288 if (regs.isr & regs.imr & ISR_RXORN) { 1289 postedRxOrn++; 1290 } 1291 1292 if (regs.isr & regs.imr & ISR_IMPL) 1293 postedInterrupts++; 1294 1295 interrupts &= ~ISR_NOIMPL; 1296 regs.isr &= ~interrupts; 1297 1298 DPRINTF(EthernetIntr, 1299 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n", 1300 interrupts, regs.isr, regs.imr); 1301 1302 if (!(regs.isr & regs.imr)) 1303 cpuIntrClear(); 1304} 1305 1306void 1307NSGigE::devIntrChangeMask() 1308{ 1309 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n", 1310 regs.isr, regs.imr, regs.isr & regs.imr); 1311 1312 if (regs.isr & regs.imr) 1313 cpuIntrPost(curTick); 1314 else 1315 cpuIntrClear(); 1316} 1317 1318void 1319NSGigE::cpuIntrPost(Tick when) 1320{ 1321 // If the interrupt you want to post is later than an interrupt 1322 // already scheduled, just let it post in the coming one and don't 1323 // schedule another. 1324 // HOWEVER, must be sure that the scheduled intrTick is in the 1325 // future (this was formerly the source of a bug) 1326 /** 1327 * @todo this warning should be removed and the intrTick code should 1328 * be fixed. 1329 */ 1330 assert(when >= curTick); 1331 assert(intrTick >= curTick || intrTick == 0); 1332 if (when > intrTick && intrTick != 0) { 1333 DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n", 1334 intrTick); 1335 return; 1336 } 1337 1338 intrTick = when; 1339 if (intrTick < curTick) { 1340 debug_break(); 1341 intrTick = curTick; 1342 } 1343 1344 DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n", 1345 intrTick); 1346 1347 if (intrEvent) 1348 intrEvent->squash(); 1349 intrEvent = new IntrEvent(this, true); 1350 intrEvent->schedule(intrTick); 1351} 1352 1353void 1354NSGigE::cpuInterrupt() 1355{ 1356 assert(intrTick == curTick); 1357 1358 // Whether or not there's a pending interrupt, we don't care about 1359 // it anymore 1360 intrEvent = 0; 1361 intrTick = 0; 1362 1363 // Don't send an interrupt if there's already one 1364 if (cpuPendingIntr) { 1365 DPRINTF(EthernetIntr, 1366 "would send an interrupt now, but there's already pending\n"); 1367 } else { 1368 // Send interrupt 1369 cpuPendingIntr = true; 1370 1371 DPRINTF(EthernetIntr, "posting interrupt\n"); 1372 intrPost(); 1373 } 1374} 1375 1376void 1377NSGigE::cpuIntrClear() 1378{ 1379 if (!cpuPendingIntr) 1380 return; 1381 1382 if (intrEvent) { 1383 intrEvent->squash(); 1384 intrEvent = 0; 1385 } 1386 1387 intrTick = 0; 1388 1389 cpuPendingIntr = false; 1390 1391 DPRINTF(EthernetIntr, "clearing interrupt\n"); 1392 intrClear(); 1393} 1394 1395bool 1396NSGigE::cpuIntrPending() const 1397{ return cpuPendingIntr; } 1398 1399void 1400NSGigE::txReset() 1401{ 1402 1403 DPRINTF(Ethernet, "transmit reset\n"); 1404 1405 CTDD = false; 1406 txEnable = false;; 1407 txFragPtr = 0; 1408 assert(txDescCnt == 0); 1409 txFifo.clear(); 1410 txState = txIdle; 1411 assert(txDmaState == dmaIdle); 1412} 1413 1414void 1415NSGigE::rxReset() 1416{ 1417 DPRINTF(Ethernet, "receive reset\n"); 1418 1419 CRDD = false; 1420 assert(rxPktBytes == 0); 1421 rxEnable = false; 1422 rxFragPtr = 0; 1423 assert(rxDescCnt == 0); 1424 assert(rxDmaState == dmaIdle); 1425 rxFifo.clear(); 1426 rxState = rxIdle; 1427} 1428 1429void 1430NSGigE::regsReset() 1431{ 1432 memset(®s, 0, sizeof(regs)); 1433 regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000); 1434 regs.mear = 0x12; 1435 regs.txcfg = 0x120; // set drain threshold to 1024 bytes and 1436 // fill threshold to 32 bytes 1437 regs.rxcfg = 0x4; // set drain threshold to 16 bytes 1438 regs.srr = 0x0103; // set the silicon revision to rev B or 0x103 1439 regs.mibc = MIBC_FRZ; 1440 regs.vdr = 0x81; // set the vlan tag type to 802.1q 1441 regs.tesr = 0xc000; // TBI capable of both full and half duplex 1442 regs.brar = 0xffffffff; 1443 1444 extstsEnable = false; 1445 acceptBroadcast = false; 1446 acceptMulticast = false; 1447 acceptUnicast = false; 1448 acceptPerfect = false; 1449 acceptArp = false; 1450} 1451 1452void 1453NSGigE::rxDmaReadCopy() 1454{ 1455 assert(rxDmaState == dmaReading); 1456 1457 physmem->dma_read((uint8_t *)rxDmaData, rxDmaAddr, rxDmaLen); 1458 rxDmaState = dmaIdle; 1459 1460 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n", 1461 rxDmaAddr, rxDmaLen); 1462 DDUMP(EthernetDMA, rxDmaData, rxDmaLen); 1463} 1464 1465bool 1466NSGigE::doRxDmaRead() 1467{ 1468 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting); 1469 rxDmaState = dmaReading; 1470 1471 if (dmaInterface && !rxDmaFree) { 1472 if (dmaInterface->busy()) 1473 rxDmaState = dmaReadWaiting; 1474 else 1475 dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick, 1476 &rxDmaReadEvent, true); 1477 return true; 1478 } 1479 1480 if (dmaReadDelay == 0 && dmaReadFactor == 0) { 1481 rxDmaReadCopy(); 1482 return false; 1483 } 1484 1485 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor; 1486 Tick start = curTick + dmaReadDelay + factor; 1487 rxDmaReadEvent.schedule(start); 1488 return true; 1489} 1490 1491void 1492NSGigE::rxDmaReadDone() 1493{ 1494 assert(rxDmaState == dmaReading); 1495 rxDmaReadCopy(); 1496 1497 // If the transmit state machine has a pending DMA, let it go first 1498 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting) 1499 txKick(); 1500 1501 rxKick(); 1502} 1503 1504void 1505NSGigE::rxDmaWriteCopy() 1506{ 1507 assert(rxDmaState == dmaWriting); 1508 1509 physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen); 1510 rxDmaState = dmaIdle; 1511 1512 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n", 1513 rxDmaAddr, rxDmaLen); 1514 DDUMP(EthernetDMA, rxDmaData, rxDmaLen); 1515} 1516 1517bool 1518NSGigE::doRxDmaWrite() 1519{ 1520 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting); 1521 rxDmaState = dmaWriting; 1522 1523 if (dmaInterface && !rxDmaFree) { 1524 if (dmaInterface->busy()) 1525 rxDmaState = dmaWriteWaiting; 1526 else 1527 dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick, 1528 &rxDmaWriteEvent, true); 1529 return true; 1530 } 1531 1532 if (dmaWriteDelay == 0 && dmaWriteFactor == 0) { 1533 rxDmaWriteCopy(); 1534 return false; 1535 } 1536 1537 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor; 1538 Tick start = curTick + dmaWriteDelay + factor; 1539 rxDmaWriteEvent.schedule(start); 1540 return true; 1541} 1542 1543void 1544NSGigE::rxDmaWriteDone() 1545{ 1546 assert(rxDmaState == dmaWriting); 1547 rxDmaWriteCopy(); 1548 1549 // If the transmit state machine has a pending DMA, let it go first 1550 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting) 1551 txKick(); 1552 1553 rxKick(); 1554} 1555 1556void 1557NSGigE::rxKick() 1558{ 1559 DPRINTF(EthernetSM, "receive kick rxState=%s (rxBuf.size=%d)\n", 1560 NsRxStateStrings[rxState], rxFifo.size()); 1561 1562 next: 1563 if (clock) { 1564 if (rxKickTick > curTick) { 1565 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n", 1566 rxKickTick); 1567 1568 goto exit; 1569 } 1570 1571 // Go to the next state machine clock tick. 1572 rxKickTick = curTick + cycles(1); 1573 } 1574 1575 switch(rxDmaState) { 1576 case dmaReadWaiting: 1577 if (doRxDmaRead()) 1578 goto exit; 1579 break; 1580 case dmaWriteWaiting: 1581 if (doRxDmaWrite()) 1582 goto exit; 1583 break; 1584 default: 1585 break; 1586 } 1587 1588 // see state machine from spec for details 1589 // the way this works is, if you finish work on one state and can 1590 // go directly to another, you do that through jumping to the 1591 // label "next". however, if you have intermediate work, like DMA 1592 // so that you can't go to the next state yet, you go to exit and 1593 // exit the loop. however, when the DMA is done it will trigger 1594 // an event and come back to this loop. 1595 switch (rxState) { 1596 case rxIdle: 1597 if (!rxEnable) { 1598 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n"); 1599 goto exit; 1600 } 1601 1602 if (CRDD) { 1603 rxState = rxDescRefr; 1604 1605 rxDmaAddr = regs.rxdp & 0x3fffffff; 1606 rxDmaData = &rxDescCache + offsetof(ns_desc, link); 1607 rxDmaLen = sizeof(rxDescCache.link); 1608 rxDmaFree = dmaDescFree; 1609 1610 descDmaReads++; 1611 descDmaRdBytes += rxDmaLen; 1612 1613 if (doRxDmaRead()) 1614 goto exit; 1615 } else { 1616 rxState = rxDescRead; 1617 1618 rxDmaAddr = regs.rxdp & 0x3fffffff; 1619 rxDmaData = &rxDescCache; 1620 rxDmaLen = sizeof(ns_desc); 1621 rxDmaFree = dmaDescFree; 1622 1623 descDmaReads++; 1624 descDmaRdBytes += rxDmaLen; 1625 1626 if (doRxDmaRead()) 1627 goto exit; 1628 } 1629 break; 1630 1631 case rxDescRefr: 1632 if (rxDmaState != dmaIdle) 1633 goto exit; 1634 1635 rxState = rxAdvance; 1636 break; 1637 1638 case rxDescRead: 1639 if (rxDmaState != dmaIdle) 1640 goto exit; 1641 1642 DPRINTF(EthernetDesc, "rxDescCache: addr=%08x read descriptor\n", 1643 regs.rxdp & 0x3fffffff); 1644 DPRINTF(EthernetDesc, 1645 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n", 1646 rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts, 1647 rxDescCache.extsts); 1648 1649 if (rxDescCache.cmdsts & CMDSTS_OWN) { 1650 devIntrPost(ISR_RXIDLE); 1651 rxState = rxIdle; 1652 goto exit; 1653 } else { 1654 rxState = rxFifoBlock; 1655 rxFragPtr = rxDescCache.bufptr; 1656 rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK; 1657 } 1658 break; 1659 1660 case rxFifoBlock: 1661 if (!rxPacket) { 1662 /** 1663 * @todo in reality, we should be able to start processing 1664 * the packet as it arrives, and not have to wait for the 1665 * full packet ot be in the receive fifo. 1666 */ 1667 if (rxFifo.empty()) 1668 goto exit; 1669 1670 DPRINTF(EthernetSM, "****processing receive of new packet****\n"); 1671 1672 // If we don't have a packet, grab a new one from the fifo. 1673 rxPacket = rxFifo.front(); 1674 rxPktBytes = rxPacket->length; 1675 rxPacketBufPtr = rxPacket->data; 1676 1677#if TRACING_ON 1678 if (DTRACE(Ethernet)) { 1679 IpPtr ip(rxPacket); 1680 if (ip) { 1681 DPRINTF(Ethernet, "ID is %d\n", ip->id()); 1682 TcpPtr tcp(ip); 1683 if (tcp) { 1684 DPRINTF(Ethernet, 1685 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n", 1686 tcp->sport(), tcp->dport(), tcp->seq(), 1687 tcp->ack()); 1688 } 1689 } 1690 } 1691#endif 1692 1693 // sanity check - i think the driver behaves like this 1694 assert(rxDescCnt >= rxPktBytes); 1695 rxFifo.pop(); 1696 } 1697 1698 1699 // dont' need the && rxDescCnt > 0 if driver sanity check 1700 // above holds 1701 if (rxPktBytes > 0) { 1702 rxState = rxFragWrite; 1703 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity 1704 // check holds 1705 rxXferLen = rxPktBytes; 1706 1707 rxDmaAddr = rxFragPtr & 0x3fffffff; 1708 rxDmaData = rxPacketBufPtr; 1709 rxDmaLen = rxXferLen; 1710 rxDmaFree = dmaDataFree; 1711 1712 if (doRxDmaWrite()) 1713 goto exit; 1714 1715 } else { 1716 rxState = rxDescWrite; 1717 1718 //if (rxPktBytes == 0) { /* packet is done */ 1719 assert(rxPktBytes == 0); 1720 DPRINTF(EthernetSM, "done with receiving packet\n"); 1721 1722 rxDescCache.cmdsts |= CMDSTS_OWN; 1723 rxDescCache.cmdsts &= ~CMDSTS_MORE; 1724 rxDescCache.cmdsts |= CMDSTS_OK; 1725 rxDescCache.cmdsts &= 0xffff0000; 1726 rxDescCache.cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE 1727 1728#if 0 1729 /* 1730 * all the driver uses these are for its own stats keeping 1731 * which we don't care about, aren't necessary for 1732 * functionality and doing this would just slow us down. 1733 * if they end up using this in a later version for 1734 * functional purposes, just undef 1735 */ 1736 if (rxFilterEnable) { 1737 rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK; 1738 const EthAddr &dst = rxFifoFront()->dst(); 1739 if (dst->unicast()) 1740 rxDescCache.cmdsts |= CMDSTS_DEST_SELF; 1741 if (dst->multicast()) 1742 rxDescCache.cmdsts |= CMDSTS_DEST_MULTI; 1743 if (dst->broadcast()) 1744 rxDescCache.cmdsts |= CMDSTS_DEST_MASK; 1745 } 1746#endif 1747 1748 IpPtr ip(rxPacket); 1749 if (extstsEnable && ip) { 1750 rxDescCache.extsts |= EXTSTS_IPPKT; 1751 rxIpChecksums++; 1752 if (cksum(ip) != 0) { 1753 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n"); 1754 rxDescCache.extsts |= EXTSTS_IPERR; 1755 } 1756 TcpPtr tcp(ip); 1757 UdpPtr udp(ip); 1758 if (tcp) { 1759 rxDescCache.extsts |= EXTSTS_TCPPKT; 1760 rxTcpChecksums++; 1761 if (cksum(tcp) != 0) { 1762 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n"); 1763 rxDescCache.extsts |= EXTSTS_TCPERR; 1764 1765 } 1766 } else if (udp) { 1767 rxDescCache.extsts |= EXTSTS_UDPPKT; 1768 rxUdpChecksums++; 1769 if (cksum(udp) != 0) { 1770 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n"); 1771 rxDescCache.extsts |= EXTSTS_UDPERR; 1772 } 1773 } 1774 } 1775 rxPacket = 0; 1776 1777 /* 1778 * the driver seems to always receive into desc buffers 1779 * of size 1514, so you never have a pkt that is split 1780 * into multiple descriptors on the receive side, so 1781 * i don't implement that case, hence the assert above. 1782 */ 1783 1784 DPRINTF(EthernetDesc, 1785 "rxDescCache: addr=%08x writeback cmdsts extsts\n", 1786 regs.rxdp & 0x3fffffff); 1787 DPRINTF(EthernetDesc, 1788 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n", 1789 rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts, 1790 rxDescCache.extsts); 1791 1792 rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff; 1793 rxDmaData = &(rxDescCache.cmdsts); 1794 rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts); 1795 rxDmaFree = dmaDescFree; 1796 1797 descDmaWrites++; 1798 descDmaWrBytes += rxDmaLen; 1799 1800 if (doRxDmaWrite()) 1801 goto exit; 1802 } 1803 break; 1804 1805 case rxFragWrite: 1806 if (rxDmaState != dmaIdle) 1807 goto exit; 1808 1809 rxPacketBufPtr += rxXferLen; 1810 rxFragPtr += rxXferLen; 1811 rxPktBytes -= rxXferLen; 1812 1813 rxState = rxFifoBlock; 1814 break; 1815 1816 case rxDescWrite: 1817 if (rxDmaState != dmaIdle) 1818 goto exit; 1819 1820 assert(rxDescCache.cmdsts & CMDSTS_OWN); 1821 1822 assert(rxPacket == 0); 1823 devIntrPost(ISR_RXOK); 1824 1825 if (rxDescCache.cmdsts & CMDSTS_INTR) 1826 devIntrPost(ISR_RXDESC); 1827 1828 if (!rxEnable) { 1829 DPRINTF(EthernetSM, "Halting the RX state machine\n"); 1830 rxState = rxIdle; 1831 goto exit; 1832 } else 1833 rxState = rxAdvance; 1834 break; 1835 1836 case rxAdvance: 1837 if (rxDescCache.link == 0) { 1838 devIntrPost(ISR_RXIDLE); 1839 rxState = rxIdle; 1840 CRDD = true; 1841 goto exit; 1842 } else { 1843 if (rxDmaState != dmaIdle) 1844 goto exit; 1845 rxState = rxDescRead; 1846 regs.rxdp = rxDescCache.link; 1847 CRDD = false; 1848 1849 rxDmaAddr = regs.rxdp & 0x3fffffff; 1850 rxDmaData = &rxDescCache; 1851 rxDmaLen = sizeof(ns_desc); 1852 rxDmaFree = dmaDescFree; 1853 1854 if (doRxDmaRead()) 1855 goto exit; 1856 } 1857 break; 1858 1859 default: 1860 panic("Invalid rxState!"); 1861 } 1862 1863 DPRINTF(EthernetSM, "entering next rxState=%s\n", 1864 NsRxStateStrings[rxState]); 1865 goto next; 1866 1867 exit: 1868 /** 1869 * @todo do we want to schedule a future kick? 1870 */ 1871 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n", 1872 NsRxStateStrings[rxState]); 1873 1874 if (clock && !rxKickEvent.scheduled()) 1875 rxKickEvent.schedule(rxKickTick); 1876} 1877 1878void 1879NSGigE::transmit() 1880{ 1881 if (txFifo.empty()) { 1882 DPRINTF(Ethernet, "nothing to transmit\n"); 1883 return; 1884 } 1885 1886 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n", 1887 txFifo.size()); 1888 if (interface->sendPacket(txFifo.front())) { 1889#if TRACING_ON 1890 if (DTRACE(Ethernet)) { 1891 IpPtr ip(txFifo.front()); 1892 if (ip) { 1893 DPRINTF(Ethernet, "ID is %d\n", ip->id()); 1894 TcpPtr tcp(ip); 1895 if (tcp) { 1896 DPRINTF(Ethernet, 1897 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n", 1898 tcp->sport(), tcp->dport(), tcp->seq(), tcp->ack()); 1899 } 1900 } 1901 } 1902#endif 1903 1904 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length); 1905 txBytes += txFifo.front()->length; 1906 txPackets++; 1907 1908 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n", 1909 txFifo.avail()); 1910 txFifo.pop(); 1911 1912 /* 1913 * normally do a writeback of the descriptor here, and ONLY 1914 * after that is done, send this interrupt. but since our 1915 * stuff never actually fails, just do this interrupt here, 1916 * otherwise the code has to stray from this nice format. 1917 * besides, it's functionally the same. 1918 */ 1919 devIntrPost(ISR_TXOK); 1920 } 1921 1922 if (!txFifo.empty() && !txEvent.scheduled()) { 1923 DPRINTF(Ethernet, "reschedule transmit\n"); 1924 txEvent.schedule(curTick + retryTime); 1925 } 1926} 1927 1928void 1929NSGigE::txDmaReadCopy() 1930{ 1931 assert(txDmaState == dmaReading); 1932 1933 physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen); 1934 txDmaState = dmaIdle; 1935 1936 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n", 1937 txDmaAddr, txDmaLen); 1938 DDUMP(EthernetDMA, txDmaData, txDmaLen); 1939} 1940 1941bool 1942NSGigE::doTxDmaRead() 1943{ 1944 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting); 1945 txDmaState = dmaReading; 1946 1947 if (dmaInterface && !txDmaFree) { 1948 if (dmaInterface->busy()) 1949 txDmaState = dmaReadWaiting; 1950 else 1951 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick, 1952 &txDmaReadEvent, true); 1953 return true; 1954 } 1955 1956 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) { 1957 txDmaReadCopy(); 1958 return false; 1959 } 1960 1961 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor; 1962 Tick start = curTick + dmaReadDelay + factor; 1963 txDmaReadEvent.schedule(start); 1964 return true; 1965} 1966 1967void 1968NSGigE::txDmaReadDone() 1969{ 1970 assert(txDmaState == dmaReading); 1971 txDmaReadCopy(); 1972 1973 // If the receive state machine has a pending DMA, let it go first 1974 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting) 1975 rxKick(); 1976 1977 txKick(); 1978} 1979 1980void 1981NSGigE::txDmaWriteCopy() 1982{ 1983 assert(txDmaState == dmaWriting); 1984 1985 physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen); 1986 txDmaState = dmaIdle; 1987 1988 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n", 1989 txDmaAddr, txDmaLen); 1990 DDUMP(EthernetDMA, txDmaData, txDmaLen); 1991} 1992 1993bool 1994NSGigE::doTxDmaWrite() 1995{ 1996 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting); 1997 txDmaState = dmaWriting; 1998 1999 if (dmaInterface && !txDmaFree) { 2000 if (dmaInterface->busy()) 2001 txDmaState = dmaWriteWaiting; 2002 else 2003 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick, 2004 &txDmaWriteEvent, true); 2005 return true; 2006 } 2007 2008 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) { 2009 txDmaWriteCopy(); 2010 return false; 2011 } 2012 2013 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor; 2014 Tick start = curTick + dmaWriteDelay + factor; 2015 txDmaWriteEvent.schedule(start); 2016 return true; 2017} 2018 2019void 2020NSGigE::txDmaWriteDone() 2021{ 2022 assert(txDmaState == dmaWriting); 2023 txDmaWriteCopy(); 2024 2025 // If the receive state machine has a pending DMA, let it go first 2026 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting) 2027 rxKick(); 2028 2029 txKick(); 2030} 2031 2032void 2033NSGigE::txKick() 2034{ 2035 DPRINTF(EthernetSM, "transmit kick txState=%s\n", 2036 NsTxStateStrings[txState]); 2037 2038 next: 2039 if (clock) { 2040 if (txKickTick > curTick) { 2041 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n", 2042 txKickTick); 2043 goto exit; 2044 } 2045 2046 // Go to the next state machine clock tick. 2047 txKickTick = curTick + cycles(1); 2048 } 2049 2050 switch(txDmaState) { 2051 case dmaReadWaiting: 2052 if (doTxDmaRead()) 2053 goto exit; 2054 break; 2055 case dmaWriteWaiting: 2056 if (doTxDmaWrite()) 2057 goto exit; 2058 break; 2059 default: 2060 break; 2061 } 2062 2063 switch (txState) { 2064 case txIdle: 2065 if (!txEnable) { 2066 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n"); 2067 goto exit; 2068 } 2069 2070 if (CTDD) { 2071 txState = txDescRefr; 2072 2073 txDmaAddr = regs.txdp & 0x3fffffff; 2074 txDmaData = &txDescCache + offsetof(ns_desc, link); 2075 txDmaLen = sizeof(txDescCache.link); 2076 txDmaFree = dmaDescFree; 2077 2078 descDmaReads++; 2079 descDmaRdBytes += txDmaLen; 2080 2081 if (doTxDmaRead()) 2082 goto exit; 2083 2084 } else { 2085 txState = txDescRead; 2086 2087 txDmaAddr = regs.txdp & 0x3fffffff; 2088 txDmaData = &txDescCache; 2089 txDmaLen = sizeof(ns_desc); 2090 txDmaFree = dmaDescFree; 2091 2092 descDmaReads++; 2093 descDmaRdBytes += txDmaLen; 2094 2095 if (doTxDmaRead()) 2096 goto exit; 2097 } 2098 break; 2099 2100 case txDescRefr: 2101 if (txDmaState != dmaIdle) 2102 goto exit; 2103 2104 txState = txAdvance; 2105 break; 2106 2107 case txDescRead: 2108 if (txDmaState != dmaIdle) 2109 goto exit; 2110 2111 DPRINTF(EthernetDesc, "txDescCache: addr=%08x read descriptor\n", 2112 regs.txdp & 0x3fffffff); 2113 DPRINTF(EthernetDesc, 2114 "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n", 2115 txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts, 2116 txDescCache.extsts); 2117 2118 if (txDescCache.cmdsts & CMDSTS_OWN) { 2119 txState = txFifoBlock; 2120 txFragPtr = txDescCache.bufptr; 2121 txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK; 2122 } else { 2123 devIntrPost(ISR_TXIDLE); 2124 txState = txIdle; 2125 goto exit; 2126 } 2127 break; 2128 2129 case txFifoBlock: 2130 if (!txPacket) { 2131 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n"); 2132 txPacket = new PacketData(16384); 2133 txPacketBufPtr = txPacket->data; 2134 } 2135 2136 if (txDescCnt == 0) { 2137 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n"); 2138 if (txDescCache.cmdsts & CMDSTS_MORE) { 2139 DPRINTF(EthernetSM, "there are more descriptors to come\n"); 2140 txState = txDescWrite; 2141 2142 txDescCache.cmdsts &= ~CMDSTS_OWN; 2143 2144 txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts); 2145 txDmaAddr &= 0x3fffffff; 2146 txDmaData = &(txDescCache.cmdsts); 2147 txDmaLen = sizeof(txDescCache.cmdsts); 2148 txDmaFree = dmaDescFree; 2149 2150 if (doTxDmaWrite()) 2151 goto exit; 2152 2153 } else { /* this packet is totally done */ 2154 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n"); 2155 /* deal with the the packet that just finished */ 2156 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) { 2157 IpPtr ip(txPacket); 2158 if (txDescCache.extsts & EXTSTS_UDPPKT) { 2159 UdpPtr udp(ip); 2160 udp->sum(0); 2161 udp->sum(cksum(udp)); 2162 txUdpChecksums++; 2163 } else if (txDescCache.extsts & EXTSTS_TCPPKT) { 2164 TcpPtr tcp(ip); 2165 tcp->sum(0); 2166 tcp->sum(cksum(tcp)); 2167 txTcpChecksums++; 2168 } 2169 if (txDescCache.extsts & EXTSTS_IPPKT) { 2170 ip->sum(0); 2171 ip->sum(cksum(ip)); 2172 txIpChecksums++; 2173 } 2174 } 2175 2176 txPacket->length = txPacketBufPtr - txPacket->data; 2177 // this is just because the receive can't handle a 2178 // packet bigger want to make sure 2179 assert(txPacket->length <= 1514); 2180#ifndef NDEBUG 2181 bool success = 2182#endif 2183 txFifo.push(txPacket); 2184 assert(success); 2185 2186 /* 2187 * this following section is not tqo spec, but 2188 * functionally shouldn't be any different. normally, 2189 * the chip will wait til the transmit has occurred 2190 * before writing back the descriptor because it has 2191 * to wait to see that it was successfully transmitted 2192 * to decide whether to set CMDSTS_OK or not. 2193 * however, in the simulator since it is always 2194 * successfully transmitted, and writing it exactly to 2195 * spec would complicate the code, we just do it here 2196 */ 2197 2198 txDescCache.cmdsts &= ~CMDSTS_OWN; 2199 txDescCache.cmdsts |= CMDSTS_OK; 2200 2201 DPRINTF(EthernetDesc, 2202 "txDesc writeback: cmdsts=%08x extsts=%08x\n", 2203 txDescCache.cmdsts, txDescCache.extsts); 2204 2205 txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts); 2206 txDmaAddr &= 0x3fffffff; 2207 txDmaData = &(txDescCache.cmdsts); 2208 txDmaLen = sizeof(txDescCache.cmdsts) + 2209 sizeof(txDescCache.extsts); 2210 txDmaFree = dmaDescFree; 2211 2212 descDmaWrites++; 2213 descDmaWrBytes += txDmaLen; 2214 2215 transmit(); 2216 txPacket = 0; 2217 2218 if (!txEnable) { 2219 DPRINTF(EthernetSM, "halting TX state machine\n"); 2220 txState = txIdle; 2221 goto exit; 2222 } else 2223 txState = txAdvance; 2224 2225 if (doTxDmaWrite()) 2226 goto exit; 2227 } 2228 } else { 2229 DPRINTF(EthernetSM, "this descriptor isn't done yet\n"); 2230 if (!txFifo.full()) { 2231 txState = txFragRead; 2232 2233 /* 2234 * The number of bytes transferred is either whatever 2235 * is left in the descriptor (txDescCnt), or if there 2236 * is not enough room in the fifo, just whatever room 2237 * is left in the fifo 2238 */ 2239 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail()); 2240 2241 txDmaAddr = txFragPtr & 0x3fffffff; 2242 txDmaData = txPacketBufPtr; 2243 txDmaLen = txXferLen; 2244 txDmaFree = dmaDataFree; 2245 2246 if (doTxDmaRead()) 2247 goto exit; 2248 } else { 2249 txState = txFifoBlock; 2250 transmit(); 2251 2252 goto exit; 2253 } 2254 2255 } 2256 break; 2257 2258 case txFragRead: 2259 if (txDmaState != dmaIdle) 2260 goto exit; 2261 2262 txPacketBufPtr += txXferLen; 2263 txFragPtr += txXferLen; 2264 txDescCnt -= txXferLen; 2265 txFifo.reserve(txXferLen); 2266 2267 txState = txFifoBlock; 2268 break; 2269 2270 case txDescWrite: 2271 if (txDmaState != dmaIdle) 2272 goto exit; 2273 2274 if (txDescCache.cmdsts & CMDSTS_INTR) 2275 devIntrPost(ISR_TXDESC); 2276 2277 if (!txEnable) { 2278 DPRINTF(EthernetSM, "halting TX state machine\n"); 2279 txState = txIdle; 2280 goto exit; 2281 } else 2282 txState = txAdvance; 2283 break; 2284 2285 case txAdvance: 2286 if (txDescCache.link == 0) { 2287 devIntrPost(ISR_TXIDLE); 2288 txState = txIdle; 2289 goto exit; 2290 } else { 2291 if (txDmaState != dmaIdle) 2292 goto exit; 2293 txState = txDescRead; 2294 regs.txdp = txDescCache.link; 2295 CTDD = false; 2296 2297 txDmaAddr = txDescCache.link & 0x3fffffff; 2298 txDmaData = &txDescCache; 2299 txDmaLen = sizeof(ns_desc); 2300 txDmaFree = dmaDescFree; 2301 2302 if (doTxDmaRead()) 2303 goto exit; 2304 } 2305 break; 2306 2307 default: 2308 panic("invalid state"); 2309 } 2310 2311 DPRINTF(EthernetSM, "entering next txState=%s\n", 2312 NsTxStateStrings[txState]); 2313 goto next; 2314 2315 exit: 2316 /** 2317 * @todo do we want to schedule a future kick? 2318 */ 2319 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n", 2320 NsTxStateStrings[txState]); 2321 2322 if (clock && !txKickEvent.scheduled()) 2323 txKickEvent.schedule(txKickTick); 2324} 2325 2326/** 2327 * Advance the EEPROM state machine 2328 * Called on rising edge of EEPROM clock bit in MEAR 2329 */ 2330void 2331NSGigE::eepromKick() 2332{ 2333 switch (eepromState) { 2334 2335 case eepromStart: 2336 2337 // Wait for start bit 2338 if (regs.mear & MEAR_EEDI) { 2339 // Set up to get 2 opcode bits 2340 eepromState = eepromGetOpcode; 2341 eepromBitsToRx = 2; 2342 eepromOpcode = 0; 2343 } 2344 break; 2345 2346 case eepromGetOpcode: 2347 eepromOpcode <<= 1; 2348 eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0; 2349 --eepromBitsToRx; 2350 2351 // Done getting opcode 2352 if (eepromBitsToRx == 0) { 2353 if (eepromOpcode != EEPROM_READ) 2354 panic("only EEPROM reads are implemented!"); 2355 2356 // Set up to get address 2357 eepromState = eepromGetAddress; 2358 eepromBitsToRx = 6; 2359 eepromAddress = 0; 2360 } 2361 break; 2362 2363 case eepromGetAddress: 2364 eepromAddress <<= 1; 2365 eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0; 2366 --eepromBitsToRx; 2367 2368 // Done getting address 2369 if (eepromBitsToRx == 0) { 2370 2371 if (eepromAddress >= EEPROM_SIZE) 2372 panic("EEPROM read access out of range!"); 2373 2374 switch (eepromAddress) { 2375 2376 case EEPROM_PMATCH2_ADDR: 2377 eepromData = rom.perfectMatch[5]; 2378 eepromData <<= 8; 2379 eepromData += rom.perfectMatch[4]; 2380 break; 2381 2382 case EEPROM_PMATCH1_ADDR: 2383 eepromData = rom.perfectMatch[3]; 2384 eepromData <<= 8; 2385 eepromData += rom.perfectMatch[2]; 2386 break; 2387 2388 case EEPROM_PMATCH0_ADDR: 2389 eepromData = rom.perfectMatch[1]; 2390 eepromData <<= 8; 2391 eepromData += rom.perfectMatch[0]; 2392 break; 2393 2394 default: 2395 panic("FreeBSD driver only uses EEPROM to read PMATCH!"); 2396 } 2397 // Set up to read data 2398 eepromState = eepromRead; 2399 eepromBitsToRx = 16; 2400 2401 // Clear data in bit 2402 regs.mear &= ~MEAR_EEDI; 2403 } 2404 break; 2405 2406 case eepromRead: 2407 // Clear Data Out bit 2408 regs.mear &= ~MEAR_EEDO; 2409 // Set bit to value of current EEPROM bit 2410 regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0; 2411 2412 eepromData <<= 1; 2413 --eepromBitsToRx; 2414 2415 // All done 2416 if (eepromBitsToRx == 0) { 2417 eepromState = eepromStart; 2418 } 2419 break; 2420 2421 default: 2422 panic("invalid EEPROM state"); 2423 } 2424 2425} 2426 2427void 2428NSGigE::transferDone() 2429{ 2430 if (txFifo.empty()) { 2431 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n"); 2432 return; 2433 } 2434 2435 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n"); 2436 2437 if (txEvent.scheduled()) 2438 txEvent.reschedule(curTick + cycles(1)); 2439 else 2440 txEvent.schedule(curTick + cycles(1)); 2441} 2442 2443bool 2444NSGigE::rxFilter(const PacketPtr &packet) 2445{ 2446 EthPtr eth = packet; 2447 bool drop = true; 2448 string type; 2449 2450 const EthAddr &dst = eth->dst(); 2451 if (dst.unicast()) { 2452 // If we're accepting all unicast addresses 2453 if (acceptUnicast) 2454 drop = false; 2455 2456 // If we make a perfect match 2457 if (acceptPerfect && dst == rom.perfectMatch) 2458 drop = false; 2459 2460 if (acceptArp && eth->type() == ETH_TYPE_ARP) 2461 drop = false; 2462 2463 } else if (dst.broadcast()) { 2464 // if we're accepting broadcasts 2465 if (acceptBroadcast) 2466 drop = false; 2467 2468 } else if (dst.multicast()) { 2469 // if we're accepting all multicasts 2470 if (acceptMulticast) 2471 drop = false; 2472 2473 // Multicast hashing faked - all packets accepted 2474 if (multicastHashEnable) 2475 drop = false; 2476 } 2477 2478 if (drop) { 2479 DPRINTF(Ethernet, "rxFilter drop\n"); 2480 DDUMP(EthernetData, packet->data, packet->length); 2481 } 2482 2483 return drop; 2484} 2485 2486bool 2487NSGigE::recvPacket(PacketPtr packet) 2488{ 2489 rxBytes += packet->length; 2490 rxPackets++; 2491 2492 DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n", 2493 rxFifo.avail()); 2494 2495 if (!rxEnable) { 2496 DPRINTF(Ethernet, "receive disabled...packet dropped\n"); 2497 interface->recvDone(); 2498 return true; 2499 } 2500 2501 if (!rxFilterEnable) { 2502 DPRINTF(Ethernet, 2503 "receive packet filtering disabled . . . packet dropped\n"); 2504 interface->recvDone(); 2505 return true; 2506 } 2507 2508 if (rxFilter(packet)) { 2509 DPRINTF(Ethernet, "packet filtered...dropped\n"); 2510 interface->recvDone(); 2511 return true; 2512 } 2513 2514 if (rxFifo.avail() < packet->length) { 2515#if TRACING_ON 2516 IpPtr ip(packet); 2517 TcpPtr tcp(ip); 2518 if (ip) { 2519 DPRINTF(Ethernet, 2520 "packet won't fit in receive buffer...pkt ID %d dropped\n", 2521 ip->id()); 2522 if (tcp) { 2523 DPRINTF(Ethernet, "Seq=%d\n", tcp->seq()); 2524 } 2525 } 2526#endif 2527 droppedPackets++; 2528 devIntrPost(ISR_RXORN); 2529 return false; 2530 } 2531 2532 rxFifo.push(packet); 2533 interface->recvDone(); 2534 2535 rxKick(); 2536 return true; 2537} 2538 2539//===================================================================== 2540// 2541// 2542void 2543NSGigE::serialize(ostream &os) 2544{ 2545 // Serialize the PciDev base class 2546 PciDev::serialize(os); 2547 2548 /* 2549 * Finalize any DMA events now. 2550 */ 2551 if (rxDmaReadEvent.scheduled()) 2552 rxDmaReadCopy(); 2553 if (rxDmaWriteEvent.scheduled()) 2554 rxDmaWriteCopy(); 2555 if (txDmaReadEvent.scheduled()) 2556 txDmaReadCopy(); 2557 if (txDmaWriteEvent.scheduled()) 2558 txDmaWriteCopy(); 2559 2560 /* 2561 * Serialize the device registers 2562 */ 2563 SERIALIZE_SCALAR(regs.command); 2564 SERIALIZE_SCALAR(regs.config); 2565 SERIALIZE_SCALAR(regs.mear); 2566 SERIALIZE_SCALAR(regs.ptscr); 2567 SERIALIZE_SCALAR(regs.isr); 2568 SERIALIZE_SCALAR(regs.imr); 2569 SERIALIZE_SCALAR(regs.ier); 2570 SERIALIZE_SCALAR(regs.ihr); 2571 SERIALIZE_SCALAR(regs.txdp); 2572 SERIALIZE_SCALAR(regs.txdp_hi); 2573 SERIALIZE_SCALAR(regs.txcfg); 2574 SERIALIZE_SCALAR(regs.gpior); 2575 SERIALIZE_SCALAR(regs.rxdp); 2576 SERIALIZE_SCALAR(regs.rxdp_hi); 2577 SERIALIZE_SCALAR(regs.rxcfg); 2578 SERIALIZE_SCALAR(regs.pqcr); 2579 SERIALIZE_SCALAR(regs.wcsr); 2580 SERIALIZE_SCALAR(regs.pcr); 2581 SERIALIZE_SCALAR(regs.rfcr); 2582 SERIALIZE_SCALAR(regs.rfdr); 2583 SERIALIZE_SCALAR(regs.brar); 2584 SERIALIZE_SCALAR(regs.brdr); 2585 SERIALIZE_SCALAR(regs.srr); 2586 SERIALIZE_SCALAR(regs.mibc); 2587 SERIALIZE_SCALAR(regs.vrcr); 2588 SERIALIZE_SCALAR(regs.vtcr); 2589 SERIALIZE_SCALAR(regs.vdr); 2590 SERIALIZE_SCALAR(regs.ccsr); 2591 SERIALIZE_SCALAR(regs.tbicr); 2592 SERIALIZE_SCALAR(regs.tbisr); 2593 SERIALIZE_SCALAR(regs.tanar); 2594 SERIALIZE_SCALAR(regs.tanlpar); 2595 SERIALIZE_SCALAR(regs.taner); 2596 SERIALIZE_SCALAR(regs.tesr); 2597 2598 SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN); 2599 SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE); 2600 2601 SERIALIZE_SCALAR(ioEnable); 2602 2603 /* 2604 * Serialize the data Fifos 2605 */ 2606 rxFifo.serialize("rxFifo", os); 2607 txFifo.serialize("txFifo", os); 2608 2609 /* 2610 * Serialize the various helper variables 2611 */ 2612 bool txPacketExists = txPacket; 2613 SERIALIZE_SCALAR(txPacketExists); 2614 if (txPacketExists) { 2615 txPacket->length = txPacketBufPtr - txPacket->data; 2616 txPacket->serialize("txPacket", os); 2617 uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data); 2618 SERIALIZE_SCALAR(txPktBufPtr); 2619 } 2620 2621 bool rxPacketExists = rxPacket; 2622 SERIALIZE_SCALAR(rxPacketExists); 2623 if (rxPacketExists) { 2624 rxPacket->serialize("rxPacket", os); 2625 uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data); 2626 SERIALIZE_SCALAR(rxPktBufPtr); 2627 } 2628 2629 SERIALIZE_SCALAR(txXferLen); 2630 SERIALIZE_SCALAR(rxXferLen); 2631 2632 /* 2633 * Serialize DescCaches 2634 */ 2635 SERIALIZE_SCALAR(txDescCache.link); 2636 SERIALIZE_SCALAR(txDescCache.bufptr); 2637 SERIALIZE_SCALAR(txDescCache.cmdsts); 2638 SERIALIZE_SCALAR(txDescCache.extsts); 2639 SERIALIZE_SCALAR(rxDescCache.link); 2640 SERIALIZE_SCALAR(rxDescCache.bufptr); 2641 SERIALIZE_SCALAR(rxDescCache.cmdsts); 2642 SERIALIZE_SCALAR(rxDescCache.extsts); 2643 SERIALIZE_SCALAR(extstsEnable); 2644 2645 /* 2646 * Serialize tx state machine 2647 */ 2648 int txState = this->txState; 2649 SERIALIZE_SCALAR(txState); 2650 SERIALIZE_SCALAR(txEnable); 2651 SERIALIZE_SCALAR(CTDD); 2652 SERIALIZE_SCALAR(txFragPtr); 2653 SERIALIZE_SCALAR(txDescCnt); 2654 int txDmaState = this->txDmaState; 2655 SERIALIZE_SCALAR(txDmaState); 2656 SERIALIZE_SCALAR(txKickTick); 2657 2658 /* 2659 * Serialize rx state machine 2660 */ 2661 int rxState = this->rxState; 2662 SERIALIZE_SCALAR(rxState); 2663 SERIALIZE_SCALAR(rxEnable); 2664 SERIALIZE_SCALAR(CRDD); 2665 SERIALIZE_SCALAR(rxPktBytes); 2666 SERIALIZE_SCALAR(rxFragPtr); 2667 SERIALIZE_SCALAR(rxDescCnt); 2668 int rxDmaState = this->rxDmaState; 2669 SERIALIZE_SCALAR(rxDmaState); 2670 SERIALIZE_SCALAR(rxKickTick); 2671 2672 /* 2673 * Serialize EEPROM state machine 2674 */ 2675 int eepromState = this->eepromState; 2676 SERIALIZE_SCALAR(eepromState); 2677 SERIALIZE_SCALAR(eepromClk); 2678 SERIALIZE_SCALAR(eepromBitsToRx); 2679 SERIALIZE_SCALAR(eepromOpcode); 2680 SERIALIZE_SCALAR(eepromAddress); 2681 SERIALIZE_SCALAR(eepromData); 2682 2683 /* 2684 * If there's a pending transmit, store the time so we can 2685 * reschedule it later 2686 */ 2687 Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0; 2688 SERIALIZE_SCALAR(transmitTick); 2689 2690 /* 2691 * receive address filter settings 2692 */ 2693 SERIALIZE_SCALAR(rxFilterEnable); 2694 SERIALIZE_SCALAR(acceptBroadcast); 2695 SERIALIZE_SCALAR(acceptMulticast); 2696 SERIALIZE_SCALAR(acceptUnicast); 2697 SERIALIZE_SCALAR(acceptPerfect); 2698 SERIALIZE_SCALAR(acceptArp); 2699 SERIALIZE_SCALAR(multicastHashEnable); 2700 2701 /* 2702 * Keep track of pending interrupt status. 2703 */ 2704 SERIALIZE_SCALAR(intrTick); 2705 SERIALIZE_SCALAR(cpuPendingIntr); 2706 Tick intrEventTick = 0; 2707 if (intrEvent) 2708 intrEventTick = intrEvent->when(); 2709 SERIALIZE_SCALAR(intrEventTick); 2710 2711} 2712 2713void 2714NSGigE::unserialize(Checkpoint *cp, const std::string §ion) 2715{ 2716 // Unserialize the PciDev base class 2717 PciDev::unserialize(cp, section); 2718 2719 UNSERIALIZE_SCALAR(regs.command); 2720 UNSERIALIZE_SCALAR(regs.config); 2721 UNSERIALIZE_SCALAR(regs.mear); 2722 UNSERIALIZE_SCALAR(regs.ptscr); 2723 UNSERIALIZE_SCALAR(regs.isr); 2724 UNSERIALIZE_SCALAR(regs.imr); 2725 UNSERIALIZE_SCALAR(regs.ier); 2726 UNSERIALIZE_SCALAR(regs.ihr); 2727 UNSERIALIZE_SCALAR(regs.txdp); 2728 UNSERIALIZE_SCALAR(regs.txdp_hi); 2729 UNSERIALIZE_SCALAR(regs.txcfg); 2730 UNSERIALIZE_SCALAR(regs.gpior); 2731 UNSERIALIZE_SCALAR(regs.rxdp); 2732 UNSERIALIZE_SCALAR(regs.rxdp_hi); 2733 UNSERIALIZE_SCALAR(regs.rxcfg); 2734 UNSERIALIZE_SCALAR(regs.pqcr); 2735 UNSERIALIZE_SCALAR(regs.wcsr); 2736 UNSERIALIZE_SCALAR(regs.pcr); 2737 UNSERIALIZE_SCALAR(regs.rfcr); 2738 UNSERIALIZE_SCALAR(regs.rfdr); 2739 UNSERIALIZE_SCALAR(regs.brar); 2740 UNSERIALIZE_SCALAR(regs.brdr); 2741 UNSERIALIZE_SCALAR(regs.srr); 2742 UNSERIALIZE_SCALAR(regs.mibc); 2743 UNSERIALIZE_SCALAR(regs.vrcr); 2744 UNSERIALIZE_SCALAR(regs.vtcr); 2745 UNSERIALIZE_SCALAR(regs.vdr); 2746 UNSERIALIZE_SCALAR(regs.ccsr); 2747 UNSERIALIZE_SCALAR(regs.tbicr); 2748 UNSERIALIZE_SCALAR(regs.tbisr); 2749 UNSERIALIZE_SCALAR(regs.tanar); 2750 UNSERIALIZE_SCALAR(regs.tanlpar); 2751 UNSERIALIZE_SCALAR(regs.taner); 2752 UNSERIALIZE_SCALAR(regs.tesr); 2753 2754 UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN); 2755 UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE); 2756 2757 UNSERIALIZE_SCALAR(ioEnable); 2758 2759 /* 2760 * unserialize the data fifos 2761 */ 2762 rxFifo.unserialize("rxFifo", cp, section); 2763 txFifo.unserialize("txFifo", cp, section); 2764 2765 /* 2766 * unserialize the various helper variables 2767 */ 2768 bool txPacketExists; 2769 UNSERIALIZE_SCALAR(txPacketExists); 2770 if (txPacketExists) { 2771 txPacket = new PacketData(16384); 2772 txPacket->unserialize("txPacket", cp, section); 2773 uint32_t txPktBufPtr; 2774 UNSERIALIZE_SCALAR(txPktBufPtr); 2775 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr; 2776 } else 2777 txPacket = 0; 2778 2779 bool rxPacketExists; 2780 UNSERIALIZE_SCALAR(rxPacketExists); 2781 rxPacket = 0; 2782 if (rxPacketExists) { 2783 rxPacket = new PacketData(16384); 2784 rxPacket->unserialize("rxPacket", cp, section); 2785 uint32_t rxPktBufPtr; 2786 UNSERIALIZE_SCALAR(rxPktBufPtr); 2787 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr; 2788 } else 2789 rxPacket = 0; 2790 2791 UNSERIALIZE_SCALAR(txXferLen); 2792 UNSERIALIZE_SCALAR(rxXferLen); 2793 2794 /* 2795 * Unserialize DescCaches 2796 */ 2797 UNSERIALIZE_SCALAR(txDescCache.link); 2798 UNSERIALIZE_SCALAR(txDescCache.bufptr); 2799 UNSERIALIZE_SCALAR(txDescCache.cmdsts); 2800 UNSERIALIZE_SCALAR(txDescCache.extsts); 2801 UNSERIALIZE_SCALAR(rxDescCache.link); 2802 UNSERIALIZE_SCALAR(rxDescCache.bufptr); 2803 UNSERIALIZE_SCALAR(rxDescCache.cmdsts); 2804 UNSERIALIZE_SCALAR(rxDescCache.extsts); 2805 UNSERIALIZE_SCALAR(extstsEnable); 2806 2807 /* 2808 * unserialize tx state machine 2809 */ 2810 int txState; 2811 UNSERIALIZE_SCALAR(txState); 2812 this->txState = (TxState) txState; 2813 UNSERIALIZE_SCALAR(txEnable); 2814 UNSERIALIZE_SCALAR(CTDD); 2815 UNSERIALIZE_SCALAR(txFragPtr); 2816 UNSERIALIZE_SCALAR(txDescCnt); 2817 int txDmaState; 2818 UNSERIALIZE_SCALAR(txDmaState); 2819 this->txDmaState = (DmaState) txDmaState; 2820 UNSERIALIZE_SCALAR(txKickTick); 2821 if (txKickTick) 2822 txKickEvent.schedule(txKickTick); 2823 2824 /* 2825 * unserialize rx state machine 2826 */ 2827 int rxState; 2828 UNSERIALIZE_SCALAR(rxState); 2829 this->rxState = (RxState) rxState; 2830 UNSERIALIZE_SCALAR(rxEnable); 2831 UNSERIALIZE_SCALAR(CRDD); 2832 UNSERIALIZE_SCALAR(rxPktBytes); 2833 UNSERIALIZE_SCALAR(rxFragPtr); 2834 UNSERIALIZE_SCALAR(rxDescCnt); 2835 int rxDmaState; 2836 UNSERIALIZE_SCALAR(rxDmaState); 2837 this->rxDmaState = (DmaState) rxDmaState; 2838 UNSERIALIZE_SCALAR(rxKickTick); 2839 if (rxKickTick) 2840 rxKickEvent.schedule(rxKickTick); 2841 2842 /* 2843 * Unserialize EEPROM state machine 2844 */ 2845 int eepromState; 2846 UNSERIALIZE_SCALAR(eepromState); 2847 this->eepromState = (EEPROMState) eepromState; 2848 UNSERIALIZE_SCALAR(eepromClk); 2849 UNSERIALIZE_SCALAR(eepromBitsToRx); 2850 UNSERIALIZE_SCALAR(eepromOpcode); 2851 UNSERIALIZE_SCALAR(eepromAddress); 2852 UNSERIALIZE_SCALAR(eepromData); 2853 2854 /* 2855 * If there's a pending transmit, reschedule it now 2856 */ 2857 Tick transmitTick; 2858 UNSERIALIZE_SCALAR(transmitTick); 2859 if (transmitTick) 2860 txEvent.schedule(curTick + transmitTick); 2861 2862 /* 2863 * unserialize receive address filter settings 2864 */ 2865 UNSERIALIZE_SCALAR(rxFilterEnable); 2866 UNSERIALIZE_SCALAR(acceptBroadcast); 2867 UNSERIALIZE_SCALAR(acceptMulticast); 2868 UNSERIALIZE_SCALAR(acceptUnicast); 2869 UNSERIALIZE_SCALAR(acceptPerfect); 2870 UNSERIALIZE_SCALAR(acceptArp); 2871 UNSERIALIZE_SCALAR(multicastHashEnable); 2872 2873 /* 2874 * Keep track of pending interrupt status. 2875 */ 2876 UNSERIALIZE_SCALAR(intrTick); 2877 UNSERIALIZE_SCALAR(cpuPendingIntr); 2878 Tick intrEventTick; 2879 UNSERIALIZE_SCALAR(intrEventTick); 2880 if (intrEventTick) { 2881 intrEvent = new IntrEvent(this, true); 2882 intrEvent->schedule(intrEventTick); 2883 } 2884 2885 /* 2886 * re-add addrRanges to bus bridges 2887 */ 2888 if (pioInterface) { 2889 pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0])); 2890 pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1])); 2891 } 2892} 2893 2894Tick 2895NSGigE::cacheAccess(MemReqPtr &req) 2896{ 2897 DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n", 2898 req->paddr, req->paddr - addr); 2899 return curTick + pioLatency; 2900} 2901 2902BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt) 2903 2904 SimObjectParam<EtherInt *> peer; 2905 SimObjectParam<NSGigE *> device; 2906 2907END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt) 2908 2909BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt) 2910 2911 INIT_PARAM_DFLT(peer, "peer interface", NULL), 2912 INIT_PARAM(device, "Ethernet device of this interface") 2913 2914END_INIT_SIM_OBJECT_PARAMS(NSGigEInt) 2915 2916CREATE_SIM_OBJECT(NSGigEInt) 2917{ 2918 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device); 2919 2920 EtherInt *p = (EtherInt *)peer; 2921 if (p) { 2922 dev_int->setPeer(p); 2923 p->setPeer(dev_int); 2924 } 2925 2926 return dev_int; 2927} 2928 2929REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt) 2930 2931 2932BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE) 2933 2934 Param<Addr> addr; 2935 Param<Tick> clock; 2936 Param<Tick> tx_delay; 2937 Param<Tick> rx_delay; 2938 Param<Tick> intr_delay; 2939 SimObjectParam<MemoryController *> mmu; 2940 SimObjectParam<PhysicalMemory *> physmem; 2941 Param<bool> rx_filter; 2942 Param<string> hardware_address; 2943 SimObjectParam<Bus*> io_bus; 2944 SimObjectParam<Bus*> payload_bus; 2945 SimObjectParam<HierParams *> hier; 2946 Param<Tick> pio_latency; 2947 Param<bool> dma_desc_free; 2948 Param<bool> dma_data_free; 2949 Param<Tick> dma_read_delay; 2950 Param<Tick> dma_write_delay; 2951 Param<Tick> dma_read_factor; 2952 Param<Tick> dma_write_factor; 2953 SimObjectParam<PciConfigAll *> configspace; 2954 SimObjectParam<PciConfigData *> configdata; 2955 SimObjectParam<Platform *> platform; 2956 Param<uint32_t> pci_bus; 2957 Param<uint32_t> pci_dev; 2958 Param<uint32_t> pci_func; 2959 Param<uint32_t> tx_fifo_size; 2960 Param<uint32_t> rx_fifo_size; 2961 Param<uint32_t> m5reg; 2962 Param<bool> dma_no_allocate; 2963 2964END_DECLARE_SIM_OBJECT_PARAMS(NSGigE) 2965 2966BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE) 2967 2968 INIT_PARAM(addr, "Device Address"), 2969 INIT_PARAM(clock, "State machine processor frequency"), 2970 INIT_PARAM(tx_delay, "Transmit Delay"), 2971 INIT_PARAM(rx_delay, "Receive Delay"), 2972 INIT_PARAM(intr_delay, "Interrupt Delay in microseconds"), 2973 INIT_PARAM(mmu, "Memory Controller"), 2974 INIT_PARAM(physmem, "Physical Memory"), 2975 INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true), 2976 INIT_PARAM(hardware_address, "Ethernet Hardware Address"), 2977 INIT_PARAM_DFLT(io_bus, "The IO Bus to attach to for headers", NULL), 2978 INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL), 2979 INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams), 2980 INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1), 2981 INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false), 2982 INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false), 2983 INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0), 2984 INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0), 2985 INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0), 2986 INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0), 2987 INIT_PARAM(configspace, "PCI Configspace"), 2988 INIT_PARAM(configdata, "PCI Config data"), 2989 INIT_PARAM(platform, "Platform"), 2990 INIT_PARAM(pci_bus, "PCI bus"), 2991 INIT_PARAM(pci_dev, "PCI device number"), 2992 INIT_PARAM(pci_func, "PCI function code"), 2993 INIT_PARAM_DFLT(tx_fifo_size, "max size in bytes of txFifo", 131072), 2994 INIT_PARAM_DFLT(rx_fifo_size, "max size in bytes of rxFifo", 131072), 2995 INIT_PARAM(m5reg, "m5 register"), 2996 INIT_PARAM_DFLT(dma_no_allocate, "Should DMA reads allocate cache lines", true) 2997 2998END_INIT_SIM_OBJECT_PARAMS(NSGigE) 2999 3000 3001CREATE_SIM_OBJECT(NSGigE) 3002{ 3003 NSGigE::Params *params = new NSGigE::Params; 3004 3005 params->name = getInstanceName(); 3006 params->mmu = mmu; 3007 params->configSpace = configspace; 3008 params->configData = configdata; 3009 params->plat = platform; 3010 params->busNum = pci_bus; 3011 params->deviceNum = pci_dev; 3012 params->functionNum = pci_func; 3013 3014 params->clock = clock; 3015 params->intr_delay = intr_delay; 3016 params->pmem = physmem; 3017 params->tx_delay = tx_delay; 3018 params->rx_delay = rx_delay; 3019 params->hier = hier; 3020 params->header_bus = io_bus; 3021 params->payload_bus = payload_bus; 3022 params->pio_latency = pio_latency; 3023 params->dma_desc_free = dma_desc_free; 3024 params->dma_data_free = dma_data_free; 3025 params->dma_read_delay = dma_read_delay; 3026 params->dma_write_delay = dma_write_delay; 3027 params->dma_read_factor = dma_read_factor; 3028 params->dma_write_factor = dma_write_factor; 3029 params->rx_filter = rx_filter; 3030 params->eaddr = hardware_address; 3031 params->tx_fifo_size = tx_fifo_size; 3032 params->rx_fifo_size = rx_fifo_size; 3033 params->m5reg = m5reg; 3034 params->dma_no_allocate = dma_no_allocate; 3035 return new NSGigE(params); 3036} 3037 3038REGISTER_SIM_OBJECT("NSGigE", NSGigE) 3039