// ns_gige.cc revision 1843
/*
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @file
 * Device module for modelling the National Semiconductor
 * DP83820 ethernet controller.
Does not support priority queueing 32 */ 33#include <cstdio> 34#include <deque> 35#include <string> 36 37#include "base/inet.hh" 38#include "cpu/exec_context.hh" 39#include "dev/etherlink.hh" 40#include "dev/ns_gige.hh" 41#include "dev/pciconfigall.hh" 42#include "mem/bus/bus.hh" 43#include "mem/bus/dma_interface.hh" 44#include "mem/bus/pio_interface.hh" 45#include "mem/bus/pio_interface_impl.hh" 46#include "mem/functional/memory_control.hh" 47#include "mem/functional/physical.hh" 48#include "sim/builder.hh" 49#include "sim/debug.hh" 50#include "sim/host.hh" 51#include "sim/stats.hh" 52#include "targetarch/vtophys.hh" 53 54const char *NsRxStateStrings[] = 55{ 56 "rxIdle", 57 "rxDescRefr", 58 "rxDescRead", 59 "rxFifoBlock", 60 "rxFragWrite", 61 "rxDescWrite", 62 "rxAdvance" 63}; 64 65const char *NsTxStateStrings[] = 66{ 67 "txIdle", 68 "txDescRefr", 69 "txDescRead", 70 "txFifoBlock", 71 "txFragRead", 72 "txDescWrite", 73 "txAdvance" 74}; 75 76const char *NsDmaState[] = 77{ 78 "dmaIdle", 79 "dmaReading", 80 "dmaWriting", 81 "dmaReadWaiting", 82 "dmaWriteWaiting" 83}; 84 85using namespace std; 86using namespace Net; 87 88/////////////////////////////////////////////////////////////////////// 89// 90// NSGigE PCI Device 91// 92NSGigE::NSGigE(Params *p) 93 : PciDev(p), ioEnable(false), 94 txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size), 95 txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL), 96 txXferLen(0), rxXferLen(0), clock(p->clock), 97 txState(txIdle), txEnable(false), CTDD(false), 98 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle), 99 rxEnable(false), CRDD(false), rxPktBytes(0), 100 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false), 101 eepromState(eepromStart), rxDmaReadEvent(this), rxDmaWriteEvent(this), 102 txDmaReadEvent(this), txDmaWriteEvent(this), 103 dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free), 104 txDelay(p->tx_delay), rxDelay(p->rx_delay), 105 rxKickTick(0), rxKickEvent(this), 
txKickTick(0), txKickEvent(this), 106 txEvent(this), rxFilterEnable(p->rx_filter), acceptBroadcast(false), 107 acceptMulticast(false), acceptUnicast(false), 108 acceptPerfect(false), acceptArp(false), multicastHashEnable(false), 109 physmem(p->pmem), intrTick(0), cpuPendingIntr(false), 110 intrEvent(0), interface(0) 111{ 112 if (p->header_bus) { 113 pioInterface = newPioInterface(name(), p->hier, 114 p->header_bus, this, 115 &NSGigE::cacheAccess); 116 117 pioLatency = p->pio_latency * p->header_bus->clockRate; 118 119 if (p->payload_bus) 120 dmaInterface = new DMAInterface<Bus>(name() + ".dma", 121 p->header_bus, 122 p->payload_bus, 1, 123 p->dma_no_allocate); 124 else 125 dmaInterface = new DMAInterface<Bus>(name() + ".dma", 126 p->header_bus, 127 p->header_bus, 1, 128 p->dma_no_allocate); 129 } else if (p->payload_bus) { 130 pioInterface = newPioInterface(name(), p->hier, 131 p->payload_bus, this, 132 &NSGigE::cacheAccess); 133 134 pioLatency = p->pio_latency * p->payload_bus->clockRate; 135 136 dmaInterface = new DMAInterface<Bus>(name() + ".dma", 137 p->payload_bus, 138 p->payload_bus, 1, 139 p->dma_no_allocate); 140 } 141 142 143 intrDelay = p->intr_delay; 144 dmaReadDelay = p->dma_read_delay; 145 dmaWriteDelay = p->dma_write_delay; 146 dmaReadFactor = p->dma_read_factor; 147 dmaWriteFactor = p->dma_write_factor; 148 149 regsReset(); 150 memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN); 151} 152 153NSGigE::~NSGigE() 154{} 155 156void 157NSGigE::regStats() 158{ 159 txBytes 160 .name(name() + ".txBytes") 161 .desc("Bytes Transmitted") 162 .prereq(txBytes) 163 ; 164 165 rxBytes 166 .name(name() + ".rxBytes") 167 .desc("Bytes Received") 168 .prereq(rxBytes) 169 ; 170 171 txPackets 172 .name(name() + ".txPackets") 173 .desc("Number of Packets Transmitted") 174 .prereq(txBytes) 175 ; 176 177 rxPackets 178 .name(name() + ".rxPackets") 179 .desc("Number of Packets Received") 180 .prereq(rxBytes) 181 ; 182 183 txIpChecksums 184 .name(name() + ".txIpChecksums") 
185 .desc("Number of tx IP Checksums done by device") 186 .precision(0) 187 .prereq(txBytes) 188 ; 189 190 rxIpChecksums 191 .name(name() + ".rxIpChecksums") 192 .desc("Number of rx IP Checksums done by device") 193 .precision(0) 194 .prereq(rxBytes) 195 ; 196 197 txTcpChecksums 198 .name(name() + ".txTcpChecksums") 199 .desc("Number of tx TCP Checksums done by device") 200 .precision(0) 201 .prereq(txBytes) 202 ; 203 204 rxTcpChecksums 205 .name(name() + ".rxTcpChecksums") 206 .desc("Number of rx TCP Checksums done by device") 207 .precision(0) 208 .prereq(rxBytes) 209 ; 210 211 txUdpChecksums 212 .name(name() + ".txUdpChecksums") 213 .desc("Number of tx UDP Checksums done by device") 214 .precision(0) 215 .prereq(txBytes) 216 ; 217 218 rxUdpChecksums 219 .name(name() + ".rxUdpChecksums") 220 .desc("Number of rx UDP Checksums done by device") 221 .precision(0) 222 .prereq(rxBytes) 223 ; 224 225 descDmaReads 226 .name(name() + ".descDMAReads") 227 .desc("Number of descriptors the device read w/ DMA") 228 .precision(0) 229 ; 230 231 descDmaWrites 232 .name(name() + ".descDMAWrites") 233 .desc("Number of descriptors the device wrote w/ DMA") 234 .precision(0) 235 ; 236 237 descDmaRdBytes 238 .name(name() + ".descDmaReadBytes") 239 .desc("number of descriptor bytes read w/ DMA") 240 .precision(0) 241 ; 242 243 descDmaWrBytes 244 .name(name() + ".descDmaWriteBytes") 245 .desc("number of descriptor bytes write w/ DMA") 246 .precision(0) 247 ; 248 249 txBandwidth 250 .name(name() + ".txBandwidth") 251 .desc("Transmit Bandwidth (bits/s)") 252 .precision(0) 253 .prereq(txBytes) 254 ; 255 256 rxBandwidth 257 .name(name() + ".rxBandwidth") 258 .desc("Receive Bandwidth (bits/s)") 259 .precision(0) 260 .prereq(rxBytes) 261 ; 262 263 totBandwidth 264 .name(name() + ".totBandwidth") 265 .desc("Total Bandwidth (bits/s)") 266 .precision(0) 267 .prereq(totBytes) 268 ; 269 270 totPackets 271 .name(name() + ".totPackets") 272 .desc("Total Packets") 273 .precision(0) 274 
.prereq(totBytes) 275 ; 276 277 totBytes 278 .name(name() + ".totBytes") 279 .desc("Total Bytes") 280 .precision(0) 281 .prereq(totBytes) 282 ; 283 284 totPacketRate 285 .name(name() + ".totPPS") 286 .desc("Total Tranmission Rate (packets/s)") 287 .precision(0) 288 .prereq(totBytes) 289 ; 290 291 txPacketRate 292 .name(name() + ".txPPS") 293 .desc("Packet Tranmission Rate (packets/s)") 294 .precision(0) 295 .prereq(txBytes) 296 ; 297 298 rxPacketRate 299 .name(name() + ".rxPPS") 300 .desc("Packet Reception Rate (packets/s)") 301 .precision(0) 302 .prereq(rxBytes) 303 ; 304 305 postedSwi 306 .name(name() + ".postedSwi") 307 .desc("number of software interrupts posted to CPU") 308 .precision(0) 309 ; 310 311 totalSwi 312 .name(name() + ".totalSwi") 313 .desc("number of total Swi written to ISR") 314 .precision(0) 315 ; 316 317 coalescedSwi 318 .name(name() + ".coalescedSwi") 319 .desc("average number of Swi's coalesced into each post") 320 .precision(0) 321 ; 322 323 postedRxIdle 324 .name(name() + ".postedRxIdle") 325 .desc("number of rxIdle interrupts posted to CPU") 326 .precision(0) 327 ; 328 329 totalRxIdle 330 .name(name() + ".totalRxIdle") 331 .desc("number of total RxIdle written to ISR") 332 .precision(0) 333 ; 334 335 coalescedRxIdle 336 .name(name() + ".coalescedRxIdle") 337 .desc("average number of RxIdle's coalesced into each post") 338 .precision(0) 339 ; 340 341 postedRxOk 342 .name(name() + ".postedRxOk") 343 .desc("number of RxOk interrupts posted to CPU") 344 .precision(0) 345 ; 346 347 totalRxOk 348 .name(name() + ".totalRxOk") 349 .desc("number of total RxOk written to ISR") 350 .precision(0) 351 ; 352 353 coalescedRxOk 354 .name(name() + ".coalescedRxOk") 355 .desc("average number of RxOk's coalesced into each post") 356 .precision(0) 357 ; 358 359 postedRxDesc 360 .name(name() + ".postedRxDesc") 361 .desc("number of RxDesc interrupts posted to CPU") 362 .precision(0) 363 ; 364 365 totalRxDesc 366 .name(name() + ".totalRxDesc") 367 .desc("number 
of total RxDesc written to ISR") 368 .precision(0) 369 ; 370 371 coalescedRxDesc 372 .name(name() + ".coalescedRxDesc") 373 .desc("average number of RxDesc's coalesced into each post") 374 .precision(0) 375 ; 376 377 postedTxOk 378 .name(name() + ".postedTxOk") 379 .desc("number of TxOk interrupts posted to CPU") 380 .precision(0) 381 ; 382 383 totalTxOk 384 .name(name() + ".totalTxOk") 385 .desc("number of total TxOk written to ISR") 386 .precision(0) 387 ; 388 389 coalescedTxOk 390 .name(name() + ".coalescedTxOk") 391 .desc("average number of TxOk's coalesced into each post") 392 .precision(0) 393 ; 394 395 postedTxIdle 396 .name(name() + ".postedTxIdle") 397 .desc("number of TxIdle interrupts posted to CPU") 398 .precision(0) 399 ; 400 401 totalTxIdle 402 .name(name() + ".totalTxIdle") 403 .desc("number of total TxIdle written to ISR") 404 .precision(0) 405 ; 406 407 coalescedTxIdle 408 .name(name() + ".coalescedTxIdle") 409 .desc("average number of TxIdle's coalesced into each post") 410 .precision(0) 411 ; 412 413 postedTxDesc 414 .name(name() + ".postedTxDesc") 415 .desc("number of TxDesc interrupts posted to CPU") 416 .precision(0) 417 ; 418 419 totalTxDesc 420 .name(name() + ".totalTxDesc") 421 .desc("number of total TxDesc written to ISR") 422 .precision(0) 423 ; 424 425 coalescedTxDesc 426 .name(name() + ".coalescedTxDesc") 427 .desc("average number of TxDesc's coalesced into each post") 428 .precision(0) 429 ; 430 431 postedRxOrn 432 .name(name() + ".postedRxOrn") 433 .desc("number of RxOrn posted to CPU") 434 .precision(0) 435 ; 436 437 totalRxOrn 438 .name(name() + ".totalRxOrn") 439 .desc("number of total RxOrn written to ISR") 440 .precision(0) 441 ; 442 443 coalescedRxOrn 444 .name(name() + ".coalescedRxOrn") 445 .desc("average number of RxOrn's coalesced into each post") 446 .precision(0) 447 ; 448 449 coalescedTotal 450 .name(name() + ".coalescedTotal") 451 .desc("average number of interrupts coalesced into each post") 452 .precision(0) 453 ; 454 
455 postedInterrupts 456 .name(name() + ".postedInterrupts") 457 .desc("number of posts to CPU") 458 .precision(0) 459 ; 460 461 droppedPackets 462 .name(name() + ".droppedPackets") 463 .desc("number of packets dropped") 464 .precision(0) 465 ; 466 467 coalescedSwi = totalSwi / postedInterrupts; 468 coalescedRxIdle = totalRxIdle / postedInterrupts; 469 coalescedRxOk = totalRxOk / postedInterrupts; 470 coalescedRxDesc = totalRxDesc / postedInterrupts; 471 coalescedTxOk = totalTxOk / postedInterrupts; 472 coalescedTxIdle = totalTxIdle / postedInterrupts; 473 coalescedTxDesc = totalTxDesc / postedInterrupts; 474 coalescedRxOrn = totalRxOrn / postedInterrupts; 475 476 coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc + 477 totalTxOk + totalTxIdle + totalTxDesc + 478 totalRxOrn) / postedInterrupts; 479 480 txBandwidth = txBytes * Stats::constant(8) / simSeconds; 481 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds; 482 totBandwidth = txBandwidth + rxBandwidth; 483 totBytes = txBytes + rxBytes; 484 totPackets = txPackets + rxPackets; 485 486 txPacketRate = txPackets / simSeconds; 487 rxPacketRate = rxPackets / simSeconds; 488} 489 490/** 491 * This is to read the PCI general configuration registers 492 */ 493void 494NSGigE::ReadConfig(int offset, int size, uint8_t *data) 495{ 496 if (offset < PCI_DEVICE_SPECIFIC) 497 PciDev::ReadConfig(offset, size, data); 498 else 499 panic("Device specific PCI config space not implemented!\n"); 500} 501 502/** 503 * This is to write to the PCI general configuration registers 504 */ 505void 506NSGigE::WriteConfig(int offset, int size, uint32_t data) 507{ 508 if (offset < PCI_DEVICE_SPECIFIC) 509 PciDev::WriteConfig(offset, size, data); 510 else 511 panic("Device specific PCI config space not implemented!\n"); 512 513 // Need to catch writes to BARs to update the PIO interface 514 switch (offset) { 515 // seems to work fine without all these PCI settings, but i 516 // put in the IO to double check, an assertion 
will fail if we 517 // need to properly implement it 518 case PCI_COMMAND: 519 if (config.data[offset] & PCI_CMD_IOSE) 520 ioEnable = true; 521 else 522 ioEnable = false; 523 524#if 0 525 if (config.data[offset] & PCI_CMD_BME) { 526 bmEnabled = true; 527 } 528 else { 529 bmEnabled = false; 530 } 531 532 if (config.data[offset] & PCI_CMD_MSE) { 533 memEnable = true; 534 } 535 else { 536 memEnable = false; 537 } 538#endif 539 break; 540 541 case PCI0_BASE_ADDR0: 542 if (BARAddrs[0] != 0) { 543 if (pioInterface) 544 pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0])); 545 546 BARAddrs[0] &= EV5::PAddrUncachedMask; 547 } 548 break; 549 case PCI0_BASE_ADDR1: 550 if (BARAddrs[1] != 0) { 551 if (pioInterface) 552 pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1])); 553 554 BARAddrs[1] &= EV5::PAddrUncachedMask; 555 } 556 break; 557 } 558} 559 560/** 561 * This reads the device registers, which are detailed in the NS83820 562 * spec sheet 563 */ 564Fault 565NSGigE::read(MemReqPtr &req, uint8_t *data) 566{ 567 assert(ioEnable); 568 569 //The mask is to give you only the offset into the device register file 570 Addr daddr = req->paddr & 0xfff; 571 DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n", 572 daddr, req->paddr, req->vaddr, req->size); 573 574 575 // there are some reserved registers, you can see ns_gige_reg.h and 576 // the spec sheet for details 577 if (daddr > LAST && daddr <= RESERVED) { 578 panic("Accessing reserved register"); 579 } else if (daddr > RESERVED && daddr <= 0x3FC) { 580 ReadConfig(daddr & 0xff, req->size, data); 581 return No_Fault; 582 } else if (daddr >= MIB_START && daddr <= MIB_END) { 583 // don't implement all the MIB's. 
hopefully the kernel 584 // doesn't actually DEPEND upon their values 585 // MIB are just hardware stats keepers 586 uint32_t ® = *(uint32_t *) data; 587 reg = 0; 588 return No_Fault; 589 } else if (daddr > 0x3FC) 590 panic("Something is messed up!\n"); 591 592 switch (req->size) { 593 case sizeof(uint32_t): 594 { 595 uint32_t ® = *(uint32_t *)data; 596 597 switch (daddr) { 598 case CR: 599 reg = regs.command; 600 //these are supposed to be cleared on a read 601 reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR); 602 break; 603 604 case CFGR: 605 reg = regs.config; 606 break; 607 608 case MEAR: 609 reg = regs.mear; 610 break; 611 612 case PTSCR: 613 reg = regs.ptscr; 614 break; 615 616 case ISR: 617 reg = regs.isr; 618 devIntrClear(ISR_ALL); 619 break; 620 621 case IMR: 622 reg = regs.imr; 623 break; 624 625 case IER: 626 reg = regs.ier; 627 break; 628 629 case IHR: 630 reg = regs.ihr; 631 break; 632 633 case TXDP: 634 reg = regs.txdp; 635 break; 636 637 case TXDP_HI: 638 reg = regs.txdp_hi; 639 break; 640 641 case TX_CFG: 642 reg = regs.txcfg; 643 break; 644 645 case GPIOR: 646 reg = regs.gpior; 647 break; 648 649 case RXDP: 650 reg = regs.rxdp; 651 break; 652 653 case RXDP_HI: 654 reg = regs.rxdp_hi; 655 break; 656 657 case RX_CFG: 658 reg = regs.rxcfg; 659 break; 660 661 case PQCR: 662 reg = regs.pqcr; 663 break; 664 665 case WCSR: 666 reg = regs.wcsr; 667 break; 668 669 case PCR: 670 reg = regs.pcr; 671 break; 672 673 // see the spec sheet for how RFCR and RFDR work 674 // basically, you write to RFCR to tell the machine 675 // what you want to do next, then you act upon RFDR, 676 // and the device will be prepared b/c of what you 677 // wrote to RFCR 678 case RFCR: 679 reg = regs.rfcr; 680 break; 681 682 case RFDR: 683 uint16_t rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR); 684 switch (rfaddr) { 685 // Read from perfect match ROM octets 686 case 0x000: 687 reg = rom.perfectMatch[1]; 688 reg = reg << 8; 689 reg += rom.perfectMatch[0]; 690 break; 691 case 0x002: 692 reg 
= rom.perfectMatch[3] << 8; 693 reg += rom.perfectMatch[2]; 694 break; 695 case 0x004: 696 reg = rom.perfectMatch[5] << 8; 697 reg += rom.perfectMatch[4]; 698 break; 699 default: 700 // Read filter hash table 701 if (rfaddr >= FHASH_ADDR && 702 rfaddr < FHASH_ADDR + FHASH_SIZE) { 703 704 // Only word-aligned reads supported 705 if (rfaddr % 2) 706 panic("unaligned read from filter hash table!"); 707 708 reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8; 709 reg += rom.filterHash[rfaddr - FHASH_ADDR]; 710 break; 711 } 712 713 panic("reading RFDR for something other than pattern\ 714 matching or hashing! %#x\n", rfaddr); 715 } 716 break; 717 718 case SRR: 719 reg = regs.srr; 720 break; 721 722 case MIBC: 723 reg = regs.mibc; 724 reg &= ~(MIBC_MIBS | MIBC_ACLR); 725 break; 726 727 case VRCR: 728 reg = regs.vrcr; 729 break; 730 731 case VTCR: 732 reg = regs.vtcr; 733 break; 734 735 case VDR: 736 reg = regs.vdr; 737 break; 738 739 case CCSR: 740 reg = regs.ccsr; 741 break; 742 743 case TBICR: 744 reg = regs.tbicr; 745 break; 746 747 case TBISR: 748 reg = regs.tbisr; 749 break; 750 751 case TANAR: 752 reg = regs.tanar; 753 break; 754 755 case TANLPAR: 756 reg = regs.tanlpar; 757 break; 758 759 case TANER: 760 reg = regs.taner; 761 break; 762 763 case TESR: 764 reg = regs.tesr; 765 break; 766 767 case M5REG: 768 reg = params()->m5reg; 769 break; 770 771 default: 772 panic("reading unimplemented register: addr=%#x", daddr); 773 } 774 775 DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n", 776 daddr, reg, reg); 777 } 778 break; 779 780 default: 781 panic("accessing register with invalid size: addr=%#x, size=%d", 782 daddr, req->size); 783 } 784 785 return No_Fault; 786} 787 788Fault 789NSGigE::write(MemReqPtr &req, const uint8_t *data) 790{ 791 assert(ioEnable); 792 793 Addr daddr = req->paddr & 0xfff; 794 DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n", 795 daddr, req->paddr, req->vaddr, req->size); 796 797 if (daddr > LAST && daddr <= RESERVED) { 798 
panic("Accessing reserved register"); 799 } else if (daddr > RESERVED && daddr <= 0x3FC) { 800 WriteConfig(daddr & 0xff, req->size, *(uint32_t *)data); 801 return No_Fault; 802 } else if (daddr > 0x3FC) 803 panic("Something is messed up!\n"); 804 805 if (req->size == sizeof(uint32_t)) { 806 uint32_t reg = *(uint32_t *)data; 807 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg); 808 809 switch (daddr) { 810 case CR: 811 regs.command = reg; 812 if (reg & CR_TXD) { 813 txEnable = false; 814 } else if (reg & CR_TXE) { 815 txEnable = true; 816 817 // the kernel is enabling the transmit machine 818 if (txState == txIdle) 819 txKick(); 820 } 821 822 if (reg & CR_RXD) { 823 rxEnable = false; 824 } else if (reg & CR_RXE) { 825 rxEnable = true; 826 827 if (rxState == rxIdle) 828 rxKick(); 829 } 830 831 if (reg & CR_TXR) 832 txReset(); 833 834 if (reg & CR_RXR) 835 rxReset(); 836 837 if (reg & CR_SWI) 838 devIntrPost(ISR_SWI); 839 840 if (reg & CR_RST) { 841 txReset(); 842 rxReset(); 843 844 regsReset(); 845 } 846 break; 847 848 case CFGR: 849 if (reg & CFGR_LNKSTS || 850 reg & CFGR_SPDSTS || 851 reg & CFGR_DUPSTS || 852 reg & CFGR_RESERVED || 853 reg & CFGR_T64ADDR || 854 reg & CFGR_PCI64_DET) 855 856 // First clear all writable bits 857 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS | 858 CFGR_RESERVED | CFGR_T64ADDR | 859 CFGR_PCI64_DET; 860 // Now set the appropriate writable bits 861 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS | 862 CFGR_RESERVED | CFGR_T64ADDR | 863 CFGR_PCI64_DET); 864 865// all these #if 0's are because i don't THINK the kernel needs to 866// have these implemented. if there is a problem relating to one of 867// these, you may need to add functionality in. 
868#if 0 869 if (reg & CFGR_TBI_EN) ; 870 if (reg & CFGR_MODE_1000) ; 871#endif 872 873 if (reg & CFGR_AUTO_1000) 874 panic("CFGR_AUTO_1000 not implemented!\n"); 875 876#if 0 877 if (reg & CFGR_PINT_DUPSTS || 878 reg & CFGR_PINT_LNKSTS || 879 reg & CFGR_PINT_SPDSTS) 880 ; 881 882 if (reg & CFGR_TMRTEST) ; 883 if (reg & CFGR_MRM_DIS) ; 884 if (reg & CFGR_MWI_DIS) ; 885 886 if (reg & CFGR_T64ADDR) 887 panic("CFGR_T64ADDR is read only register!\n"); 888 889 if (reg & CFGR_PCI64_DET) 890 panic("CFGR_PCI64_DET is read only register!\n"); 891 892 if (reg & CFGR_DATA64_EN) ; 893 if (reg & CFGR_M64ADDR) ; 894 if (reg & CFGR_PHY_RST) ; 895 if (reg & CFGR_PHY_DIS) ; 896#endif 897 898 if (reg & CFGR_EXTSTS_EN) 899 extstsEnable = true; 900 else 901 extstsEnable = false; 902 903#if 0 904 if (reg & CFGR_REQALG) ; 905 if (reg & CFGR_SB) ; 906 if (reg & CFGR_POW) ; 907 if (reg & CFGR_EXD) ; 908 if (reg & CFGR_PESEL) ; 909 if (reg & CFGR_BROM_DIS) ; 910 if (reg & CFGR_EXT_125) ; 911 if (reg & CFGR_BEM) ; 912#endif 913 break; 914 915 case MEAR: 916 // Clear writable bits 917 regs.mear &= MEAR_EEDO; 918 // Set appropriate writable bits 919 regs.mear |= reg & ~MEAR_EEDO; 920 921 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address) 922 // even though it could get it through RFDR 923 if (reg & MEAR_EESEL) { 924 // Rising edge of clock 925 if (reg & MEAR_EECLK && !eepromClk) 926 eepromKick(); 927 } 928 else { 929 eepromState = eepromStart; 930 regs.mear &= ~MEAR_EEDI; 931 } 932 933 eepromClk = reg & MEAR_EECLK; 934 935 // since phy is completely faked, MEAR_MD* don't matter 936#if 0 937 if (reg & MEAR_MDIO) ; 938 if (reg & MEAR_MDDIR) ; 939 if (reg & MEAR_MDC) ; 940#endif 941 break; 942 943 case PTSCR: 944 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY); 945 // these control BISTs for various parts of chip - we 946 // don't care or do just fake that the BIST is done 947 if (reg & PTSCR_RBIST_EN) 948 regs.ptscr |= PTSCR_RBIST_DONE; 949 if (reg & PTSCR_EEBIST_EN) 950 regs.ptscr &= 
~PTSCR_EEBIST_EN; 951 if (reg & PTSCR_EELOAD_EN) 952 regs.ptscr &= ~PTSCR_EELOAD_EN; 953 break; 954 955 case ISR: /* writing to the ISR has no effect */ 956 panic("ISR is a read only register!\n"); 957 958 case IMR: 959 regs.imr = reg; 960 devIntrChangeMask(); 961 break; 962 963 case IER: 964 regs.ier = reg; 965 break; 966 967 case IHR: 968 regs.ihr = reg; 969 /* not going to implement real interrupt holdoff */ 970 break; 971 972 case TXDP: 973 regs.txdp = (reg & 0xFFFFFFFC); 974 assert(txState == txIdle); 975 CTDD = false; 976 break; 977 978 case TXDP_HI: 979 regs.txdp_hi = reg; 980 break; 981 982 case TX_CFG: 983 regs.txcfg = reg; 984#if 0 985 if (reg & TX_CFG_CSI) ; 986 if (reg & TX_CFG_HBI) ; 987 if (reg & TX_CFG_MLB) ; 988 if (reg & TX_CFG_ATP) ; 989 if (reg & TX_CFG_ECRETRY) { 990 /* 991 * this could easily be implemented, but considering 992 * the network is just a fake pipe, wouldn't make 993 * sense to do this 994 */ 995 } 996 997 if (reg & TX_CFG_BRST_DIS) ; 998#endif 999 1000#if 0 1001 /* we handle our own DMA, ignore the kernel's exhortations */ 1002 if (reg & TX_CFG_MXDMA) ; 1003#endif 1004 1005 // also, we currently don't care about fill/drain 1006 // thresholds though this may change in the future with 1007 // more realistic networks or a driver which changes it 1008 // according to feedback 1009 1010 break; 1011 1012 case GPIOR: 1013 // Only write writable bits 1014 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN 1015 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN; 1016 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN 1017 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN); 1018 /* these just control general purpose i/o pins, don't matter */ 1019 break; 1020 1021 case RXDP: 1022 regs.rxdp = reg; 1023 CRDD = false; 1024 break; 1025 1026 case RXDP_HI: 1027 regs.rxdp_hi = reg; 1028 break; 1029 1030 case RX_CFG: 1031 regs.rxcfg = reg; 1032#if 0 1033 if (reg & RX_CFG_AEP) ; 1034 if (reg & RX_CFG_ARP) ; 1035 if (reg & RX_CFG_STRIPCRC) ; 
1036 if (reg & RX_CFG_RX_RD) ; 1037 if (reg & RX_CFG_ALP) ; 1038 if (reg & RX_CFG_AIRL) ; 1039 1040 /* we handle our own DMA, ignore what kernel says about it */ 1041 if (reg & RX_CFG_MXDMA) ; 1042 1043 //also, we currently don't care about fill/drain thresholds 1044 //though this may change in the future with more realistic 1045 //networks or a driver which changes it according to feedback 1046 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ; 1047#endif 1048 break; 1049 1050 case PQCR: 1051 /* there is no priority queueing used in the linux 2.6 driver */ 1052 regs.pqcr = reg; 1053 break; 1054 1055 case WCSR: 1056 /* not going to implement wake on LAN */ 1057 regs.wcsr = reg; 1058 break; 1059 1060 case PCR: 1061 /* not going to implement pause control */ 1062 regs.pcr = reg; 1063 break; 1064 1065 case RFCR: 1066 regs.rfcr = reg; 1067 1068 rxFilterEnable = (reg & RFCR_RFEN) ? true : false; 1069 acceptBroadcast = (reg & RFCR_AAB) ? true : false; 1070 acceptMulticast = (reg & RFCR_AAM) ? true : false; 1071 acceptUnicast = (reg & RFCR_AAU) ? true : false; 1072 acceptPerfect = (reg & RFCR_APM) ? true : false; 1073 acceptArp = (reg & RFCR_AARP) ? true : false; 1074 multicastHashEnable = (reg & RFCR_MHEN) ? 
true : false; 1075 1076#if 0 1077 if (reg & RFCR_APAT) 1078 panic("RFCR_APAT not implemented!\n"); 1079#endif 1080 if (reg & RFCR_UHEN) 1081 panic("Unicast hash filtering not used by drivers!\n"); 1082 1083 if (reg & RFCR_ULM) 1084 panic("RFCR_ULM not implemented!\n"); 1085 1086 break; 1087 1088 case RFDR: 1089 uint16_t rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR); 1090 switch (rfaddr) { 1091 case 0x000: 1092 rom.perfectMatch[0] = (uint8_t)reg; 1093 rom.perfectMatch[1] = (uint8_t)(reg >> 8); 1094 break; 1095 case 0x002: 1096 rom.perfectMatch[2] = (uint8_t)reg; 1097 rom.perfectMatch[3] = (uint8_t)(reg >> 8); 1098 break; 1099 case 0x004: 1100 rom.perfectMatch[4] = (uint8_t)reg; 1101 rom.perfectMatch[5] = (uint8_t)(reg >> 8); 1102 break; 1103 default: 1104 1105 if (rfaddr >= FHASH_ADDR && 1106 rfaddr < FHASH_ADDR + FHASH_SIZE) { 1107 1108 // Only word-aligned writes supported 1109 if (rfaddr % 2) 1110 panic("unaligned write to filter hash table!"); 1111 1112 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg; 1113 rom.filterHash[rfaddr - FHASH_ADDR + 1] 1114 = (uint8_t)(reg >> 8); 1115 break; 1116 } 1117 panic("writing RFDR for something other than pattern matching\ 1118 or hashing! 
%#x\n", rfaddr); 1119 } 1120 1121 case BRAR: 1122 regs.brar = reg; 1123 break; 1124 1125 case BRDR: 1126 panic("the driver never uses BRDR, something is wrong!\n"); 1127 1128 case SRR: 1129 panic("SRR is read only register!\n"); 1130 1131 case MIBC: 1132 panic("the driver never uses MIBC, something is wrong!\n"); 1133 1134 case VRCR: 1135 regs.vrcr = reg; 1136 break; 1137 1138 case VTCR: 1139 regs.vtcr = reg; 1140 break; 1141 1142 case VDR: 1143 panic("the driver never uses VDR, something is wrong!\n"); 1144 1145 case CCSR: 1146 /* not going to implement clockrun stuff */ 1147 regs.ccsr = reg; 1148 break; 1149 1150 case TBICR: 1151 regs.tbicr = reg; 1152 if (reg & TBICR_MR_LOOPBACK) 1153 panic("TBICR_MR_LOOPBACK never used, something wrong!\n"); 1154 1155 if (reg & TBICR_MR_AN_ENABLE) { 1156 regs.tanlpar = regs.tanar; 1157 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS); 1158 } 1159 1160#if 0 1161 if (reg & TBICR_MR_RESTART_AN) ; 1162#endif 1163 1164 break; 1165 1166 case TBISR: 1167 panic("TBISR is read only register!\n"); 1168 1169 case TANAR: 1170 // Only write the writable bits 1171 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED; 1172 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED); 1173 1174 // Pause capability unimplemented 1175#if 0 1176 if (reg & TANAR_PS2) ; 1177 if (reg & TANAR_PS1) ; 1178#endif 1179 1180 break; 1181 1182 case TANLPAR: 1183 panic("this should only be written to by the fake phy!\n"); 1184 1185 case TANER: 1186 panic("TANER is read only register!\n"); 1187 1188 case TESR: 1189 regs.tesr = reg; 1190 break; 1191 1192 default: 1193 panic("invalid register access daddr=%#x", daddr); 1194 } 1195 } else { 1196 panic("Invalid Request Size"); 1197 } 1198 1199 return No_Fault; 1200} 1201 1202void 1203NSGigE::devIntrPost(uint32_t interrupts) 1204{ 1205 if (interrupts & ISR_RESERVE) 1206 panic("Cannot set a reserved interrupt"); 1207 1208 if (interrupts & ISR_NOIMPL) 1209 warn("interrupt not implemented %#x\n", interrupts); 
1210 1211 interrupts &= ~ISR_NOIMPL; 1212 regs.isr |= interrupts; 1213 1214 if (interrupts & regs.imr) { 1215 if (interrupts & ISR_SWI) { 1216 totalSwi++; 1217 } 1218 if (interrupts & ISR_RXIDLE) { 1219 totalRxIdle++; 1220 } 1221 if (interrupts & ISR_RXOK) { 1222 totalRxOk++; 1223 } 1224 if (interrupts & ISR_RXDESC) { 1225 totalRxDesc++; 1226 } 1227 if (interrupts & ISR_TXOK) { 1228 totalTxOk++; 1229 } 1230 if (interrupts & ISR_TXIDLE) { 1231 totalTxIdle++; 1232 } 1233 if (interrupts & ISR_TXDESC) { 1234 totalTxDesc++; 1235 } 1236 if (interrupts & ISR_RXORN) { 1237 totalRxOrn++; 1238 } 1239 } 1240 1241 DPRINTF(EthernetIntr, 1242 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n", 1243 interrupts, regs.isr, regs.imr); 1244 1245 if ((regs.isr & regs.imr)) { 1246 Tick when = curTick; 1247 if (!(regs.isr & regs.imr & ISR_NODELAY)) 1248 when += intrDelay; 1249 cpuIntrPost(when); 1250 } 1251} 1252 1253/* writing this interrupt counting stats inside this means that this function 1254 is now limited to being used to clear all interrupts upon the kernel 1255 reading isr and servicing. just telling you in case you were thinking 1256 of expanding use. 
1257*/ 1258void 1259NSGigE::devIntrClear(uint32_t interrupts) 1260{ 1261 if (interrupts & ISR_RESERVE) 1262 panic("Cannot clear a reserved interrupt"); 1263 1264 if (regs.isr & regs.imr & ISR_SWI) { 1265 postedSwi++; 1266 } 1267 if (regs.isr & regs.imr & ISR_RXIDLE) { 1268 postedRxIdle++; 1269 } 1270 if (regs.isr & regs.imr & ISR_RXOK) { 1271 postedRxOk++; 1272 } 1273 if (regs.isr & regs.imr & ISR_RXDESC) { 1274 postedRxDesc++; 1275 } 1276 if (regs.isr & regs.imr & ISR_TXOK) { 1277 postedTxOk++; 1278 } 1279 if (regs.isr & regs.imr & ISR_TXIDLE) { 1280 postedTxIdle++; 1281 } 1282 if (regs.isr & regs.imr & ISR_TXDESC) { 1283 postedTxDesc++; 1284 } 1285 if (regs.isr & regs.imr & ISR_RXORN) { 1286 postedRxOrn++; 1287 } 1288 1289 if (regs.isr & regs.imr & (ISR_SWI | ISR_RXIDLE | ISR_RXOK | ISR_RXDESC | 1290 ISR_TXOK | ISR_TXIDLE | ISR_TXDESC | ISR_RXORN) ) 1291 postedInterrupts++; 1292 1293 interrupts &= ~ISR_NOIMPL; 1294 regs.isr &= ~interrupts; 1295 1296 DPRINTF(EthernetIntr, 1297 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n", 1298 interrupts, regs.isr, regs.imr); 1299 1300 if (!(regs.isr & regs.imr)) 1301 cpuIntrClear(); 1302} 1303 1304void 1305NSGigE::devIntrChangeMask() 1306{ 1307 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n", 1308 regs.isr, regs.imr, regs.isr & regs.imr); 1309 1310 if (regs.isr & regs.imr) 1311 cpuIntrPost(curTick); 1312 else 1313 cpuIntrClear(); 1314} 1315 1316void 1317NSGigE::cpuIntrPost(Tick when) 1318{ 1319 // If the interrupt you want to post is later than an interrupt 1320 // already scheduled, just let it post in the coming one and don't 1321 // schedule another. 1322 // HOWEVER, must be sure that the scheduled intrTick is in the 1323 // future (this was formerly the source of a bug) 1324 /** 1325 * @todo this warning should be removed and the intrTick code should 1326 * be fixed. 
1327 */ 1328 assert(when >= curTick); 1329 assert(intrTick >= curTick || intrTick == 0); 1330 if (when > intrTick && intrTick != 0) { 1331 DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n", 1332 intrTick); 1333 return; 1334 } 1335 1336 intrTick = when; 1337 if (intrTick < curTick) { 1338 debug_break(); 1339 intrTick = curTick; 1340 } 1341 1342 DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n", 1343 intrTick); 1344 1345 if (intrEvent) 1346 intrEvent->squash(); 1347 intrEvent = new IntrEvent(this, true); 1348 intrEvent->schedule(intrTick); 1349} 1350 1351void 1352NSGigE::cpuInterrupt() 1353{ 1354 assert(intrTick == curTick); 1355 1356 // Whether or not there's a pending interrupt, we don't care about 1357 // it anymore 1358 intrEvent = 0; 1359 intrTick = 0; 1360 1361 // Don't send an interrupt if there's already one 1362 if (cpuPendingIntr) { 1363 DPRINTF(EthernetIntr, 1364 "would send an interrupt now, but there's already pending\n"); 1365 } else { 1366 // Send interrupt 1367 cpuPendingIntr = true; 1368 1369 DPRINTF(EthernetIntr, "posting interrupt\n"); 1370 intrPost(); 1371 } 1372} 1373 1374void 1375NSGigE::cpuIntrClear() 1376{ 1377 if (!cpuPendingIntr) 1378 return; 1379 1380 if (intrEvent) { 1381 intrEvent->squash(); 1382 intrEvent = 0; 1383 } 1384 1385 intrTick = 0; 1386 1387 cpuPendingIntr = false; 1388 1389 DPRINTF(EthernetIntr, "clearing interrupt\n"); 1390 intrClear(); 1391} 1392 1393bool 1394NSGigE::cpuIntrPending() const 1395{ return cpuPendingIntr; } 1396 1397void 1398NSGigE::txReset() 1399{ 1400 1401 DPRINTF(Ethernet, "transmit reset\n"); 1402 1403 CTDD = false; 1404 txEnable = false;; 1405 txFragPtr = 0; 1406 assert(txDescCnt == 0); 1407 txFifo.clear(); 1408 txState = txIdle; 1409 assert(txDmaState == dmaIdle); 1410} 1411 1412void 1413NSGigE::rxReset() 1414{ 1415 DPRINTF(Ethernet, "receive reset\n"); 1416 1417 CRDD = false; 1418 assert(rxPktBytes == 0); 1419 rxEnable = false; 1420 rxFragPtr = 0; 1421 
assert(rxDescCnt == 0); 1422 assert(rxDmaState == dmaIdle); 1423 rxFifo.clear(); 1424 rxState = rxIdle; 1425} 1426 1427void 1428NSGigE::regsReset() 1429{ 1430 memset(®s, 0, sizeof(regs)); 1431 regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000); 1432 regs.mear = 0x12; 1433 regs.txcfg = 0x120; // set drain threshold to 1024 bytes and 1434 // fill threshold to 32 bytes 1435 regs.rxcfg = 0x4; // set drain threshold to 16 bytes 1436 regs.srr = 0x0103; // set the silicon revision to rev B or 0x103 1437 regs.mibc = MIBC_FRZ; 1438 regs.vdr = 0x81; // set the vlan tag type to 802.1q 1439 regs.tesr = 0xc000; // TBI capable of both full and half duplex 1440 regs.brar = 0xffffffff; 1441 1442 extstsEnable = false; 1443 acceptBroadcast = false; 1444 acceptMulticast = false; 1445 acceptUnicast = false; 1446 acceptPerfect = false; 1447 acceptArp = false; 1448} 1449 1450void 1451NSGigE::rxDmaReadCopy() 1452{ 1453 assert(rxDmaState == dmaReading); 1454 1455 physmem->dma_read((uint8_t *)rxDmaData, rxDmaAddr, rxDmaLen); 1456 rxDmaState = dmaIdle; 1457 1458 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n", 1459 rxDmaAddr, rxDmaLen); 1460 DDUMP(EthernetDMA, rxDmaData, rxDmaLen); 1461} 1462 1463bool 1464NSGigE::doRxDmaRead() 1465{ 1466 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting); 1467 rxDmaState = dmaReading; 1468 1469 if (dmaInterface && !rxDmaFree) { 1470 if (dmaInterface->busy()) 1471 rxDmaState = dmaReadWaiting; 1472 else 1473 dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick, 1474 &rxDmaReadEvent, true); 1475 return true; 1476 } 1477 1478 if (dmaReadDelay == 0 && dmaReadFactor == 0) { 1479 rxDmaReadCopy(); 1480 return false; 1481 } 1482 1483 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor; 1484 Tick start = curTick + dmaReadDelay + factor; 1485 rxDmaReadEvent.schedule(start); 1486 return true; 1487} 1488 1489void 1490NSGigE::rxDmaReadDone() 1491{ 1492 assert(rxDmaState == dmaReading); 1493 rxDmaReadCopy(); 1494 1495 // If the 
transmit state machine has a pending DMA, let it go first 1496 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting) 1497 txKick(); 1498 1499 rxKick(); 1500} 1501 1502void 1503NSGigE::rxDmaWriteCopy() 1504{ 1505 assert(rxDmaState == dmaWriting); 1506 1507 physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen); 1508 rxDmaState = dmaIdle; 1509 1510 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n", 1511 rxDmaAddr, rxDmaLen); 1512 DDUMP(EthernetDMA, rxDmaData, rxDmaLen); 1513} 1514 1515bool 1516NSGigE::doRxDmaWrite() 1517{ 1518 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting); 1519 rxDmaState = dmaWriting; 1520 1521 if (dmaInterface && !rxDmaFree) { 1522 if (dmaInterface->busy()) 1523 rxDmaState = dmaWriteWaiting; 1524 else 1525 dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick, 1526 &rxDmaWriteEvent, true); 1527 return true; 1528 } 1529 1530 if (dmaWriteDelay == 0 && dmaWriteFactor == 0) { 1531 rxDmaWriteCopy(); 1532 return false; 1533 } 1534 1535 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor; 1536 Tick start = curTick + dmaWriteDelay + factor; 1537 rxDmaWriteEvent.schedule(start); 1538 return true; 1539} 1540 1541void 1542NSGigE::rxDmaWriteDone() 1543{ 1544 assert(rxDmaState == dmaWriting); 1545 rxDmaWriteCopy(); 1546 1547 // If the transmit state machine has a pending DMA, let it go first 1548 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting) 1549 txKick(); 1550 1551 rxKick(); 1552} 1553 1554void 1555NSGigE::rxKick() 1556{ 1557 DPRINTF(EthernetSM, "receive kick rxState=%s (rxBuf.size=%d)\n", 1558 NsRxStateStrings[rxState], rxFifo.size()); 1559 1560 next: 1561 if (clock) { 1562 if (rxKickTick > curTick) { 1563 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n", 1564 rxKickTick); 1565 1566 goto exit; 1567 } 1568 1569 // Go to the next state machine clock tick. 
1570 rxKickTick = curTick + cycles(1); 1571 } 1572 1573 switch(rxDmaState) { 1574 case dmaReadWaiting: 1575 if (doRxDmaRead()) 1576 goto exit; 1577 break; 1578 case dmaWriteWaiting: 1579 if (doRxDmaWrite()) 1580 goto exit; 1581 break; 1582 default: 1583 break; 1584 } 1585 1586 // see state machine from spec for details 1587 // the way this works is, if you finish work on one state and can 1588 // go directly to another, you do that through jumping to the 1589 // label "next". however, if you have intermediate work, like DMA 1590 // so that you can't go to the next state yet, you go to exit and 1591 // exit the loop. however, when the DMA is done it will trigger 1592 // an event and come back to this loop. 1593 switch (rxState) { 1594 case rxIdle: 1595 if (!rxEnable) { 1596 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n"); 1597 goto exit; 1598 } 1599 1600 if (CRDD) { 1601 rxState = rxDescRefr; 1602 1603 rxDmaAddr = regs.rxdp & 0x3fffffff; 1604 rxDmaData = &rxDescCache + offsetof(ns_desc, link); 1605 rxDmaLen = sizeof(rxDescCache.link); 1606 rxDmaFree = dmaDescFree; 1607 1608 descDmaReads++; 1609 descDmaRdBytes += rxDmaLen; 1610 1611 if (doRxDmaRead()) 1612 goto exit; 1613 } else { 1614 rxState = rxDescRead; 1615 1616 rxDmaAddr = regs.rxdp & 0x3fffffff; 1617 rxDmaData = &rxDescCache; 1618 rxDmaLen = sizeof(ns_desc); 1619 rxDmaFree = dmaDescFree; 1620 1621 descDmaReads++; 1622 descDmaRdBytes += rxDmaLen; 1623 1624 if (doRxDmaRead()) 1625 goto exit; 1626 } 1627 break; 1628 1629 case rxDescRefr: 1630 if (rxDmaState != dmaIdle) 1631 goto exit; 1632 1633 rxState = rxAdvance; 1634 break; 1635 1636 case rxDescRead: 1637 if (rxDmaState != dmaIdle) 1638 goto exit; 1639 1640 DPRINTF(EthernetDesc, "rxDescCache: addr=%08x read descriptor\n", 1641 regs.rxdp & 0x3fffffff); 1642 DPRINTF(EthernetDesc, 1643 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n", 1644 rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts, 1645 rxDescCache.extsts); 1646 1647 if 
(rxDescCache.cmdsts & CMDSTS_OWN) { 1648 devIntrPost(ISR_RXIDLE); 1649 rxState = rxIdle; 1650 goto exit; 1651 } else { 1652 rxState = rxFifoBlock; 1653 rxFragPtr = rxDescCache.bufptr; 1654 rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK; 1655 } 1656 break; 1657 1658 case rxFifoBlock: 1659 if (!rxPacket) { 1660 /** 1661 * @todo in reality, we should be able to start processing 1662 * the packet as it arrives, and not have to wait for the 1663 * full packet ot be in the receive fifo. 1664 */ 1665 if (rxFifo.empty()) 1666 goto exit; 1667 1668 DPRINTF(EthernetSM, "****processing receive of new packet****\n"); 1669 1670 // If we don't have a packet, grab a new one from the fifo. 1671 rxPacket = rxFifo.front(); 1672 rxPktBytes = rxPacket->length; 1673 rxPacketBufPtr = rxPacket->data; 1674 1675#if TRACING_ON 1676 if (DTRACE(Ethernet)) { 1677 IpPtr ip(rxPacket); 1678 if (ip) { 1679 DPRINTF(Ethernet, "ID is %d\n", ip->id()); 1680 TcpPtr tcp(ip); 1681 if (tcp) { 1682 DPRINTF(Ethernet, 1683 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n", 1684 tcp->sport(), tcp->dport(), tcp->seq(), 1685 tcp->ack()); 1686 } 1687 } 1688 } 1689#endif 1690 1691 // sanity check - i think the driver behaves like this 1692 assert(rxDescCnt >= rxPktBytes); 1693 rxFifo.pop(); 1694 } 1695 1696 1697 // dont' need the && rxDescCnt > 0 if driver sanity check 1698 // above holds 1699 if (rxPktBytes > 0) { 1700 rxState = rxFragWrite; 1701 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity 1702 // check holds 1703 rxXferLen = rxPktBytes; 1704 1705 rxDmaAddr = rxFragPtr & 0x3fffffff; 1706 rxDmaData = rxPacketBufPtr; 1707 rxDmaLen = rxXferLen; 1708 rxDmaFree = dmaDataFree; 1709 1710 if (doRxDmaWrite()) 1711 goto exit; 1712 1713 } else { 1714 rxState = rxDescWrite; 1715 1716 //if (rxPktBytes == 0) { /* packet is done */ 1717 assert(rxPktBytes == 0); 1718 DPRINTF(EthernetSM, "done with receiving packet\n"); 1719 1720 rxDescCache.cmdsts |= CMDSTS_OWN; 1721 rxDescCache.cmdsts &= ~CMDSTS_MORE; 1722 
rxDescCache.cmdsts |= CMDSTS_OK; 1723 rxDescCache.cmdsts &= 0xffff0000; 1724 rxDescCache.cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE 1725 1726#if 0 1727 /* 1728 * all the driver uses these are for its own stats keeping 1729 * which we don't care about, aren't necessary for 1730 * functionality and doing this would just slow us down. 1731 * if they end up using this in a later version for 1732 * functional purposes, just undef 1733 */ 1734 if (rxFilterEnable) { 1735 rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK; 1736 const EthAddr &dst = rxFifoFront()->dst(); 1737 if (dst->unicast()) 1738 rxDescCache.cmdsts |= CMDSTS_DEST_SELF; 1739 if (dst->multicast()) 1740 rxDescCache.cmdsts |= CMDSTS_DEST_MULTI; 1741 if (dst->broadcast()) 1742 rxDescCache.cmdsts |= CMDSTS_DEST_MASK; 1743 } 1744#endif 1745 1746 IpPtr ip(rxPacket); 1747 if (extstsEnable && ip) { 1748 rxDescCache.extsts |= EXTSTS_IPPKT; 1749 rxIpChecksums++; 1750 if (cksum(ip) != 0) { 1751 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n"); 1752 rxDescCache.extsts |= EXTSTS_IPERR; 1753 } 1754 TcpPtr tcp(ip); 1755 UdpPtr udp(ip); 1756 if (tcp) { 1757 rxDescCache.extsts |= EXTSTS_TCPPKT; 1758 rxTcpChecksums++; 1759 if (cksum(tcp) != 0) { 1760 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n"); 1761 rxDescCache.extsts |= EXTSTS_TCPERR; 1762 1763 } 1764 } else if (udp) { 1765 rxDescCache.extsts |= EXTSTS_UDPPKT; 1766 rxUdpChecksums++; 1767 if (cksum(udp) != 0) { 1768 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n"); 1769 rxDescCache.extsts |= EXTSTS_UDPERR; 1770 } 1771 } 1772 } 1773 rxPacket = 0; 1774 1775 /* 1776 * the driver seems to always receive into desc buffers 1777 * of size 1514, so you never have a pkt that is split 1778 * into multiple descriptors on the receive side, so 1779 * i don't implement that case, hence the assert above. 
1780 */ 1781 1782 DPRINTF(EthernetDesc, 1783 "rxDescCache: addr=%08x writeback cmdsts extsts\n", 1784 regs.rxdp & 0x3fffffff); 1785 DPRINTF(EthernetDesc, 1786 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n", 1787 rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts, 1788 rxDescCache.extsts); 1789 1790 rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff; 1791 rxDmaData = &(rxDescCache.cmdsts); 1792 rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts); 1793 rxDmaFree = dmaDescFree; 1794 1795 descDmaWrites++; 1796 descDmaWrBytes += rxDmaLen; 1797 1798 if (doRxDmaWrite()) 1799 goto exit; 1800 } 1801 break; 1802 1803 case rxFragWrite: 1804 if (rxDmaState != dmaIdle) 1805 goto exit; 1806 1807 rxPacketBufPtr += rxXferLen; 1808 rxFragPtr += rxXferLen; 1809 rxPktBytes -= rxXferLen; 1810 1811 rxState = rxFifoBlock; 1812 break; 1813 1814 case rxDescWrite: 1815 if (rxDmaState != dmaIdle) 1816 goto exit; 1817 1818 assert(rxDescCache.cmdsts & CMDSTS_OWN); 1819 1820 assert(rxPacket == 0); 1821 devIntrPost(ISR_RXOK); 1822 1823 if (rxDescCache.cmdsts & CMDSTS_INTR) 1824 devIntrPost(ISR_RXDESC); 1825 1826 if (!rxEnable) { 1827 DPRINTF(EthernetSM, "Halting the RX state machine\n"); 1828 rxState = rxIdle; 1829 goto exit; 1830 } else 1831 rxState = rxAdvance; 1832 break; 1833 1834 case rxAdvance: 1835 if (rxDescCache.link == 0) { 1836 devIntrPost(ISR_RXIDLE); 1837 rxState = rxIdle; 1838 CRDD = true; 1839 goto exit; 1840 } else { 1841 rxState = rxDescRead; 1842 regs.rxdp = rxDescCache.link; 1843 CRDD = false; 1844 1845 rxDmaAddr = regs.rxdp & 0x3fffffff; 1846 rxDmaData = &rxDescCache; 1847 rxDmaLen = sizeof(ns_desc); 1848 rxDmaFree = dmaDescFree; 1849 1850 if (doRxDmaRead()) 1851 goto exit; 1852 } 1853 break; 1854 1855 default: 1856 panic("Invalid rxState!"); 1857 } 1858 1859 DPRINTF(EthernetSM, "entering next rxState=%s\n", 1860 NsRxStateStrings[rxState]); 1861 goto next; 1862 1863 exit: 1864 /** 1865 * @todo do we want to schedule a future 
kick? 1866 */ 1867 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n", 1868 NsRxStateStrings[rxState]); 1869 1870 if (clock && !rxKickEvent.scheduled()) 1871 rxKickEvent.schedule(rxKickTick); 1872} 1873 1874void 1875NSGigE::transmit() 1876{ 1877 if (txFifo.empty()) { 1878 DPRINTF(Ethernet, "nothing to transmit\n"); 1879 return; 1880 } 1881 1882 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n", 1883 txFifo.size()); 1884 if (interface->sendPacket(txFifo.front())) { 1885#if TRACING_ON 1886 if (DTRACE(Ethernet)) { 1887 IpPtr ip(txFifo.front()); 1888 if (ip) { 1889 DPRINTF(Ethernet, "ID is %d\n", ip->id()); 1890 TcpPtr tcp(ip); 1891 if (tcp) { 1892 DPRINTF(Ethernet, 1893 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n", 1894 tcp->sport(), tcp->dport(), tcp->seq(), tcp->ack()); 1895 } 1896 } 1897 } 1898#endif 1899 1900 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length); 1901 txBytes += txFifo.front()->length; 1902 txPackets++; 1903 1904 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n", 1905 txFifo.avail()); 1906 txFifo.pop(); 1907 1908 /* 1909 * normally do a writeback of the descriptor here, and ONLY 1910 * after that is done, send this interrupt. but since our 1911 * stuff never actually fails, just do this interrupt here, 1912 * otherwise the code has to stray from this nice format. 1913 * besides, it's functionally the same. 
         */
        devIntrPost(ISR_TXOK);
    }

    // Anything left to send?  Retry after the link's retry interval.
    if (!txFifo.empty() && !txEvent.scheduled()) {
        DPRINTF(Ethernet, "reschedule transmit\n");
        txEvent.schedule(curTick + retryTime);
    }
}

// Perform the actual functional copy for a pending tx DMA read and
// return the DMA engine to idle.
void
NSGigE::txDmaReadCopy()
{
    assert(txDmaState == dmaReading);

    physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen);
    txDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
            txDmaAddr, txDmaLen);
    DDUMP(EthernetDMA, txDmaData, txDmaLen);
}

// Start a tx DMA read.  Returns true if the state machine must wait
// (bus busy or a delay was scheduled), false if the copy completed
// immediately.
bool
NSGigE::doTxDmaRead()
{
    assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
    txDmaState = dmaReading;

    if (dmaInterface && !txDmaFree) {
        if (dmaInterface->busy())
            txDmaState = dmaReadWaiting;
        else
            dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
                                &txDmaReadEvent, true);
        return true;
    }

    if (dmaReadDelay == 0 && dmaReadFactor == 0.0) {
        txDmaReadCopy();
        return false;
    }

    // Model a per-64-byte-chunk transfer cost on top of the fixed delay.
    Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
    Tick start = curTick + dmaReadDelay + factor;
    txDmaReadEvent.schedule(start);
    return true;
}

void
NSGigE::txDmaReadDone()
{
    assert(txDmaState == dmaReading);
    txDmaReadCopy();

    // If the receive state machine has a pending DMA, let it go first
    if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
        rxKick();

    txKick();
}

// Perform the actual functional copy for a pending tx DMA write and
// return the DMA engine to idle.
void
NSGigE::txDmaWriteCopy()
{
    assert(txDmaState == dmaWriting);

    physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen);
    txDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
            txDmaAddr, txDmaLen);
    DDUMP(EthernetDMA, txDmaData, txDmaLen);
}

// Start a tx DMA write.  Returns true if the state machine must wait
// (bus busy or a delay was scheduled), false if the copy completed
// immediately.
bool
NSGigE::doTxDmaWrite()
{
    assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
    txDmaState = dmaWriting;

    if (dmaInterface && !txDmaFree) {
        if (dmaInterface->busy())
            txDmaState = dmaWriteWaiting;
        else
            dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick,
                                &txDmaWriteEvent, true);
        return true;
    }

    if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
        txDmaWriteCopy();
        return false;
    }

    // Model a per-64-byte-chunk transfer cost on top of the fixed delay.
    Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
    Tick start = curTick + dmaWriteDelay + factor;
    txDmaWriteEvent.schedule(start);
    return true;
}

void
NSGigE::txDmaWriteDone()
{
    assert(txDmaState == dmaWriting);
    txDmaWriteCopy();

    // If the receive state machine has a pending DMA, let it go first
    if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
        rxKick();

    txKick();
}

// Run the transmit state machine (structure mirrors rxKick()).
void
NSGigE::txKick()
{
    DPRINTF(EthernetSM, "transmit kick txState=%s\n",
            NsTxStateStrings[txState]);

  next:
    if (clock) {
        if (txKickTick > curTick) {
            DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
                    txKickTick);
            goto exit;
        }

        // Go to the next state machine clock tick.
        txKickTick = curTick + cycles(1);
    }

    switch(txDmaState) {
      case dmaReadWaiting:
        if (doTxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doTxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    switch (txState) {
      case txIdle:
        if (!txEnable) {
Nothing to do.\n"); 2063 goto exit; 2064 } 2065 2066 if (CTDD) { 2067 txState = txDescRefr; 2068 2069 txDmaAddr = regs.txdp & 0x3fffffff; 2070 txDmaData = &txDescCache + offsetof(ns_desc, link); 2071 txDmaLen = sizeof(txDescCache.link); 2072 txDmaFree = dmaDescFree; 2073 2074 descDmaReads++; 2075 descDmaRdBytes += txDmaLen; 2076 2077 if (doTxDmaRead()) 2078 goto exit; 2079 2080 } else { 2081 txState = txDescRead; 2082 2083 txDmaAddr = regs.txdp & 0x3fffffff; 2084 txDmaData = &txDescCache; 2085 txDmaLen = sizeof(ns_desc); 2086 txDmaFree = dmaDescFree; 2087 2088 descDmaReads++; 2089 descDmaRdBytes += txDmaLen; 2090 2091 if (doTxDmaRead()) 2092 goto exit; 2093 } 2094 break; 2095 2096 case txDescRefr: 2097 if (txDmaState != dmaIdle) 2098 goto exit; 2099 2100 txState = txAdvance; 2101 break; 2102 2103 case txDescRead: 2104 if (txDmaState != dmaIdle) 2105 goto exit; 2106 2107 DPRINTF(EthernetDesc, "txDescCache: addr=%08x read descriptor\n", 2108 regs.txdp & 0x3fffffff); 2109 DPRINTF(EthernetDesc, 2110 "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n", 2111 txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts, 2112 txDescCache.extsts); 2113 2114 if (txDescCache.cmdsts & CMDSTS_OWN) { 2115 txState = txFifoBlock; 2116 txFragPtr = txDescCache.bufptr; 2117 txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK; 2118 } else { 2119 devIntrPost(ISR_TXIDLE); 2120 txState = txIdle; 2121 goto exit; 2122 } 2123 break; 2124 2125 case txFifoBlock: 2126 if (!txPacket) { 2127 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n"); 2128 txPacket = new PacketData(16384); 2129 txPacketBufPtr = txPacket->data; 2130 } 2131 2132 if (txDescCnt == 0) { 2133 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n"); 2134 if (txDescCache.cmdsts & CMDSTS_MORE) { 2135 DPRINTF(EthernetSM, "there are more descriptors to come\n"); 2136 txState = txDescWrite; 2137 2138 txDescCache.cmdsts &= ~CMDSTS_OWN; 2139 2140 txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts); 2141 
txDmaAddr &= 0x3fffffff; 2142 txDmaData = &(txDescCache.cmdsts); 2143 txDmaLen = sizeof(txDescCache.cmdsts); 2144 txDmaFree = dmaDescFree; 2145 2146 if (doTxDmaWrite()) 2147 goto exit; 2148 2149 } else { /* this packet is totally done */ 2150 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n"); 2151 /* deal with the the packet that just finished */ 2152 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) { 2153 IpPtr ip(txPacket); 2154 if (txDescCache.extsts & EXTSTS_UDPPKT) { 2155 UdpPtr udp(ip); 2156 udp->sum(0); 2157 udp->sum(cksum(udp)); 2158 txUdpChecksums++; 2159 } else if (txDescCache.extsts & EXTSTS_TCPPKT) { 2160 TcpPtr tcp(ip); 2161 tcp->sum(0); 2162 tcp->sum(cksum(tcp)); 2163 txTcpChecksums++; 2164 } 2165 if (txDescCache.extsts & EXTSTS_IPPKT) { 2166 ip->sum(0); 2167 ip->sum(cksum(ip)); 2168 txIpChecksums++; 2169 } 2170 } 2171 2172 txPacket->length = txPacketBufPtr - txPacket->data; 2173 // this is just because the receive can't handle a 2174 // packet bigger want to make sure 2175 assert(txPacket->length <= 1514); 2176#ifndef NDEBUG 2177 bool success = 2178#endif 2179 txFifo.push(txPacket); 2180 assert(success); 2181 2182 /* 2183 * this following section is not tqo spec, but 2184 * functionally shouldn't be any different. normally, 2185 * the chip will wait til the transmit has occurred 2186 * before writing back the descriptor because it has 2187 * to wait to see that it was successfully transmitted 2188 * to decide whether to set CMDSTS_OK or not. 
2189 * however, in the simulator since it is always 2190 * successfully transmitted, and writing it exactly to 2191 * spec would complicate the code, we just do it here 2192 */ 2193 2194 txDescCache.cmdsts &= ~CMDSTS_OWN; 2195 txDescCache.cmdsts |= CMDSTS_OK; 2196 2197 DPRINTF(EthernetDesc, 2198 "txDesc writeback: cmdsts=%08x extsts=%08x\n", 2199 txDescCache.cmdsts, txDescCache.extsts); 2200 2201 txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts); 2202 txDmaAddr &= 0x3fffffff; 2203 txDmaData = &(txDescCache.cmdsts); 2204 txDmaLen = sizeof(txDescCache.cmdsts) + 2205 sizeof(txDescCache.extsts); 2206 txDmaFree = dmaDescFree; 2207 2208 descDmaWrites++; 2209 descDmaWrBytes += txDmaLen; 2210 2211 transmit(); 2212 txPacket = 0; 2213 2214 if (!txEnable) { 2215 DPRINTF(EthernetSM, "halting TX state machine\n"); 2216 txState = txIdle; 2217 goto exit; 2218 } else 2219 txState = txAdvance; 2220 2221 if (doTxDmaWrite()) 2222 goto exit; 2223 } 2224 } else { 2225 DPRINTF(EthernetSM, "this descriptor isn't done yet\n"); 2226 if (!txFifo.full()) { 2227 txState = txFragRead; 2228 2229 /* 2230 * The number of bytes transferred is either whatever 2231 * is left in the descriptor (txDescCnt), or if there 2232 * is not enough room in the fifo, just whatever room 2233 * is left in the fifo 2234 */ 2235 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail()); 2236 2237 txDmaAddr = txFragPtr & 0x3fffffff; 2238 txDmaData = txPacketBufPtr; 2239 txDmaLen = txXferLen; 2240 txDmaFree = dmaDataFree; 2241 2242 if (doTxDmaRead()) 2243 goto exit; 2244 } else { 2245 txState = txFifoBlock; 2246 transmit(); 2247 2248 goto exit; 2249 } 2250 2251 } 2252 break; 2253 2254 case txFragRead: 2255 if (txDmaState != dmaIdle) 2256 goto exit; 2257 2258 txPacketBufPtr += txXferLen; 2259 txFragPtr += txXferLen; 2260 txDescCnt -= txXferLen; 2261 txFifo.reserve(txXferLen); 2262 2263 txState = txFifoBlock; 2264 break; 2265 2266 case txDescWrite: 2267 if (txDmaState != dmaIdle) 2268 goto exit; 2269 2270 if 
(txDescCache.cmdsts & CMDSTS_INTR) 2271 devIntrPost(ISR_TXDESC); 2272 2273 if (!txEnable) { 2274 DPRINTF(EthernetSM, "halting TX state machine\n"); 2275 txState = txIdle; 2276 goto exit; 2277 } else 2278 txState = txAdvance; 2279 break; 2280 2281 case txAdvance: 2282 if (txDescCache.link == 0) { 2283 devIntrPost(ISR_TXIDLE); 2284 txState = txIdle; 2285 goto exit; 2286 } else { 2287 txState = txDescRead; 2288 regs.txdp = txDescCache.link; 2289 CTDD = false; 2290 2291 txDmaAddr = txDescCache.link & 0x3fffffff; 2292 txDmaData = &txDescCache; 2293 txDmaLen = sizeof(ns_desc); 2294 txDmaFree = dmaDescFree; 2295 2296 if (doTxDmaRead()) 2297 goto exit; 2298 } 2299 break; 2300 2301 default: 2302 panic("invalid state"); 2303 } 2304 2305 DPRINTF(EthernetSM, "entering next txState=%s\n", 2306 NsTxStateStrings[txState]); 2307 goto next; 2308 2309 exit: 2310 /** 2311 * @todo do we want to schedule a future kick? 2312 */ 2313 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n", 2314 NsTxStateStrings[txState]); 2315 2316 if (clock && !txKickEvent.scheduled()) 2317 txKickEvent.schedule(txKickTick); 2318} 2319 2320/** 2321 * Advance the EEPROM state machine 2322 * Called on rising edge of EEPROM clock bit in MEAR 2323 */ 2324void 2325NSGigE::eepromKick() 2326{ 2327 switch (eepromState) { 2328 2329 case eepromStart: 2330 2331 // Wait for start bit 2332 if (regs.mear & MEAR_EEDI) { 2333 // Set up to get 2 opcode bits 2334 eepromState = eepromGetOpcode; 2335 eepromBitsToRx = 2; 2336 eepromOpcode = 0; 2337 } 2338 break; 2339 2340 case eepromGetOpcode: 2341 eepromOpcode <<= 1; 2342 eepromOpcode += (regs.mear & MEAR_EEDI) ? 
            1 : 0;
        --eepromBitsToRx;

        // Done getting opcode
        if (eepromBitsToRx == 0) {
            if (eepromOpcode != EEPROM_READ)
                panic("only EEPROM reads are implemented!");

            // Set up to get address
            eepromState = eepromGetAddress;
            eepromBitsToRx = 6;
            eepromAddress = 0;
        }
        break;

      case eepromGetAddress:
        // Shift in the next address bit from the EEDI pin.
        eepromAddress <<= 1;
        eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting address
        if (eepromBitsToRx == 0) {

            if (eepromAddress >= EEPROM_SIZE)
                panic("EEPROM read access out of range!");

            // Only the three perfect-match-address words are modeled;
            // each 16-bit word is assembled from two ROM bytes.
            switch (eepromAddress) {

              case EEPROM_PMATCH2_ADDR:
                eepromData = rom.perfectMatch[5];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[4];
                break;

              case EEPROM_PMATCH1_ADDR:
                eepromData = rom.perfectMatch[3];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[2];
                break;

              case EEPROM_PMATCH0_ADDR:
                eepromData = rom.perfectMatch[1];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[0];
                break;

              default:
                panic("FreeBSD driver only uses EEPROM to read PMATCH!");
            }
            // Set up to read data
            eepromState = eepromRead;
            eepromBitsToRx = 16;

            // Clear data in bit
            regs.mear &= ~MEAR_EEDI;
        }
        break;

      case eepromRead:
        // Clear Data Out bit
        regs.mear &= ~MEAR_EEDO;
        // Set bit to value of current EEPROM bit
        regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;

        // Shift out data MSB-first, one bit per clock edge.
        eepromData <<= 1;
        --eepromBitsToRx;

        // All done
        if (eepromBitsToRx == 0) {
            eepromState = eepromStart;
        }
        break;

      default:
        panic("invalid EEPROM state");
    }

}

// Link-layer callback: the wire finished sending; if more data is queued
// in the tx FIFO, (re)schedule the next transmit attempt.
void
NSGigE::transferDone()
{
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
        return;
    }

    DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");

    if (txEvent.scheduled())
        txEvent.reschedule(curTick + cycles(1));
    else
        txEvent.schedule(curTick + cycles(1));
}

// Receive address filter.  Returns true if the packet should be DROPPED
// according to the accept* flags and the packet's destination MAC.
bool
NSGigE::rxFilter(const PacketPtr &packet)
{
    EthPtr eth = packet;
    bool drop = true;
    string type;

    const EthAddr &dst = eth->dst();
    if (dst.unicast()) {
        // If we're accepting all unicast addresses
        if (acceptUnicast)
            drop = false;

        // If we make a perfect match
        if (acceptPerfect && dst == rom.perfectMatch)
            drop = false;

        if (acceptArp && eth->type() == ETH_TYPE_ARP)
            drop = false;

    } else if (dst.broadcast()) {
        // if we're accepting broadcasts
        if (acceptBroadcast)
            drop = false;

    } else if (dst.multicast()) {
        // if we're accepting all multicasts
        if (acceptMulticast)
            drop = false;

        // Multicast hashing faked - all packets accepted
        if (multicastHashEnable)
            drop = false;
    }

    if (drop) {
        DPRINTF(Ethernet, "rxFilter drop\n");
        DDUMP(EthernetData, packet->data, packet->length);
    }

    return drop;
}

// Link-layer callback for an arriving packet.  Returns false only when
// the rx FIFO has no room (flow control back to the link); dropped/filtered
// packets still return true so the link considers them delivered.
bool
NSGigE::recvPacket(PacketPtr packet)
{
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
            rxFifo.avail());

    if (!rxEnable) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        interface->recvDone();
        return true;
    }

    if (!rxFilterEnable) {
        DPRINTF(Ethernet,
            "receive packet filtering disabled . . . packet dropped\n");
        interface->recvDone();
        return true;
    }

    if (rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        interface->recvDone();
        return true;
    }

    if (rxFifo.avail() < packet->length) {
#if TRACING_ON
        IpPtr ip(packet);
        TcpPtr tcp(ip);
        if (ip) {
            DPRINTF(Ethernet,
                    "packet won't fit in receive buffer...pkt ID %d dropped\n",
                    ip->id());
            if (tcp) {
                DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
            }
        }
#endif
        droppedPackets++;
        devIntrPost(ISR_RXORN);
        return false;
    }

    rxFifo.push(packet);
    interface->recvDone();

    rxKick();
    return true;
}

//=====================================================================
//
//
// Checkpoint the device state.  Note the order here must match
// unserialize() exactly.
void
NSGigE::serialize(ostream &os)
{
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
2544 */ 2545 if (rxDmaReadEvent.scheduled()) 2546 rxDmaReadCopy(); 2547 if (rxDmaWriteEvent.scheduled()) 2548 rxDmaWriteCopy(); 2549 if (txDmaReadEvent.scheduled()) 2550 txDmaReadCopy(); 2551 if (txDmaWriteEvent.scheduled()) 2552 txDmaWriteCopy(); 2553 2554 /* 2555 * Serialize the device registers 2556 */ 2557 SERIALIZE_SCALAR(regs.command); 2558 SERIALIZE_SCALAR(regs.config); 2559 SERIALIZE_SCALAR(regs.mear); 2560 SERIALIZE_SCALAR(regs.ptscr); 2561 SERIALIZE_SCALAR(regs.isr); 2562 SERIALIZE_SCALAR(regs.imr); 2563 SERIALIZE_SCALAR(regs.ier); 2564 SERIALIZE_SCALAR(regs.ihr); 2565 SERIALIZE_SCALAR(regs.txdp); 2566 SERIALIZE_SCALAR(regs.txdp_hi); 2567 SERIALIZE_SCALAR(regs.txcfg); 2568 SERIALIZE_SCALAR(regs.gpior); 2569 SERIALIZE_SCALAR(regs.rxdp); 2570 SERIALIZE_SCALAR(regs.rxdp_hi); 2571 SERIALIZE_SCALAR(regs.rxcfg); 2572 SERIALIZE_SCALAR(regs.pqcr); 2573 SERIALIZE_SCALAR(regs.wcsr); 2574 SERIALIZE_SCALAR(regs.pcr); 2575 SERIALIZE_SCALAR(regs.rfcr); 2576 SERIALIZE_SCALAR(regs.rfdr); 2577 SERIALIZE_SCALAR(regs.brar); 2578 SERIALIZE_SCALAR(regs.brdr); 2579 SERIALIZE_SCALAR(regs.srr); 2580 SERIALIZE_SCALAR(regs.mibc); 2581 SERIALIZE_SCALAR(regs.vrcr); 2582 SERIALIZE_SCALAR(regs.vtcr); 2583 SERIALIZE_SCALAR(regs.vdr); 2584 SERIALIZE_SCALAR(regs.ccsr); 2585 SERIALIZE_SCALAR(regs.tbicr); 2586 SERIALIZE_SCALAR(regs.tbisr); 2587 SERIALIZE_SCALAR(regs.tanar); 2588 SERIALIZE_SCALAR(regs.tanlpar); 2589 SERIALIZE_SCALAR(regs.taner); 2590 SERIALIZE_SCALAR(regs.tesr); 2591 2592 SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN); 2593 SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE); 2594 2595 SERIALIZE_SCALAR(ioEnable); 2596 2597 /* 2598 * Serialize the data Fifos 2599 */ 2600 rxFifo.serialize("rxFifo", os); 2601 txFifo.serialize("txFifo", os); 2602 2603 /* 2604 * Serialize the various helper variables 2605 */ 2606 bool txPacketExists = txPacket; 2607 SERIALIZE_SCALAR(txPacketExists); 2608 if (txPacketExists) { 2609 txPacket->length = txPacketBufPtr - txPacket->data; 2610 
txPacket->serialize("txPacket", os); 2611 uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data); 2612 SERIALIZE_SCALAR(txPktBufPtr); 2613 } 2614 2615 bool rxPacketExists = rxPacket; 2616 SERIALIZE_SCALAR(rxPacketExists); 2617 if (rxPacketExists) { 2618 rxPacket->serialize("rxPacket", os); 2619 uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data); 2620 SERIALIZE_SCALAR(rxPktBufPtr); 2621 } 2622 2623 SERIALIZE_SCALAR(txXferLen); 2624 SERIALIZE_SCALAR(rxXferLen); 2625 2626 /* 2627 * Serialize DescCaches 2628 */ 2629 SERIALIZE_SCALAR(txDescCache.link); 2630 SERIALIZE_SCALAR(txDescCache.bufptr); 2631 SERIALIZE_SCALAR(txDescCache.cmdsts); 2632 SERIALIZE_SCALAR(txDescCache.extsts); 2633 SERIALIZE_SCALAR(rxDescCache.link); 2634 SERIALIZE_SCALAR(rxDescCache.bufptr); 2635 SERIALIZE_SCALAR(rxDescCache.cmdsts); 2636 SERIALIZE_SCALAR(rxDescCache.extsts); 2637 SERIALIZE_SCALAR(extstsEnable); 2638 2639 /* 2640 * Serialize tx state machine 2641 */ 2642 int txState = this->txState; 2643 SERIALIZE_SCALAR(txState); 2644 SERIALIZE_SCALAR(txEnable); 2645 SERIALIZE_SCALAR(CTDD); 2646 SERIALIZE_SCALAR(txFragPtr); 2647 SERIALIZE_SCALAR(txDescCnt); 2648 int txDmaState = this->txDmaState; 2649 SERIALIZE_SCALAR(txDmaState); 2650 SERIALIZE_SCALAR(txKickTick); 2651 2652 /* 2653 * Serialize rx state machine 2654 */ 2655 int rxState = this->rxState; 2656 SERIALIZE_SCALAR(rxState); 2657 SERIALIZE_SCALAR(rxEnable); 2658 SERIALIZE_SCALAR(CRDD); 2659 SERIALIZE_SCALAR(rxPktBytes); 2660 SERIALIZE_SCALAR(rxFragPtr); 2661 SERIALIZE_SCALAR(rxDescCnt); 2662 int rxDmaState = this->rxDmaState; 2663 SERIALIZE_SCALAR(rxDmaState); 2664 SERIALIZE_SCALAR(rxKickTick); 2665 2666 /* 2667 * Serialize EEPROM state machine 2668 */ 2669 int eepromState = this->eepromState; 2670 SERIALIZE_SCALAR(eepromState); 2671 SERIALIZE_SCALAR(eepromClk); 2672 SERIALIZE_SCALAR(eepromBitsToRx); 2673 SERIALIZE_SCALAR(eepromOpcode); 2674 SERIALIZE_SCALAR(eepromAddress); 2675 SERIALIZE_SCALAR(eepromData); 

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later (stored relative to curTick so it survives
     * a checkpoint/restore at a different absolute tick).
     */
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);
    SERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.  An unscheduled interrupt
     * event is recorded as tick 0.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}

// Restore device state from a checkpoint.  Field order mirrors
// serialize() above exactly.
void
NSGigE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Unserialize the PciDev base class
    PciDev::unserialize(cp, section);

    UNSERIALIZE_SCALAR(regs.command);
    UNSERIALIZE_SCALAR(regs.config);
    UNSERIALIZE_SCALAR(regs.mear);
    UNSERIALIZE_SCALAR(regs.ptscr);
    UNSERIALIZE_SCALAR(regs.isr);
    UNSERIALIZE_SCALAR(regs.imr);
    UNSERIALIZE_SCALAR(regs.ier);
    UNSERIALIZE_SCALAR(regs.ihr);
    UNSERIALIZE_SCALAR(regs.txdp);
    UNSERIALIZE_SCALAR(regs.txdp_hi);
    UNSERIALIZE_SCALAR(regs.txcfg);
    UNSERIALIZE_SCALAR(regs.gpior);
    UNSERIALIZE_SCALAR(regs.rxdp);
    UNSERIALIZE_SCALAR(regs.rxdp_hi);
    UNSERIALIZE_SCALAR(regs.rxcfg);
    UNSERIALIZE_SCALAR(regs.pqcr);
    UNSERIALIZE_SCALAR(regs.wcsr);
    UNSERIALIZE_SCALAR(regs.pcr);
    UNSERIALIZE_SCALAR(regs.rfcr);
    UNSERIALIZE_SCALAR(regs.rfdr);
    UNSERIALIZE_SCALAR(regs.brar);
    UNSERIALIZE_SCALAR(regs.brdr);
    UNSERIALIZE_SCALAR(regs.srr);
    UNSERIALIZE_SCALAR(regs.mibc);
    UNSERIALIZE_SCALAR(regs.vrcr);
    UNSERIALIZE_SCALAR(regs.vtcr);
    UNSERIALIZE_SCALAR(regs.vdr);
    UNSERIALIZE_SCALAR(regs.ccsr);
    UNSERIALIZE_SCALAR(regs.tbicr);
    UNSERIALIZE_SCALAR(regs.tbisr);
    UNSERIALIZE_SCALAR(regs.tanar);
    UNSERIALIZE_SCALAR(regs.tanlpar);
    UNSERIALIZE_SCALAR(regs.taner);
    UNSERIALIZE_SCALAR(regs.tesr);

    UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    UNSERIALIZE_SCALAR(ioEnable);

    /*
     * unserialize the data fifos
     */
    rxFifo.unserialize("rxFifo", cp, section);
    txFifo.unserialize("txFifo", cp, section);

    /*
     * unserialize the various helper variables.  In-flight packets are
     * recreated with a fixed 16KB buffer; the saved offset rebuilds the
     * buffer pointer into the new packet's data.
     */
    bool txPacketExists;
    UNSERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket = new PacketData(16384);
        txPacket->unserialize("txPacket", cp, section);
        uint32_t txPktBufPtr;
        UNSERIALIZE_SCALAR(txPktBufPtr);
        txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
    } else
        txPacket = 0;

    bool rxPacketExists;
    UNSERIALIZE_SCALAR(rxPacketExists);
    // NOTE(review): this assignment is redundant — both branches below
    // set rxPacket (the else arm repeats it).
    rxPacket = 0;
    if (rxPacketExists) {
        rxPacket = new PacketData(16384);
        rxPacket->unserialize("rxPacket", cp, section);
        uint32_t rxPktBufPtr;
        UNSERIALIZE_SCALAR(rxPktBufPtr);
        rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
    } else
        rxPacket = 0;

    UNSERIALIZE_SCALAR(txXferLen);
    UNSERIALIZE_SCALAR(rxXferLen);

    /*
     * Unserialize DescCaches
     */
    UNSERIALIZE_SCALAR(txDescCache.link);
    UNSERIALIZE_SCALAR(txDescCache.bufptr);
    UNSERIALIZE_SCALAR(txDescCache.cmdsts);
    UNSERIALIZE_SCALAR(txDescCache.extsts);
    UNSERIALIZE_SCALAR(rxDescCache.link);
    UNSERIALIZE_SCALAR(rxDescCache.bufptr);
    UNSERIALIZE_SCALAR(rxDescCache.cmdsts);
    UNSERIALIZE_SCALAR(rxDescCache.extsts);
    UNSERIALIZE_SCALAR(extstsEnable);

    /*
     * unserialize tx state machine.  State was saved as int; cast back
     * to the enum types, and reschedule the kick event if it was
     * pending at checkpoint time.
     */
    int txState;
    UNSERIALIZE_SCALAR(txState);
    this->txState = (TxState) txState;
    UNSERIALIZE_SCALAR(txEnable);
    UNSERIALIZE_SCALAR(CTDD);
    UNSERIALIZE_SCALAR(txFragPtr);
    UNSERIALIZE_SCALAR(txDescCnt);
    int txDmaState;
    UNSERIALIZE_SCALAR(txDmaState);
    this->txDmaState = (DmaState) txDmaState;
    UNSERIALIZE_SCALAR(txKickTick);
    if (txKickTick)
        txKickEvent.schedule(txKickTick);

    /*
     * unserialize rx state machine
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    this->rxState = (RxState) rxState;
    UNSERIALIZE_SCALAR(rxEnable);
    UNSERIALIZE_SCALAR(CRDD);
    UNSERIALIZE_SCALAR(rxPktBytes);
    UNSERIALIZE_SCALAR(rxFragPtr);
    UNSERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState;
    UNSERIALIZE_SCALAR(rxDmaState);
    this->rxDmaState = (DmaState) rxDmaState;
    UNSERIALIZE_SCALAR(rxKickTick);
    if (rxKickTick)
        rxKickEvent.schedule(rxKickTick);

    /*
     * Unserialize EEPROM state machine
     */
    int eepromState;
    UNSERIALIZE_SCALAR(eepromState);
    this->eepromState = (EEPROMState) eepromState;
    UNSERIALIZE_SCALAR(eepromClk);
    UNSERIALIZE_SCALAR(eepromBitsToRx);
    UNSERIALIZE_SCALAR(eepromOpcode);
    UNSERIALIZE_SCALAR(eepromAddress);
    UNSERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, reschedule it now (delta was saved
     * relative to the checkpoint's curTick)
     */
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        txEvent.schedule(curTick + transmitTick);

    /*
     * unserialize receive address filter settings
     */
    UNSERIALIZE_SCALAR(rxFilterEnable);
    UNSERIALIZE_SCALAR(acceptBroadcast);
    UNSERIALIZE_SCALAR(acceptMulticast);
    UNSERIALIZE_SCALAR(acceptUnicast);
    UNSERIALIZE_SCALAR(acceptPerfect);
    UNSERIALIZE_SCALAR(acceptArp);
    UNSERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
2869 */ 2870 UNSERIALIZE_SCALAR(intrTick); 2871 UNSERIALIZE_SCALAR(cpuPendingIntr); 2872 Tick intrEventTick; 2873 UNSERIALIZE_SCALAR(intrEventTick); 2874 if (intrEventTick) { 2875 intrEvent = new IntrEvent(this, true); 2876 intrEvent->schedule(intrEventTick); 2877 } 2878 2879 /* 2880 * re-add addrRanges to bus bridges 2881 */ 2882 if (pioInterface) { 2883 pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0])); 2884 pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1])); 2885 } 2886} 2887 2888Tick 2889NSGigE::cacheAccess(MemReqPtr &req) 2890{ 2891 DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n", 2892 req->paddr, req->paddr - addr); 2893 return curTick + pioLatency; 2894} 2895 2896BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt) 2897 2898 SimObjectParam<EtherInt *> peer; 2899 SimObjectParam<NSGigE *> device; 2900 2901END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt) 2902 2903BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt) 2904 2905 INIT_PARAM_DFLT(peer, "peer interface", NULL), 2906 INIT_PARAM(device, "Ethernet device of this interface") 2907 2908END_INIT_SIM_OBJECT_PARAMS(NSGigEInt) 2909 2910CREATE_SIM_OBJECT(NSGigEInt) 2911{ 2912 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device); 2913 2914 EtherInt *p = (EtherInt *)peer; 2915 if (p) { 2916 dev_int->setPeer(p); 2917 p->setPeer(dev_int); 2918 } 2919 2920 return dev_int; 2921} 2922 2923REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt) 2924 2925 2926BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE) 2927 2928 Param<Addr> addr; 2929 Param<Tick> clock; 2930 Param<Tick> tx_delay; 2931 Param<Tick> rx_delay; 2932 Param<Tick> intr_delay; 2933 SimObjectParam<MemoryController *> mmu; 2934 SimObjectParam<PhysicalMemory *> physmem; 2935 Param<bool> rx_filter; 2936 Param<string> hardware_address; 2937 SimObjectParam<Bus*> io_bus; 2938 SimObjectParam<Bus*> payload_bus; 2939 SimObjectParam<HierParams *> hier; 2940 Param<Tick> pio_latency; 2941 Param<bool> dma_desc_free; 2942 Param<bool> dma_data_free; 2943 Param<Tick> 
dma_read_delay; 2944 Param<Tick> dma_write_delay; 2945 Param<Tick> dma_read_factor; 2946 Param<Tick> dma_write_factor; 2947 SimObjectParam<PciConfigAll *> configspace; 2948 SimObjectParam<PciConfigData *> configdata; 2949 SimObjectParam<Platform *> platform; 2950 Param<uint32_t> pci_bus; 2951 Param<uint32_t> pci_dev; 2952 Param<uint32_t> pci_func; 2953 Param<uint32_t> tx_fifo_size; 2954 Param<uint32_t> rx_fifo_size; 2955 Param<uint32_t> m5reg; 2956 Param<bool> dma_no_allocate; 2957 2958END_DECLARE_SIM_OBJECT_PARAMS(NSGigE) 2959 2960BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE) 2961 2962 INIT_PARAM(addr, "Device Address"), 2963 INIT_PARAM(clock, "State machine processor frequency"), 2964 INIT_PARAM(tx_delay, "Transmit Delay"), 2965 INIT_PARAM(rx_delay, "Receive Delay"), 2966 INIT_PARAM(intr_delay, "Interrupt Delay in microseconds"), 2967 INIT_PARAM(mmu, "Memory Controller"), 2968 INIT_PARAM(physmem, "Physical Memory"), 2969 INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true), 2970 INIT_PARAM(hardware_address, "Ethernet Hardware Address"), 2971 INIT_PARAM_DFLT(io_bus, "The IO Bus to attach to for headers", NULL), 2972 INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL), 2973 INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams), 2974 INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1), 2975 INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false), 2976 INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false), 2977 INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0), 2978 INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0), 2979 INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0), 2980 INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0), 2981 INIT_PARAM(configspace, "PCI Configspace"), 2982 INIT_PARAM(configdata, "PCI Config data"), 2983 INIT_PARAM(platform, "Platform"), 2984 INIT_PARAM(pci_bus, "PCI bus"), 2985 INIT_PARAM(pci_dev, "PCI device 
number"), 2986 INIT_PARAM(pci_func, "PCI function code"), 2987 INIT_PARAM_DFLT(tx_fifo_size, "max size in bytes of txFifo", 131072), 2988 INIT_PARAM_DFLT(rx_fifo_size, "max size in bytes of rxFifo", 131072), 2989 INIT_PARAM(m5reg, "m5 register"), 2990 INIT_PARAM_DFLT(dma_no_allocate, "Should DMA reads allocate cache lines", true) 2991 2992END_INIT_SIM_OBJECT_PARAMS(NSGigE) 2993 2994 2995CREATE_SIM_OBJECT(NSGigE) 2996{ 2997 NSGigE::Params *params = new NSGigE::Params; 2998 2999 params->name = getInstanceName(); 3000 params->mmu = mmu; 3001 params->configSpace = configspace; 3002 params->configData = configdata; 3003 params->plat = platform; 3004 params->busNum = pci_bus; 3005 params->deviceNum = pci_dev; 3006 params->functionNum = pci_func; 3007 3008 params->clock = clock; 3009 params->intr_delay = intr_delay; 3010 params->pmem = physmem; 3011 params->tx_delay = tx_delay; 3012 params->rx_delay = rx_delay; 3013 params->hier = hier; 3014 params->header_bus = io_bus; 3015 params->payload_bus = payload_bus; 3016 params->pio_latency = pio_latency; 3017 params->dma_desc_free = dma_desc_free; 3018 params->dma_data_free = dma_data_free; 3019 params->dma_read_delay = dma_read_delay; 3020 params->dma_write_delay = dma_write_delay; 3021 params->dma_read_factor = dma_read_factor; 3022 params->dma_write_factor = dma_write_factor; 3023 params->rx_filter = rx_filter; 3024 params->eaddr = hardware_address; 3025 params->tx_fifo_size = tx_fifo_size; 3026 params->rx_fifo_size = rx_fifo_size; 3027 params->m5reg = m5reg; 3028 params->dma_no_allocate = dma_no_allocate; 3029 return new NSGigE(params); 3030} 3031 3032REGISTER_SIM_OBJECT("NSGigE", NSGigE) 3033