ns_gige.cc revision 1848
1/* 2 * Copyright (c) 2004-2005 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29/** @file 30 * Device module for modelling the National Semiconductor 31 * DP83820 ethernet controller. 
Does not support priority queueing 32 */ 33#include <cstdio> 34#include <deque> 35#include <string> 36 37#include "base/inet.hh" 38#include "cpu/exec_context.hh" 39#include "dev/etherlink.hh" 40#include "dev/ns_gige.hh" 41#include "dev/pciconfigall.hh" 42#include "mem/bus/bus.hh" 43#include "mem/bus/dma_interface.hh" 44#include "mem/bus/pio_interface.hh" 45#include "mem/bus/pio_interface_impl.hh" 46#include "mem/functional/memory_control.hh" 47#include "mem/functional/physical.hh" 48#include "sim/builder.hh" 49#include "sim/debug.hh" 50#include "sim/host.hh" 51#include "sim/stats.hh" 52#include "targetarch/vtophys.hh" 53 54const char *NsRxStateStrings[] = 55{ 56 "rxIdle", 57 "rxDescRefr", 58 "rxDescRead", 59 "rxFifoBlock", 60 "rxFragWrite", 61 "rxDescWrite", 62 "rxAdvance" 63}; 64 65const char *NsTxStateStrings[] = 66{ 67 "txIdle", 68 "txDescRefr", 69 "txDescRead", 70 "txFifoBlock", 71 "txFragRead", 72 "txDescWrite", 73 "txAdvance" 74}; 75 76const char *NsDmaState[] = 77{ 78 "dmaIdle", 79 "dmaReading", 80 "dmaWriting", 81 "dmaReadWaiting", 82 "dmaWriteWaiting" 83}; 84 85using namespace std; 86using namespace Net; 87 88/////////////////////////////////////////////////////////////////////// 89// 90// NSGigE PCI Device 91// 92NSGigE::NSGigE(Params *p) 93 : PciDev(p), ioEnable(false), 94 txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size), 95 txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL), 96 txXferLen(0), rxXferLen(0), clock(p->clock), 97 txState(txIdle), txEnable(false), CTDD(false), 98 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle), 99 rxEnable(false), CRDD(false), rxPktBytes(0), 100 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false), 101 eepromState(eepromStart), rxDmaReadEvent(this), rxDmaWriteEvent(this), 102 txDmaReadEvent(this), txDmaWriteEvent(this), 103 dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free), 104 txDelay(p->tx_delay), rxDelay(p->rx_delay), 105 rxKickTick(0), rxKickEvent(this), 
txKickTick(0), txKickEvent(this), 106 txEvent(this), rxFilterEnable(p->rx_filter), acceptBroadcast(false), 107 acceptMulticast(false), acceptUnicast(false), 108 acceptPerfect(false), acceptArp(false), multicastHashEnable(false), 109 physmem(p->pmem), intrTick(0), cpuPendingIntr(false), 110 intrEvent(0), interface(0) 111{ 112 if (p->header_bus) { 113 pioInterface = newPioInterface(name(), p->hier, 114 p->header_bus, this, 115 &NSGigE::cacheAccess); 116 117 pioLatency = p->pio_latency * p->header_bus->clockRate; 118 119 if (p->payload_bus) 120 dmaInterface = new DMAInterface<Bus>(name() + ".dma", 121 p->header_bus, 122 p->payload_bus, 1, 123 p->dma_no_allocate); 124 else 125 dmaInterface = new DMAInterface<Bus>(name() + ".dma", 126 p->header_bus, 127 p->header_bus, 1, 128 p->dma_no_allocate); 129 } else if (p->payload_bus) { 130 pioInterface = newPioInterface(name(), p->hier, 131 p->payload_bus, this, 132 &NSGigE::cacheAccess); 133 134 pioLatency = p->pio_latency * p->payload_bus->clockRate; 135 136 dmaInterface = new DMAInterface<Bus>(name() + ".dma", 137 p->payload_bus, 138 p->payload_bus, 1, 139 p->dma_no_allocate); 140 } 141 142 143 intrDelay = p->intr_delay; 144 dmaReadDelay = p->dma_read_delay; 145 dmaWriteDelay = p->dma_write_delay; 146 dmaReadFactor = p->dma_read_factor; 147 dmaWriteFactor = p->dma_write_factor; 148 149 regsReset(); 150 memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN); 151} 152 153NSGigE::~NSGigE() 154{} 155 156void 157NSGigE::regStats() 158{ 159 txBytes 160 .name(name() + ".txBytes") 161 .desc("Bytes Transmitted") 162 .prereq(txBytes) 163 ; 164 165 rxBytes 166 .name(name() + ".rxBytes") 167 .desc("Bytes Received") 168 .prereq(rxBytes) 169 ; 170 171 txPackets 172 .name(name() + ".txPackets") 173 .desc("Number of Packets Transmitted") 174 .prereq(txBytes) 175 ; 176 177 rxPackets 178 .name(name() + ".rxPackets") 179 .desc("Number of Packets Received") 180 .prereq(rxBytes) 181 ; 182 183 txIpChecksums 184 .name(name() + ".txIpChecksums") 
185 .desc("Number of tx IP Checksums done by device") 186 .precision(0) 187 .prereq(txBytes) 188 ; 189 190 rxIpChecksums 191 .name(name() + ".rxIpChecksums") 192 .desc("Number of rx IP Checksums done by device") 193 .precision(0) 194 .prereq(rxBytes) 195 ; 196 197 txTcpChecksums 198 .name(name() + ".txTcpChecksums") 199 .desc("Number of tx TCP Checksums done by device") 200 .precision(0) 201 .prereq(txBytes) 202 ; 203 204 rxTcpChecksums 205 .name(name() + ".rxTcpChecksums") 206 .desc("Number of rx TCP Checksums done by device") 207 .precision(0) 208 .prereq(rxBytes) 209 ; 210 211 txUdpChecksums 212 .name(name() + ".txUdpChecksums") 213 .desc("Number of tx UDP Checksums done by device") 214 .precision(0) 215 .prereq(txBytes) 216 ; 217 218 rxUdpChecksums 219 .name(name() + ".rxUdpChecksums") 220 .desc("Number of rx UDP Checksums done by device") 221 .precision(0) 222 .prereq(rxBytes) 223 ; 224 225 descDmaReads 226 .name(name() + ".descDMAReads") 227 .desc("Number of descriptors the device read w/ DMA") 228 .precision(0) 229 ; 230 231 descDmaWrites 232 .name(name() + ".descDMAWrites") 233 .desc("Number of descriptors the device wrote w/ DMA") 234 .precision(0) 235 ; 236 237 descDmaRdBytes 238 .name(name() + ".descDmaReadBytes") 239 .desc("number of descriptor bytes read w/ DMA") 240 .precision(0) 241 ; 242 243 descDmaWrBytes 244 .name(name() + ".descDmaWriteBytes") 245 .desc("number of descriptor bytes write w/ DMA") 246 .precision(0) 247 ; 248 249 txBandwidth 250 .name(name() + ".txBandwidth") 251 .desc("Transmit Bandwidth (bits/s)") 252 .precision(0) 253 .prereq(txBytes) 254 ; 255 256 rxBandwidth 257 .name(name() + ".rxBandwidth") 258 .desc("Receive Bandwidth (bits/s)") 259 .precision(0) 260 .prereq(rxBytes) 261 ; 262 263 totBandwidth 264 .name(name() + ".totBandwidth") 265 .desc("Total Bandwidth (bits/s)") 266 .precision(0) 267 .prereq(totBytes) 268 ; 269 270 totPackets 271 .name(name() + ".totPackets") 272 .desc("Total Packets") 273 .precision(0) 274 
.prereq(totBytes) 275 ; 276 277 totBytes 278 .name(name() + ".totBytes") 279 .desc("Total Bytes") 280 .precision(0) 281 .prereq(totBytes) 282 ; 283 284 totPacketRate 285 .name(name() + ".totPPS") 286 .desc("Total Tranmission Rate (packets/s)") 287 .precision(0) 288 .prereq(totBytes) 289 ; 290 291 txPacketRate 292 .name(name() + ".txPPS") 293 .desc("Packet Tranmission Rate (packets/s)") 294 .precision(0) 295 .prereq(txBytes) 296 ; 297 298 rxPacketRate 299 .name(name() + ".rxPPS") 300 .desc("Packet Reception Rate (packets/s)") 301 .precision(0) 302 .prereq(rxBytes) 303 ; 304 305 postedSwi 306 .name(name() + ".postedSwi") 307 .desc("number of software interrupts posted to CPU") 308 .precision(0) 309 ; 310 311 totalSwi 312 .name(name() + ".totalSwi") 313 .desc("number of total Swi written to ISR") 314 .precision(0) 315 ; 316 317 coalescedSwi 318 .name(name() + ".coalescedSwi") 319 .desc("average number of Swi's coalesced into each post") 320 .precision(0) 321 ; 322 323 postedRxIdle 324 .name(name() + ".postedRxIdle") 325 .desc("number of rxIdle interrupts posted to CPU") 326 .precision(0) 327 ; 328 329 totalRxIdle 330 .name(name() + ".totalRxIdle") 331 .desc("number of total RxIdle written to ISR") 332 .precision(0) 333 ; 334 335 coalescedRxIdle 336 .name(name() + ".coalescedRxIdle") 337 .desc("average number of RxIdle's coalesced into each post") 338 .precision(0) 339 ; 340 341 postedRxOk 342 .name(name() + ".postedRxOk") 343 .desc("number of RxOk interrupts posted to CPU") 344 .precision(0) 345 ; 346 347 totalRxOk 348 .name(name() + ".totalRxOk") 349 .desc("number of total RxOk written to ISR") 350 .precision(0) 351 ; 352 353 coalescedRxOk 354 .name(name() + ".coalescedRxOk") 355 .desc("average number of RxOk's coalesced into each post") 356 .precision(0) 357 ; 358 359 postedRxDesc 360 .name(name() + ".postedRxDesc") 361 .desc("number of RxDesc interrupts posted to CPU") 362 .precision(0) 363 ; 364 365 totalRxDesc 366 .name(name() + ".totalRxDesc") 367 .desc("number 
of total RxDesc written to ISR") 368 .precision(0) 369 ; 370 371 coalescedRxDesc 372 .name(name() + ".coalescedRxDesc") 373 .desc("average number of RxDesc's coalesced into each post") 374 .precision(0) 375 ; 376 377 postedTxOk 378 .name(name() + ".postedTxOk") 379 .desc("number of TxOk interrupts posted to CPU") 380 .precision(0) 381 ; 382 383 totalTxOk 384 .name(name() + ".totalTxOk") 385 .desc("number of total TxOk written to ISR") 386 .precision(0) 387 ; 388 389 coalescedTxOk 390 .name(name() + ".coalescedTxOk") 391 .desc("average number of TxOk's coalesced into each post") 392 .precision(0) 393 ; 394 395 postedTxIdle 396 .name(name() + ".postedTxIdle") 397 .desc("number of TxIdle interrupts posted to CPU") 398 .precision(0) 399 ; 400 401 totalTxIdle 402 .name(name() + ".totalTxIdle") 403 .desc("number of total TxIdle written to ISR") 404 .precision(0) 405 ; 406 407 coalescedTxIdle 408 .name(name() + ".coalescedTxIdle") 409 .desc("average number of TxIdle's coalesced into each post") 410 .precision(0) 411 ; 412 413 postedTxDesc 414 .name(name() + ".postedTxDesc") 415 .desc("number of TxDesc interrupts posted to CPU") 416 .precision(0) 417 ; 418 419 totalTxDesc 420 .name(name() + ".totalTxDesc") 421 .desc("number of total TxDesc written to ISR") 422 .precision(0) 423 ; 424 425 coalescedTxDesc 426 .name(name() + ".coalescedTxDesc") 427 .desc("average number of TxDesc's coalesced into each post") 428 .precision(0) 429 ; 430 431 postedRxOrn 432 .name(name() + ".postedRxOrn") 433 .desc("number of RxOrn posted to CPU") 434 .precision(0) 435 ; 436 437 totalRxOrn 438 .name(name() + ".totalRxOrn") 439 .desc("number of total RxOrn written to ISR") 440 .precision(0) 441 ; 442 443 coalescedRxOrn 444 .name(name() + ".coalescedRxOrn") 445 .desc("average number of RxOrn's coalesced into each post") 446 .precision(0) 447 ; 448 449 coalescedTotal 450 .name(name() + ".coalescedTotal") 451 .desc("average number of interrupts coalesced into each post") 452 .precision(0) 453 ; 454 
455 postedInterrupts 456 .name(name() + ".postedInterrupts") 457 .desc("number of posts to CPU") 458 .precision(0) 459 ; 460 461 droppedPackets 462 .name(name() + ".droppedPackets") 463 .desc("number of packets dropped") 464 .precision(0) 465 ; 466 467 coalescedSwi = totalSwi / postedInterrupts; 468 coalescedRxIdle = totalRxIdle / postedInterrupts; 469 coalescedRxOk = totalRxOk / postedInterrupts; 470 coalescedRxDesc = totalRxDesc / postedInterrupts; 471 coalescedTxOk = totalTxOk / postedInterrupts; 472 coalescedTxIdle = totalTxIdle / postedInterrupts; 473 coalescedTxDesc = totalTxDesc / postedInterrupts; 474 coalescedRxOrn = totalRxOrn / postedInterrupts; 475 476 coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc + 477 totalTxOk + totalTxIdle + totalTxDesc + 478 totalRxOrn) / postedInterrupts; 479 480 txBandwidth = txBytes * Stats::constant(8) / simSeconds; 481 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds; 482 totBandwidth = txBandwidth + rxBandwidth; 483 totBytes = txBytes + rxBytes; 484 totPackets = txPackets + rxPackets; 485 486 txPacketRate = txPackets / simSeconds; 487 rxPacketRate = rxPackets / simSeconds; 488} 489 490/** 491 * This is to read the PCI general configuration registers 492 */ 493void 494NSGigE::readConfig(int offset, int size, uint8_t *data) 495{ 496 if (offset < PCI_DEVICE_SPECIFIC) 497 PciDev::readConfig(offset, size, data); 498 else 499 panic("Device specific PCI config space not implemented!\n"); 500} 501 502/** 503 * This is to write to the PCI general configuration registers 504 */ 505void 506NSGigE::writeConfig(int offset, int size, const uint8_t* data) 507{ 508 if (offset < PCI_DEVICE_SPECIFIC) 509 PciDev::writeConfig(offset, size, data); 510 else 511 panic("Device specific PCI config space not implemented!\n"); 512 513 // Need to catch writes to BARs to update the PIO interface 514 switch (offset) { 515 // seems to work fine without all these PCI settings, but i 516 // put in the IO to double check, an 
assertion will fail if we 517 // need to properly implement it 518 case PCI_COMMAND: 519 if (config.data[offset] & PCI_CMD_IOSE) 520 ioEnable = true; 521 else 522 ioEnable = false; 523 524#if 0 525 if (config.data[offset] & PCI_CMD_BME) { 526 bmEnabled = true; 527 } 528 else { 529 bmEnabled = false; 530 } 531 532 if (config.data[offset] & PCI_CMD_MSE) { 533 memEnable = true; 534 } 535 else { 536 memEnable = false; 537 } 538#endif 539 break; 540 541 case PCI0_BASE_ADDR0: 542 if (BARAddrs[0] != 0) { 543 if (pioInterface) 544 pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0])); 545 546 BARAddrs[0] &= EV5::PAddrUncachedMask; 547 } 548 break; 549 case PCI0_BASE_ADDR1: 550 if (BARAddrs[1] != 0) { 551 if (pioInterface) 552 pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1])); 553 554 BARAddrs[1] &= EV5::PAddrUncachedMask; 555 } 556 break; 557 } 558} 559 560/** 561 * This reads the device registers, which are detailed in the NS83820 562 * spec sheet 563 */ 564Fault 565NSGigE::read(MemReqPtr &req, uint8_t *data) 566{ 567 assert(ioEnable); 568 569 //The mask is to give you only the offset into the device register file 570 Addr daddr = req->paddr & 0xfff; 571 DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n", 572 daddr, req->paddr, req->vaddr, req->size); 573 574 575 // there are some reserved registers, you can see ns_gige_reg.h and 576 // the spec sheet for details 577 if (daddr > LAST && daddr <= RESERVED) { 578 panic("Accessing reserved register"); 579 } else if (daddr > RESERVED && daddr <= 0x3FC) { 580 readConfig(daddr & 0xff, req->size, data); 581 return No_Fault; 582 } else if (daddr >= MIB_START && daddr <= MIB_END) { 583 // don't implement all the MIB's. 
hopefully the kernel 584 // doesn't actually DEPEND upon their values 585 // MIB are just hardware stats keepers 586 uint32_t ® = *(uint32_t *) data; 587 reg = 0; 588 return No_Fault; 589 } else if (daddr > 0x3FC) 590 panic("Something is messed up!\n"); 591 592 switch (req->size) { 593 case sizeof(uint32_t): 594 { 595 uint32_t ® = *(uint32_t *)data; 596 uint16_t rfaddr; 597 598 switch (daddr) { 599 case CR: 600 reg = regs.command; 601 //these are supposed to be cleared on a read 602 reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR); 603 break; 604 605 case CFGR: 606 reg = regs.config; 607 break; 608 609 case MEAR: 610 reg = regs.mear; 611 break; 612 613 case PTSCR: 614 reg = regs.ptscr; 615 break; 616 617 case ISR: 618 reg = regs.isr; 619 devIntrClear(ISR_ALL); 620 break; 621 622 case IMR: 623 reg = regs.imr; 624 break; 625 626 case IER: 627 reg = regs.ier; 628 break; 629 630 case IHR: 631 reg = regs.ihr; 632 break; 633 634 case TXDP: 635 reg = regs.txdp; 636 break; 637 638 case TXDP_HI: 639 reg = regs.txdp_hi; 640 break; 641 642 case TX_CFG: 643 reg = regs.txcfg; 644 break; 645 646 case GPIOR: 647 reg = regs.gpior; 648 break; 649 650 case RXDP: 651 reg = regs.rxdp; 652 break; 653 654 case RXDP_HI: 655 reg = regs.rxdp_hi; 656 break; 657 658 case RX_CFG: 659 reg = regs.rxcfg; 660 break; 661 662 case PQCR: 663 reg = regs.pqcr; 664 break; 665 666 case WCSR: 667 reg = regs.wcsr; 668 break; 669 670 case PCR: 671 reg = regs.pcr; 672 break; 673 674 // see the spec sheet for how RFCR and RFDR work 675 // basically, you write to RFCR to tell the machine 676 // what you want to do next, then you act upon RFDR, 677 // and the device will be prepared b/c of what you 678 // wrote to RFCR 679 case RFCR: 680 reg = regs.rfcr; 681 break; 682 683 case RFDR: 684 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR); 685 switch (rfaddr) { 686 // Read from perfect match ROM octets 687 case 0x000: 688 reg = rom.perfectMatch[1]; 689 reg = reg << 8; 690 reg += rom.perfectMatch[0]; 691 break; 692 case 
0x002: 693 reg = rom.perfectMatch[3] << 8; 694 reg += rom.perfectMatch[2]; 695 break; 696 case 0x004: 697 reg = rom.perfectMatch[5] << 8; 698 reg += rom.perfectMatch[4]; 699 break; 700 default: 701 // Read filter hash table 702 if (rfaddr >= FHASH_ADDR && 703 rfaddr < FHASH_ADDR + FHASH_SIZE) { 704 705 // Only word-aligned reads supported 706 if (rfaddr % 2) 707 panic("unaligned read from filter hash table!"); 708 709 reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8; 710 reg += rom.filterHash[rfaddr - FHASH_ADDR]; 711 break; 712 } 713 714 panic("reading RFDR for something other than pattern" 715 " matching or hashing! %#x\n", rfaddr); 716 } 717 break; 718 719 case SRR: 720 reg = regs.srr; 721 break; 722 723 case MIBC: 724 reg = regs.mibc; 725 reg &= ~(MIBC_MIBS | MIBC_ACLR); 726 break; 727 728 case VRCR: 729 reg = regs.vrcr; 730 break; 731 732 case VTCR: 733 reg = regs.vtcr; 734 break; 735 736 case VDR: 737 reg = regs.vdr; 738 break; 739 740 case CCSR: 741 reg = regs.ccsr; 742 break; 743 744 case TBICR: 745 reg = regs.tbicr; 746 break; 747 748 case TBISR: 749 reg = regs.tbisr; 750 break; 751 752 case TANAR: 753 reg = regs.tanar; 754 break; 755 756 case TANLPAR: 757 reg = regs.tanlpar; 758 break; 759 760 case TANER: 761 reg = regs.taner; 762 break; 763 764 case TESR: 765 reg = regs.tesr; 766 break; 767 768 case M5REG: 769 reg = params()->m5reg; 770 break; 771 772 default: 773 panic("reading unimplemented register: addr=%#x", daddr); 774 } 775 776 DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n", 777 daddr, reg, reg); 778 } 779 break; 780 781 default: 782 panic("accessing register with invalid size: addr=%#x, size=%d", 783 daddr, req->size); 784 } 785 786 return No_Fault; 787} 788 789Fault 790NSGigE::write(MemReqPtr &req, const uint8_t *data) 791{ 792 assert(ioEnable); 793 794 Addr daddr = req->paddr & 0xfff; 795 DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n", 796 daddr, req->paddr, req->vaddr, req->size); 797 798 if (daddr > LAST && daddr <= 
RESERVED) { 799 panic("Accessing reserved register"); 800 } else if (daddr > RESERVED && daddr <= 0x3FC) { 801 writeConfig(daddr & 0xff, req->size, data); 802 return No_Fault; 803 } else if (daddr > 0x3FC) 804 panic("Something is messed up!\n"); 805 806 if (req->size == sizeof(uint32_t)) { 807 uint32_t reg = *(uint32_t *)data; 808 uint16_t rfaddr; 809 810 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg); 811 812 switch (daddr) { 813 case CR: 814 regs.command = reg; 815 if (reg & CR_TXD) { 816 txEnable = false; 817 } else if (reg & CR_TXE) { 818 txEnable = true; 819 820 // the kernel is enabling the transmit machine 821 if (txState == txIdle) 822 txKick(); 823 } 824 825 if (reg & CR_RXD) { 826 rxEnable = false; 827 } else if (reg & CR_RXE) { 828 rxEnable = true; 829 830 if (rxState == rxIdle) 831 rxKick(); 832 } 833 834 if (reg & CR_TXR) 835 txReset(); 836 837 if (reg & CR_RXR) 838 rxReset(); 839 840 if (reg & CR_SWI) 841 devIntrPost(ISR_SWI); 842 843 if (reg & CR_RST) { 844 txReset(); 845 rxReset(); 846 847 regsReset(); 848 } 849 break; 850 851 case CFGR: 852 if (reg & CFGR_LNKSTS || 853 reg & CFGR_SPDSTS || 854 reg & CFGR_DUPSTS || 855 reg & CFGR_RESERVED || 856 reg & CFGR_T64ADDR || 857 reg & CFGR_PCI64_DET) 858 859 // First clear all writable bits 860 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS | 861 CFGR_RESERVED | CFGR_T64ADDR | 862 CFGR_PCI64_DET; 863 // Now set the appropriate writable bits 864 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS | 865 CFGR_RESERVED | CFGR_T64ADDR | 866 CFGR_PCI64_DET); 867 868// all these #if 0's are because i don't THINK the kernel needs to 869// have these implemented. if there is a problem relating to one of 870// these, you may need to add functionality in. 
871#if 0 872 if (reg & CFGR_TBI_EN) ; 873 if (reg & CFGR_MODE_1000) ; 874#endif 875 876 if (reg & CFGR_AUTO_1000) 877 panic("CFGR_AUTO_1000 not implemented!\n"); 878 879#if 0 880 if (reg & CFGR_PINT_DUPSTS || 881 reg & CFGR_PINT_LNKSTS || 882 reg & CFGR_PINT_SPDSTS) 883 ; 884 885 if (reg & CFGR_TMRTEST) ; 886 if (reg & CFGR_MRM_DIS) ; 887 if (reg & CFGR_MWI_DIS) ; 888 889 if (reg & CFGR_T64ADDR) 890 panic("CFGR_T64ADDR is read only register!\n"); 891 892 if (reg & CFGR_PCI64_DET) 893 panic("CFGR_PCI64_DET is read only register!\n"); 894 895 if (reg & CFGR_DATA64_EN) ; 896 if (reg & CFGR_M64ADDR) ; 897 if (reg & CFGR_PHY_RST) ; 898 if (reg & CFGR_PHY_DIS) ; 899#endif 900 901 if (reg & CFGR_EXTSTS_EN) 902 extstsEnable = true; 903 else 904 extstsEnable = false; 905 906#if 0 907 if (reg & CFGR_REQALG) ; 908 if (reg & CFGR_SB) ; 909 if (reg & CFGR_POW) ; 910 if (reg & CFGR_EXD) ; 911 if (reg & CFGR_PESEL) ; 912 if (reg & CFGR_BROM_DIS) ; 913 if (reg & CFGR_EXT_125) ; 914 if (reg & CFGR_BEM) ; 915#endif 916 break; 917 918 case MEAR: 919 // Clear writable bits 920 regs.mear &= MEAR_EEDO; 921 // Set appropriate writable bits 922 regs.mear |= reg & ~MEAR_EEDO; 923 924 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address) 925 // even though it could get it through RFDR 926 if (reg & MEAR_EESEL) { 927 // Rising edge of clock 928 if (reg & MEAR_EECLK && !eepromClk) 929 eepromKick(); 930 } 931 else { 932 eepromState = eepromStart; 933 regs.mear &= ~MEAR_EEDI; 934 } 935 936 eepromClk = reg & MEAR_EECLK; 937 938 // since phy is completely faked, MEAR_MD* don't matter 939#if 0 940 if (reg & MEAR_MDIO) ; 941 if (reg & MEAR_MDDIR) ; 942 if (reg & MEAR_MDC) ; 943#endif 944 break; 945 946 case PTSCR: 947 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY); 948 // these control BISTs for various parts of chip - we 949 // don't care or do just fake that the BIST is done 950 if (reg & PTSCR_RBIST_EN) 951 regs.ptscr |= PTSCR_RBIST_DONE; 952 if (reg & PTSCR_EEBIST_EN) 953 regs.ptscr &= 
~PTSCR_EEBIST_EN; 954 if (reg & PTSCR_EELOAD_EN) 955 regs.ptscr &= ~PTSCR_EELOAD_EN; 956 break; 957 958 case ISR: /* writing to the ISR has no effect */ 959 panic("ISR is a read only register!\n"); 960 961 case IMR: 962 regs.imr = reg; 963 devIntrChangeMask(); 964 break; 965 966 case IER: 967 regs.ier = reg; 968 break; 969 970 case IHR: 971 regs.ihr = reg; 972 /* not going to implement real interrupt holdoff */ 973 break; 974 975 case TXDP: 976 regs.txdp = (reg & 0xFFFFFFFC); 977 assert(txState == txIdle); 978 CTDD = false; 979 break; 980 981 case TXDP_HI: 982 regs.txdp_hi = reg; 983 break; 984 985 case TX_CFG: 986 regs.txcfg = reg; 987#if 0 988 if (reg & TX_CFG_CSI) ; 989 if (reg & TX_CFG_HBI) ; 990 if (reg & TX_CFG_MLB) ; 991 if (reg & TX_CFG_ATP) ; 992 if (reg & TX_CFG_ECRETRY) { 993 /* 994 * this could easily be implemented, but considering 995 * the network is just a fake pipe, wouldn't make 996 * sense to do this 997 */ 998 } 999 1000 if (reg & TX_CFG_BRST_DIS) ; 1001#endif 1002 1003#if 0 1004 /* we handle our own DMA, ignore the kernel's exhortations */ 1005 if (reg & TX_CFG_MXDMA) ; 1006#endif 1007 1008 // also, we currently don't care about fill/drain 1009 // thresholds though this may change in the future with 1010 // more realistic networks or a driver which changes it 1011 // according to feedback 1012 1013 break; 1014 1015 case GPIOR: 1016 // Only write writable bits 1017 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN 1018 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN; 1019 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN 1020 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN); 1021 /* these just control general purpose i/o pins, don't matter */ 1022 break; 1023 1024 case RXDP: 1025 regs.rxdp = reg; 1026 CRDD = false; 1027 break; 1028 1029 case RXDP_HI: 1030 regs.rxdp_hi = reg; 1031 break; 1032 1033 case RX_CFG: 1034 regs.rxcfg = reg; 1035#if 0 1036 if (reg & RX_CFG_AEP) ; 1037 if (reg & RX_CFG_ARP) ; 1038 if (reg & RX_CFG_STRIPCRC) 
; 1039 if (reg & RX_CFG_RX_RD) ; 1040 if (reg & RX_CFG_ALP) ; 1041 if (reg & RX_CFG_AIRL) ; 1042 1043 /* we handle our own DMA, ignore what kernel says about it */ 1044 if (reg & RX_CFG_MXDMA) ; 1045 1046 //also, we currently don't care about fill/drain thresholds 1047 //though this may change in the future with more realistic 1048 //networks or a driver which changes it according to feedback 1049 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ; 1050#endif 1051 break; 1052 1053 case PQCR: 1054 /* there is no priority queueing used in the linux 2.6 driver */ 1055 regs.pqcr = reg; 1056 break; 1057 1058 case WCSR: 1059 /* not going to implement wake on LAN */ 1060 regs.wcsr = reg; 1061 break; 1062 1063 case PCR: 1064 /* not going to implement pause control */ 1065 regs.pcr = reg; 1066 break; 1067 1068 case RFCR: 1069 regs.rfcr = reg; 1070 1071 rxFilterEnable = (reg & RFCR_RFEN) ? true : false; 1072 acceptBroadcast = (reg & RFCR_AAB) ? true : false; 1073 acceptMulticast = (reg & RFCR_AAM) ? true : false; 1074 acceptUnicast = (reg & RFCR_AAU) ? true : false; 1075 acceptPerfect = (reg & RFCR_APM) ? true : false; 1076 acceptArp = (reg & RFCR_AARP) ? true : false; 1077 multicastHashEnable = (reg & RFCR_MHEN) ? 
true : false; 1078 1079#if 0 1080 if (reg & RFCR_APAT) 1081 panic("RFCR_APAT not implemented!\n"); 1082#endif 1083 if (reg & RFCR_UHEN) 1084 panic("Unicast hash filtering not used by drivers!\n"); 1085 1086 if (reg & RFCR_ULM) 1087 panic("RFCR_ULM not implemented!\n"); 1088 1089 break; 1090 1091 case RFDR: 1092 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR); 1093 switch (rfaddr) { 1094 case 0x000: 1095 rom.perfectMatch[0] = (uint8_t)reg; 1096 rom.perfectMatch[1] = (uint8_t)(reg >> 8); 1097 break; 1098 case 0x002: 1099 rom.perfectMatch[2] = (uint8_t)reg; 1100 rom.perfectMatch[3] = (uint8_t)(reg >> 8); 1101 break; 1102 case 0x004: 1103 rom.perfectMatch[4] = (uint8_t)reg; 1104 rom.perfectMatch[5] = (uint8_t)(reg >> 8); 1105 break; 1106 default: 1107 1108 if (rfaddr >= FHASH_ADDR && 1109 rfaddr < FHASH_ADDR + FHASH_SIZE) { 1110 1111 // Only word-aligned writes supported 1112 if (rfaddr % 2) 1113 panic("unaligned write to filter hash table!"); 1114 1115 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg; 1116 rom.filterHash[rfaddr - FHASH_ADDR + 1] 1117 = (uint8_t)(reg >> 8); 1118 break; 1119 } 1120 panic("writing RFDR for something other than pattern matching\ 1121 or hashing! 
%#x\n", rfaddr); 1122 } 1123 1124 case BRAR: 1125 regs.brar = reg; 1126 break; 1127 1128 case BRDR: 1129 panic("the driver never uses BRDR, something is wrong!\n"); 1130 1131 case SRR: 1132 panic("SRR is read only register!\n"); 1133 1134 case MIBC: 1135 panic("the driver never uses MIBC, something is wrong!\n"); 1136 1137 case VRCR: 1138 regs.vrcr = reg; 1139 break; 1140 1141 case VTCR: 1142 regs.vtcr = reg; 1143 break; 1144 1145 case VDR: 1146 panic("the driver never uses VDR, something is wrong!\n"); 1147 1148 case CCSR: 1149 /* not going to implement clockrun stuff */ 1150 regs.ccsr = reg; 1151 break; 1152 1153 case TBICR: 1154 regs.tbicr = reg; 1155 if (reg & TBICR_MR_LOOPBACK) 1156 panic("TBICR_MR_LOOPBACK never used, something wrong!\n"); 1157 1158 if (reg & TBICR_MR_AN_ENABLE) { 1159 regs.tanlpar = regs.tanar; 1160 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS); 1161 } 1162 1163#if 0 1164 if (reg & TBICR_MR_RESTART_AN) ; 1165#endif 1166 1167 break; 1168 1169 case TBISR: 1170 panic("TBISR is read only register!\n"); 1171 1172 case TANAR: 1173 // Only write the writable bits 1174 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED; 1175 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED); 1176 1177 // Pause capability unimplemented 1178#if 0 1179 if (reg & TANAR_PS2) ; 1180 if (reg & TANAR_PS1) ; 1181#endif 1182 1183 break; 1184 1185 case TANLPAR: 1186 panic("this should only be written to by the fake phy!\n"); 1187 1188 case TANER: 1189 panic("TANER is read only register!\n"); 1190 1191 case TESR: 1192 regs.tesr = reg; 1193 break; 1194 1195 default: 1196 panic("invalid register access daddr=%#x", daddr); 1197 } 1198 } else { 1199 panic("Invalid Request Size"); 1200 } 1201 1202 return No_Fault; 1203} 1204 1205void 1206NSGigE::devIntrPost(uint32_t interrupts) 1207{ 1208 if (interrupts & ISR_RESERVE) 1209 panic("Cannot set a reserved interrupt"); 1210 1211 if (interrupts & ISR_NOIMPL) 1212 warn("interrupt not implemented %#x\n", interrupts); 
1213 1214 interrupts &= ~ISR_NOIMPL; 1215 regs.isr |= interrupts; 1216 1217 if (interrupts & regs.imr) { 1218 if (interrupts & ISR_SWI) { 1219 totalSwi++; 1220 } 1221 if (interrupts & ISR_RXIDLE) { 1222 totalRxIdle++; 1223 } 1224 if (interrupts & ISR_RXOK) { 1225 totalRxOk++; 1226 } 1227 if (interrupts & ISR_RXDESC) { 1228 totalRxDesc++; 1229 } 1230 if (interrupts & ISR_TXOK) { 1231 totalTxOk++; 1232 } 1233 if (interrupts & ISR_TXIDLE) { 1234 totalTxIdle++; 1235 } 1236 if (interrupts & ISR_TXDESC) { 1237 totalTxDesc++; 1238 } 1239 if (interrupts & ISR_RXORN) { 1240 totalRxOrn++; 1241 } 1242 } 1243 1244 DPRINTF(EthernetIntr, 1245 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n", 1246 interrupts, regs.isr, regs.imr); 1247 1248 if ((regs.isr & regs.imr)) { 1249 Tick when = curTick; 1250 if (!(regs.isr & regs.imr & ISR_NODELAY)) 1251 when += intrDelay; 1252 cpuIntrPost(when); 1253 } 1254} 1255 1256/* writing this interrupt counting stats inside this means that this function 1257 is now limited to being used to clear all interrupts upon the kernel 1258 reading isr and servicing. just telling you in case you were thinking 1259 of expanding use. 
1260*/ 1261void 1262NSGigE::devIntrClear(uint32_t interrupts) 1263{ 1264 if (interrupts & ISR_RESERVE) 1265 panic("Cannot clear a reserved interrupt"); 1266 1267 if (regs.isr & regs.imr & ISR_SWI) { 1268 postedSwi++; 1269 } 1270 if (regs.isr & regs.imr & ISR_RXIDLE) { 1271 postedRxIdle++; 1272 } 1273 if (regs.isr & regs.imr & ISR_RXOK) { 1274 postedRxOk++; 1275 } 1276 if (regs.isr & regs.imr & ISR_RXDESC) { 1277 postedRxDesc++; 1278 } 1279 if (regs.isr & regs.imr & ISR_TXOK) { 1280 postedTxOk++; 1281 } 1282 if (regs.isr & regs.imr & ISR_TXIDLE) { 1283 postedTxIdle++; 1284 } 1285 if (regs.isr & regs.imr & ISR_TXDESC) { 1286 postedTxDesc++; 1287 } 1288 if (regs.isr & regs.imr & ISR_RXORN) { 1289 postedRxOrn++; 1290 } 1291 1292 if (regs.isr & regs.imr & (ISR_SWI | ISR_RXIDLE | ISR_RXOK | ISR_RXDESC | 1293 ISR_TXOK | ISR_TXIDLE | ISR_TXDESC | ISR_RXORN) ) 1294 postedInterrupts++; 1295 1296 interrupts &= ~ISR_NOIMPL; 1297 regs.isr &= ~interrupts; 1298 1299 DPRINTF(EthernetIntr, 1300 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n", 1301 interrupts, regs.isr, regs.imr); 1302 1303 if (!(regs.isr & regs.imr)) 1304 cpuIntrClear(); 1305} 1306 1307void 1308NSGigE::devIntrChangeMask() 1309{ 1310 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n", 1311 regs.isr, regs.imr, regs.isr & regs.imr); 1312 1313 if (regs.isr & regs.imr) 1314 cpuIntrPost(curTick); 1315 else 1316 cpuIntrClear(); 1317} 1318 1319void 1320NSGigE::cpuIntrPost(Tick when) 1321{ 1322 // If the interrupt you want to post is later than an interrupt 1323 // already scheduled, just let it post in the coming one and don't 1324 // schedule another. 1325 // HOWEVER, must be sure that the scheduled intrTick is in the 1326 // future (this was formerly the source of a bug) 1327 /** 1328 * @todo this warning should be removed and the intrTick code should 1329 * be fixed. 
1330 */ 1331 assert(when >= curTick); 1332 assert(intrTick >= curTick || intrTick == 0); 1333 if (when > intrTick && intrTick != 0) { 1334 DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n", 1335 intrTick); 1336 return; 1337 } 1338 1339 intrTick = when; 1340 if (intrTick < curTick) { 1341 debug_break(); 1342 intrTick = curTick; 1343 } 1344 1345 DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n", 1346 intrTick); 1347 1348 if (intrEvent) 1349 intrEvent->squash(); 1350 intrEvent = new IntrEvent(this, true); 1351 intrEvent->schedule(intrTick); 1352} 1353 1354void 1355NSGigE::cpuInterrupt() 1356{ 1357 assert(intrTick == curTick); 1358 1359 // Whether or not there's a pending interrupt, we don't care about 1360 // it anymore 1361 intrEvent = 0; 1362 intrTick = 0; 1363 1364 // Don't send an interrupt if there's already one 1365 if (cpuPendingIntr) { 1366 DPRINTF(EthernetIntr, 1367 "would send an interrupt now, but there's already pending\n"); 1368 } else { 1369 // Send interrupt 1370 cpuPendingIntr = true; 1371 1372 DPRINTF(EthernetIntr, "posting interrupt\n"); 1373 intrPost(); 1374 } 1375} 1376 1377void 1378NSGigE::cpuIntrClear() 1379{ 1380 if (!cpuPendingIntr) 1381 return; 1382 1383 if (intrEvent) { 1384 intrEvent->squash(); 1385 intrEvent = 0; 1386 } 1387 1388 intrTick = 0; 1389 1390 cpuPendingIntr = false; 1391 1392 DPRINTF(EthernetIntr, "clearing interrupt\n"); 1393 intrClear(); 1394} 1395 1396bool 1397NSGigE::cpuIntrPending() const 1398{ return cpuPendingIntr; } 1399 1400void 1401NSGigE::txReset() 1402{ 1403 1404 DPRINTF(Ethernet, "transmit reset\n"); 1405 1406 CTDD = false; 1407 txEnable = false;; 1408 txFragPtr = 0; 1409 assert(txDescCnt == 0); 1410 txFifo.clear(); 1411 txState = txIdle; 1412 assert(txDmaState == dmaIdle); 1413} 1414 1415void 1416NSGigE::rxReset() 1417{ 1418 DPRINTF(Ethernet, "receive reset\n"); 1419 1420 CRDD = false; 1421 assert(rxPktBytes == 0); 1422 rxEnable = false; 1423 rxFragPtr = 0; 1424 
assert(rxDescCnt == 0); 1425 assert(rxDmaState == dmaIdle); 1426 rxFifo.clear(); 1427 rxState = rxIdle; 1428} 1429 1430void 1431NSGigE::regsReset() 1432{ 1433 memset(®s, 0, sizeof(regs)); 1434 regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000); 1435 regs.mear = 0x12; 1436 regs.txcfg = 0x120; // set drain threshold to 1024 bytes and 1437 // fill threshold to 32 bytes 1438 regs.rxcfg = 0x4; // set drain threshold to 16 bytes 1439 regs.srr = 0x0103; // set the silicon revision to rev B or 0x103 1440 regs.mibc = MIBC_FRZ; 1441 regs.vdr = 0x81; // set the vlan tag type to 802.1q 1442 regs.tesr = 0xc000; // TBI capable of both full and half duplex 1443 regs.brar = 0xffffffff; 1444 1445 extstsEnable = false; 1446 acceptBroadcast = false; 1447 acceptMulticast = false; 1448 acceptUnicast = false; 1449 acceptPerfect = false; 1450 acceptArp = false; 1451} 1452 1453void 1454NSGigE::rxDmaReadCopy() 1455{ 1456 assert(rxDmaState == dmaReading); 1457 1458 physmem->dma_read((uint8_t *)rxDmaData, rxDmaAddr, rxDmaLen); 1459 rxDmaState = dmaIdle; 1460 1461 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n", 1462 rxDmaAddr, rxDmaLen); 1463 DDUMP(EthernetDMA, rxDmaData, rxDmaLen); 1464} 1465 1466bool 1467NSGigE::doRxDmaRead() 1468{ 1469 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting); 1470 rxDmaState = dmaReading; 1471 1472 if (dmaInterface && !rxDmaFree) { 1473 if (dmaInterface->busy()) 1474 rxDmaState = dmaReadWaiting; 1475 else 1476 dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick, 1477 &rxDmaReadEvent, true); 1478 return true; 1479 } 1480 1481 if (dmaReadDelay == 0 && dmaReadFactor == 0) { 1482 rxDmaReadCopy(); 1483 return false; 1484 } 1485 1486 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor; 1487 Tick start = curTick + dmaReadDelay + factor; 1488 rxDmaReadEvent.schedule(start); 1489 return true; 1490} 1491 1492void 1493NSGigE::rxDmaReadDone() 1494{ 1495 assert(rxDmaState == dmaReading); 1496 rxDmaReadCopy(); 1497 1498 // If the 
transmit state machine has a pending DMA, let it go first 1499 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting) 1500 txKick(); 1501 1502 rxKick(); 1503} 1504 1505void 1506NSGigE::rxDmaWriteCopy() 1507{ 1508 assert(rxDmaState == dmaWriting); 1509 1510 physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen); 1511 rxDmaState = dmaIdle; 1512 1513 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n", 1514 rxDmaAddr, rxDmaLen); 1515 DDUMP(EthernetDMA, rxDmaData, rxDmaLen); 1516} 1517 1518bool 1519NSGigE::doRxDmaWrite() 1520{ 1521 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting); 1522 rxDmaState = dmaWriting; 1523 1524 if (dmaInterface && !rxDmaFree) { 1525 if (dmaInterface->busy()) 1526 rxDmaState = dmaWriteWaiting; 1527 else 1528 dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick, 1529 &rxDmaWriteEvent, true); 1530 return true; 1531 } 1532 1533 if (dmaWriteDelay == 0 && dmaWriteFactor == 0) { 1534 rxDmaWriteCopy(); 1535 return false; 1536 } 1537 1538 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor; 1539 Tick start = curTick + dmaWriteDelay + factor; 1540 rxDmaWriteEvent.schedule(start); 1541 return true; 1542} 1543 1544void 1545NSGigE::rxDmaWriteDone() 1546{ 1547 assert(rxDmaState == dmaWriting); 1548 rxDmaWriteCopy(); 1549 1550 // If the transmit state machine has a pending DMA, let it go first 1551 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting) 1552 txKick(); 1553 1554 rxKick(); 1555} 1556 1557void 1558NSGigE::rxKick() 1559{ 1560 DPRINTF(EthernetSM, "receive kick rxState=%s (rxBuf.size=%d)\n", 1561 NsRxStateStrings[rxState], rxFifo.size()); 1562 1563 next: 1564 if (clock) { 1565 if (rxKickTick > curTick) { 1566 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n", 1567 rxKickTick); 1568 1569 goto exit; 1570 } 1571 1572 // Go to the next state machine clock tick. 
1573 rxKickTick = curTick + cycles(1); 1574 } 1575 1576 switch(rxDmaState) { 1577 case dmaReadWaiting: 1578 if (doRxDmaRead()) 1579 goto exit; 1580 break; 1581 case dmaWriteWaiting: 1582 if (doRxDmaWrite()) 1583 goto exit; 1584 break; 1585 default: 1586 break; 1587 } 1588 1589 // see state machine from spec for details 1590 // the way this works is, if you finish work on one state and can 1591 // go directly to another, you do that through jumping to the 1592 // label "next". however, if you have intermediate work, like DMA 1593 // so that you can't go to the next state yet, you go to exit and 1594 // exit the loop. however, when the DMA is done it will trigger 1595 // an event and come back to this loop. 1596 switch (rxState) { 1597 case rxIdle: 1598 if (!rxEnable) { 1599 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n"); 1600 goto exit; 1601 } 1602 1603 if (CRDD) { 1604 rxState = rxDescRefr; 1605 1606 rxDmaAddr = regs.rxdp & 0x3fffffff; 1607 rxDmaData = &rxDescCache + offsetof(ns_desc, link); 1608 rxDmaLen = sizeof(rxDescCache.link); 1609 rxDmaFree = dmaDescFree; 1610 1611 descDmaReads++; 1612 descDmaRdBytes += rxDmaLen; 1613 1614 if (doRxDmaRead()) 1615 goto exit; 1616 } else { 1617 rxState = rxDescRead; 1618 1619 rxDmaAddr = regs.rxdp & 0x3fffffff; 1620 rxDmaData = &rxDescCache; 1621 rxDmaLen = sizeof(ns_desc); 1622 rxDmaFree = dmaDescFree; 1623 1624 descDmaReads++; 1625 descDmaRdBytes += rxDmaLen; 1626 1627 if (doRxDmaRead()) 1628 goto exit; 1629 } 1630 break; 1631 1632 case rxDescRefr: 1633 if (rxDmaState != dmaIdle) 1634 goto exit; 1635 1636 rxState = rxAdvance; 1637 break; 1638 1639 case rxDescRead: 1640 if (rxDmaState != dmaIdle) 1641 goto exit; 1642 1643 DPRINTF(EthernetDesc, "rxDescCache: addr=%08x read descriptor\n", 1644 regs.rxdp & 0x3fffffff); 1645 DPRINTF(EthernetDesc, 1646 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n", 1647 rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts, 1648 rxDescCache.extsts); 1649 1650 if 
(rxDescCache.cmdsts & CMDSTS_OWN) { 1651 devIntrPost(ISR_RXIDLE); 1652 rxState = rxIdle; 1653 goto exit; 1654 } else { 1655 rxState = rxFifoBlock; 1656 rxFragPtr = rxDescCache.bufptr; 1657 rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK; 1658 } 1659 break; 1660 1661 case rxFifoBlock: 1662 if (!rxPacket) { 1663 /** 1664 * @todo in reality, we should be able to start processing 1665 * the packet as it arrives, and not have to wait for the 1666 * full packet ot be in the receive fifo. 1667 */ 1668 if (rxFifo.empty()) 1669 goto exit; 1670 1671 DPRINTF(EthernetSM, "****processing receive of new packet****\n"); 1672 1673 // If we don't have a packet, grab a new one from the fifo. 1674 rxPacket = rxFifo.front(); 1675 rxPktBytes = rxPacket->length; 1676 rxPacketBufPtr = rxPacket->data; 1677 1678#if TRACING_ON 1679 if (DTRACE(Ethernet)) { 1680 IpPtr ip(rxPacket); 1681 if (ip) { 1682 DPRINTF(Ethernet, "ID is %d\n", ip->id()); 1683 TcpPtr tcp(ip); 1684 if (tcp) { 1685 DPRINTF(Ethernet, 1686 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n", 1687 tcp->sport(), tcp->dport(), tcp->seq(), 1688 tcp->ack()); 1689 } 1690 } 1691 } 1692#endif 1693 1694 // sanity check - i think the driver behaves like this 1695 assert(rxDescCnt >= rxPktBytes); 1696 rxFifo.pop(); 1697 } 1698 1699 1700 // dont' need the && rxDescCnt > 0 if driver sanity check 1701 // above holds 1702 if (rxPktBytes > 0) { 1703 rxState = rxFragWrite; 1704 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity 1705 // check holds 1706 rxXferLen = rxPktBytes; 1707 1708 rxDmaAddr = rxFragPtr & 0x3fffffff; 1709 rxDmaData = rxPacketBufPtr; 1710 rxDmaLen = rxXferLen; 1711 rxDmaFree = dmaDataFree; 1712 1713 if (doRxDmaWrite()) 1714 goto exit; 1715 1716 } else { 1717 rxState = rxDescWrite; 1718 1719 //if (rxPktBytes == 0) { /* packet is done */ 1720 assert(rxPktBytes == 0); 1721 DPRINTF(EthernetSM, "done with receiving packet\n"); 1722 1723 rxDescCache.cmdsts |= CMDSTS_OWN; 1724 rxDescCache.cmdsts &= ~CMDSTS_MORE; 1725 
rxDescCache.cmdsts |= CMDSTS_OK; 1726 rxDescCache.cmdsts &= 0xffff0000; 1727 rxDescCache.cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE 1728 1729#if 0 1730 /* 1731 * all the driver uses these are for its own stats keeping 1732 * which we don't care about, aren't necessary for 1733 * functionality and doing this would just slow us down. 1734 * if they end up using this in a later version for 1735 * functional purposes, just undef 1736 */ 1737 if (rxFilterEnable) { 1738 rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK; 1739 const EthAddr &dst = rxFifoFront()->dst(); 1740 if (dst->unicast()) 1741 rxDescCache.cmdsts |= CMDSTS_DEST_SELF; 1742 if (dst->multicast()) 1743 rxDescCache.cmdsts |= CMDSTS_DEST_MULTI; 1744 if (dst->broadcast()) 1745 rxDescCache.cmdsts |= CMDSTS_DEST_MASK; 1746 } 1747#endif 1748 1749 IpPtr ip(rxPacket); 1750 if (extstsEnable && ip) { 1751 rxDescCache.extsts |= EXTSTS_IPPKT; 1752 rxIpChecksums++; 1753 if (cksum(ip) != 0) { 1754 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n"); 1755 rxDescCache.extsts |= EXTSTS_IPERR; 1756 } 1757 TcpPtr tcp(ip); 1758 UdpPtr udp(ip); 1759 if (tcp) { 1760 rxDescCache.extsts |= EXTSTS_TCPPKT; 1761 rxTcpChecksums++; 1762 if (cksum(tcp) != 0) { 1763 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n"); 1764 rxDescCache.extsts |= EXTSTS_TCPERR; 1765 1766 } 1767 } else if (udp) { 1768 rxDescCache.extsts |= EXTSTS_UDPPKT; 1769 rxUdpChecksums++; 1770 if (cksum(udp) != 0) { 1771 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n"); 1772 rxDescCache.extsts |= EXTSTS_UDPERR; 1773 } 1774 } 1775 } 1776 rxPacket = 0; 1777 1778 /* 1779 * the driver seems to always receive into desc buffers 1780 * of size 1514, so you never have a pkt that is split 1781 * into multiple descriptors on the receive side, so 1782 * i don't implement that case, hence the assert above. 
1783 */ 1784 1785 DPRINTF(EthernetDesc, 1786 "rxDescCache: addr=%08x writeback cmdsts extsts\n", 1787 regs.rxdp & 0x3fffffff); 1788 DPRINTF(EthernetDesc, 1789 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n", 1790 rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts, 1791 rxDescCache.extsts); 1792 1793 rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff; 1794 rxDmaData = &(rxDescCache.cmdsts); 1795 rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts); 1796 rxDmaFree = dmaDescFree; 1797 1798 descDmaWrites++; 1799 descDmaWrBytes += rxDmaLen; 1800 1801 if (doRxDmaWrite()) 1802 goto exit; 1803 } 1804 break; 1805 1806 case rxFragWrite: 1807 if (rxDmaState != dmaIdle) 1808 goto exit; 1809 1810 rxPacketBufPtr += rxXferLen; 1811 rxFragPtr += rxXferLen; 1812 rxPktBytes -= rxXferLen; 1813 1814 rxState = rxFifoBlock; 1815 break; 1816 1817 case rxDescWrite: 1818 if (rxDmaState != dmaIdle) 1819 goto exit; 1820 1821 assert(rxDescCache.cmdsts & CMDSTS_OWN); 1822 1823 assert(rxPacket == 0); 1824 devIntrPost(ISR_RXOK); 1825 1826 if (rxDescCache.cmdsts & CMDSTS_INTR) 1827 devIntrPost(ISR_RXDESC); 1828 1829 if (!rxEnable) { 1830 DPRINTF(EthernetSM, "Halting the RX state machine\n"); 1831 rxState = rxIdle; 1832 goto exit; 1833 } else 1834 rxState = rxAdvance; 1835 break; 1836 1837 case rxAdvance: 1838 if (rxDescCache.link == 0) { 1839 devIntrPost(ISR_RXIDLE); 1840 rxState = rxIdle; 1841 CRDD = true; 1842 goto exit; 1843 } else { 1844 rxState = rxDescRead; 1845 regs.rxdp = rxDescCache.link; 1846 CRDD = false; 1847 1848 rxDmaAddr = regs.rxdp & 0x3fffffff; 1849 rxDmaData = &rxDescCache; 1850 rxDmaLen = sizeof(ns_desc); 1851 rxDmaFree = dmaDescFree; 1852 1853 if (doRxDmaRead()) 1854 goto exit; 1855 } 1856 break; 1857 1858 default: 1859 panic("Invalid rxState!"); 1860 } 1861 1862 DPRINTF(EthernetSM, "entering next rxState=%s\n", 1863 NsRxStateStrings[rxState]); 1864 goto next; 1865 1866 exit: 1867 /** 1868 * @todo do we want to schedule a future 
kick? 1869 */ 1870 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n", 1871 NsRxStateStrings[rxState]); 1872 1873 if (clock && !rxKickEvent.scheduled()) 1874 rxKickEvent.schedule(rxKickTick); 1875} 1876 1877void 1878NSGigE::transmit() 1879{ 1880 if (txFifo.empty()) { 1881 DPRINTF(Ethernet, "nothing to transmit\n"); 1882 return; 1883 } 1884 1885 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n", 1886 txFifo.size()); 1887 if (interface->sendPacket(txFifo.front())) { 1888#if TRACING_ON 1889 if (DTRACE(Ethernet)) { 1890 IpPtr ip(txFifo.front()); 1891 if (ip) { 1892 DPRINTF(Ethernet, "ID is %d\n", ip->id()); 1893 TcpPtr tcp(ip); 1894 if (tcp) { 1895 DPRINTF(Ethernet, 1896 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n", 1897 tcp->sport(), tcp->dport(), tcp->seq(), tcp->ack()); 1898 } 1899 } 1900 } 1901#endif 1902 1903 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length); 1904 txBytes += txFifo.front()->length; 1905 txPackets++; 1906 1907 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n", 1908 txFifo.avail()); 1909 txFifo.pop(); 1910 1911 /* 1912 * normally do a writeback of the descriptor here, and ONLY 1913 * after that is done, send this interrupt. but since our 1914 * stuff never actually fails, just do this interrupt here, 1915 * otherwise the code has to stray from this nice format. 1916 * besides, it's functionally the same. 
1917 */ 1918 devIntrPost(ISR_TXOK); 1919 } 1920 1921 if (!txFifo.empty() && !txEvent.scheduled()) { 1922 DPRINTF(Ethernet, "reschedule transmit\n"); 1923 txEvent.schedule(curTick + retryTime); 1924 } 1925} 1926 1927void 1928NSGigE::txDmaReadCopy() 1929{ 1930 assert(txDmaState == dmaReading); 1931 1932 physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen); 1933 txDmaState = dmaIdle; 1934 1935 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n", 1936 txDmaAddr, txDmaLen); 1937 DDUMP(EthernetDMA, txDmaData, txDmaLen); 1938} 1939 1940bool 1941NSGigE::doTxDmaRead() 1942{ 1943 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting); 1944 txDmaState = dmaReading; 1945 1946 if (dmaInterface && !txDmaFree) { 1947 if (dmaInterface->busy()) 1948 txDmaState = dmaReadWaiting; 1949 else 1950 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick, 1951 &txDmaReadEvent, true); 1952 return true; 1953 } 1954 1955 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) { 1956 txDmaReadCopy(); 1957 return false; 1958 } 1959 1960 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor; 1961 Tick start = curTick + dmaReadDelay + factor; 1962 txDmaReadEvent.schedule(start); 1963 return true; 1964} 1965 1966void 1967NSGigE::txDmaReadDone() 1968{ 1969 assert(txDmaState == dmaReading); 1970 txDmaReadCopy(); 1971 1972 // If the receive state machine has a pending DMA, let it go first 1973 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting) 1974 rxKick(); 1975 1976 txKick(); 1977} 1978 1979void 1980NSGigE::txDmaWriteCopy() 1981{ 1982 assert(txDmaState == dmaWriting); 1983 1984 physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen); 1985 txDmaState = dmaIdle; 1986 1987 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n", 1988 txDmaAddr, txDmaLen); 1989 DDUMP(EthernetDMA, txDmaData, txDmaLen); 1990} 1991 1992bool 1993NSGigE::doTxDmaWrite() 1994{ 1995 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting); 1996 txDmaState = dmaWriting; 1997 1998 if 
(dmaInterface && !txDmaFree) { 1999 if (dmaInterface->busy()) 2000 txDmaState = dmaWriteWaiting; 2001 else 2002 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick, 2003 &txDmaWriteEvent, true); 2004 return true; 2005 } 2006 2007 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) { 2008 txDmaWriteCopy(); 2009 return false; 2010 } 2011 2012 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor; 2013 Tick start = curTick + dmaWriteDelay + factor; 2014 txDmaWriteEvent.schedule(start); 2015 return true; 2016} 2017 2018void 2019NSGigE::txDmaWriteDone() 2020{ 2021 assert(txDmaState == dmaWriting); 2022 txDmaWriteCopy(); 2023 2024 // If the receive state machine has a pending DMA, let it go first 2025 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting) 2026 rxKick(); 2027 2028 txKick(); 2029} 2030 2031void 2032NSGigE::txKick() 2033{ 2034 DPRINTF(EthernetSM, "transmit kick txState=%s\n", 2035 NsTxStateStrings[txState]); 2036 2037 next: 2038 if (clock) { 2039 if (txKickTick > curTick) { 2040 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n", 2041 txKickTick); 2042 goto exit; 2043 } 2044 2045 // Go to the next state machine clock tick. 2046 txKickTick = curTick + cycles(1); 2047 } 2048 2049 switch(txDmaState) { 2050 case dmaReadWaiting: 2051 if (doTxDmaRead()) 2052 goto exit; 2053 break; 2054 case dmaWriteWaiting: 2055 if (doTxDmaWrite()) 2056 goto exit; 2057 break; 2058 default: 2059 break; 2060 } 2061 2062 switch (txState) { 2063 case txIdle: 2064 if (!txEnable) { 2065 DPRINTF(EthernetSM, "Transmit disabled. 
Nothing to do.\n"); 2066 goto exit; 2067 } 2068 2069 if (CTDD) { 2070 txState = txDescRefr; 2071 2072 txDmaAddr = regs.txdp & 0x3fffffff; 2073 txDmaData = &txDescCache + offsetof(ns_desc, link); 2074 txDmaLen = sizeof(txDescCache.link); 2075 txDmaFree = dmaDescFree; 2076 2077 descDmaReads++; 2078 descDmaRdBytes += txDmaLen; 2079 2080 if (doTxDmaRead()) 2081 goto exit; 2082 2083 } else { 2084 txState = txDescRead; 2085 2086 txDmaAddr = regs.txdp & 0x3fffffff; 2087 txDmaData = &txDescCache; 2088 txDmaLen = sizeof(ns_desc); 2089 txDmaFree = dmaDescFree; 2090 2091 descDmaReads++; 2092 descDmaRdBytes += txDmaLen; 2093 2094 if (doTxDmaRead()) 2095 goto exit; 2096 } 2097 break; 2098 2099 case txDescRefr: 2100 if (txDmaState != dmaIdle) 2101 goto exit; 2102 2103 txState = txAdvance; 2104 break; 2105 2106 case txDescRead: 2107 if (txDmaState != dmaIdle) 2108 goto exit; 2109 2110 DPRINTF(EthernetDesc, "txDescCache: addr=%08x read descriptor\n", 2111 regs.txdp & 0x3fffffff); 2112 DPRINTF(EthernetDesc, 2113 "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n", 2114 txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts, 2115 txDescCache.extsts); 2116 2117 if (txDescCache.cmdsts & CMDSTS_OWN) { 2118 txState = txFifoBlock; 2119 txFragPtr = txDescCache.bufptr; 2120 txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK; 2121 } else { 2122 devIntrPost(ISR_TXIDLE); 2123 txState = txIdle; 2124 goto exit; 2125 } 2126 break; 2127 2128 case txFifoBlock: 2129 if (!txPacket) { 2130 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n"); 2131 txPacket = new PacketData(16384); 2132 txPacketBufPtr = txPacket->data; 2133 } 2134 2135 if (txDescCnt == 0) { 2136 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n"); 2137 if (txDescCache.cmdsts & CMDSTS_MORE) { 2138 DPRINTF(EthernetSM, "there are more descriptors to come\n"); 2139 txState = txDescWrite; 2140 2141 txDescCache.cmdsts &= ~CMDSTS_OWN; 2142 2143 txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts); 2144 
txDmaAddr &= 0x3fffffff; 2145 txDmaData = &(txDescCache.cmdsts); 2146 txDmaLen = sizeof(txDescCache.cmdsts); 2147 txDmaFree = dmaDescFree; 2148 2149 if (doTxDmaWrite()) 2150 goto exit; 2151 2152 } else { /* this packet is totally done */ 2153 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n"); 2154 /* deal with the the packet that just finished */ 2155 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) { 2156 IpPtr ip(txPacket); 2157 if (txDescCache.extsts & EXTSTS_UDPPKT) { 2158 UdpPtr udp(ip); 2159 udp->sum(0); 2160 udp->sum(cksum(udp)); 2161 txUdpChecksums++; 2162 } else if (txDescCache.extsts & EXTSTS_TCPPKT) { 2163 TcpPtr tcp(ip); 2164 tcp->sum(0); 2165 tcp->sum(cksum(tcp)); 2166 txTcpChecksums++; 2167 } 2168 if (txDescCache.extsts & EXTSTS_IPPKT) { 2169 ip->sum(0); 2170 ip->sum(cksum(ip)); 2171 txIpChecksums++; 2172 } 2173 } 2174 2175 txPacket->length = txPacketBufPtr - txPacket->data; 2176 // this is just because the receive can't handle a 2177 // packet bigger want to make sure 2178 assert(txPacket->length <= 1514); 2179#ifndef NDEBUG 2180 bool success = 2181#endif 2182 txFifo.push(txPacket); 2183 assert(success); 2184 2185 /* 2186 * this following section is not tqo spec, but 2187 * functionally shouldn't be any different. normally, 2188 * the chip will wait til the transmit has occurred 2189 * before writing back the descriptor because it has 2190 * to wait to see that it was successfully transmitted 2191 * to decide whether to set CMDSTS_OK or not. 
2192 * however, in the simulator since it is always 2193 * successfully transmitted, and writing it exactly to 2194 * spec would complicate the code, we just do it here 2195 */ 2196 2197 txDescCache.cmdsts &= ~CMDSTS_OWN; 2198 txDescCache.cmdsts |= CMDSTS_OK; 2199 2200 DPRINTF(EthernetDesc, 2201 "txDesc writeback: cmdsts=%08x extsts=%08x\n", 2202 txDescCache.cmdsts, txDescCache.extsts); 2203 2204 txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts); 2205 txDmaAddr &= 0x3fffffff; 2206 txDmaData = &(txDescCache.cmdsts); 2207 txDmaLen = sizeof(txDescCache.cmdsts) + 2208 sizeof(txDescCache.extsts); 2209 txDmaFree = dmaDescFree; 2210 2211 descDmaWrites++; 2212 descDmaWrBytes += txDmaLen; 2213 2214 transmit(); 2215 txPacket = 0; 2216 2217 if (!txEnable) { 2218 DPRINTF(EthernetSM, "halting TX state machine\n"); 2219 txState = txIdle; 2220 goto exit; 2221 } else 2222 txState = txAdvance; 2223 2224 if (doTxDmaWrite()) 2225 goto exit; 2226 } 2227 } else { 2228 DPRINTF(EthernetSM, "this descriptor isn't done yet\n"); 2229 if (!txFifo.full()) { 2230 txState = txFragRead; 2231 2232 /* 2233 * The number of bytes transferred is either whatever 2234 * is left in the descriptor (txDescCnt), or if there 2235 * is not enough room in the fifo, just whatever room 2236 * is left in the fifo 2237 */ 2238 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail()); 2239 2240 txDmaAddr = txFragPtr & 0x3fffffff; 2241 txDmaData = txPacketBufPtr; 2242 txDmaLen = txXferLen; 2243 txDmaFree = dmaDataFree; 2244 2245 if (doTxDmaRead()) 2246 goto exit; 2247 } else { 2248 txState = txFifoBlock; 2249 transmit(); 2250 2251 goto exit; 2252 } 2253 2254 } 2255 break; 2256 2257 case txFragRead: 2258 if (txDmaState != dmaIdle) 2259 goto exit; 2260 2261 txPacketBufPtr += txXferLen; 2262 txFragPtr += txXferLen; 2263 txDescCnt -= txXferLen; 2264 txFifo.reserve(txXferLen); 2265 2266 txState = txFifoBlock; 2267 break; 2268 2269 case txDescWrite: 2270 if (txDmaState != dmaIdle) 2271 goto exit; 2272 2273 if 
(txDescCache.cmdsts & CMDSTS_INTR) 2274 devIntrPost(ISR_TXDESC); 2275 2276 if (!txEnable) { 2277 DPRINTF(EthernetSM, "halting TX state machine\n"); 2278 txState = txIdle; 2279 goto exit; 2280 } else 2281 txState = txAdvance; 2282 break; 2283 2284 case txAdvance: 2285 if (txDescCache.link == 0) { 2286 devIntrPost(ISR_TXIDLE); 2287 txState = txIdle; 2288 goto exit; 2289 } else { 2290 txState = txDescRead; 2291 regs.txdp = txDescCache.link; 2292 CTDD = false; 2293 2294 txDmaAddr = txDescCache.link & 0x3fffffff; 2295 txDmaData = &txDescCache; 2296 txDmaLen = sizeof(ns_desc); 2297 txDmaFree = dmaDescFree; 2298 2299 if (doTxDmaRead()) 2300 goto exit; 2301 } 2302 break; 2303 2304 default: 2305 panic("invalid state"); 2306 } 2307 2308 DPRINTF(EthernetSM, "entering next txState=%s\n", 2309 NsTxStateStrings[txState]); 2310 goto next; 2311 2312 exit: 2313 /** 2314 * @todo do we want to schedule a future kick? 2315 */ 2316 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n", 2317 NsTxStateStrings[txState]); 2318 2319 if (clock && !txKickEvent.scheduled()) 2320 txKickEvent.schedule(txKickTick); 2321} 2322 2323/** 2324 * Advance the EEPROM state machine 2325 * Called on rising edge of EEPROM clock bit in MEAR 2326 */ 2327void 2328NSGigE::eepromKick() 2329{ 2330 switch (eepromState) { 2331 2332 case eepromStart: 2333 2334 // Wait for start bit 2335 if (regs.mear & MEAR_EEDI) { 2336 // Set up to get 2 opcode bits 2337 eepromState = eepromGetOpcode; 2338 eepromBitsToRx = 2; 2339 eepromOpcode = 0; 2340 } 2341 break; 2342 2343 case eepromGetOpcode: 2344 eepromOpcode <<= 1; 2345 eepromOpcode += (regs.mear & MEAR_EEDI) ? 
1 : 0; 2346 --eepromBitsToRx; 2347 2348 // Done getting opcode 2349 if (eepromBitsToRx == 0) { 2350 if (eepromOpcode != EEPROM_READ) 2351 panic("only EEPROM reads are implemented!"); 2352 2353 // Set up to get address 2354 eepromState = eepromGetAddress; 2355 eepromBitsToRx = 6; 2356 eepromAddress = 0; 2357 } 2358 break; 2359 2360 case eepromGetAddress: 2361 eepromAddress <<= 1; 2362 eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0; 2363 --eepromBitsToRx; 2364 2365 // Done getting address 2366 if (eepromBitsToRx == 0) { 2367 2368 if (eepromAddress >= EEPROM_SIZE) 2369 panic("EEPROM read access out of range!"); 2370 2371 switch (eepromAddress) { 2372 2373 case EEPROM_PMATCH2_ADDR: 2374 eepromData = rom.perfectMatch[5]; 2375 eepromData <<= 8; 2376 eepromData += rom.perfectMatch[4]; 2377 break; 2378 2379 case EEPROM_PMATCH1_ADDR: 2380 eepromData = rom.perfectMatch[3]; 2381 eepromData <<= 8; 2382 eepromData += rom.perfectMatch[2]; 2383 break; 2384 2385 case EEPROM_PMATCH0_ADDR: 2386 eepromData = rom.perfectMatch[1]; 2387 eepromData <<= 8; 2388 eepromData += rom.perfectMatch[0]; 2389 break; 2390 2391 default: 2392 panic("FreeBSD driver only uses EEPROM to read PMATCH!"); 2393 } 2394 // Set up to read data 2395 eepromState = eepromRead; 2396 eepromBitsToRx = 16; 2397 2398 // Clear data in bit 2399 regs.mear &= ~MEAR_EEDI; 2400 } 2401 break; 2402 2403 case eepromRead: 2404 // Clear Data Out bit 2405 regs.mear &= ~MEAR_EEDO; 2406 // Set bit to value of current EEPROM bit 2407 regs.mear |= (eepromData & 0x8000) ? 
MEAR_EEDO : 0x0; 2408 2409 eepromData <<= 1; 2410 --eepromBitsToRx; 2411 2412 // All done 2413 if (eepromBitsToRx == 0) { 2414 eepromState = eepromStart; 2415 } 2416 break; 2417 2418 default: 2419 panic("invalid EEPROM state"); 2420 } 2421 2422} 2423 2424void 2425NSGigE::transferDone() 2426{ 2427 if (txFifo.empty()) { 2428 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n"); 2429 return; 2430 } 2431 2432 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n"); 2433 2434 if (txEvent.scheduled()) 2435 txEvent.reschedule(curTick + cycles(1)); 2436 else 2437 txEvent.schedule(curTick + cycles(1)); 2438} 2439 2440bool 2441NSGigE::rxFilter(const PacketPtr &packet) 2442{ 2443 EthPtr eth = packet; 2444 bool drop = true; 2445 string type; 2446 2447 const EthAddr &dst = eth->dst(); 2448 if (dst.unicast()) { 2449 // If we're accepting all unicast addresses 2450 if (acceptUnicast) 2451 drop = false; 2452 2453 // If we make a perfect match 2454 if (acceptPerfect && dst == rom.perfectMatch) 2455 drop = false; 2456 2457 if (acceptArp && eth->type() == ETH_TYPE_ARP) 2458 drop = false; 2459 2460 } else if (dst.broadcast()) { 2461 // if we're accepting broadcasts 2462 if (acceptBroadcast) 2463 drop = false; 2464 2465 } else if (dst.multicast()) { 2466 // if we're accepting all multicasts 2467 if (acceptMulticast) 2468 drop = false; 2469 2470 // Multicast hashing faked - all packets accepted 2471 if (multicastHashEnable) 2472 drop = false; 2473 } 2474 2475 if (drop) { 2476 DPRINTF(Ethernet, "rxFilter drop\n"); 2477 DDUMP(EthernetData, packet->data, packet->length); 2478 } 2479 2480 return drop; 2481} 2482 2483bool 2484NSGigE::recvPacket(PacketPtr packet) 2485{ 2486 rxBytes += packet->length; 2487 rxPackets++; 2488 2489 DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n", 2490 rxFifo.avail()); 2491 2492 if (!rxEnable) { 2493 DPRINTF(Ethernet, "receive disabled...packet dropped\n"); 2494 interface->recvDone(); 2495 return true; 2496 } 
    // --- tail of NSGigE::recvPacket ---
    // Receive-side admission: a disabled filter accepts everything (the
    // packet is still dropped here, matching real-filter-off semantics of
    // this model), a filter hit drops silently, and a full rxFifo drops
    // with an RX-overrun interrupt.  Returning true tells the link the
    // packet was consumed; false asks for retry/backpressure.
    if (!rxFilterEnable) {
        DPRINTF(Ethernet,
            "receive packet filtering disabled . . . packet dropped\n");
        interface->recvDone();
        return true;
    }

    if (rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        interface->recvDone();
        return true;
    }

    if (rxFifo.avail() < packet->length) {
#if TRACING_ON
        // Trace-only peek at the dropped packet's IP/TCP headers.
        IpPtr ip(packet);
        TcpPtr tcp(ip);
        if (ip) {
            DPRINTF(Ethernet,
                "packet won't fit in receive buffer...pkt ID %d dropped\n",
                ip->id());
            if (tcp) {
                DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
            }
        }
#endif
        droppedPackets++;
        // Post receive-overrun; note we do NOT call recvDone() here.
        devIntrPost(ISR_RXORN);
        return false;
    }

    rxFifo.push(packet);
    interface->recvDone();

    // Kick the receive state machine so it starts draining the fifo.
    rxKick();
    return true;
}

//=====================================================================
//
//
/**
 * Write this device's state to a checkpoint stream.
 *
 * NOTE: the set, names, and order of the SERIALIZE_* calls below must
 * stay in lock step with the UNSERIALIZE_* calls in
 * NSGigE::unserialize(); a mismatch silently corrupts restored state.
 *
 * @param os checkpoint output stream
 */
void
NSGigE::serialize(ostream &os)
{
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
     */
    // If a DMA copy is still pending, complete it immediately so that no
    // in-flight event needs to be represented in the checkpoint.
    if (rxDmaReadEvent.scheduled())
        rxDmaReadCopy();
    if (rxDmaWriteEvent.scheduled())
        rxDmaWriteCopy();
    if (txDmaReadEvent.scheduled())
        txDmaReadCopy();
    if (txDmaWriteEvent.scheduled())
        txDmaWriteCopy();

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.brar);
    SERIALIZE_SCALAR(regs.brdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    // Receive-filter ROM: perfect-match MAC address and multicast hash.
    SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    rxFifo.serialize("rxFifo", os);
    txFifo.serialize("txFifo", os);

    /*
     * Serialize the various helper variables
     */
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        // Patch the packet length to reflect how much has actually been
        // written into the buffer so far before serializing its contents.
        txPacket->length = txPacketBufPtr - txPacket->data;
        txPacket->serialize("txPacket", os);
        // Save the buffer cursor as an offset so it can be rebuilt
        // against the freshly allocated packet on restore.
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        rxPacket->serialize("rxPacket", os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize DescCaches
     */
    SERIALIZE_SCALAR(txDescCache.link);
    SERIALIZE_SCALAR(txDescCache.bufptr);
    SERIALIZE_SCALAR(txDescCache.cmdsts);
    SERIALIZE_SCALAR(txDescCache.extsts);
    SERIALIZE_SCALAR(rxDescCache.link);
    SERIALIZE_SCALAR(rxDescCache.bufptr);
    SERIALIZE_SCALAR(rxDescCache.cmdsts);
    SERIALIZE_SCALAR(rxDescCache.extsts);
    SERIALIZE_SCALAR(extstsEnable);

    /*
     * Serialize tx state machine
     */
    // Enum states are widened to int for checkpointing; unserialize()
    // casts them back.
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);
    SERIALIZE_SCALAR(txKickTick);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFragPtr);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);
    SERIALIZE_SCALAR(rxKickTick);

    /*
     * Serialize EEPROM state machine
     */
    int eepromState = this->eepromState;
    SERIALIZE_SCALAR(eepromState);
    SERIALIZE_SCALAR(eepromClk);
    SERIALIZE_SCALAR(eepromBitsToRx);
    SERIALIZE_SCALAR(eepromOpcode);
    SERIALIZE_SCALAR(eepromAddress);
    SERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    // Stored relative to curTick so the restore can be rebased onto the
    // restored simulation's clock.
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);
    SERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    // NOTE: unlike transmitTick above, the interrupt event time is saved
    // as an absolute tick, not relative to curTick.
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}

/**
 * Restore this device's state from a checkpoint.
 *
 * Must read back exactly what NSGigE::serialize() wrote, in the same
 * order: device registers, filter ROM, fifos, in-flight packets, the
 * descriptor caches, the tx/rx/EEPROM state machines, pending events,
 * and the receive-filter / interrupt bookkeeping.
 *
 * @param cp      checkpoint to restore from
 * @param section checkpoint section holding this device's state
 */
void
NSGigE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Unserialize the PciDev base class
    PciDev::unserialize(cp, section);

    UNSERIALIZE_SCALAR(regs.command);
    UNSERIALIZE_SCALAR(regs.config);
    UNSERIALIZE_SCALAR(regs.mear);
    UNSERIALIZE_SCALAR(regs.ptscr);
    UNSERIALIZE_SCALAR(regs.isr);
    UNSERIALIZE_SCALAR(regs.imr);
    UNSERIALIZE_SCALAR(regs.ier);
    UNSERIALIZE_SCALAR(regs.ihr);
    UNSERIALIZE_SCALAR(regs.txdp);
    UNSERIALIZE_SCALAR(regs.txdp_hi);
    UNSERIALIZE_SCALAR(regs.txcfg);
    UNSERIALIZE_SCALAR(regs.gpior);
    UNSERIALIZE_SCALAR(regs.rxdp);
    UNSERIALIZE_SCALAR(regs.rxdp_hi);
    UNSERIALIZE_SCALAR(regs.rxcfg);
    UNSERIALIZE_SCALAR(regs.pqcr);
    UNSERIALIZE_SCALAR(regs.wcsr);
    UNSERIALIZE_SCALAR(regs.pcr);
    UNSERIALIZE_SCALAR(regs.rfcr);
    UNSERIALIZE_SCALAR(regs.rfdr);
    UNSERIALIZE_SCALAR(regs.brar);
    UNSERIALIZE_SCALAR(regs.brdr);
    UNSERIALIZE_SCALAR(regs.srr);
    UNSERIALIZE_SCALAR(regs.mibc);
    UNSERIALIZE_SCALAR(regs.vrcr);
    UNSERIALIZE_SCALAR(regs.vtcr);
    UNSERIALIZE_SCALAR(regs.vdr);
    UNSERIALIZE_SCALAR(regs.ccsr);
    UNSERIALIZE_SCALAR(regs.tbicr);
    UNSERIALIZE_SCALAR(regs.tbisr);
    UNSERIALIZE_SCALAR(regs.tanar);
    UNSERIALIZE_SCALAR(regs.tanlpar);
    UNSERIALIZE_SCALAR(regs.taner);
    UNSERIALIZE_SCALAR(regs.tesr);

    UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    UNSERIALIZE_SCALAR(ioEnable);

    /*
     * unserialize the data fifos
     */
    rxFifo.unserialize("rxFifo", cp, section);
    txFifo.unserialize("txFifo", cp, section);

    /*
     * unserialize the various helper variables
     */
    bool txPacketExists;
    UNSERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        // 16384 matches the fixed buffer size used when the device
        // originally allocated the packet.
        txPacket = new PacketData(16384);
        txPacket->unserialize("txPacket", cp, section);
        uint32_t txPktBufPtr;
        UNSERIALIZE_SCALAR(txPktBufPtr);
        // Rebuild the raw cursor from the saved offset into the new buffer.
        txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
    } else
        txPacket = 0;

    bool rxPacketExists;
    UNSERIALIZE_SCALAR(rxPacketExists);
    rxPacket = 0;
    if (rxPacketExists) {
        rxPacket = new PacketData(16384);
        rxPacket->unserialize("rxPacket", cp, section);
        uint32_t rxPktBufPtr;
        UNSERIALIZE_SCALAR(rxPktBufPtr);
        rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
    } else
        rxPacket = 0;

    UNSERIALIZE_SCALAR(txXferLen);
    UNSERIALIZE_SCALAR(rxXferLen);

    /*
     * Unserialize DescCaches
     */
    UNSERIALIZE_SCALAR(txDescCache.link);
    UNSERIALIZE_SCALAR(txDescCache.bufptr);
    UNSERIALIZE_SCALAR(txDescCache.cmdsts);
    UNSERIALIZE_SCALAR(txDescCache.extsts);
    UNSERIALIZE_SCALAR(rxDescCache.link);
    UNSERIALIZE_SCALAR(rxDescCache.bufptr);
    UNSERIALIZE_SCALAR(rxDescCache.cmdsts);
    UNSERIALIZE_SCALAR(rxDescCache.extsts);
    UNSERIALIZE_SCALAR(extstsEnable);

    /*
     * unserialize tx state machine
     */
    // States were checkpointed as ints (see serialize()); cast back to
    // the enum types here.
    int txState;
    UNSERIALIZE_SCALAR(txState);
    this->txState = (TxState) txState;
    UNSERIALIZE_SCALAR(txEnable);
    UNSERIALIZE_SCALAR(CTDD);
    UNSERIALIZE_SCALAR(txFragPtr);
    UNSERIALIZE_SCALAR(txDescCnt);
    int txDmaState;
    UNSERIALIZE_SCALAR(txDmaState);
    this->txDmaState = (DmaState) txDmaState;
    UNSERIALIZE_SCALAR(txKickTick);
    // A nonzero kick tick means the state machine was waiting to run;
    // reschedule the kick event at the saved (absolute) time.
    if (txKickTick)
        txKickEvent.schedule(txKickTick);

    /*
     * unserialize rx state machine
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    this->rxState = (RxState) rxState;
    UNSERIALIZE_SCALAR(rxEnable);
    UNSERIALIZE_SCALAR(CRDD);
    UNSERIALIZE_SCALAR(rxPktBytes);
    UNSERIALIZE_SCALAR(rxFragPtr);
    UNSERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState;
    UNSERIALIZE_SCALAR(rxDmaState);
    this->rxDmaState = (DmaState) rxDmaState;
    UNSERIALIZE_SCALAR(rxKickTick);
    if (rxKickTick)
        rxKickEvent.schedule(rxKickTick);

    /*
     * Unserialize EEPROM state machine
     */
    int eepromState;
    UNSERIALIZE_SCALAR(eepromState);
    this->eepromState = (EEPROMState) eepromState;
    UNSERIALIZE_SCALAR(eepromClk);
    UNSERIALIZE_SCALAR(eepromBitsToRx);
    UNSERIALIZE_SCALAR(eepromOpcode);
    UNSERIALIZE_SCALAR(eepromAddress);
    UNSERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, reschedule it now
     */
    // transmitTick was saved relative to the old curTick, so rebase it.
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        txEvent.schedule(curTick + transmitTick);

    /*
     * unserialize receive address filter settings
     */
    UNSERIALIZE_SCALAR(rxFilterEnable);
    UNSERIALIZE_SCALAR(acceptBroadcast);
    UNSERIALIZE_SCALAR(acceptMulticast);
    UNSERIALIZE_SCALAR(acceptUnicast);
    UNSERIALIZE_SCALAR(acceptPerfect);
    UNSERIALIZE_SCALAR(acceptArp);
    UNSERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    UNSERIALIZE_SCALAR(intrTick);
    UNSERIALIZE_SCALAR(cpuPendingIntr);
    // intrEventTick was saved as an absolute tick; a nonzero value means
    // an interrupt-delivery event was pending and must be recreated.
    Tick intrEventTick;
    UNSERIALIZE_SCALAR(intrEventTick);
    if (intrEventTick) {
        intrEvent = new IntrEvent(this, true);
        intrEvent->schedule(intrEventTick);
    }

    /*
     * re-add addrRanges to bus bridges
     */
    // Address-range registrations are not checkpointed, so republish the
    // two BAR-backed ranges to the PIO interface after a restore.
    if (pioInterface) {
        pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
        pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));
    }
}

/**
 * Timing-model hook for programmed I/O accesses: logs the access and
 * charges a fixed pioLatency.
 *
 * @param req the memory request being timed
 * @return tick at which the access completes (now + pioLatency)
 */
Tick
NSGigE::cacheAccess(MemReqPtr &req)
{
    DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
            req->paddr, req->paddr - addr);
    return curTick + pioLatency;
}

//
// Simulator-object glue for the NSGigEInt ethernet interface.
//
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    SimObjectParam<EtherInt *> peer;
    SimObjectParam<NSGigE *> device;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

CREATE_SIM_OBJECT(NSGigEInt)
{
    NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);

    // If a peer interface was configured, wire the two ends together.
    EtherInt *p = (EtherInt *)peer;
    if (p) {
        dev_int->setPeer(p);
        p->setPeer(dev_int);
    }

    return dev_int;
}

REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)


//
// Simulator-object glue for the NSGigE device itself: configurable
// parameters exposed to the configuration system.
//
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    Param<Addr> addr;
    Param<Tick> clock;
    Param<Tick> tx_delay;
    Param<Tick> rx_delay;
    Param<Tick> intr_delay;
    SimObjectParam<MemoryController *> mmu;
    SimObjectParam<PhysicalMemory *> physmem;
    Param<bool> rx_filter;
    Param<string> hardware_address;
    SimObjectParam<Bus*> io_bus;
    SimObjectParam<Bus*> payload_bus;
    SimObjectParam<HierParams *> hier;
    Param<Tick> pio_latency;
    Param<bool> dma_desc_free;
    Param<bool> dma_data_free;
    Param<Tick>
dma_read_delay;
    Param<Tick> dma_write_delay;
    Param<Tick> dma_read_factor;
    Param<Tick> dma_write_factor;
    SimObjectParam<PciConfigAll *> configspace;
    SimObjectParam<PciConfigData *> configdata;
    SimObjectParam<Platform *> platform;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;
    Param<uint32_t> tx_fifo_size;
    Param<uint32_t> rx_fifo_size;
    Param<uint32_t> m5reg;
    Param<bool> dma_no_allocate;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

// Parameter descriptions and defaults; names must match the declarations
// above and the Python configuration that sets them.
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)

    INIT_PARAM(addr, "Device Address"),
    INIT_PARAM(clock, "State machine processor frequency"),
    INIT_PARAM(tx_delay, "Transmit Delay"),
    INIT_PARAM(rx_delay, "Receive Delay"),
    INIT_PARAM(intr_delay, "Interrupt Delay in microseconds"),
    INIT_PARAM(mmu, "Memory Controller"),
    INIT_PARAM(physmem, "Physical Memory"),
    INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true),
    INIT_PARAM(hardware_address, "Ethernet Hardware Address"),
    INIT_PARAM_DFLT(io_bus, "The IO Bus to attach to for headers", NULL),
    INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL),
    INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams),
    INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
    INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false),
    INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false),
    INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0),
    INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0),
    INIT_PARAM(configspace, "PCI Configspace"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(platform, "Platform"),
    INIT_PARAM(pci_bus, "PCI bus"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),
    INIT_PARAM_DFLT(tx_fifo_size, "max size in bytes of txFifo", 131072),
    INIT_PARAM_DFLT(rx_fifo_size, "max size in bytes of rxFifo", 131072),
    INIT_PARAM(m5reg, "m5 register"),
    INIT_PARAM_DFLT(dma_no_allocate, "Should DMA reads allocate cache lines", true)

END_INIT_SIM_OBJECT_PARAMS(NSGigE)


// Factory: marshal the configuration parameters into an NSGigE::Params
// struct and construct the device.  The Params object is owned by the
// created NSGigE (PciDev base) thereafter.
CREATE_SIM_OBJECT(NSGigE)
{
    NSGigE::Params *params = new NSGigE::Params;

    // Identity / PCI placement.
    params->name = getInstanceName();
    params->mmu = mmu;
    params->configSpace = configspace;
    params->configData = configdata;
    params->plat = platform;
    params->busNum = pci_bus;
    params->deviceNum = pci_dev;
    params->functionNum = pci_func;

    // Timing, memory-system attachment, and device behavior knobs.
    params->clock = clock;
    params->intr_delay = intr_delay;
    params->pmem = physmem;
    params->tx_delay = tx_delay;
    params->rx_delay = rx_delay;
    params->hier = hier;
    params->header_bus = io_bus;
    params->payload_bus = payload_bus;
    params->pio_latency = pio_latency;
    params->dma_desc_free = dma_desc_free;
    params->dma_data_free = dma_data_free;
    params->dma_read_delay = dma_read_delay;
    params->dma_write_delay = dma_write_delay;
    params->dma_read_factor = dma_read_factor;
    params->dma_write_factor = dma_write_factor;
    params->rx_filter = rx_filter;
    params->eaddr = hardware_address;
    params->tx_fifo_size = tx_fifo_size;
    params->rx_fifo_size = rx_fifo_size;
    params->m5reg = m5reg;
    params->dma_no_allocate = dma_no_allocate;
    return new NSGigE(params);
}

REGISTER_SIM_OBJECT("NSGigE", NSGigE)