// ns_gige.cc revision 1762
1/* 2 * Copyright (c) 2004-2005 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29/** @file 30 * Device module for modelling the National Semiconductor 31 * DP83820 ethernet controller. 
 * Does not support priority queueing
 */
#include <cstdio>
#include <deque>
#include <string>

#include "base/inet.hh"
#include "cpu/exec_context.hh"
#include "dev/etherlink.hh"
#include "dev/ns_gige.hh"
#include "dev/pciconfigall.hh"
#include "mem/bus/bus.hh"
#include "mem/bus/dma_interface.hh"
#include "mem/bus/pio_interface.hh"
#include "mem/bus/pio_interface_impl.hh"
#include "mem/functional/memory_control.hh"
#include "mem/functional/physical.hh"
#include "sim/builder.hh"
#include "sim/debug.hh"
#include "sim/host.hh"
#include "sim/stats.hh"
#include "targetarch/vtophys.hh"

// Printable names for the receive state machine states, indexed by RxState.
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};

// Printable names for the transmit state machine states, indexed by TxState.
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};

// Printable names for the DMA engine states, indexed by DmaState.
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};

using namespace std;
using namespace Net;

///////////////////////////////////////////////////////////////////////
//
// NSGigE PCI Device
//
// Constructor: initializes all device state from the simulation
// parameters, wires up the PIO and DMA bus interfaces (header bus
// preferred, falling back to the payload bus), resets the register
// file, and loads the perfect-match filter with the configured
// ethernet address.
NSGigE::NSGigE(Params *p)
    : PciDev(p), ioEnable(false),
      txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), clock(p->clock),
      txState(txIdle), txEnable(false), CTDD(false),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      rxEnable(false), CRDD(false), rxPktBytes(0),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
      txDelay(p->tx_delay), rxDelay(p->rx_delay),
      rxKickTick(0), txKickTick(0),
      txEvent(this), rxFilterEnable(p->rx_filter), acceptBroadcast(false),
      acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false),
      physmem(p->pmem), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{
    // Prefer the header bus for PIO; DMA goes header->payload if both
    // exist, otherwise whichever single bus was configured.
    if (p->header_bus) {
        pioInterface = newPioInterface(name(), p->hier,
                                       p->header_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = p->pio_latency * p->header_bus->clockRate;

        if (p->payload_bus)
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->payload_bus, 1,
                                                 p->dma_no_allocate);
        else
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->header_bus, 1,
                                                 p->dma_no_allocate);
    } else if (p->payload_bus) {
        pioInterface = newPioInterface(name(), p->hier,
                                       p->payload_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = p->pio_latency * p->payload_bus->clockRate;

        dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                             p->payload_bus,
                                             p->payload_bus, 1,
                                             p->dma_no_allocate);
    }


    intrDelay = p->intr_delay;
    dmaReadDelay = p->dma_read_delay;
    dmaWriteDelay = p->dma_write_delay;
    dmaReadFactor = p->dma_read_factor;
    dmaWriteFactor = p->dma_write_factor;

    regsReset();
    // Seed the perfect-match filter with this device's MAC address.
    memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN);
}

NSGigE::~NSGigE()
{}

// Register all statistics with the stats package.  Counters are
// declared here; derived formulas are wired up at the end of the
// function (continued below).
void
NSGigE::regStats()
{
    txBytes
        .name(name() + ".txBytes")
        .desc("Bytes Transmitted")
        .prereq(txBytes)
        ;

    rxBytes
        .name(name() + ".rxBytes")
        .desc("Bytes Received")
        .prereq(rxBytes)
        ;

    txPackets
        .name(name() + ".txPackets")
        .desc("Number of Packets Transmitted")
        .prereq(txBytes)
        ;

    rxPackets
        .name(name() + ".rxPackets")
        .desc("Number of Packets Received")
        .prereq(rxBytes)
        ;

    txIpChecksums
        .name(name() + ".txIpChecksums")
        .desc("Number of tx IP Checksums done by device")
        .precision(0)
.prereq(txBytes) 188 ; 189 190 rxIpChecksums 191 .name(name() + ".rxIpChecksums") 192 .desc("Number of rx IP Checksums done by device") 193 .precision(0) 194 .prereq(rxBytes) 195 ; 196 197 txTcpChecksums 198 .name(name() + ".txTcpChecksums") 199 .desc("Number of tx TCP Checksums done by device") 200 .precision(0) 201 .prereq(txBytes) 202 ; 203 204 rxTcpChecksums 205 .name(name() + ".rxTcpChecksums") 206 .desc("Number of rx TCP Checksums done by device") 207 .precision(0) 208 .prereq(rxBytes) 209 ; 210 211 txUdpChecksums 212 .name(name() + ".txUdpChecksums") 213 .desc("Number of tx UDP Checksums done by device") 214 .precision(0) 215 .prereq(txBytes) 216 ; 217 218 rxUdpChecksums 219 .name(name() + ".rxUdpChecksums") 220 .desc("Number of rx UDP Checksums done by device") 221 .precision(0) 222 .prereq(rxBytes) 223 ; 224 225 descDmaReads 226 .name(name() + ".descDMAReads") 227 .desc("Number of descriptors the device read w/ DMA") 228 .precision(0) 229 ; 230 231 descDmaWrites 232 .name(name() + ".descDMAWrites") 233 .desc("Number of descriptors the device wrote w/ DMA") 234 .precision(0) 235 ; 236 237 descDmaRdBytes 238 .name(name() + ".descDmaReadBytes") 239 .desc("number of descriptor bytes read w/ DMA") 240 .precision(0) 241 ; 242 243 descDmaWrBytes 244 .name(name() + ".descDmaWriteBytes") 245 .desc("number of descriptor bytes write w/ DMA") 246 .precision(0) 247 ; 248 249 txBandwidth 250 .name(name() + ".txBandwidth") 251 .desc("Transmit Bandwidth (bits/s)") 252 .precision(0) 253 .prereq(txBytes) 254 ; 255 256 rxBandwidth 257 .name(name() + ".rxBandwidth") 258 .desc("Receive Bandwidth (bits/s)") 259 .precision(0) 260 .prereq(rxBytes) 261 ; 262 263 totBandwidth 264 .name(name() + ".totBandwidth") 265 .desc("Total Bandwidth (bits/s)") 266 .precision(0) 267 .prereq(totBytes) 268 ; 269 270 totPackets 271 .name(name() + ".totPackets") 272 .desc("Total Packets") 273 .precision(0) 274 .prereq(totBytes) 275 ; 276 277 totBytes 278 .name(name() + ".totBytes") 279 .desc("Total 
Bytes") 280 .precision(0) 281 .prereq(totBytes) 282 ; 283 284 totPacketRate 285 .name(name() + ".totPPS") 286 .desc("Total Tranmission Rate (packets/s)") 287 .precision(0) 288 .prereq(totBytes) 289 ; 290 291 txPacketRate 292 .name(name() + ".txPPS") 293 .desc("Packet Tranmission Rate (packets/s)") 294 .precision(0) 295 .prereq(txBytes) 296 ; 297 298 rxPacketRate 299 .name(name() + ".rxPPS") 300 .desc("Packet Reception Rate (packets/s)") 301 .precision(0) 302 .prereq(rxBytes) 303 ; 304 305 postedSwi 306 .name(name() + ".postedSwi") 307 .desc("number of software interrupts posted to CPU") 308 .precision(0) 309 ; 310 311 totalSwi 312 .name(name() + ".totalSwi") 313 .desc("number of total Swi written to ISR") 314 .precision(0) 315 ; 316 317 coalescedSwi 318 .name(name() + ".coalescedSwi") 319 .desc("average number of Swi's coalesced into each post") 320 .precision(0) 321 ; 322 323 postedRxIdle 324 .name(name() + ".postedRxIdle") 325 .desc("number of rxIdle interrupts posted to CPU") 326 .precision(0) 327 ; 328 329 totalRxIdle 330 .name(name() + ".totalRxIdle") 331 .desc("number of total RxIdle written to ISR") 332 .precision(0) 333 ; 334 335 coalescedRxIdle 336 .name(name() + ".coalescedRxIdle") 337 .desc("average number of RxIdle's coalesced into each post") 338 .precision(0) 339 ; 340 341 postedRxOk 342 .name(name() + ".postedRxOk") 343 .desc("number of RxOk interrupts posted to CPU") 344 .precision(0) 345 ; 346 347 totalRxOk 348 .name(name() + ".totalRxOk") 349 .desc("number of total RxOk written to ISR") 350 .precision(0) 351 ; 352 353 coalescedRxOk 354 .name(name() + ".coalescedRxOk") 355 .desc("average number of RxOk's coalesced into each post") 356 .precision(0) 357 ; 358 359 postedRxDesc 360 .name(name() + ".postedRxDesc") 361 .desc("number of RxDesc interrupts posted to CPU") 362 .precision(0) 363 ; 364 365 totalRxDesc 366 .name(name() + ".totalRxDesc") 367 .desc("number of total RxDesc written to ISR") 368 .precision(0) 369 ; 370 371 coalescedRxDesc 372 
.name(name() + ".coalescedRxDesc") 373 .desc("average number of RxDesc's coalesced into each post") 374 .precision(0) 375 ; 376 377 postedTxOk 378 .name(name() + ".postedTxOk") 379 .desc("number of TxOk interrupts posted to CPU") 380 .precision(0) 381 ; 382 383 totalTxOk 384 .name(name() + ".totalTxOk") 385 .desc("number of total TxOk written to ISR") 386 .precision(0) 387 ; 388 389 coalescedTxOk 390 .name(name() + ".coalescedTxOk") 391 .desc("average number of TxOk's coalesced into each post") 392 .precision(0) 393 ; 394 395 postedTxIdle 396 .name(name() + ".postedTxIdle") 397 .desc("number of TxIdle interrupts posted to CPU") 398 .precision(0) 399 ; 400 401 totalTxIdle 402 .name(name() + ".totalTxIdle") 403 .desc("number of total TxIdle written to ISR") 404 .precision(0) 405 ; 406 407 coalescedTxIdle 408 .name(name() + ".coalescedTxIdle") 409 .desc("average number of TxIdle's coalesced into each post") 410 .precision(0) 411 ; 412 413 postedTxDesc 414 .name(name() + ".postedTxDesc") 415 .desc("number of TxDesc interrupts posted to CPU") 416 .precision(0) 417 ; 418 419 totalTxDesc 420 .name(name() + ".totalTxDesc") 421 .desc("number of total TxDesc written to ISR") 422 .precision(0) 423 ; 424 425 coalescedTxDesc 426 .name(name() + ".coalescedTxDesc") 427 .desc("average number of TxDesc's coalesced into each post") 428 .precision(0) 429 ; 430 431 postedRxOrn 432 .name(name() + ".postedRxOrn") 433 .desc("number of RxOrn posted to CPU") 434 .precision(0) 435 ; 436 437 totalRxOrn 438 .name(name() + ".totalRxOrn") 439 .desc("number of total RxOrn written to ISR") 440 .precision(0) 441 ; 442 443 coalescedRxOrn 444 .name(name() + ".coalescedRxOrn") 445 .desc("average number of RxOrn's coalesced into each post") 446 .precision(0) 447 ; 448 449 coalescedTotal 450 .name(name() + ".coalescedTotal") 451 .desc("average number of interrupts coalesced into each post") 452 .precision(0) 453 ; 454 455 postedInterrupts 456 .name(name() + ".postedInterrupts") 457 .desc("number of 
posts to CPU") 458 .precision(0) 459 ; 460 461 droppedPackets 462 .name(name() + ".droppedPackets") 463 .desc("number of packets dropped") 464 .precision(0) 465 ; 466 467 coalescedSwi = totalSwi / postedInterrupts; 468 coalescedRxIdle = totalRxIdle / postedInterrupts; 469 coalescedRxOk = totalRxOk / postedInterrupts; 470 coalescedRxDesc = totalRxDesc / postedInterrupts; 471 coalescedTxOk = totalTxOk / postedInterrupts; 472 coalescedTxIdle = totalTxIdle / postedInterrupts; 473 coalescedTxDesc = totalTxDesc / postedInterrupts; 474 coalescedRxOrn = totalRxOrn / postedInterrupts; 475 476 coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc + totalTxOk 477 + totalTxIdle + totalTxDesc + totalRxOrn) / postedInterrupts; 478 479 txBandwidth = txBytes * Stats::constant(8) / simSeconds; 480 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds; 481 totBandwidth = txBandwidth + rxBandwidth; 482 totBytes = txBytes + rxBytes; 483 totPackets = txPackets + rxPackets; 484 485 txPacketRate = txPackets / simSeconds; 486 rxPacketRate = rxPackets / simSeconds; 487} 488 489/** 490 * This is to read the PCI general configuration registers 491 */ 492void 493NSGigE::ReadConfig(int offset, int size, uint8_t *data) 494{ 495 if (offset < PCI_DEVICE_SPECIFIC) 496 PciDev::ReadConfig(offset, size, data); 497 else 498 panic("Device specific PCI config space not implemented!\n"); 499} 500 501/** 502 * This is to write to the PCI general configuration registers 503 */ 504void 505NSGigE::WriteConfig(int offset, int size, uint32_t data) 506{ 507 if (offset < PCI_DEVICE_SPECIFIC) 508 PciDev::WriteConfig(offset, size, data); 509 else 510 panic("Device specific PCI config space not implemented!\n"); 511 512 // Need to catch writes to BARs to update the PIO interface 513 switch (offset) { 514 // seems to work fine without all these PCI settings, but i 515 // put in the IO to double check, an assertion will fail if we 516 // need to properly implement it 517 case PCI_COMMAND: 518 if 
        (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;

#if 0
        if (config.data[offset] & PCI_CMD_BME) {
            bmEnabled = true;
        }
        else {
            bmEnabled = false;
        }

        if (config.data[offset] & PCI_CMD_MSE) {
            memEnable = true;
        }
        else {
            memEnable = false;
        }
#endif
        break;

      case PCI0_BASE_ADDR0:
        if (BARAddrs[0] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));

            // strip off the cached-space bits so PIO hits this device
            BARAddrs[0] &= EV5::PAddrUncachedMask;
        }
        break;
      case PCI0_BASE_ADDR1:
        if (BARAddrs[1] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));

            BARAddrs[1] &= EV5::PAddrUncachedMask;
        }
        break;
    }
}

/**
 * This reads the device registers, which are detailed in the NS83820
 * spec sheet
 */
Fault
NSGigE::read(MemReqPtr &req, uint8_t *data)
{
    assert(ioEnable);

    //The mask is to give you only the offset into the device register file
    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);


    // there are some reserved registers, you can see ns_gige_reg.h and
    // the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        ReadConfig(daddr & 0xff, req->size, data);
        return No_Fault;
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's. hopefully the kernel
        // doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        uint32_t &reg = *(uint32_t *) data;
        reg = 0;
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    switch (req->size) {
      case sizeof(uint32_t):
        {
            uint32_t &reg = *(uint32_t *)data;

            switch (daddr) {
              case CR:
                reg = regs.command;
                //these are supposed to be cleared on a read
                reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
                break;

              case CFGR:
                reg = regs.config;
                break;

              case MEAR:
                reg = regs.mear;
                break;

              case PTSCR:
                reg = regs.ptscr;
                break;

              case ISR:
                reg = regs.isr;
                // reading ISR acknowledges (clears) all interrupts
                devIntrClear(ISR_ALL);
                break;

              case IMR:
                reg = regs.imr;
                break;

              case IER:
                reg = regs.ier;
                break;

              case IHR:
                reg = regs.ihr;
                break;

              case TXDP:
                reg = regs.txdp;
                break;

              case TXDP_HI:
                reg = regs.txdp_hi;
                break;

              case TX_CFG:
                reg = regs.txcfg;
                break;

              case GPIOR:
                reg = regs.gpior;
                break;

              case RXDP:
                reg = regs.rxdp;
                break;

              case RXDP_HI:
                reg = regs.rxdp_hi;
                break;

              case RX_CFG:
                reg = regs.rxcfg;
                break;

              case PQCR:
                reg = regs.pqcr;
                break;

              case WCSR:
                reg = regs.wcsr;
                break;

              case PCR:
                reg = regs.pcr;
                break;

                // see the spec sheet for how RFCR and RFDR work
                // basically, you write to RFCR to tell the machine
                // what you want to do next, then you act upon RFDR,
                // and the device will be prepared b/c of what you
                // wrote to RFCR
              case RFCR:
                reg = regs.rfcr;
                break;

              case RFDR:
                // RFCR_RFADDR selects which 16-bit slice of the
                // perfect-match MAC address is returned
                switch (regs.rfcr & RFCR_RFADDR) {
                  case 0x000:
                    reg = rom.perfectMatch[1];
                    reg = reg << 8;
                    reg += rom.perfectMatch[0];
                    break;
                  case 0x002:
                    reg = rom.perfectMatch[3] << 8;
                    reg += rom.perfectMatch[2];
                    break;
                  case 0x004:
                    reg = rom.perfectMatch[5] << 8;
                    reg += rom.perfectMatch[4];
                    break;
                  default:
                    panic("reading RFDR for something other than PMATCH!\n");
                    // didn't implement other RFDR functionality b/c
                    // driver didn't use it
                }
                break;

              case SRR:
                reg = regs.srr;
                break;

              case MIBC:
                reg = regs.mibc;
                reg &= ~(MIBC_MIBS | MIBC_ACLR);
                break;

              case VRCR:
                reg = regs.vrcr;
                break;

              case VTCR:
                reg = regs.vtcr;
                break;

              case VDR:
                reg = regs.vdr;
                break;

              case CCSR:
                reg = regs.ccsr;
                break;

              case TBICR:
                reg = regs.tbicr;
                break;

              case TBISR:
                reg = regs.tbisr;
                break;

              case TANAR:
                reg = regs.tanar;
                break;

              case TANLPAR:
                reg = regs.tanlpar;
                break;

              case TANER:
                reg = regs.taner;
                break;

              case TESR:
                reg = regs.tesr;
                break;

              case M5REG:
                // simulator-specific register, not part of the real chip
                reg = params()->m5reg;
                break;

              default:
                panic("reading unimplemented register: addr=%#x", daddr);
            }

            DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
                    daddr, reg, reg);
        }
        break;

      default:
        panic("accessing register with invalid size: addr=%#x, size=%d",
              daddr, req->size);
    }

    return No_Fault;
}

// Handle a PIO write to the device register file; only 32-bit
// accesses are supported (continued below).
Fault
NSGigE::write(MemReqPtr &req, const uint8_t *data)
{
    assert(ioEnable);

    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);

    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        WriteConfig(daddr & 0xff, req->size, *(uint32_t *)data);
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    if (req->size == sizeof(uint32_t)) {
        uint32_t reg = *(uint32_t *)data;
        DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg,
                reg);

        switch (daddr) {
          case CR:
            regs.command = reg;
            // disable takes priority over enable for each direction
            if (reg & CR_TXD) {
                txEnable = false;
            } else if (reg & CR_TXE) {
                txEnable = true;

                // the kernel is enabling the transmit machine
                if (txState == txIdle)
                    txKick();
            }

            if (reg & CR_RXD) {
                rxEnable = false;
            } else if (reg & CR_RXE) {
                rxEnable = true;

                if (rxState == rxIdle)
                    rxKick();
            }

            if (reg & CR_TXR)
                txReset();

            if (reg & CR_RXR)
                rxReset();

            if (reg & CR_SWI)
                devIntrPost(ISR_SWI);

            if (reg & CR_RST) {
                txReset();
                rxReset();

                regsReset();
            }
            break;

          case CFGR:
            if (reg & CFGR_LNKSTS ||
                reg & CFGR_SPDSTS ||
                reg & CFGR_DUPSTS ||
                reg & CFGR_RESERVED ||
                reg & CFGR_T64ADDR ||
                reg & CFGR_PCI64_DET)
                panic("writing to read-only or reserved CFGR bits!\n");

            regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
                                   CFGR_RESERVED | CFGR_T64ADDR | CFGR_PCI64_DET);

// all these #if 0's are because i don't THINK the kernel needs to
// have these implemented. if there is a problem relating to one of
// these, you may need to add functionality in.
#if 0
            if (reg & CFGR_TBI_EN) ;
            if (reg & CFGR_MODE_1000) ;
#endif

            if (reg & CFGR_AUTO_1000)
                panic("CFGR_AUTO_1000 not implemented!\n");

#if 0
            if (reg & CFGR_PINT_DUPSTS ||
                reg & CFGR_PINT_LNKSTS ||
                reg & CFGR_PINT_SPDSTS)
                ;

            if (reg & CFGR_TMRTEST) ;
            if (reg & CFGR_MRM_DIS) ;
            if (reg & CFGR_MWI_DIS) ;

            if (reg & CFGR_T64ADDR)
                panic("CFGR_T64ADDR is read only register!\n");

            if (reg & CFGR_PCI64_DET)
                panic("CFGR_PCI64_DET is read only register!\n");

            if (reg & CFGR_DATA64_EN) ;
            if (reg & CFGR_M64ADDR) ;
            if (reg & CFGR_PHY_RST) ;
            if (reg & CFGR_PHY_DIS) ;
#endif

            if (reg & CFGR_EXTSTS_EN)
                extstsEnable = true;
            else
                extstsEnable = false;

#if 0
            if (reg & CFGR_REQALG) ;
            if (reg & CFGR_SB) ;
            if (reg & CFGR_POW) ;
            if (reg & CFGR_EXD) ;
            if (reg & CFGR_PESEL) ;
            if (reg & CFGR_BROM_DIS) ;
            if (reg & CFGR_EXT_125) ;
            if (reg & CFGR_BEM) ;
#endif
            break;

          case MEAR:
            regs.mear = reg;
            // since phy is completely faked, MEAR_MD* don't matter
            // and since the driver never uses MEAR_EE*, they don't
            // matter
#if 0
            if (reg & MEAR_EEDI) ;
            if (reg & MEAR_EEDO) ; // this one is read only
            if (reg & MEAR_EECLK) ;
            if (reg & MEAR_EESEL) ;
            if (reg & MEAR_MDIO) ;
            if (reg & MEAR_MDDIR) ;
            if (reg & MEAR_MDC) ;
#endif
            break;

          case PTSCR:
            regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
            // these control BISTs for various parts of chip - we
            // don't care or do just fake that the BIST is done
            if (reg & PTSCR_RBIST_EN)
                regs.ptscr |= PTSCR_RBIST_DONE;
            if (reg & PTSCR_EEBIST_EN)
                regs.ptscr &= ~PTSCR_EEBIST_EN;
            if (reg & PTSCR_EELOAD_EN)
                regs.ptscr &= ~PTSCR_EELOAD_EN;
            break;

          case ISR: /* writing to the ISR has no effect */
            panic("ISR is a read only register!\n");

          case IMR:
            regs.imr = reg;
            devIntrChangeMask();
            break;

          case IER:
            regs.ier = reg;
            break;

          case IHR:
            regs.ihr = reg;
            /* not going to implement real interrupt holdoff */
            break;

          case TXDP:
            // descriptor pointers are dword aligned
            regs.txdp = (reg & 0xFFFFFFFC);
            assert(txState == txIdle);
            CTDD = false;
            break;

          case TXDP_HI:
            regs.txdp_hi = reg;
            break;

          case TX_CFG:
            regs.txcfg = reg;
#if 0
            if (reg & TX_CFG_CSI) ;
            if (reg & TX_CFG_HBI) ;
            if (reg & TX_CFG_MLB) ;
            if (reg & TX_CFG_ATP) ;
            if (reg & TX_CFG_ECRETRY) {
                /*
                 * this could easily be implemented, but considering
                 * the network is just a fake pipe, wouldn't make
                 * sense to do this
                 */
            }

            if (reg & TX_CFG_BRST_DIS) ;
#endif

#if 0
            /* we handle our own DMA, ignore the kernel's exhortations */
            if (reg & TX_CFG_MXDMA) ;
#endif

            // also, we currently don't care about fill/drain
            // thresholds though this may change in the future with
            // more realistic networks or a driver which changes it
            // according to feedback

            break;

          case GPIOR:
            regs.gpior = reg;
            /* these just control general purpose i/o pins, don't matter */
            break;

          case RXDP:
            regs.rxdp = reg;
            CRDD = false;
            break;

          case RXDP_HI:
            regs.rxdp_hi = reg;
            break;

          case RX_CFG:
            regs.rxcfg = reg;
#if 0
            if (reg & RX_CFG_AEP) ;
            if (reg & RX_CFG_ARP) ;
            if (reg & RX_CFG_STRIPCRC) ;
            if (reg & RX_CFG_RX_RD) ;
            if (reg & RX_CFG_ALP) ;
            if (reg & RX_CFG_AIRL) ;

            /* we handle our own DMA, ignore what kernel says about it */
            if (reg & RX_CFG_MXDMA) ;

            //also, we currently don't care about fill/drain thresholds
            //though this may change in the future with more realistic
            //networks or a driver which changes it according to feedback
            if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
#endif
            break;

          case PQCR:
            /* there is no priority queueing used in the linux 2.6 driver */
            regs.pqcr = reg;
            break;

          case WCSR:
            /* not going to implement wake on LAN */
            regs.wcsr = reg;
            break;

          case PCR:
            /* not going to implement pause control */
            regs.pcr = reg;
            break;

          case RFCR:
            regs.rfcr = reg;

            // latch the filter-control bits into booleans used by
            // the receive filter
            rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
            acceptBroadcast = (reg & RFCR_AAB) ? true : false;
            acceptMulticast = (reg & RFCR_AAM) ? true : false;
            acceptUnicast = (reg & RFCR_AAU) ? true : false;
            acceptPerfect = (reg & RFCR_APM) ? true : false;
            acceptArp = (reg & RFCR_AARP) ? true : false;

#if 0
            if (reg & RFCR_APAT)
                panic("RFCR_APAT not implemented!\n");
#endif

            if (reg & RFCR_MHEN || reg & RFCR_UHEN)
                panic("hash filtering not implemented!\n");

            if (reg & RFCR_ULM)
                panic("RFCR_ULM not implemented!\n");

            break;

          case RFDR:
            panic("the driver never writes to RFDR, something is wrong!\n");

          case BRAR:
            panic("the driver never uses BRAR, something is wrong!\n");

          case BRDR:
            panic("the driver never uses BRDR, something is wrong!\n");

          case SRR:
            panic("SRR is read only register!\n");

          case MIBC:
            panic("the driver never uses MIBC, something is wrong!\n");

          case VRCR:
            regs.vrcr = reg;
            break;

          case VTCR:
            regs.vtcr = reg;
            break;

          case VDR:
            panic("the driver never uses VDR, something is wrong!\n");
            break;

          case CCSR:
            /* not going to implement clockrun stuff */
            regs.ccsr = reg;
            break;

          case TBICR:
            regs.tbicr = reg;
            if (reg & TBICR_MR_LOOPBACK)
                panic("TBICR_MR_LOOPBACK never used, something wrong!\n");

            if (reg & TBICR_MR_AN_ENABLE) {
                // fake autonegotiation: instantly complete and report
                // the link partner's abilities as our own
                regs.tanlpar = regs.tanar;
                regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
            }

#if 0
            if (reg & TBICR_MR_RESTART_AN) ;
#endif

            break;

          case TBISR:
            panic("TBISR is read only register!\n");

          case TANAR:
            regs.tanar = reg;
            if (reg & TANAR_PS2)
                panic("this isn't used in driver, something wrong!\n");

            if (reg & TANAR_PS1)
                panic("this isn't used in driver, something wrong!\n");
            break;

          case TANLPAR:
            panic("this should only be written to by the fake phy!\n");

          case TANER:
            panic("TANER is read only register!\n");

          case TESR:
            regs.tesr = reg;
            break;

          default:
            panic("invalid register access daddr=%#x", daddr);
        }
    } else {
        panic("Invalid Request Size");
    }

    return No_Fault;
}

// Set interrupt bits in the ISR, count them for the coalescing stats,
// and schedule a CPU interrupt (delayed by intrDelay unless the bit
// is in ISR_NODELAY) when any unmasked bit is set.
void
NSGigE::devIntrPost(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot set a reserved interrupt");

    if (interrupts & ISR_NOIMPL)
        warn("interrupt not implemented %#x\n", interrupts);

    interrupts &= ~ISR_NOIMPL;
    regs.isr |= interrupts;

    // count each unmasked interrupt written to the ISR
    if (interrupts & regs.imr) {
        if (interrupts & ISR_SWI) {
            totalSwi++;
        }
        if (interrupts & ISR_RXIDLE) {
            totalRxIdle++;
        }
        if (interrupts & ISR_RXOK) {
            totalRxOk++;
        }
        if (interrupts & ISR_RXDESC) {
            totalRxDesc++;
        }
        if (interrupts & ISR_TXOK) {
            totalTxOk++;
        }
        if (interrupts & ISR_TXIDLE) {
            totalTxIdle++;
        }
        if (interrupts & ISR_TXDESC) {
            totalTxDesc++;
        }
        if (interrupts & ISR_RXORN) {
            totalRxOrn++;
        }
    }

    DPRINTF(EthernetIntr,
            "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
            interrupts, regs.isr, regs.imr);

    if ((regs.isr & regs.imr)) {
        Tick when = curTick;
        if (!(regs.isr & regs.imr & ISR_NODELAY))
            when += intrDelay;
        cpuIntrPost(when);
    }
}

/* writing this interrupt counting stats inside this means that this function
   is now limited to being used to clear all interrupts upon
the kernel 1185 reading isr and servicing. just telling you in case you were thinking 1186 of expanding use. 1187*/ 1188void 1189NSGigE::devIntrClear(uint32_t interrupts) 1190{ 1191 if (interrupts & ISR_RESERVE) 1192 panic("Cannot clear a reserved interrupt"); 1193 1194 if (regs.isr & regs.imr & ISR_SWI) { 1195 postedSwi++; 1196 } 1197 if (regs.isr & regs.imr & ISR_RXIDLE) { 1198 postedRxIdle++; 1199 } 1200 if (regs.isr & regs.imr & ISR_RXOK) { 1201 postedRxOk++; 1202 } 1203 if (regs.isr & regs.imr & ISR_RXDESC) { 1204 postedRxDesc++; 1205 } 1206 if (regs.isr & regs.imr & ISR_TXOK) { 1207 postedTxOk++; 1208 } 1209 if (regs.isr & regs.imr & ISR_TXIDLE) { 1210 postedTxIdle++; 1211 } 1212 if (regs.isr & regs.imr & ISR_TXDESC) { 1213 postedTxDesc++; 1214 } 1215 if (regs.isr & regs.imr & ISR_RXORN) { 1216 postedRxOrn++; 1217 } 1218 1219 if (regs.isr & regs.imr & (ISR_SWI | ISR_RXIDLE | ISR_RXOK | ISR_RXDESC | 1220 ISR_TXOK | ISR_TXIDLE | ISR_TXDESC | ISR_RXORN) ) 1221 postedInterrupts++; 1222 1223 interrupts &= ~ISR_NOIMPL; 1224 regs.isr &= ~interrupts; 1225 1226 DPRINTF(EthernetIntr, 1227 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n", 1228 interrupts, regs.isr, regs.imr); 1229 1230 if (!(regs.isr & regs.imr)) 1231 cpuIntrClear(); 1232} 1233 1234void 1235NSGigE::devIntrChangeMask() 1236{ 1237 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n", 1238 regs.isr, regs.imr, regs.isr & regs.imr); 1239 1240 if (regs.isr & regs.imr) 1241 cpuIntrPost(curTick); 1242 else 1243 cpuIntrClear(); 1244} 1245 1246void 1247NSGigE::cpuIntrPost(Tick when) 1248{ 1249 // If the interrupt you want to post is later than an interrupt 1250 // already scheduled, just let it post in the coming one and don't 1251 // schedule another. 1252 // HOWEVER, must be sure that the scheduled intrTick is in the 1253 // future (this was formerly the source of a bug) 1254 /** 1255 * @todo this warning should be removed and the intrTick code should 1256 * be fixed. 
1257 */ 1258 assert(when >= curTick); 1259 assert(intrTick >= curTick || intrTick == 0); 1260 if (when > intrTick && intrTick != 0) { 1261 DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n", 1262 intrTick); 1263 return; 1264 } 1265 1266 intrTick = when; 1267 if (intrTick < curTick) { 1268 debug_break(); 1269 intrTick = curTick; 1270 } 1271 1272 DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n", 1273 intrTick); 1274 1275 if (intrEvent) 1276 intrEvent->squash(); 1277 intrEvent = new IntrEvent(this, true); 1278 intrEvent->schedule(intrTick); 1279} 1280 1281void 1282NSGigE::cpuInterrupt() 1283{ 1284 assert(intrTick == curTick); 1285 1286 // Whether or not there's a pending interrupt, we don't care about 1287 // it anymore 1288 intrEvent = 0; 1289 intrTick = 0; 1290 1291 // Don't send an interrupt if there's already one 1292 if (cpuPendingIntr) { 1293 DPRINTF(EthernetIntr, 1294 "would send an interrupt now, but there's already pending\n"); 1295 } else { 1296 // Send interrupt 1297 cpuPendingIntr = true; 1298 1299 DPRINTF(EthernetIntr, "posting interrupt\n"); 1300 intrPost(); 1301 } 1302} 1303 1304void 1305NSGigE::cpuIntrClear() 1306{ 1307 if (!cpuPendingIntr) 1308 return; 1309 1310 if (intrEvent) { 1311 intrEvent->squash(); 1312 intrEvent = 0; 1313 } 1314 1315 intrTick = 0; 1316 1317 cpuPendingIntr = false; 1318 1319 DPRINTF(EthernetIntr, "clearing interrupt\n"); 1320 intrClear(); 1321} 1322 1323bool 1324NSGigE::cpuIntrPending() const 1325{ return cpuPendingIntr; } 1326 1327void 1328NSGigE::txReset() 1329{ 1330 1331 DPRINTF(Ethernet, "transmit reset\n"); 1332 1333 CTDD = false; 1334 txEnable = false;; 1335 txFragPtr = 0; 1336 assert(txDescCnt == 0); 1337 txFifo.clear(); 1338 txState = txIdle; 1339 assert(txDmaState == dmaIdle); 1340} 1341 1342void 1343NSGigE::rxReset() 1344{ 1345 DPRINTF(Ethernet, "receive reset\n"); 1346 1347 CRDD = false; 1348 assert(rxPktBytes == 0); 1349 rxEnable = false; 1350 rxFragPtr = 0; 1351 
assert(rxDescCnt == 0); 1352 assert(rxDmaState == dmaIdle); 1353 rxFifo.clear(); 1354 rxState = rxIdle; 1355} 1356 1357void 1358NSGigE::regsReset() 1359{ 1360 memset(®s, 0, sizeof(regs)); 1361 regs.config = CFGR_LNKSTS; 1362 regs.mear = 0x22; 1363 regs.txcfg = 0x120; // set drain threshold to 1024 bytes and 1364 // fill threshold to 32 bytes 1365 regs.rxcfg = 0x4; // set drain threshold to 16 bytes 1366 regs.srr = 0x0103; // set the silicon revision to rev B or 0x103 1367 regs.mibc = MIBC_FRZ; 1368 regs.vdr = 0x81; // set the vlan tag type to 802.1q 1369 regs.tesr = 0xc000; // TBI capable of both full and half duplex 1370 1371 extstsEnable = false; 1372 acceptBroadcast = false; 1373 acceptMulticast = false; 1374 acceptUnicast = false; 1375 acceptPerfect = false; 1376 acceptArp = false; 1377} 1378 1379void 1380NSGigE::rxDmaReadCopy() 1381{ 1382 assert(rxDmaState == dmaReading); 1383 1384 physmem->dma_read((uint8_t *)rxDmaData, rxDmaAddr, rxDmaLen); 1385 rxDmaState = dmaIdle; 1386 1387 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n", 1388 rxDmaAddr, rxDmaLen); 1389 DDUMP(EthernetDMA, rxDmaData, rxDmaLen); 1390} 1391 1392bool 1393NSGigE::doRxDmaRead() 1394{ 1395 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting); 1396 rxDmaState = dmaReading; 1397 1398 if (dmaInterface && !rxDmaFree) { 1399 if (dmaInterface->busy()) 1400 rxDmaState = dmaReadWaiting; 1401 else 1402 dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick, 1403 &rxDmaReadEvent, true); 1404 return true; 1405 } 1406 1407 if (dmaReadDelay == 0 && dmaReadFactor == 0) { 1408 rxDmaReadCopy(); 1409 return false; 1410 } 1411 1412 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor; 1413 Tick start = curTick + dmaReadDelay + factor; 1414 rxDmaReadEvent.schedule(start); 1415 return true; 1416} 1417 1418void 1419NSGigE::rxDmaReadDone() 1420{ 1421 assert(rxDmaState == dmaReading); 1422 rxDmaReadCopy(); 1423 1424 // If the transmit state machine has a pending DMA, let it go first 1425 if 
(txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting) 1426 txKick(); 1427 1428 rxKick(); 1429} 1430 1431void 1432NSGigE::rxDmaWriteCopy() 1433{ 1434 assert(rxDmaState == dmaWriting); 1435 1436 physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen); 1437 rxDmaState = dmaIdle; 1438 1439 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n", 1440 rxDmaAddr, rxDmaLen); 1441 DDUMP(EthernetDMA, rxDmaData, rxDmaLen); 1442} 1443 1444bool 1445NSGigE::doRxDmaWrite() 1446{ 1447 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting); 1448 rxDmaState = dmaWriting; 1449 1450 if (dmaInterface && !rxDmaFree) { 1451 if (dmaInterface->busy()) 1452 rxDmaState = dmaWriteWaiting; 1453 else 1454 dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick, 1455 &rxDmaWriteEvent, true); 1456 return true; 1457 } 1458 1459 if (dmaWriteDelay == 0 && dmaWriteFactor == 0) { 1460 rxDmaWriteCopy(); 1461 return false; 1462 } 1463 1464 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor; 1465 Tick start = curTick + dmaWriteDelay + factor; 1466 rxDmaWriteEvent.schedule(start); 1467 return true; 1468} 1469 1470void 1471NSGigE::rxDmaWriteDone() 1472{ 1473 assert(rxDmaState == dmaWriting); 1474 rxDmaWriteCopy(); 1475 1476 // If the transmit state machine has a pending DMA, let it go first 1477 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting) 1478 txKick(); 1479 1480 rxKick(); 1481} 1482 1483void 1484NSGigE::rxKick() 1485{ 1486 DPRINTF(EthernetSM, "receive kick rxState=%s (rxBuf.size=%d)\n", 1487 NsRxStateStrings[rxState], rxFifo.size()); 1488 1489 if (rxKickTick > curTick) { 1490 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n", 1491 rxKickTick); 1492 return; 1493 } 1494 1495 next: 1496 switch(rxDmaState) { 1497 case dmaReadWaiting: 1498 if (doRxDmaRead()) 1499 goto exit; 1500 break; 1501 case dmaWriteWaiting: 1502 if (doRxDmaWrite()) 1503 goto exit; 1504 break; 1505 default: 1506 break; 1507 } 1508 1509 // see state machine 
from spec for details 1510 // the way this works is, if you finish work on one state and can 1511 // go directly to another, you do that through jumping to the 1512 // label "next". however, if you have intermediate work, like DMA 1513 // so that you can't go to the next state yet, you go to exit and 1514 // exit the loop. however, when the DMA is done it will trigger 1515 // an event and come back to this loop. 1516 switch (rxState) { 1517 case rxIdle: 1518 if (!rxEnable) { 1519 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n"); 1520 goto exit; 1521 } 1522 1523 if (CRDD) { 1524 rxState = rxDescRefr; 1525 1526 rxDmaAddr = regs.rxdp & 0x3fffffff; 1527 rxDmaData = &rxDescCache + offsetof(ns_desc, link); 1528 rxDmaLen = sizeof(rxDescCache.link); 1529 rxDmaFree = dmaDescFree; 1530 1531 descDmaReads++; 1532 descDmaRdBytes += rxDmaLen; 1533 1534 if (doRxDmaRead()) 1535 goto exit; 1536 } else { 1537 rxState = rxDescRead; 1538 1539 rxDmaAddr = regs.rxdp & 0x3fffffff; 1540 rxDmaData = &rxDescCache; 1541 rxDmaLen = sizeof(ns_desc); 1542 rxDmaFree = dmaDescFree; 1543 1544 descDmaReads++; 1545 descDmaRdBytes += rxDmaLen; 1546 1547 if (doRxDmaRead()) 1548 goto exit; 1549 } 1550 break; 1551 1552 case rxDescRefr: 1553 if (rxDmaState != dmaIdle) 1554 goto exit; 1555 1556 rxState = rxAdvance; 1557 break; 1558 1559 case rxDescRead: 1560 if (rxDmaState != dmaIdle) 1561 goto exit; 1562 1563 DPRINTF(EthernetDesc, 1564 "rxDescCache: addr=%08x read descriptor\n", 1565 regs.rxdp & 0x3fffffff); 1566 DPRINTF(EthernetDesc, 1567 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n", 1568 rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts, 1569 rxDescCache.extsts); 1570 1571 if (rxDescCache.cmdsts & CMDSTS_OWN) { 1572 devIntrPost(ISR_RXIDLE); 1573 rxState = rxIdle; 1574 goto exit; 1575 } else { 1576 rxState = rxFifoBlock; 1577 rxFragPtr = rxDescCache.bufptr; 1578 rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK; 1579 } 1580 break; 1581 1582 case rxFifoBlock: 1583 if 
(!rxPacket) { 1584 /** 1585 * @todo in reality, we should be able to start processing 1586 * the packet as it arrives, and not have to wait for the 1587 * full packet ot be in the receive fifo. 1588 */ 1589 if (rxFifo.empty()) 1590 goto exit; 1591 1592 DPRINTF(EthernetSM, "****processing receive of new packet****\n"); 1593 1594 // If we don't have a packet, grab a new one from the fifo. 1595 rxPacket = rxFifo.front(); 1596 rxPktBytes = rxPacket->length; 1597 rxPacketBufPtr = rxPacket->data; 1598 1599#if TRACING_ON 1600 if (DTRACE(Ethernet)) { 1601 IpPtr ip(rxPacket); 1602 if (ip) { 1603 DPRINTF(Ethernet, "ID is %d\n", ip->id()); 1604 TcpPtr tcp(ip); 1605 if (tcp) { 1606 DPRINTF(Ethernet, 1607 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n", 1608 tcp->sport(), tcp->dport(), tcp->seq(), 1609 tcp->ack()); 1610 } 1611 } 1612 } 1613#endif 1614 1615 // sanity check - i think the driver behaves like this 1616 assert(rxDescCnt >= rxPktBytes); 1617 rxFifo.pop(); 1618 } 1619 1620 1621 // dont' need the && rxDescCnt > 0 if driver sanity check 1622 // above holds 1623 if (rxPktBytes > 0) { 1624 rxState = rxFragWrite; 1625 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity 1626 // check holds 1627 rxXferLen = rxPktBytes; 1628 1629 rxDmaAddr = rxFragPtr & 0x3fffffff; 1630 rxDmaData = rxPacketBufPtr; 1631 rxDmaLen = rxXferLen; 1632 rxDmaFree = dmaDataFree; 1633 1634 if (doRxDmaWrite()) 1635 goto exit; 1636 1637 } else { 1638 rxState = rxDescWrite; 1639 1640 //if (rxPktBytes == 0) { /* packet is done */ 1641 assert(rxPktBytes == 0); 1642 DPRINTF(EthernetSM, "done with receiving packet\n"); 1643 1644 rxDescCache.cmdsts |= CMDSTS_OWN; 1645 rxDescCache.cmdsts &= ~CMDSTS_MORE; 1646 rxDescCache.cmdsts |= CMDSTS_OK; 1647 rxDescCache.cmdsts &= 0xffff0000; 1648 rxDescCache.cmdsts += rxPacket->length; //i.e. 
set CMDSTS_SIZE 1649 1650#if 0 1651 /* 1652 * all the driver uses these are for its own stats keeping 1653 * which we don't care about, aren't necessary for 1654 * functionality and doing this would just slow us down. 1655 * if they end up using this in a later version for 1656 * functional purposes, just undef 1657 */ 1658 if (rxFilterEnable) { 1659 rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK; 1660 const EthAddr &dst = rxFifoFront()->dst(); 1661 if (dst->unicast()) 1662 rxDescCache.cmdsts |= CMDSTS_DEST_SELF; 1663 if (dst->multicast()) 1664 rxDescCache.cmdsts |= CMDSTS_DEST_MULTI; 1665 if (dst->broadcast()) 1666 rxDescCache.cmdsts |= CMDSTS_DEST_MASK; 1667 } 1668#endif 1669 1670 IpPtr ip(rxPacket); 1671 if (extstsEnable && ip) { 1672 rxDescCache.extsts |= EXTSTS_IPPKT; 1673 rxIpChecksums++; 1674 if (cksum(ip) != 0) { 1675 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n"); 1676 rxDescCache.extsts |= EXTSTS_IPERR; 1677 } 1678 TcpPtr tcp(ip); 1679 UdpPtr udp(ip); 1680 if (tcp) { 1681 rxDescCache.extsts |= EXTSTS_TCPPKT; 1682 rxTcpChecksums++; 1683 if (cksum(tcp) != 0) { 1684 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n"); 1685 rxDescCache.extsts |= EXTSTS_TCPERR; 1686 1687 } 1688 } else if (udp) { 1689 rxDescCache.extsts |= EXTSTS_UDPPKT; 1690 rxUdpChecksums++; 1691 if (cksum(udp) != 0) { 1692 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n"); 1693 rxDescCache.extsts |= EXTSTS_UDPERR; 1694 } 1695 } 1696 } 1697 rxPacket = 0; 1698 1699 /* 1700 * the driver seems to always receive into desc buffers 1701 * of size 1514, so you never have a pkt that is split 1702 * into multiple descriptors on the receive side, so 1703 * i don't implement that case, hence the assert above. 
1704 */ 1705 1706 DPRINTF(EthernetDesc, 1707 "rxDescCache: addr=%08x writeback cmdsts extsts\n", 1708 regs.rxdp & 0x3fffffff); 1709 DPRINTF(EthernetDesc, 1710 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n", 1711 rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts, 1712 rxDescCache.extsts); 1713 1714 rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff; 1715 rxDmaData = &(rxDescCache.cmdsts); 1716 rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts); 1717 rxDmaFree = dmaDescFree; 1718 1719 descDmaWrites++; 1720 descDmaWrBytes += rxDmaLen; 1721 1722 if (doRxDmaWrite()) 1723 goto exit; 1724 } 1725 break; 1726 1727 case rxFragWrite: 1728 if (rxDmaState != dmaIdle) 1729 goto exit; 1730 1731 rxPacketBufPtr += rxXferLen; 1732 rxFragPtr += rxXferLen; 1733 rxPktBytes -= rxXferLen; 1734 1735 rxState = rxFifoBlock; 1736 break; 1737 1738 case rxDescWrite: 1739 if (rxDmaState != dmaIdle) 1740 goto exit; 1741 1742 assert(rxDescCache.cmdsts & CMDSTS_OWN); 1743 1744 assert(rxPacket == 0); 1745 devIntrPost(ISR_RXOK); 1746 1747 if (rxDescCache.cmdsts & CMDSTS_INTR) 1748 devIntrPost(ISR_RXDESC); 1749 1750 if (!rxEnable) { 1751 DPRINTF(EthernetSM, "Halting the RX state machine\n"); 1752 rxState = rxIdle; 1753 goto exit; 1754 } else 1755 rxState = rxAdvance; 1756 break; 1757 1758 case rxAdvance: 1759 if (rxDescCache.link == 0) { 1760 devIntrPost(ISR_RXIDLE); 1761 rxState = rxIdle; 1762 CRDD = true; 1763 goto exit; 1764 } else { 1765 rxState = rxDescRead; 1766 regs.rxdp = rxDescCache.link; 1767 CRDD = false; 1768 1769 rxDmaAddr = regs.rxdp & 0x3fffffff; 1770 rxDmaData = &rxDescCache; 1771 rxDmaLen = sizeof(ns_desc); 1772 rxDmaFree = dmaDescFree; 1773 1774 if (doRxDmaRead()) 1775 goto exit; 1776 } 1777 break; 1778 1779 default: 1780 panic("Invalid rxState!"); 1781 } 1782 1783 DPRINTF(EthernetSM, "entering next rxState=%s\n", 1784 NsRxStateStrings[rxState]); 1785 1786 goto next; 1787 1788 exit: 1789 /** 1790 * @todo do we want to schedule a 
future kick? 1791 */ 1792 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n", 1793 NsRxStateStrings[rxState]); 1794} 1795 1796void 1797NSGigE::transmit() 1798{ 1799 if (txFifo.empty()) { 1800 DPRINTF(Ethernet, "nothing to transmit\n"); 1801 return; 1802 } 1803 1804 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n", 1805 txFifo.size()); 1806 if (interface->sendPacket(txFifo.front())) { 1807#if TRACING_ON 1808 if (DTRACE(Ethernet)) { 1809 IpPtr ip(txFifo.front()); 1810 if (ip) { 1811 DPRINTF(Ethernet, "ID is %d\n", ip->id()); 1812 TcpPtr tcp(ip); 1813 if (tcp) { 1814 DPRINTF(Ethernet, 1815 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n", 1816 tcp->sport(), tcp->dport(), tcp->seq(), tcp->ack()); 1817 } 1818 } 1819 } 1820#endif 1821 1822 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length); 1823 txBytes += txFifo.front()->length; 1824 txPackets++; 1825 1826 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n", 1827 txFifo.avail()); 1828 txFifo.pop(); 1829 1830 /* 1831 * normally do a writeback of the descriptor here, and ONLY 1832 * after that is done, send this interrupt. but since our 1833 * stuff never actually fails, just do this interrupt here, 1834 * otherwise the code has to stray from this nice format. 1835 * besides, it's functionally the same. 
1836 */ 1837 devIntrPost(ISR_TXOK); 1838 } 1839 1840 if (!txFifo.empty() && !txEvent.scheduled()) { 1841 DPRINTF(Ethernet, "reschedule transmit\n"); 1842 txEvent.schedule(curTick + retryTime); 1843 } 1844} 1845 1846void 1847NSGigE::txDmaReadCopy() 1848{ 1849 assert(txDmaState == dmaReading); 1850 1851 physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen); 1852 txDmaState = dmaIdle; 1853 1854 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n", 1855 txDmaAddr, txDmaLen); 1856 DDUMP(EthernetDMA, txDmaData, txDmaLen); 1857} 1858 1859bool 1860NSGigE::doTxDmaRead() 1861{ 1862 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting); 1863 txDmaState = dmaReading; 1864 1865 if (dmaInterface && !txDmaFree) { 1866 if (dmaInterface->busy()) 1867 txDmaState = dmaReadWaiting; 1868 else 1869 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick, 1870 &txDmaReadEvent, true); 1871 return true; 1872 } 1873 1874 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) { 1875 txDmaReadCopy(); 1876 return false; 1877 } 1878 1879 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor; 1880 Tick start = curTick + dmaReadDelay + factor; 1881 txDmaReadEvent.schedule(start); 1882 return true; 1883} 1884 1885void 1886NSGigE::txDmaReadDone() 1887{ 1888 assert(txDmaState == dmaReading); 1889 txDmaReadCopy(); 1890 1891 // If the receive state machine has a pending DMA, let it go first 1892 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting) 1893 rxKick(); 1894 1895 txKick(); 1896} 1897 1898void 1899NSGigE::txDmaWriteCopy() 1900{ 1901 assert(txDmaState == dmaWriting); 1902 1903 physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen); 1904 txDmaState = dmaIdle; 1905 1906 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n", 1907 txDmaAddr, txDmaLen); 1908 DDUMP(EthernetDMA, txDmaData, txDmaLen); 1909} 1910 1911bool 1912NSGigE::doTxDmaWrite() 1913{ 1914 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting); 1915 txDmaState = dmaWriting; 1916 1917 if 
(dmaInterface && !txDmaFree) { 1918 if (dmaInterface->busy()) 1919 txDmaState = dmaWriteWaiting; 1920 else 1921 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick, 1922 &txDmaWriteEvent, true); 1923 return true; 1924 } 1925 1926 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) { 1927 txDmaWriteCopy(); 1928 return false; 1929 } 1930 1931 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor; 1932 Tick start = curTick + dmaWriteDelay + factor; 1933 txDmaWriteEvent.schedule(start); 1934 return true; 1935} 1936 1937void 1938NSGigE::txDmaWriteDone() 1939{ 1940 assert(txDmaState == dmaWriting); 1941 txDmaWriteCopy(); 1942 1943 // If the receive state machine has a pending DMA, let it go first 1944 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting) 1945 rxKick(); 1946 1947 txKick(); 1948} 1949 1950void 1951NSGigE::txKick() 1952{ 1953 DPRINTF(EthernetSM, "transmit kick txState=%s\n", 1954 NsTxStateStrings[txState]); 1955 1956 if (txKickTick > curTick) { 1957 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n", 1958 txKickTick); 1959 1960 return; 1961 } 1962 1963 next: 1964 switch(txDmaState) { 1965 case dmaReadWaiting: 1966 if (doTxDmaRead()) 1967 goto exit; 1968 break; 1969 case dmaWriteWaiting: 1970 if (doTxDmaWrite()) 1971 goto exit; 1972 break; 1973 default: 1974 break; 1975 } 1976 1977 switch (txState) { 1978 case txIdle: 1979 if (!txEnable) { 1980 DPRINTF(EthernetSM, "Transmit disabled. 
Nothing to do.\n"); 1981 goto exit; 1982 } 1983 1984 if (CTDD) { 1985 txState = txDescRefr; 1986 1987 txDmaAddr = regs.txdp & 0x3fffffff; 1988 txDmaData = &txDescCache + offsetof(ns_desc, link); 1989 txDmaLen = sizeof(txDescCache.link); 1990 txDmaFree = dmaDescFree; 1991 1992 descDmaReads++; 1993 descDmaRdBytes += txDmaLen; 1994 1995 if (doTxDmaRead()) 1996 goto exit; 1997 1998 } else { 1999 txState = txDescRead; 2000 2001 txDmaAddr = regs.txdp & 0x3fffffff; 2002 txDmaData = &txDescCache; 2003 txDmaLen = sizeof(ns_desc); 2004 txDmaFree = dmaDescFree; 2005 2006 descDmaReads++; 2007 descDmaRdBytes += txDmaLen; 2008 2009 if (doTxDmaRead()) 2010 goto exit; 2011 } 2012 break; 2013 2014 case txDescRefr: 2015 if (txDmaState != dmaIdle) 2016 goto exit; 2017 2018 txState = txAdvance; 2019 break; 2020 2021 case txDescRead: 2022 if (txDmaState != dmaIdle) 2023 goto exit; 2024 2025 DPRINTF(EthernetDesc, 2026 "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n", 2027 txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts, 2028 txDescCache.extsts); 2029 2030 if (txDescCache.cmdsts & CMDSTS_OWN) { 2031 txState = txFifoBlock; 2032 txFragPtr = txDescCache.bufptr; 2033 txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK; 2034 } else { 2035 devIntrPost(ISR_TXIDLE); 2036 txState = txIdle; 2037 goto exit; 2038 } 2039 break; 2040 2041 case txFifoBlock: 2042 if (!txPacket) { 2043 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n"); 2044 txPacket = new PacketData(16384); 2045 txPacketBufPtr = txPacket->data; 2046 } 2047 2048 if (txDescCnt == 0) { 2049 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n"); 2050 if (txDescCache.cmdsts & CMDSTS_MORE) { 2051 DPRINTF(EthernetSM, "there are more descriptors to come\n"); 2052 txState = txDescWrite; 2053 2054 txDescCache.cmdsts &= ~CMDSTS_OWN; 2055 2056 txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts); 2057 txDmaAddr &= 0x3fffffff; 2058 txDmaData = &(txDescCache.cmdsts); 2059 txDmaLen = 
sizeof(txDescCache.cmdsts);
                txDmaFree = dmaDescFree;

                if (doTxDmaWrite())
                    goto exit;

            } else { /* this packet is totally done */
                DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
                /* deal with the packet that just finished */
                // If the driver asked for checksum offload, compute
                // the checksums the hardware would have inserted.
                if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
                    IpPtr ip(txPacket);
                    if (txDescCache.extsts & EXTSTS_UDPPKT) {
                        UdpPtr udp(ip);
                        udp->sum(0);
                        udp->sum(cksum(udp));
                        txUdpChecksums++;
                    } else if (txDescCache.extsts & EXTSTS_TCPPKT) {
                        TcpPtr tcp(ip);
                        tcp->sum(0);
                        tcp->sum(cksum(tcp));
                        txTcpChecksums++;
                    }
                    if (txDescCache.extsts & EXTSTS_IPPKT) {
                        ip->sum(0);
                        ip->sum(cksum(ip));
                        txIpChecksums++;
                    }
                }

                txPacket->length = txPacketBufPtr - txPacket->data;
                // this is just because the receive can't handle a
                // packet bigger want to make sure
                assert(txPacket->length <= 1514);
#ifndef NDEBUG
                bool success =
#endif
                    txFifo.push(txPacket);
                // NDEBUG builds drop both the bool and the assert.
                assert(success);

                /*
                 * this following section is not to spec, but
                 * functionally shouldn't be any different.  normally,
                 * the chip will wait til the transmit has occurred
                 * before writing back the descriptor because it has
                 * to wait to see that it was successfully transmitted
                 * to decide whether to set CMDSTS_OK or not.
                 * however, in the simulator since it is always
                 * successfully transmitted, and writing it exactly to
                 * spec would complicate the code, we just do it here
                 */

                txDescCache.cmdsts &= ~CMDSTS_OWN;
                txDescCache.cmdsts |= CMDSTS_OK;

                DPRINTF(EthernetDesc,
                        "txDesc writeback: cmdsts=%08x extsts=%08x\n",
                        txDescCache.cmdsts, txDescCache.extsts);

                // Write back cmdsts and extsts (adjacent in ns_desc)
                // in one transfer.
                txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts);
                txDmaAddr &= 0x3fffffff;
                txDmaData = &(txDescCache.cmdsts);
                txDmaLen = sizeof(txDescCache.cmdsts) +
                    sizeof(txDescCache.extsts);
                txDmaFree = dmaDescFree;

                descDmaWrites++;
                descDmaWrBytes += txDmaLen;

                transmit();
                txPacket = 0;

                if (!txEnable) {
                    DPRINTF(EthernetSM, "halting TX state machine\n");
                    txState = txIdle;
                    goto exit;
                } else
                    txState = txAdvance;

                if (doTxDmaWrite())
                    goto exit;
            }
        } else {
            DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
            if (!txFifo.full()) {
                txState = txFragRead;

                /*
                 * The number of bytes transferred is either whatever
                 * is left in the descriptor (txDescCnt), or if there
                 * is not enough room in the fifo, just whatever room
                 * is left in the fifo
                 */
                txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());

                txDmaAddr = txFragPtr & 0x3fffffff;
                txDmaData = txPacketBufPtr;
                txDmaLen = txXferLen;
                txDmaFree = dmaDataFree;

                if (doTxDmaRead())
                    goto exit;
            } else {
                // Fifo full: drain it onto the wire and wait.
                txState = txFifoBlock;
                transmit();

                goto exit;
            }

        }
        break;

      case txFragRead:
        if (txDmaState != dmaIdle)
            goto exit;

        // Account for the fragment just DMA'd into the packet buffer.
        txPacketBufPtr += txXferLen;
        txFragPtr += txXferLen;
        txDescCnt -= txXferLen;
        txFifo.reserve(txXferLen);

        txState = txFifoBlock;
        break;

      case txDescWrite:
        if (txDmaState != dmaIdle)
            goto exit;

        if (txDescCache.cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_TXDESC);

        txState = txAdvance;
        break;

      case txAdvance:
        // Follow the link to the next descriptor; a null link means
        // the ring is exhausted.
        if (txDescCache.link == 0) {
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        } else {
            txState = txDescRead;
            regs.txdp = txDescCache.link;
            CTDD = false;

            txDmaAddr = txDescCache.link & 0x3fffffff;
            txDmaData = &txDescCache;
            txDmaLen = sizeof(ns_desc);
            txDmaFree = dmaDescFree;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("invalid state");
    }

    DPRINTF(EthernetSM, "entering next txState=%s\n",
            NsTxStateStrings[txState]);

    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
            NsTxStateStrings[txState]);
}

/**
 * Called when the wire has finished sending our packet: if more data
 * is queued in txFifo, (re)schedule the transmit event for next cycle.
 */
void
NSGigE::transferDone()
{
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
        return;
    }

    DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");

    if (txEvent.scheduled())
        txEvent.reschedule(curTick + cycles(1));
    else
        txEvent.schedule(curTick + cycles(1));
}

/**
 * Receive address filter.  Returns true if the packet should be
 * DROPPED, false if it should be accepted, based on the destination
 * MAC address and the accept* configuration flags.
 */
bool
NSGigE::rxFilter(const PacketPtr &packet)
{
    EthPtr eth = packet;
    bool drop = true;
    string type;

    const EthAddr &dst = eth->dst();
    if (dst.unicast()) {
        // If we're accepting all unicast addresses
        if (acceptUnicast)
            drop = false;

        // If we make a perfect match
        if (acceptPerfect && dst == rom.perfectMatch)
            drop = false;

        if (acceptArp && eth->type() == ETH_TYPE_ARP)
            drop = false;

    } else if (dst.broadcast()) {
        // if we're accepting broadcasts
        if (acceptBroadcast)
            drop = false;

    } else if (dst.multicast()) {
        // if we're accepting all multicasts
        if
(acceptMulticast)
            drop = false;

    }

    if (drop) {
        DPRINTF(Ethernet, "rxFilter drop\n");
        DDUMP(EthernetData, packet->data, packet->length);
    }

    return drop;
}

/**
 * EtherInt callback: a packet has arrived from the wire.  Returns
 * true if the packet was consumed (accepted or deliberately dropped),
 * false only when the receive fifo is out of space, which also posts
 * an rx-overrun interrupt.
 */
bool
NSGigE::recvPacket(PacketPtr packet)
{
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
            rxFifo.avail());

    if (!rxEnable) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        debug_break();
        interface->recvDone();
        return true;
    }

    if (rxFilterEnable && rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        interface->recvDone();
        return true;
    }

    // Fifo overflow: drop and raise the overrun interrupt.
    if (rxFifo.avail() < packet->length) {
#if TRACING_ON
        IpPtr ip(packet);
        TcpPtr tcp(ip);
        if (ip) {
            DPRINTF(Ethernet,
                    "packet won't fit in receive buffer...pkt ID %d dropped\n",
                    ip->id());
            if (tcp) {
                DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
            }
        }
#endif
        droppedPackets++;
        devIntrPost(ISR_RXORN);
        return false;
    }

    rxFifo.push(packet);
    interface->recvDone();

    // Wake the receive state machine to process the new packet.
    rxKick();
    return true;
}

//=====================================================================
//
//
/**
 * Checkpoint the device.  Field order here must mirror
 * NSGigE::unserialize exactly.
 */
void
NSGigE::serialize(ostream &os)
{
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now, so no partially-complete DMA is
     * live across the checkpoint.
     */
    if (rxDmaReadEvent.scheduled())
        rxDmaReadCopy();
    if (rxDmaWriteEvent.scheduled())
        rxDmaWriteCopy();
    if (txDmaReadEvent.scheduled())
        txDmaReadCopy();
    if (txDmaWriteEvent.scheduled())
        txDmaWriteCopy();

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    rxFifo.serialize("rxFifo", os);
    txFifo.serialize("txFifo", os);

    /*
     * Serialize the various helper variables.  Packet buffer pointers
     * are stored as offsets into their packet's data buffer.
     */
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket->length = txPacketBufPtr - txPacket->data;
        txPacket->serialize("txPacket", os);
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        rxPacket->serialize("rxPacket", os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize DescCaches
     */
    SERIALIZE_SCALAR(txDescCache.link);
    SERIALIZE_SCALAR(txDescCache.bufptr);
    SERIALIZE_SCALAR(txDescCache.cmdsts);
    SERIALIZE_SCALAR(txDescCache.extsts);
    SERIALIZE_SCALAR(rxDescCache.link);
    SERIALIZE_SCALAR(rxDescCache.bufptr);
    SERIALIZE_SCALAR(rxDescCache.cmdsts);
    SERIALIZE_SCALAR(rxDescCache.extsts);

    /*
     * Serialize tx state machine (enums stored as ints)
     */
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);

    /*
     * Serialize rx state machine (enums stored as ints)
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFragPtr);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);

    SERIALIZE_SCALAR(extstsEnable);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later (stored relative to curTick; 0 = none)
     */
    Tick transmitTick = txEvent.scheduled() ?
txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);

    /*
     * Keep track of pending interrupt status (absolute tick;
     * 0 = no delivery event scheduled).
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}

/**
 * Restore the device from a checkpoint.  Field order here must
 * mirror NSGigE::serialize exactly.
 */
void
NSGigE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Unserialize the PciDev base class
    PciDev::unserialize(cp, section);

    UNSERIALIZE_SCALAR(regs.command);
    UNSERIALIZE_SCALAR(regs.config);
    UNSERIALIZE_SCALAR(regs.mear);
    UNSERIALIZE_SCALAR(regs.ptscr);
    UNSERIALIZE_SCALAR(regs.isr);
    UNSERIALIZE_SCALAR(regs.imr);
    UNSERIALIZE_SCALAR(regs.ier);
    UNSERIALIZE_SCALAR(regs.ihr);
    UNSERIALIZE_SCALAR(regs.txdp);
    UNSERIALIZE_SCALAR(regs.txdp_hi);
    UNSERIALIZE_SCALAR(regs.txcfg);
    UNSERIALIZE_SCALAR(regs.gpior);
    UNSERIALIZE_SCALAR(regs.rxdp);
    UNSERIALIZE_SCALAR(regs.rxdp_hi);
    UNSERIALIZE_SCALAR(regs.rxcfg);
    UNSERIALIZE_SCALAR(regs.pqcr);
    UNSERIALIZE_SCALAR(regs.wcsr);
    UNSERIALIZE_SCALAR(regs.pcr);
    UNSERIALIZE_SCALAR(regs.rfcr);
    UNSERIALIZE_SCALAR(regs.rfdr);
    UNSERIALIZE_SCALAR(regs.srr);
    UNSERIALIZE_SCALAR(regs.mibc);
    UNSERIALIZE_SCALAR(regs.vrcr);
    UNSERIALIZE_SCALAR(regs.vtcr);
    UNSERIALIZE_SCALAR(regs.vdr);
    UNSERIALIZE_SCALAR(regs.ccsr);
    UNSERIALIZE_SCALAR(regs.tbicr);
    UNSERIALIZE_SCALAR(regs.tbisr);
    UNSERIALIZE_SCALAR(regs.tanar);
    UNSERIALIZE_SCALAR(regs.tanlpar);
    UNSERIALIZE_SCALAR(regs.taner);
    UNSERIALIZE_SCALAR(regs.tesr);

    UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);

    UNSERIALIZE_SCALAR(ioEnable);

    /*
     * unserialize the data fifos
     */
    rxFifo.unserialize("rxFifo", cp, section);
    txFifo.unserialize("txFifo", cp, section);

    /*
     * unserialize the various helper variables; packet buffer
     * pointers were checkpointed as offsets into the packet data.
     */
    bool txPacketExists;
    UNSERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket = new PacketData(16384);
        txPacket->unserialize("txPacket", cp, section);
        uint32_t txPktBufPtr;
        UNSERIALIZE_SCALAR(txPktBufPtr);
        txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
    } else
        txPacket = 0;

    bool rxPacketExists;
    UNSERIALIZE_SCALAR(rxPacketExists);
    rxPacket = 0;
    if (rxPacketExists) {
        rxPacket = new PacketData(16384);
        rxPacket->unserialize("rxPacket", cp, section);
        uint32_t rxPktBufPtr;
        UNSERIALIZE_SCALAR(rxPktBufPtr);
        rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
    } else
        rxPacket = 0;

    UNSERIALIZE_SCALAR(txXferLen);
    UNSERIALIZE_SCALAR(rxXferLen);

    /*
     * Unserialize DescCaches
     */
    UNSERIALIZE_SCALAR(txDescCache.link);
    UNSERIALIZE_SCALAR(txDescCache.bufptr);
    UNSERIALIZE_SCALAR(txDescCache.cmdsts);
    UNSERIALIZE_SCALAR(txDescCache.extsts);
    UNSERIALIZE_SCALAR(rxDescCache.link);
    UNSERIALIZE_SCALAR(rxDescCache.bufptr);
    UNSERIALIZE_SCALAR(rxDescCache.cmdsts);
    UNSERIALIZE_SCALAR(rxDescCache.extsts);

    /*
     * unserialize tx state machine (ints cast back to enums)
     */
    int txState;
    UNSERIALIZE_SCALAR(txState);
    this->txState = (TxState) txState;
    UNSERIALIZE_SCALAR(txEnable);
    UNSERIALIZE_SCALAR(CTDD);
    UNSERIALIZE_SCALAR(txFragPtr);
    UNSERIALIZE_SCALAR(txDescCnt);
    int txDmaState;
    UNSERIALIZE_SCALAR(txDmaState);
    this->txDmaState = (DmaState) txDmaState;

    /*
     * unserialize rx state machine (ints cast back to enums)
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    this->rxState = (RxState) rxState;
    UNSERIALIZE_SCALAR(rxEnable);
    UNSERIALIZE_SCALAR(CRDD);
    UNSERIALIZE_SCALAR(rxPktBytes);
    UNSERIALIZE_SCALAR(rxFragPtr);
    UNSERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState;
    UNSERIALIZE_SCALAR(rxDmaState);
    this->rxDmaState = (DmaState) rxDmaState;

    UNSERIALIZE_SCALAR(extstsEnable);

    /*
     * If there's a pending transmit, reschedule it now
     * (checkpointed relative to curTick)
     */
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        txEvent.schedule(curTick + transmitTick);

    /*
     * unserialize receive address filter settings
     */
    UNSERIALIZE_SCALAR(rxFilterEnable);
    UNSERIALIZE_SCALAR(acceptBroadcast);
    UNSERIALIZE_SCALAR(acceptMulticast);
    UNSERIALIZE_SCALAR(acceptUnicast);
    UNSERIALIZE_SCALAR(acceptPerfect);
    UNSERIALIZE_SCALAR(acceptArp);

    /*
     * Keep track of pending interrupt status.
     */
    UNSERIALIZE_SCALAR(intrTick);
    UNSERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick;
    UNSERIALIZE_SCALAR(intrEventTick);
    if (intrEventTick) {
        intrEvent = new IntrEvent(this, true);
        intrEvent->schedule(intrEventTick);
    }

    /*
     * re-add addrRanges to bus bridges
     */
    if (pioInterface) {
        pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
        pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));
    }
}

/**
 * Latency callback for timing PIO accesses: fixed pioLatency past now.
 */
Tick
NSGigE::cacheAccess(MemReqPtr &req)
{
    DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
            req->paddr, req->paddr - addr);
    return curTick + pioLatency;
}

BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    SimObjectParam<EtherInt *> peer;
    SimObjectParam<NSGigE *> device;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device,
"Ethernet device of this interface") 2669 2670END_INIT_SIM_OBJECT_PARAMS(NSGigEInt) 2671 2672CREATE_SIM_OBJECT(NSGigEInt) 2673{ 2674 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device); 2675 2676 EtherInt *p = (EtherInt *)peer; 2677 if (p) { 2678 dev_int->setPeer(p); 2679 p->setPeer(dev_int); 2680 } 2681 2682 return dev_int; 2683} 2684 2685REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt) 2686 2687 2688BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE) 2689 2690 Param<Addr> addr; 2691 Param<Tick> clock; 2692 Param<Tick> tx_delay; 2693 Param<Tick> rx_delay; 2694 Param<Tick> intr_delay; 2695 SimObjectParam<MemoryController *> mmu; 2696 SimObjectParam<PhysicalMemory *> physmem; 2697 Param<bool> rx_filter; 2698 Param<string> hardware_address; 2699 SimObjectParam<Bus*> io_bus; 2700 SimObjectParam<Bus*> payload_bus; 2701 SimObjectParam<HierParams *> hier; 2702 Param<Tick> pio_latency; 2703 Param<bool> dma_desc_free; 2704 Param<bool> dma_data_free; 2705 Param<Tick> dma_read_delay; 2706 Param<Tick> dma_write_delay; 2707 Param<Tick> dma_read_factor; 2708 Param<Tick> dma_write_factor; 2709 SimObjectParam<PciConfigAll *> configspace; 2710 SimObjectParam<PciConfigData *> configdata; 2711 SimObjectParam<Platform *> platform; 2712 Param<uint32_t> pci_bus; 2713 Param<uint32_t> pci_dev; 2714 Param<uint32_t> pci_func; 2715 Param<uint32_t> tx_fifo_size; 2716 Param<uint32_t> rx_fifo_size; 2717 Param<uint32_t> m5reg; 2718 Param<bool> dma_no_allocate; 2719 2720END_DECLARE_SIM_OBJECT_PARAMS(NSGigE) 2721 2722BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE) 2723 2724 INIT_PARAM(addr, "Device Address"), 2725 INIT_PARAM(clock, "State machine processor frequency"), 2726 INIT_PARAM(tx_delay, "Transmit Delay"), 2727 INIT_PARAM(rx_delay, "Receive Delay"), 2728 INIT_PARAM(intr_delay, "Interrupt Delay in microseconds"), 2729 INIT_PARAM(mmu, "Memory Controller"), 2730 INIT_PARAM(physmem, "Physical Memory"), 2731 INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true), 2732 INIT_PARAM_DFLT(hardware_address, 
"Ethernet Hardware Address", 2733 "00:99:00:00:00:01"), 2734 INIT_PARAM_DFLT(io_bus, "The IO Bus to attach to for headers", NULL), 2735 INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL), 2736 INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams), 2737 INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1), 2738 INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false), 2739 INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false), 2740 INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0), 2741 INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0), 2742 INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0), 2743 INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0), 2744 INIT_PARAM(configspace, "PCI Configspace"), 2745 INIT_PARAM(configdata, "PCI Config data"), 2746 INIT_PARAM(platform, "Platform"), 2747 INIT_PARAM(pci_bus, "PCI bus"), 2748 INIT_PARAM(pci_dev, "PCI device number"), 2749 INIT_PARAM(pci_func, "PCI function code"), 2750 INIT_PARAM_DFLT(tx_fifo_size, "max size in bytes of txFifo", 131072), 2751 INIT_PARAM_DFLT(rx_fifo_size, "max size in bytes of rxFifo", 131072), 2752 INIT_PARAM(m5reg, "m5 register"), 2753 INIT_PARAM_DFLT(dma_no_allocate, "Should DMA reads allocate cache lines", true) 2754 2755END_INIT_SIM_OBJECT_PARAMS(NSGigE) 2756 2757 2758CREATE_SIM_OBJECT(NSGigE) 2759{ 2760 NSGigE::Params *params = new NSGigE::Params; 2761 2762 params->name = getInstanceName(); 2763 params->mmu = mmu; 2764 params->configSpace = configspace; 2765 params->configData = configdata; 2766 params->plat = platform; 2767 params->busNum = pci_bus; 2768 params->deviceNum = pci_dev; 2769 params->functionNum = pci_func; 2770 2771 params->clock = clock; 2772 params->intr_delay = intr_delay; 2773 params->pmem = physmem; 2774 params->tx_delay = tx_delay; 2775 params->rx_delay = rx_delay; 2776 params->hier = hier; 2777 params->header_bus = io_bus; 2778 
params->payload_bus = payload_bus; 2779 params->pio_latency = pio_latency; 2780 params->dma_desc_free = dma_desc_free; 2781 params->dma_data_free = dma_data_free; 2782 params->dma_read_delay = dma_read_delay; 2783 params->dma_write_delay = dma_write_delay; 2784 params->dma_read_factor = dma_read_factor; 2785 params->dma_write_factor = dma_write_factor; 2786 params->rx_filter = rx_filter; 2787 params->eaddr = hardware_address; 2788 params->tx_fifo_size = tx_fifo_size; 2789 params->rx_fifo_size = rx_fifo_size; 2790 params->m5reg = m5reg; 2791 params->dma_no_allocate = dma_no_allocate; 2792 return new NSGigE(params); 2793} 2794 2795REGISTER_SIM_OBJECT("NSGigE", NSGigE) 2796