ns_gige.cc revision 1154
1/* 2 * Copyright (c) 2004 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29/* @file 30 * Device module for modelling the National Semiconductor 31 * DP83820 ethernet controller. 
 * Does not support priority queueing
 */
#include <cstdio>
#include <deque>
#include <string>

#include "base/inet.hh"
#include "cpu/exec_context.hh"
#include "cpu/intr_control.hh"
#include "dev/dma.hh"
#include "dev/etherlink.hh"
#include "dev/ns_gige.hh"
#include "dev/pciconfigall.hh"
#include "mem/bus/bus.hh"
#include "mem/bus/dma_interface.hh"
#include "mem/bus/pio_interface.hh"
#include "mem/bus/pio_interface_impl.hh"
#include "mem/functional_mem/memory_control.hh"
#include "mem/functional_mem/physical_memory.hh"
#include "sim/builder.hh"
#include "sim/debug.hh"
#include "sim/host.hh"
#include "sim/sim_stats.hh"
#include "targetarch/vtophys.hh"

// Human-readable names for the receive state machine states; indexed by
// the rxState enum values and used only for DPRINTF trace output.
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};

// Human-readable names for the transmit state machine states.
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};

// Human-readable names for the DMA engine states (shared by rx and tx).
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};

using namespace std;
using namespace Net;

///////////////////////////////////////////////////////////////////////
//
// NSGigE PCI Device
//

/**
 * Construct the device model.  Initializes both the rx and tx state
 * machines to idle, sizes the FIFOs from the supplied parameters, and
 * wires the device's PIO and DMA bus interfaces.
 *
 * Bus setup: if a header bus is given, PIO traffic goes over it and DMA
 * uses header/payload (payload if present, else header for both); with
 * only a payload bus, both PIO and DMA use it.  With neither, the
 * interfaces are left unset (presumably a purely functional
 * configuration -- TODO confirm against the callers).
 */
NSGigE::NSGigE(Params *p)
    : PciDev(p), ioEnable(false),
      txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), txState(txIdle), txEnable(false),
      CTDD(false),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      rxEnable(false), CRDD(false), rxPktBytes(0),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
      txDelay(p->tx_delay), rxDelay(p->rx_delay),
      rxKickTick(0), txKickTick(0),
      txEvent(this), rxFilterEnable(p->rx_filter), acceptBroadcast(false),
      acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false),
      physmem(p->pmem), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{
    if (p->header_bus) {
        // PIO accesses come in over the header bus
        pioInterface = newPioInterface(name(), p->hier,
                                       p->header_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = p->pio_latency * p->header_bus->clockRatio;

        // DMA: headers on the header bus, payload on the payload bus if
        // one exists, otherwise everything on the header bus
        if (p->payload_bus)
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->payload_bus, 1);
        else
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->header_bus, 1);
    } else if (p->payload_bus) {
        // no header bus: both PIO and DMA go over the payload bus
        pioInterface = newPioInterface(name(), p->hier,
                                       p->payload_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = p->pio_latency * p->payload_bus->clockRatio;

        dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                             p->payload_bus,
                                             p->payload_bus, 1);
    }


    intrDelay = US2Ticks(p->intr_delay);
    dmaReadDelay = p->dma_read_delay;
    dmaWriteDelay = p->dma_write_delay;
    dmaReadFactor = p->dma_read_factor;
    dmaWriteFactor = p->dma_write_factor;

    // put the registers into their power-on state and latch the MAC
    // address into the perfect-match filter ROM
    regsReset();
    memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN);
}

NSGigE::~NSGigE()
{}

/**
 * Register the device's statistics with the simulator's stats package.
 * Most stats are gated (.prereq) on bytes actually being moved so that
 * idle devices don't clutter the output; the bandwidth and packet-rate
 * formulas at the bottom are derived from the raw counters.
 */
void
NSGigE::regStats()
{
    txBytes
        .name(name() + ".txBytes")
        .desc("Bytes Transmitted")
        .prereq(txBytes)
        ;

    rxBytes
        .name(name() + ".rxBytes")
        .desc("Bytes Received")
        .prereq(rxBytes)
        ;

    txPackets
        .name(name() + ".txPackets")
        .desc("Number of Packets Transmitted")
        .prereq(txBytes)
        ;

    rxPackets
        .name(name() + ".rxPackets")
        .desc("Number of Packets Received")
        .prereq(rxBytes)
        ;

    txIpChecksums
        .name(name() + ".txIpChecksums")
        .desc("Number of tx IP Checksums done by device")
        .precision(0)
        .prereq(txBytes)
        ;

    rxIpChecksums
        .name(name() + ".rxIpChecksums")
        .desc("Number of rx IP Checksums done by device")
        .precision(0)
        .prereq(rxBytes)
        ;

    txTcpChecksums
        .name(name() + ".txTcpChecksums")
        .desc("Number of tx TCP Checksums done by device")
        .precision(0)
        .prereq(txBytes)
        ;

    rxTcpChecksums
        .name(name() + ".rxTcpChecksums")
        .desc("Number of rx TCP Checksums done by device")
        .precision(0)
        .prereq(rxBytes)
        ;

    txUdpChecksums
        .name(name() + ".txUdpChecksums")
        .desc("Number of tx UDP Checksums done by device")
        .precision(0)
        .prereq(txBytes)
        ;

    rxUdpChecksums
        .name(name() + ".rxUdpChecksums")
        .desc("Number of rx UDP Checksums done by device")
        .precision(0)
        .prereq(rxBytes)
        ;

    descDmaReads
        .name(name() + ".descDMAReads")
        .desc("Number of descriptors the device read w/ DMA")
        .precision(0)
        ;

    descDmaWrites
        .name(name() + ".descDMAWrites")
        .desc("Number of descriptors the device wrote w/ DMA")
        .precision(0)
        ;

    descDmaRdBytes
        .name(name() + ".descDmaReadBytes")
        .desc("number of descriptor bytes read w/ DMA")
        .precision(0)
        ;

    descDmaWrBytes
        .name(name() + ".descDmaWriteBytes")
        .desc("number of descriptor bytes write w/ DMA")
        .precision(0)
        ;


    txBandwidth
        .name(name() + ".txBandwidth")
        .desc("Transmit Bandwidth (bits/s)")
        .precision(0)
        .prereq(txBytes)
        ;

    rxBandwidth
        .name(name() + ".rxBandwidth")
        .desc("Receive Bandwidth (bits/s)")
        .precision(0)
        .prereq(rxBytes)
        ;

    txPacketRate
        .name(name() + ".txPPS")
        .desc("Packet Tranmission Rate (packets/s)")
        .precision(0)
        .prereq(txBytes)
        ;

    rxPacketRate
        .name(name() + ".rxPPS")
        .desc("Packet Reception Rate (packets/s)")
        .precision(0)
        .prereq(rxBytes)
        ;

    // derived formulas: bits per second and packets per second
    txBandwidth = txBytes * Stats::constant(8) / simSeconds;
    rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
    txPacketRate = txPackets / simSeconds;
    rxPacketRate = rxPackets / simSeconds;
}

/**
 * This is to read the PCI general configuration registers
 */
void
NSGigE::ReadConfig(int offset, int size, uint8_t *data)
{
    // only the standard PCI header is modeled; device-specific config
    // space is not implemented
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::ReadConfig(offset, size, data);
    else
        panic("Device specific PCI config space not implemented!\n");
}

/**
 * This is to write to the PCI general configuration registers
 */
void
NSGigE::WriteConfig(int offset, int size, uint32_t data)
{
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::WriteConfig(offset, size, data);
    else
        panic("Device specific PCI config space not implemented!\n");

    // Need to catch writes to BARs to update the PIO interface
    switch (offset) {
        // seems to work fine without all these PCI settings, but i
        // put in the IO to double check, an assertion will fail if we
        // need to properly implement it
      case PCI_COMMAND:
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;

#if 0
        if (config.data[offset] & PCI_CMD_BME) {
            bmEnabled = true;
        }
        else {
            bmEnabled = false;
        }

        if (config.data[offset] & PCI_CMD_MSE) {
            memEnable = true;
        }
        else {
            memEnable = false;
        }
#endif
        break;

      case PCI0_BASE_ADDR0:
        if (BARAddrs[0] != 0) {
            // register the new BAR window with the PIO interface and
            // strip the uncached-space bits from the stored address
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));

            BARAddrs[0] &= EV5::PAddrUncachedMask;
        }
        break;
      case PCI0_BASE_ADDR1:
        if (BARAddrs[1] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));

            BARAddrs[1] &= EV5::PAddrUncachedMask;
        }
        break;
    }
}

/**
 * This reads the device registers, which are detailed in the NS83820
 * spec sheet
 */
Fault
NSGigE::read(MemReqPtr &req, uint8_t *data)
{
    assert(ioEnable);

    //The mask is to give you only the offset into the device register file
    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);


    // there are some reserved registers, you can see ns_gige_reg.h and
    // the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // this window aliases PCI configuration space
        ReadConfig(daddr & 0xff, req->size, data);
        return No_Fault;
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's.  hopefully the kernel
        // doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        uint32_t &reg = *(uint32_t *) data;
        reg = 0;
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    // only 32-bit register accesses are modeled
    switch (req->size) {
      case sizeof(uint32_t):
        {
            uint32_t &reg = *(uint32_t *)data;

            switch (daddr) {
              case CR:
                reg = regs.command;
                //these are supposed to be cleared on a read
                reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
                break;

              case CFG:
                reg = regs.config;
                break;

              case MEAR:
                reg = regs.mear;
                break;

              case PTSCR:
                reg = regs.ptscr;
                break;

              case ISR:
                // reading the ISR acknowledges (clears) all interrupts
                reg = regs.isr;
                devIntrClear(ISR_ALL);
                break;

              case IMR:
                reg = regs.imr;
                break;

              case IER:
                reg = regs.ier;
                break;

              case IHR:
                reg = regs.ihr;
                break;

              case TXDP:
                reg = regs.txdp;
                break;

              case TXDP_HI:
                reg = regs.txdp_hi;
                break;

              case TXCFG:
                reg = regs.txcfg;
                break;

              case GPIOR:
                reg = regs.gpior;
                break;

              case RXDP:
                reg = regs.rxdp;
                break;

              case RXDP_HI:
                reg = regs.rxdp_hi;
                break;

              case RXCFG:
                reg = regs.rxcfg;
                break;

              case PQCR:
                reg = regs.pqcr;
                break;

              case WCSR:
                reg = regs.wcsr;
                break;

              case PCR:
                reg = regs.pcr;
                break;

                // see the spec sheet for how RFCR and RFDR work
                // basically, you write to RFCR to tell the machine
                // what you want to do next, then you act upon RFDR,
                // and the device will be prepared b/c of what you
                // wrote to RFCR
              case RFCR:
                reg = regs.rfcr;
                break;

              case RFDR:
                // RFCR_RFADDR selects which 16-bit slice of the
                // perfect-match MAC address is visible through RFDR
                switch (regs.rfcr & RFCR_RFADDR) {
                  case 0x000:
                    reg = rom.perfectMatch[1];
                    reg = reg << 8;
                    reg += rom.perfectMatch[0];
                    break;
                  case 0x002:
                    reg = rom.perfectMatch[3] << 8;
                    reg += rom.perfectMatch[2];
                    break;
                  case 0x004:
                    reg = rom.perfectMatch[5] << 8;
                    reg += rom.perfectMatch[4];
                    break;
                  default:
                    panic("reading RFDR for something other than PMATCH!\n");
                    // didn't implement other RFDR functionality b/c
                    // driver didn't use it
                }
                break;

              case SRR:
                reg = regs.srr;
                break;

              case MIBC:
                // MIBS/ACLR are self-clearing control bits
                reg = regs.mibc;
                reg &= ~(MIBC_MIBS | MIBC_ACLR);
                break;

              case VRCR:
                reg = regs.vrcr;
                break;

              case VTCR:
                reg = regs.vtcr;
                break;

              case VDR:
                reg = regs.vdr;
                break;

              case CCSR:
                reg = regs.ccsr;
                break;

              case TBICR:
                reg = regs.tbicr;
                break;

              case TBISR:
                reg = regs.tbisr;
                break;

              case TANAR:
                reg = regs.tanar;
                break;

              case TANLPAR:
                reg = regs.tanlpar;
                break;

              case TANER:
                reg = regs.taner;
                break;

              case TESR:
                reg = regs.tesr;
                break;

              default:
                panic("reading unimplemented register: addr=%#x", daddr);
            }

            DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
                    daddr, reg, reg);
        }
        break;

      default:
        panic("accessing register with invalid size: addr=%#x, size=%d",
              daddr, req->size);
    }

    return No_Fault;
}

Fault
NSGigE::write(MemReqPtr &req, const uint8_t *data)
{
    assert(ioEnable);

    // mask off everything but the offset into the register file
    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);

    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // this window aliases PCI configuration space
        WriteConfig(daddr & 0xff, req->size, *(uint32_t *)data);
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    // only 32-bit register accesses are modeled
    if (req->size == sizeof(uint32_t)) {
        uint32_t reg = *(uint32_t *)data;
        DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);

        switch (daddr) {
          case CR:
            regs.command = reg;
            // disable takes priority over enable; enabling kicks the
            // corresponding state machine if it is idle
            if (reg & CR_TXD) {
                txEnable = false;
            } else if (reg & CR_TXE) {
                txEnable = true;

                // the kernel is enabling the transmit machine
                if (txState == txIdle)
                    txKick();
            }

            if (reg & CR_RXD) {
                rxEnable = false;
            } else if (reg & CR_RXE) {
                rxEnable = true;

                if (rxState == rxIdle)
                    rxKick();
            }

            if (reg & CR_TXR)
                txReset();

            if (reg & CR_RXR)
                rxReset();

            if (reg & CR_SWI)
                devIntrPost(ISR_SWI);

            if (reg & CR_RST) {
                // full software reset: both machines plus registers
                txReset();
                rxReset();

                regsReset();
            }
            break;

          case CFG:
            if (reg & CFG_LNKSTS ||
                reg & CFG_SPDSTS ||
                reg & CFG_DUPSTS ||
                reg & CFG_RESERVED ||
                reg & CFG_T64ADDR ||
                reg & CFG_PCI64_DET)
                panic("writing to read-only or reserved CFG bits!\n");

            regs.config |= reg & ~(CFG_LNKSTS | CFG_SPDSTS | CFG_DUPSTS |
                                   CFG_RESERVED | CFG_T64ADDR | CFG_PCI64_DET);

// all these #if 0's are because i don't THINK the kernel needs to
// have these implemented. if there is a problem relating to one of
// these, you may need to add functionality in.
#if 0
            if (reg & CFG_TBI_EN) ;
            if (reg & CFG_MODE_1000) ;
#endif

            if (reg & CFG_AUTO_1000)
                panic("CFG_AUTO_1000 not implemented!\n");

#if 0
            if (reg & CFG_PINT_DUPSTS ||
                reg & CFG_PINT_LNKSTS ||
                reg & CFG_PINT_SPDSTS)
                ;

            if (reg & CFG_TMRTEST) ;
            if (reg & CFG_MRM_DIS) ;
            if (reg & CFG_MWI_DIS) ;

            if (reg & CFG_T64ADDR)
                panic("CFG_T64ADDR is read only register!\n");

            if (reg & CFG_PCI64_DET)
                panic("CFG_PCI64_DET is read only register!\n");

            if (reg & CFG_DATA64_EN) ;
            if (reg & CFG_M64ADDR) ;
            if (reg & CFG_PHY_RST) ;
            if (reg & CFG_PHY_DIS) ;
#endif

            // extended status words in descriptors (needed for the
            // checksum-offload flags)
            if (reg & CFG_EXTSTS_EN)
                extstsEnable = true;
            else
                extstsEnable = false;

#if 0
            if (reg & CFG_REQALG) ;
            if (reg & CFG_SB) ;
            if (reg & CFG_POW) ;
            if (reg & CFG_EXD) ;
            if (reg & CFG_PESEL) ;
            if (reg & CFG_BROM_DIS) ;
            if (reg & CFG_EXT_125) ;
            if (reg & CFG_BEM) ;
#endif
            break;

          case MEAR:
            regs.mear = reg;
            // since phy is completely faked, MEAR_MD* don't matter
            // and since the driver never uses MEAR_EE*, they don't
            // matter
#if 0
            if (reg & MEAR_EEDI) ;
            if (reg & MEAR_EEDO) ; // this one is read only
            if (reg & MEAR_EECLK) ;
            if (reg & MEAR_EESEL) ;
            if (reg & MEAR_MDIO) ;
            if (reg & MEAR_MDDIR) ;
            if (reg & MEAR_MDC) ;
#endif
            break;

          case PTSCR:
            regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
            // these control BISTs for various parts of chip - we
            // don't care or do just fake that the BIST is done
            if (reg & PTSCR_RBIST_EN)
                regs.ptscr |= PTSCR_RBIST_DONE;
            if (reg & PTSCR_EEBIST_EN)
                regs.ptscr &= ~PTSCR_EEBIST_EN;
            if (reg & PTSCR_EELOAD_EN)
                regs.ptscr &= ~PTSCR_EELOAD_EN;
            break;

          case ISR: /* writing to the ISR has no effect */
            panic("ISR is a read only register!\n");

          case IMR:
            // interrupt mask changed: re-evaluate the CPU interrupt line
            regs.imr = reg;
            devIntrChangeMask();
            break;

          case IER:
            regs.ier = reg;
            break;

          case IHR:
            regs.ihr = reg;
            /* not going to implement real interrupt holdoff */
            break;

          case TXDP:
            // descriptor pointers are dword-aligned; loading TXDP
            // invalidates the cached "current descriptor done" state
            regs.txdp = (reg & 0xFFFFFFFC);
            assert(txState == txIdle);
            CTDD = false;
            break;

          case TXDP_HI:
            regs.txdp_hi = reg;
            break;

          case TXCFG:
            regs.txcfg = reg;
#if 0
            if (reg & TXCFG_CSI) ;
            if (reg & TXCFG_HBI) ;
            if (reg & TXCFG_MLB) ;
            if (reg & TXCFG_ATP) ;
            if (reg & TXCFG_ECRETRY) {
                /*
                 * this could easily be implemented, but considering
                 * the network is just a fake pipe, wouldn't make
                 * sense to do this
                 */
            }

            if (reg & TXCFG_BRST_DIS) ;
#endif

#if 0
            /* we handle our own DMA, ignore the kernel's exhortations */
            if (reg & TXCFG_MXDMA) ;
#endif

            // also, we currently don't care about fill/drain
            // thresholds though this may change in the future with
            // more realistic networks or a driver which changes it
            // according to feedback

            break;

          case GPIOR:
            regs.gpior = reg;
            /* these just control general purpose i/o pins, don't matter */
            break;

          case RXDP:
            regs.rxdp = reg;
            CRDD = false;
            break;

          case RXDP_HI:
            regs.rxdp_hi = reg;
            break;

          case RXCFG:
            regs.rxcfg = reg;
#if 0
            if (reg & RXCFG_AEP) ;
            if (reg & RXCFG_ARP) ;
            if (reg & RXCFG_STRIPCRC) ;
            if (reg & RXCFG_RX_RD) ;
            if (reg & RXCFG_ALP) ;
            if (reg & RXCFG_AIRL) ;

            /* we handle our own DMA, ignore what kernel says about it */
            if (reg & RXCFG_MXDMA) ;

            //also, we currently don't care about fill/drain thresholds
            //though this may change in the future with more realistic
            //networks or a driver which changes it according to feedback
            if (reg & (RXCFG_DRTH | RXCFG_DRTH0)) ;
#endif
            break;

          case PQCR:
            /* there is no priority queueing used in the linux 2.6 driver */
            regs.pqcr = reg;
            break;

          case WCSR:
            /* not going to implement wake on LAN */
            regs.wcsr = reg;
            break;

          case PCR:
            /* not going to implement pause control */
            regs.pcr = reg;
            break;

          case RFCR:
            // receive filter control: latch the accept-* policy bits
            // that the rx path consults for every incoming packet
            regs.rfcr = reg;

            rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
            acceptBroadcast = (reg & RFCR_AAB) ? true : false;
            acceptMulticast = (reg & RFCR_AAM) ? true : false;
            acceptUnicast = (reg & RFCR_AAU) ? true : false;
            acceptPerfect = (reg & RFCR_APM) ? true : false;
            acceptArp = (reg & RFCR_AARP) ? true : false;

#if 0
            if (reg & RFCR_APAT)
                panic("RFCR_APAT not implemented!\n");
#endif

            if (reg & RFCR_MHEN || reg & RFCR_UHEN)
                panic("hash filtering not implemented!\n");

            if (reg & RFCR_ULM)
                panic("RFCR_ULM not implemented!\n");

            break;

          case RFDR:
            panic("the driver never writes to RFDR, something is wrong!\n");

          case BRAR:
            panic("the driver never uses BRAR, something is wrong!\n");

          case BRDR:
            panic("the driver never uses BRDR, something is wrong!\n");

          case SRR:
            panic("SRR is read only register!\n");

          case MIBC:
            panic("the driver never uses MIBC, something is wrong!\n");

          case VRCR:
            regs.vrcr = reg;
            break;

          case VTCR:
            regs.vtcr = reg;
            break;

          case VDR:
            panic("the driver never uses VDR, something is wrong!\n");
            break;

          case CCSR:
            /* not going to implement clockrun stuff */
            regs.ccsr = reg;
            break;

          case TBICR:
            regs.tbicr = reg;
            if (reg & TBICR_MR_LOOPBACK)
                panic("TBICR_MR_LOOPBACK never used, something wrong!\n");

            if (reg & TBICR_MR_AN_ENABLE) {
                // fake instant autonegotiation success: echo our
                // advertised abilities as the link partner's and mark
                // AN complete / link up
                regs.tanlpar = regs.tanar;
                regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
            }

#if 0
            if (reg & TBICR_MR_RESTART_AN) ;
#endif

            break;

          case TBISR:
            panic("TBISR is read only register!\n");

          case TANAR:
            regs.tanar = reg;
            if (reg & TANAR_PS2)
                panic("this isn't used in driver, something wrong!\n");

            if (reg & TANAR_PS1)
                panic("this isn't used in driver, something wrong!\n");
            break;

          case TANLPAR:
            panic("this should only be written to by the fake phy!\n");

          case TANER:
            panic("TANER is read only register!\n");

          case TESR:
            regs.tesr = reg;
            break;

          default:
            panic("invalid register access daddr=%#x", daddr);
        }
    } else {
        panic("Invalid Request Size");
    }

    return No_Fault;
}

/**
 * Post device-level interrupts into the ISR.  Reserved bits are
 * rejected; unimplemented bits are warned about and dropped.  If any
 * posted-and-unmasked interrupt remains, the CPU interrupt is scheduled
 * (immediately for ISR_NODELAY sources, otherwise after intrDelay).
 */
void
NSGigE::devIntrPost(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot set a reserved interrupt");

    if (interrupts & ISR_NOIMPL)
        warn("interrupt not implemented %#x\n", interrupts);

    interrupts &= ~ISR_NOIMPL;
    regs.isr |= interrupts;

    DPRINTF(EthernetIntr,
            "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
            interrupts, regs.isr, regs.imr);

    if ((regs.isr & regs.imr)) {
        Tick when = curTick;
        if (!(regs.isr & regs.imr & ISR_NODELAY))
            when += intrDelay;
        cpuIntrPost(when);
    }
}

/**
 * Clear device-level interrupts from the ISR; if no unmasked interrupt
 * remains pending, the CPU interrupt line is deasserted.
 */
void
NSGigE::devIntrClear(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot clear a reserved interrupt");

    interrupts &= ~ISR_NOIMPL;
    regs.isr &= ~interrupts;

    DPRINTF(EthernetIntr,
            "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
            interrupts, regs.isr, regs.imr);

    if (!(regs.isr & regs.imr))
        cpuIntrClear();
}

/**
 * Re-evaluate the CPU interrupt line after the interrupt mask (IMR)
 * changed: assert immediately if anything is pending and unmasked,
 * otherwise deassert.
 */
void
NSGigE::devIntrChangeMask()
{
    DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
            regs.isr, regs.imr, regs.isr & regs.imr);

    if (regs.isr & regs.imr)
        cpuIntrPost(curTick);
    else
        cpuIntrClear();
}

void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick);
    assert(intrTick >= curTick || intrTick == 0);
    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    if (intrTick < curTick) {
        // should be unreachable given the asserts above; clamp and
        // drop into the debugger if it ever happens
        debug_break();
        intrTick = curTick;
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    // replace any previously scheduled (now stale) interrupt event
    if (intrEvent)
        intrEvent->squash();
    intrEvent = new IntrEvent(this, true);
    intrEvent->schedule(intrTick);
}

/**
 * Fired by the scheduled IntrEvent: actually assert the CPU interrupt
 * line, unless one is already pending.
 */
void
NSGigE::cpuInterrupt()
{
    assert(intrTick == curTick);

    // Whether or not there's a pending interrupt, we don't care about
    // it anymore
    intrEvent = 0;
    intrTick = 0;

    // Don't send an interrupt if there's already one
    if (cpuPendingIntr) {
        DPRINTF(EthernetIntr,
                "would send an interrupt now, but there's already pending\n");
    } else {
        // Send interrupt
        cpuPendingIntr = true;

        DPRINTF(EthernetIntr, "posting interrupt\n");
        intrPost();
    }
}

/**
 * Deassert the CPU interrupt line and squash any interrupt event still
 * in flight.  No-op if no interrupt is pending.
 */
void
NSGigE::cpuIntrClear()
{
    if (!cpuPendingIntr)
        return;

    if (intrEvent) {
        intrEvent->squash();
        intrEvent = 0;
    }

    intrTick = 0;

    cpuPendingIntr = false;

    DPRINTF(EthernetIntr, "clearing interrupt\n");
    intrClear();
}

// True while the device has the CPU interrupt line asserted.
bool
NSGigE::cpuIntrPending() const
{ return cpuPendingIntr; }

/**
 * Reset the transmit state machine to its power-on state.  The asserts
 * check that no descriptor processing or DMA is in flight mid-reset.
 */
void
NSGigE::txReset()
{

    DPRINTF(Ethernet, "transmit reset\n");

    CTDD = false;
    txEnable = false;;
    txFragPtr = 0;
    assert(txDescCnt == 0);
    txFifo.clear();
    txState = txIdle;
    assert(txDmaState == dmaIdle);
}

/**
 * Reset the receive state machine to its power-on state.
 */
void
NSGigE::rxReset()
{
    DPRINTF(Ethernet, "receive reset\n");

    CRDD = false;
    assert(rxPktBytes == 0);
    rxEnable = false;
    rxFragPtr = 0;
    assert(rxDescCnt == 0);
    assert(rxDmaState == dmaIdle);
    rxFifo.clear();
    rxState = rxIdle;
}

/**
 * Restore the register file to its documented power-on values and clear
 * the cached receive-filter policy flags.
 */
void
NSGigE::regsReset()
{
    memset(&regs, 0, sizeof(regs));
    regs.config = CFG_LNKSTS;
    regs.mear = MEAR_MDDIR | MEAR_EEDO;
    regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
                        // fill threshold to 32 bytes
    regs.rxcfg = 0x4;   // set drain threshold to 16 bytes
    regs.srr = 0x0103;  // set the silicon revision to rev B or 0x103
    regs.mibc = MIBC_FRZ;
    regs.vdr = 0x81;    // set the vlan tag type to 802.1q
    regs.tesr = 0xc000; // TBI capable of both full and half duplex

    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}

/**
 * Perform the actual functional copy for an rx DMA read (memory ->
 * device) and return the DMA engine to idle.
 */
void
NSGigE::rxDmaReadCopy()
{
    assert(rxDmaState == dmaReading);

    physmem->dma_read((uint8_t *)rxDmaData, rxDmaAddr, rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}

/**
 * Start an rx DMA read using the rxDma{Addr,Data,Len,Free} members set
 * up by the caller.  Returns true if the transfer completes later (via
 * an event), false if it completed synchronously here.
 */
bool
NSGigE::doRxDmaRead()
{
    assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
    rxDmaState = dmaReading;

    if (dmaInterface && !rxDmaFree) {
        // go through the timing DMA interface; if it is busy, park in
        // the waiting state and retry from rxKick later
        if (dmaInterface->busy())
            rxDmaState = dmaReadWaiting;
        else
            dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
                                &rxDmaReadEvent, true);
        return true;
    }

    if (dmaReadDelay == 0 && dmaReadFactor == 0) {
        // zero-latency configuration: copy immediately
        rxDmaReadCopy();
        return false;
    }

    // latency model: fixed delay plus a per-64-byte-cacheline factor
    Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
    Tick start = curTick + dmaReadDelay + factor;
    rxDmaReadEvent.schedule(start);
    return true;
}

/**
 * Completion handler for a delayed rx DMA read: do the copy, then give
 * a waiting tx DMA priority before resuming the rx state machine.
 */
void
NSGigE::rxDmaReadDone()
{
    assert(rxDmaState == dmaReading);
    rxDmaReadCopy();

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}

/**
 * Perform the actual functional copy for an rx DMA write (device ->
 * memory) and return the DMA engine to idle.
 */
void
NSGigE::rxDmaWriteCopy()
{
    assert(rxDmaState == dmaWriting);

    physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}

/**
 * Start an rx DMA write; same protocol and return convention as
 * doRxDmaRead().
 */
bool
NSGigE::doRxDmaWrite()
{
    assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
    rxDmaState = dmaWriting;

    if (dmaInterface && !rxDmaFree) {
        if (dmaInterface->busy())
            rxDmaState = dmaWriteWaiting;
        else
            dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
                                &rxDmaWriteEvent, true);
        return true;
    }

    if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
        rxDmaWriteCopy();
        return false;
    }

    // latency model: fixed delay plus a per-64-byte-cacheline factor
    Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
    Tick start = curTick + dmaWriteDelay + factor;
    rxDmaWriteEvent.schedule(start);
    return true;
}

/**
 * Completion handler for a delayed rx DMA write: do the copy, then give
 * a waiting tx DMA priority before resuming the rx state machine.
 */
void
NSGigE::rxDmaWriteDone()
{
    assert(rxDmaState == dmaWriting);
    rxDmaWriteCopy();

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}

/**
 * Advance the receive state machine (continues past the end of this
 * chunk).  First retries any DMA that was left waiting on a busy
 * interface.
 */
void
NSGigE::rxKick()
{
    DPRINTF(EthernetSM, "receive kick rxState=%s (rxBuf.size=%d)\n",
            NsRxStateStrings[rxState], rxFifo.size());

    if (rxKickTick > curTick) {
        DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
                rxKickTick);
        return;
    }

  next:
    switch(rxDmaState) {
      case
dmaReadWaiting: 1227 if (doRxDmaRead()) 1228 goto exit; 1229 break; 1230 case dmaWriteWaiting: 1231 if (doRxDmaWrite()) 1232 goto exit; 1233 break; 1234 default: 1235 break; 1236 } 1237 1238 // see state machine from spec for details 1239 // the way this works is, if you finish work on one state and can 1240 // go directly to another, you do that through jumping to the 1241 // label "next". however, if you have intermediate work, like DMA 1242 // so that you can't go to the next state yet, you go to exit and 1243 // exit the loop. however, when the DMA is done it will trigger 1244 // an event and come back to this loop. 1245 switch (rxState) { 1246 case rxIdle: 1247 if (!rxEnable) { 1248 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n"); 1249 goto exit; 1250 } 1251 1252 if (CRDD) { 1253 rxState = rxDescRefr; 1254 1255 rxDmaAddr = regs.rxdp & 0x3fffffff; 1256 rxDmaData = &rxDescCache + offsetof(ns_desc, link); 1257 rxDmaLen = sizeof(rxDescCache.link); 1258 rxDmaFree = dmaDescFree; 1259 1260 descDmaReads++; 1261 descDmaRdBytes += rxDmaLen; 1262 1263 if (doRxDmaRead()) 1264 goto exit; 1265 } else { 1266 rxState = rxDescRead; 1267 1268 rxDmaAddr = regs.rxdp & 0x3fffffff; 1269 rxDmaData = &rxDescCache; 1270 rxDmaLen = sizeof(ns_desc); 1271 rxDmaFree = dmaDescFree; 1272 1273 descDmaReads++; 1274 descDmaRdBytes += rxDmaLen; 1275 1276 if (doRxDmaRead()) 1277 goto exit; 1278 } 1279 break; 1280 1281 case rxDescRefr: 1282 if (rxDmaState != dmaIdle) 1283 goto exit; 1284 1285 rxState = rxAdvance; 1286 break; 1287 1288 case rxDescRead: 1289 if (rxDmaState != dmaIdle) 1290 goto exit; 1291 1292 DPRINTF(EthernetDesc, 1293 "rxDescCache: addr=%08x read descriptor\n", 1294 regs.rxdp & 0x3fffffff); 1295 DPRINTF(EthernetDesc, 1296 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n", 1297 rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts, 1298 rxDescCache.extsts); 1299 1300 if (rxDescCache.cmdsts & CMDSTS_OWN) { 1301 devIntrPost(ISR_RXIDLE); 1302 rxState = 
rxIdle; 1303 goto exit; 1304 } else { 1305 rxState = rxFifoBlock; 1306 rxFragPtr = rxDescCache.bufptr; 1307 rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK; 1308 } 1309 break; 1310 1311 case rxFifoBlock: 1312 if (!rxPacket) { 1313 /** 1314 * @todo in reality, we should be able to start processing 1315 * the packet as it arrives, and not have to wait for the 1316 * full packet ot be in the receive fifo. 1317 */ 1318 if (rxFifo.empty()) 1319 goto exit; 1320 1321 DPRINTF(EthernetSM, "****processing receive of new packet****\n"); 1322 1323 // If we don't have a packet, grab a new one from the fifo. 1324 rxPacket = rxFifo.front(); 1325 rxPktBytes = rxPacket->length; 1326 rxPacketBufPtr = rxPacket->data; 1327 1328#if TRACING_ON 1329 if (DTRACE(Ethernet)) { 1330 IpPtr ip(rxPacket); 1331 if (ip) { 1332 DPRINTF(Ethernet, "ID is %d\n", ip->id()); 1333 TcpPtr tcp(ip); 1334 if (tcp) { 1335 DPRINTF(Ethernet, "Src Port=%d, Dest Port=%d\n", 1336 tcp->sport(), tcp->dport()); 1337 } 1338 } 1339 } 1340#endif 1341 1342 // sanity check - i think the driver behaves like this 1343 assert(rxDescCnt >= rxPktBytes); 1344 1345 // Must clear the value before popping to decrement the 1346 // reference count 1347 rxFifo.pop(); 1348 } 1349 1350 1351 // dont' need the && rxDescCnt > 0 if driver sanity check 1352 // above holds 1353 if (rxPktBytes > 0) { 1354 rxState = rxFragWrite; 1355 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity 1356 // check holds 1357 rxXferLen = rxPktBytes; 1358 1359 rxDmaAddr = rxFragPtr & 0x3fffffff; 1360 rxDmaData = rxPacketBufPtr; 1361 rxDmaLen = rxXferLen; 1362 rxDmaFree = dmaDataFree; 1363 1364 if (doRxDmaWrite()) 1365 goto exit; 1366 1367 } else { 1368 rxState = rxDescWrite; 1369 1370 //if (rxPktBytes == 0) { /* packet is done */ 1371 assert(rxPktBytes == 0); 1372 DPRINTF(EthernetSM, "done with receiving packet\n"); 1373 1374 rxDescCache.cmdsts |= CMDSTS_OWN; 1375 rxDescCache.cmdsts &= ~CMDSTS_MORE; 1376 rxDescCache.cmdsts |= CMDSTS_OK; 1377 
rxDescCache.cmdsts &= 0xffff0000;
            rxDescCache.cmdsts += rxPacket->length;  //i.e. set CMDSTS_SIZE

#if 0
            /*
             * all the driver uses these are for its own stats keeping
             * which we don't care about, aren't necessary for
             * functionality and doing this would just slow us down.
             * if they end up using this in a later version for
             * functional purposes, just undef
             */
            if (rxFilterEnable) {
                rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK;
                const EthAddr &dst = rxFifoFront()->dst();
                if (dst->unicast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_SELF;
                if (dst->multicast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_MULTI;
                if (dst->broadcast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_MASK;
            }
#endif

            // If checksum offload reporting is enabled, classify the
            // packet (IP/TCP/UDP) and verify its checksums, recording
            // the result in the descriptor's extended status.
            IpPtr ip(rxPacket);
            if (extstsEnable && ip) {
                rxDescCache.extsts |= EXTSTS_IPPKT;
                rxIpChecksums++;
                if (cksum(ip) != 0) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    rxDescCache.extsts |= EXTSTS_IPERR;
                }
                TcpPtr tcp(ip);
                UdpPtr udp(ip);
                if (tcp) {
                    rxDescCache.extsts |= EXTSTS_TCPPKT;
                    rxTcpChecksums++;
                    if (cksum(tcp) != 0) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        rxDescCache.extsts |= EXTSTS_TCPERR;

                    }
                } else if (udp) {
                    rxDescCache.extsts |= EXTSTS_UDPPKT;
                    rxUdpChecksums++;
                    if (cksum(udp) != 0) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        rxDescCache.extsts |= EXTSTS_UDPERR;
                    }
                }
            }
            // Drop our reference to the packet now that it has been
            // fully copied into guest memory.
            rxPacket = 0;

            /*
             * the driver seems to always receive into desc buffers
             * of size 1514, so you never have a pkt that is split
             * into multiple descriptors on the receive side, so
             * i don't implement that case, hence the assert above.
             */

            DPRINTF(EthernetDesc,
                    "rxDescCache: addr=%08x writeback cmdsts extsts\n",
                    regs.rxdp & 0x3fffffff);
            DPRINTF(EthernetDesc,
                    "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                    rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
                    rxDescCache.extsts);

            // Write back only the cmdsts and extsts fields (they are
            // adjacent in ns_desc, hence the combined length).
            rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
            rxDmaData = &(rxDescCache.cmdsts);
            rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts);
            rxDmaFree = dmaDescFree;

            descDmaWrites++;
            descDmaWrBytes += rxDmaLen;

            if (doRxDmaWrite())
                goto exit;
        }
        break;

      case rxFragWrite:
        // A fragment of packet data finished DMAing into the guest
        // buffer; advance the pointers and loop back for the rest.
        if (rxDmaState != dmaIdle)
            goto exit;

        rxPacketBufPtr += rxXferLen;
        rxFragPtr += rxXferLen;
        rxPktBytes -= rxXferLen;

        rxState = rxFifoBlock;
        break;

      case rxDescWrite:
        // The descriptor status writeback finished; raise interrupts
        // and either halt (if receive was disabled meanwhile) or
        // advance to the next descriptor.
        if (rxDmaState != dmaIdle)
            goto exit;

        assert(rxDescCache.cmdsts & CMDSTS_OWN);

        assert(rxPacket == 0);
        devIntrPost(ISR_RXOK);

        if (rxDescCache.cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_RXDESC);

        if (!rxEnable) {
            DPRINTF(EthernetSM, "Halting the RX state machine\n");
            rxState = rxIdle;
            goto exit;
        } else
            rxState = rxAdvance;
        break;

      case rxAdvance:
        if (rxDescCache.link == 0) {
            // End of the descriptor ring: go idle and remember (CRDD)
            // that the current descriptor is done so rxIdle refreshes
            // its link field next time.
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            CRDD = true;
            goto exit;
        } else {
            rxState = rxDescRead;
            regs.rxdp = rxDescCache.link;
            CRDD = false;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &rxDescCache;
            rxDmaLen = sizeof(ns_desc);
            rxDmaFree = dmaDescFree;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            NsRxStateStrings[rxState]);

    goto next;

 exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            NsRxStateStrings[rxState]);
}

/**
 * Try to hand the packet at the head of txFifo to the ethernet link.
 * On success: update tx byte/packet stats, pop the fifo, and post
 * ISR_TXOK (the descriptor writeback shortcut is explained in the
 * comment below).  If the fifo is still non-empty afterwards, a
 * retry is scheduled 1000 ticks out via txEvent.
 */
void
NSGigE::transmit()
{
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "nothing to transmit\n");
        return;
    }

    DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
            txFifo.size());
    if (interface->sendPacket(txFifo.front())) {
#if TRACING_ON
        if (DTRACE(Ethernet)) {
            IpPtr ip(txFifo.front());
            if (ip) {
                DPRINTF(Ethernet, "ID is %d\n", ip->id());
                TcpPtr tcp(ip);
                if (tcp) {
                    DPRINTF(Ethernet, "Src Port=%d, Dest Port=%d\n",
                            tcp->sport(), tcp->dport());
                }
            }
        }
#endif

        DDUMP(Ethernet, txFifo.front()->data, txFifo.front()->length);
        txBytes += txFifo.front()->length;
        txPackets++;

        DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
                txFifo.avail());
        txFifo.pop();

        /*
         * normally do a writeback of the descriptor here, and ONLY
         * after that is done, send this interrupt. but since our
         * stuff never actually fails, just do this interrupt here,
         * otherwise the code has to stray from this nice format.
         * besides, it's functionally the same.
1565 */ 1566 devIntrPost(ISR_TXOK); 1567 } else { 1568 DPRINTF(Ethernet, 1569 "May need to rethink always sending the descriptors back?\n"); 1570 } 1571 1572 if (!txFifo.empty() && !txEvent.scheduled()) { 1573 DPRINTF(Ethernet, "reschedule transmit\n"); 1574 txEvent.schedule(curTick + 1000); 1575 } 1576} 1577 1578void 1579NSGigE::txDmaReadCopy() 1580{ 1581 assert(txDmaState == dmaReading); 1582 1583 physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen); 1584 txDmaState = dmaIdle; 1585 1586 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n", 1587 txDmaAddr, txDmaLen); 1588 DDUMP(EthernetDMA, txDmaData, txDmaLen); 1589} 1590 1591bool 1592NSGigE::doTxDmaRead() 1593{ 1594 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting); 1595 txDmaState = dmaReading; 1596 1597 if (dmaInterface && !txDmaFree) { 1598 if (dmaInterface->busy()) 1599 txDmaState = dmaReadWaiting; 1600 else 1601 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick, 1602 &txDmaReadEvent, true); 1603 return true; 1604 } 1605 1606 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) { 1607 txDmaReadCopy(); 1608 return false; 1609 } 1610 1611 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor; 1612 Tick start = curTick + dmaReadDelay + factor; 1613 txDmaReadEvent.schedule(start); 1614 return true; 1615} 1616 1617void 1618NSGigE::txDmaReadDone() 1619{ 1620 assert(txDmaState == dmaReading); 1621 txDmaReadCopy(); 1622 1623 // If the receive state machine has a pending DMA, let it go first 1624 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting) 1625 rxKick(); 1626 1627 txKick(); 1628} 1629 1630void 1631NSGigE::txDmaWriteCopy() 1632{ 1633 assert(txDmaState == dmaWriting); 1634 1635 physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen); 1636 txDmaState = dmaIdle; 1637 1638 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n", 1639 txDmaAddr, txDmaLen); 1640 DDUMP(EthernetDMA, txDmaData, txDmaLen); 1641} 1642 1643bool 1644NSGigE::doTxDmaWrite() 1645{ 1646 
assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting); 1647 txDmaState = dmaWriting; 1648 1649 if (dmaInterface && !txDmaFree) { 1650 if (dmaInterface->busy()) 1651 txDmaState = dmaWriteWaiting; 1652 else 1653 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick, 1654 &txDmaWriteEvent, true); 1655 return true; 1656 } 1657 1658 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) { 1659 txDmaWriteCopy(); 1660 return false; 1661 } 1662 1663 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor; 1664 Tick start = curTick + dmaWriteDelay + factor; 1665 txDmaWriteEvent.schedule(start); 1666 return true; 1667} 1668 1669void 1670NSGigE::txDmaWriteDone() 1671{ 1672 assert(txDmaState == dmaWriting); 1673 txDmaWriteCopy(); 1674 1675 // If the receive state machine has a pending DMA, let it go first 1676 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting) 1677 rxKick(); 1678 1679 txKick(); 1680} 1681 1682void 1683NSGigE::txKick() 1684{ 1685 DPRINTF(EthernetSM, "transmit kick txState=%s\n", 1686 NsTxStateStrings[txState]); 1687 1688 if (txKickTick > curTick) { 1689 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n", 1690 txKickTick); 1691 1692 return; 1693 } 1694 1695 next: 1696 switch(txDmaState) { 1697 case dmaReadWaiting: 1698 if (doTxDmaRead()) 1699 goto exit; 1700 break; 1701 case dmaWriteWaiting: 1702 if (doTxDmaWrite()) 1703 goto exit; 1704 break; 1705 default: 1706 break; 1707 } 1708 1709 switch (txState) { 1710 case txIdle: 1711 if (!txEnable) { 1712 DPRINTF(EthernetSM, "Transmit disabled. 
Nothing to do.\n"); 1713 goto exit; 1714 } 1715 1716 if (CTDD) { 1717 txState = txDescRefr; 1718 1719 txDmaAddr = regs.txdp & 0x3fffffff; 1720 txDmaData = &txDescCache + offsetof(ns_desc, link); 1721 txDmaLen = sizeof(txDescCache.link); 1722 txDmaFree = dmaDescFree; 1723 1724 descDmaReads++; 1725 descDmaRdBytes += txDmaLen; 1726 1727 if (doTxDmaRead()) 1728 goto exit; 1729 1730 } else { 1731 txState = txDescRead; 1732 1733 txDmaAddr = regs.txdp & 0x3fffffff; 1734 txDmaData = &txDescCache; 1735 txDmaLen = sizeof(ns_desc); 1736 txDmaFree = dmaDescFree; 1737 1738 descDmaReads++; 1739 descDmaRdBytes += txDmaLen; 1740 1741 if (doTxDmaRead()) 1742 goto exit; 1743 } 1744 break; 1745 1746 case txDescRefr: 1747 if (txDmaState != dmaIdle) 1748 goto exit; 1749 1750 txState = txAdvance; 1751 break; 1752 1753 case txDescRead: 1754 if (txDmaState != dmaIdle) 1755 goto exit; 1756 1757 DPRINTF(EthernetDesc, 1758 "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n", 1759 txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts, 1760 txDescCache.extsts); 1761 1762 if (txDescCache.cmdsts & CMDSTS_OWN) { 1763 txState = txFifoBlock; 1764 txFragPtr = txDescCache.bufptr; 1765 txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK; 1766 } else { 1767 devIntrPost(ISR_TXIDLE); 1768 txState = txIdle; 1769 goto exit; 1770 } 1771 break; 1772 1773 case txFifoBlock: 1774 if (!txPacket) { 1775 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n"); 1776 txPacket = new PacketData; 1777 txPacket->data = new uint8_t[16384]; 1778 txPacketBufPtr = txPacket->data; 1779 } 1780 1781 if (txDescCnt == 0) { 1782 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n"); 1783 if (txDescCache.cmdsts & CMDSTS_MORE) { 1784 DPRINTF(EthernetSM, "there are more descriptors to come\n"); 1785 txState = txDescWrite; 1786 1787 txDescCache.cmdsts &= ~CMDSTS_OWN; 1788 1789 txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts); 1790 txDmaAddr &= 0x3fffffff; 1791 txDmaData = &(txDescCache.cmdsts); 1792 
txDmaLen = sizeof(txDescCache.cmdsts);
                txDmaFree = dmaDescFree;

                if (doTxDmaWrite())
                    goto exit;

            } else { /* this packet is totally done */
                DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
                /* deal with the packet that just finished */
                // If the driver requested checksum offload, compute
                // and stamp the UDP/TCP and IP checksums now, before
                // the frame is queued for transmission.
                if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
                    IpPtr ip(txPacket);
                    if (txDescCache.extsts & EXTSTS_UDPPKT) {
                        UdpPtr udp(ip);
                        udp->sum(0);
                        udp->sum(cksum(udp));
                        txUdpChecksums++;
                    } else if (txDescCache.extsts & EXTSTS_TCPPKT) {
                        TcpPtr tcp(ip);
                        tcp->sum(0);
                        tcp->sum(cksum(tcp));
                        txTcpChecksums++;
                    }
                    if (txDescCache.extsts & EXTSTS_IPPKT) {
                        ip->sum(0);
                        ip->sum(cksum(ip));
                        txIpChecksums++;
                    }
                }

                txPacket->length = txPacketBufPtr - txPacket->data;
                // this is just because the receive can't handle a
                // packet bigger - want to make sure
                assert(txPacket->length <= 1514);
                txFifo.push(txPacket);

                /*
                 * this following section is not to spec, but
                 * functionally shouldn't be any different. normally,
                 * the chip will wait til the transmit has occurred
                 * before writing back the descriptor because it has
                 * to wait to see that it was successfully transmitted
                 * to decide whether to set CMDSTS_OK or not.
                 * however, in the simulator since it is always
                 * successfully transmitted, and writing it exactly to
                 * spec would complicate the code, we just do it here
                 */

                txDescCache.cmdsts &= ~CMDSTS_OWN;
                txDescCache.cmdsts |= CMDSTS_OK;

                DPRINTF(EthernetDesc,
                        "txDesc writeback: cmdsts=%08x extsts=%08x\n",
                        txDescCache.cmdsts, txDescCache.extsts);

                // Write back cmdsts and extsts together (adjacent
                // fields in ns_desc).
                txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts);
                txDmaAddr &= 0x3fffffff;
                txDmaData = &(txDescCache.cmdsts);
                txDmaLen = sizeof(txDescCache.cmdsts) +
                    sizeof(txDescCache.extsts);
                txDmaFree = dmaDescFree;

                descDmaWrites++;
                descDmaWrBytes += txDmaLen;

                transmit();
                txPacket = 0;

                if (!txEnable) {
                    DPRINTF(EthernetSM, "halting TX state machine\n");
                    txState = txIdle;
                    goto exit;
                } else
                    txState = txAdvance;

                if (doTxDmaWrite())
                    goto exit;
            }
        } else {
            DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
            if (!txFifo.full()) {
                txState = txFragRead;

                /*
                 * The number of bytes transferred is either whatever
                 * is left in the descriptor (txDescCnt), or if there
                 * is not enough room in the fifo, just whatever room
                 * is left in the fifo
                 */
                txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());

                txDmaAddr = txFragPtr & 0x3fffffff;
                txDmaData = txPacketBufPtr;
                txDmaLen = txXferLen;
                txDmaFree = dmaDataFree;

                if (doTxDmaRead())
                    goto exit;
            } else {
                // Fifo full: try to drain it by transmitting, and
                // wait for space before reading more fragments.
                txState = txFifoBlock;
                transmit();

                goto exit;
            }

        }
        break;

      case txFragRead:
        // A fragment read completed: advance through the staging
        // buffer and the guest-side fragment, then loop back.
        if (txDmaState != dmaIdle)
            goto exit;

        txPacketBufPtr += txXferLen;
        txFragPtr += txXferLen;
        txDescCnt -= txXferLen;

        txState = txFifoBlock;
        break;

      case txDescWrite:
        // Wait for the cmdsts writeback to finish, then raise the
        // per-descriptor interrupt if requested.
        if (txDmaState != dmaIdle)
            goto exit;

        if (txDescCache.cmdsts & CMDSTS_INTR)
devIntrPost(ISR_TXDESC); 1916 1917 txState = txAdvance; 1918 break; 1919 1920 case txAdvance: 1921 if (txDescCache.link == 0) { 1922 devIntrPost(ISR_TXIDLE); 1923 txState = txIdle; 1924 goto exit; 1925 } else { 1926 txState = txDescRead; 1927 regs.txdp = txDescCache.link; 1928 CTDD = false; 1929 1930 txDmaAddr = txDescCache.link & 0x3fffffff; 1931 txDmaData = &txDescCache; 1932 txDmaLen = sizeof(ns_desc); 1933 txDmaFree = dmaDescFree; 1934 1935 if (doTxDmaRead()) 1936 goto exit; 1937 } 1938 break; 1939 1940 default: 1941 panic("invalid state"); 1942 } 1943 1944 DPRINTF(EthernetSM, "entering next txState=%s\n", 1945 NsTxStateStrings[txState]); 1946 1947 goto next; 1948 1949 exit: 1950 /** 1951 * @todo do we want to schedule a future kick? 1952 */ 1953 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n", 1954 NsTxStateStrings[txState]); 1955} 1956 1957void 1958NSGigE::transferDone() 1959{ 1960 if (txFifo.empty()) { 1961 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n"); 1962 return; 1963 } 1964 1965 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n"); 1966 1967 if (txEvent.scheduled()) 1968 txEvent.reschedule(curTick + 1); 1969 else 1970 txEvent.schedule(curTick + 1); 1971} 1972 1973bool 1974NSGigE::rxFilter(const PacketPtr &packet) 1975{ 1976 EthPtr eth = packet; 1977 bool drop = true; 1978 string type; 1979 1980 const EthAddr &dst = eth->dst(); 1981 if (dst.unicast()) { 1982 // If we're accepting all unicast addresses 1983 if (acceptUnicast) 1984 drop = false; 1985 1986 // If we make a perfect match 1987 if (acceptPerfect && dst == rom.perfectMatch) 1988 drop = false; 1989 1990 if (acceptArp && eth->type() == ETH_TYPE_ARP) 1991 drop = false; 1992 1993 } else if (dst.broadcast()) { 1994 // if we're accepting broadcasts 1995 if (acceptBroadcast) 1996 drop = false; 1997 1998 } else if (dst.multicast()) { 1999 // if we're accepting all multicasts 2000 if (acceptMulticast) 2001 drop = false; 2002 2003 } 2004 2005 if 
(drop) { 2006 DPRINTF(Ethernet, "rxFilter drop\n"); 2007 DDUMP(EthernetData, packet->data, packet->length); 2008 } 2009 2010 return drop; 2011} 2012 2013bool 2014NSGigE::recvPacket(PacketPtr packet) 2015{ 2016 rxBytes += packet->length; 2017 rxPackets++; 2018 2019 DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n", 2020 rxFifo.avail()); 2021 2022 if (!rxEnable) { 2023 DPRINTF(Ethernet, "receive disabled...packet dropped\n"); 2024 debug_break(); 2025 interface->recvDone(); 2026 return true; 2027 } 2028 2029 if (rxFilterEnable && rxFilter(packet)) { 2030 DPRINTF(Ethernet, "packet filtered...dropped\n"); 2031 interface->recvDone(); 2032 return true; 2033 } 2034 2035 if (rxFifo.avail() < packet->length) { 2036 DPRINTF(Ethernet, 2037 "packet will not fit in receive buffer...packet dropped\n"); 2038 devIntrPost(ISR_RXORN); 2039 return false; 2040 } 2041 2042 rxFifo.push(packet); 2043 interface->recvDone(); 2044 2045 rxKick(); 2046 return true; 2047} 2048 2049//===================================================================== 2050// 2051// 2052void 2053NSGigE::serialize(ostream &os) 2054{ 2055 // Serialize the PciDev base class 2056 PciDev::serialize(os); 2057 2058 /* 2059 * Finalize any DMA events now. 
2060 */ 2061 if (rxDmaReadEvent.scheduled()) 2062 rxDmaReadCopy(); 2063 if (rxDmaWriteEvent.scheduled()) 2064 rxDmaWriteCopy(); 2065 if (txDmaReadEvent.scheduled()) 2066 txDmaReadCopy(); 2067 if (txDmaWriteEvent.scheduled()) 2068 txDmaWriteCopy(); 2069 2070 /* 2071 * Serialize the device registers 2072 */ 2073 SERIALIZE_SCALAR(regs.command); 2074 SERIALIZE_SCALAR(regs.config); 2075 SERIALIZE_SCALAR(regs.mear); 2076 SERIALIZE_SCALAR(regs.ptscr); 2077 SERIALIZE_SCALAR(regs.isr); 2078 SERIALIZE_SCALAR(regs.imr); 2079 SERIALIZE_SCALAR(regs.ier); 2080 SERIALIZE_SCALAR(regs.ihr); 2081 SERIALIZE_SCALAR(regs.txdp); 2082 SERIALIZE_SCALAR(regs.txdp_hi); 2083 SERIALIZE_SCALAR(regs.txcfg); 2084 SERIALIZE_SCALAR(regs.gpior); 2085 SERIALIZE_SCALAR(regs.rxdp); 2086 SERIALIZE_SCALAR(regs.rxdp_hi); 2087 SERIALIZE_SCALAR(regs.rxcfg); 2088 SERIALIZE_SCALAR(regs.pqcr); 2089 SERIALIZE_SCALAR(regs.wcsr); 2090 SERIALIZE_SCALAR(regs.pcr); 2091 SERIALIZE_SCALAR(regs.rfcr); 2092 SERIALIZE_SCALAR(regs.rfdr); 2093 SERIALIZE_SCALAR(regs.srr); 2094 SERIALIZE_SCALAR(regs.mibc); 2095 SERIALIZE_SCALAR(regs.vrcr); 2096 SERIALIZE_SCALAR(regs.vtcr); 2097 SERIALIZE_SCALAR(regs.vdr); 2098 SERIALIZE_SCALAR(regs.ccsr); 2099 SERIALIZE_SCALAR(regs.tbicr); 2100 SERIALIZE_SCALAR(regs.tbisr); 2101 SERIALIZE_SCALAR(regs.tanar); 2102 SERIALIZE_SCALAR(regs.tanlpar); 2103 SERIALIZE_SCALAR(regs.taner); 2104 SERIALIZE_SCALAR(regs.tesr); 2105 2106 SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN); 2107 2108 SERIALIZE_SCALAR(ioEnable); 2109 2110 /* 2111 * Serialize the data Fifos 2112 */ 2113 rxFifo.serialize("rxFifo", os); 2114 txFifo.serialize("txFifo", os); 2115 2116 /* 2117 * Serialize the various helper variables 2118 */ 2119 bool txPacketExists = txPacket; 2120 SERIALIZE_SCALAR(txPacketExists); 2121 if (txPacketExists) { 2122 txPacket->serialize("txPacket", os); 2123 uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data); 2124 SERIALIZE_SCALAR(txPktBufPtr); 2125 } 2126 2127 bool rxPacketExists = 
rxPacket; 2128 SERIALIZE_SCALAR(rxPacketExists); 2129 if (rxPacketExists) { 2130 rxPacket->serialize("rxPacket", os); 2131 uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data); 2132 SERIALIZE_SCALAR(rxPktBufPtr); 2133 } 2134 2135 SERIALIZE_SCALAR(txXferLen); 2136 SERIALIZE_SCALAR(rxXferLen); 2137 2138 /* 2139 * Serialize DescCaches 2140 */ 2141 SERIALIZE_SCALAR(txDescCache.link); 2142 SERIALIZE_SCALAR(txDescCache.bufptr); 2143 SERIALIZE_SCALAR(txDescCache.cmdsts); 2144 SERIALIZE_SCALAR(txDescCache.extsts); 2145 SERIALIZE_SCALAR(rxDescCache.link); 2146 SERIALIZE_SCALAR(rxDescCache.bufptr); 2147 SERIALIZE_SCALAR(rxDescCache.cmdsts); 2148 SERIALIZE_SCALAR(rxDescCache.extsts); 2149 2150 /* 2151 * Serialize tx state machine 2152 */ 2153 int txState = this->txState; 2154 SERIALIZE_SCALAR(txState); 2155 SERIALIZE_SCALAR(txEnable); 2156 SERIALIZE_SCALAR(CTDD); 2157 SERIALIZE_SCALAR(txFragPtr); 2158 SERIALIZE_SCALAR(txDescCnt); 2159 int txDmaState = this->txDmaState; 2160 SERIALIZE_SCALAR(txDmaState); 2161 2162 /* 2163 * Serialize rx state machine 2164 */ 2165 int rxState = this->rxState; 2166 SERIALIZE_SCALAR(rxState); 2167 SERIALIZE_SCALAR(rxEnable); 2168 SERIALIZE_SCALAR(CRDD); 2169 SERIALIZE_SCALAR(rxPktBytes); 2170 SERIALIZE_SCALAR(rxDescCnt); 2171 int rxDmaState = this->rxDmaState; 2172 SERIALIZE_SCALAR(rxDmaState); 2173 2174 SERIALIZE_SCALAR(extstsEnable); 2175 2176 /* 2177 * If there's a pending transmit, store the time so we can 2178 * reschedule it later 2179 */ 2180 Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0; 2181 SERIALIZE_SCALAR(transmitTick); 2182 2183 /* 2184 * receive address filter settings 2185 */ 2186 SERIALIZE_SCALAR(rxFilterEnable); 2187 SERIALIZE_SCALAR(acceptBroadcast); 2188 SERIALIZE_SCALAR(acceptMulticast); 2189 SERIALIZE_SCALAR(acceptUnicast); 2190 SERIALIZE_SCALAR(acceptPerfect); 2191 SERIALIZE_SCALAR(acceptArp); 2192 2193 /* 2194 * Keep track of pending interrupt status. 
2195 */ 2196 SERIALIZE_SCALAR(intrTick); 2197 SERIALIZE_SCALAR(cpuPendingIntr); 2198 Tick intrEventTick = 0; 2199 if (intrEvent) 2200 intrEventTick = intrEvent->when(); 2201 SERIALIZE_SCALAR(intrEventTick); 2202 2203} 2204 2205void 2206NSGigE::unserialize(Checkpoint *cp, const std::string §ion) 2207{ 2208 // Unserialize the PciDev base class 2209 PciDev::unserialize(cp, section); 2210 2211 UNSERIALIZE_SCALAR(regs.command); 2212 UNSERIALIZE_SCALAR(regs.config); 2213 UNSERIALIZE_SCALAR(regs.mear); 2214 UNSERIALIZE_SCALAR(regs.ptscr); 2215 UNSERIALIZE_SCALAR(regs.isr); 2216 UNSERIALIZE_SCALAR(regs.imr); 2217 UNSERIALIZE_SCALAR(regs.ier); 2218 UNSERIALIZE_SCALAR(regs.ihr); 2219 UNSERIALIZE_SCALAR(regs.txdp); 2220 UNSERIALIZE_SCALAR(regs.txdp_hi); 2221 UNSERIALIZE_SCALAR(regs.txcfg); 2222 UNSERIALIZE_SCALAR(regs.gpior); 2223 UNSERIALIZE_SCALAR(regs.rxdp); 2224 UNSERIALIZE_SCALAR(regs.rxdp_hi); 2225 UNSERIALIZE_SCALAR(regs.rxcfg); 2226 UNSERIALIZE_SCALAR(regs.pqcr); 2227 UNSERIALIZE_SCALAR(regs.wcsr); 2228 UNSERIALIZE_SCALAR(regs.pcr); 2229 UNSERIALIZE_SCALAR(regs.rfcr); 2230 UNSERIALIZE_SCALAR(regs.rfdr); 2231 UNSERIALIZE_SCALAR(regs.srr); 2232 UNSERIALIZE_SCALAR(regs.mibc); 2233 UNSERIALIZE_SCALAR(regs.vrcr); 2234 UNSERIALIZE_SCALAR(regs.vtcr); 2235 UNSERIALIZE_SCALAR(regs.vdr); 2236 UNSERIALIZE_SCALAR(regs.ccsr); 2237 UNSERIALIZE_SCALAR(regs.tbicr); 2238 UNSERIALIZE_SCALAR(regs.tbisr); 2239 UNSERIALIZE_SCALAR(regs.tanar); 2240 UNSERIALIZE_SCALAR(regs.tanlpar); 2241 UNSERIALIZE_SCALAR(regs.taner); 2242 UNSERIALIZE_SCALAR(regs.tesr); 2243 2244 UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN); 2245 2246 UNSERIALIZE_SCALAR(ioEnable); 2247 2248 /* 2249 * unserialize the data fifos 2250 */ 2251 rxFifo.unserialize("rxFifo", cp, section); 2252 txFifo.unserialize("txFifo", cp, section); 2253 2254 /* 2255 * unserialize the various helper variables 2256 */ 2257 bool txPacketExists; 2258 UNSERIALIZE_SCALAR(txPacketExists); 2259 if (txPacketExists) { 2260 txPacket = new 
PacketData; 2261 txPacket->unserialize("txPacket", cp, section); 2262 uint32_t txPktBufPtr; 2263 UNSERIALIZE_SCALAR(txPktBufPtr); 2264 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr; 2265 } else 2266 txPacket = 0; 2267 2268 bool rxPacketExists; 2269 UNSERIALIZE_SCALAR(rxPacketExists); 2270 rxPacket = 0; 2271 if (rxPacketExists) { 2272 rxPacket = new PacketData; 2273 rxPacket->unserialize("rxPacket", cp, section); 2274 uint32_t rxPktBufPtr; 2275 UNSERIALIZE_SCALAR(rxPktBufPtr); 2276 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr; 2277 } else 2278 rxPacket = 0; 2279 2280 UNSERIALIZE_SCALAR(txXferLen); 2281 UNSERIALIZE_SCALAR(rxXferLen); 2282 2283 /* 2284 * Unserialize DescCaches 2285 */ 2286 UNSERIALIZE_SCALAR(txDescCache.link); 2287 UNSERIALIZE_SCALAR(txDescCache.bufptr); 2288 UNSERIALIZE_SCALAR(txDescCache.cmdsts); 2289 UNSERIALIZE_SCALAR(txDescCache.extsts); 2290 UNSERIALIZE_SCALAR(rxDescCache.link); 2291 UNSERIALIZE_SCALAR(rxDescCache.bufptr); 2292 UNSERIALIZE_SCALAR(rxDescCache.cmdsts); 2293 UNSERIALIZE_SCALAR(rxDescCache.extsts); 2294 2295 /* 2296 * unserialize tx state machine 2297 */ 2298 int txState; 2299 UNSERIALIZE_SCALAR(txState); 2300 this->txState = (TxState) txState; 2301 UNSERIALIZE_SCALAR(txEnable); 2302 UNSERIALIZE_SCALAR(CTDD); 2303 UNSERIALIZE_SCALAR(txFragPtr); 2304 UNSERIALIZE_SCALAR(txDescCnt); 2305 int txDmaState; 2306 UNSERIALIZE_SCALAR(txDmaState); 2307 this->txDmaState = (DmaState) txDmaState; 2308 2309 /* 2310 * unserialize rx state machine 2311 */ 2312 int rxState; 2313 UNSERIALIZE_SCALAR(rxState); 2314 this->rxState = (RxState) rxState; 2315 UNSERIALIZE_SCALAR(rxEnable); 2316 UNSERIALIZE_SCALAR(CRDD); 2317 UNSERIALIZE_SCALAR(rxPktBytes); 2318 UNSERIALIZE_SCALAR(rxDescCnt); 2319 int rxDmaState; 2320 UNSERIALIZE_SCALAR(rxDmaState); 2321 this->rxDmaState = (DmaState) rxDmaState; 2322 2323 UNSERIALIZE_SCALAR(extstsEnable); 2324 2325 /* 2326 * If there's a pending transmit, reschedule it now 2327 */ 2328 Tick 
transmitTick; 2329 UNSERIALIZE_SCALAR(transmitTick); 2330 if (transmitTick) 2331 txEvent.schedule(curTick + transmitTick); 2332 2333 /* 2334 * unserialize receive address filter settings 2335 */ 2336 UNSERIALIZE_SCALAR(rxFilterEnable); 2337 UNSERIALIZE_SCALAR(acceptBroadcast); 2338 UNSERIALIZE_SCALAR(acceptMulticast); 2339 UNSERIALIZE_SCALAR(acceptUnicast); 2340 UNSERIALIZE_SCALAR(acceptPerfect); 2341 UNSERIALIZE_SCALAR(acceptArp); 2342 2343 /* 2344 * Keep track of pending interrupt status. 2345 */ 2346 UNSERIALIZE_SCALAR(intrTick); 2347 UNSERIALIZE_SCALAR(cpuPendingIntr); 2348 Tick intrEventTick; 2349 UNSERIALIZE_SCALAR(intrEventTick); 2350 if (intrEventTick) { 2351 intrEvent = new IntrEvent(this, true); 2352 intrEvent->schedule(intrEventTick); 2353 } 2354 2355 /* 2356 * re-add addrRanges to bus bridges 2357 */ 2358 if (pioInterface) { 2359 pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0])); 2360 pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1])); 2361 } 2362} 2363 2364Tick 2365NSGigE::cacheAccess(MemReqPtr &req) 2366{ 2367 DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n", 2368 req->paddr, req->paddr - addr); 2369 return curTick + pioLatency; 2370} 2371 2372BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt) 2373 2374 SimObjectParam<EtherInt *> peer; 2375 SimObjectParam<NSGigE *> device; 2376 2377END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt) 2378 2379BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt) 2380 2381 INIT_PARAM_DFLT(peer, "peer interface", NULL), 2382 INIT_PARAM(device, "Ethernet device of this interface") 2383 2384END_INIT_SIM_OBJECT_PARAMS(NSGigEInt) 2385 2386CREATE_SIM_OBJECT(NSGigEInt) 2387{ 2388 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device); 2389 2390 EtherInt *p = (EtherInt *)peer; 2391 if (p) { 2392 dev_int->setPeer(p); 2393 p->setPeer(dev_int); 2394 } 2395 2396 return dev_int; 2397} 2398 2399REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt) 2400 2401 2402BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE) 2403 2404 Param<Tick> 
tx_delay;
    Param<Tick> rx_delay;
    Param<Tick> intr_delay;
    SimObjectParam<MemoryController *> mmu;
    SimObjectParam<PhysicalMemory *> physmem;
    Param<bool> rx_filter;
    Param<string> hardware_address;
    SimObjectParam<Bus*> header_bus;
    SimObjectParam<Bus*> payload_bus;
    SimObjectParam<HierParams *> hier;
    Param<Tick> pio_latency;
    Param<bool> dma_desc_free;
    Param<bool> dma_data_free;
    Param<Tick> dma_read_delay;
    Param<Tick> dma_write_delay;
    Param<Tick> dma_read_factor;
    Param<Tick> dma_write_factor;
    SimObjectParam<PciConfigAll *> configspace;
    SimObjectParam<PciConfigData *> configdata;
    SimObjectParam<Platform *> platform;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;
    Param<uint32_t> tx_fifo_size;
    Param<uint32_t> rx_fifo_size;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)

    INIT_PARAM_DFLT(tx_delay, "Transmit Delay", 1000),
    INIT_PARAM_DFLT(rx_delay, "Receive Delay", 1000),
    INIT_PARAM_DFLT(intr_delay, "Interrupt Delay in microseconds", 0),
    INIT_PARAM(mmu, "Memory Controller"),
    INIT_PARAM(physmem, "Physical Memory"),
    INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true),
    INIT_PARAM_DFLT(hardware_address, "Ethernet Hardware Address",
                    "00:99:00:00:00:01"),
    INIT_PARAM_DFLT(header_bus, "The IO Bus to attach to for headers", NULL),
    INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL),
    INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams),
    INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
    INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false),
    INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false),
    INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0),
    INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0),
    INIT_PARAM(configspace, "PCI Configspace"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(platform, "Platform"),
    INIT_PARAM(pci_bus, "PCI bus"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),
    INIT_PARAM_DFLT(tx_fifo_size, "max size in bytes of txFifo", 131072),
    INIT_PARAM_DFLT(rx_fifo_size, "max size in bytes of rxFifo", 131072)

END_INIT_SIM_OBJECT_PARAMS(NSGigE)


// Build an NSGigE from the configuration parameters above; the
// Params object is handed to (and owned by) the device.
CREATE_SIM_OBJECT(NSGigE)
{
    NSGigE::Params *params = new NSGigE::Params;

    params->name = getInstanceName();
    params->mmu = mmu;
    params->configSpace = configspace;
    params->configData = configdata;
    params->plat = platform;
    params->busNum = pci_bus;
    params->deviceNum = pci_dev;
    params->functionNum = pci_func;

    params->intr_delay = intr_delay;
    params->pmem = physmem;
    params->tx_delay = tx_delay;
    params->rx_delay = rx_delay;
    params->hier = hier;
    params->header_bus = header_bus;
    params->payload_bus = payload_bus;
    params->pio_latency = pio_latency;
    params->dma_desc_free = dma_desc_free;
    params->dma_data_free = dma_data_free;
    params->dma_read_delay = dma_read_delay;
    params->dma_write_delay = dma_write_delay;
    params->dma_read_factor = dma_read_factor;
    params->dma_write_factor = dma_write_factor;
    params->rx_filter = rx_filter;
    params->eaddr = hardware_address;
    params->tx_fifo_size = tx_fifo_size;
    params->rx_fifo_size = rx_fifo_size;
    return new NSGigE(params);
}

REGISTER_SIM_OBJECT("NSGigE", NSGigE)