ns_gige.cc revision 992
1/* 2 * Copyright (c) 2004 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29/* @file 30 * Device module for modelling the National Semiconductor 31 * DP83820 ethernet controller. 
 * Does not support priority queueing
 */

#include <cstdio>
#include <deque>
#include <string>

#include "base/inet.hh"
#include "cpu/exec_context.hh"
#include "cpu/intr_control.hh"
#include "dev/dma.hh"
#include "dev/ns_gige.hh"
#include "dev/etherlink.hh"
#include "mem/bus/bus.hh"
#include "mem/bus/dma_interface.hh"
#include "mem/bus/pio_interface.hh"
#include "mem/bus/pio_interface_impl.hh"
#include "mem/functional_mem/memory_control.hh"
#include "mem/functional_mem/physical_memory.hh"
#include "sim/builder.hh"
#include "sim/host.hh"
#include "sim/sim_stats.hh"
#include "targetarch/vtophys.hh"
#include "dev/pciconfigall.hh"
#include "dev/tsunami_cchip.hh"

// Printable names for the receive state machine states, indexed by the
// rx state enum; used only for DPRINTF tracing.
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};

// Printable names for the transmit state machine states.
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};

// Printable names for the DMA engine states.
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};

using namespace std;

//helper function declarations
//These functions reverse Endianness so we can evaluate network data correctly
uint16_t reverseEnd16(uint16_t);
uint32_t reverseEnd32(uint32_t);

///////////////////////////////////////////////////////////////////////
//
// NSGigE PCI Device
//

/**
 * Construct the device model: hook it up to the Tsunami chipset, create
 * PIO/DMA bus interfaces on whichever bus(es) were configured, latch the
 * DMA timing parameters, reset the device registers, and load the MAC
 * address into the perfect-match filter ROM.
 */
NSGigE::NSGigE(const std::string &name, IntrControl *i, Tick intr_delay,
               PhysicalMemory *pmem, Tick tx_delay, Tick rx_delay,
               MemoryController *mmu, HierParams *hier, Bus *header_bus,
               Bus *payload_bus, Tick pio_latency, bool dma_desc_free,
               bool dma_data_free, Tick dma_read_delay, Tick dma_write_delay,
               Tick dma_read_factor, Tick dma_write_factor, PciConfigAll *cf,
               PciConfigData *cd, Tsunami *t, uint32_t bus, uint32_t dev,
               uint32_t func, bool rx_filter, const int eaddr[6],
               uint32_t tx_fifo_size, uint32_t rx_fifo_size)
    : PciDev(name, mmu, cf, cd, bus, dev, func), tsunami(t), ioEnable(false),
      maxTxFifoSize(tx_fifo_size), maxRxFifoSize(rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), txState(txIdle), CTDD(false),
      txFifoAvail(tx_fifo_size), txHalt(false),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      CRDD(false), rxPktBytes(0), rxFifoCnt(0), rxHalt(false),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(dma_desc_free), dmaDataFree(dma_data_free),
      txDelay(tx_delay), rxDelay(rx_delay), rxKickTick(0), txKickTick(0),
      txEvent(this), rxFilterEnable(rx_filter), acceptBroadcast(false),
      acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false),
      physmem(pmem), intctrl(i), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0), pioLatency(pio_latency)
{
    // register ourselves with the chipset so it can deliver our interrupts
    tsunami->ethernet = this;

    // Wire up PIO and DMA: PIO goes on header_bus if present (else
    // payload_bus); DMA uses the payload bus for data when one is
    // provided, otherwise everything shares a single bus.
    if (header_bus) {
        pioInterface = newPioInterface(name, hier, header_bus, this,
                                       &NSGigE::cacheAccess);

        if (payload_bus)
            dmaInterface = new DMAInterface<Bus>(name + ".dma",
                                                 header_bus, payload_bus, 1);
        else
            dmaInterface = new DMAInterface<Bus>(name + ".dma",
                                                 header_bus, header_bus, 1);
    } else if (payload_bus) {
        pioInterface = newPioInterface(name, hier, payload_bus, this,
                                       &NSGigE::cacheAccess);

        dmaInterface = new DMAInterface<Bus>(name + ".dma", payload_bus,
                                             payload_bus, 1);

    }


    // latch the modeled DMA timing parameters
    intrDelay = US2Ticks(intr_delay);
    dmaReadDelay = dma_read_delay;
    dmaWriteDelay = dma_write_delay;
    dmaReadFactor = dma_read_factor;
    dmaWriteFactor = dma_write_factor;

    // power-on register state, then seed the perfect-match filter ROM
    // with the configured MAC address
    regsReset();
    rom.perfectMatch[0] = eaddr[0];
    rom.perfectMatch[1] = eaddr[1];
    rom.perfectMatch[2] = eaddr[2];
    rom.perfectMatch[3] = eaddr[3];
    rom.perfectMatch[4] = eaddr[4];
    rom.perfectMatch[5] = eaddr[5];
}

NSGigE::~NSGigE()
{}

/**
 * Register the device's statistics with the simulator's stats package.
 * Byte/packet counters are accumulated elsewhere; bandwidth and
 * packet-rate stats are formulas derived from them at the bottom.
 */
void
NSGigE::regStats()
{
    txBytes
        .name(name() + ".txBytes")
        .desc("Bytes Transmitted")
        .prereq(txBytes)
        ;

    rxBytes
        .name(name() + ".rxBytes")
        .desc("Bytes Received")
        .prereq(rxBytes)
        ;

    txPackets
        .name(name() + ".txPackets")
        .desc("Number of Packets Transmitted")
        .prereq(txBytes)
        ;

    rxPackets
        .name(name() + ".rxPackets")
        .desc("Number of Packets Received")
        .prereq(rxBytes)
        ;

    txIPChecksums
        .name(name() + ".txIPChecksums")
        .desc("Number of tx IP Checksums done by device")
        .precision(0)
        .prereq(txBytes)
        ;

    rxIPChecksums
        .name(name() + ".rxIPChecksums")
        .desc("Number of rx IP Checksums done by device")
        .precision(0)
        .prereq(rxBytes)
        ;

    txTCPChecksums
        .name(name() + ".txTCPChecksums")
        .desc("Number of tx TCP Checksums done by device")
        .precision(0)
        .prereq(txBytes)
        ;

    rxTCPChecksums
        .name(name() + ".rxTCPChecksums")
        .desc("Number of rx TCP Checksums done by device")
        .precision(0)
        .prereq(rxBytes)
        ;

    descDmaReads
        .name(name() + ".descDMAReads")
        .desc("Number of descriptors the device read w/ DMA")
        .precision(0)
        ;

    descDmaWrites
        .name(name() + ".descDMAWrites")
        .desc("Number of descriptors the device wrote w/ DMA")
        .precision(0)
        ;

    descDmaRdBytes
        .name(name() + ".descDmaReadBytes")
        .desc("number of descriptor bytes read w/ DMA")
        .precision(0)
        ;

    descDmaWrBytes
        .name(name() + ".descDmaWriteBytes")
        .desc("number of descriptor bytes write w/ DMA")
        .precision(0)
        ;


    txBandwidth
        .name(name() + ".txBandwidth")
        .desc("Transmit Bandwidth (bits/s)")
        .precision(0)
        .prereq(txBytes)
        ;

    rxBandwidth
        .name(name() + ".rxBandwidth")
        .desc("Receive Bandwidth (bits/s)")
        .precision(0)
        .prereq(rxBytes)
        ;

    txPacketRate
        .name(name() + ".txPPS")
        .desc("Packet Tranmission Rate (packets/s)")
        .precision(0)
        .prereq(txBytes)
        ;

    rxPacketRate
        .name(name() + ".rxPPS")
        .desc("Packet Reception Rate (packets/s)")
        .precision(0)
        .prereq(rxBytes)
        ;

    // derived formula stats
    txBandwidth = txBytes * Stats::constant(8) / simSeconds;
    rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
    txPacketRate = txPackets / simSeconds;
    rxPacketRate = rxPackets / simSeconds;
}

/**
 * This is to read the PCI general configuration registers
 */
void
NSGigE::ReadConfig(int offset, int size, uint8_t *data)
{
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::ReadConfig(offset, size, data);
    else
        panic("Device specific PCI config space not implemented!\n");
}

/**
 * This is to write to the PCI general configuration registers
 */
void
NSGigE::WriteConfig(int offset, int size, uint32_t data)
{
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::WriteConfig(offset, size, data);
    else
        panic("Device specific PCI config space not implemented!\n");

    // Need to catch writes to BARs to update the PIO interface
    switch (offset) {
        //seems to work fine without all these PCI settings, but i put in the IO
        //to double check, an assertion will fail if we need to properly
        // implement it
      case PCI_COMMAND:
        // track the I/O-space-enable bit; PIO accesses assert on it
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;

#if 0
        if (config.data[offset] & PCI_CMD_BME) {
            bmEnabled = true;
        }
        else {
            bmEnabled = false;
        }

        if (config.data[offset] & PCI_CMD_MSE) {
            memEnable = true;
        }
        else {
            memEnable = false;
        }
#endif
        break;

      case PCI0_BASE_ADDR0:
        if (BARAddrs[0] != 0) {

            if (pioInterface)
                pioInterface->addAddrRange(BARAddrs[0],
                                           BARAddrs[0] + BARSize[0] - 1);

            BARAddrs[0] &= PA_UNCACHED_MASK;

        }
        break;
      case PCI0_BASE_ADDR1:
        if (BARAddrs[1] != 0) {

            if (pioInterface)
                pioInterface->addAddrRange(BARAddrs[1],
                                           BARAddrs[1] + BARSize[1] - 1);

            BARAddrs[1] &= PA_UNCACHED_MASK;

        }
        break;
    }
}

/**
 * This reads the device registers, which are detailed in the NS83820
 * spec sheet
 */
Fault
NSGigE::read(MemReqPtr &req, uint8_t *data)
{
    assert(ioEnable);

    //The mask is to give you only the offset into the device register file
    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);


    //there are some reserved registers, you can see ns_gige_reg.h and
    //the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        ReadConfig(daddr & 0xff, req->size, data);
        return No_Fault;
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's.
        // hopefully the kernel doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        uint32_t &reg = *(uint32_t *) data;
        reg = 0;
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    switch (req->size) {
      case sizeof(uint32_t):
        {
            uint32_t &reg = *(uint32_t *)data;

            switch (daddr) {
              case CR:
                reg = regs.command;
                //these are supposed to be cleared on a read
                reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
                break;

              case CFG:
                reg = regs.config;
                break;

              case MEAR:
                reg = regs.mear;
                break;

              case PTSCR:
                reg = regs.ptscr;
                break;

              case ISR:
                // reading the ISR acknowledges (clears) all interrupts
                reg = regs.isr;
                devIntrClear(ISR_ALL);
                break;

              case IMR:
                reg = regs.imr;
                break;

              case IER:
                reg = regs.ier;
                break;

              case IHR:
                reg = regs.ihr;
                break;

              case TXDP:
                reg = regs.txdp;
                break;

              case TXDP_HI:
                reg = regs.txdp_hi;
                break;

              case TXCFG:
                reg = regs.txcfg;
                break;

              case GPIOR:
                reg = regs.gpior;
                break;

              case RXDP:
                reg = regs.rxdp;
                break;

              case RXDP_HI:
                reg = regs.rxdp_hi;
                break;

              case RXCFG:
                reg = regs.rxcfg;
                break;

              case PQCR:
                reg = regs.pqcr;
                break;

              case WCSR:
                reg = regs.wcsr;
                break;

              case PCR:
                reg = regs.pcr;
                break;

              //see the spec sheet for how RFCR and RFDR work
              //basically, you write to RFCR to tell the machine what you want to do next
              //then you act upon RFDR, and the device will be prepared b/c
              //of what you wrote to RFCR
              case RFCR:
                reg = regs.rfcr;
                break;

              case RFDR:
                // only perfect-match ROM reads are modeled; each pair of
                // MAC-address bytes is returned little-end-first
                switch (regs.rfcr & RFCR_RFADDR) {
                  case 0x000:
                    reg = rom.perfectMatch[1];
                    reg = reg << 8;
                    reg += rom.perfectMatch[0];
                    break;
                  case 0x002:
                    reg = rom.perfectMatch[3] << 8;
                    reg += rom.perfectMatch[2];
                    break;
                  case 0x004:
                    reg = rom.perfectMatch[5] << 8;
                    reg += rom.perfectMatch[4];
                    break;
                  default:
                    panic("reading from RFDR for something for other than PMATCH!\n");
                    //didn't implement other RFDR functionality b/c driver didn't use
                }
                break;

              case SRR:
                reg = regs.srr;
                break;

              case MIBC:
                reg = regs.mibc;
                reg &= ~(MIBC_MIBS | MIBC_ACLR);
                break;

              case VRCR:
                reg = regs.vrcr;
                break;

              case VTCR:
                reg = regs.vtcr;
                break;

              case VDR:
                reg = regs.vdr;
                break;

              case CCSR:
                reg = regs.ccsr;
                break;

              case TBICR:
                reg = regs.tbicr;
                break;

              case TBISR:
                reg = regs.tbisr;
                break;

              case TANAR:
                reg = regs.tanar;
                break;

              case TANLPAR:
                reg = regs.tanlpar;
                break;

              case TANER:
                reg = regs.taner;
                break;

              case TESR:
                reg = regs.tesr;
                break;

              default:
                panic("reading unimplemented register: addr = %#x", daddr);
            }

            DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
                    daddr, reg, reg);
        }
        break;

      default:
        panic("accessing register with invalid size: addr=%#x, size=%d",
              daddr, req->size);
    }

    return No_Fault;
}

/**
 * Handle a PIO write to the device register file; dispatches on the
 * register offset and may kick the tx/rx state machines or post
 * interrupts as side effects.  Only 32-bit accesses are supported.
 */
Fault
NSGigE::write(MemReqPtr &req, const uint8_t *data)
{
    assert(ioEnable);

    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);

    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        WriteConfig(daddr & 0xff, req->size, *(uint32_t *)data);
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    if (req->size == sizeof(uint32_t)) {
        uint32_t reg = *(uint32_t *)data;
        DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);

        switch (daddr) {
          case CR:
            regs.command = reg;
            // enable+disable written together halts the machine;
            // enable alone kicks it if it is idle
            if ((reg & (CR_TXE | CR_TXD)) == (CR_TXE | CR_TXD)) {
                txHalt = true;
            } else if (reg & CR_TXE) {
                //the kernel is enabling the transmit machine
                if (txState == txIdle)
                    txKick();
            } else if (reg & CR_TXD) {
                txHalt = true;
            }

            if ((reg & (CR_RXE | CR_RXD)) == (CR_RXE | CR_RXD)) {
                rxHalt = true;
            } else if (reg & CR_RXE) {
                if (rxState == rxIdle) {
                    rxKick();
                }
            } else if (reg & CR_RXD) {
                rxHalt = true;
            }

            if (reg & CR_TXR)
                txReset();

            if (reg & CR_RXR)
                rxReset();

            if (reg & CR_SWI)
                devIntrPost(ISR_SWI);

            if (reg & CR_RST) {
                txReset();
                rxReset();

                regsReset();
            }
            break;

          case CFG:
            if (reg & CFG_LNKSTS || reg & CFG_SPDSTS || reg & CFG_DUPSTS
                || reg & CFG_RESERVED || reg & CFG_T64ADDR
                || reg & CFG_PCI64_DET)
                panic("writing to read-only or reserved CFG bits!\n");

            regs.config |= reg & ~(CFG_LNKSTS | CFG_SPDSTS | CFG_DUPSTS |
                                   CFG_RESERVED | CFG_T64ADDR | CFG_PCI64_DET);

// all these #if 0's are because i don't THINK the kernel needs to have these implemented
// if there is a problem relating to one of these, you may need to add functionality in
#if 0
            if (reg & CFG_TBI_EN) ;
            if (reg & CFG_MODE_1000) ;
#endif

            if (reg & CFG_AUTO_1000)
                panic("CFG_AUTO_1000 not implemented!\n");

#if 0
            if (reg & CFG_PINT_DUPSTS || reg & CFG_PINT_LNKSTS || reg & CFG_PINT_SPDSTS) ;
            if (reg & CFG_TMRTEST) ;
            if (reg & CFG_MRM_DIS) ;
            if (reg & CFG_MWI_DIS) ;

            if (reg & CFG_T64ADDR)
                panic("CFG_T64ADDR is read only register!\n");

            if (reg & CFG_PCI64_DET)
                panic("CFG_PCI64_DET is read only register!\n");

            if (reg & CFG_DATA64_EN) ;
            if (reg & CFG_M64ADDR) ;
            if (reg & CFG_PHY_RST) ;
            if (reg & CFG_PHY_DIS) ;
#endif

            if (reg & CFG_EXTSTS_EN)
                extstsEnable = true;
            else
                extstsEnable = false;

#if 0
            if (reg & CFG_REQALG) ;
            if (reg & CFG_SB) ;
            if (reg & CFG_POW) ;
            if (reg & CFG_EXD) ;
            if (reg & CFG_PESEL) ;
            if (reg & CFG_BROM_DIS) ;
            if (reg & CFG_EXT_125) ;
            if (reg & CFG_BEM) ;
#endif
            break;

          case MEAR:
            regs.mear = reg;
            /* since phy is completely faked, MEAR_MD* don't matter
               and since the driver never uses MEAR_EE*, they don't matter */
#if 0
            if (reg & MEAR_EEDI) ;
            if (reg & MEAR_EEDO) ; //this one is read only
            if (reg & MEAR_EECLK) ;
            if (reg & MEAR_EESEL) ;
            if (reg & MEAR_MDIO) ;
            if (reg & MEAR_MDDIR) ;
            if (reg & MEAR_MDC) ;
#endif
            break;

          case PTSCR:
            regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
            /* these control BISTs for various parts of chip - we don't care or do
               just fake that the BIST is done */
            if (reg & PTSCR_RBIST_EN)
                regs.ptscr |= PTSCR_RBIST_DONE;
            if (reg & PTSCR_EEBIST_EN)
                regs.ptscr &= ~PTSCR_EEBIST_EN;
            if (reg & PTSCR_EELOAD_EN)
                regs.ptscr &= ~PTSCR_EELOAD_EN;
            break;

          case ISR: /* writing to the ISR has no effect */
            panic("ISR is a read only register!\n");

          case IMR:
            // mask change may immediately post or clear the CPU interrupt
            regs.imr = reg;
            devIntrChangeMask();
            break;

          case IER:
            regs.ier = reg;
            break;

          case IHR:
            regs.ihr = reg;
            /* not going to implement real interrupt holdoff */
            break;

          case TXDP:
            // descriptor pointer is 4-byte aligned; new ring means the
            // cached "current tx descriptor done" bit is stale
            regs.txdp = (reg & 0xFFFFFFFC);
            assert(txState == txIdle);
            CTDD = false;
            break;

          case TXDP_HI:
            regs.txdp_hi = reg;
            break;

          case TXCFG:
            regs.txcfg = reg;
#if 0
            if (reg & TXCFG_CSI) ;
            if (reg & TXCFG_HBI) ;
            if (reg & TXCFG_MLB) ;
            if (reg & TXCFG_ATP) ;
            if (reg & TXCFG_ECRETRY) ;  /* this could easily be implemented, but
                                           considering the network is just a fake
                                           pipe, wouldn't make sense to do this */

            if (reg & TXCFG_BRST_DIS) ;
#endif


            /* we handle our own DMA, ignore the kernel's exhortations */
            //if (reg & TXCFG_MXDMA) ;

            //also, we currently don't care about fill/drain thresholds
            //though this may change in the future with more realistic
            //networks or a driver which changes it according to feedback

            break;

          case GPIOR:
            regs.gpior = reg;
            /* these just control general purpose i/o pins, don't matter */
            break;

          case RXDP:
            regs.rxdp = reg;
            break;

          case RXDP_HI:
            regs.rxdp_hi = reg;
            break;

          case RXCFG:
            regs.rxcfg = reg;
#if 0
            if (reg & RXCFG_AEP) ;
            if (reg & RXCFG_ARP) ;
            if (reg & RXCFG_STRIPCRC) ;
            if (reg & RXCFG_RX_RD) ;
            if (reg & RXCFG_ALP) ;
            if (reg & RXCFG_AIRL) ;
#endif

            /* we handle our own DMA, ignore what kernel says about it */
            //if (reg & RXCFG_MXDMA) ;

#if 0
            //also, we currently don't care about fill/drain thresholds
            //though this may change in the future with more realistic
            //networks or a driver which changes it according to feedback
            if (reg & (RXCFG_DRTH | RXCFG_DRTH0)) ;
#endif
            break;

          case PQCR:
            /* there is no priority queueing used in the linux 2.6 driver */
            regs.pqcr = reg;
            break;

          case WCSR:
            /* not going to implement wake on LAN */
            regs.wcsr = reg;
            break;

          case PCR:
            /* not going to implement pause control */
            regs.pcr = reg;
            break;

          case RFCR:
            // latch the receive-filter configuration bits into the
            // flags the rx path consults when filtering packets
            regs.rfcr = reg;

            rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
            acceptBroadcast = (reg & RFCR_AAB) ? true : false;
            acceptMulticast = (reg & RFCR_AAM) ? true : false;
            acceptUnicast = (reg & RFCR_AAU) ? true : false;
            acceptPerfect = (reg & RFCR_APM) ? true : false;
            acceptArp = (reg & RFCR_AARP) ? true : false;

            if (reg & RFCR_APAT) ;
//                panic("RFCR_APAT not implemented!\n");

            if (reg & RFCR_MHEN || reg & RFCR_UHEN)
                panic("hash filtering not implemented!\n");

            if (reg & RFCR_ULM)
                panic("RFCR_ULM not implemented!\n");

            break;

          case RFDR:
            panic("the driver never writes to RFDR, something is wrong!\n");

          case BRAR:
            panic("the driver never uses BRAR, something is wrong!\n");

          case BRDR:
            panic("the driver never uses BRDR, something is wrong!\n");

          case SRR:
            panic("SRR is read only register!\n");

          case MIBC:
            panic("the driver never uses MIBC, something is wrong!\n");

          case VRCR:
            regs.vrcr = reg;
            break;

          case VTCR:
            regs.vtcr = reg;
            break;

          case VDR:
            panic("the driver never uses VDR, something is wrong!\n");
            break;

          case CCSR:
            /* not going to implement clockrun stuff */
            regs.ccsr = reg;
            break;

          case TBICR:
            regs.tbicr = reg;
            if (reg & TBICR_MR_LOOPBACK)
                panic("TBICR_MR_LOOPBACK never used, something wrong!\n");

            if (reg & TBICR_MR_AN_ENABLE) {
                // fake instant autonegotiation completion
                regs.tanlpar = regs.tanar;
                regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
            }

#if 0
            if (reg & TBICR_MR_RESTART_AN) ;
#endif

            break;

          case TBISR:
            panic("TBISR is read only register!\n");

          case TANAR:
            regs.tanar = reg;
            if (reg & TANAR_PS2)
                panic("this isn't used in driver, something wrong!\n");

            if (reg & TANAR_PS1)
                panic("this isn't used in driver, something wrong!\n");
            break;

          case TANLPAR:
            panic("this should only be written to by the fake phy!\n");

          case TANER:
            panic("TANER is read only register!\n");

          case TESR:
            regs.tesr = reg;
            break;

          default:
            panic("thought i covered all the register, what is this?
                  addr=%#x",
                  daddr);
        }
    } else
        panic("Invalid Request Size");

    return No_Fault;
}

/**
 * Set the given bits in the device ISR and, if any now-pending bit is
 * unmasked in the IMR, schedule a CPU interrupt.  TXOK/RXOK posts are
 * delayed by intrDelay to model interrupt coalescing.
 */
void
NSGigE::devIntrPost(uint32_t interrupts)
{
    bool delay = false;

    if (interrupts & ISR_RESERVE)
        panic("Cannot set a reserved interrupt");

    if (interrupts & ISR_TXRCMP)
        regs.isr |= ISR_TXRCMP;

    if (interrupts & ISR_RXRCMP)
        regs.isr |= ISR_RXRCMP;

//ISR_DPERR  not implemented
//ISR_SSERR  not implemented
//ISR_RMABT  not implemented
//ISR_RXSOVR not implemented
//ISR_HIBINT not implemented
//ISR_PHY    not implemented
//ISR_PME    not implemented

    if (interrupts & ISR_SWI)
        regs.isr |= ISR_SWI;

//ISR_MIB   not implemented
//ISR_TXURN not implemented

    if (interrupts & ISR_TXIDLE)
        regs.isr |= ISR_TXIDLE;

    if (interrupts & ISR_TXERR)
        regs.isr |= ISR_TXERR;

    if (interrupts & ISR_TXDESC)
        regs.isr |= ISR_TXDESC;

    if (interrupts & ISR_TXOK) {
        regs.isr |= ISR_TXOK;
        delay = true;
    }

    if (interrupts & ISR_RXORN)
        regs.isr |= ISR_RXORN;

    if (interrupts & ISR_RXIDLE)
        regs.isr |= ISR_RXIDLE;

//ISR_RXEARLY not implemented

    if (interrupts & ISR_RXERR)
        regs.isr |= ISR_RXERR;

    if (interrupts & ISR_RXDESC)
        regs.isr |= ISR_RXDESC;

    if (interrupts & ISR_RXOK) {
        delay = true;
        regs.isr |= ISR_RXOK;
    }

    // anything pending and unmasked -> raise (possibly delayed) interrupt
    if ((regs.isr & regs.imr)) {
        Tick when = curTick;
        if (delay)
            when += intrDelay;
        cpuIntrPost(when);
    }

    DPRINTF(EthernetIntr, "**interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
            interrupts, regs.isr, regs.imr);
}

/**
 * Clear the given bits from the device ISR; if nothing pending remains
 * unmasked, lower the CPU interrupt.
 */
void
NSGigE::devIntrClear(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot clear a reserved interrupt");

    if (interrupts & ISR_TXRCMP)
        regs.isr &= ~ISR_TXRCMP;

    if (interrupts & ISR_RXRCMP)
        regs.isr &= ~ISR_RXRCMP;

//ISR_DPERR  not implemented
//ISR_SSERR  not implemented
//ISR_RMABT  not implemented
//ISR_RXSOVR not implemented
//ISR_HIBINT not implemented
//ISR_PHY    not implemented
//ISR_PME    not implemented

    if (interrupts & ISR_SWI)
        regs.isr &= ~ISR_SWI;

//ISR_MIB   not implemented
//ISR_TXURN not implemented

    if (interrupts & ISR_TXIDLE)
        regs.isr &= ~ISR_TXIDLE;

    if (interrupts & ISR_TXERR)
        regs.isr &= ~ISR_TXERR;

    if (interrupts & ISR_TXDESC)
        regs.isr &= ~ISR_TXDESC;

    if (interrupts & ISR_TXOK)
        regs.isr &= ~ISR_TXOK;

    if (interrupts & ISR_RXORN)
        regs.isr &= ~ISR_RXORN;

    if (interrupts & ISR_RXIDLE)
        regs.isr &= ~ISR_RXIDLE;

//ISR_RXEARLY not implemented

    if (interrupts & ISR_RXERR)
        regs.isr &= ~ISR_RXERR;

    if (interrupts & ISR_RXDESC)
        regs.isr &= ~ISR_RXDESC;

    if (interrupts & ISR_RXOK)
        regs.isr &= ~ISR_RXOK;

    if (!(regs.isr & regs.imr))
        cpuIntrClear();

    DPRINTF(EthernetIntr, "**interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
            interrupts, regs.isr, regs.imr);
}

/**
 * React to a change of the interrupt mask: raise or lower the CPU
 * interrupt according to the newly masked pending set.
 */
void
NSGigE::devIntrChangeMask()
{
    DPRINTF(EthernetIntr, "interrupt mask changed\n");

    if (regs.isr & regs.imr)
        cpuIntrPost(curTick);
    else
        cpuIntrClear();
}

/**
 * Schedule delivery of the CPU interrupt at tick @p when, squashing any
 * later-scheduled delivery so only the earliest request survives.
 */
void
NSGigE::cpuIntrPost(Tick when)
{
    //If the interrupt you want to post is later than an
    //interrupt already scheduled, just let it post in the coming one and
    //don't schedule another.
    //HOWEVER, must be sure that the scheduled intrTick is in the future
    //(this was formerly the source of a bug)
    assert((intrTick >= curTick) || (intrTick == 0));
    if (when > intrTick && intrTick != 0)
        return;

    intrTick = when;

    // drop any previously scheduled (now superseded) delivery event
    if (intrEvent) {
        intrEvent->squash();
        intrEvent = 0;
    }

    if (when < curTick) {
        // requested time already passed: deliver immediately
        cpuInterrupt();
    } else {
        DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
                intrTick);
        intrEvent = new IntrEvent(this, true);
        intrEvent->schedule(intrTick);
    }
}

/**
 * Actually deliver the interrupt to the CPU via the Tsunami cchip,
 * unless one is already pending or the scheduled time hasn't arrived.
 */
void
NSGigE::cpuInterrupt()
{
    // Don't send an interrupt if there's already one
    if (cpuPendingIntr) {
        DPRINTF(EthernetIntr,
                "would send an interrupt now, but there's already pending\n");
        intrTick = 0;
        return;
    }
    // Don't send an interrupt if it's supposed to be delayed
    if (intrTick > curTick) {
        DPRINTF(EthernetIntr, "an interrupt is scheduled for %d, wait til then\n",
                intrTick);
        return;
    }

    // Whether or not there's a pending interrupt, we don't care about
    // it anymore
    intrEvent = 0;
    intrTick = 0;

    // Send interrupt
    cpuPendingIntr = true;
    /** @todo rework the intctrl to be tsunami ok */
    //intctrl->post(TheISA::INTLEVEL_IRQ1, TheISA::INTINDEX_ETHERNET);
    DPRINTF(EthernetIntr, "Posting interrupts to cchip!\n");
    tsunami->cchip->postDRIR(configData->config.hdr.pci0.interruptLine);
}

/**
 * Lower the CPU interrupt line at the Tsunami cchip if we had raised it.
 */
void
NSGigE::cpuIntrClear()
{
    if (cpuPendingIntr) {
        cpuPendingIntr = false;
        /** @todo rework the intctrl to be tsunami ok */
        //intctrl->clear(TheISA::INTLEVEL_IRQ1, TheISA::INTINDEX_ETHERNET);
        DPRINTF(EthernetIntr, "clearing all interrupts from cchip\n");
        tsunami->cchip->clearDRIR(configData->config.hdr.pci0.interruptLine);
    }
}

// True while an interrupt raised by this device is pending at the CPU.
bool
NSGigE::cpuIntrPending() const
{ return cpuPendingIntr; }

1125void 1126NSGigE::txReset() 1127{ 1128 1129 DPRINTF(Ethernet, "transmit reset\n"); 1130 1131 CTDD = false; 1132 txFifoAvail = maxTxFifoSize; 1133 txHalt = false; 1134 txFragPtr = 0; 1135 assert(txDescCnt == 0); 1136 txFifo.clear(); 1137 regs.command &= ~CR_TXE; 1138 txState = txIdle; 1139 assert(txDmaState == dmaIdle); 1140} 1141 1142void 1143NSGigE::rxReset() 1144{ 1145 DPRINTF(Ethernet, "receive reset\n"); 1146 1147 CRDD = false; 1148 assert(rxPktBytes == 0); 1149 rxFifoCnt = 0; 1150 rxHalt = false; 1151 rxFragPtr = 0; 1152 assert(rxDescCnt == 0); 1153 assert(rxDmaState == dmaIdle); 1154 rxFifo.clear(); 1155 regs.command &= ~CR_RXE; 1156 rxState = rxIdle; 1157} 1158 1159void NSGigE::regsReset() 1160{ 1161 memset(®s, 0, sizeof(regs)); 1162 regs.config = 0x80000000; 1163 regs.mear = 0x12; 1164 regs.isr = 0x00608000; 1165 regs.txcfg = 0x120; 1166 regs.rxcfg = 0x4; 1167 regs.srr = 0x0103; 1168 regs.mibc = 0x2; 1169 regs.vdr = 0x81; 1170 regs.tesr = 0xc000; 1171 1172 extstsEnable = false; 1173 acceptBroadcast = false; 1174 acceptMulticast = false; 1175 acceptUnicast = false; 1176 acceptPerfect = false; 1177 acceptArp = false; 1178} 1179 1180void 1181NSGigE::rxDmaReadCopy() 1182{ 1183 assert(rxDmaState == dmaReading); 1184 1185 memcpy(rxDmaData, physmem->dma_addr(rxDmaAddr, rxDmaLen), rxDmaLen); 1186 rxDmaState = dmaIdle; 1187 1188 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n", 1189 rxDmaAddr, rxDmaLen); 1190 DDUMP(EthernetDMA, rxDmaData, rxDmaLen); 1191} 1192 1193bool 1194NSGigE::doRxDmaRead() 1195{ 1196 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting); 1197 rxDmaState = dmaReading; 1198 1199 if (dmaInterface && !rxDmaFree) { 1200 if (dmaInterface->busy()) 1201 rxDmaState = dmaReadWaiting; 1202 else 1203 dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick, 1204 &rxDmaReadEvent, true); 1205 return true; 1206 } 1207 1208 if (dmaReadDelay == 0 && dmaReadFactor == 0) { 1209 rxDmaReadCopy(); 1210 return false; 1211 } 1212 1213 Tick factor = 
((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor; 1214 Tick start = curTick + dmaReadDelay + factor; 1215 rxDmaReadEvent.schedule(start); 1216 return true; 1217} 1218 1219void 1220NSGigE::rxDmaReadDone() 1221{ 1222 assert(rxDmaState == dmaReading); 1223 rxDmaReadCopy(); 1224 1225 // If the transmit state machine has a pending DMA, let it go first 1226 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting) 1227 txKick(); 1228 1229 rxKick(); 1230} 1231 1232void 1233NSGigE::rxDmaWriteCopy() 1234{ 1235 assert(rxDmaState == dmaWriting); 1236 1237 memcpy(physmem->dma_addr(rxDmaAddr, rxDmaLen), rxDmaData, rxDmaLen); 1238 rxDmaState = dmaIdle; 1239 1240 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n", 1241 rxDmaAddr, rxDmaLen); 1242 DDUMP(EthernetDMA, rxDmaData, rxDmaLen); 1243} 1244 1245bool 1246NSGigE::doRxDmaWrite() 1247{ 1248 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting); 1249 rxDmaState = dmaWriting; 1250 1251 if (dmaInterface && !rxDmaFree) { 1252 if (dmaInterface->busy()) 1253 rxDmaState = dmaWriteWaiting; 1254 else 1255 dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick, 1256 &rxDmaWriteEvent, true); 1257 return true; 1258 } 1259 1260 if (dmaWriteDelay == 0 && dmaWriteFactor == 0) { 1261 rxDmaWriteCopy(); 1262 return false; 1263 } 1264 1265 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor; 1266 Tick start = curTick + dmaWriteDelay + factor; 1267 rxDmaWriteEvent.schedule(start); 1268 return true; 1269} 1270 1271void 1272NSGigE::rxDmaWriteDone() 1273{ 1274 assert(rxDmaState == dmaWriting); 1275 rxDmaWriteCopy(); 1276 1277 // If the transmit state machine has a pending DMA, let it go first 1278 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting) 1279 txKick(); 1280 1281 rxKick(); 1282} 1283 1284void 1285NSGigE::rxKick() 1286{ 1287 DPRINTF(EthernetSM, "receive kick state=%s (rxBuf.size=%d)\n", 1288 NsRxStateStrings[rxState], rxFifo.size()); 1289 1290 if (rxKickTick > curTick) { 1291 
DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n", 1292 rxKickTick); 1293 return; 1294 } 1295 1296 next: 1297 switch(rxDmaState) { 1298 case dmaReadWaiting: 1299 if (doRxDmaRead()) 1300 goto exit; 1301 break; 1302 case dmaWriteWaiting: 1303 if (doRxDmaWrite()) 1304 goto exit; 1305 break; 1306 default: 1307 break; 1308 } 1309 1310 // see state machine from spec for details 1311 // the way this works is, if you finish work on one state and can go directly to 1312 // another, you do that through jumping to the label "next". however, if you have 1313 // intermediate work, like DMA so that you can't go to the next state yet, you go to 1314 // exit and exit the loop. however, when the DMA is done it will trigger an 1315 // event and come back to this loop. 1316 switch (rxState) { 1317 case rxIdle: 1318 if (!regs.command & CR_RXE) { 1319 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n"); 1320 goto exit; 1321 } 1322 1323 if (CRDD) { 1324 rxState = rxDescRefr; 1325 1326 rxDmaAddr = regs.rxdp & 0x3fffffff; 1327 rxDmaData = &rxDescCache + offsetof(ns_desc, link); 1328 rxDmaLen = sizeof(rxDescCache.link); 1329 rxDmaFree = dmaDescFree; 1330 1331 descDmaReads++; 1332 descDmaRdBytes += rxDmaLen; 1333 1334 if (doRxDmaRead()) 1335 goto exit; 1336 } else { 1337 rxState = rxDescRead; 1338 1339 rxDmaAddr = regs.rxdp & 0x3fffffff; 1340 rxDmaData = &rxDescCache; 1341 rxDmaLen = sizeof(ns_desc); 1342 rxDmaFree = dmaDescFree; 1343 1344 descDmaReads++; 1345 descDmaRdBytes += rxDmaLen; 1346 1347 if (doRxDmaRead()) 1348 goto exit; 1349 } 1350 break; 1351 1352 case rxDescRefr: 1353 if (rxDmaState != dmaIdle) 1354 goto exit; 1355 1356 rxState = rxAdvance; 1357 break; 1358 1359 case rxDescRead: 1360 if (rxDmaState != dmaIdle) 1361 goto exit; 1362 1363 DPRINTF(EthernetDesc, 1364 "rxDescCache:\n\tlink=%08x\n\tbufptr=%08x\n\tcmdsts=%08x\n\textsts=%08x\n" 1365 ,rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts, 1366 rxDescCache.extsts); 1367 1368 if (rxDescCache.cmdsts 
& CMDSTS_OWN) { 1369 rxState = rxIdle; 1370 } else { 1371 rxState = rxFifoBlock; 1372 rxFragPtr = rxDescCache.bufptr; 1373 rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK; 1374 } 1375 break; 1376 1377 case rxFifoBlock: 1378 if (!rxPacket) { 1379 /** 1380 * @todo in reality, we should be able to start processing 1381 * the packet as it arrives, and not have to wait for the 1382 * full packet ot be in the receive fifo. 1383 */ 1384 if (rxFifo.empty()) 1385 goto exit; 1386 1387 DPRINTF(EthernetSM, "\n\n*****processing receive of new packet\n"); 1388 1389 // If we don't have a packet, grab a new one from the fifo. 1390 rxPacket = rxFifo.front(); 1391 rxPktBytes = rxPacket->length; 1392 rxPacketBufPtr = rxPacket->data; 1393 1394 if (DTRACE(Ethernet)) { 1395 if (rxPacket->isIpPkt()) { 1396 ip_header *ip = rxPacket->getIpHdr(); 1397 DPRINTF(Ethernet, "ID is %d\n", reverseEnd16(ip->ID)); 1398 if (rxPacket->isTcpPkt()) { 1399 tcp_header *tcp = rxPacket->getTcpHdr(ip); 1400 DPRINTF(Ethernet, "Src Port = %d, Dest Port = %d\n", 1401 reverseEnd16(tcp->src_port_num), 1402 reverseEnd16(tcp->dest_port_num)); 1403 } 1404 } 1405 } 1406 1407 // sanity check - i think the driver behaves like this 1408 assert(rxDescCnt >= rxPktBytes); 1409 1410 // Must clear the value before popping to decrement the 1411 // reference count 1412 rxFifo.front() = NULL; 1413 rxFifo.pop_front(); 1414 rxFifoCnt -= rxPacket->length; 1415 } 1416 1417 1418 // dont' need the && rxDescCnt > 0 if driver sanity check above holds 1419 if (rxPktBytes > 0) { 1420 rxState = rxFragWrite; 1421 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity check holds 1422 rxXferLen = rxPktBytes; 1423 1424 rxDmaAddr = rxFragPtr & 0x3fffffff; 1425 rxDmaData = rxPacketBufPtr; 1426 rxDmaLen = rxXferLen; 1427 rxDmaFree = dmaDataFree; 1428 1429 if (doRxDmaWrite()) 1430 goto exit; 1431 1432 } else { 1433 rxState = rxDescWrite; 1434 1435 //if (rxPktBytes == 0) { /* packet is done */ 1436 assert(rxPktBytes == 0); 1437 
DPRINTF(EthernetSM, "done with receiving packet\n"); 1438 1439 rxDescCache.cmdsts |= CMDSTS_OWN; 1440 rxDescCache.cmdsts &= ~CMDSTS_MORE; 1441 rxDescCache.cmdsts |= CMDSTS_OK; 1442 rxDescCache.cmdsts &= 0xffff0000; 1443 rxDescCache.cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE 1444 1445#if 0 1446 /* all the driver uses these are for its own stats keeping 1447 which we don't care about, aren't necessary for functionality 1448 and doing this would just slow us down. if they end up using 1449 this in a later version for functional purposes, just undef 1450 */ 1451 if (rxFilterEnable) { 1452 rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK; 1453 if (rxFifo.front()->IsUnicast()) 1454 rxDescCache.cmdsts |= CMDSTS_DEST_SELF; 1455 if (rxFifo.front()->IsMulticast()) 1456 rxDescCache.cmdsts |= CMDSTS_DEST_MULTI; 1457 if (rxFifo.front()->IsBroadcast()) 1458 rxDescCache.cmdsts |= CMDSTS_DEST_MASK; 1459 } 1460#endif 1461 1462 if (rxPacket->isIpPkt() && extstsEnable) { 1463 rxDescCache.extsts |= EXTSTS_IPPKT; 1464 rxIPChecksums++; 1465 if (!ipChecksum(rxPacket, false)) { 1466 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n"); 1467 rxDescCache.extsts |= EXTSTS_IPERR; 1468 } 1469 if (rxPacket->isTcpPkt()) { 1470 rxDescCache.extsts |= EXTSTS_TCPPKT; 1471 rxTCPChecksums++; 1472 if (!tcpChecksum(rxPacket, false)) { 1473 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n"); 1474 rxDescCache.extsts |= EXTSTS_TCPERR; 1475 1476 } 1477 } else if (rxPacket->isUdpPkt()) { 1478 rxDescCache.extsts |= EXTSTS_UDPPKT; 1479 if (!udpChecksum(rxPacket, false)) { 1480 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n"); 1481 rxDescCache.extsts |= EXTSTS_UDPERR; 1482 } 1483 } 1484 } 1485 rxPacket = 0; 1486 1487 /* the driver seems to always receive into desc buffers 1488 of size 1514, so you never have a pkt that is split 1489 into multiple descriptors on the receive side, so 1490 i don't implement that case, hence the assert above. 
1491 */ 1492 1493 DPRINTF(EthernetDesc, "rxDesc writeback:\n\tcmdsts=%08x\n\textsts=%08x\n", 1494 rxDescCache.cmdsts, rxDescCache.extsts); 1495 1496 rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff; 1497 rxDmaData = &(rxDescCache.cmdsts); 1498 rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts); 1499 rxDmaFree = dmaDescFree; 1500 1501 descDmaWrites++; 1502 descDmaWrBytes += rxDmaLen; 1503 1504 if (doRxDmaWrite()) 1505 goto exit; 1506 } 1507 break; 1508 1509 case rxFragWrite: 1510 if (rxDmaState != dmaIdle) 1511 goto exit; 1512 1513 rxPacketBufPtr += rxXferLen; 1514 rxFragPtr += rxXferLen; 1515 rxPktBytes -= rxXferLen; 1516 1517 rxState = rxFifoBlock; 1518 break; 1519 1520 case rxDescWrite: 1521 if (rxDmaState != dmaIdle) 1522 goto exit; 1523 1524 assert(rxDescCache.cmdsts & CMDSTS_OWN); 1525 1526 assert(rxPacket == 0); 1527 devIntrPost(ISR_RXOK); 1528 1529 if (rxDescCache.cmdsts & CMDSTS_INTR) 1530 devIntrPost(ISR_RXDESC); 1531 1532 if (rxHalt) { 1533 DPRINTF(EthernetSM, "Halting the RX state machine\n"); 1534 rxState = rxIdle; 1535 rxHalt = false; 1536 } else 1537 rxState = rxAdvance; 1538 break; 1539 1540 case rxAdvance: 1541 if (rxDescCache.link == 0) { 1542 rxState = rxIdle; 1543 return; 1544 } else { 1545 rxState = rxDescRead; 1546 regs.rxdp = rxDescCache.link; 1547 CRDD = false; 1548 1549 rxDmaAddr = regs.rxdp & 0x3fffffff; 1550 rxDmaData = &rxDescCache; 1551 rxDmaLen = sizeof(ns_desc); 1552 rxDmaFree = dmaDescFree; 1553 1554 if (doRxDmaRead()) 1555 goto exit; 1556 } 1557 break; 1558 1559 default: 1560 panic("Invalid rxState!"); 1561 } 1562 1563 1564 DPRINTF(EthernetSM, "entering next rx state = %s\n", 1565 NsRxStateStrings[rxState]); 1566 1567 if (rxState == rxIdle) { 1568 regs.command &= ~CR_RXE; 1569 devIntrPost(ISR_RXIDLE); 1570 return; 1571 } 1572 1573 goto next; 1574 1575 exit: 1576 /** 1577 * @todo do we want to schedule a future kick? 
1578 */ 1579 DPRINTF(EthernetSM, "rx state machine exited state=%s\n", 1580 NsRxStateStrings[rxState]); 1581} 1582 1583void 1584NSGigE::transmit() 1585{ 1586 if (txFifo.empty()) { 1587 DPRINTF(Ethernet, "nothing to transmit\n"); 1588 return; 1589 } 1590 1591 DPRINTF(Ethernet, "\n\nAttempt Pkt Transmit: txFifo length = %d\n", 1592 maxTxFifoSize - txFifoAvail); 1593 if (interface->sendPacket(txFifo.front())) { 1594 if (DTRACE(Ethernet)) { 1595 if (txFifo.front()->isIpPkt()) { 1596 ip_header *ip = txFifo.front()->getIpHdr(); 1597 DPRINTF(Ethernet, "ID is %d\n", reverseEnd16(ip->ID)); 1598 if (txFifo.front()->isTcpPkt()) { 1599 tcp_header *tcp = txFifo.front()->getTcpHdr(ip); 1600 DPRINTF(Ethernet, "Src Port = %d, Dest Port = %d\n", 1601 reverseEnd16(tcp->src_port_num), 1602 reverseEnd16(tcp->dest_port_num)); 1603 } 1604 } 1605 } 1606 1607 DDUMP(Ethernet, txFifo.front()->data, txFifo.front()->length); 1608 txBytes += txFifo.front()->length; 1609 txPackets++; 1610 1611 txFifoAvail += txFifo.front()->length; 1612 1613 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n", txFifoAvail); 1614 txFifo.front() = NULL; 1615 txFifo.pop_front(); 1616 1617 /* normally do a writeback of the descriptor here, and ONLY after that is 1618 done, send this interrupt. but since our stuff never actually fails, 1619 just do this interrupt here, otherwise the code has to stray from this 1620 nice format. besides, it's functionally the same. 
1621 */ 1622 devIntrPost(ISR_TXOK); 1623 } else 1624 DPRINTF(Ethernet, "May need to rethink always sending the descriptors back?\n"); 1625 1626 if (!txFifo.empty() && !txEvent.scheduled()) { 1627 DPRINTF(Ethernet, "reschedule transmit\n"); 1628 txEvent.schedule(curTick + 1000); 1629 } 1630} 1631 1632void 1633NSGigE::txDmaReadCopy() 1634{ 1635 assert(txDmaState == dmaReading); 1636 1637 memcpy(txDmaData, physmem->dma_addr(txDmaAddr, txDmaLen), txDmaLen); 1638 txDmaState = dmaIdle; 1639 1640 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n", 1641 txDmaAddr, txDmaLen); 1642 DDUMP(EthernetDMA, txDmaData, txDmaLen); 1643} 1644 1645bool 1646NSGigE::doTxDmaRead() 1647{ 1648 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting); 1649 txDmaState = dmaReading; 1650 1651 if (dmaInterface && !txDmaFree) { 1652 if (dmaInterface->busy()) 1653 txDmaState = dmaReadWaiting; 1654 else 1655 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick, 1656 &txDmaReadEvent, true); 1657 return true; 1658 } 1659 1660 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) { 1661 txDmaReadCopy(); 1662 return false; 1663 } 1664 1665 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor; 1666 Tick start = curTick + dmaReadDelay + factor; 1667 txDmaReadEvent.schedule(start); 1668 return true; 1669} 1670 1671void 1672NSGigE::txDmaReadDone() 1673{ 1674 assert(txDmaState == dmaReading); 1675 txDmaReadCopy(); 1676 1677 // If the receive state machine has a pending DMA, let it go first 1678 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting) 1679 rxKick(); 1680 1681 txKick(); 1682} 1683 1684void 1685NSGigE::txDmaWriteCopy() 1686{ 1687 assert(txDmaState == dmaWriting); 1688 1689 memcpy(physmem->dma_addr(txDmaAddr, txDmaLen), txDmaData, txDmaLen); 1690 txDmaState = dmaIdle; 1691 1692 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n", 1693 txDmaAddr, txDmaLen); 1694 DDUMP(EthernetDMA, txDmaData, txDmaLen); 1695} 1696 1697bool 1698NSGigE::doTxDmaWrite() 1699{ 1700 
assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting); 1701 txDmaState = dmaWriting; 1702 1703 if (dmaInterface && !txDmaFree) { 1704 if (dmaInterface->busy()) 1705 txDmaState = dmaWriteWaiting; 1706 else 1707 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick, 1708 &txDmaWriteEvent, true); 1709 return true; 1710 } 1711 1712 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) { 1713 txDmaWriteCopy(); 1714 return false; 1715 } 1716 1717 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor; 1718 Tick start = curTick + dmaWriteDelay + factor; 1719 txDmaWriteEvent.schedule(start); 1720 return true; 1721} 1722 1723void 1724NSGigE::txDmaWriteDone() 1725{ 1726 assert(txDmaState == dmaWriting); 1727 txDmaWriteCopy(); 1728 1729 // If the receive state machine has a pending DMA, let it go first 1730 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting) 1731 rxKick(); 1732 1733 txKick(); 1734} 1735 1736void 1737NSGigE::txKick() 1738{ 1739 DPRINTF(EthernetSM, "transmit kick state=%s\n", NsTxStateStrings[txState]); 1740 1741 if (txKickTick > curTick) { 1742 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n", 1743 txKickTick); 1744 1745 return; 1746 } 1747 1748 next: 1749 switch(txDmaState) { 1750 case dmaReadWaiting: 1751 if (doTxDmaRead()) 1752 goto exit; 1753 break; 1754 case dmaWriteWaiting: 1755 if (doTxDmaWrite()) 1756 goto exit; 1757 break; 1758 default: 1759 break; 1760 } 1761 1762 switch (txState) { 1763 case txIdle: 1764 if (!regs.command & CR_TXE) { 1765 DPRINTF(EthernetSM, "Transmit disabled. 
Nothing to do.\n"); 1766 goto exit; 1767 } 1768 1769 if (CTDD) { 1770 txState = txDescRefr; 1771 1772 txDmaAddr = regs.txdp & 0x3fffffff; 1773 txDmaData = &txDescCache + offsetof(ns_desc, link); 1774 txDmaLen = sizeof(txDescCache.link); 1775 txDmaFree = dmaDescFree; 1776 1777 descDmaReads++; 1778 descDmaRdBytes += txDmaLen; 1779 1780 if (doTxDmaRead()) 1781 goto exit; 1782 1783 } else { 1784 txState = txDescRead; 1785 1786 txDmaAddr = regs.txdp & 0x3fffffff; 1787 txDmaData = &txDescCache; 1788 txDmaLen = sizeof(ns_desc); 1789 txDmaFree = dmaDescFree; 1790 1791 descDmaReads++; 1792 descDmaRdBytes += txDmaLen; 1793 1794 if (doTxDmaRead()) 1795 goto exit; 1796 } 1797 break; 1798 1799 case txDescRefr: 1800 if (txDmaState != dmaIdle) 1801 goto exit; 1802 1803 txState = txAdvance; 1804 break; 1805 1806 case txDescRead: 1807 if (txDmaState != dmaIdle) 1808 goto exit; 1809 1810 DPRINTF(EthernetDesc, 1811 "txDescCache data:\n\tlink=%08x\n\tbufptr=%08x\n\tcmdsts=%08x\n\textsts=%08x\n" 1812 ,txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts, 1813 txDescCache.extsts); 1814 1815 if (txDescCache.cmdsts & CMDSTS_OWN) { 1816 txState = txFifoBlock; 1817 txFragPtr = txDescCache.bufptr; 1818 txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK; 1819 } else { 1820 txState = txIdle; 1821 } 1822 break; 1823 1824 case txFifoBlock: 1825 if (!txPacket) { 1826 DPRINTF(EthernetSM, "\n\n*****starting the tx of a new packet\n"); 1827 txPacket = new EtherPacket; 1828 txPacket->data = new uint8_t[16384]; 1829 txPacketBufPtr = txPacket->data; 1830 } 1831 1832 if (txDescCnt == 0) { 1833 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n"); 1834 if (txDescCache.cmdsts & CMDSTS_MORE) { 1835 DPRINTF(EthernetSM, "there are more descriptors to come\n"); 1836 txState = txDescWrite; 1837 1838 txDescCache.cmdsts &= ~CMDSTS_OWN; 1839 1840 txDmaAddr = (regs.txdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff; 1841 txDmaData = &(txDescCache.cmdsts); 1842 txDmaLen = sizeof(txDescCache.cmdsts); 1843 
txDmaFree = dmaDescFree; 1844 1845 if (doTxDmaWrite()) 1846 goto exit; 1847 1848 } else { /* this packet is totally done */ 1849 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n"); 1850 /* deal with the the packet that just finished */ 1851 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) { 1852 if (txDescCache.extsts & EXTSTS_UDPPKT) { 1853 udpChecksum(txPacket, true); 1854 } else if (txDescCache.extsts & EXTSTS_TCPPKT) { 1855 tcpChecksum(txPacket, true); 1856 txTCPChecksums++; 1857 } 1858 if (txDescCache.extsts & EXTSTS_IPPKT) { 1859 ipChecksum(txPacket, true); 1860 txIPChecksums++; 1861 } 1862 } 1863 1864 txPacket->length = txPacketBufPtr - txPacket->data; 1865 /* this is just because the receive can't handle a packet bigger 1866 want to make sure */ 1867 assert(txPacket->length <= 1514); 1868 txFifo.push_back(txPacket); 1869 1870 /* this following section is not to spec, but functionally shouldn't 1871 be any different. normally, the chip will wait til the transmit has 1872 occurred before writing back the descriptor because it has to wait 1873 to see that it was successfully transmitted to decide whether to set 1874 CMDSTS_OK or not. 
however, in the simulator since it is always 1875 successfully transmitted, and writing it exactly to spec would 1876 complicate the code, we just do it here 1877 */ 1878 1879 txDescCache.cmdsts &= ~CMDSTS_OWN; 1880 txDescCache.cmdsts |= CMDSTS_OK; 1881 1882 DPRINTF(EthernetDesc, 1883 "txDesc writeback:\n\tcmdsts=%08x\n\textsts=%08x\n", 1884 txDescCache.cmdsts, txDescCache.extsts); 1885 1886 txDmaAddr = (regs.txdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff; 1887 txDmaData = &(txDescCache.cmdsts); 1888 txDmaLen = sizeof(txDescCache.cmdsts) + sizeof(txDescCache.extsts); 1889 txDmaFree = dmaDescFree; 1890 1891 descDmaWrites++; 1892 descDmaWrBytes += txDmaLen; 1893 1894 transmit(); 1895 txPacket = 0; 1896 1897 if (txHalt) { 1898 DPRINTF(EthernetSM, "halting TX state machine\n"); 1899 txState = txIdle; 1900 txHalt = false; 1901 } else 1902 txState = txAdvance; 1903 1904 if (doTxDmaWrite()) 1905 goto exit; 1906 } 1907 } else { 1908 DPRINTF(EthernetSM, "this descriptor isn't done yet\n"); 1909 if (txFifoAvail) { 1910 txState = txFragRead; 1911 1912 /* The number of bytes transferred is either whatever is left 1913 in the descriptor (txDescCnt), or if there is not enough 1914 room in the fifo, just whatever room is left in the fifo 1915 */ 1916 txXferLen = min<uint32_t>(txDescCnt, txFifoAvail); 1917 1918 txDmaAddr = txFragPtr & 0x3fffffff; 1919 txDmaData = txPacketBufPtr; 1920 txDmaLen = txXferLen; 1921 txDmaFree = dmaDataFree; 1922 1923 if (doTxDmaRead()) 1924 goto exit; 1925 } else { 1926 txState = txFifoBlock; 1927 transmit(); 1928 1929 break; 1930 } 1931 1932 } 1933 break; 1934 1935 case txFragRead: 1936 if (txDmaState != dmaIdle) 1937 goto exit; 1938 1939 txPacketBufPtr += txXferLen; 1940 txFragPtr += txXferLen; 1941 txDescCnt -= txXferLen; 1942 txFifoAvail -= txXferLen; 1943 1944 txState = txFifoBlock; 1945 break; 1946 1947 case txDescWrite: 1948 if (txDmaState != dmaIdle) 1949 goto exit; 1950 1951 if (txDescCache.cmdsts & CMDSTS_INTR) { 1952 devIntrPost(ISR_TXDESC); 
1953 } 1954 1955 txState = txAdvance; 1956 break; 1957 1958 case txAdvance: 1959 if (txDescCache.link == 0) { 1960 txState = txIdle; 1961 } else { 1962 txState = txDescRead; 1963 regs.txdp = txDescCache.link; 1964 CTDD = false; 1965 1966 txDmaAddr = txDescCache.link & 0x3fffffff; 1967 txDmaData = &txDescCache; 1968 txDmaLen = sizeof(ns_desc); 1969 txDmaFree = dmaDescFree; 1970 1971 if (doTxDmaRead()) 1972 goto exit; 1973 } 1974 break; 1975 1976 default: 1977 panic("invalid state"); 1978 } 1979 1980 DPRINTF(EthernetSM, "entering next tx state=%s\n", 1981 NsTxStateStrings[txState]); 1982 1983 if (txState == txIdle) { 1984 regs.command &= ~CR_TXE; 1985 devIntrPost(ISR_TXIDLE); 1986 return; 1987 } 1988 1989 goto next; 1990 1991 exit: 1992 /** 1993 * @todo do we want to schedule a future kick? 1994 */ 1995 DPRINTF(EthernetSM, "tx state machine exited state=%s\n", 1996 NsTxStateStrings[txState]); 1997} 1998 1999void 2000NSGigE::transferDone() 2001{ 2002 if (txFifo.empty()) 2003 return; 2004 2005 if (txEvent.scheduled()) 2006 txEvent.reschedule(curTick + 1); 2007 else 2008 txEvent.schedule(curTick + 1); 2009} 2010 2011bool 2012NSGigE::rxFilter(PacketPtr packet) 2013{ 2014 bool drop = true; 2015 string type; 2016 2017 if (packet->IsUnicast()) { 2018 type = "unicast"; 2019 2020 // If we're accepting all unicast addresses 2021 if (acceptUnicast) 2022 drop = false; 2023 2024 // If we make a perfect match 2025 if ((acceptPerfect) 2026 && (memcmp(rom.perfectMatch, packet->data, sizeof(rom.perfectMatch)) == 0)) 2027 drop = false; 2028 2029 eth_header *eth = (eth_header *) packet->data; 2030 if ((acceptArp) && (eth->type == 0x608)) 2031 drop = false; 2032 2033 } else if (packet->IsBroadcast()) { 2034 type = "broadcast"; 2035 2036 // if we're accepting broadcasts 2037 if (acceptBroadcast) 2038 drop = false; 2039 2040 } else if (packet->IsMulticast()) { 2041 type = "multicast"; 2042 2043 // if we're accepting all multicasts 2044 if (acceptMulticast) 2045 drop = false; 2046 2047 } 
else { 2048 type = "unknown"; 2049 2050 // oh well, punt on this one 2051 } 2052 2053 if (drop) { 2054 DPRINTF(Ethernet, "rxFilter drop\n"); 2055 DDUMP(EthernetData, packet->data, packet->length); 2056 } 2057 2058 return drop; 2059} 2060 2061bool 2062NSGigE::recvPacket(PacketPtr packet) 2063{ 2064 rxBytes += packet->length; 2065 rxPackets++; 2066 2067 DPRINTF(Ethernet, "\n\nReceiving packet from wire, rxFifoAvail = %d\n", maxRxFifoSize - rxFifoCnt); 2068 2069 if (rxState == rxIdle) { 2070 DPRINTF(Ethernet, "receive disabled...packet dropped\n"); 2071 interface->recvDone(); 2072 return true; 2073 } 2074 2075 if (rxFilterEnable && rxFilter(packet)) { 2076 DPRINTF(Ethernet, "packet filtered...dropped\n"); 2077 interface->recvDone(); 2078 return true; 2079 } 2080 2081 if ((rxFifoCnt + packet->length) >= maxRxFifoSize) { 2082 DPRINTF(Ethernet, 2083 "packet will not fit in receive buffer...packet dropped\n"); 2084 devIntrPost(ISR_RXORN); 2085 return false; 2086 } 2087 2088 rxFifo.push_back(packet); 2089 rxFifoCnt += packet->length; 2090 interface->recvDone(); 2091 2092 rxKick(); 2093 return true; 2094} 2095 2096/** 2097 * does a udp checksum. 
if gen is true, then it generates it and puts it in the right place 2098 * else, it just checks what it calculates against the value in the header in packet 2099 */ 2100bool 2101NSGigE::udpChecksum(PacketPtr packet, bool gen) 2102{ 2103 ip_header *ip = packet->getIpHdr(); 2104 udp_header *hdr = packet->getUdpHdr(ip); 2105 2106 pseudo_header *pseudo = new pseudo_header; 2107 2108 pseudo->src_ip_addr = ip->src_ip_addr; 2109 pseudo->dest_ip_addr = ip->dest_ip_addr; 2110 pseudo->protocol = ip->protocol; 2111 pseudo->len = hdr->len; 2112 2113 uint16_t cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr, 2114 (uint32_t) hdr->len); 2115 2116 delete pseudo; 2117 if (gen) 2118 hdr->chksum = cksum; 2119 else 2120 if (cksum != 0) 2121 return false; 2122 2123 return true; 2124} 2125 2126bool 2127NSGigE::tcpChecksum(PacketPtr packet, bool gen) 2128{ 2129 ip_header *ip = packet->getIpHdr(); 2130 tcp_header *hdr = packet->getTcpHdr(ip); 2131 2132 uint16_t cksum; 2133 pseudo_header *pseudo = new pseudo_header; 2134 if (!gen) { 2135 pseudo->src_ip_addr = ip->src_ip_addr; 2136 pseudo->dest_ip_addr = ip->dest_ip_addr; 2137 pseudo->protocol = reverseEnd16(ip->protocol); 2138 pseudo->len = reverseEnd16(reverseEnd16(ip->dgram_len) - (ip->vers_len & 0xf)*4); 2139 2140 cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr, 2141 (uint32_t) reverseEnd16(pseudo->len)); 2142 } else { 2143 pseudo->src_ip_addr = 0; 2144 pseudo->dest_ip_addr = 0; 2145 pseudo->protocol = hdr->chksum; 2146 pseudo->len = 0; 2147 hdr->chksum = 0; 2148 cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr, 2149 (uint32_t) (reverseEnd16(ip->dgram_len) - (ip->vers_len & 0xf)*4)); 2150 } 2151 2152 delete pseudo; 2153 if (gen) 2154 hdr->chksum = cksum; 2155 else 2156 if (cksum != 0) 2157 return false; 2158 2159 return true; 2160} 2161 2162bool 2163NSGigE::ipChecksum(PacketPtr packet, bool gen) 2164{ 2165 ip_header *hdr = packet->getIpHdr(); 2166 2167 uint16_t cksum = checksumCalc(NULL, (uint16_t *) hdr, 
(hdr->vers_len & 0xf)*4); 2168 2169 if (gen) { 2170 DPRINTF(EthernetCksum, "generated checksum: %#x\n", cksum); 2171 hdr->hdr_chksum = cksum; 2172 } 2173 else 2174 if (cksum != 0) 2175 return false; 2176 2177 return true; 2178} 2179 2180uint16_t 2181NSGigE::checksumCalc(uint16_t *pseudo, uint16_t *buf, uint32_t len) 2182{ 2183 uint32_t sum = 0; 2184 2185 uint16_t last_pad = 0; 2186 if (len & 1) { 2187 last_pad = buf[len/2] & 0xff; 2188 len--; 2189 sum += last_pad; 2190 } 2191 2192 if (pseudo) { 2193 sum = pseudo[0] + pseudo[1] + pseudo[2] + 2194 pseudo[3] + pseudo[4] + pseudo[5]; 2195 } 2196 2197 for (int i=0; i < (len/2); ++i) { 2198 sum += buf[i]; 2199 } 2200 2201 while (sum >> 16) 2202 sum = (sum >> 16) + (sum & 0xffff); 2203 2204 return ~sum; 2205} 2206 2207//===================================================================== 2208// 2209// 2210void 2211NSGigE::serialize(ostream &os) 2212{ 2213 // Serialize the PciDev base class 2214 PciDev::serialize(os); 2215 2216 /* 2217 * Finalize any DMA events now. 
2218 */ 2219 if (rxDmaReadEvent.scheduled()) 2220 rxDmaReadCopy(); 2221 if (rxDmaWriteEvent.scheduled()) 2222 rxDmaWriteCopy(); 2223 if (txDmaReadEvent.scheduled()) 2224 txDmaReadCopy(); 2225 if (txDmaWriteEvent.scheduled()) 2226 txDmaWriteCopy(); 2227 2228 /* 2229 * Serialize the device registers 2230 */ 2231 SERIALIZE_SCALAR(regs.command); 2232 SERIALIZE_SCALAR(regs.config); 2233 SERIALIZE_SCALAR(regs.mear); 2234 SERIALIZE_SCALAR(regs.ptscr); 2235 SERIALIZE_SCALAR(regs.isr); 2236 SERIALIZE_SCALAR(regs.imr); 2237 SERIALIZE_SCALAR(regs.ier); 2238 SERIALIZE_SCALAR(regs.ihr); 2239 SERIALIZE_SCALAR(regs.txdp); 2240 SERIALIZE_SCALAR(regs.txdp_hi); 2241 SERIALIZE_SCALAR(regs.txcfg); 2242 SERIALIZE_SCALAR(regs.gpior); 2243 SERIALIZE_SCALAR(regs.rxdp); 2244 SERIALIZE_SCALAR(regs.rxdp_hi); 2245 SERIALIZE_SCALAR(regs.rxcfg); 2246 SERIALIZE_SCALAR(regs.pqcr); 2247 SERIALIZE_SCALAR(regs.wcsr); 2248 SERIALIZE_SCALAR(regs.pcr); 2249 SERIALIZE_SCALAR(regs.rfcr); 2250 SERIALIZE_SCALAR(regs.rfdr); 2251 SERIALIZE_SCALAR(regs.srr); 2252 SERIALIZE_SCALAR(regs.mibc); 2253 SERIALIZE_SCALAR(regs.vrcr); 2254 SERIALIZE_SCALAR(regs.vtcr); 2255 SERIALIZE_SCALAR(regs.vdr); 2256 SERIALIZE_SCALAR(regs.ccsr); 2257 SERIALIZE_SCALAR(regs.tbicr); 2258 SERIALIZE_SCALAR(regs.tbisr); 2259 SERIALIZE_SCALAR(regs.tanar); 2260 SERIALIZE_SCALAR(regs.tanlpar); 2261 SERIALIZE_SCALAR(regs.taner); 2262 SERIALIZE_SCALAR(regs.tesr); 2263 2264 SERIALIZE_ARRAY(rom.perfectMatch, EADDR_LEN); 2265 2266 SERIALIZE_SCALAR(ioEnable); 2267 2268 /* 2269 * Serialize the data Fifos 2270 */ 2271 int txNumPkts = txFifo.size(); 2272 SERIALIZE_SCALAR(txNumPkts); 2273 int i = 0; 2274 pktiter_t end = txFifo.end(); 2275 for (pktiter_t p = txFifo.begin(); p != end; ++p) { 2276 nameOut(os, csprintf("%s.txFifo%d", name(), i++)); 2277 (*p)->serialize(os); 2278 } 2279 2280 int rxNumPkts = rxFifo.size(); 2281 SERIALIZE_SCALAR(rxNumPkts); 2282 i = 0; 2283 end = rxFifo.end(); 2284 for (pktiter_t p = rxFifo.begin(); p != end; ++p) { 2285 
nameOut(os, csprintf("%s.rxFifo%d", name(), i++)); 2286 (*p)->serialize(os); 2287 } 2288 2289 /* 2290 * Serialize the various helper variables 2291 */ 2292 bool txPacketExists = txPacket; 2293 SERIALIZE_SCALAR(txPacketExists); 2294 if (txPacketExists) { 2295 nameOut(os, csprintf("%s.txPacket", name())); 2296 txPacket->serialize(os); 2297 uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data); 2298 SERIALIZE_SCALAR(txPktBufPtr); 2299 } 2300 2301 bool rxPacketExists = rxPacket; 2302 SERIALIZE_SCALAR(rxPacketExists); 2303 if (rxPacketExists) { 2304 nameOut(os, csprintf("%s.rxPacket", name())); 2305 rxPacket->serialize(os); 2306 uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data); 2307 SERIALIZE_SCALAR(rxPktBufPtr); 2308 } 2309 2310 SERIALIZE_SCALAR(txXferLen); 2311 SERIALIZE_SCALAR(rxXferLen); 2312 2313 /* 2314 * Serialize DescCaches 2315 */ 2316 SERIALIZE_SCALAR(txDescCache.link); 2317 SERIALIZE_SCALAR(txDescCache.bufptr); 2318 SERIALIZE_SCALAR(txDescCache.cmdsts); 2319 SERIALIZE_SCALAR(txDescCache.extsts); 2320 SERIALIZE_SCALAR(rxDescCache.link); 2321 SERIALIZE_SCALAR(rxDescCache.bufptr); 2322 SERIALIZE_SCALAR(rxDescCache.cmdsts); 2323 SERIALIZE_SCALAR(rxDescCache.extsts); 2324 2325 /* 2326 * Serialize tx state machine 2327 */ 2328 int txState = this->txState; 2329 SERIALIZE_SCALAR(txState); 2330 SERIALIZE_SCALAR(CTDD); 2331 SERIALIZE_SCALAR(txFifoAvail); 2332 SERIALIZE_SCALAR(txHalt); 2333 SERIALIZE_SCALAR(txFragPtr); 2334 SERIALIZE_SCALAR(txDescCnt); 2335 int txDmaState = this->txDmaState; 2336 SERIALIZE_SCALAR(txDmaState); 2337 2338 /* 2339 * Serialize rx state machine 2340 */ 2341 int rxState = this->rxState; 2342 SERIALIZE_SCALAR(rxState); 2343 SERIALIZE_SCALAR(CRDD); 2344 SERIALIZE_SCALAR(rxPktBytes); 2345 SERIALIZE_SCALAR(rxFifoCnt); 2346 SERIALIZE_SCALAR(rxHalt); 2347 SERIALIZE_SCALAR(rxDescCnt); 2348 int rxDmaState = this->rxDmaState; 2349 SERIALIZE_SCALAR(rxDmaState); 2350 2351 SERIALIZE_SCALAR(extstsEnable); 2352 2353 /* 2354 
* If there's a pending transmit, store the time so we can 2355 * reschedule it later 2356 */ 2357 Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0; 2358 SERIALIZE_SCALAR(transmitTick); 2359 2360 /* 2361 * receive address filter settings 2362 */ 2363 SERIALIZE_SCALAR(rxFilterEnable); 2364 SERIALIZE_SCALAR(acceptBroadcast); 2365 SERIALIZE_SCALAR(acceptMulticast); 2366 SERIALIZE_SCALAR(acceptUnicast); 2367 SERIALIZE_SCALAR(acceptPerfect); 2368 SERIALIZE_SCALAR(acceptArp); 2369 2370 /* 2371 * Keep track of pending interrupt status. 2372 */ 2373 SERIALIZE_SCALAR(intrTick); 2374 SERIALIZE_SCALAR(cpuPendingIntr); 2375 Tick intrEventTick = 0; 2376 if (intrEvent) 2377 intrEventTick = intrEvent->when(); 2378 SERIALIZE_SCALAR(intrEventTick); 2379 2380} 2381 2382void 2383NSGigE::unserialize(Checkpoint *cp, const std::string §ion) 2384{ 2385 // Unserialize the PciDev base class 2386 PciDev::unserialize(cp, section); 2387 2388 UNSERIALIZE_SCALAR(regs.command); 2389 UNSERIALIZE_SCALAR(regs.config); 2390 UNSERIALIZE_SCALAR(regs.mear); 2391 UNSERIALIZE_SCALAR(regs.ptscr); 2392 UNSERIALIZE_SCALAR(regs.isr); 2393 UNSERIALIZE_SCALAR(regs.imr); 2394 UNSERIALIZE_SCALAR(regs.ier); 2395 UNSERIALIZE_SCALAR(regs.ihr); 2396 UNSERIALIZE_SCALAR(regs.txdp); 2397 UNSERIALIZE_SCALAR(regs.txdp_hi); 2398 UNSERIALIZE_SCALAR(regs.txcfg); 2399 UNSERIALIZE_SCALAR(regs.gpior); 2400 UNSERIALIZE_SCALAR(regs.rxdp); 2401 UNSERIALIZE_SCALAR(regs.rxdp_hi); 2402 UNSERIALIZE_SCALAR(regs.rxcfg); 2403 UNSERIALIZE_SCALAR(regs.pqcr); 2404 UNSERIALIZE_SCALAR(regs.wcsr); 2405 UNSERIALIZE_SCALAR(regs.pcr); 2406 UNSERIALIZE_SCALAR(regs.rfcr); 2407 UNSERIALIZE_SCALAR(regs.rfdr); 2408 UNSERIALIZE_SCALAR(regs.srr); 2409 UNSERIALIZE_SCALAR(regs.mibc); 2410 UNSERIALIZE_SCALAR(regs.vrcr); 2411 UNSERIALIZE_SCALAR(regs.vtcr); 2412 UNSERIALIZE_SCALAR(regs.vdr); 2413 UNSERIALIZE_SCALAR(regs.ccsr); 2414 UNSERIALIZE_SCALAR(regs.tbicr); 2415 UNSERIALIZE_SCALAR(regs.tbisr); 2416 UNSERIALIZE_SCALAR(regs.tanar); 
2417 UNSERIALIZE_SCALAR(regs.tanlpar); 2418 UNSERIALIZE_SCALAR(regs.taner); 2419 UNSERIALIZE_SCALAR(regs.tesr); 2420 2421 UNSERIALIZE_ARRAY(rom.perfectMatch, EADDR_LEN); 2422 2423 UNSERIALIZE_SCALAR(ioEnable); 2424 2425 /* 2426 * unserialize the data fifos 2427 */ 2428 int txNumPkts; 2429 UNSERIALIZE_SCALAR(txNumPkts); 2430 int i; 2431 for (i = 0; i < txNumPkts; ++i) { 2432 PacketPtr p = new EtherPacket; 2433 p->unserialize(cp, csprintf("%s.rxFifo%d", section, i)); 2434 txFifo.push_back(p); 2435 } 2436 2437 int rxNumPkts; 2438 UNSERIALIZE_SCALAR(rxNumPkts); 2439 for (i = 0; i < rxNumPkts; ++i) { 2440 PacketPtr p = new EtherPacket; 2441 p->unserialize(cp, csprintf("%s.rxFifo%d", section, i)); 2442 rxFifo.push_back(p); 2443 } 2444 2445 /* 2446 * unserialize the various helper variables 2447 */ 2448 bool txPacketExists; 2449 UNSERIALIZE_SCALAR(txPacketExists); 2450 if (txPacketExists) { 2451 txPacket = new EtherPacket; 2452 txPacket->unserialize(cp, csprintf("%s.txPacket", section)); 2453 uint32_t txPktBufPtr; 2454 UNSERIALIZE_SCALAR(txPktBufPtr); 2455 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr; 2456 } else 2457 txPacket = 0; 2458 2459 bool rxPacketExists; 2460 UNSERIALIZE_SCALAR(rxPacketExists); 2461 rxPacket = 0; 2462 if (rxPacketExists) { 2463 rxPacket = new EtherPacket; 2464 rxPacket->unserialize(cp, csprintf("%s.rxPacket", section)); 2465 uint32_t rxPktBufPtr; 2466 UNSERIALIZE_SCALAR(rxPktBufPtr); 2467 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr; 2468 } else 2469 rxPacket = 0; 2470 2471 UNSERIALIZE_SCALAR(txXferLen); 2472 UNSERIALIZE_SCALAR(rxXferLen); 2473 2474 /* 2475 * Unserialize DescCaches 2476 */ 2477 UNSERIALIZE_SCALAR(txDescCache.link); 2478 UNSERIALIZE_SCALAR(txDescCache.bufptr); 2479 UNSERIALIZE_SCALAR(txDescCache.cmdsts); 2480 UNSERIALIZE_SCALAR(txDescCache.extsts); 2481 UNSERIALIZE_SCALAR(rxDescCache.link); 2482 UNSERIALIZE_SCALAR(rxDescCache.bufptr); 2483 UNSERIALIZE_SCALAR(rxDescCache.cmdsts); 2484 
UNSERIALIZE_SCALAR(rxDescCache.extsts); 2485 2486 /* 2487 * unserialize tx state machine 2488 */ 2489 int txState; 2490 UNSERIALIZE_SCALAR(txState); 2491 this->txState = (TxState) txState; 2492 UNSERIALIZE_SCALAR(CTDD); 2493 UNSERIALIZE_SCALAR(txFifoAvail); 2494 UNSERIALIZE_SCALAR(txHalt); 2495 UNSERIALIZE_SCALAR(txFragPtr); 2496 UNSERIALIZE_SCALAR(txDescCnt); 2497 int txDmaState; 2498 UNSERIALIZE_SCALAR(txDmaState); 2499 this->txDmaState = (DmaState) txDmaState; 2500 2501 /* 2502 * unserialize rx state machine 2503 */ 2504 int rxState; 2505 UNSERIALIZE_SCALAR(rxState); 2506 this->rxState = (RxState) rxState; 2507 UNSERIALIZE_SCALAR(CRDD); 2508 UNSERIALIZE_SCALAR(rxPktBytes); 2509 UNSERIALIZE_SCALAR(rxFifoCnt); 2510 UNSERIALIZE_SCALAR(rxHalt); 2511 UNSERIALIZE_SCALAR(rxDescCnt); 2512 int rxDmaState; 2513 UNSERIALIZE_SCALAR(rxDmaState); 2514 this->rxDmaState = (DmaState) rxDmaState; 2515 2516 UNSERIALIZE_SCALAR(extstsEnable); 2517 2518 /* 2519 * If there's a pending transmit, reschedule it now 2520 */ 2521 Tick transmitTick; 2522 UNSERIALIZE_SCALAR(transmitTick); 2523 if (transmitTick) 2524 txEvent.schedule(curTick + transmitTick); 2525 2526 /* 2527 * unserialize receive address filter settings 2528 */ 2529 UNSERIALIZE_SCALAR(rxFilterEnable); 2530 UNSERIALIZE_SCALAR(acceptBroadcast); 2531 UNSERIALIZE_SCALAR(acceptMulticast); 2532 UNSERIALIZE_SCALAR(acceptUnicast); 2533 UNSERIALIZE_SCALAR(acceptPerfect); 2534 UNSERIALIZE_SCALAR(acceptArp); 2535 2536 /* 2537 * Keep track of pending interrupt status. 
2538 */ 2539 UNSERIALIZE_SCALAR(intrTick); 2540 UNSERIALIZE_SCALAR(cpuPendingIntr); 2541 Tick intrEventTick; 2542 UNSERIALIZE_SCALAR(intrEventTick); 2543 if (intrEventTick) { 2544 intrEvent = new IntrEvent(this, true); 2545 intrEvent->schedule(intrEventTick); 2546 } 2547 2548 /* 2549 * re-add addrRanges to bus bridges 2550 */ 2551 if (pioInterface) { 2552 pioInterface->addAddrRange(BARAddrs[0], BARAddrs[0] + BARSize[0] - 1); 2553 pioInterface->addAddrRange(BARAddrs[1], BARAddrs[1] + BARSize[1] - 1); 2554 } 2555} 2556 2557Tick 2558NSGigE::cacheAccess(MemReqPtr &req) 2559{ 2560 DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n", 2561 req->paddr, req->paddr - addr); 2562 return curTick + pioLatency; 2563} 2564//===================================================================== 2565 2566 2567//********** helper functions****************************************** 2568 2569uint16_t reverseEnd16(uint16_t num) 2570{ 2571 uint16_t reverse = (num & 0xff)<<8; 2572 reverse += ((num & 0xff00) >> 8); 2573 return reverse; 2574} 2575 2576uint32_t reverseEnd32(uint32_t num) 2577{ 2578 uint32_t reverse = (reverseEnd16(num & 0xffff)) << 16; 2579 reverse += reverseEnd16((uint16_t) ((num & 0xffff0000) >> 8)); 2580 return reverse; 2581} 2582 2583 2584 2585//===================================================================== 2586 2587BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt) 2588 2589 SimObjectParam<EtherInt *> peer; 2590 SimObjectParam<NSGigE *> device; 2591 2592END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt) 2593 2594BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt) 2595 2596 INIT_PARAM_DFLT(peer, "peer interface", NULL), 2597 INIT_PARAM(device, "Ethernet device of this interface") 2598 2599END_INIT_SIM_OBJECT_PARAMS(NSGigEInt) 2600 2601CREATE_SIM_OBJECT(NSGigEInt) 2602{ 2603 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device); 2604 2605 EtherInt *p = (EtherInt *)peer; 2606 if (p) { 2607 dev_int->setPeer(p); 2608 p->setPeer(dev_int); 2609 } 2610 2611 return dev_int; 2612} 
REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)


// Simulator-visible configuration parameters for the NSGigE device.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    Param<Tick> tx_delay;
    Param<Tick> rx_delay;
    SimObjectParam<IntrControl *> intr_ctrl;
    Param<Tick> intr_delay;
    SimObjectParam<MemoryController *> mmu;
    SimObjectParam<PhysicalMemory *> physmem;
    Param<bool> rx_filter;
    Param<string> hardware_address;
    SimObjectParam<Bus*> header_bus;
    SimObjectParam<Bus*> payload_bus;
    SimObjectParam<HierParams *> hier;
    Param<Tick> pio_latency;
    Param<bool> dma_desc_free;
    Param<bool> dma_data_free;
    Param<Tick> dma_read_delay;
    Param<Tick> dma_write_delay;
    Param<Tick> dma_read_factor;
    Param<Tick> dma_write_factor;
    SimObjectParam<PciConfigAll *> configspace;
    SimObjectParam<PciConfigData *> configdata;
    SimObjectParam<Tsunami *> tsunami;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;
    Param<uint32_t> tx_fifo_size;
    Param<uint32_t> rx_fifo_size;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

// Parameter descriptions and defaults.
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)

    INIT_PARAM_DFLT(tx_delay, "Transmit Delay", 1000),
    INIT_PARAM_DFLT(rx_delay, "Receive Delay", 1000),
    INIT_PARAM(intr_ctrl, "Interrupt Controller"),
    // NOTE(review): description says "microseconds" but the parameter is
    // a Tick and is passed straight through to the constructor -- confirm
    // the unit actually expected by callers.
    INIT_PARAM_DFLT(intr_delay, "Interrupt Delay in microseconds", 0),
    INIT_PARAM(mmu, "Memory Controller"),
    INIT_PARAM(physmem, "Physical Memory"),
    INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true),
    INIT_PARAM_DFLT(hardware_address, "Ethernet Hardware Address",
                    "00:99:00:00:00:01"),
    INIT_PARAM_DFLT(header_bus, "The IO Bus to attach to for headers", NULL),
    INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL),
    INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams),
    INIT_PARAM_DFLT(pio_latency, "Programmed IO latency", 1000),
    INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false),
    INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false),
    INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0),
    INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0),
    INIT_PARAM(configspace, "PCI Configspace"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(tsunami, "Tsunami"),
    INIT_PARAM(pci_bus, "PCI bus"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),
    INIT_PARAM_DFLT(tx_fifo_size, "max size in bytes of txFifo", 131072),
    INIT_PARAM_DFLT(rx_fifo_size, "max size in bytes of rxFifo", 131072)

END_INIT_SIM_OBJECT_PARAMS(NSGigE)


// Factory: parses the configured MAC address string and builds the device.
CREATE_SIM_OBJECT(NSGigE)
{
    int eaddr[6];
    // NOTE(review): sscanf's return value (fields converted) is not
    // checked -- a malformed hardware_address string leaves some of
    // eaddr[] uninitialized.
    sscanf(((string)hardware_address).c_str(), "%x:%x:%x:%x:%x:%x",
           &eaddr[0], &eaddr[1], &eaddr[2], &eaddr[3], &eaddr[4], &eaddr[5]);

    return new NSGigE(getInstanceName(), intr_ctrl, intr_delay,
                      physmem, tx_delay, rx_delay, mmu, hier, header_bus,
                      payload_bus, pio_latency, dma_desc_free, dma_data_free,
                      dma_read_delay, dma_write_delay, dma_read_factor,
                      dma_write_factor, configspace, configdata,
                      tsunami, pci_bus, pci_dev, pci_func, rx_filter, eaddr,
                      tx_fifo_size, rx_fifo_size);
}

REGISTER_SIM_OBJECT("NSGigE", NSGigE)