ns_gige.cc revision 1027
1/* 2 * Copyright (c) 2004 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29/* @file 30 * Device module for modelling the National Semiconductor 31 * DP83820 ethernet controller. Does not support priority queueing 32 */ 33#include <cstdio> 34#include <deque> 35#include <string> 36 37#include "base/inet.hh" 38#include "cpu/exec_context.hh" 39#include "cpu/intr_control.hh" 40#include "dev/dma.hh" 41#include "dev/etherlink.hh" 42#include "dev/ns_gige.hh" 43#include "dev/pciconfigall.hh" 44#include "dev/tsunami_cchip.hh" 45#include "mem/bus/bus.hh" 46#include "mem/bus/dma_interface.hh" 47#include "mem/bus/pio_interface.hh" 48#include "mem/bus/pio_interface_impl.hh" 49#include "mem/functional_mem/memory_control.hh" 50#include "mem/functional_mem/physical_memory.hh" 51#include "sim/builder.hh" 52#include "sim/host.hh" 53#include "sim/sim_stats.hh" 54#include "targetarch/vtophys.hh" 55 56const char *NsRxStateStrings[] = 57{ 58 "rxIdle", 59 "rxDescRefr", 60 "rxDescRead", 61 "rxFifoBlock", 62 "rxFragWrite", 63 "rxDescWrite", 64 "rxAdvance" 65}; 66 67const char *NsTxStateStrings[] = 68{ 69 "txIdle", 70 "txDescRefr", 71 "txDescRead", 72 "txFifoBlock", 73 "txFragRead", 74 "txDescWrite", 75 "txAdvance" 76}; 77 78const char *NsDmaState[] = 79{ 80 "dmaIdle", 81 "dmaReading", 82 "dmaWriting", 83 "dmaReadWaiting", 84 "dmaWriteWaiting" 85}; 86 87using namespace std; 88 89// helper function declarations 90// These functions reverse Endianness so we can evaluate network data 91// correctly 92uint16_t reverseEnd16(uint16_t); 93uint32_t reverseEnd32(uint32_t); 94 95/////////////////////////////////////////////////////////////////////// 96// 97// NSGigE PCI Device 98// 99NSGigE::NSGigE(const std::string &name, IntrControl *i, Tick intr_delay, 100 PhysicalMemory *pmem, Tick tx_delay, Tick rx_delay, 101 MemoryController *mmu, HierParams *hier, Bus *header_bus, 102 Bus *payload_bus, Tick pio_latency, bool dma_desc_free, 103 bool dma_data_free, Tick dma_read_delay, Tick dma_write_delay, 104 
Tick dma_read_factor, Tick dma_write_factor, PciConfigAll *cf, 105 PciConfigData *cd, Tsunami *t, uint32_t bus, uint32_t dev, 106 uint32_t func, bool rx_filter, const int eaddr[6], 107 uint32_t tx_fifo_size, uint32_t rx_fifo_size) 108 : PciDev(name, mmu, cf, cd, bus, dev, func), tsunami(t), ioEnable(false), 109 maxTxFifoSize(tx_fifo_size), maxRxFifoSize(rx_fifo_size), 110 txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL), 111 txXferLen(0), rxXferLen(0), txState(txIdle), CTDD(false), 112 txFifoAvail(tx_fifo_size), txHalt(false), 113 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle), 114 CRDD(false), rxPktBytes(0), rxFifoCnt(0), rxHalt(false), 115 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false), 116 rxDmaReadEvent(this), rxDmaWriteEvent(this), 117 txDmaReadEvent(this), txDmaWriteEvent(this), 118 dmaDescFree(dma_desc_free), dmaDataFree(dma_data_free), 119 txDelay(tx_delay), rxDelay(rx_delay), rxKickTick(0), txKickTick(0), 120 txEvent(this), rxFilterEnable(rx_filter), acceptBroadcast(false), 121 acceptMulticast(false), acceptUnicast(false), 122 acceptPerfect(false), acceptArp(false), 123 physmem(pmem), intctrl(i), intrTick(0), cpuPendingIntr(false), 124 intrEvent(0), interface(0) 125{ 126 tsunami->ethernet = this; 127 128 if (header_bus) { 129 pioInterface = newPioInterface(name, hier, header_bus, this, 130 &NSGigE::cacheAccess); 131 132 pioLatency = pio_latency * header_bus->clockRatio; 133 134 if (payload_bus) 135 dmaInterface = new DMAInterface<Bus>(name + ".dma", 136 header_bus, payload_bus, 1); 137 else 138 dmaInterface = new DMAInterface<Bus>(name + ".dma", 139 header_bus, header_bus, 1); 140 } else if (payload_bus) { 141 pioInterface = newPioInterface(name, hier, payload_bus, this, 142 &NSGigE::cacheAccess); 143 144 pioLatency = pio_latency * payload_bus->clockRatio; 145 146 dmaInterface = new DMAInterface<Bus>(name + ".dma", payload_bus, 147 payload_bus, 1); 148 } 149 150 151 intrDelay = US2Ticks(intr_delay); 152 dmaReadDelay = dma_read_delay; 153 dmaWriteDelay = dma_write_delay; 154 dmaReadFactor = dma_read_factor; 155 dmaWriteFactor = dma_write_factor; 156 157 regsReset(); 158 rom.perfectMatch[0] = eaddr[0]; 159 rom.perfectMatch[1] = eaddr[1]; 160 rom.perfectMatch[2] = eaddr[2]; 161 rom.perfectMatch[3] = eaddr[3]; 162 rom.perfectMatch[4] = eaddr[4]; 163 rom.perfectMatch[5] = eaddr[5]; 164} 165 166NSGigE::~NSGigE() 167{} 168 169void 170NSGigE::regStats() 171{ 172 txBytes 173 .name(name() + ".txBytes") 174 .desc("Bytes Transmitted") 175 .prereq(txBytes) 176 ; 177 178 rxBytes 179 .name(name() + ".rxBytes") 180 .desc("Bytes Received") 181 .prereq(rxBytes) 182 ; 183 184 txPackets 185 .name(name() + ".txPackets") 186 .desc("Number of Packets Transmitted") 187 .prereq(txBytes) 188 ; 189 190 rxPackets 191 .name(name() + ".rxPackets") 192 .desc("Number of Packets Received") 193 .prereq(rxBytes) 194 ; 195 196 txIPChecksums 197 .name(name() + ".txIPChecksums") 198 .desc("Number of tx IP Checksums done by device") 199 .precision(0) 200 .prereq(txBytes) 201 ; 202 203 rxIPChecksums 204 .name(name() + ".rxIPChecksums") 205 .desc("Number of rx IP Checksums done by device") 206 .precision(0) 207 .prereq(rxBytes) 208 ; 209 210 txTCPChecksums 211 .name(name() + ".txTCPChecksums") 212 .desc("Number of tx TCP Checksums done by device") 213 .precision(0) 214 .prereq(txBytes) 215 ; 216 217 rxTCPChecksums 218 .name(name() + ".rxTCPChecksums") 219 .desc("Number of rx TCP Checksums done by device") 220 .precision(0) 221 .prereq(rxBytes) 222 ; 223 224 
descDmaReads 225 .name(name() + ".descDMAReads") 226 .desc("Number of descriptors the device read w/ DMA") 227 .precision(0) 228 ; 229 230 descDmaWrites 231 .name(name() + ".descDMAWrites") 232 .desc("Number of descriptors the device wrote w/ DMA") 233 .precision(0) 234 ; 235 236 descDmaRdBytes 237 .name(name() + ".descDmaReadBytes") 238 .desc("number of descriptor bytes read w/ DMA") 239 .precision(0) 240 ; 241 242 descDmaWrBytes 243 .name(name() + ".descDmaWriteBytes") 244 .desc("number of descriptor bytes write w/ DMA") 245 .precision(0) 246 ; 247 248 249 txBandwidth 250 .name(name() + ".txBandwidth") 251 .desc("Transmit Bandwidth (bits/s)") 252 .precision(0) 253 .prereq(txBytes) 254 ; 255 256 rxBandwidth 257 .name(name() + ".rxBandwidth") 258 .desc("Receive Bandwidth (bits/s)") 259 .precision(0) 260 .prereq(rxBytes) 261 ; 262 263 txPacketRate 264 .name(name() + ".txPPS") 265 .desc("Packet Tranmission Rate (packets/s)") 266 .precision(0) 267 .prereq(txBytes) 268 ; 269 270 rxPacketRate 271 .name(name() + ".rxPPS") 272 .desc("Packet Reception Rate (packets/s)") 273 .precision(0) 274 .prereq(rxBytes) 275 ; 276 277 txBandwidth = txBytes * Stats::constant(8) / simSeconds; 278 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds; 279 txPacketRate = txPackets / simSeconds; 280 rxPacketRate = rxPackets / simSeconds; 281} 282 283/** 284 * This is to read the PCI general configuration registers 285 */ 286void 287NSGigE::ReadConfig(int offset, int size, uint8_t *data) 288{ 289 if (offset < PCI_DEVICE_SPECIFIC) 290 PciDev::ReadConfig(offset, size, data); 291 else 292 panic("Device specific PCI config space not implemented!\n"); 293} 294 295/** 296 * This is to write to the PCI general configuration registers 297 */ 298void 299NSGigE::WriteConfig(int offset, int size, uint32_t data) 300{ 301 if (offset < PCI_DEVICE_SPECIFIC) 302 PciDev::WriteConfig(offset, size, data); 303 else 304 panic("Device specific PCI config space not implemented!\n"); 305 306 // Need to catch writes to BARs to update the PIO interface 307 switch (offset) { 308 // seems to work fine without all these PCI settings, but i 309 // put in the IO to double check, an assertion will fail if we 310 // need to properly implement it 311 case PCI_COMMAND: 312 if (config.data[offset] & PCI_CMD_IOSE) 313 ioEnable = true; 314 else 315 ioEnable = false; 316 317#if 0 318 if (config.data[offset] & PCI_CMD_BME) { 319 bmEnabled = true; 320 } 321 else { 322 bmEnabled = false; 323 } 324 325 if (config.data[offset] & PCI_CMD_MSE) { 326 memEnable = true; 327 } 328 else { 329 memEnable = false; 330 } 331#endif 332 break; 333 334 case PCI0_BASE_ADDR0: 335 if (BARAddrs[0] != 0) { 336 if (pioInterface) 337 pioInterface->addAddrRange(BARAddrs[0], 338 BARAddrs[0] + BARSize[0] - 1); 339 340 BARAddrs[0] &= PA_UNCACHED_MASK; 341 } 342 break; 343 case PCI0_BASE_ADDR1: 344 if (BARAddrs[1] != 0) { 345 if (pioInterface) 346 pioInterface->addAddrRange(BARAddrs[1], 347 BARAddrs[1] + BARSize[1] - 1); 348 349 BARAddrs[1] &= PA_UNCACHED_MASK; 350 } 351 break; 352 } 353} 354 355/** 356 * This reads the device registers, which are detailed in the NS83820 357 * spec sheet 358 */ 359Fault 360NSGigE::read(MemReqPtr &req, uint8_t *data) 361{ 362 assert(ioEnable); 363 364 //The mask is to give you only the offset into the device register file 365 Addr daddr = req->paddr & 0xfff; 366 DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n", 367 daddr, req->paddr, req->vaddr, req->size); 368 369 370 // there are some reserved registers, you can see ns_gige_reg.h and 371 
    // the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        ReadConfig(daddr & 0xff, req->size, data);
        return No_Fault;
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's.  hopefully the kernel
        // doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        uint32_t &reg = *(uint32_t *) data;
        reg = 0;
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    switch (req->size) {
      case sizeof(uint32_t):
        {
            uint32_t &reg = *(uint32_t *)data;

            switch (daddr) {
              case CR:
                reg = regs.command;
                // these are supposed to be cleared on a read
                reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
                break;

              case CFG:
                reg = regs.config;
                break;

              case MEAR:
                reg = regs.mear;
                break;

              case PTSCR:
                reg = regs.ptscr;
                break;

              case ISR:
                reg = regs.isr;
                devIntrClear(ISR_ALL);
                break;

              case IMR:
                reg = regs.imr;
                break;

              case IER:
                reg = regs.ier;
                break;

              case IHR:
                reg = regs.ihr;
                break;

              case TXDP:
                reg = regs.txdp;
                break;

              case TXDP_HI:
                reg = regs.txdp_hi;
                break;

              case TXCFG:
                reg = regs.txcfg;
                break;

              case GPIOR:
                reg = regs.gpior;
                break;

              case RXDP:
                reg = regs.rxdp;
                break;

              case RXDP_HI:
                reg = regs.rxdp_hi;
                break;

              case RXCFG:
                reg = regs.rxcfg;
                break;

              case PQCR:
                reg = regs.pqcr;
                break;

              case WCSR:
                reg = regs.wcsr;
                break;

              case PCR:
                reg = regs.pcr;
                break;

              // see the spec sheet for how RFCR and RFDR work
              // basically, you write to RFCR to tell the machine
              // what you want to do next, then you act upon RFDR,
              // and the device will be prepared b/c of what you
              // wrote to RFCR
              case RFCR:
                reg = regs.rfcr;
                break;

              case RFDR:
                switch (regs.rfcr & RFCR_RFADDR) {
                  case 0x000:
                    reg = rom.perfectMatch[1];
                    reg = reg << 8;
                    reg += rom.perfectMatch[0];
                    break;
                  case 0x002:
                    reg = rom.perfectMatch[3] << 8;
                    reg += rom.perfectMatch[2];
                    break;
                  case 0x004:
                    reg = rom.perfectMatch[5] << 8;
                    reg += rom.perfectMatch[4];
                    break;
                  default:
                    panic("reading RFDR for something other than PMATCH!\n");
                    // didn't implement other RFDR functionality b/c
                    // driver didn't use it
                }
                break;

              case SRR:
                reg = regs.srr;
                break;

              case MIBC:
                reg = regs.mibc;
                reg &= ~(MIBC_MIBS | MIBC_ACLR);
                break;

              case VRCR:
                reg = regs.vrcr;
                break;

              case VTCR:
                reg = regs.vtcr;
                break;

              case VDR:
                reg = regs.vdr;
                break;

              case CCSR:
                reg = regs.ccsr;
                break;

              case TBICR:
                reg = regs.tbicr;
                break;

              case TBISR:
                reg = regs.tbisr;
                break;

              case TANAR:
                reg = regs.tanar;
                break;

              case TANLPAR:
                reg = regs.tanlpar;
                break;

              case TANER:
                reg = regs.taner;
                break;

              case TESR:
                reg = regs.tesr;
                break;

              default:
                panic("reading unimplemented register: addr = %#x", daddr);
            }

            DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
                    daddr, reg, reg);
        }
        break;

      default:
        panic("accessing register with invalid size: addr=%#x, size=%d",
              daddr, req->size);
    }
562 return No_Fault; 563} 564 565Fault 566NSGigE::write(MemReqPtr &req, const uint8_t *data) 567{ 568 assert(ioEnable); 569 570 Addr daddr = req->paddr & 0xfff; 571 DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n", 572 daddr, req->paddr, req->vaddr, req->size); 573 574 if (daddr > LAST && daddr <= RESERVED) { 575 panic("Accessing reserved register"); 576 } else if (daddr > RESERVED && daddr <= 0x3FC) { 577 WriteConfig(daddr & 0xff, req->size, *(uint32_t *)data); 578 return No_Fault; 579 } else if (daddr > 0x3FC) 580 panic("Something is messed up!\n"); 581 582 if (req->size == sizeof(uint32_t)) { 583 uint32_t reg = *(uint32_t *)data; 584 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg); 585 586 switch (daddr) { 587 case CR: 588 regs.command = reg; 589 if ((reg & (CR_TXE | CR_TXD)) == (CR_TXE | CR_TXD)) { 590 txHalt = true; 591 } else if (reg & CR_TXE) { 592 //the kernel is enabling the transmit machine 593 if (txState == txIdle) 594 txKick(); 595 } else if (reg & CR_TXD) { 596 txHalt = true; 597 } 598 599 if ((reg & (CR_RXE | CR_RXD)) == (CR_RXE | CR_RXD)) { 600 rxHalt = true; 601 } else if (reg & CR_RXE) { 602 if (rxState == rxIdle) { 603 rxKick(); 604 } 605 } else if (reg & CR_RXD) { 606 rxHalt = true; 607 } 608 609 if (reg & CR_TXR) 610 txReset(); 611 612 if (reg & CR_RXR) 613 rxReset(); 614 615 if (reg & CR_SWI) 616 devIntrPost(ISR_SWI); 617 618 if (reg & CR_RST) { 619 txReset(); 620 rxReset(); 621 622 regsReset(); 623 } 624 break; 625 626 case CFG: 627 if (reg & CFG_LNKSTS || 628 reg & CFG_SPDSTS || 629 reg & CFG_DUPSTS || 630 reg & CFG_RESERVED || 631 reg & CFG_T64ADDR || 632 reg & CFG_PCI64_DET) 633 panic("writing to read-only or reserved CFG bits!\n"); 634 635 regs.config |= reg & ~(CFG_LNKSTS | CFG_SPDSTS | CFG_DUPSTS | 636 CFG_RESERVED | CFG_T64ADDR | CFG_PCI64_DET); 637 638// all these #if 0's are because i don't THINK the kernel needs to 639// have these implemented. if there is a problem relating to one of 640// these, you may need to add functionality in. 
641#if 0 642 if (reg & CFG_TBI_EN) ; 643 if (reg & CFG_MODE_1000) ; 644#endif 645 646 if (reg & CFG_AUTO_1000) 647 panic("CFG_AUTO_1000 not implemented!\n"); 648 649#if 0 650 if (reg & CFG_PINT_DUPSTS || 651 reg & CFG_PINT_LNKSTS || 652 reg & CFG_PINT_SPDSTS) 653 ; 654 655 if (reg & CFG_TMRTEST) ; 656 if (reg & CFG_MRM_DIS) ; 657 if (reg & CFG_MWI_DIS) ; 658 659 if (reg & CFG_T64ADDR) 660 panic("CFG_T64ADDR is read only register!\n"); 661 662 if (reg & CFG_PCI64_DET) 663 panic("CFG_PCI64_DET is read only register!\n"); 664 665 if (reg & CFG_DATA64_EN) ; 666 if (reg & CFG_M64ADDR) ; 667 if (reg & CFG_PHY_RST) ; 668 if (reg & CFG_PHY_DIS) ; 669#endif 670 671 if (reg & CFG_EXTSTS_EN) 672 extstsEnable = true; 673 else 674 extstsEnable = false; 675 676#if 0 677 if (reg & CFG_REQALG) ; 678 if (reg & CFG_SB) ; 679 if (reg & CFG_POW) ; 680 if (reg & CFG_EXD) ; 681 if (reg & CFG_PESEL) ; 682 if (reg & CFG_BROM_DIS) ; 683 if (reg & CFG_EXT_125) ; 684 if (reg & CFG_BEM) ; 685#endif 686 break; 687 688 case MEAR: 689 regs.mear = reg; 690 // since phy is completely faked, MEAR_MD* don't matter 691 // and since the driver never uses MEAR_EE*, they don't 692 // matter 693#if 0 694 if (reg & MEAR_EEDI) ; 695 if (reg & MEAR_EEDO) ; // this one is read only 696 if (reg & MEAR_EECLK) ; 697 if (reg & MEAR_EESEL) ; 698 if (reg & MEAR_MDIO) ; 699 if (reg & MEAR_MDDIR) ; 700 if (reg & MEAR_MDC) ; 701#endif 702 break; 703 704 case PTSCR: 705 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY); 706 // these control BISTs for various parts of chip - we 707 // don't care or do just fake that the BIST is done 708 if (reg & PTSCR_RBIST_EN) 709 regs.ptscr |= PTSCR_RBIST_DONE; 710 if (reg & PTSCR_EEBIST_EN) 711 regs.ptscr &= ~PTSCR_EEBIST_EN; 712 if (reg & PTSCR_EELOAD_EN) 713 regs.ptscr &= ~PTSCR_EELOAD_EN; 714 break; 715 716 case ISR: /* writing to the ISR has no effect */ 717 panic("ISR is a read only register!\n"); 718 719 case IMR: 720 regs.imr = reg; 721 devIntrChangeMask(); 722 break; 723 724 case IER: 725 regs.ier = reg; 726 break; 727 728 case IHR: 729 regs.ihr = reg; 730 /* not going to implement real interrupt holdoff */ 731 break; 732 733 case TXDP: 734 regs.txdp = (reg & 0xFFFFFFFC); 735 assert(txState == txIdle); 736 CTDD = false; 737 break; 738 739 case TXDP_HI: 740 regs.txdp_hi = reg; 741 break; 742 743 case TXCFG: 744 regs.txcfg = reg; 745#if 0 746 if (reg & TXCFG_CSI) ; 747 if (reg & TXCFG_HBI) ; 748 if (reg & TXCFG_MLB) ; 749 if (reg & TXCFG_ATP) ; 750 if (reg & TXCFG_ECRETRY) { 751 /* 752 * this could easily be implemented, but considering 753 * the network is just a fake pipe, wouldn't make 754 * sense to do this 755 */ 756 } 757 758 if (reg & TXCFG_BRST_DIS) ; 759#endif 760 761#if 0 762 /* we handle our own DMA, ignore the kernel's exhortations */ 763 if (reg & TXCFG_MXDMA) ; 764#endif 765 766 // also, we currently don't care about fill/drain 767 // thresholds though this may change in the future with 768 // more realistic networks or a driver which changes it 769 // according to feedback 770 771 break; 772 773 case GPIOR: 774 regs.gpior = reg; 775 /* these just control general purpose i/o pins, don't matter */ 776 break; 777 778 case RXDP: 779 regs.rxdp = reg; 780 break; 781 782 case RXDP_HI: 783 regs.rxdp_hi = reg; 784 break; 785 786 case RXCFG: 787 regs.rxcfg = reg; 788#if 0 789 if (reg & RXCFG_AEP) ; 790 if (reg & RXCFG_ARP) ; 791 if (reg & RXCFG_STRIPCRC) ; 792 if (reg & RXCFG_RX_RD) ; 793 if (reg & RXCFG_ALP) ; 794 if (reg & RXCFG_AIRL) ; 795 796 /* we handle our own DMA, ignore what kernel says about it 
*/ 797 if (reg & RXCFG_MXDMA) ; 798 799 //also, we currently don't care about fill/drain thresholds 800 //though this may change in the future with more realistic 801 //networks or a driver which changes it according to feedback 802 if (reg & (RXCFG_DRTH | RXCFG_DRTH0)) ; 803#endif 804 break; 805 806 case PQCR: 807 /* there is no priority queueing used in the linux 2.6 driver */ 808 regs.pqcr = reg; 809 break; 810 811 case WCSR: 812 /* not going to implement wake on LAN */ 813 regs.wcsr = reg; 814 break; 815 816 case PCR: 817 /* not going to implement pause control */ 818 regs.pcr = reg; 819 break; 820 821 case RFCR: 822 regs.rfcr = reg; 823 824 rxFilterEnable = (reg & RFCR_RFEN) ? true : false; 825 acceptBroadcast = (reg & RFCR_AAB) ? true : false; 826 acceptMulticast = (reg & RFCR_AAM) ? true : false; 827 acceptUnicast = (reg & RFCR_AAU) ? true : false; 828 acceptPerfect = (reg & RFCR_APM) ? true : false; 829 acceptArp = (reg & RFCR_AARP) ? true : false; 830 831#if 0 832 if (reg & RFCR_APAT) 833 panic("RFCR_APAT not implemented!\n"); 834#endif 835 836 if (reg & RFCR_MHEN || reg & RFCR_UHEN) 837 panic("hash filtering not implemented!\n"); 838 839 if (reg & RFCR_ULM) 840 panic("RFCR_ULM not implemented!\n"); 841 842 break; 843 844 case RFDR: 845 panic("the driver never writes to RFDR, something is wrong!\n"); 846 847 case BRAR: 848 panic("the driver never uses BRAR, something is wrong!\n"); 849 850 case BRDR: 851 panic("the driver never uses BRDR, something is wrong!\n"); 852 853 case SRR: 854 panic("SRR is read only register!\n"); 855 856 case MIBC: 857 panic("the driver never uses MIBC, something is wrong!\n"); 858 859 case VRCR: 860 regs.vrcr = reg; 861 break; 862 863 case VTCR: 864 regs.vtcr = reg; 865 break; 866 867 case VDR: 868 panic("the driver never uses VDR, something is wrong!\n"); 869 break; 870 871 case CCSR: 872 /* not going to implement clockrun stuff */ 873 regs.ccsr = reg; 874 break; 875 876 case TBICR: 877 regs.tbicr = reg; 878 if (reg & TBICR_MR_LOOPBACK) 879 panic("TBICR_MR_LOOPBACK never used, something wrong!\n"); 880 881 if (reg & TBICR_MR_AN_ENABLE) { 882 regs.tanlpar = regs.tanar; 883 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS); 884 } 885 886#if 0 887 if (reg & TBICR_MR_RESTART_AN) ; 888#endif 889 890 break; 891 892 case TBISR: 893 panic("TBISR is read only register!\n"); 894 895 case TANAR: 896 regs.tanar = reg; 897 if (reg & TANAR_PS2) 898 panic("this isn't used in driver, something wrong!\n"); 899 900 if (reg & TANAR_PS1) 901 panic("this isn't used in driver, something wrong!\n"); 902 break; 903 904 case TANLPAR: 905 panic("this should only be written to by the fake phy!\n"); 906 907 case TANER: 908 panic("TANER is read only register!\n"); 909 910 case TESR: 911 regs.tesr = reg; 912 break; 913 914 default: 915 panic("invalid register access daddr=%#x", daddr); 916 } 917 } else { 918 panic("Invalid Request Size"); 919 } 920 921 return No_Fault; 922} 923 924void 925NSGigE::devIntrPost(uint32_t interrupts) 926{ 927 bool delay = false; 928 929 if (interrupts & ISR_RESERVE) 930 panic("Cannot set a reserved interrupt"); 931 932 if (interrupts & ISR_TXRCMP) 933 regs.isr |= ISR_TXRCMP; 934 935 if (interrupts & ISR_RXRCMP) 936 regs.isr |= ISR_RXRCMP; 937 938//ISR_DPERR not implemented 939//ISR_SSERR not implemented 940//ISR_RMABT not implemented 941//ISR_RXSOVR not implemented 942//ISR_HIBINT not implemented 943//ISR_PHY not implemented 944//ISR_PME not implemented 945 946 if (interrupts & ISR_SWI) 947 regs.isr |= ISR_SWI; 948 949//ISR_MIB not implemented 
950//ISR_TXURN not implemented 951 952 if (interrupts & ISR_TXIDLE) 953 regs.isr |= ISR_TXIDLE; 954 955 if (interrupts & ISR_TXERR) 956 regs.isr |= ISR_TXERR; 957 958 if (interrupts & ISR_TXDESC) 959 regs.isr |= ISR_TXDESC; 960 961 if (interrupts & ISR_TXOK) { 962 regs.isr |= ISR_TXOK; 963 delay = true; 964 } 965 966 if (interrupts & ISR_RXORN) 967 regs.isr |= ISR_RXORN; 968 969 if (interrupts & ISR_RXIDLE) 970 regs.isr |= ISR_RXIDLE; 971 972//ISR_RXEARLY not implemented 973 974 if (interrupts & ISR_RXERR) 975 regs.isr |= ISR_RXERR; 976 977 if (interrupts & ISR_RXDESC) 978 regs.isr |= ISR_RXDESC; 979 980 if (interrupts & ISR_RXOK) { 981 delay = true; 982 regs.isr |= ISR_RXOK; 983 } 984 985 if ((regs.isr & regs.imr)) { 986 Tick when = curTick; 987 if (delay) 988 when += intrDelay; 989 cpuIntrPost(when); 990 } 991 992 DPRINTF(EthernetIntr, 993 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n", 994 interrupts, regs.isr, regs.imr); 995} 996 997void 998NSGigE::devIntrClear(uint32_t interrupts) 999{ 1000 if (interrupts & ISR_RESERVE) 1001 panic("Cannot clear a reserved interrupt"); 1002 1003 if (interrupts & ISR_TXRCMP) 1004 regs.isr &= ~ISR_TXRCMP; 1005 1006 if (interrupts & ISR_RXRCMP) 1007 regs.isr &= ~ISR_RXRCMP; 1008 1009//ISR_DPERR not implemented 1010//ISR_SSERR not implemented 1011//ISR_RMABT not implemented 1012//ISR_RXSOVR not implemented 1013//ISR_HIBINT not implemented 1014//ISR_PHY not implemented 1015//ISR_PME not implemented 1016 1017 if (interrupts & ISR_SWI) 1018 regs.isr &= ~ISR_SWI; 1019 1020//ISR_MIB not implemented 1021//ISR_TXURN not implemented 1022 1023 if (interrupts & ISR_TXIDLE) 1024 regs.isr &= ~ISR_TXIDLE; 1025 1026 if (interrupts & ISR_TXERR) 1027 regs.isr &= ~ISR_TXERR; 1028 1029 if (interrupts & ISR_TXDESC) 1030 regs.isr &= ~ISR_TXDESC; 1031 1032 if (interrupts & ISR_TXOK) 1033 regs.isr &= ~ISR_TXOK; 1034 1035 if (interrupts & ISR_RXORN) 1036 regs.isr &= ~ISR_RXORN; 1037 1038 if (interrupts & ISR_RXIDLE) 1039 regs.isr &= ~ISR_RXIDLE; 1040 1041//ISR_RXEARLY not implemented 1042 1043 if (interrupts & ISR_RXERR) 1044 regs.isr &= ~ISR_RXERR; 1045 1046 if (interrupts & ISR_RXDESC) 1047 regs.isr &= ~ISR_RXDESC; 1048 1049 if (interrupts & ISR_RXOK) 1050 regs.isr &= ~ISR_RXOK; 1051 1052 if (!(regs.isr & regs.imr)) 1053 cpuIntrClear(); 1054 1055 DPRINTF(EthernetIntr, 1056 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n", 1057 interrupts, regs.isr, regs.imr); 1058} 1059 1060void 1061NSGigE::devIntrChangeMask() 1062{ 1063 DPRINTF(EthernetIntr, "interrupt mask changed\n"); 1064 1065 if (regs.isr & regs.imr) 1066 cpuIntrPost(curTick); 1067 else 1068 cpuIntrClear(); 1069} 1070 1071void 1072NSGigE::cpuIntrPost(Tick when) 1073{ 1074 // If the interrupt you want to post is later than an interrupt 1075 // already scheduled, just let it post in the coming one and don't 1076 // schedule another. 
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    assert((intrTick >= curTick) || (intrTick == 0));
    if (when > intrTick && intrTick != 0)
        return;

    intrTick = when;

    if (intrEvent) {
        intrEvent->squash();
        intrEvent = 0;
    }

    if (when < curTick) {
        cpuInterrupt();
    } else {
        DPRINTF(EthernetIntr,
                "going to schedule an interrupt for intrTick=%d\n",
                intrTick);
        intrEvent = new IntrEvent(this, true);
        intrEvent->schedule(intrTick);
    }
}

void
NSGigE::cpuInterrupt()
{
    // Don't send an interrupt if there's already one
    if (cpuPendingIntr) {
        DPRINTF(EthernetIntr,
                "would send an interrupt now, but there's already pending\n");
        intrTick = 0;
        return;
    }
    // Don't send an interrupt if it's supposed to be delayed
    if (intrTick > curTick) {
        DPRINTF(EthernetIntr,
                "an interrupt is scheduled for %d, wait til then\n",
                intrTick);
        return;
    }

    // Whether or not there's a pending interrupt, we don't care about
    // it anymore
    intrEvent = 0;
    intrTick = 0;

    // Send interrupt
    cpuPendingIntr = true;
    /** @todo rework the intctrl to be tsunami ok */
    //intctrl->post(TheISA::INTLEVEL_IRQ1, TheISA::INTINDEX_ETHERNET);
    DPRINTF(EthernetIntr, "Posting interrupts to cchip!\n");
    tsunami->cchip->postDRIR(configData->config.hdr.pci0.interruptLine);
}

void
NSGigE::cpuIntrClear()
{
    if (!cpuPendingIntr)
        return;

    cpuPendingIntr = false;
    /** @todo rework the intctrl to be tsunami ok */
    //intctrl->clear(TheISA::INTLEVEL_IRQ1, TheISA::INTINDEX_ETHERNET);
    DPRINTF(EthernetIntr, "clearing all interrupts from cchip\n");
    tsunami->cchip->clearDRIR(configData->config.hdr.pci0.interruptLine);
}

bool
NSGigE::cpuIntrPending() const
{ return cpuPendingIntr; }

void
NSGigE::txReset()
{
    DPRINTF(Ethernet, "transmit reset\n");

    CTDD = false;
    txFifoAvail = maxTxFifoSize;
    txHalt = false;
    txFragPtr = 0;
    assert(txDescCnt == 0);
    txFifo.clear();
    regs.command &= ~CR_TXE;
    txState = txIdle;
    assert(txDmaState == dmaIdle);
}

void
NSGigE::rxReset()
{
    DPRINTF(Ethernet, "receive reset\n");

    CRDD = false;
    assert(rxPktBytes == 0);
    rxFifoCnt = 0;
    rxHalt = false;
    rxFragPtr = 0;
    assert(rxDescCnt == 0);
    assert(rxDmaState == dmaIdle);
    rxFifo.clear();
    regs.command &= ~CR_RXE;
    rxState = rxIdle;
}

void
NSGigE::regsReset()
{
    memset(&regs, 0, sizeof(regs));
    regs.config = 0x80000000;
    regs.mear = 0x12;
    regs.isr = 0x00608000;
    regs.txcfg = 0x120;
    regs.rxcfg = 0x4;
    regs.srr = 0x0103;
    regs.mibc = 0x2;
    regs.vdr = 0x81;
    regs.tesr = 0xc000;

    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}

void
NSGigE::rxDmaReadCopy()
{
    assert(rxDmaState == dmaReading);

    memcpy(rxDmaData, physmem->dma_addr(rxDmaAddr, rxDmaLen), rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}

bool
NSGigE::doRxDmaRead()
{
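    // Returns true if the state machine should exit and wait (either for
    // the scheduled DMA completion event or for the DMA interface to free
    // up); returns false if the copy was performed inline with no delay.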
    assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
    rxDmaState = dmaReading;

    if (dmaInterface && !rxDmaFree) {
        if (dmaInterface->busy())
            rxDmaState = dmaReadWaiting;
        else
            dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
                                &rxDmaReadEvent, true);
        return true;
    }

    if (dmaReadDelay == 0 && dmaReadFactor == 0) {
        rxDmaReadCopy();
        return false;
    }

    Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
    Tick start = curTick + dmaReadDelay + factor;
    rxDmaReadEvent.schedule(start);
    return true;
}

void
NSGigE::rxDmaReadDone()
{
    assert(rxDmaState == dmaReading);
    rxDmaReadCopy();

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}

void
NSGigE::rxDmaWriteCopy()
{
    assert(rxDmaState == dmaWriting);

    memcpy(physmem->dma_addr(rxDmaAddr, rxDmaLen), rxDmaData, rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}

bool
NSGigE::doRxDmaWrite()
{
    assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
    rxDmaState = dmaWriting;

    if (dmaInterface && !rxDmaFree) {
        if (dmaInterface->busy())
            rxDmaState = dmaWriteWaiting;
        else
            dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
                                &rxDmaWriteEvent, true);
        return true;
    }

    if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
        rxDmaWriteCopy();
        return false;
    }

    Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
    Tick start = curTick + dmaWriteDelay + factor;
    rxDmaWriteEvent.schedule(start);
    return true;
}

void
NSGigE::rxDmaWriteDone()
{
    assert(rxDmaState == dmaWriting);
    rxDmaWriteCopy();

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}

void
NSGigE::rxKick()
{
    DPRINTF(EthernetSM, "receive kick state=%s (rxBuf.size=%d)\n",
            NsRxStateStrings[rxState], rxFifo.size());

    if (rxKickTick > curTick) {
        DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
                rxKickTick);
        return;
    }

  next:
    switch(rxDmaState) {
      case dmaReadWaiting:
        if (doRxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doRxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    // see state machine from spec for details
    // the way this works is, if you finish work on one state and can
    // go directly to another, you do that through jumping to the
    // label "next".  however, if you have intermediate work, like DMA
    // so that you can't go to the next state yet, you go to exit and
    // exit the loop.  however, when the DMA is done it will trigger
    // an event and come back to this loop.
    switch (rxState) {
      case rxIdle:
        if (!(regs.command & CR_RXE)) {
            DPRINTF(EthernetSM, "Receive Disabled! 
Nothing to do.\n"); 1345 goto exit; 1346 } 1347 1348 if (CRDD) { 1349 rxState = rxDescRefr; 1350 1351 rxDmaAddr = regs.rxdp & 0x3fffffff; 1352 rxDmaData = &rxDescCache + offsetof(ns_desc, link); 1353 rxDmaLen = sizeof(rxDescCache.link); 1354 rxDmaFree = dmaDescFree; 1355 1356 descDmaReads++; 1357 descDmaRdBytes += rxDmaLen; 1358 1359 if (doRxDmaRead()) 1360 goto exit; 1361 } else { 1362 rxState = rxDescRead; 1363 1364 rxDmaAddr = regs.rxdp & 0x3fffffff; 1365 rxDmaData = &rxDescCache; 1366 rxDmaLen = sizeof(ns_desc); 1367 rxDmaFree = dmaDescFree; 1368 1369 descDmaReads++; 1370 descDmaRdBytes += rxDmaLen; 1371 1372 if (doRxDmaRead()) 1373 goto exit; 1374 } 1375 break; 1376 1377 case rxDescRefr: 1378 if (rxDmaState != dmaIdle) 1379 goto exit; 1380 1381 rxState = rxAdvance; 1382 break; 1383 1384 case rxDescRead: 1385 if (rxDmaState != dmaIdle) 1386 goto exit; 1387 1388 DPRINTF(EthernetDesc, 1389 "rxDescCache:\n" 1390 "\tlink=%08x\n" 1391 "\tbufptr=%08x\n" 1392 "\tcmdsts=%08x\n" 1393 "\textsts=%08x\n", 1394 rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts, 1395 rxDescCache.extsts); 1396 1397 if (rxDescCache.cmdsts & CMDSTS_OWN) { 1398 rxState = rxIdle; 1399 } else { 1400 rxState = rxFifoBlock; 1401 rxFragPtr = rxDescCache.bufptr; 1402 rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK; 1403 } 1404 break; 1405 1406 case rxFifoBlock: 1407 if (!rxPacket) { 1408 /** 1409 * @todo in reality, we should be able to start processing 1410 * the packet as it arrives, and not have to wait for the 1411 * full packet ot be in the receive fifo. 1412 */ 1413 if (rxFifo.empty()) 1414 goto exit; 1415 1416 DPRINTF(EthernetSM, "\n\n*****processing receive of new packet\n"); 1417 1418 // If we don't have a packet, grab a new one from the fifo. 1419 rxPacket = rxFifo.front(); 1420 rxPktBytes = rxPacket->length; 1421 rxPacketBufPtr = rxPacket->data; 1422 1423#if TRACING_ON 1424 if (DTRACE(Ethernet)) { 1425 if (rxPacket->isIpPkt()) { 1426 ip_header *ip = rxPacket->getIpHdr(); 1427 DPRINTF(Ethernet, "ID is %d\n", reverseEnd16(ip->ID)); 1428 if (rxPacket->isTcpPkt()) { 1429 tcp_header *tcp = rxPacket->getTcpHdr(ip); 1430 DPRINTF(Ethernet, "Src Port = %d, Dest Port = %d\n", 1431 reverseEnd16(tcp->src_port_num), 1432 reverseEnd16(tcp->dest_port_num)); 1433 } 1434 } 1435 } 1436#endif 1437 1438 // sanity check - i think the driver behaves like this 1439 assert(rxDescCnt >= rxPktBytes); 1440 1441 // Must clear the value before popping to decrement the 1442 // reference count 1443 rxFifo.front() = NULL; 1444 rxFifo.pop_front(); 1445 rxFifoCnt -= rxPacket->length; 1446 } 1447 1448 1449 // dont' need the && rxDescCnt > 0 if driver sanity check 1450 // above holds 1451 if (rxPktBytes > 0) { 1452 rxState = rxFragWrite; 1453 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity 1454 // check holds 1455 rxXferLen = rxPktBytes; 1456 1457 rxDmaAddr = rxFragPtr & 0x3fffffff; 1458 rxDmaData = rxPacketBufPtr; 1459 rxDmaLen = rxXferLen; 1460 rxDmaFree = dmaDataFree; 1461 1462 if (doRxDmaWrite()) 1463 goto exit; 1464 1465 } else { 1466 rxState = rxDescWrite; 1467 1468 //if (rxPktBytes == 0) { /* packet is done */ 1469 assert(rxPktBytes == 0); 1470 DPRINTF(EthernetSM, "done with receiving packet\n"); 1471 1472 rxDescCache.cmdsts |= CMDSTS_OWN; 1473 rxDescCache.cmdsts &= ~CMDSTS_MORE; 1474 rxDescCache.cmdsts |= CMDSTS_OK; 1475 rxDescCache.cmdsts &= 0xffff0000; 1476 rxDescCache.cmdsts += rxPacket->length; //i.e. 
set CMDSTS_SIZE 1477 1478#if 0 1479 /* 1480 * all the driver uses these are for its own stats keeping 1481 * which we don't care about, aren't necessary for 1482 * functionality and doing this would just slow us down. 1483 * if they end up using this in a later version for 1484 * functional purposes, just undef 1485 */ 1486 if (rxFilterEnable) { 1487 rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK; 1488 if (rxFifo.front()->IsUnicast()) 1489 rxDescCache.cmdsts |= CMDSTS_DEST_SELF; 1490 if (rxFifo.front()->IsMulticast()) 1491 rxDescCache.cmdsts |= CMDSTS_DEST_MULTI; 1492 if (rxFifo.front()->IsBroadcast()) 1493 rxDescCache.cmdsts |= CMDSTS_DEST_MASK; 1494 } 1495#endif 1496 1497 if (rxPacket->isIpPkt() && extstsEnable) { 1498 rxDescCache.extsts |= EXTSTS_IPPKT; 1499 rxIPChecksums++; 1500 if (!ipChecksum(rxPacket, false)) { 1501 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n"); 1502 rxDescCache.extsts |= EXTSTS_IPERR; 1503 } 1504 if (rxPacket->isTcpPkt()) { 1505 rxDescCache.extsts |= EXTSTS_TCPPKT; 1506 rxTCPChecksums++; 1507 if (!tcpChecksum(rxPacket, false)) { 1508 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n"); 1509 rxDescCache.extsts |= EXTSTS_TCPERR; 1510 1511 } 1512 } else if (rxPacket->isUdpPkt()) { 1513 rxDescCache.extsts |= EXTSTS_UDPPKT; 1514 if (!udpChecksum(rxPacket, false)) { 1515 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n"); 1516 rxDescCache.extsts |= EXTSTS_UDPERR; 1517 } 1518 } 1519 } 1520 rxPacket = 0; 1521 1522 /* 1523 * the driver seems to always receive into desc buffers 1524 * of size 1514, so you never have a pkt that is split 1525 * into multiple descriptors on the receive side, so 1526 * i don't implement that case, hence the assert above. 1527 */ 1528 1529 DPRINTF(EthernetDesc, 1530 "rxDesc writeback:\n\tcmdsts=%08x\n\textsts=%08x\n", 1531 rxDescCache.cmdsts, rxDescCache.extsts); 1532 1533 rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff; 1534 rxDmaData = &(rxDescCache.cmdsts); 1535 rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts); 1536 rxDmaFree = dmaDescFree; 1537 1538 descDmaWrites++; 1539 descDmaWrBytes += rxDmaLen; 1540 1541 if (doRxDmaWrite()) 1542 goto exit; 1543 } 1544 break; 1545 1546 case rxFragWrite: 1547 if (rxDmaState != dmaIdle) 1548 goto exit; 1549 1550 rxPacketBufPtr += rxXferLen; 1551 rxFragPtr += rxXferLen; 1552 rxPktBytes -= rxXferLen; 1553 1554 rxState = rxFifoBlock; 1555 break; 1556 1557 case rxDescWrite: 1558 if (rxDmaState != dmaIdle) 1559 goto exit; 1560 1561 assert(rxDescCache.cmdsts & CMDSTS_OWN); 1562 1563 assert(rxPacket == 0); 1564 devIntrPost(ISR_RXOK); 1565 1566 if (rxDescCache.cmdsts & CMDSTS_INTR) 1567 devIntrPost(ISR_RXDESC); 1568 1569 if (rxHalt) { 1570 DPRINTF(EthernetSM, "Halting the RX state machine\n"); 1571 rxState = rxIdle; 1572 rxHalt = false; 1573 } else 1574 rxState = rxAdvance; 1575 break; 1576 1577 case rxAdvance: 1578 if (rxDescCache.link == 0) { 1579 rxState = rxIdle; 1580 return; 1581 } else { 1582 rxState = rxDescRead; 1583 regs.rxdp = rxDescCache.link; 1584 CRDD = false; 1585 1586 rxDmaAddr = regs.rxdp & 0x3fffffff; 1587 rxDmaData = &rxDescCache; 1588 rxDmaLen = sizeof(ns_desc); 1589 rxDmaFree = dmaDescFree; 1590 1591 if (doRxDmaRead()) 1592 goto exit; 1593 } 1594 break; 1595 1596 default: 1597 panic("Invalid rxState!"); 1598 } 1599 1600 1601 DPRINTF(EthernetSM, "entering next rx state = %s\n", 1602 NsRxStateStrings[rxState]); 1603 1604 if (rxState == rxIdle) { 1605 regs.command &= ~CR_RXE; 1606 devIntrPost(ISR_RXIDLE); 1607 return; 1608 } 1609 1610 goto next; 1611 1612 exit: 1613 /** 
1614 * @todo do we want to schedule a future kick? 1615 */ 1616 DPRINTF(EthernetSM, "rx state machine exited state=%s\n", 1617 NsRxStateStrings[rxState]); 1618} 1619 1620void 1621NSGigE::transmit() 1622{ 1623 if (txFifo.empty()) { 1624 DPRINTF(Ethernet, "nothing to transmit\n"); 1625 return; 1626 } 1627 1628 DPRINTF(Ethernet, "\n\nAttempt Pkt Transmit: txFifo length = %d\n", 1629 maxTxFifoSize - txFifoAvail); 1630 if (interface->sendPacket(txFifo.front())) { 1631#if TRACING_ON 1632 if (DTRACE(Ethernet)) { 1633 if (txFifo.front()->isIpPkt()) { 1634 ip_header *ip = txFifo.front()->getIpHdr(); 1635 DPRINTF(Ethernet, "ID is %d\n", reverseEnd16(ip->ID)); 1636 if (txFifo.front()->isTcpPkt()) { 1637 tcp_header *tcp = txFifo.front()->getTcpHdr(ip); 1638 DPRINTF(Ethernet, "Src Port = %d, Dest Port = %d\n", 1639 reverseEnd16(tcp->src_port_num), 1640 reverseEnd16(tcp->dest_port_num)); 1641 } 1642 } 1643 } 1644#endif 1645 1646 DDUMP(Ethernet, txFifo.front()->data, txFifo.front()->length); 1647 txBytes += txFifo.front()->length; 1648 txPackets++; 1649 1650 txFifoAvail += txFifo.front()->length; 1651 1652 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n", 1653 txFifoAvail); 1654 txFifo.front() = NULL; 1655 txFifo.pop_front(); 1656 1657 /* 1658 * normally do a writeback of the descriptor here, and ONLY 1659 * after that is done, send this interrupt. but since our 1660 * stuff never actually fails, just do this interrupt here, 1661 * otherwise the code has to stray from this nice format. 1662 * besides, it's functionally the same. 1663 */ 1664 devIntrPost(ISR_TXOK); 1665 } else { 1666 DPRINTF(Ethernet, 1667 "May need to rethink always sending the descriptors back?\n"); 1668 } 1669 1670 if (!txFifo.empty() && !txEvent.scheduled()) { 1671 DPRINTF(Ethernet, "reschedule transmit\n"); 1672 txEvent.schedule(curTick + 1000); 1673 } 1674} 1675 1676void 1677NSGigE::txDmaReadCopy() 1678{ 1679 assert(txDmaState == dmaReading); 1680 1681 memcpy(txDmaData, physmem->dma_addr(txDmaAddr, txDmaLen), txDmaLen); 1682 txDmaState = dmaIdle; 1683 1684 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n", 1685 txDmaAddr, txDmaLen); 1686 DDUMP(EthernetDMA, txDmaData, txDmaLen); 1687} 1688 1689bool 1690NSGigE::doTxDmaRead() 1691{ 1692 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting); 1693 txDmaState = dmaReading; 1694 1695 if (dmaInterface && !txDmaFree) { 1696 if (dmaInterface->busy()) 1697 txDmaState = dmaReadWaiting; 1698 else 1699 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick, 1700 &txDmaReadEvent, true); 1701 return true; 1702 } 1703 1704 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) { 1705 txDmaReadCopy(); 1706 return false; 1707 } 1708 1709 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor; 1710 Tick start = curTick + dmaReadDelay + factor; 1711 txDmaReadEvent.schedule(start); 1712 return true; 1713} 1714 1715void 1716NSGigE::txDmaReadDone() 1717{ 1718 assert(txDmaState == dmaReading); 1719 txDmaReadCopy(); 1720 1721 // If the receive state machine has a pending DMA, let it go first 1722 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting) 1723 rxKick(); 1724 1725 txKick(); 1726} 1727 1728void 1729NSGigE::txDmaWriteCopy() 1730{ 1731 assert(txDmaState == dmaWriting); 1732 1733 memcpy(physmem->dma_addr(txDmaAddr, txDmaLen), txDmaData, txDmaLen); 1734 txDmaState = dmaIdle; 1735 1736 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n", 1737 txDmaAddr, txDmaLen); 1738 DDUMP(EthernetDMA, txDmaData, txDmaLen); 1739} 1740 1741bool 1742NSGigE::doTxDmaWrite() 1743{ 
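    // Same convention as the rx helpers: true means wait for the DMA
    // interface or the txDmaWriteEvent, false means txDmaWriteCopy() has
    // already run inline.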
    assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
    txDmaState = dmaWriting;

    if (dmaInterface && !txDmaFree) {
        if (dmaInterface->busy())
            txDmaState = dmaWriteWaiting;
        else
            dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick,
                                &txDmaWriteEvent, true);
        return true;
    }

    if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
        txDmaWriteCopy();
        return false;
    }

    Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
    Tick start = curTick + dmaWriteDelay + factor;
    txDmaWriteEvent.schedule(start);
    return true;
}

void
NSGigE::txDmaWriteDone()
{
    assert(txDmaState == dmaWriting);
    txDmaWriteCopy();

    // If the receive state machine has a pending DMA, let it go first
    if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
        rxKick();

    txKick();
}

void
NSGigE::txKick()
{
    DPRINTF(EthernetSM, "transmit kick state=%s\n", NsTxStateStrings[txState]);

    if (txKickTick > curTick) {
        DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
                txKickTick);

        return;
    }

  next:
    switch(txDmaState) {
      case dmaReadWaiting:
        if (doTxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doTxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    switch (txState) {
      case txIdle:
        if (!(regs.command & CR_TXE)) {
            DPRINTF(EthernetSM, "Transmit disabled.  Nothing to do.\n");
            goto exit;
        }

        if (CTDD) {
            txState = txDescRefr;

            txDmaAddr = regs.txdp & 0x3fffffff;
            txDmaData = &txDescCache + offsetof(ns_desc, link);
            txDmaLen = sizeof(txDescCache.link);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;

        } else {
            txState = txDescRead;

            txDmaAddr = regs.txdp & 0x3fffffff;
            txDmaData = &txDescCache;
            txDmaLen = sizeof(ns_desc);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      case txDescRefr:
        if (txDmaState != dmaIdle)
            goto exit;

        txState = txAdvance;
        break;

      case txDescRead:
        if (txDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc,
                "txDescCache data:\n"
                "\tlink=%08x\n"
                "\tbufptr=%08x\n"
                "\tcmdsts=%08x\n"
                "\textsts=%08x\n",
                txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts,
                txDescCache.extsts);

        if (txDescCache.cmdsts & CMDSTS_OWN) {
            txState = txFifoBlock;
            txFragPtr = txDescCache.bufptr;
            txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK;
        } else {
            txState = txIdle;
        }
        break;

      case txFifoBlock:
        if (!txPacket) {
            DPRINTF(EthernetSM, "\n\n*****starting the tx of a new packet\n");
            txPacket = new EtherPacket;
            txPacket->data = new uint8_t[16384];
            txPacketBufPtr = txPacket->data;
        }

        if (txDescCnt == 0) {
            DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
            if (txDescCache.cmdsts & CMDSTS_MORE) {
                DPRINTF(EthernetSM, "there are more descriptors to come\n");
                txState = txDescWrite;

                txDescCache.cmdsts &= ~CMDSTS_OWN;

                txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts);
                txDmaAddr &= 0x3fffffff;
                txDmaData = 
&(txDescCache.cmdsts); 1891 txDmaLen = sizeof(txDescCache.cmdsts); 1892 txDmaFree = dmaDescFree; 1893 1894 if (doTxDmaWrite()) 1895 goto exit; 1896 1897 } else { /* this packet is totally done */ 1898 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n"); 1899 /* deal with the the packet that just finished */ 1900 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) { 1901 if (txDescCache.extsts & EXTSTS_UDPPKT) { 1902 udpChecksum(txPacket, true); 1903 } else if (txDescCache.extsts & EXTSTS_TCPPKT) { 1904 tcpChecksum(txPacket, true); 1905 txTCPChecksums++; 1906 } 1907 if (txDescCache.extsts & EXTSTS_IPPKT) { 1908 ipChecksum(txPacket, true); 1909 txIPChecksums++; 1910 } 1911 } 1912 1913 txPacket->length = txPacketBufPtr - txPacket->data; 1914 // this is just because the receive can't handle a 1915 // packet bigger want to make sure 1916 assert(txPacket->length <= 1514); 1917 txFifo.push_back(txPacket); 1918 1919 /* 1920 * this following section is not tqo spec, but 1921 * functionally shouldn't be any different. normally, 1922 * the chip will wait til the transmit has occurred 1923 * before writing back the descriptor because it has 1924 * to wait to see that it was successfully transmitted 1925 * to decide whether to set CMDSTS_OK or not. 1926 * however, in the simulator since it is always 1927 * successfully transmitted, and writing it exactly to 1928 * spec would complicate the code, we just do it here 1929 */ 1930 1931 txDescCache.cmdsts &= ~CMDSTS_OWN; 1932 txDescCache.cmdsts |= CMDSTS_OK; 1933 1934 DPRINTF(EthernetDesc, 1935 "txDesc writeback:\n\tcmdsts=%08x\n\textsts=%08x\n", 1936 txDescCache.cmdsts, txDescCache.extsts); 1937 1938 txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts); 1939 txDmaAddr &= 0x3fffffff; 1940 txDmaData = &(txDescCache.cmdsts); 1941 txDmaLen = sizeof(txDescCache.cmdsts) + 1942 sizeof(txDescCache.extsts); 1943 txDmaFree = dmaDescFree; 1944 1945 descDmaWrites++; 1946 descDmaWrBytes += txDmaLen; 1947 1948 transmit(); 1949 txPacket = 0; 1950 1951 if (txHalt) { 1952 DPRINTF(EthernetSM, "halting TX state machine\n"); 1953 txState = txIdle; 1954 txHalt = false; 1955 } else 1956 txState = txAdvance; 1957 1958 if (doTxDmaWrite()) 1959 goto exit; 1960 } 1961 } else { 1962 DPRINTF(EthernetSM, "this descriptor isn't done yet\n"); 1963 if (txFifoAvail) { 1964 txState = txFragRead; 1965 1966 /* 1967 * The number of bytes transferred is either whatever 1968 * is left in the descriptor (txDescCnt), or if there 1969 * is not enough room in the fifo, just whatever room 1970 * is left in the fifo 1971 */ 1972 txXferLen = min<uint32_t>(txDescCnt, txFifoAvail); 1973 1974 txDmaAddr = txFragPtr & 0x3fffffff; 1975 txDmaData = txPacketBufPtr; 1976 txDmaLen = txXferLen; 1977 txDmaFree = dmaDataFree; 1978 1979 if (doTxDmaRead()) 1980 goto exit; 1981 } else { 1982 txState = txFifoBlock; 1983 transmit(); 1984 1985 goto exit; 1986 } 1987 1988 } 1989 break; 1990 1991 case txFragRead: 1992 if (txDmaState != dmaIdle) 1993 goto exit; 1994 1995 txPacketBufPtr += txXferLen; 1996 txFragPtr += txXferLen; 1997 txDescCnt -= txXferLen; 1998 txFifoAvail -= txXferLen; 1999 2000 txState = txFifoBlock; 2001 break; 2002 2003 case txDescWrite: 2004 if (txDmaState != dmaIdle) 2005 goto exit; 2006 2007 if (txDescCache.cmdsts & CMDSTS_INTR) { 2008 devIntrPost(ISR_TXDESC); 2009 } 2010 2011 txState = txAdvance; 2012 break; 2013 2014 case txAdvance: 2015 if (txDescCache.link == 0) { 2016 txState = txIdle; 2017 } else { 2018 txState = txDescRead; 2019 regs.txdp = txDescCache.link; 2020 CTDD = false; 2021 2022 
txDmaAddr = txDescCache.link & 0x3fffffff; 2023 txDmaData = &txDescCache; 2024 txDmaLen = sizeof(ns_desc); 2025 txDmaFree = dmaDescFree; 2026 2027 if (doTxDmaRead()) 2028 goto exit; 2029 } 2030 break; 2031 2032 default: 2033 panic("invalid state"); 2034 } 2035 2036 DPRINTF(EthernetSM, "entering next tx state=%s\n", 2037 NsTxStateStrings[txState]); 2038 2039 if (txState == txIdle) { 2040 regs.command &= ~CR_TXE; 2041 devIntrPost(ISR_TXIDLE); 2042 return; 2043 } 2044 2045 goto next; 2046 2047 exit: 2048 /** 2049 * @todo do we want to schedule a future kick? 2050 */ 2051 DPRINTF(EthernetSM, "tx state machine exited state=%s\n", 2052 NsTxStateStrings[txState]); 2053} 2054 2055void 2056NSGigE::transferDone() 2057{ 2058 if (txFifo.empty()) 2059 return; 2060 2061 if (txEvent.scheduled()) 2062 txEvent.reschedule(curTick + 1); 2063 else 2064 txEvent.schedule(curTick + 1); 2065} 2066 2067bool 2068NSGigE::rxFilter(PacketPtr packet) 2069{ 2070 bool drop = true; 2071 string type; 2072 2073 if (packet->IsUnicast()) { 2074 type = "unicast"; 2075 2076 // If we're accepting all unicast addresses 2077 if (acceptUnicast) 2078 drop = false; 2079 2080 // If we make a perfect match 2081 if (acceptPerfect && 2082 memcmp(rom.perfectMatch, packet->data, EADDR_LEN) == 0) 2083 drop = false; 2084 2085 eth_header *eth = (eth_header *) packet->data; 2086 if ((acceptArp) && (eth->type == 0x608)) 2087 drop = false; 2088 2089 } else if (packet->IsBroadcast()) { 2090 type = "broadcast"; 2091 2092 // if we're accepting broadcasts 2093 if (acceptBroadcast) 2094 drop = false; 2095 2096 } else if (packet->IsMulticast()) { 2097 type = "multicast"; 2098 2099 // if we're accepting all multicasts 2100 if (acceptMulticast) 2101 drop = false; 2102 2103 } else { 2104 type = "unknown"; 2105 2106 // oh well, punt on this one 2107 } 2108 2109 if (drop) { 2110 DPRINTF(Ethernet, "rxFilter drop\n"); 2111 DDUMP(EthernetData, packet->data, packet->length); 2112 } 2113 2114 return drop; 2115} 2116 2117bool 2118NSGigE::recvPacket(PacketPtr packet) 2119{ 2120 rxBytes += packet->length; 2121 rxPackets++; 2122 2123 DPRINTF(Ethernet, "\n\nReceiving packet from wire, rxFifoAvail=%d\n", 2124 maxRxFifoSize - rxFifoCnt); 2125 2126 if (rxState == rxIdle) { 2127 DPRINTF(Ethernet, "receive disabled...packet dropped\n"); 2128 interface->recvDone(); 2129 return true; 2130 } 2131 2132 if (rxFilterEnable && rxFilter(packet)) { 2133 DPRINTF(Ethernet, "packet filtered...dropped\n"); 2134 interface->recvDone(); 2135 return true; 2136 } 2137 2138 if ((rxFifoCnt + packet->length) >= maxRxFifoSize) { 2139 DPRINTF(Ethernet, 2140 "packet will not fit in receive buffer...packet dropped\n"); 2141 devIntrPost(ISR_RXORN); 2142 return false; 2143 } 2144 2145 rxFifo.push_back(packet); 2146 rxFifoCnt += packet->length; 2147 interface->recvDone(); 2148 2149 rxKick(); 2150 return true; 2151} 2152 2153/** 2154 * does a udp checksum. 
if gen is true, then it generates it and puts 2155 * it in the right place else, it just checks what it calculates 2156 * against the value in the header in packet 2157 */ 2158bool 2159NSGigE::udpChecksum(PacketPtr packet, bool gen) 2160{ 2161 ip_header *ip = packet->getIpHdr(); 2162 udp_header *hdr = packet->getUdpHdr(ip); 2163 2164 pseudo_header *pseudo = new pseudo_header; 2165 2166 pseudo->src_ip_addr = ip->src_ip_addr; 2167 pseudo->dest_ip_addr = ip->dest_ip_addr; 2168 pseudo->protocol = ip->protocol; 2169 pseudo->len = hdr->len; 2170 2171 uint16_t cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr, 2172 (uint32_t) hdr->len); 2173 2174 delete pseudo; 2175 if (gen) 2176 hdr->chksum = cksum; 2177 else 2178 if (cksum != 0) 2179 return false; 2180 2181 return true; 2182} 2183 2184bool 2185NSGigE::tcpChecksum(PacketPtr packet, bool gen) 2186{ 2187 ip_header *ip = packet->getIpHdr(); 2188 tcp_header *hdr = packet->getTcpHdr(ip); 2189 2190 uint16_t cksum; 2191 pseudo_header *pseudo = new pseudo_header; 2192 if (!gen) { 2193 pseudo->src_ip_addr = ip->src_ip_addr; 2194 pseudo->dest_ip_addr = ip->dest_ip_addr; 2195 pseudo->protocol = reverseEnd16(ip->protocol); 2196 pseudo->len = reverseEnd16(reverseEnd16(ip->dgram_len) - 2197 (ip->vers_len & 0xf)*4); 2198 2199 cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr, 2200 (uint32_t) reverseEnd16(pseudo->len)); 2201 } else { 2202 pseudo->src_ip_addr = 0; 2203 pseudo->dest_ip_addr = 0; 2204 pseudo->protocol = hdr->chksum; 2205 pseudo->len = 0; 2206 hdr->chksum = 0; 2207 cksum = checksumCalc((uint16_t *) pseudo, (uint16_t *) hdr, 2208 (uint32_t) (reverseEnd16(ip->dgram_len) - 2209 (ip->vers_len & 0xf)*4)); 2210 } 2211 2212 delete pseudo; 2213 if (gen) 2214 hdr->chksum = cksum; 2215 else 2216 if (cksum != 0) 2217 return false; 2218 2219 return true; 2220} 2221 2222bool 2223NSGigE::ipChecksum(PacketPtr packet, bool gen) 2224{ 2225 ip_header *hdr = packet->getIpHdr(); 2226 2227 uint16_t cksum = checksumCalc(NULL, (uint16_t *) hdr, 2228 (hdr->vers_len & 0xf)*4); 2229 2230 if (gen) { 2231 DPRINTF(EthernetCksum, "generated checksum: %#x\n", cksum); 2232 hdr->hdr_chksum = cksum; 2233 } 2234 else 2235 if (cksum != 0) 2236 return false; 2237 2238 return true; 2239} 2240 2241uint16_t 2242NSGigE::checksumCalc(uint16_t *pseudo, uint16_t *buf, uint32_t len) 2243{ 2244 uint32_t sum = 0; 2245 2246 uint16_t last_pad = 0; 2247 if (len & 1) { 2248 last_pad = buf[len/2] & 0xff; 2249 len--; 2250 sum += last_pad; 2251 } 2252 2253 if (pseudo) { 2254 sum = pseudo[0] + pseudo[1] + pseudo[2] + 2255 pseudo[3] + pseudo[4] + pseudo[5]; 2256 } 2257 2258 for (int i=0; i < (len/2); ++i) { 2259 sum += buf[i]; 2260 } 2261 2262 while (sum >> 16) 2263 sum = (sum >> 16) + (sum & 0xffff); 2264 2265 return ~sum; 2266} 2267 2268//===================================================================== 2269// 2270// 2271void 2272NSGigE::serialize(ostream &os) 2273{ 2274 // Serialize the PciDev base class 2275 PciDev::serialize(os); 2276 2277 /* 2278 * Finalize any DMA events now. 
2279 */ 2280 if (rxDmaReadEvent.scheduled()) 2281 rxDmaReadCopy(); 2282 if (rxDmaWriteEvent.scheduled()) 2283 rxDmaWriteCopy(); 2284 if (txDmaReadEvent.scheduled()) 2285 txDmaReadCopy(); 2286 if (txDmaWriteEvent.scheduled()) 2287 txDmaWriteCopy(); 2288 2289 /* 2290 * Serialize the device registers 2291 */ 2292 SERIALIZE_SCALAR(regs.command); 2293 SERIALIZE_SCALAR(regs.config); 2294 SERIALIZE_SCALAR(regs.mear); 2295 SERIALIZE_SCALAR(regs.ptscr); 2296 SERIALIZE_SCALAR(regs.isr); 2297 SERIALIZE_SCALAR(regs.imr); 2298 SERIALIZE_SCALAR(regs.ier); 2299 SERIALIZE_SCALAR(regs.ihr); 2300 SERIALIZE_SCALAR(regs.txdp); 2301 SERIALIZE_SCALAR(regs.txdp_hi); 2302 SERIALIZE_SCALAR(regs.txcfg); 2303 SERIALIZE_SCALAR(regs.gpior); 2304 SERIALIZE_SCALAR(regs.rxdp); 2305 SERIALIZE_SCALAR(regs.rxdp_hi); 2306 SERIALIZE_SCALAR(regs.rxcfg); 2307 SERIALIZE_SCALAR(regs.pqcr); 2308 SERIALIZE_SCALAR(regs.wcsr); 2309 SERIALIZE_SCALAR(regs.pcr); 2310 SERIALIZE_SCALAR(regs.rfcr); 2311 SERIALIZE_SCALAR(regs.rfdr); 2312 SERIALIZE_SCALAR(regs.srr); 2313 SERIALIZE_SCALAR(regs.mibc); 2314 SERIALIZE_SCALAR(regs.vrcr); 2315 SERIALIZE_SCALAR(regs.vtcr); 2316 SERIALIZE_SCALAR(regs.vdr); 2317 SERIALIZE_SCALAR(regs.ccsr); 2318 SERIALIZE_SCALAR(regs.tbicr); 2319 SERIALIZE_SCALAR(regs.tbisr); 2320 SERIALIZE_SCALAR(regs.tanar); 2321 SERIALIZE_SCALAR(regs.tanlpar); 2322 SERIALIZE_SCALAR(regs.taner); 2323 SERIALIZE_SCALAR(regs.tesr); 2324 2325 SERIALIZE_ARRAY(rom.perfectMatch, EADDR_LEN); 2326 2327 SERIALIZE_SCALAR(ioEnable); 2328 2329 /* 2330 * Serialize the data Fifos 2331 */ 2332 int txNumPkts = txFifo.size(); 2333 SERIALIZE_SCALAR(txNumPkts); 2334 int i = 0; 2335 pktiter_t end = txFifo.end(); 2336 for (pktiter_t p = txFifo.begin(); p != end; ++p) { 2337 nameOut(os, csprintf("%s.txFifo%d", name(), i++)); 2338 (*p)->serialize(os); 2339 } 2340 2341 int rxNumPkts = rxFifo.size(); 2342 SERIALIZE_SCALAR(rxNumPkts); 2343 i = 0; 2344 end = rxFifo.end(); 2345 for (pktiter_t p = rxFifo.begin(); p != end; ++p) { 2346 nameOut(os, csprintf("%s.rxFifo%d", name(), i++)); 2347 (*p)->serialize(os); 2348 } 2349 2350 /* 2351 * Serialize the various helper variables 2352 */ 2353 bool txPacketExists = txPacket; 2354 SERIALIZE_SCALAR(txPacketExists); 2355 if (txPacketExists) { 2356 nameOut(os, csprintf("%s.txPacket", name())); 2357 txPacket->serialize(os); 2358 uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data); 2359 SERIALIZE_SCALAR(txPktBufPtr); 2360 } 2361 2362 bool rxPacketExists = rxPacket; 2363 SERIALIZE_SCALAR(rxPacketExists); 2364 if (rxPacketExists) { 2365 nameOut(os, csprintf("%s.rxPacket", name())); 2366 rxPacket->serialize(os); 2367 uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data); 2368 SERIALIZE_SCALAR(rxPktBufPtr); 2369 } 2370 2371 SERIALIZE_SCALAR(txXferLen); 2372 SERIALIZE_SCALAR(rxXferLen); 2373 2374 /* 2375 * Serialize DescCaches 2376 */ 2377 SERIALIZE_SCALAR(txDescCache.link); 2378 SERIALIZE_SCALAR(txDescCache.bufptr); 2379 SERIALIZE_SCALAR(txDescCache.cmdsts); 2380 SERIALIZE_SCALAR(txDescCache.extsts); 2381 SERIALIZE_SCALAR(rxDescCache.link); 2382 SERIALIZE_SCALAR(rxDescCache.bufptr); 2383 SERIALIZE_SCALAR(rxDescCache.cmdsts); 2384 SERIALIZE_SCALAR(rxDescCache.extsts); 2385 2386 /* 2387 * Serialize tx state machine 2388 */ 2389 int txState = this->txState; 2390 SERIALIZE_SCALAR(txState); 2391 SERIALIZE_SCALAR(CTDD); 2392 SERIALIZE_SCALAR(txFifoAvail); 2393 SERIALIZE_SCALAR(txHalt); 2394 SERIALIZE_SCALAR(txFragPtr); 2395 SERIALIZE_SCALAR(txDescCnt); 2396 int txDmaState = this->txDmaState; 2397 
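    // The state machine enums are staged in plain int locals so the
    // SERIALIZE_SCALAR macros operate on an integral type under the same
    // checkpoint parameter names; unserialize() casts them back.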
    SERIALIZE_SCALAR(txDmaState);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFifoCnt);
    SERIALIZE_SCALAR(rxHalt);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);

    SERIALIZE_SCALAR(extstsEnable);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);

    /*
     * Keep track of pending interrupt status.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);
}

void
NSGigE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Unserialize the PciDev base class
    PciDev::unserialize(cp, section);

    UNSERIALIZE_SCALAR(regs.command);
    UNSERIALIZE_SCALAR(regs.config);
    UNSERIALIZE_SCALAR(regs.mear);
    UNSERIALIZE_SCALAR(regs.ptscr);
    UNSERIALIZE_SCALAR(regs.isr);
    UNSERIALIZE_SCALAR(regs.imr);
    UNSERIALIZE_SCALAR(regs.ier);
    UNSERIALIZE_SCALAR(regs.ihr);
    UNSERIALIZE_SCALAR(regs.txdp);
    UNSERIALIZE_SCALAR(regs.txdp_hi);
    UNSERIALIZE_SCALAR(regs.txcfg);
    UNSERIALIZE_SCALAR(regs.gpior);
    UNSERIALIZE_SCALAR(regs.rxdp);
    UNSERIALIZE_SCALAR(regs.rxdp_hi);
    UNSERIALIZE_SCALAR(regs.rxcfg);
    UNSERIALIZE_SCALAR(regs.pqcr);
    UNSERIALIZE_SCALAR(regs.wcsr);
    UNSERIALIZE_SCALAR(regs.pcr);
    UNSERIALIZE_SCALAR(regs.rfcr);
    UNSERIALIZE_SCALAR(regs.rfdr);
    UNSERIALIZE_SCALAR(regs.srr);
    UNSERIALIZE_SCALAR(regs.mibc);
    UNSERIALIZE_SCALAR(regs.vrcr);
    UNSERIALIZE_SCALAR(regs.vtcr);
    UNSERIALIZE_SCALAR(regs.vdr);
    UNSERIALIZE_SCALAR(regs.ccsr);
    UNSERIALIZE_SCALAR(regs.tbicr);
    UNSERIALIZE_SCALAR(regs.tbisr);
    UNSERIALIZE_SCALAR(regs.tanar);
    UNSERIALIZE_SCALAR(regs.tanlpar);
    UNSERIALIZE_SCALAR(regs.taner);
    UNSERIALIZE_SCALAR(regs.tesr);

    UNSERIALIZE_ARRAY(rom.perfectMatch, EADDR_LEN);

    UNSERIALIZE_SCALAR(ioEnable);

    /*
     * unserialize the data fifos
     */
    int txNumPkts;
    UNSERIALIZE_SCALAR(txNumPkts);
    int i;
    for (i = 0; i < txNumPkts; ++i) {
        PacketPtr p = new EtherPacket;
        p->unserialize(cp, csprintf("%s.txFifo%d", section, i));
        txFifo.push_back(p);
    }

    int rxNumPkts;
    UNSERIALIZE_SCALAR(rxNumPkts);
    for (i = 0; i < rxNumPkts; ++i) {
        PacketPtr p = new EtherPacket;
        p->unserialize(cp, csprintf("%s.rxFifo%d", section, i));
        rxFifo.push_back(p);
    }

    /*
     * unserialize the various helper variables
     */
    bool txPacketExists;
    UNSERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket = new EtherPacket;
        txPacket->unserialize(cp, csprintf("%s.txPacket", section));
        uint32_t txPktBufPtr;
        UNSERIALIZE_SCALAR(txPktBufPtr);
        txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
    } else
        txPacket = 0;

    bool rxPacketExists;
    UNSERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        rxPacket = new EtherPacket;
        rxPacket->unserialize(cp, csprintf("%s.rxPacket", section));
        uint32_t rxPktBufPtr;
        UNSERIALIZE_SCALAR(rxPktBufPtr);
        rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
    } else
        rxPacket = 0;

    UNSERIALIZE_SCALAR(txXferLen);
    UNSERIALIZE_SCALAR(rxXferLen);

    /*
     * Unserialize DescCaches
     */
    UNSERIALIZE_SCALAR(txDescCache.link);
    UNSERIALIZE_SCALAR(txDescCache.bufptr);
    UNSERIALIZE_SCALAR(txDescCache.cmdsts);
    UNSERIALIZE_SCALAR(txDescCache.extsts);
    UNSERIALIZE_SCALAR(rxDescCache.link);
    UNSERIALIZE_SCALAR(rxDescCache.bufptr);
    UNSERIALIZE_SCALAR(rxDescCache.cmdsts);
    UNSERIALIZE_SCALAR(rxDescCache.extsts);

    /*
     * unserialize tx state machine
     */
    int txState;
    UNSERIALIZE_SCALAR(txState);
    this->txState = (TxState) txState;
    UNSERIALIZE_SCALAR(CTDD);
    UNSERIALIZE_SCALAR(txFifoAvail);
    UNSERIALIZE_SCALAR(txHalt);
    UNSERIALIZE_SCALAR(txFragPtr);
    UNSERIALIZE_SCALAR(txDescCnt);
    int txDmaState;
    UNSERIALIZE_SCALAR(txDmaState);
    this->txDmaState = (DmaState) txDmaState;

    /*
     * unserialize rx state machine
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    this->rxState = (RxState) rxState;
    UNSERIALIZE_SCALAR(CRDD);
    UNSERIALIZE_SCALAR(rxPktBytes);
    UNSERIALIZE_SCALAR(rxFifoCnt);
    UNSERIALIZE_SCALAR(rxHalt);
    UNSERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState;
    UNSERIALIZE_SCALAR(rxDmaState);
    this->rxDmaState = (DmaState) rxDmaState;

    UNSERIALIZE_SCALAR(extstsEnable);

    /*
     * If there's a pending transmit, reschedule it now
     */
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        txEvent.schedule(curTick + transmitTick);

    /*
     * unserialize receive address filter settings
     */
    UNSERIALIZE_SCALAR(rxFilterEnable);
    UNSERIALIZE_SCALAR(acceptBroadcast);
    UNSERIALIZE_SCALAR(acceptMulticast);
    UNSERIALIZE_SCALAR(acceptUnicast);
    UNSERIALIZE_SCALAR(acceptPerfect);
    UNSERIALIZE_SCALAR(acceptArp);

    /*
     * Restore pending interrupt status.
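     * Only the tick of any outstanding interrupt event was checkpointed,
     * so if that tick is non-zero a fresh IntrEvent is created and
     * rescheduled below.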
2599 */ 2600 UNSERIALIZE_SCALAR(intrTick); 2601 UNSERIALIZE_SCALAR(cpuPendingIntr); 2602 Tick intrEventTick; 2603 UNSERIALIZE_SCALAR(intrEventTick); 2604 if (intrEventTick) { 2605 intrEvent = new IntrEvent(this, true); 2606 intrEvent->schedule(intrEventTick); 2607 } 2608 2609 /* 2610 * re-add addrRanges to bus bridges 2611 */ 2612 if (pioInterface) { 2613 pioInterface->addAddrRange(BARAddrs[0], BARAddrs[0] + BARSize[0] - 1); 2614 pioInterface->addAddrRange(BARAddrs[1], BARAddrs[1] + BARSize[1] - 1); 2615 } 2616} 2617 2618Tick 2619NSGigE::cacheAccess(MemReqPtr &req) 2620{ 2621 DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n", 2622 req->paddr, req->paddr - addr); 2623 return curTick + pioLatency; 2624} 2625//===================================================================== 2626 2627 2628//********** helper functions****************************************** 2629 2630uint16_t reverseEnd16(uint16_t num) 2631{ 2632 uint16_t reverse = (num & 0xff)<<8; 2633 reverse += ((num & 0xff00) >> 8); 2634 return reverse; 2635} 2636 2637uint32_t reverseEnd32(uint32_t num) 2638{ 2639 uint32_t reverse = (reverseEnd16(num & 0xffff)) << 16; 2640 reverse += reverseEnd16((uint16_t) ((num & 0xffff0000) >> 8)); 2641 return reverse; 2642} 2643 2644 2645 2646//===================================================================== 2647 2648BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt) 2649 2650 SimObjectParam<EtherInt *> peer; 2651 SimObjectParam<NSGigE *> device; 2652 2653END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt) 2654 2655BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt) 2656 2657 INIT_PARAM_DFLT(peer, "peer interface", NULL), 2658 INIT_PARAM(device, "Ethernet device of this interface") 2659 2660END_INIT_SIM_OBJECT_PARAMS(NSGigEInt) 2661 2662CREATE_SIM_OBJECT(NSGigEInt) 2663{ 2664 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device); 2665 2666 EtherInt *p = (EtherInt *)peer; 2667 if (p) { 2668 dev_int->setPeer(p); 2669 p->setPeer(dev_int); 2670 } 2671 2672 return dev_int; 2673} 2674 2675REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt) 2676 2677 2678BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE) 2679 2680 Param<Tick> tx_delay; 2681 Param<Tick> rx_delay; 2682 SimObjectParam<IntrControl *> intr_ctrl; 2683 Param<Tick> intr_delay; 2684 SimObjectParam<MemoryController *> mmu; 2685 SimObjectParam<PhysicalMemory *> physmem; 2686 Param<bool> rx_filter; 2687 Param<string> hardware_address; 2688 SimObjectParam<Bus*> header_bus; 2689 SimObjectParam<Bus*> payload_bus; 2690 SimObjectParam<HierParams *> hier; 2691 Param<Tick> pio_latency; 2692 Param<bool> dma_desc_free; 2693 Param<bool> dma_data_free; 2694 Param<Tick> dma_read_delay; 2695 Param<Tick> dma_write_delay; 2696 Param<Tick> dma_read_factor; 2697 Param<Tick> dma_write_factor; 2698 SimObjectParam<PciConfigAll *> configspace; 2699 SimObjectParam<PciConfigData *> configdata; 2700 SimObjectParam<Tsunami *> tsunami; 2701 Param<uint32_t> pci_bus; 2702 Param<uint32_t> pci_dev; 2703 Param<uint32_t> pci_func; 2704 Param<uint32_t> tx_fifo_size; 2705 Param<uint32_t> rx_fifo_size; 2706 2707END_DECLARE_SIM_OBJECT_PARAMS(NSGigE) 2708 2709BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE) 2710 2711 INIT_PARAM_DFLT(tx_delay, "Transmit Delay", 1000), 2712 INIT_PARAM_DFLT(rx_delay, "Receive Delay", 1000), 2713 INIT_PARAM(intr_ctrl, "Interrupt Controller"), 2714 INIT_PARAM_DFLT(intr_delay, "Interrupt Delay in microseconds", 0), 2715 INIT_PARAM(mmu, "Memory Controller"), 2716 INIT_PARAM(physmem, "Physical Memory"), 2717 INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true), 2718 
    INIT_PARAM_DFLT(hardware_address, "Ethernet Hardware Address",
                    "00:99:00:00:00:01"),
    INIT_PARAM_DFLT(header_bus, "The IO Bus to attach to for headers", NULL),
    INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL),
    INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams),
    INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
    INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false),
    INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false),
    INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0),
    INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0),
    INIT_PARAM(configspace, "PCI Configspace"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(tsunami, "Tsunami"),
    INIT_PARAM(pci_bus, "PCI bus"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),
    INIT_PARAM_DFLT(tx_fifo_size, "max size in bytes of txFifo", 131072),
    INIT_PARAM_DFLT(rx_fifo_size, "max size in bytes of rxFifo", 131072)

END_INIT_SIM_OBJECT_PARAMS(NSGigE)


CREATE_SIM_OBJECT(NSGigE)
{
    int eaddr[6];
    sscanf(((string)hardware_address).c_str(), "%x:%x:%x:%x:%x:%x",
           &eaddr[0], &eaddr[1], &eaddr[2], &eaddr[3], &eaddr[4], &eaddr[5]);

    return new NSGigE(getInstanceName(), intr_ctrl, intr_delay,
                      physmem, tx_delay, rx_delay, mmu, hier, header_bus,
                      payload_bus, pio_latency, dma_desc_free, dma_data_free,
                      dma_read_delay, dma_write_delay, dma_read_factor,
                      dma_write_factor, configspace, configdata,
                      tsunami, pci_bus, pci_dev, pci_func, rx_filter, eaddr,
                      tx_fifo_size, rx_fifo_size);
}

REGISTER_SIM_OBJECT("NSGigE", NSGigE)
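
/*
 * Illustrative sketch only, excluded from the build: a hand-checkable
 * example of the byte-swap behaviour expected from the reverseEnd
 * helpers and of the ones-complement fold used by NSGigE::checksumCalc()
 * above.  The function name helperSelfCheck is made up for this sketch
 * and is not part of the device model.
 */
#if 0
#include <cassert>

static void
helperSelfCheck()
{
    // full byte reversal: 0x1234 -> 0x3412, 0x11223344 -> 0x44332211
    assert(reverseEnd16(0x1234) == 0x3412);
    assert(reverseEnd32(0x11223344) == 0x44332211);

    // ones-complement fold: 0xffff + 0x0002 = 0x10001, which folds to
    // 0x0002, so the complemented 16-bit checksum is 0xfffd
    uint32_t sum = 0xffffu + 0x0002u;
    while (sum >> 16)
        sum = (sum >> 16) + (sum & 0xffff);
    assert((uint16_t) ~sum == 0xfffd);
}
#endif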