ns_gige.cc revision 10367
1/* 2 * Copyright (c) 2004-2005 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 * 28 * Authors: Nathan Binkert 29 * Lisa Hsu 30 */ 31 32/** @file 33 * Device module for modelling the National Semiconductor 34 * DP83820 ethernet controller. 
 * Does not support priority queueing
 */
#include <deque>
#include <string>

#include "base/debug.hh"
#include "base/inet.hh"
#include "base/types.hh"
#include "config/the_isa.hh"
#include "debug/EthernetAll.hh"
#include "dev/etherlink.hh"
#include "dev/ns_gige.hh"
#include "dev/pciconfigall.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/NSGigE.hh"
#include "sim/system.hh"

// clang complains about std::set being overloaded with Packet::set if
// we open up the entire namespace std
using std::min;
using std::ostream;
using std::string;

// Debug-trace names for the receive state machine; indexed by the
// rxState enum values.
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};

// Debug-trace names for the transmit state machine; indexed by the
// txState enum values.
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};

// Debug-trace names for the rx/tx DMA engine states.
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};

using namespace Net;
using namespace TheISA;

///////////////////////////////////////////////////////////////////////
//
// NSGigE PCI Device
//

/**
 * Construct the device model.  Both state machines and the DMA engine
 * start out idle/disabled; timing parameters (delays, factors), fifo
 * sizes and the initial rx filter setting come from the Python params
 * object.  The perfect-match filter ROM is seeded with the configured
 * MAC address and the cached descriptor copies are zeroed.
 */
NSGigE::NSGigE(Params *p)
    : EtherDevBase(p), ioEnable(false),
      txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), rxDmaFree(false), txDmaFree(false),
      txState(txIdle), txEnable(false), CTDD(false), txHalt(false),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      rxEnable(false), CRDD(false), rxPktBytes(0), rxHalt(false),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      eepromState(eepromStart), eepromClk(false), eepromBitsToRx(0),
      eepromOpcode(0), eepromAddress(0), eepromData(0),
      dmaReadDelay(p->dma_read_delay), dmaWriteDelay(p->dma_write_delay),
      dmaReadFactor(p->dma_read_factor),
      dmaWriteFactor(p->dma_write_factor),
      rxDmaData(NULL), rxDmaAddr(0), rxDmaLen(0),
      txDmaData(NULL), txDmaAddr(0), txDmaLen(0),
      rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
      txDelay(p->tx_delay), rxDelay(p->rx_delay),
      rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this),
      txEvent(this), rxFilterEnable(p->rx_filter),
      acceptBroadcast(false), acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
      intrDelay(p->intr_delay), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{
    // single ethernet port; name matches the Python-side port name
    interface = new NSGigEInt(name() + ".int0", this);

    regsReset();
    // seed the perfect-match filter ROM with our MAC address
    memcpy(&rom.perfectMatch, p->hardware_address.bytes(), ETH_ADDR_LEN);

    memset(&rxDesc32, 0, sizeof(rxDesc32));
    memset(&txDesc32, 0, sizeof(txDesc32));
    memset(&rxDesc64, 0, sizeof(rxDesc64));
    memset(&txDesc64, 0, sizeof(txDesc64));
}

NSGigE::~NSGigE()
{
    delete interface;
}

/**
 * This is to write to the PCI general configuration registers
 */
Tick
NSGigE::writeConfig(PacketPtr pkt)
{
    // NOTE(review): masks the address with PCI_CONFIG_SIZE itself, not
    // (PCI_CONFIG_SIZE - 1) — presumably PCI_CONFIG_SIZE is already a
    // power-of-two-minus-one mask; confirm against its definition.
    int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDevice::writeConfig(pkt);
    else
        panic("Device specific PCI config space not implemented!\n");

    switch (offset) {
        // seems to work fine without all these PCI settings, but i
        // put in the IO to double check, an assertion will fail if we
        // need to properly implement it
      case PCI_COMMAND:
        // track I/O space enable so read()/write() can assert on it
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;
        break;
    }

    return configDelay;
}

/**
 * Return the device's single ethernet port ("interface"), or NULL for
 * an unknown port name.  Panics if the port is already connected.
 */
EtherInt*
NSGigE::getEthPort(const std::string &if_name, int idx)
{
    if (if_name == "interface") {
        if (interface->getPeer())
            panic("interface already connected to\n");
        return interface;
    }
    return NULL;
}

/**
 * This reads the device registers, which are detailed in the NS83820
 * spec sheet
 */
Tick
NSGigE::read(PacketPtr pkt)
{
    assert(ioEnable);

    pkt->allocate();

    //The mask is to give you only the offset into the device register file
    Addr daddr = pkt->getAddr() & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x size=%d\n",
            daddr, pkt->getAddr(), pkt->getSize());

    // there are some reserved registers, you can see ns_gige_reg.h and
    // the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        return readConfig(pkt);
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's.  hopefully the kernel
        // doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        pkt->set<uint32_t>(0);
        pkt->makeAtomicResponse();
        return pioDelay;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    // all register reads are 32-bit; write the result directly into
    // the packet's data buffer through this reference
    assert(pkt->getSize() == sizeof(uint32_t));
    uint32_t &reg = *pkt->getPtr<uint32_t>();
    uint16_t rfaddr;

    switch (daddr) {
      case CR:
        reg = regs.command;
        //these are supposed to be cleared on a read
        reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
        break;

      case CFGR:
        reg = regs.config;
        break;

      case MEAR:
        reg = regs.mear;
        break;

      case PTSCR:
        reg = regs.ptscr;
        break;

      case ISR:
        reg = regs.isr;
        // reading ISR acknowledges (clears) all implemented interrupts
        devIntrClear(ISR_ALL);
        break;

      case IMR:
        reg = regs.imr;
        break;

      case IER:
        reg = regs.ier;
        break;

      case IHR:
        reg = regs.ihr;
        break;

      case TXDP:
        reg = regs.txdp;
        break;

      case TXDP_HI:
        reg = regs.txdp_hi;
        break;

      case TX_CFG:
        reg = regs.txcfg;
        break;

      case GPIOR:
        reg = regs.gpior;
        break;

      case RXDP:
        reg = regs.rxdp;
        break;

      case RXDP_HI:
        reg = regs.rxdp_hi;
        break;

      case RX_CFG:
        reg = regs.rxcfg;
        break;

      case PQCR:
        reg = regs.pqcr;
        break;

      case WCSR:
        reg = regs.wcsr;
        break;

      case PCR:
        reg = regs.pcr;
        break;

      // see the spec sheet for how RFCR and RFDR work
      // basically, you write to RFCR to tell the machine
      // what you want to do next, then you act upon RFDR,
      // and the device will be prepared b/c of what you
      // wrote to RFCR
      case RFCR:
        reg = regs.rfcr;
        break;

      case RFDR:
        rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
        switch (rfaddr) {
          // Read from perfect match ROM octets
          case 0x000:
            reg = rom.perfectMatch[1];
            reg = reg << 8;
            reg += rom.perfectMatch[0];
            break;
          case 0x002:
            reg = rom.perfectMatch[3] << 8;
            reg += rom.perfectMatch[2];
            break;
          case 0x004:
            reg = rom.perfectMatch[5] << 8;
            reg += rom.perfectMatch[4];
            break;
          default:
            // Read filter hash table
            if (rfaddr >= FHASH_ADDR &&
                rfaddr < FHASH_ADDR + FHASH_SIZE) {

                // Only word-aligned reads supported
                if (rfaddr % 2)
                    panic("unaligned read from filter hash table!");

                reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8;
                reg += rom.filterHash[rfaddr - FHASH_ADDR];
                break;
            }

            panic("reading RFDR for something other than pattern"
                  " matching or hashing! %#x\n", rfaddr);
        }
        break;

      case SRR:
        reg = regs.srr;
        break;

      case MIBC:
        reg = regs.mibc;
        // these bits are self-clearing on read
        reg &= ~(MIBC_MIBS | MIBC_ACLR);
        break;

      case VRCR:
        reg = regs.vrcr;
        break;

      case VTCR:
        reg = regs.vtcr;
        break;

      case VDR:
        reg = regs.vdr;
        break;

      case CCSR:
        reg = regs.ccsr;
        break;

      case TBICR:
        reg = regs.tbicr;
        break;

      case TBISR:
        reg = regs.tbisr;
        break;

      case TANAR:
        reg = regs.tanar;
        break;

      case TANLPAR:
        reg = regs.tanlpar;
        break;

      case TANER:
        reg = regs.taner;
        break;

      case TESR:
        reg = regs.tesr;
        break;

      case M5REG:
        // gem5-specific register advertising simulation features to
        // the (modified) driver
        reg = 0;
        if (params()->rx_thread)
            reg |= M5REG_RX_THREAD;
        if (params()->tx_thread)
            reg |= M5REG_TX_THREAD;
        if (params()->rss)
            reg |= M5REG_RSS;
        break;

      default:
        panic("reading unimplemented register: addr=%#x", daddr);
    }

    DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
            daddr, reg, reg);

    pkt->makeAtomicResponse();
    return pioDelay;
}

/**
 * Handle a PIO write to the device register file; see the NS83820
 * spec sheet for register semantics.  Only 32-bit accesses are
 * accepted.
 */
Tick
NSGigE::write(PacketPtr pkt)
{
    assert(ioEnable);

    Addr daddr = pkt->getAddr() & 0xfff;
    DPRINTF(EthernetPIO, "write da=%#x pa=%#x size=%d\n",
            daddr, pkt->getAddr(), pkt->getSize());

    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        return writeConfig(pkt);
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    if (pkt->getSize() == sizeof(uint32_t)) {
        uint32_t reg = pkt->get<uint32_t>();
        uint16_t rfaddr;

        DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);

        switch (daddr) {
          case CR:
            regs.command = reg;
            if (reg & CR_TXD) {
                txEnable = false;
            } else if (reg & CR_TXE) {
                txEnable = true;

                // the kernel is enabling the transmit machine
                if
(txState == txIdle)
                    txKick();
            }

            if (reg & CR_RXD) {
                rxEnable = false;
            } else if (reg & CR_RXE) {
                rxEnable = true;

                if (rxState == rxIdle)
                    rxKick();
            }

            if (reg & CR_TXR)
                txReset();

            if (reg & CR_RXR)
                rxReset();

            if (reg & CR_SWI)
                devIntrPost(ISR_SWI);

            if (reg & CR_RST) {
                // full device reset: both state machines plus registers
                txReset();
                rxReset();

                regsReset();
            }
            break;

          case CFGR:
            if (reg & CFGR_LNKSTS ||
                reg & CFGR_SPDSTS ||
                reg & CFGR_DUPSTS ||
                reg & CFGR_RESERVED ||
                reg & CFGR_T64ADDR ||
                reg & CFGR_PCI64_DET) {
                // First clear all writable bits
                regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
                    CFGR_RESERVED | CFGR_T64ADDR |
                    CFGR_PCI64_DET;
                // Now set the appropriate writable bits
                regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
                                       CFGR_RESERVED | CFGR_T64ADDR |
                                       CFGR_PCI64_DET);
            }

// all these #if 0's are because i don't THINK the kernel needs to
// have these implemented. if there is a problem relating to one of
// these, you may need to add functionality in.

// grouped together and #if 0'ed to avoid empty if body and make clang happy
#if 0
            if (reg & CFGR_TBI_EN) ;
            if (reg & CFGR_MODE_1000) ;

            if (reg & CFGR_PINT_DUPSTS ||
                reg & CFGR_PINT_LNKSTS ||
                reg & CFGR_PINT_SPDSTS)
                ;

            if (reg & CFGR_TMRTEST) ;
            if (reg & CFGR_MRM_DIS) ;
            if (reg & CFGR_MWI_DIS) ;

            if (reg & CFGR_DATA64_EN) ;
            if (reg & CFGR_M64ADDR) ;
            if (reg & CFGR_PHY_RST) ;
            if (reg & CFGR_PHY_DIS) ;

            if (reg & CFGR_REQALG) ;
            if (reg & CFGR_SB) ;
            if (reg & CFGR_POW) ;
            if (reg & CFGR_EXD) ;
            if (reg & CFGR_PESEL) ;
            if (reg & CFGR_BROM_DIS) ;
            if (reg & CFGR_EXT_125) ;
            if (reg & CFGR_BEM) ;

            if (reg & CFGR_T64ADDR) ;
            // panic("CFGR_T64ADDR is read only register!\n");
#endif
            if (reg & CFGR_AUTO_1000)
                panic("CFGR_AUTO_1000 not implemented!\n");

            if (reg & CFGR_PCI64_DET)
                panic("CFGR_PCI64_DET is read only register!\n");

            if (reg & CFGR_EXTSTS_EN)
                extstsEnable = true;
            else
                extstsEnable = false;
            break;

          case MEAR:
            // Clear writable bits
            regs.mear &= MEAR_EEDO;
            // Set appropriate writable bits
            regs.mear |= reg & ~MEAR_EEDO;

            // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
            // even though it could get it through RFDR
            if (reg & MEAR_EESEL) {
                // Rising edge of clock
                if (reg & MEAR_EECLK && !eepromClk)
                    eepromKick();
            }
            else {
                // chip deselected: abort any in-progress EEPROM command
                eepromState = eepromStart;
                regs.mear &= ~MEAR_EEDI;
            }

            // remember clock level so the next write can detect an edge
            eepromClk = reg & MEAR_EECLK;

            // since phy is completely faked, MEAR_MD* don't matter

// grouped together and #if 0'ed to avoid empty if body and make clang happy
#if 0
            if (reg & MEAR_MDIO) ;
            if (reg & MEAR_MDDIR) ;
            if (reg & MEAR_MDC) ;
#endif
            break;

          case PTSCR:
            regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
            // these control BISTs for various parts of chip - we
            // don't care or do just fake that the
            // BIST is done
            if (reg & PTSCR_RBIST_EN)
                regs.ptscr |= PTSCR_RBIST_DONE;
            if (reg & PTSCR_EEBIST_EN)
                regs.ptscr &= ~PTSCR_EEBIST_EN;
            if (reg & PTSCR_EELOAD_EN)
                regs.ptscr &= ~PTSCR_EELOAD_EN;
            break;

          case ISR: /* writing to the ISR has no effect */
            panic("ISR is a read only register!\n");

          case IMR:
            regs.imr = reg;
            // mask change may immediately post or clear the CPU interrupt
            devIntrChangeMask();
            break;

          case IER:
            regs.ier = reg;
            break;

          case IHR:
            regs.ihr = reg;
            /* not going to implement real interrupt holdoff */
            break;

          case TXDP:
            // descriptor pointers are dword-aligned
            regs.txdp = (reg & 0xFFFFFFFC);
            assert(txState == txIdle);
            CTDD = false;
            break;

          case TXDP_HI:
            regs.txdp_hi = reg;
            break;

          case TX_CFG:
            regs.txcfg = reg;
#if 0
            if (reg & TX_CFG_CSI) ;
            if (reg & TX_CFG_HBI) ;
            if (reg & TX_CFG_MLB) ;
            if (reg & TX_CFG_ATP) ;
            if (reg & TX_CFG_ECRETRY) {
                /*
                 * this could easily be implemented, but considering
                 * the network is just a fake pipe, wouldn't make
                 * sense to do this
                 */
            }

            if (reg & TX_CFG_BRST_DIS) ;
#endif

#if 0
            /* we handle our own DMA, ignore the kernel's exhortations */
            if (reg & TX_CFG_MXDMA) ;
#endif

            // also, we currently don't care about fill/drain
            // thresholds though this may change in the future with
            // more realistic networks or a driver which changes it
            // according to feedback

            break;

          case GPIOR:
            // Only write writable bits
            regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
                | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN;
            regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
                                  | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN);
            /* these just control general purpose i/o pins, don't matter */
            break;

          case RXDP:
            regs.rxdp = reg;
            CRDD = false;
            break;

          case RXDP_HI:
            regs.rxdp_hi = reg;
            break;

          case RX_CFG:
            regs.rxcfg = reg;
#if 0
            if (reg & RX_CFG_AEP) ;
            if (reg & RX_CFG_ARP) ;
            if (reg & RX_CFG_STRIPCRC) ;
            if (reg & RX_CFG_RX_RD) ;
            if (reg & RX_CFG_ALP) ;
            if (reg & RX_CFG_AIRL) ;

            /* we handle our own DMA, ignore what kernel says about it */
            if (reg & RX_CFG_MXDMA) ;

            //also, we currently don't care about fill/drain thresholds
            //though this may change in the future with more realistic
            //networks or a driver which changes it according to feedback
            if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
#endif
            break;

          case PQCR:
            /* there is no priority queueing used in the linux 2.6 driver */
            regs.pqcr = reg;
            break;

          case WCSR:
            /* not going to implement wake on LAN */
            regs.wcsr = reg;
            break;

          case PCR:
            /* not going to implement pause control */
            regs.pcr = reg;
            break;

          case RFCR:
            regs.rfcr = reg;

            // cache the individual filter-control bits as booleans so
            // the rx filter doesn't have to re-decode the register
            rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
            acceptBroadcast = (reg & RFCR_AAB) ? true : false;
            acceptMulticast = (reg & RFCR_AAM) ? true : false;
            acceptUnicast = (reg & RFCR_AAU) ? true : false;
            acceptPerfect = (reg & RFCR_APM) ? true : false;
            acceptArp = (reg & RFCR_AARP) ? true : false;
            multicastHashEnable = (reg & RFCR_MHEN) ?
true : false; 693 694#if 0 695 if (reg & RFCR_APAT) 696 panic("RFCR_APAT not implemented!\n"); 697#endif 698 if (reg & RFCR_UHEN) 699 panic("Unicast hash filtering not used by drivers!\n"); 700 701 if (reg & RFCR_ULM) 702 panic("RFCR_ULM not implemented!\n"); 703 704 break; 705 706 case RFDR: 707 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR); 708 switch (rfaddr) { 709 case 0x000: 710 rom.perfectMatch[0] = (uint8_t)reg; 711 rom.perfectMatch[1] = (uint8_t)(reg >> 8); 712 break; 713 case 0x002: 714 rom.perfectMatch[2] = (uint8_t)reg; 715 rom.perfectMatch[3] = (uint8_t)(reg >> 8); 716 break; 717 case 0x004: 718 rom.perfectMatch[4] = (uint8_t)reg; 719 rom.perfectMatch[5] = (uint8_t)(reg >> 8); 720 break; 721 default: 722 723 if (rfaddr >= FHASH_ADDR && 724 rfaddr < FHASH_ADDR + FHASH_SIZE) { 725 726 // Only word-aligned writes supported 727 if (rfaddr % 2) 728 panic("unaligned write to filter hash table!"); 729 730 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg; 731 rom.filterHash[rfaddr - FHASH_ADDR + 1] 732 = (uint8_t)(reg >> 8); 733 break; 734 } 735 panic("writing RFDR for something other than pattern matching " 736 "or hashing! 
%#x\n", rfaddr); 737 } 738 739 case BRAR: 740 regs.brar = reg; 741 break; 742 743 case BRDR: 744 panic("the driver never uses BRDR, something is wrong!\n"); 745 746 case SRR: 747 panic("SRR is read only register!\n"); 748 749 case MIBC: 750 panic("the driver never uses MIBC, something is wrong!\n"); 751 752 case VRCR: 753 regs.vrcr = reg; 754 break; 755 756 case VTCR: 757 regs.vtcr = reg; 758 break; 759 760 case VDR: 761 panic("the driver never uses VDR, something is wrong!\n"); 762 763 case CCSR: 764 /* not going to implement clockrun stuff */ 765 regs.ccsr = reg; 766 break; 767 768 case TBICR: 769 regs.tbicr = reg; 770 if (reg & TBICR_MR_LOOPBACK) 771 panic("TBICR_MR_LOOPBACK never used, something wrong!\n"); 772 773 if (reg & TBICR_MR_AN_ENABLE) { 774 regs.tanlpar = regs.tanar; 775 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS); 776 } 777 778#if 0 779 if (reg & TBICR_MR_RESTART_AN) ; 780#endif 781 782 break; 783 784 case TBISR: 785 panic("TBISR is read only register!\n"); 786 787 case TANAR: 788 // Only write the writable bits 789 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED; 790 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED); 791 792 // Pause capability unimplemented 793#if 0 794 if (reg & TANAR_PS2) ; 795 if (reg & TANAR_PS1) ; 796#endif 797 798 break; 799 800 case TANLPAR: 801 panic("this should only be written to by the fake phy!\n"); 802 803 case TANER: 804 panic("TANER is read only register!\n"); 805 806 case TESR: 807 regs.tesr = reg; 808 break; 809 810 default: 811 panic("invalid register access daddr=%#x", daddr); 812 } 813 } else { 814 panic("Invalid Request Size"); 815 } 816 pkt->makeAtomicResponse(); 817 return pioDelay; 818} 819 820void 821NSGigE::devIntrPost(uint32_t interrupts) 822{ 823 if (interrupts & ISR_RESERVE) 824 panic("Cannot set a reserved interrupt"); 825 826 if (interrupts & ISR_NOIMPL) 827 warn("interrupt not implemented %#x\n", interrupts); 828 829 interrupts &= ISR_IMPL; 830 regs.isr |= interrupts; 831 
832 if (interrupts & regs.imr) { 833 if (interrupts & ISR_SWI) { 834 totalSwi++; 835 } 836 if (interrupts & ISR_RXIDLE) { 837 totalRxIdle++; 838 } 839 if (interrupts & ISR_RXOK) { 840 totalRxOk++; 841 } 842 if (interrupts & ISR_RXDESC) { 843 totalRxDesc++; 844 } 845 if (interrupts & ISR_TXOK) { 846 totalTxOk++; 847 } 848 if (interrupts & ISR_TXIDLE) { 849 totalTxIdle++; 850 } 851 if (interrupts & ISR_TXDESC) { 852 totalTxDesc++; 853 } 854 if (interrupts & ISR_RXORN) { 855 totalRxOrn++; 856 } 857 } 858 859 DPRINTF(EthernetIntr, 860 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n", 861 interrupts, regs.isr, regs.imr); 862 863 if ((regs.isr & regs.imr)) { 864 Tick when = curTick(); 865 if ((regs.isr & regs.imr & ISR_NODELAY) == 0) 866 when += intrDelay; 867 postedInterrupts++; 868 cpuIntrPost(when); 869 } 870} 871 872/* writing this interrupt counting stats inside this means that this function 873 is now limited to being used to clear all interrupts upon the kernel 874 reading isr and servicing. just telling you in case you were thinking 875 of expanding use. 
876*/ 877void 878NSGigE::devIntrClear(uint32_t interrupts) 879{ 880 if (interrupts & ISR_RESERVE) 881 panic("Cannot clear a reserved interrupt"); 882 883 if (regs.isr & regs.imr & ISR_SWI) { 884 postedSwi++; 885 } 886 if (regs.isr & regs.imr & ISR_RXIDLE) { 887 postedRxIdle++; 888 } 889 if (regs.isr & regs.imr & ISR_RXOK) { 890 postedRxOk++; 891 } 892 if (regs.isr & regs.imr & ISR_RXDESC) { 893 postedRxDesc++; 894 } 895 if (regs.isr & regs.imr & ISR_TXOK) { 896 postedTxOk++; 897 } 898 if (regs.isr & regs.imr & ISR_TXIDLE) { 899 postedTxIdle++; 900 } 901 if (regs.isr & regs.imr & ISR_TXDESC) { 902 postedTxDesc++; 903 } 904 if (regs.isr & regs.imr & ISR_RXORN) { 905 postedRxOrn++; 906 } 907 908 interrupts &= ~ISR_NOIMPL; 909 regs.isr &= ~interrupts; 910 911 DPRINTF(EthernetIntr, 912 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n", 913 interrupts, regs.isr, regs.imr); 914 915 if (!(regs.isr & regs.imr)) 916 cpuIntrClear(); 917} 918 919void 920NSGigE::devIntrChangeMask() 921{ 922 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n", 923 regs.isr, regs.imr, regs.isr & regs.imr); 924 925 if (regs.isr & regs.imr) 926 cpuIntrPost(curTick()); 927 else 928 cpuIntrClear(); 929} 930 931void 932NSGigE::cpuIntrPost(Tick when) 933{ 934 // If the interrupt you want to post is later than an interrupt 935 // already scheduled, just let it post in the coming one and don't 936 // schedule another. 937 // HOWEVER, must be sure that the scheduled intrTick is in the 938 // future (this was formerly the source of a bug) 939 /** 940 * @todo this warning should be removed and the intrTick code should 941 * be fixed. 
942 */ 943 assert(when >= curTick()); 944 assert(intrTick >= curTick() || intrTick == 0); 945 if (when > intrTick && intrTick != 0) { 946 DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n", 947 intrTick); 948 return; 949 } 950 951 intrTick = when; 952 if (intrTick < curTick()) { 953 Debug::breakpoint(); 954 intrTick = curTick(); 955 } 956 957 DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n", 958 intrTick); 959 960 if (intrEvent) 961 intrEvent->squash(); 962 intrEvent = new IntrEvent(this, true); 963 schedule(intrEvent, intrTick); 964} 965 966void 967NSGigE::cpuInterrupt() 968{ 969 assert(intrTick == curTick()); 970 971 // Whether or not there's a pending interrupt, we don't care about 972 // it anymore 973 intrEvent = 0; 974 intrTick = 0; 975 976 // Don't send an interrupt if there's already one 977 if (cpuPendingIntr) { 978 DPRINTF(EthernetIntr, 979 "would send an interrupt now, but there's already pending\n"); 980 } else { 981 // Send interrupt 982 cpuPendingIntr = true; 983 984 DPRINTF(EthernetIntr, "posting interrupt\n"); 985 intrPost(); 986 } 987} 988 989void 990NSGigE::cpuIntrClear() 991{ 992 if (!cpuPendingIntr) 993 return; 994 995 if (intrEvent) { 996 intrEvent->squash(); 997 intrEvent = 0; 998 } 999 1000 intrTick = 0; 1001 1002 cpuPendingIntr = false; 1003 1004 DPRINTF(EthernetIntr, "clearing interrupt\n"); 1005 intrClear(); 1006} 1007 1008bool 1009NSGigE::cpuIntrPending() const 1010{ return cpuPendingIntr; } 1011 1012void 1013NSGigE::txReset() 1014{ 1015 1016 DPRINTF(Ethernet, "transmit reset\n"); 1017 1018 CTDD = false; 1019 txEnable = false;; 1020 txFragPtr = 0; 1021 assert(txDescCnt == 0); 1022 txFifo.clear(); 1023 txState = txIdle; 1024 assert(txDmaState == dmaIdle); 1025} 1026 1027void 1028NSGigE::rxReset() 1029{ 1030 DPRINTF(Ethernet, "receive reset\n"); 1031 1032 CRDD = false; 1033 assert(rxPktBytes == 0); 1034 rxEnable = false; 1035 rxFragPtr = 0; 1036 assert(rxDescCnt == 0); 1037 assert(rxDmaState == 
dmaIdle); 1038 rxFifo.clear(); 1039 rxState = rxIdle; 1040} 1041 1042void 1043NSGigE::regsReset() 1044{ 1045 memset(®s, 0, sizeof(regs)); 1046 regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000); 1047 regs.mear = 0x12; 1048 regs.txcfg = 0x120; // set drain threshold to 1024 bytes and 1049 // fill threshold to 32 bytes 1050 regs.rxcfg = 0x4; // set drain threshold to 16 bytes 1051 regs.srr = 0x0103; // set the silicon revision to rev B or 0x103 1052 regs.mibc = MIBC_FRZ; 1053 regs.vdr = 0x81; // set the vlan tag type to 802.1q 1054 regs.tesr = 0xc000; // TBI capable of both full and half duplex 1055 regs.brar = 0xffffffff; 1056 1057 extstsEnable = false; 1058 acceptBroadcast = false; 1059 acceptMulticast = false; 1060 acceptUnicast = false; 1061 acceptPerfect = false; 1062 acceptArp = false; 1063} 1064 1065bool 1066NSGigE::doRxDmaRead() 1067{ 1068 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting); 1069 rxDmaState = dmaReading; 1070 1071 if (dmaPending() || getDrainState() != Drainable::Running) 1072 rxDmaState = dmaReadWaiting; 1073 else 1074 dmaRead(rxDmaAddr, rxDmaLen, &rxDmaReadEvent, (uint8_t*)rxDmaData); 1075 1076 return true; 1077} 1078 1079void 1080NSGigE::rxDmaReadDone() 1081{ 1082 assert(rxDmaState == dmaReading); 1083 rxDmaState = dmaIdle; 1084 1085 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n", 1086 rxDmaAddr, rxDmaLen); 1087 DDUMP(EthernetDMA, rxDmaData, rxDmaLen); 1088 1089 // If the transmit state machine has a pending DMA, let it go first 1090 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting) 1091 txKick(); 1092 1093 rxKick(); 1094} 1095 1096bool 1097NSGigE::doRxDmaWrite() 1098{ 1099 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting); 1100 rxDmaState = dmaWriting; 1101 1102 if (dmaPending() || getDrainState() != Running) 1103 rxDmaState = dmaWriteWaiting; 1104 else 1105 dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaWriteEvent, (uint8_t*)rxDmaData); 1106 return true; 1107} 1108 1109void 
NSGigE::rxDmaWriteDone()
{
    assert(rxDmaState == dmaWriting);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}

/**
 * Advance the receive state machine.  Runs one state per device clock
 * edge until it either blocks on a DMA, runs out of work, or is told
 * to wait for a future tick (rxKickTick).
 */
void
NSGigE::rxKick()
{
    // descriptor format depends on the 64-bit addressing mode bit
    bool is64bit = (bool)(regs.config & CFGR_M64ADDR);

    DPRINTF(EthernetSM,
            "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
            NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32);

    Addr link, bufptr;
    // aliases into whichever cached descriptor copy is in use
    uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts;
    uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts;

  next:
    if (rxKickTick > curTick()) {
        DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
                rxKickTick);

        goto exit;
    }

    // Go to the next state machine clock tick.
    rxKickTick = clockEdge(Cycles(1));

    // retry any DMA that was deferred (e.g. while draining)
    switch(rxDmaState) {
      case dmaReadWaiting:
        if (doRxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doRxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link;
    bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr;

    // see state machine from spec for details
    // the way this works is, if you finish work on one state and can
    // go directly to another, you do that through jumping to the
    // label "next".  however, if you have intermediate work, like DMA
    // so that you can't go to the next state yet, you go to exit and
    // exit the loop.  however, when the DMA is done it will trigger
    // an event and come back to this loop.
1173 switch (rxState) { 1174 case rxIdle: 1175 if (!rxEnable) { 1176 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n"); 1177 goto exit; 1178 } 1179 1180 if (CRDD) { 1181 rxState = rxDescRefr; 1182 1183 rxDmaAddr = regs.rxdp & 0x3fffffff; 1184 rxDmaData = 1185 is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link; 1186 rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link); 1187 rxDmaFree = dmaDescFree; 1188 1189 descDmaReads++; 1190 descDmaRdBytes += rxDmaLen; 1191 1192 if (doRxDmaRead()) 1193 goto exit; 1194 } else { 1195 rxState = rxDescRead; 1196 1197 rxDmaAddr = regs.rxdp & 0x3fffffff; 1198 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32; 1199 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32); 1200 rxDmaFree = dmaDescFree; 1201 1202 descDmaReads++; 1203 descDmaRdBytes += rxDmaLen; 1204 1205 if (doRxDmaRead()) 1206 goto exit; 1207 } 1208 break; 1209 1210 case rxDescRefr: 1211 if (rxDmaState != dmaIdle) 1212 goto exit; 1213 1214 rxState = rxAdvance; 1215 break; 1216 1217 case rxDescRead: 1218 if (rxDmaState != dmaIdle) 1219 goto exit; 1220 1221 DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n", 1222 regs.rxdp & 0x3fffffff); 1223 DPRINTF(EthernetDesc, 1224 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n", 1225 link, bufptr, cmdsts, extsts); 1226 1227 if (cmdsts & CMDSTS_OWN) { 1228 devIntrPost(ISR_RXIDLE); 1229 rxState = rxIdle; 1230 goto exit; 1231 } else { 1232 rxState = rxFifoBlock; 1233 rxFragPtr = bufptr; 1234 rxDescCnt = cmdsts & CMDSTS_LEN_MASK; 1235 } 1236 break; 1237 1238 case rxFifoBlock: 1239 if (!rxPacket) { 1240 /** 1241 * @todo in reality, we should be able to start processing 1242 * the packet as it arrives, and not have to wait for the 1243 * full packet ot be in the receive fifo. 1244 */ 1245 if (rxFifo.empty()) 1246 goto exit; 1247 1248 DPRINTF(EthernetSM, "****processing receive of new packet****\n"); 1249 1250 // If we don't have a packet, grab a new one from the fifo. 
1251 rxPacket = rxFifo.front(); 1252 rxPktBytes = rxPacket->length; 1253 rxPacketBufPtr = rxPacket->data; 1254 1255#if TRACING_ON 1256 if (DTRACE(Ethernet)) { 1257 IpPtr ip(rxPacket); 1258 if (ip) { 1259 DPRINTF(Ethernet, "ID is %d\n", ip->id()); 1260 TcpPtr tcp(ip); 1261 if (tcp) { 1262 DPRINTF(Ethernet, 1263 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n", 1264 tcp->sport(), tcp->dport(), tcp->seq(), 1265 tcp->ack()); 1266 } 1267 } 1268 } 1269#endif 1270 1271 // sanity check - i think the driver behaves like this 1272 assert(rxDescCnt >= rxPktBytes); 1273 rxFifo.pop(); 1274 } 1275 1276 1277 // dont' need the && rxDescCnt > 0 if driver sanity check 1278 // above holds 1279 if (rxPktBytes > 0) { 1280 rxState = rxFragWrite; 1281 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity 1282 // check holds 1283 rxXferLen = rxPktBytes; 1284 1285 rxDmaAddr = rxFragPtr & 0x3fffffff; 1286 rxDmaData = rxPacketBufPtr; 1287 rxDmaLen = rxXferLen; 1288 rxDmaFree = dmaDataFree; 1289 1290 if (doRxDmaWrite()) 1291 goto exit; 1292 1293 } else { 1294 rxState = rxDescWrite; 1295 1296 //if (rxPktBytes == 0) { /* packet is done */ 1297 assert(rxPktBytes == 0); 1298 DPRINTF(EthernetSM, "done with receiving packet\n"); 1299 1300 cmdsts |= CMDSTS_OWN; 1301 cmdsts &= ~CMDSTS_MORE; 1302 cmdsts |= CMDSTS_OK; 1303 cmdsts &= 0xffff0000; 1304 cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE 1305 1306#if 0 1307 /* 1308 * all the driver uses these are for its own stats keeping 1309 * which we don't care about, aren't necessary for 1310 * functionality and doing this would just slow us down. 
1311 * if they end up using this in a later version for 1312 * functional purposes, just undef 1313 */ 1314 if (rxFilterEnable) { 1315 cmdsts &= ~CMDSTS_DEST_MASK; 1316 const EthAddr &dst = rxFifoFront()->dst(); 1317 if (dst->unicast()) 1318 cmdsts |= CMDSTS_DEST_SELF; 1319 if (dst->multicast()) 1320 cmdsts |= CMDSTS_DEST_MULTI; 1321 if (dst->broadcast()) 1322 cmdsts |= CMDSTS_DEST_MASK; 1323 } 1324#endif 1325 1326 IpPtr ip(rxPacket); 1327 if (extstsEnable && ip) { 1328 extsts |= EXTSTS_IPPKT; 1329 rxIpChecksums++; 1330 if (cksum(ip) != 0) { 1331 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n"); 1332 extsts |= EXTSTS_IPERR; 1333 } 1334 TcpPtr tcp(ip); 1335 UdpPtr udp(ip); 1336 if (tcp) { 1337 extsts |= EXTSTS_TCPPKT; 1338 rxTcpChecksums++; 1339 if (cksum(tcp) != 0) { 1340 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n"); 1341 extsts |= EXTSTS_TCPERR; 1342 1343 } 1344 } else if (udp) { 1345 extsts |= EXTSTS_UDPPKT; 1346 rxUdpChecksums++; 1347 if (cksum(udp) != 0) { 1348 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n"); 1349 extsts |= EXTSTS_UDPERR; 1350 } 1351 } 1352 } 1353 rxPacket = 0; 1354 1355 /* 1356 * the driver seems to always receive into desc buffers 1357 * of size 1514, so you never have a pkt that is split 1358 * into multiple descriptors on the receive side, so 1359 * i don't implement that case, hence the assert above. 
1360 */ 1361 1362 DPRINTF(EthernetDesc, 1363 "rxDesc: addr=%08x writeback cmdsts extsts\n", 1364 regs.rxdp & 0x3fffffff); 1365 DPRINTF(EthernetDesc, 1366 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n", 1367 link, bufptr, cmdsts, extsts); 1368 1369 rxDmaAddr = regs.rxdp & 0x3fffffff; 1370 rxDmaData = &cmdsts; 1371 if (is64bit) { 1372 rxDmaAddr += offsetof(ns_desc64, cmdsts); 1373 rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts); 1374 } else { 1375 rxDmaAddr += offsetof(ns_desc32, cmdsts); 1376 rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts); 1377 } 1378 rxDmaFree = dmaDescFree; 1379 1380 descDmaWrites++; 1381 descDmaWrBytes += rxDmaLen; 1382 1383 if (doRxDmaWrite()) 1384 goto exit; 1385 } 1386 break; 1387 1388 case rxFragWrite: 1389 if (rxDmaState != dmaIdle) 1390 goto exit; 1391 1392 rxPacketBufPtr += rxXferLen; 1393 rxFragPtr += rxXferLen; 1394 rxPktBytes -= rxXferLen; 1395 1396 rxState = rxFifoBlock; 1397 break; 1398 1399 case rxDescWrite: 1400 if (rxDmaState != dmaIdle) 1401 goto exit; 1402 1403 assert(cmdsts & CMDSTS_OWN); 1404 1405 assert(rxPacket == 0); 1406 devIntrPost(ISR_RXOK); 1407 1408 if (cmdsts & CMDSTS_INTR) 1409 devIntrPost(ISR_RXDESC); 1410 1411 if (!rxEnable) { 1412 DPRINTF(EthernetSM, "Halting the RX state machine\n"); 1413 rxState = rxIdle; 1414 goto exit; 1415 } else 1416 rxState = rxAdvance; 1417 break; 1418 1419 case rxAdvance: 1420 if (link == 0) { 1421 devIntrPost(ISR_RXIDLE); 1422 rxState = rxIdle; 1423 CRDD = true; 1424 goto exit; 1425 } else { 1426 if (rxDmaState != dmaIdle) 1427 goto exit; 1428 rxState = rxDescRead; 1429 regs.rxdp = link; 1430 CRDD = false; 1431 1432 rxDmaAddr = regs.rxdp & 0x3fffffff; 1433 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32; 1434 rxDmaLen = is64bit ? 
sizeof(rxDesc64) : sizeof(rxDesc32); 1435 rxDmaFree = dmaDescFree; 1436 1437 if (doRxDmaRead()) 1438 goto exit; 1439 } 1440 break; 1441 1442 default: 1443 panic("Invalid rxState!"); 1444 } 1445 1446 DPRINTF(EthernetSM, "entering next rxState=%s\n", 1447 NsRxStateStrings[rxState]); 1448 goto next; 1449 1450 exit: 1451 /** 1452 * @todo do we want to schedule a future kick? 1453 */ 1454 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n", 1455 NsRxStateStrings[rxState]); 1456 1457 if (!rxKickEvent.scheduled()) 1458 schedule(rxKickEvent, rxKickTick); 1459} 1460 1461void 1462NSGigE::transmit() 1463{ 1464 if (txFifo.empty()) { 1465 DPRINTF(Ethernet, "nothing to transmit\n"); 1466 return; 1467 } 1468 1469 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n", 1470 txFifo.size()); 1471 if (interface->sendPacket(txFifo.front())) { 1472#if TRACING_ON 1473 if (DTRACE(Ethernet)) { 1474 IpPtr ip(txFifo.front()); 1475 if (ip) { 1476 DPRINTF(Ethernet, "ID is %d\n", ip->id()); 1477 TcpPtr tcp(ip); 1478 if (tcp) { 1479 DPRINTF(Ethernet, 1480 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n", 1481 tcp->sport(), tcp->dport(), tcp->seq(), 1482 tcp->ack()); 1483 } 1484 } 1485 } 1486#endif 1487 1488 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length); 1489 txBytes += txFifo.front()->length; 1490 txPackets++; 1491 1492 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n", 1493 txFifo.avail()); 1494 txFifo.pop(); 1495 1496 /* 1497 * normally do a writeback of the descriptor here, and ONLY 1498 * after that is done, send this interrupt. but since our 1499 * stuff never actually fails, just do this interrupt here, 1500 * otherwise the code has to stray from this nice format. 1501 * besides, it's functionally the same. 
1502 */ 1503 devIntrPost(ISR_TXOK); 1504 } 1505 1506 if (!txFifo.empty() && !txEvent.scheduled()) { 1507 DPRINTF(Ethernet, "reschedule transmit\n"); 1508 schedule(txEvent, curTick() + retryTime); 1509 } 1510} 1511 1512bool 1513NSGigE::doTxDmaRead() 1514{ 1515 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting); 1516 txDmaState = dmaReading; 1517 1518 if (dmaPending() || getDrainState() != Running) 1519 txDmaState = dmaReadWaiting; 1520 else 1521 dmaRead(txDmaAddr, txDmaLen, &txDmaReadEvent, (uint8_t*)txDmaData); 1522 1523 return true; 1524} 1525 1526void 1527NSGigE::txDmaReadDone() 1528{ 1529 assert(txDmaState == dmaReading); 1530 txDmaState = dmaIdle; 1531 1532 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n", 1533 txDmaAddr, txDmaLen); 1534 DDUMP(EthernetDMA, txDmaData, txDmaLen); 1535 1536 // If the receive state machine has a pending DMA, let it go first 1537 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting) 1538 rxKick(); 1539 1540 txKick(); 1541} 1542 1543bool 1544NSGigE::doTxDmaWrite() 1545{ 1546 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting); 1547 txDmaState = dmaWriting; 1548 1549 if (dmaPending() || getDrainState() != Running) 1550 txDmaState = dmaWriteWaiting; 1551 else 1552 dmaWrite(txDmaAddr, txDmaLen, &txDmaWriteEvent, (uint8_t*)txDmaData); 1553 return true; 1554} 1555 1556void 1557NSGigE::txDmaWriteDone() 1558{ 1559 assert(txDmaState == dmaWriting); 1560 txDmaState = dmaIdle; 1561 1562 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n", 1563 txDmaAddr, txDmaLen); 1564 DDUMP(EthernetDMA, txDmaData, txDmaLen); 1565 1566 // If the receive state machine has a pending DMA, let it go first 1567 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting) 1568 rxKick(); 1569 1570 txKick(); 1571} 1572 1573void 1574NSGigE::txKick() 1575{ 1576 bool is64bit = (bool)(regs.config & CFGR_M64ADDR); 1577 1578 DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n", 1579 NsTxStateStrings[txState], is64bit ? 
64 : 32); 1580 1581 Addr link, bufptr; 1582 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts; 1583 uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts; 1584 1585 next: 1586 if (txKickTick > curTick()) { 1587 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n", 1588 txKickTick); 1589 goto exit; 1590 } 1591 1592 // Go to the next state machine clock tick. 1593 txKickTick = clockEdge(Cycles(1)); 1594 1595 switch(txDmaState) { 1596 case dmaReadWaiting: 1597 if (doTxDmaRead()) 1598 goto exit; 1599 break; 1600 case dmaWriteWaiting: 1601 if (doTxDmaWrite()) 1602 goto exit; 1603 break; 1604 default: 1605 break; 1606 } 1607 1608 link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link; 1609 bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr; 1610 switch (txState) { 1611 case txIdle: 1612 if (!txEnable) { 1613 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n"); 1614 goto exit; 1615 } 1616 1617 if (CTDD) { 1618 txState = txDescRefr; 1619 1620 txDmaAddr = regs.txdp & 0x3fffffff; 1621 txDmaData = 1622 is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link; 1623 txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link); 1624 txDmaFree = dmaDescFree; 1625 1626 descDmaReads++; 1627 descDmaRdBytes += txDmaLen; 1628 1629 if (doTxDmaRead()) 1630 goto exit; 1631 1632 } else { 1633 txState = txDescRead; 1634 1635 txDmaAddr = regs.txdp & 0x3fffffff; 1636 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32; 1637 txDmaLen = is64bit ? 
sizeof(txDesc64) : sizeof(txDesc32); 1638 txDmaFree = dmaDescFree; 1639 1640 descDmaReads++; 1641 descDmaRdBytes += txDmaLen; 1642 1643 if (doTxDmaRead()) 1644 goto exit; 1645 } 1646 break; 1647 1648 case txDescRefr: 1649 if (txDmaState != dmaIdle) 1650 goto exit; 1651 1652 txState = txAdvance; 1653 break; 1654 1655 case txDescRead: 1656 if (txDmaState != dmaIdle) 1657 goto exit; 1658 1659 DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n", 1660 regs.txdp & 0x3fffffff); 1661 DPRINTF(EthernetDesc, 1662 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n", 1663 link, bufptr, cmdsts, extsts); 1664 1665 if (cmdsts & CMDSTS_OWN) { 1666 txState = txFifoBlock; 1667 txFragPtr = bufptr; 1668 txDescCnt = cmdsts & CMDSTS_LEN_MASK; 1669 } else { 1670 devIntrPost(ISR_TXIDLE); 1671 txState = txIdle; 1672 goto exit; 1673 } 1674 break; 1675 1676 case txFifoBlock: 1677 if (!txPacket) { 1678 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n"); 1679 txPacket = new EthPacketData(16384); 1680 txPacketBufPtr = txPacket->data; 1681 } 1682 1683 if (txDescCnt == 0) { 1684 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n"); 1685 if (cmdsts & CMDSTS_MORE) { 1686 DPRINTF(EthernetSM, "there are more descriptors to come\n"); 1687 txState = txDescWrite; 1688 1689 cmdsts &= ~CMDSTS_OWN; 1690 1691 txDmaAddr = regs.txdp & 0x3fffffff; 1692 txDmaData = &cmdsts; 1693 if (is64bit) { 1694 txDmaAddr += offsetof(ns_desc64, cmdsts); 1695 txDmaLen = sizeof(txDesc64.cmdsts); 1696 } else { 1697 txDmaAddr += offsetof(ns_desc32, cmdsts); 1698 txDmaLen = sizeof(txDesc32.cmdsts); 1699 } 1700 txDmaFree = dmaDescFree; 1701 1702 if (doTxDmaWrite()) 1703 goto exit; 1704 1705 } else { /* this packet is totally done */ 1706 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n"); 1707 /* deal with the the packet that just finished */ 1708 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) { 1709 IpPtr ip(txPacket); 1710 if (extsts & EXTSTS_UDPPKT) { 1711 UdpPtr udp(ip); 1712 
if (udp) { 1713 udp->sum(0); 1714 udp->sum(cksum(udp)); 1715 txUdpChecksums++; 1716 } else { 1717 Debug::breakpoint(); 1718 warn_once("UDPPKT set, but not UDP!\n"); 1719 } 1720 } else if (extsts & EXTSTS_TCPPKT) { 1721 TcpPtr tcp(ip); 1722 if (tcp) { 1723 tcp->sum(0); 1724 tcp->sum(cksum(tcp)); 1725 txTcpChecksums++; 1726 } else { 1727 Debug::breakpoint(); 1728 warn_once("TCPPKT set, but not UDP!\n"); 1729 } 1730 } 1731 if (extsts & EXTSTS_IPPKT) { 1732 if (ip) { 1733 ip->sum(0); 1734 ip->sum(cksum(ip)); 1735 txIpChecksums++; 1736 } else { 1737 Debug::breakpoint(); 1738 warn_once("IPPKT set, but not UDP!\n"); 1739 } 1740 } 1741 } 1742 1743 txPacket->length = txPacketBufPtr - txPacket->data; 1744 // this is just because the receive can't handle a 1745 // packet bigger want to make sure 1746 if (txPacket->length > 1514) 1747 panic("transmit packet too large, %s > 1514\n", 1748 txPacket->length); 1749 1750#ifndef NDEBUG 1751 bool success = 1752#endif 1753 txFifo.push(txPacket); 1754 assert(success); 1755 1756 /* 1757 * this following section is not tqo spec, but 1758 * functionally shouldn't be any different. normally, 1759 * the chip will wait til the transmit has occurred 1760 * before writing back the descriptor because it has 1761 * to wait to see that it was successfully transmitted 1762 * to decide whether to set CMDSTS_OK or not. 
1763 * however, in the simulator since it is always 1764 * successfully transmitted, and writing it exactly to 1765 * spec would complicate the code, we just do it here 1766 */ 1767 1768 cmdsts &= ~CMDSTS_OWN; 1769 cmdsts |= CMDSTS_OK; 1770 1771 DPRINTF(EthernetDesc, 1772 "txDesc writeback: cmdsts=%08x extsts=%08x\n", 1773 cmdsts, extsts); 1774 1775 txDmaFree = dmaDescFree; 1776 txDmaAddr = regs.txdp & 0x3fffffff; 1777 txDmaData = &cmdsts; 1778 if (is64bit) { 1779 txDmaAddr += offsetof(ns_desc64, cmdsts); 1780 txDmaLen = 1781 sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts); 1782 } else { 1783 txDmaAddr += offsetof(ns_desc32, cmdsts); 1784 txDmaLen = 1785 sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts); 1786 } 1787 1788 descDmaWrites++; 1789 descDmaWrBytes += txDmaLen; 1790 1791 transmit(); 1792 txPacket = 0; 1793 1794 if (!txEnable) { 1795 DPRINTF(EthernetSM, "halting TX state machine\n"); 1796 txState = txIdle; 1797 goto exit; 1798 } else 1799 txState = txAdvance; 1800 1801 if (doTxDmaWrite()) 1802 goto exit; 1803 } 1804 } else { 1805 DPRINTF(EthernetSM, "this descriptor isn't done yet\n"); 1806 if (!txFifo.full()) { 1807 txState = txFragRead; 1808 1809 /* 1810 * The number of bytes transferred is either whatever 1811 * is left in the descriptor (txDescCnt), or if there 1812 * is not enough room in the fifo, just whatever room 1813 * is left in the fifo 1814 */ 1815 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail()); 1816 1817 txDmaAddr = txFragPtr & 0x3fffffff; 1818 txDmaData = txPacketBufPtr; 1819 txDmaLen = txXferLen; 1820 txDmaFree = dmaDataFree; 1821 1822 if (doTxDmaRead()) 1823 goto exit; 1824 } else { 1825 txState = txFifoBlock; 1826 transmit(); 1827 1828 goto exit; 1829 } 1830 1831 } 1832 break; 1833 1834 case txFragRead: 1835 if (txDmaState != dmaIdle) 1836 goto exit; 1837 1838 txPacketBufPtr += txXferLen; 1839 txFragPtr += txXferLen; 1840 txDescCnt -= txXferLen; 1841 txFifo.reserve(txXferLen); 1842 1843 txState = txFifoBlock; 1844 break; 1845 1846 
case txDescWrite: 1847 if (txDmaState != dmaIdle) 1848 goto exit; 1849 1850 if (cmdsts & CMDSTS_INTR) 1851 devIntrPost(ISR_TXDESC); 1852 1853 if (!txEnable) { 1854 DPRINTF(EthernetSM, "halting TX state machine\n"); 1855 txState = txIdle; 1856 goto exit; 1857 } else 1858 txState = txAdvance; 1859 break; 1860 1861 case txAdvance: 1862 if (link == 0) { 1863 devIntrPost(ISR_TXIDLE); 1864 txState = txIdle; 1865 goto exit; 1866 } else { 1867 if (txDmaState != dmaIdle) 1868 goto exit; 1869 txState = txDescRead; 1870 regs.txdp = link; 1871 CTDD = false; 1872 1873 txDmaAddr = link & 0x3fffffff; 1874 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32; 1875 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32); 1876 txDmaFree = dmaDescFree; 1877 1878 if (doTxDmaRead()) 1879 goto exit; 1880 } 1881 break; 1882 1883 default: 1884 panic("invalid state"); 1885 } 1886 1887 DPRINTF(EthernetSM, "entering next txState=%s\n", 1888 NsTxStateStrings[txState]); 1889 goto next; 1890 1891 exit: 1892 /** 1893 * @todo do we want to schedule a future kick? 1894 */ 1895 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n", 1896 NsTxStateStrings[txState]); 1897 1898 if (!txKickEvent.scheduled()) 1899 schedule(txKickEvent, txKickTick); 1900} 1901 1902/** 1903 * Advance the EEPROM state machine 1904 * Called on rising edge of EEPROM clock bit in MEAR 1905 */ 1906void 1907NSGigE::eepromKick() 1908{ 1909 switch (eepromState) { 1910 1911 case eepromStart: 1912 1913 // Wait for start bit 1914 if (regs.mear & MEAR_EEDI) { 1915 // Set up to get 2 opcode bits 1916 eepromState = eepromGetOpcode; 1917 eepromBitsToRx = 2; 1918 eepromOpcode = 0; 1919 } 1920 break; 1921 1922 case eepromGetOpcode: 1923 eepromOpcode <<= 1; 1924 eepromOpcode += (regs.mear & MEAR_EEDI) ? 
1 : 0; 1925 --eepromBitsToRx; 1926 1927 // Done getting opcode 1928 if (eepromBitsToRx == 0) { 1929 if (eepromOpcode != EEPROM_READ) 1930 panic("only EEPROM reads are implemented!"); 1931 1932 // Set up to get address 1933 eepromState = eepromGetAddress; 1934 eepromBitsToRx = 6; 1935 eepromAddress = 0; 1936 } 1937 break; 1938 1939 case eepromGetAddress: 1940 eepromAddress <<= 1; 1941 eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0; 1942 --eepromBitsToRx; 1943 1944 // Done getting address 1945 if (eepromBitsToRx == 0) { 1946 1947 if (eepromAddress >= EEPROM_SIZE) 1948 panic("EEPROM read access out of range!"); 1949 1950 switch (eepromAddress) { 1951 1952 case EEPROM_PMATCH2_ADDR: 1953 eepromData = rom.perfectMatch[5]; 1954 eepromData <<= 8; 1955 eepromData += rom.perfectMatch[4]; 1956 break; 1957 1958 case EEPROM_PMATCH1_ADDR: 1959 eepromData = rom.perfectMatch[3]; 1960 eepromData <<= 8; 1961 eepromData += rom.perfectMatch[2]; 1962 break; 1963 1964 case EEPROM_PMATCH0_ADDR: 1965 eepromData = rom.perfectMatch[1]; 1966 eepromData <<= 8; 1967 eepromData += rom.perfectMatch[0]; 1968 break; 1969 1970 default: 1971 panic("FreeBSD driver only uses EEPROM to read PMATCH!"); 1972 } 1973 // Set up to read data 1974 eepromState = eepromRead; 1975 eepromBitsToRx = 16; 1976 1977 // Clear data in bit 1978 regs.mear &= ~MEAR_EEDI; 1979 } 1980 break; 1981 1982 case eepromRead: 1983 // Clear Data Out bit 1984 regs.mear &= ~MEAR_EEDO; 1985 // Set bit to value of current EEPROM bit 1986 regs.mear |= (eepromData & 0x8000) ? 
MEAR_EEDO : 0x0; 1987 1988 eepromData <<= 1; 1989 --eepromBitsToRx; 1990 1991 // All done 1992 if (eepromBitsToRx == 0) { 1993 eepromState = eepromStart; 1994 } 1995 break; 1996 1997 default: 1998 panic("invalid EEPROM state"); 1999 } 2000 2001} 2002 2003void 2004NSGigE::transferDone() 2005{ 2006 if (txFifo.empty()) { 2007 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n"); 2008 return; 2009 } 2010 2011 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n"); 2012 2013 reschedule(txEvent, clockEdge(Cycles(1)), true); 2014} 2015 2016bool 2017NSGigE::rxFilter(const EthPacketPtr &packet) 2018{ 2019 EthPtr eth = packet; 2020 bool drop = true; 2021 string type; 2022 2023 const EthAddr &dst = eth->dst(); 2024 if (dst.unicast()) { 2025 // If we're accepting all unicast addresses 2026 if (acceptUnicast) 2027 drop = false; 2028 2029 // If we make a perfect match 2030 if (acceptPerfect && dst == rom.perfectMatch) 2031 drop = false; 2032 2033 if (acceptArp && eth->type() == ETH_TYPE_ARP) 2034 drop = false; 2035 2036 } else if (dst.broadcast()) { 2037 // if we're accepting broadcasts 2038 if (acceptBroadcast) 2039 drop = false; 2040 2041 } else if (dst.multicast()) { 2042 // if we're accepting all multicasts 2043 if (acceptMulticast) 2044 drop = false; 2045 2046 // Multicast hashing faked - all packets accepted 2047 if (multicastHashEnable) 2048 drop = false; 2049 } 2050 2051 if (drop) { 2052 DPRINTF(Ethernet, "rxFilter drop\n"); 2053 DDUMP(EthernetData, packet->data, packet->length); 2054 } 2055 2056 return drop; 2057} 2058 2059bool 2060NSGigE::recvPacket(EthPacketPtr packet) 2061{ 2062 rxBytes += packet->length; 2063 rxPackets++; 2064 2065 DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n", 2066 rxFifo.avail()); 2067 2068 if (!rxEnable) { 2069 DPRINTF(Ethernet, "receive disabled...packet dropped\n"); 2070 return true; 2071 } 2072 2073 if (!rxFilterEnable) { 2074 DPRINTF(Ethernet, 2075 "receive packet filtering disabled . . . 
packet dropped\n"); 2076 return true; 2077 } 2078 2079 if (rxFilter(packet)) { 2080 DPRINTF(Ethernet, "packet filtered...dropped\n"); 2081 return true; 2082 } 2083 2084 if (rxFifo.avail() < packet->length) { 2085#if TRACING_ON 2086 IpPtr ip(packet); 2087 TcpPtr tcp(ip); 2088 if (ip) { 2089 DPRINTF(Ethernet, 2090 "packet won't fit in receive buffer...pkt ID %d dropped\n", 2091 ip->id()); 2092 if (tcp) { 2093 DPRINTF(Ethernet, "Seq=%d\n", tcp->seq()); 2094 } 2095 } 2096#endif 2097 droppedPackets++; 2098 devIntrPost(ISR_RXORN); 2099 return false; 2100 } 2101 2102 rxFifo.push(packet); 2103 2104 rxKick(); 2105 return true; 2106} 2107 2108 2109void 2110NSGigE::drainResume() 2111{ 2112 Drainable::drainResume(); 2113 2114 // During drain we could have left the state machines in a waiting state and 2115 // they wouldn't get out until some other event occured to kick them. 2116 // This way they'll get out immediately 2117 txKick(); 2118 rxKick(); 2119} 2120 2121 2122//===================================================================== 2123// 2124// 2125void 2126NSGigE::serialize(ostream &os) 2127{ 2128 // Serialize the PciDevice base class 2129 PciDevice::serialize(os); 2130 2131 /* 2132 * Finalize any DMA events now. 2133 */ 2134 // @todo will mem system save pending dma? 
2135 2136 /* 2137 * Serialize the device registers 2138 */ 2139 SERIALIZE_SCALAR(regs.command); 2140 SERIALIZE_SCALAR(regs.config); 2141 SERIALIZE_SCALAR(regs.mear); 2142 SERIALIZE_SCALAR(regs.ptscr); 2143 SERIALIZE_SCALAR(regs.isr); 2144 SERIALIZE_SCALAR(regs.imr); 2145 SERIALIZE_SCALAR(regs.ier); 2146 SERIALIZE_SCALAR(regs.ihr); 2147 SERIALIZE_SCALAR(regs.txdp); 2148 SERIALIZE_SCALAR(regs.txdp_hi); 2149 SERIALIZE_SCALAR(regs.txcfg); 2150 SERIALIZE_SCALAR(regs.gpior); 2151 SERIALIZE_SCALAR(regs.rxdp); 2152 SERIALIZE_SCALAR(regs.rxdp_hi); 2153 SERIALIZE_SCALAR(regs.rxcfg); 2154 SERIALIZE_SCALAR(regs.pqcr); 2155 SERIALIZE_SCALAR(regs.wcsr); 2156 SERIALIZE_SCALAR(regs.pcr); 2157 SERIALIZE_SCALAR(regs.rfcr); 2158 SERIALIZE_SCALAR(regs.rfdr); 2159 SERIALIZE_SCALAR(regs.brar); 2160 SERIALIZE_SCALAR(regs.brdr); 2161 SERIALIZE_SCALAR(regs.srr); 2162 SERIALIZE_SCALAR(regs.mibc); 2163 SERIALIZE_SCALAR(regs.vrcr); 2164 SERIALIZE_SCALAR(regs.vtcr); 2165 SERIALIZE_SCALAR(regs.vdr); 2166 SERIALIZE_SCALAR(regs.ccsr); 2167 SERIALIZE_SCALAR(regs.tbicr); 2168 SERIALIZE_SCALAR(regs.tbisr); 2169 SERIALIZE_SCALAR(regs.tanar); 2170 SERIALIZE_SCALAR(regs.tanlpar); 2171 SERIALIZE_SCALAR(regs.taner); 2172 SERIALIZE_SCALAR(regs.tesr); 2173 2174 SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN); 2175 SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE); 2176 2177 SERIALIZE_SCALAR(ioEnable); 2178 2179 /* 2180 * Serialize the data Fifos 2181 */ 2182 rxFifo.serialize("rxFifo", os); 2183 txFifo.serialize("txFifo", os); 2184 2185 /* 2186 * Serialize the various helper variables 2187 */ 2188 bool txPacketExists = txPacket; 2189 SERIALIZE_SCALAR(txPacketExists); 2190 if (txPacketExists) { 2191 txPacket->length = txPacketBufPtr - txPacket->data; 2192 txPacket->serialize("txPacket", os); 2193 uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data); 2194 SERIALIZE_SCALAR(txPktBufPtr); 2195 } 2196 2197 bool rxPacketExists = rxPacket; 2198 SERIALIZE_SCALAR(rxPacketExists); 2199 if (rxPacketExists) { 
2200 rxPacket->serialize("rxPacket", os); 2201 uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data); 2202 SERIALIZE_SCALAR(rxPktBufPtr); 2203 } 2204 2205 SERIALIZE_SCALAR(txXferLen); 2206 SERIALIZE_SCALAR(rxXferLen); 2207 2208 /* 2209 * Serialize Cached Descriptors 2210 */ 2211 SERIALIZE_SCALAR(rxDesc64.link); 2212 SERIALIZE_SCALAR(rxDesc64.bufptr); 2213 SERIALIZE_SCALAR(rxDesc64.cmdsts); 2214 SERIALIZE_SCALAR(rxDesc64.extsts); 2215 SERIALIZE_SCALAR(txDesc64.link); 2216 SERIALIZE_SCALAR(txDesc64.bufptr); 2217 SERIALIZE_SCALAR(txDesc64.cmdsts); 2218 SERIALIZE_SCALAR(txDesc64.extsts); 2219 SERIALIZE_SCALAR(rxDesc32.link); 2220 SERIALIZE_SCALAR(rxDesc32.bufptr); 2221 SERIALIZE_SCALAR(rxDesc32.cmdsts); 2222 SERIALIZE_SCALAR(rxDesc32.extsts); 2223 SERIALIZE_SCALAR(txDesc32.link); 2224 SERIALIZE_SCALAR(txDesc32.bufptr); 2225 SERIALIZE_SCALAR(txDesc32.cmdsts); 2226 SERIALIZE_SCALAR(txDesc32.extsts); 2227 SERIALIZE_SCALAR(extstsEnable); 2228 2229 /* 2230 * Serialize tx state machine 2231 */ 2232 int txState = this->txState; 2233 SERIALIZE_SCALAR(txState); 2234 SERIALIZE_SCALAR(txEnable); 2235 SERIALIZE_SCALAR(CTDD); 2236 SERIALIZE_SCALAR(txFragPtr); 2237 SERIALIZE_SCALAR(txDescCnt); 2238 int txDmaState = this->txDmaState; 2239 SERIALIZE_SCALAR(txDmaState); 2240 SERIALIZE_SCALAR(txKickTick); 2241 2242 /* 2243 * Serialize rx state machine 2244 */ 2245 int rxState = this->rxState; 2246 SERIALIZE_SCALAR(rxState); 2247 SERIALIZE_SCALAR(rxEnable); 2248 SERIALIZE_SCALAR(CRDD); 2249 SERIALIZE_SCALAR(rxPktBytes); 2250 SERIALIZE_SCALAR(rxFragPtr); 2251 SERIALIZE_SCALAR(rxDescCnt); 2252 int rxDmaState = this->rxDmaState; 2253 SERIALIZE_SCALAR(rxDmaState); 2254 SERIALIZE_SCALAR(rxKickTick); 2255 2256 /* 2257 * Serialize EEPROM state machine 2258 */ 2259 int eepromState = this->eepromState; 2260 SERIALIZE_SCALAR(eepromState); 2261 SERIALIZE_SCALAR(eepromClk); 2262 SERIALIZE_SCALAR(eepromBitsToRx); 2263 SERIALIZE_SCALAR(eepromOpcode); 2264 SERIALIZE_SCALAR(eepromAddress); 
2265 SERIALIZE_SCALAR(eepromData); 2266 2267 /* 2268 * If there's a pending transmit, store the time so we can 2269 * reschedule it later 2270 */ 2271 Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick() : 0; 2272 SERIALIZE_SCALAR(transmitTick); 2273 2274 /* 2275 * receive address filter settings 2276 */ 2277 SERIALIZE_SCALAR(rxFilterEnable); 2278 SERIALIZE_SCALAR(acceptBroadcast); 2279 SERIALIZE_SCALAR(acceptMulticast); 2280 SERIALIZE_SCALAR(acceptUnicast); 2281 SERIALIZE_SCALAR(acceptPerfect); 2282 SERIALIZE_SCALAR(acceptArp); 2283 SERIALIZE_SCALAR(multicastHashEnable); 2284 2285 /* 2286 * Keep track of pending interrupt status. 2287 */ 2288 SERIALIZE_SCALAR(intrTick); 2289 SERIALIZE_SCALAR(cpuPendingIntr); 2290 Tick intrEventTick = 0; 2291 if (intrEvent) 2292 intrEventTick = intrEvent->when(); 2293 SERIALIZE_SCALAR(intrEventTick); 2294 2295} 2296 2297void 2298NSGigE::unserialize(Checkpoint *cp, const std::string §ion) 2299{ 2300 // Unserialize the PciDevice base class 2301 PciDevice::unserialize(cp, section); 2302 2303 UNSERIALIZE_SCALAR(regs.command); 2304 UNSERIALIZE_SCALAR(regs.config); 2305 UNSERIALIZE_SCALAR(regs.mear); 2306 UNSERIALIZE_SCALAR(regs.ptscr); 2307 UNSERIALIZE_SCALAR(regs.isr); 2308 UNSERIALIZE_SCALAR(regs.imr); 2309 UNSERIALIZE_SCALAR(regs.ier); 2310 UNSERIALIZE_SCALAR(regs.ihr); 2311 UNSERIALIZE_SCALAR(regs.txdp); 2312 UNSERIALIZE_SCALAR(regs.txdp_hi); 2313 UNSERIALIZE_SCALAR(regs.txcfg); 2314 UNSERIALIZE_SCALAR(regs.gpior); 2315 UNSERIALIZE_SCALAR(regs.rxdp); 2316 UNSERIALIZE_SCALAR(regs.rxdp_hi); 2317 UNSERIALIZE_SCALAR(regs.rxcfg); 2318 UNSERIALIZE_SCALAR(regs.pqcr); 2319 UNSERIALIZE_SCALAR(regs.wcsr); 2320 UNSERIALIZE_SCALAR(regs.pcr); 2321 UNSERIALIZE_SCALAR(regs.rfcr); 2322 UNSERIALIZE_SCALAR(regs.rfdr); 2323 UNSERIALIZE_SCALAR(regs.brar); 2324 UNSERIALIZE_SCALAR(regs.brdr); 2325 UNSERIALIZE_SCALAR(regs.srr); 2326 UNSERIALIZE_SCALAR(regs.mibc); 2327 UNSERIALIZE_SCALAR(regs.vrcr); 2328 UNSERIALIZE_SCALAR(regs.vtcr); 2329 
UNSERIALIZE_SCALAR(regs.vdr); 2330 UNSERIALIZE_SCALAR(regs.ccsr); 2331 UNSERIALIZE_SCALAR(regs.tbicr); 2332 UNSERIALIZE_SCALAR(regs.tbisr); 2333 UNSERIALIZE_SCALAR(regs.tanar); 2334 UNSERIALIZE_SCALAR(regs.tanlpar); 2335 UNSERIALIZE_SCALAR(regs.taner); 2336 UNSERIALIZE_SCALAR(regs.tesr); 2337 2338 UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN); 2339 UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE); 2340 2341 UNSERIALIZE_SCALAR(ioEnable); 2342 2343 /* 2344 * unserialize the data fifos 2345 */ 2346 rxFifo.unserialize("rxFifo", cp, section); 2347 txFifo.unserialize("txFifo", cp, section); 2348 2349 /* 2350 * unserialize the various helper variables 2351 */ 2352 bool txPacketExists; 2353 UNSERIALIZE_SCALAR(txPacketExists); 2354 if (txPacketExists) { 2355 txPacket = new EthPacketData(16384); 2356 txPacket->unserialize("txPacket", cp, section); 2357 uint32_t txPktBufPtr; 2358 UNSERIALIZE_SCALAR(txPktBufPtr); 2359 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr; 2360 } else 2361 txPacket = 0; 2362 2363 bool rxPacketExists; 2364 UNSERIALIZE_SCALAR(rxPacketExists); 2365 rxPacket = 0; 2366 if (rxPacketExists) { 2367 rxPacket = new EthPacketData(16384); 2368 rxPacket->unserialize("rxPacket", cp, section); 2369 uint32_t rxPktBufPtr; 2370 UNSERIALIZE_SCALAR(rxPktBufPtr); 2371 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr; 2372 } else 2373 rxPacket = 0; 2374 2375 UNSERIALIZE_SCALAR(txXferLen); 2376 UNSERIALIZE_SCALAR(rxXferLen); 2377 2378 /* 2379 * Unserialize Cached Descriptors 2380 */ 2381 UNSERIALIZE_SCALAR(rxDesc64.link); 2382 UNSERIALIZE_SCALAR(rxDesc64.bufptr); 2383 UNSERIALIZE_SCALAR(rxDesc64.cmdsts); 2384 UNSERIALIZE_SCALAR(rxDesc64.extsts); 2385 UNSERIALIZE_SCALAR(txDesc64.link); 2386 UNSERIALIZE_SCALAR(txDesc64.bufptr); 2387 UNSERIALIZE_SCALAR(txDesc64.cmdsts); 2388 UNSERIALIZE_SCALAR(txDesc64.extsts); 2389 UNSERIALIZE_SCALAR(rxDesc32.link); 2390 UNSERIALIZE_SCALAR(rxDesc32.bufptr); 2391 UNSERIALIZE_SCALAR(rxDesc32.cmdsts); 2392 
UNSERIALIZE_SCALAR(rxDesc32.extsts); 2393 UNSERIALIZE_SCALAR(txDesc32.link); 2394 UNSERIALIZE_SCALAR(txDesc32.bufptr); 2395 UNSERIALIZE_SCALAR(txDesc32.cmdsts); 2396 UNSERIALIZE_SCALAR(txDesc32.extsts); 2397 UNSERIALIZE_SCALAR(extstsEnable); 2398 2399 /* 2400 * unserialize tx state machine 2401 */ 2402 int txState; 2403 UNSERIALIZE_SCALAR(txState); 2404 this->txState = (TxState) txState; 2405 UNSERIALIZE_SCALAR(txEnable); 2406 UNSERIALIZE_SCALAR(CTDD); 2407 UNSERIALIZE_SCALAR(txFragPtr); 2408 UNSERIALIZE_SCALAR(txDescCnt); 2409 int txDmaState; 2410 UNSERIALIZE_SCALAR(txDmaState); 2411 this->txDmaState = (DmaState) txDmaState; 2412 UNSERIALIZE_SCALAR(txKickTick); 2413 if (txKickTick) 2414 schedule(txKickEvent, txKickTick); 2415 2416 /* 2417 * unserialize rx state machine 2418 */ 2419 int rxState; 2420 UNSERIALIZE_SCALAR(rxState); 2421 this->rxState = (RxState) rxState; 2422 UNSERIALIZE_SCALAR(rxEnable); 2423 UNSERIALIZE_SCALAR(CRDD); 2424 UNSERIALIZE_SCALAR(rxPktBytes); 2425 UNSERIALIZE_SCALAR(rxFragPtr); 2426 UNSERIALIZE_SCALAR(rxDescCnt); 2427 int rxDmaState; 2428 UNSERIALIZE_SCALAR(rxDmaState); 2429 this->rxDmaState = (DmaState) rxDmaState; 2430 UNSERIALIZE_SCALAR(rxKickTick); 2431 if (rxKickTick) 2432 schedule(rxKickEvent, rxKickTick); 2433 2434 /* 2435 * Unserialize EEPROM state machine 2436 */ 2437 int eepromState; 2438 UNSERIALIZE_SCALAR(eepromState); 2439 this->eepromState = (EEPROMState) eepromState; 2440 UNSERIALIZE_SCALAR(eepromClk); 2441 UNSERIALIZE_SCALAR(eepromBitsToRx); 2442 UNSERIALIZE_SCALAR(eepromOpcode); 2443 UNSERIALIZE_SCALAR(eepromAddress); 2444 UNSERIALIZE_SCALAR(eepromData); 2445 2446 /* 2447 * If there's a pending transmit, reschedule it now 2448 */ 2449 Tick transmitTick; 2450 UNSERIALIZE_SCALAR(transmitTick); 2451 if (transmitTick) 2452 schedule(txEvent, curTick() + transmitTick); 2453 2454 /* 2455 * unserialize receive address filter settings 2456 */ 2457 UNSERIALIZE_SCALAR(rxFilterEnable); 2458 UNSERIALIZE_SCALAR(acceptBroadcast); 2459 
UNSERIALIZE_SCALAR(acceptMulticast); 2460 UNSERIALIZE_SCALAR(acceptUnicast); 2461 UNSERIALIZE_SCALAR(acceptPerfect); 2462 UNSERIALIZE_SCALAR(acceptArp); 2463 UNSERIALIZE_SCALAR(multicastHashEnable); 2464 2465 /* 2466 * Keep track of pending interrupt status. 2467 */ 2468 UNSERIALIZE_SCALAR(intrTick); 2469 UNSERIALIZE_SCALAR(cpuPendingIntr); 2470 Tick intrEventTick; 2471 UNSERIALIZE_SCALAR(intrEventTick); 2472 if (intrEventTick) { 2473 intrEvent = new IntrEvent(this, true); 2474 schedule(intrEvent, intrEventTick); 2475 } 2476} 2477 2478NSGigE * 2479NSGigEParams::create() 2480{ 2481 return new NSGigE(this); 2482} 2483