ns_gige.cc revision 10469
1/* 2 * Copyright (c) 2004-2005 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 * 28 * Authors: Nathan Binkert 29 * Lisa Hsu 30 */ 31 32/** @file 33 * Device module for modelling the National Semiconductor 34 * DP83820 ethernet controller. 
Does not support priority queueing 35 */ 36#include <deque> 37#include <memory> 38#include <string> 39 40#include "base/debug.hh" 41#include "base/inet.hh" 42#include "base/types.hh" 43#include "config/the_isa.hh" 44#include "debug/EthernetAll.hh" 45#include "dev/etherlink.hh" 46#include "dev/ns_gige.hh" 47#include "dev/pciconfigall.hh" 48#include "mem/packet.hh" 49#include "mem/packet_access.hh" 50#include "params/NSGigE.hh" 51#include "sim/system.hh" 52 53// clang complains about std::set being overloaded with Packet::set if 54// we open up the entire namespace std 55using std::make_shared; 56using std::min; 57using std::ostream; 58using std::string; 59 60const char *NsRxStateStrings[] = 61{ 62 "rxIdle", 63 "rxDescRefr", 64 "rxDescRead", 65 "rxFifoBlock", 66 "rxFragWrite", 67 "rxDescWrite", 68 "rxAdvance" 69}; 70 71const char *NsTxStateStrings[] = 72{ 73 "txIdle", 74 "txDescRefr", 75 "txDescRead", 76 "txFifoBlock", 77 "txFragRead", 78 "txDescWrite", 79 "txAdvance" 80}; 81 82const char *NsDmaState[] = 83{ 84 "dmaIdle", 85 "dmaReading", 86 "dmaWriting", 87 "dmaReadWaiting", 88 "dmaWriteWaiting" 89}; 90 91using namespace Net; 92using namespace TheISA; 93 94/////////////////////////////////////////////////////////////////////// 95// 96// NSGigE PCI Device 97// 98NSGigE::NSGigE(Params *p) 99 : EtherDevBase(p), ioEnable(false), 100 txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size), 101 txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL), 102 txXferLen(0), rxXferLen(0), rxDmaFree(false), txDmaFree(false), 103 txState(txIdle), txEnable(false), CTDD(false), txHalt(false), 104 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle), 105 rxEnable(false), CRDD(false), rxPktBytes(0), rxHalt(false), 106 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false), 107 eepromState(eepromStart), eepromClk(false), eepromBitsToRx(0), 108 eepromOpcode(0), eepromAddress(0), eepromData(0), 109 dmaReadDelay(p->dma_read_delay), 
dmaWriteDelay(p->dma_write_delay), 110 dmaReadFactor(p->dma_read_factor), dmaWriteFactor(p->dma_write_factor), 111 rxDmaData(NULL), rxDmaAddr(0), rxDmaLen(0), 112 txDmaData(NULL), txDmaAddr(0), txDmaLen(0), 113 rxDmaReadEvent(this), rxDmaWriteEvent(this), 114 txDmaReadEvent(this), txDmaWriteEvent(this), 115 dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free), 116 txDelay(p->tx_delay), rxDelay(p->rx_delay), 117 rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this), 118 txEvent(this), rxFilterEnable(p->rx_filter), 119 acceptBroadcast(false), acceptMulticast(false), acceptUnicast(false), 120 acceptPerfect(false), acceptArp(false), multicastHashEnable(false), 121 intrDelay(p->intr_delay), intrTick(0), cpuPendingIntr(false), 122 intrEvent(0), interface(0) 123{ 124 125 126 interface = new NSGigEInt(name() + ".int0", this); 127 128 regsReset(); 129 memcpy(&rom.perfectMatch, p->hardware_address.bytes(), ETH_ADDR_LEN); 130 131 memset(&rxDesc32, 0, sizeof(rxDesc32)); 132 memset(&txDesc32, 0, sizeof(txDesc32)); 133 memset(&rxDesc64, 0, sizeof(rxDesc64)); 134 memset(&txDesc64, 0, sizeof(txDesc64)); 135} 136 137NSGigE::~NSGigE() 138{ 139 delete interface; 140} 141 142/** 143 * This is to write to the PCI general configuration registers 144 */ 145Tick 146NSGigE::writeConfig(PacketPtr pkt) 147{ 148 int offset = pkt->getAddr() & PCI_CONFIG_SIZE; 149 if (offset < PCI_DEVICE_SPECIFIC) 150 PciDevice::writeConfig(pkt); 151 else 152 panic("Device specific PCI config space not implemented!\n"); 153 154 switch (offset) { 155 // seems to work fine without all these PCI settings, but i 156 // put in the IO to double check, an assertion will fail if we 157 // need to properly implement it 158 case PCI_COMMAND: 159 if (config.data[offset] & PCI_CMD_IOSE) 160 ioEnable = true; 161 else 162 ioEnable = false; 163 break; 164 } 165 166 return configDelay; 167} 168 169EtherInt* 170NSGigE::getEthPort(const std::string &if_name, int idx) 171{ 172 if (if_name == "interface") { 
173 if (interface->getPeer()) 174 panic("interface already connected to\n"); 175 return interface; 176 } 177 return NULL; 178} 179 180/** 181 * This reads the device registers, which are detailed in the NS83820 182 * spec sheet 183 */ 184Tick 185NSGigE::read(PacketPtr pkt) 186{ 187 assert(ioEnable); 188 189 pkt->allocate(); 190 191 //The mask is to give you only the offset into the device register file 192 Addr daddr = pkt->getAddr() & 0xfff; 193 DPRINTF(EthernetPIO, "read da=%#x pa=%#x size=%d\n", 194 daddr, pkt->getAddr(), pkt->getSize()); 195 196 197 // there are some reserved registers, you can see ns_gige_reg.h and 198 // the spec sheet for details 199 if (daddr > LAST && daddr <= RESERVED) { 200 panic("Accessing reserved register"); 201 } else if (daddr > RESERVED && daddr <= 0x3FC) { 202 return readConfig(pkt); 203 } else if (daddr >= MIB_START && daddr <= MIB_END) { 204 // don't implement all the MIB's. hopefully the kernel 205 // doesn't actually DEPEND upon their values 206 // MIB are just hardware stats keepers 207 pkt->set<uint32_t>(0); 208 pkt->makeAtomicResponse(); 209 return pioDelay; 210 } else if (daddr > 0x3FC) 211 panic("Something is messed up!\n"); 212 213 assert(pkt->getSize() == sizeof(uint32_t)); 214 uint32_t ® = *pkt->getPtr<uint32_t>(); 215 uint16_t rfaddr; 216 217 switch (daddr) { 218 case CR: 219 reg = regs.command; 220 //these are supposed to be cleared on a read 221 reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR); 222 break; 223 224 case CFGR: 225 reg = regs.config; 226 break; 227 228 case MEAR: 229 reg = regs.mear; 230 break; 231 232 case PTSCR: 233 reg = regs.ptscr; 234 break; 235 236 case ISR: 237 reg = regs.isr; 238 devIntrClear(ISR_ALL); 239 break; 240 241 case IMR: 242 reg = regs.imr; 243 break; 244 245 case IER: 246 reg = regs.ier; 247 break; 248 249 case IHR: 250 reg = regs.ihr; 251 break; 252 253 case TXDP: 254 reg = regs.txdp; 255 break; 256 257 case TXDP_HI: 258 reg = regs.txdp_hi; 259 break; 260 261 case TX_CFG: 262 reg = 
regs.txcfg; 263 break; 264 265 case GPIOR: 266 reg = regs.gpior; 267 break; 268 269 case RXDP: 270 reg = regs.rxdp; 271 break; 272 273 case RXDP_HI: 274 reg = regs.rxdp_hi; 275 break; 276 277 case RX_CFG: 278 reg = regs.rxcfg; 279 break; 280 281 case PQCR: 282 reg = regs.pqcr; 283 break; 284 285 case WCSR: 286 reg = regs.wcsr; 287 break; 288 289 case PCR: 290 reg = regs.pcr; 291 break; 292 293 // see the spec sheet for how RFCR and RFDR work 294 // basically, you write to RFCR to tell the machine 295 // what you want to do next, then you act upon RFDR, 296 // and the device will be prepared b/c of what you 297 // wrote to RFCR 298 case RFCR: 299 reg = regs.rfcr; 300 break; 301 302 case RFDR: 303 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR); 304 switch (rfaddr) { 305 // Read from perfect match ROM octets 306 case 0x000: 307 reg = rom.perfectMatch[1]; 308 reg = reg << 8; 309 reg += rom.perfectMatch[0]; 310 break; 311 case 0x002: 312 reg = rom.perfectMatch[3] << 8; 313 reg += rom.perfectMatch[2]; 314 break; 315 case 0x004: 316 reg = rom.perfectMatch[5] << 8; 317 reg += rom.perfectMatch[4]; 318 break; 319 default: 320 // Read filter hash table 321 if (rfaddr >= FHASH_ADDR && 322 rfaddr < FHASH_ADDR + FHASH_SIZE) { 323 324 // Only word-aligned reads supported 325 if (rfaddr % 2) 326 panic("unaligned read from filter hash table!"); 327 328 reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8; 329 reg += rom.filterHash[rfaddr - FHASH_ADDR]; 330 break; 331 } 332 333 panic("reading RFDR for something other than pattern" 334 " matching or hashing! 
%#x\n", rfaddr); 335 } 336 break; 337 338 case SRR: 339 reg = regs.srr; 340 break; 341 342 case MIBC: 343 reg = regs.mibc; 344 reg &= ~(MIBC_MIBS | MIBC_ACLR); 345 break; 346 347 case VRCR: 348 reg = regs.vrcr; 349 break; 350 351 case VTCR: 352 reg = regs.vtcr; 353 break; 354 355 case VDR: 356 reg = regs.vdr; 357 break; 358 359 case CCSR: 360 reg = regs.ccsr; 361 break; 362 363 case TBICR: 364 reg = regs.tbicr; 365 break; 366 367 case TBISR: 368 reg = regs.tbisr; 369 break; 370 371 case TANAR: 372 reg = regs.tanar; 373 break; 374 375 case TANLPAR: 376 reg = regs.tanlpar; 377 break; 378 379 case TANER: 380 reg = regs.taner; 381 break; 382 383 case TESR: 384 reg = regs.tesr; 385 break; 386 387 case M5REG: 388 reg = 0; 389 if (params()->rx_thread) 390 reg |= M5REG_RX_THREAD; 391 if (params()->tx_thread) 392 reg |= M5REG_TX_THREAD; 393 if (params()->rss) 394 reg |= M5REG_RSS; 395 break; 396 397 default: 398 panic("reading unimplemented register: addr=%#x", daddr); 399 } 400 401 DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n", 402 daddr, reg, reg); 403 404 pkt->makeAtomicResponse(); 405 return pioDelay; 406} 407 408Tick 409NSGigE::write(PacketPtr pkt) 410{ 411 assert(ioEnable); 412 413 Addr daddr = pkt->getAddr() & 0xfff; 414 DPRINTF(EthernetPIO, "write da=%#x pa=%#x size=%d\n", 415 daddr, pkt->getAddr(), pkt->getSize()); 416 417 if (daddr > LAST && daddr <= RESERVED) { 418 panic("Accessing reserved register"); 419 } else if (daddr > RESERVED && daddr <= 0x3FC) { 420 return writeConfig(pkt); 421 } else if (daddr > 0x3FC) 422 panic("Something is messed up!\n"); 423 424 if (pkt->getSize() == sizeof(uint32_t)) { 425 uint32_t reg = pkt->get<uint32_t>(); 426 uint16_t rfaddr; 427 428 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg); 429 430 switch (daddr) { 431 case CR: 432 regs.command = reg; 433 if (reg & CR_TXD) { 434 txEnable = false; 435 } else if (reg & CR_TXE) { 436 txEnable = true; 437 438 // the kernel is enabling the transmit machine 439 if 
(txState == txIdle) 440 txKick(); 441 } 442 443 if (reg & CR_RXD) { 444 rxEnable = false; 445 } else if (reg & CR_RXE) { 446 rxEnable = true; 447 448 if (rxState == rxIdle) 449 rxKick(); 450 } 451 452 if (reg & CR_TXR) 453 txReset(); 454 455 if (reg & CR_RXR) 456 rxReset(); 457 458 if (reg & CR_SWI) 459 devIntrPost(ISR_SWI); 460 461 if (reg & CR_RST) { 462 txReset(); 463 rxReset(); 464 465 regsReset(); 466 } 467 break; 468 469 case CFGR: 470 if (reg & CFGR_LNKSTS || 471 reg & CFGR_SPDSTS || 472 reg & CFGR_DUPSTS || 473 reg & CFGR_RESERVED || 474 reg & CFGR_T64ADDR || 475 reg & CFGR_PCI64_DET) { 476 // First clear all writable bits 477 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS | 478 CFGR_RESERVED | CFGR_T64ADDR | 479 CFGR_PCI64_DET; 480 // Now set the appropriate writable bits 481 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS | 482 CFGR_RESERVED | CFGR_T64ADDR | 483 CFGR_PCI64_DET); 484 } 485 486// all these #if 0's are because i don't THINK the kernel needs to 487// have these implemented. if there is a problem relating to one of 488// these, you may need to add functionality in. 
489 490// grouped together and #if 0'ed to avoid empty if body and make clang happy 491#if 0 492 if (reg & CFGR_TBI_EN) ; 493 if (reg & CFGR_MODE_1000) ; 494 495 if (reg & CFGR_PINT_DUPSTS || 496 reg & CFGR_PINT_LNKSTS || 497 reg & CFGR_PINT_SPDSTS) 498 ; 499 500 if (reg & CFGR_TMRTEST) ; 501 if (reg & CFGR_MRM_DIS) ; 502 if (reg & CFGR_MWI_DIS) ; 503 504 if (reg & CFGR_DATA64_EN) ; 505 if (reg & CFGR_M64ADDR) ; 506 if (reg & CFGR_PHY_RST) ; 507 if (reg & CFGR_PHY_DIS) ; 508 509 if (reg & CFGR_REQALG) ; 510 if (reg & CFGR_SB) ; 511 if (reg & CFGR_POW) ; 512 if (reg & CFGR_EXD) ; 513 if (reg & CFGR_PESEL) ; 514 if (reg & CFGR_BROM_DIS) ; 515 if (reg & CFGR_EXT_125) ; 516 if (reg & CFGR_BEM) ; 517 518 if (reg & CFGR_T64ADDR) ; 519 // panic("CFGR_T64ADDR is read only register!\n"); 520#endif 521 if (reg & CFGR_AUTO_1000) 522 panic("CFGR_AUTO_1000 not implemented!\n"); 523 524 if (reg & CFGR_PCI64_DET) 525 panic("CFGR_PCI64_DET is read only register!\n"); 526 527 if (reg & CFGR_EXTSTS_EN) 528 extstsEnable = true; 529 else 530 extstsEnable = false; 531 break; 532 533 case MEAR: 534 // Clear writable bits 535 regs.mear &= MEAR_EEDO; 536 // Set appropriate writable bits 537 regs.mear |= reg & ~MEAR_EEDO; 538 539 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address) 540 // even though it could get it through RFDR 541 if (reg & MEAR_EESEL) { 542 // Rising edge of clock 543 if (reg & MEAR_EECLK && !eepromClk) 544 eepromKick(); 545 } 546 else { 547 eepromState = eepromStart; 548 regs.mear &= ~MEAR_EEDI; 549 } 550 551 eepromClk = reg & MEAR_EECLK; 552 553 // since phy is completely faked, MEAR_MD* don't matter 554 555// grouped together and #if 0'ed to avoid empty if body and make clang happy 556#if 0 557 if (reg & MEAR_MDIO) ; 558 if (reg & MEAR_MDDIR) ; 559 if (reg & MEAR_MDC) ; 560#endif 561 break; 562 563 case PTSCR: 564 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY); 565 // these control BISTs for various parts of chip - we 566 // don't care or do just fake that the 
BIST is done 567 if (reg & PTSCR_RBIST_EN) 568 regs.ptscr |= PTSCR_RBIST_DONE; 569 if (reg & PTSCR_EEBIST_EN) 570 regs.ptscr &= ~PTSCR_EEBIST_EN; 571 if (reg & PTSCR_EELOAD_EN) 572 regs.ptscr &= ~PTSCR_EELOAD_EN; 573 break; 574 575 case ISR: /* writing to the ISR has no effect */ 576 panic("ISR is a read only register!\n"); 577 578 case IMR: 579 regs.imr = reg; 580 devIntrChangeMask(); 581 break; 582 583 case IER: 584 regs.ier = reg; 585 break; 586 587 case IHR: 588 regs.ihr = reg; 589 /* not going to implement real interrupt holdoff */ 590 break; 591 592 case TXDP: 593 regs.txdp = (reg & 0xFFFFFFFC); 594 assert(txState == txIdle); 595 CTDD = false; 596 break; 597 598 case TXDP_HI: 599 regs.txdp_hi = reg; 600 break; 601 602 case TX_CFG: 603 regs.txcfg = reg; 604#if 0 605 if (reg & TX_CFG_CSI) ; 606 if (reg & TX_CFG_HBI) ; 607 if (reg & TX_CFG_MLB) ; 608 if (reg & TX_CFG_ATP) ; 609 if (reg & TX_CFG_ECRETRY) { 610 /* 611 * this could easily be implemented, but considering 612 * the network is just a fake pipe, wouldn't make 613 * sense to do this 614 */ 615 } 616 617 if (reg & TX_CFG_BRST_DIS) ; 618#endif 619 620#if 0 621 /* we handle our own DMA, ignore the kernel's exhortations */ 622 if (reg & TX_CFG_MXDMA) ; 623#endif 624 625 // also, we currently don't care about fill/drain 626 // thresholds though this may change in the future with 627 // more realistic networks or a driver which changes it 628 // according to feedback 629 630 break; 631 632 case GPIOR: 633 // Only write writable bits 634 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN 635 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN; 636 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN 637 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN); 638 /* these just control general purpose i/o pins, don't matter */ 639 break; 640 641 case RXDP: 642 regs.rxdp = reg; 643 CRDD = false; 644 break; 645 646 case RXDP_HI: 647 regs.rxdp_hi = reg; 648 break; 649 650 case RX_CFG: 651 regs.rxcfg = reg; 652#if 
0 653 if (reg & RX_CFG_AEP) ; 654 if (reg & RX_CFG_ARP) ; 655 if (reg & RX_CFG_STRIPCRC) ; 656 if (reg & RX_CFG_RX_RD) ; 657 if (reg & RX_CFG_ALP) ; 658 if (reg & RX_CFG_AIRL) ; 659 660 /* we handle our own DMA, ignore what kernel says about it */ 661 if (reg & RX_CFG_MXDMA) ; 662 663 //also, we currently don't care about fill/drain thresholds 664 //though this may change in the future with more realistic 665 //networks or a driver which changes it according to feedback 666 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ; 667#endif 668 break; 669 670 case PQCR: 671 /* there is no priority queueing used in the linux 2.6 driver */ 672 regs.pqcr = reg; 673 break; 674 675 case WCSR: 676 /* not going to implement wake on LAN */ 677 regs.wcsr = reg; 678 break; 679 680 case PCR: 681 /* not going to implement pause control */ 682 regs.pcr = reg; 683 break; 684 685 case RFCR: 686 regs.rfcr = reg; 687 688 rxFilterEnable = (reg & RFCR_RFEN) ? true : false; 689 acceptBroadcast = (reg & RFCR_AAB) ? true : false; 690 acceptMulticast = (reg & RFCR_AAM) ? true : false; 691 acceptUnicast = (reg & RFCR_AAU) ? true : false; 692 acceptPerfect = (reg & RFCR_APM) ? true : false; 693 acceptArp = (reg & RFCR_AARP) ? true : false; 694 multicastHashEnable = (reg & RFCR_MHEN) ? 
true : false; 695 696#if 0 697 if (reg & RFCR_APAT) 698 panic("RFCR_APAT not implemented!\n"); 699#endif 700 if (reg & RFCR_UHEN) 701 panic("Unicast hash filtering not used by drivers!\n"); 702 703 if (reg & RFCR_ULM) 704 panic("RFCR_ULM not implemented!\n"); 705 706 break; 707 708 case RFDR: 709 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR); 710 switch (rfaddr) { 711 case 0x000: 712 rom.perfectMatch[0] = (uint8_t)reg; 713 rom.perfectMatch[1] = (uint8_t)(reg >> 8); 714 break; 715 case 0x002: 716 rom.perfectMatch[2] = (uint8_t)reg; 717 rom.perfectMatch[3] = (uint8_t)(reg >> 8); 718 break; 719 case 0x004: 720 rom.perfectMatch[4] = (uint8_t)reg; 721 rom.perfectMatch[5] = (uint8_t)(reg >> 8); 722 break; 723 default: 724 725 if (rfaddr >= FHASH_ADDR && 726 rfaddr < FHASH_ADDR + FHASH_SIZE) { 727 728 // Only word-aligned writes supported 729 if (rfaddr % 2) 730 panic("unaligned write to filter hash table!"); 731 732 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg; 733 rom.filterHash[rfaddr - FHASH_ADDR + 1] 734 = (uint8_t)(reg >> 8); 735 break; 736 } 737 panic("writing RFDR for something other than pattern matching " 738 "or hashing! 
%#x\n", rfaddr); 739 } 740 741 case BRAR: 742 regs.brar = reg; 743 break; 744 745 case BRDR: 746 panic("the driver never uses BRDR, something is wrong!\n"); 747 748 case SRR: 749 panic("SRR is read only register!\n"); 750 751 case MIBC: 752 panic("the driver never uses MIBC, something is wrong!\n"); 753 754 case VRCR: 755 regs.vrcr = reg; 756 break; 757 758 case VTCR: 759 regs.vtcr = reg; 760 break; 761 762 case VDR: 763 panic("the driver never uses VDR, something is wrong!\n"); 764 765 case CCSR: 766 /* not going to implement clockrun stuff */ 767 regs.ccsr = reg; 768 break; 769 770 case TBICR: 771 regs.tbicr = reg; 772 if (reg & TBICR_MR_LOOPBACK) 773 panic("TBICR_MR_LOOPBACK never used, something wrong!\n"); 774 775 if (reg & TBICR_MR_AN_ENABLE) { 776 regs.tanlpar = regs.tanar; 777 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS); 778 } 779 780#if 0 781 if (reg & TBICR_MR_RESTART_AN) ; 782#endif 783 784 break; 785 786 case TBISR: 787 panic("TBISR is read only register!\n"); 788 789 case TANAR: 790 // Only write the writable bits 791 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED; 792 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED); 793 794 // Pause capability unimplemented 795#if 0 796 if (reg & TANAR_PS2) ; 797 if (reg & TANAR_PS1) ; 798#endif 799 800 break; 801 802 case TANLPAR: 803 panic("this should only be written to by the fake phy!\n"); 804 805 case TANER: 806 panic("TANER is read only register!\n"); 807 808 case TESR: 809 regs.tesr = reg; 810 break; 811 812 default: 813 panic("invalid register access daddr=%#x", daddr); 814 } 815 } else { 816 panic("Invalid Request Size"); 817 } 818 pkt->makeAtomicResponse(); 819 return pioDelay; 820} 821 822void 823NSGigE::devIntrPost(uint32_t interrupts) 824{ 825 if (interrupts & ISR_RESERVE) 826 panic("Cannot set a reserved interrupt"); 827 828 if (interrupts & ISR_NOIMPL) 829 warn("interrupt not implemented %#x\n", interrupts); 830 831 interrupts &= ISR_IMPL; 832 regs.isr |= interrupts; 833 
834 if (interrupts & regs.imr) { 835 if (interrupts & ISR_SWI) { 836 totalSwi++; 837 } 838 if (interrupts & ISR_RXIDLE) { 839 totalRxIdle++; 840 } 841 if (interrupts & ISR_RXOK) { 842 totalRxOk++; 843 } 844 if (interrupts & ISR_RXDESC) { 845 totalRxDesc++; 846 } 847 if (interrupts & ISR_TXOK) { 848 totalTxOk++; 849 } 850 if (interrupts & ISR_TXIDLE) { 851 totalTxIdle++; 852 } 853 if (interrupts & ISR_TXDESC) { 854 totalTxDesc++; 855 } 856 if (interrupts & ISR_RXORN) { 857 totalRxOrn++; 858 } 859 } 860 861 DPRINTF(EthernetIntr, 862 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n", 863 interrupts, regs.isr, regs.imr); 864 865 if ((regs.isr & regs.imr)) { 866 Tick when = curTick(); 867 if ((regs.isr & regs.imr & ISR_NODELAY) == 0) 868 when += intrDelay; 869 postedInterrupts++; 870 cpuIntrPost(when); 871 } 872} 873 874/* writing this interrupt counting stats inside this means that this function 875 is now limited to being used to clear all interrupts upon the kernel 876 reading isr and servicing. just telling you in case you were thinking 877 of expanding use. 
878*/ 879void 880NSGigE::devIntrClear(uint32_t interrupts) 881{ 882 if (interrupts & ISR_RESERVE) 883 panic("Cannot clear a reserved interrupt"); 884 885 if (regs.isr & regs.imr & ISR_SWI) { 886 postedSwi++; 887 } 888 if (regs.isr & regs.imr & ISR_RXIDLE) { 889 postedRxIdle++; 890 } 891 if (regs.isr & regs.imr & ISR_RXOK) { 892 postedRxOk++; 893 } 894 if (regs.isr & regs.imr & ISR_RXDESC) { 895 postedRxDesc++; 896 } 897 if (regs.isr & regs.imr & ISR_TXOK) { 898 postedTxOk++; 899 } 900 if (regs.isr & regs.imr & ISR_TXIDLE) { 901 postedTxIdle++; 902 } 903 if (regs.isr & regs.imr & ISR_TXDESC) { 904 postedTxDesc++; 905 } 906 if (regs.isr & regs.imr & ISR_RXORN) { 907 postedRxOrn++; 908 } 909 910 interrupts &= ~ISR_NOIMPL; 911 regs.isr &= ~interrupts; 912 913 DPRINTF(EthernetIntr, 914 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n", 915 interrupts, regs.isr, regs.imr); 916 917 if (!(regs.isr & regs.imr)) 918 cpuIntrClear(); 919} 920 921void 922NSGigE::devIntrChangeMask() 923{ 924 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n", 925 regs.isr, regs.imr, regs.isr & regs.imr); 926 927 if (regs.isr & regs.imr) 928 cpuIntrPost(curTick()); 929 else 930 cpuIntrClear(); 931} 932 933void 934NSGigE::cpuIntrPost(Tick when) 935{ 936 // If the interrupt you want to post is later than an interrupt 937 // already scheduled, just let it post in the coming one and don't 938 // schedule another. 939 // HOWEVER, must be sure that the scheduled intrTick is in the 940 // future (this was formerly the source of a bug) 941 /** 942 * @todo this warning should be removed and the intrTick code should 943 * be fixed. 
944 */ 945 assert(when >= curTick()); 946 assert(intrTick >= curTick() || intrTick == 0); 947 if (when > intrTick && intrTick != 0) { 948 DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n", 949 intrTick); 950 return; 951 } 952 953 intrTick = when; 954 if (intrTick < curTick()) { 955 Debug::breakpoint(); 956 intrTick = curTick(); 957 } 958 959 DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n", 960 intrTick); 961 962 if (intrEvent) 963 intrEvent->squash(); 964 intrEvent = new IntrEvent(this, true); 965 schedule(intrEvent, intrTick); 966} 967 968void 969NSGigE::cpuInterrupt() 970{ 971 assert(intrTick == curTick()); 972 973 // Whether or not there's a pending interrupt, we don't care about 974 // it anymore 975 intrEvent = 0; 976 intrTick = 0; 977 978 // Don't send an interrupt if there's already one 979 if (cpuPendingIntr) { 980 DPRINTF(EthernetIntr, 981 "would send an interrupt now, but there's already pending\n"); 982 } else { 983 // Send interrupt 984 cpuPendingIntr = true; 985 986 DPRINTF(EthernetIntr, "posting interrupt\n"); 987 intrPost(); 988 } 989} 990 991void 992NSGigE::cpuIntrClear() 993{ 994 if (!cpuPendingIntr) 995 return; 996 997 if (intrEvent) { 998 intrEvent->squash(); 999 intrEvent = 0; 1000 } 1001 1002 intrTick = 0; 1003 1004 cpuPendingIntr = false; 1005 1006 DPRINTF(EthernetIntr, "clearing interrupt\n"); 1007 intrClear(); 1008} 1009 1010bool 1011NSGigE::cpuIntrPending() const 1012{ return cpuPendingIntr; } 1013 1014void 1015NSGigE::txReset() 1016{ 1017 1018 DPRINTF(Ethernet, "transmit reset\n"); 1019 1020 CTDD = false; 1021 txEnable = false;; 1022 txFragPtr = 0; 1023 assert(txDescCnt == 0); 1024 txFifo.clear(); 1025 txState = txIdle; 1026 assert(txDmaState == dmaIdle); 1027} 1028 1029void 1030NSGigE::rxReset() 1031{ 1032 DPRINTF(Ethernet, "receive reset\n"); 1033 1034 CRDD = false; 1035 assert(rxPktBytes == 0); 1036 rxEnable = false; 1037 rxFragPtr = 0; 1038 assert(rxDescCnt == 0); 1039 assert(rxDmaState == 
dmaIdle); 1040 rxFifo.clear(); 1041 rxState = rxIdle; 1042} 1043 1044void 1045NSGigE::regsReset() 1046{ 1047 memset(®s, 0, sizeof(regs)); 1048 regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000); 1049 regs.mear = 0x12; 1050 regs.txcfg = 0x120; // set drain threshold to 1024 bytes and 1051 // fill threshold to 32 bytes 1052 regs.rxcfg = 0x4; // set drain threshold to 16 bytes 1053 regs.srr = 0x0103; // set the silicon revision to rev B or 0x103 1054 regs.mibc = MIBC_FRZ; 1055 regs.vdr = 0x81; // set the vlan tag type to 802.1q 1056 regs.tesr = 0xc000; // TBI capable of both full and half duplex 1057 regs.brar = 0xffffffff; 1058 1059 extstsEnable = false; 1060 acceptBroadcast = false; 1061 acceptMulticast = false; 1062 acceptUnicast = false; 1063 acceptPerfect = false; 1064 acceptArp = false; 1065} 1066 1067bool 1068NSGigE::doRxDmaRead() 1069{ 1070 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting); 1071 rxDmaState = dmaReading; 1072 1073 if (dmaPending() || getDrainState() != Drainable::Running) 1074 rxDmaState = dmaReadWaiting; 1075 else 1076 dmaRead(rxDmaAddr, rxDmaLen, &rxDmaReadEvent, (uint8_t*)rxDmaData); 1077 1078 return true; 1079} 1080 1081void 1082NSGigE::rxDmaReadDone() 1083{ 1084 assert(rxDmaState == dmaReading); 1085 rxDmaState = dmaIdle; 1086 1087 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n", 1088 rxDmaAddr, rxDmaLen); 1089 DDUMP(EthernetDMA, rxDmaData, rxDmaLen); 1090 1091 // If the transmit state machine has a pending DMA, let it go first 1092 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting) 1093 txKick(); 1094 1095 rxKick(); 1096} 1097 1098bool 1099NSGigE::doRxDmaWrite() 1100{ 1101 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting); 1102 rxDmaState = dmaWriting; 1103 1104 if (dmaPending() || getDrainState() != Running) 1105 rxDmaState = dmaWriteWaiting; 1106 else 1107 dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaWriteEvent, (uint8_t*)rxDmaData); 1108 return true; 1109} 1110 1111void 
1112NSGigE::rxDmaWriteDone() 1113{ 1114 assert(rxDmaState == dmaWriting); 1115 rxDmaState = dmaIdle; 1116 1117 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n", 1118 rxDmaAddr, rxDmaLen); 1119 DDUMP(EthernetDMA, rxDmaData, rxDmaLen); 1120 1121 // If the transmit state machine has a pending DMA, let it go first 1122 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting) 1123 txKick(); 1124 1125 rxKick(); 1126} 1127 1128void 1129NSGigE::rxKick() 1130{ 1131 bool is64bit = (bool)(regs.config & CFGR_M64ADDR); 1132 1133 DPRINTF(EthernetSM, 1134 "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n", 1135 NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32); 1136 1137 Addr link, bufptr; 1138 uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts; 1139 uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts; 1140 1141 next: 1142 if (rxKickTick > curTick()) { 1143 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n", 1144 rxKickTick); 1145 1146 goto exit; 1147 } 1148 1149 // Go to the next state machine clock tick. 1150 rxKickTick = clockEdge(Cycles(1)); 1151 1152 switch(rxDmaState) { 1153 case dmaReadWaiting: 1154 if (doRxDmaRead()) 1155 goto exit; 1156 break; 1157 case dmaWriteWaiting: 1158 if (doRxDmaWrite()) 1159 goto exit; 1160 break; 1161 default: 1162 break; 1163 } 1164 1165 link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link; 1166 bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr; 1167 1168 // see state machine from spec for details 1169 // the way this works is, if you finish work on one state and can 1170 // go directly to another, you do that through jumping to the 1171 // label "next". however, if you have intermediate work, like DMA 1172 // so that you can't go to the next state yet, you go to exit and 1173 // exit the loop. however, when the DMA is done it will trigger 1174 // an event and come back to this loop. 
1175 switch (rxState) { 1176 case rxIdle: 1177 if (!rxEnable) { 1178 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n"); 1179 goto exit; 1180 } 1181 1182 if (CRDD) { 1183 rxState = rxDescRefr; 1184 1185 rxDmaAddr = regs.rxdp & 0x3fffffff; 1186 rxDmaData = 1187 is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link; 1188 rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link); 1189 rxDmaFree = dmaDescFree; 1190 1191 descDmaReads++; 1192 descDmaRdBytes += rxDmaLen; 1193 1194 if (doRxDmaRead()) 1195 goto exit; 1196 } else { 1197 rxState = rxDescRead; 1198 1199 rxDmaAddr = regs.rxdp & 0x3fffffff; 1200 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32; 1201 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32); 1202 rxDmaFree = dmaDescFree; 1203 1204 descDmaReads++; 1205 descDmaRdBytes += rxDmaLen; 1206 1207 if (doRxDmaRead()) 1208 goto exit; 1209 } 1210 break; 1211 1212 case rxDescRefr: 1213 if (rxDmaState != dmaIdle) 1214 goto exit; 1215 1216 rxState = rxAdvance; 1217 break; 1218 1219 case rxDescRead: 1220 if (rxDmaState != dmaIdle) 1221 goto exit; 1222 1223 DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n", 1224 regs.rxdp & 0x3fffffff); 1225 DPRINTF(EthernetDesc, 1226 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n", 1227 link, bufptr, cmdsts, extsts); 1228 1229 if (cmdsts & CMDSTS_OWN) { 1230 devIntrPost(ISR_RXIDLE); 1231 rxState = rxIdle; 1232 goto exit; 1233 } else { 1234 rxState = rxFifoBlock; 1235 rxFragPtr = bufptr; 1236 rxDescCnt = cmdsts & CMDSTS_LEN_MASK; 1237 } 1238 break; 1239 1240 case rxFifoBlock: 1241 if (!rxPacket) { 1242 /** 1243 * @todo in reality, we should be able to start processing 1244 * the packet as it arrives, and not have to wait for the 1245 * full packet ot be in the receive fifo. 1246 */ 1247 if (rxFifo.empty()) 1248 goto exit; 1249 1250 DPRINTF(EthernetSM, "****processing receive of new packet****\n"); 1251 1252 // If we don't have a packet, grab a new one from the fifo. 
1253 rxPacket = rxFifo.front(); 1254 rxPktBytes = rxPacket->length; 1255 rxPacketBufPtr = rxPacket->data; 1256 1257#if TRACING_ON 1258 if (DTRACE(Ethernet)) { 1259 IpPtr ip(rxPacket); 1260 if (ip) { 1261 DPRINTF(Ethernet, "ID is %d\n", ip->id()); 1262 TcpPtr tcp(ip); 1263 if (tcp) { 1264 DPRINTF(Ethernet, 1265 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n", 1266 tcp->sport(), tcp->dport(), tcp->seq(), 1267 tcp->ack()); 1268 } 1269 } 1270 } 1271#endif 1272 1273 // sanity check - i think the driver behaves like this 1274 assert(rxDescCnt >= rxPktBytes); 1275 rxFifo.pop(); 1276 } 1277 1278 1279 // dont' need the && rxDescCnt > 0 if driver sanity check 1280 // above holds 1281 if (rxPktBytes > 0) { 1282 rxState = rxFragWrite; 1283 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity 1284 // check holds 1285 rxXferLen = rxPktBytes; 1286 1287 rxDmaAddr = rxFragPtr & 0x3fffffff; 1288 rxDmaData = rxPacketBufPtr; 1289 rxDmaLen = rxXferLen; 1290 rxDmaFree = dmaDataFree; 1291 1292 if (doRxDmaWrite()) 1293 goto exit; 1294 1295 } else { 1296 rxState = rxDescWrite; 1297 1298 //if (rxPktBytes == 0) { /* packet is done */ 1299 assert(rxPktBytes == 0); 1300 DPRINTF(EthernetSM, "done with receiving packet\n"); 1301 1302 cmdsts |= CMDSTS_OWN; 1303 cmdsts &= ~CMDSTS_MORE; 1304 cmdsts |= CMDSTS_OK; 1305 cmdsts &= 0xffff0000; 1306 cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE 1307 1308#if 0 1309 /* 1310 * all the driver uses these are for its own stats keeping 1311 * which we don't care about, aren't necessary for 1312 * functionality and doing this would just slow us down. 
1313 * if they end up using this in a later version for 1314 * functional purposes, just undef 1315 */ 1316 if (rxFilterEnable) { 1317 cmdsts &= ~CMDSTS_DEST_MASK; 1318 const EthAddr &dst = rxFifoFront()->dst(); 1319 if (dst->unicast()) 1320 cmdsts |= CMDSTS_DEST_SELF; 1321 if (dst->multicast()) 1322 cmdsts |= CMDSTS_DEST_MULTI; 1323 if (dst->broadcast()) 1324 cmdsts |= CMDSTS_DEST_MASK; 1325 } 1326#endif 1327 1328 IpPtr ip(rxPacket); 1329 if (extstsEnable && ip) { 1330 extsts |= EXTSTS_IPPKT; 1331 rxIpChecksums++; 1332 if (cksum(ip) != 0) { 1333 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n"); 1334 extsts |= EXTSTS_IPERR; 1335 } 1336 TcpPtr tcp(ip); 1337 UdpPtr udp(ip); 1338 if (tcp) { 1339 extsts |= EXTSTS_TCPPKT; 1340 rxTcpChecksums++; 1341 if (cksum(tcp) != 0) { 1342 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n"); 1343 extsts |= EXTSTS_TCPERR; 1344 1345 } 1346 } else if (udp) { 1347 extsts |= EXTSTS_UDPPKT; 1348 rxUdpChecksums++; 1349 if (cksum(udp) != 0) { 1350 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n"); 1351 extsts |= EXTSTS_UDPERR; 1352 } 1353 } 1354 } 1355 rxPacket = 0; 1356 1357 /* 1358 * the driver seems to always receive into desc buffers 1359 * of size 1514, so you never have a pkt that is split 1360 * into multiple descriptors on the receive side, so 1361 * i don't implement that case, hence the assert above. 
1362 */ 1363 1364 DPRINTF(EthernetDesc, 1365 "rxDesc: addr=%08x writeback cmdsts extsts\n", 1366 regs.rxdp & 0x3fffffff); 1367 DPRINTF(EthernetDesc, 1368 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n", 1369 link, bufptr, cmdsts, extsts); 1370 1371 rxDmaAddr = regs.rxdp & 0x3fffffff; 1372 rxDmaData = &cmdsts; 1373 if (is64bit) { 1374 rxDmaAddr += offsetof(ns_desc64, cmdsts); 1375 rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts); 1376 } else { 1377 rxDmaAddr += offsetof(ns_desc32, cmdsts); 1378 rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts); 1379 } 1380 rxDmaFree = dmaDescFree; 1381 1382 descDmaWrites++; 1383 descDmaWrBytes += rxDmaLen; 1384 1385 if (doRxDmaWrite()) 1386 goto exit; 1387 } 1388 break; 1389 1390 case rxFragWrite: 1391 if (rxDmaState != dmaIdle) 1392 goto exit; 1393 1394 rxPacketBufPtr += rxXferLen; 1395 rxFragPtr += rxXferLen; 1396 rxPktBytes -= rxXferLen; 1397 1398 rxState = rxFifoBlock; 1399 break; 1400 1401 case rxDescWrite: 1402 if (rxDmaState != dmaIdle) 1403 goto exit; 1404 1405 assert(cmdsts & CMDSTS_OWN); 1406 1407 assert(rxPacket == 0); 1408 devIntrPost(ISR_RXOK); 1409 1410 if (cmdsts & CMDSTS_INTR) 1411 devIntrPost(ISR_RXDESC); 1412 1413 if (!rxEnable) { 1414 DPRINTF(EthernetSM, "Halting the RX state machine\n"); 1415 rxState = rxIdle; 1416 goto exit; 1417 } else 1418 rxState = rxAdvance; 1419 break; 1420 1421 case rxAdvance: 1422 if (link == 0) { 1423 devIntrPost(ISR_RXIDLE); 1424 rxState = rxIdle; 1425 CRDD = true; 1426 goto exit; 1427 } else { 1428 if (rxDmaState != dmaIdle) 1429 goto exit; 1430 rxState = rxDescRead; 1431 regs.rxdp = link; 1432 CRDD = false; 1433 1434 rxDmaAddr = regs.rxdp & 0x3fffffff; 1435 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32; 1436 rxDmaLen = is64bit ? 
                sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            NsRxStateStrings[rxState]);
    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            NsRxStateStrings[rxState]);

    // Resume the RX state machine at the tick computed above; guard
    // against double-scheduling the kick event.
    if (!rxKickEvent.scheduled())
        schedule(rxKickEvent, rxKickTick);
}

/**
 * Try to send the packet at the head of the TX FIFO out over the
 * attached link.  On success the packet is popped, TX byte/packet stats
 * are updated and ISR_TXOK is posted; if the link refuses the packet
 * (or more packets remain queued) a retry is scheduled after retryTime.
 */
void
NSGigE::transmit()
{
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "nothing to transmit\n");
        return;
    }

    DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
            txFifo.size());
    if (interface->sendPacket(txFifo.front())) {
#if TRACING_ON
        if (DTRACE(Ethernet)) {
            IpPtr ip(txFifo.front());
            if (ip) {
                DPRINTF(Ethernet, "ID is %d\n", ip->id());
                TcpPtr tcp(ip);
                if (tcp) {
                    DPRINTF(Ethernet,
                            "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                            tcp->sport(), tcp->dport(), tcp->seq(),
                            tcp->ack());
                }
            }
        }
#endif

        DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
        txBytes += txFifo.front()->length;
        txPackets++;

        DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
                txFifo.avail());
        txFifo.pop();

        /*
         * normally do a writeback of the descriptor here, and ONLY
         * after that is done, send this interrupt. but since our
         * stuff never actually fails, just do this interrupt here,
         * otherwise the code has to stray from this nice format.
         * besides, it's functionally the same.
         */
        devIntrPost(ISR_TXOK);
    }

    // Anything left in the FIFO (refused packet or queued followers)
    // gets another attempt after retryTime.
    if (!txFifo.empty() && !txEvent.scheduled()) {
        DPRINTF(Ethernet, "reschedule transmit\n");
        schedule(txEvent, curTick() + retryTime);
    }
}

/**
 * Start (or defer) the TX DMA read described by txDmaAddr / txDmaLen /
 * txDmaData.  If the DMA engine is busy or the device is draining, the
 * request is parked in dmaReadWaiting to be retried from txKick().
 * Always returns true; callers use the return value to exit their
 * state machine until the completion callback fires.
 */
bool
NSGigE::doTxDmaRead()
{
    assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
    txDmaState = dmaReading;

    if (dmaPending() || getDrainState() != Running)
        txDmaState = dmaReadWaiting;
    else
        dmaRead(txDmaAddr, txDmaLen, &txDmaReadEvent, (uint8_t*)txDmaData);

    return true;
}

/** Completion callback for a TX DMA read; restarts both state machines. */
void
NSGigE::txDmaReadDone()
{
    assert(txDmaState == dmaReading);
    txDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
            txDmaAddr, txDmaLen);
    DDUMP(EthernetDMA, txDmaData, txDmaLen);

    // If the receive state machine has a pending DMA, let it go first
    if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
        rxKick();

    txKick();
}

/**
 * Start (or defer) the TX DMA write described by txDmaAddr / txDmaLen /
 * txDmaData.  Mirrors doTxDmaRead(); always returns true.
 */
bool
NSGigE::doTxDmaWrite()
{
    assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
    txDmaState = dmaWriting;

    if (dmaPending() || getDrainState() != Running)
        txDmaState = dmaWriteWaiting;
    else
        dmaWrite(txDmaAddr, txDmaLen, &txDmaWriteEvent, (uint8_t*)txDmaData);
    return true;
}

/** Completion callback for a TX DMA write; restarts both state machines. */
void
NSGigE::txDmaWriteDone()
{
    assert(txDmaState == dmaWriting);
    txDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
            txDmaAddr, txDmaLen);
    DDUMP(EthernetDMA, txDmaData, txDmaLen);

    // If the receive state machine has a pending DMA, let it go first
    if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
        rxKick();

    txKick();
}

/**
 * Main transmit-side state machine.  Walks the TX descriptor ring in
 * guest memory (32- or 64-bit descriptors selected by CFGR_M64ADDR),
 * DMAs packet fragments into the TX FIFO and writes descriptor status
 * back, looping until it must wait on a DMA or runs out of work.
 */
void
NSGigE::txKick()
{
    bool is64bit = (bool)(regs.config & CFGR_M64ADDR);

    DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n",
            NsTxStateStrings[txState], is64bit ?
            64 : 32);

    // Cached references into whichever descriptor image (32/64-bit)
    // is in use; writes through cmdsts/extsts update the cached copy
    // that is later DMAed back to guest memory.
    Addr link, bufptr;
    uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts;
    uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts;

  next:
    if (txKickTick > curTick()) {
        DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
                txKickTick);
        goto exit;
    }

    // Go to the next state machine clock tick.
    txKickTick = clockEdge(Cycles(1));

    // Retry any DMA that was deferred while the engine was busy/draining.
    switch(txDmaState) {
      case dmaReadWaiting:
        if (doTxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doTxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link;
    bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr;
    switch (txState) {
      case txIdle:
        if (!txEnable) {
            DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
            goto exit;
        }

        if (CTDD) {
            // Current descriptor already consumed: refresh only its
            // link field to find the next one.
            txState = txDescRefr;

            txDmaAddr = regs.txdp & 0x3fffffff;
            txDmaData =
                is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link;
            txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;

        } else {
            // Fetch the full descriptor at txdp.
            txState = txDescRead;

            txDmaAddr = regs.txdp & 0x3fffffff;
            txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
            txDmaLen = is64bit ?
                sizeof(txDesc64) : sizeof(txDesc32);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      case txDescRefr:
        if (txDmaState != dmaIdle)
            goto exit;

        txState = txAdvance;
        break;

      case txDescRead:
        if (txDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n",
                regs.txdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
                link, bufptr, cmdsts, extsts);

        // OWN set means the driver has handed this descriptor to the
        // device; otherwise the ring is empty and we go idle.
        if (cmdsts & CMDSTS_OWN) {
            txState = txFifoBlock;
            txFragPtr = bufptr;
            txDescCnt = cmdsts & CMDSTS_LEN_MASK;
        } else {
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        }
        break;

      case txFifoBlock:
        if (!txPacket) {
            DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
            txPacket = make_shared<EthPacketData>(16384);
            txPacketBufPtr = txPacket->data;
        }

        if (txDescCnt == 0) {
            DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
            if (cmdsts & CMDSTS_MORE) {
                // Multi-descriptor packet: give this descriptor back
                // to the driver (clear OWN) and write its cmdsts back.
                DPRINTF(EthernetSM, "there are more descriptors to come\n");
                txState = txDescWrite;

                cmdsts &= ~CMDSTS_OWN;

                txDmaAddr = regs.txdp & 0x3fffffff;
                txDmaData = &cmdsts;
                if (is64bit) {
                    txDmaAddr += offsetof(ns_desc64, cmdsts);
                    txDmaLen = sizeof(txDesc64.cmdsts);
                } else {
                    txDmaAddr += offsetof(ns_desc32, cmdsts);
                    txDmaLen = sizeof(txDesc32.cmdsts);
                }
                txDmaFree = dmaDescFree;

                if (doTxDmaWrite())
                    goto exit;

            } else { /* this packet is totally done */
                DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
                /* deal with the the packet that just finished */
                if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
                    IpPtr ip(txPacket);
                    if (extsts & EXTSTS_UDPPKT) {
                        UdpPtr
udp(ip); 1714 if (udp) { 1715 udp->sum(0); 1716 udp->sum(cksum(udp)); 1717 txUdpChecksums++; 1718 } else { 1719 Debug::breakpoint(); 1720 warn_once("UDPPKT set, but not UDP!\n"); 1721 } 1722 } else if (extsts & EXTSTS_TCPPKT) { 1723 TcpPtr tcp(ip); 1724 if (tcp) { 1725 tcp->sum(0); 1726 tcp->sum(cksum(tcp)); 1727 txTcpChecksums++; 1728 } else { 1729 Debug::breakpoint(); 1730 warn_once("TCPPKT set, but not UDP!\n"); 1731 } 1732 } 1733 if (extsts & EXTSTS_IPPKT) { 1734 if (ip) { 1735 ip->sum(0); 1736 ip->sum(cksum(ip)); 1737 txIpChecksums++; 1738 } else { 1739 Debug::breakpoint(); 1740 warn_once("IPPKT set, but not UDP!\n"); 1741 } 1742 } 1743 } 1744 1745 txPacket->length = txPacketBufPtr - txPacket->data; 1746 // this is just because the receive can't handle a 1747 // packet bigger want to make sure 1748 if (txPacket->length > 1514) 1749 panic("transmit packet too large, %s > 1514\n", 1750 txPacket->length); 1751 1752#ifndef NDEBUG 1753 bool success = 1754#endif 1755 txFifo.push(txPacket); 1756 assert(success); 1757 1758 /* 1759 * this following section is not tqo spec, but 1760 * functionally shouldn't be any different. normally, 1761 * the chip will wait til the transmit has occurred 1762 * before writing back the descriptor because it has 1763 * to wait to see that it was successfully transmitted 1764 * to decide whether to set CMDSTS_OK or not. 
                 * however, in the simulator since it is always
                 * successfully transmitted, and writing it exactly to
                 * spec would complicate the code, we just do it here
                 */

                // Hand the descriptor back to the driver with OK set,
                // then DMA cmdsts+extsts back to guest memory.
                cmdsts &= ~CMDSTS_OWN;
                cmdsts |= CMDSTS_OK;

                DPRINTF(EthernetDesc,
                        "txDesc writeback: cmdsts=%08x extsts=%08x\n",
                        cmdsts, extsts);

                txDmaFree = dmaDescFree;
                txDmaAddr = regs.txdp & 0x3fffffff;
                txDmaData = &cmdsts;
                if (is64bit) {
                    txDmaAddr += offsetof(ns_desc64, cmdsts);
                    txDmaLen =
                        sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts);
                } else {
                    txDmaAddr += offsetof(ns_desc32, cmdsts);
                    txDmaLen =
                        sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts);
                }

                descDmaWrites++;
                descDmaWrBytes += txDmaLen;

                // Send the completed packet, then drop our reference.
                transmit();
                txPacket = 0;

                if (!txEnable) {
                    DPRINTF(EthernetSM, "halting TX state machine\n");
                    txState = txIdle;
                    goto exit;
                } else
                    txState = txAdvance;

                if (doTxDmaWrite())
                    goto exit;
            }
        } else {
            DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
            if (!txFifo.full()) {
                txState = txFragRead;

                /*
                 * The number of bytes transferred is either whatever
                 * is left in the descriptor (txDescCnt), or if there
                 * is not enough room in the fifo, just whatever room
                 * is left in the fifo
                 */
                txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());

                txDmaAddr = txFragPtr & 0x3fffffff;
                txDmaData = txPacketBufPtr;
                txDmaLen = txXferLen;
                txDmaFree = dmaDataFree;

                if (doTxDmaRead())
                    goto exit;
            } else {
                // FIFO full: drain it by transmitting, then come back.
                txState = txFifoBlock;
                transmit();

                goto exit;
            }

        }
        break;

      case txFragRead:
        if (txDmaState != dmaIdle)
            goto exit;

        // Fragment landed in the packet buffer: advance the cursors
        // and account for the FIFO space it will occupy.
        txPacketBufPtr += txXferLen;
        txFragPtr += txXferLen;
        txDescCnt -= txXferLen;
        txFifo.reserve(txXferLen);

        txState = txFifoBlock;
        break;

      case txDescWrite:
        if (txDmaState != dmaIdle)
            goto exit;

        if (cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_TXDESC);

        if (!txEnable) {
            DPRINTF(EthernetSM, "halting TX state machine\n");
            txState = txIdle;
            goto exit;
        } else
            txState = txAdvance;
        break;

      case txAdvance:
        if (link == 0) {
            // End of the descriptor chain.
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        } else {
            if (txDmaState != dmaIdle)
                goto exit;
            txState = txDescRead;
            regs.txdp = link;
            CTDD = false;

            txDmaAddr = link & 0x3fffffff;
            txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
            txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
            txDmaFree = dmaDescFree;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("invalid state");
    }

    DPRINTF(EthernetSM, "entering next txState=%s\n",
            NsTxStateStrings[txState]);
    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
            NsTxStateStrings[txState]);

    if (!txKickEvent.scheduled())
        schedule(txKickEvent, txKickTick);
}

/**
 * Advance the EEPROM state machine
 * Called on rising edge of EEPROM clock bit in MEAR
 */
void
NSGigE::eepromKick()
{
    switch (eepromState) {

      case eepromStart:

        // Wait for start bit
        if (regs.mear & MEAR_EEDI) {
            // Set up to get 2 opcode bits
            eepromState = eepromGetOpcode;
            eepromBitsToRx = 2;
            eepromOpcode = 0;
        }
        break;

      case eepromGetOpcode:
        // Shift serial data-in bit into the opcode, MSB first.
        eepromOpcode <<= 1;
        eepromOpcode += (regs.mear & MEAR_EEDI) ?
            1 : 0;
        --eepromBitsToRx;

        // Done getting opcode
        if (eepromBitsToRx == 0) {
            if (eepromOpcode != EEPROM_READ)
                panic("only EEPROM reads are implemented!");

            // Set up to get address
            eepromState = eepromGetAddress;
            eepromBitsToRx = 6;
            eepromAddress = 0;
        }
        break;

      case eepromGetAddress:
        // Shift serial data-in bit into the address, MSB first.
        eepromAddress <<= 1;
        eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting address
        if (eepromBitsToRx == 0) {

            if (eepromAddress >= EEPROM_SIZE)
                panic("EEPROM read access out of range!");

            // Only the three 16-bit words holding the perfect-match
            // MAC address are modelled.
            switch (eepromAddress) {

              case EEPROM_PMATCH2_ADDR:
                eepromData = rom.perfectMatch[5];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[4];
                break;

              case EEPROM_PMATCH1_ADDR:
                eepromData = rom.perfectMatch[3];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[2];
                break;

              case EEPROM_PMATCH0_ADDR:
                eepromData = rom.perfectMatch[1];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[0];
                break;

              default:
                panic("FreeBSD driver only uses EEPROM to read PMATCH!");
            }
            // Set up to read data
            eepromState = eepromRead;
            eepromBitsToRx = 16;

            // Clear data in bit
            regs.mear &= ~MEAR_EEDI;
        }
        break;

      case eepromRead:
        // Clear Data Out bit
        regs.mear &= ~MEAR_EEDO;
        // Set bit to value of current EEPROM bit
        regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;

        // Shift the 16-bit word out MSB first, one bit per clock edge.
        eepromData <<= 1;
        --eepromBitsToRx;

        // All done
        if (eepromBitsToRx == 0) {
            eepromState = eepromStart;
        }
        break;

      default:
        panic("invalid EEPROM state");
    }

}

/**
 * Called when an outgoing packet has fully left the device; if more
 * data is waiting in the TX FIFO, schedule another transmit attempt on
 * the next clock edge.
 */
void
NSGigE::transferDone()
{
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
        return;
    }

    DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");

    reschedule(txEvent, clockEdge(Cycles(1)), true);
}

/**
 * Receive-side address filter.  Returns true if the packet should be
 * DROPPED, false if it should be accepted, based on the destination
 * MAC (unicast/perfect-match/ARP, broadcast, multicast) and the
 * accept* configuration flags.  Multicast hashing is not modelled:
 * with multicastHashEnable set, all multicasts are accepted.
 */
bool
NSGigE::rxFilter(const EthPacketPtr &packet)
{
    EthPtr eth = packet;
    bool drop = true;
    string type;  // note: unused

    const EthAddr &dst = eth->dst();
    if (dst.unicast()) {
        // If we're accepting all unicast addresses
        if (acceptUnicast)
            drop = false;

        // If we make a perfect match
        if (acceptPerfect && dst == rom.perfectMatch)
            drop = false;

        if (acceptArp && eth->type() == ETH_TYPE_ARP)
            drop = false;

    } else if (dst.broadcast()) {
        // if we're accepting broadcasts
        if (acceptBroadcast)
            drop = false;

    } else if (dst.multicast()) {
        // if we're accepting all multicasts
        if (acceptMulticast)
            drop = false;

        // Multicast hashing faked - all packets accepted
        if (multicastHashEnable)
            drop = false;
    }

    if (drop) {
        DPRINTF(Ethernet, "rxFilter drop\n");
        DDUMP(EthernetData, packet->data, packet->length);
    }

    return drop;
}

/**
 * Entry point for packets arriving from the wire.  Returns true if the
 * packet was consumed (accepted or deliberately dropped), false if the
 * RX FIFO overflowed and the link should retry.
 */
bool
NSGigE::recvPacket(EthPacketPtr packet)
{
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
            rxFifo.avail());

    if (!rxEnable) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        return true;
    }

    // NOTE(review): dropping when filtering is *disabled* looks
    // inverted, but it is the long-standing behavior of this model --
    // confirm against driver expectations before changing.
    if (!rxFilterEnable) {
        DPRINTF(Ethernet,
                "receive packet filtering disabled . . . 
packet dropped\n");
        return true;
    }

    // Drop anything the address filter rejects.
    if (rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        return true;
    }

    // No room in the RX FIFO: count the drop, post the overrun
    // interrupt, and return false so the link retries delivery.
    if (rxFifo.avail() < packet->length) {
#if TRACING_ON
        IpPtr ip(packet);
        TcpPtr tcp(ip);
        if (ip) {
            DPRINTF(Ethernet,
                    "packet won't fit in receive buffer...pkt ID %d dropped\n",
                    ip->id());
            if (tcp) {
                DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
            }
        }
#endif
        droppedPackets++;
        devIntrPost(ISR_RXORN);
        return false;
    }

    rxFifo.push(packet);

    rxKick();
    return true;
}


void
NSGigE::drainResume()
{
    Drainable::drainResume();

    // During drain we could have left the state machines in a waiting state and
    // they wouldn't get out until some other event occured to kick them.
    // This way they'll get out immediately
    txKick();
    rxKick();
}


//=====================================================================
//
//
/**
 * Checkpoint the device: registers, ROM, FIFOs, in-flight packets,
 * cached descriptors, the three state machines and any pending events.
 * Field order must match unserialize() exactly.
 */
void
NSGigE::serialize(ostream &os)
{
    // Serialize the PciDevice base class
    PciDevice::serialize(os);

    /*
     * Finalize any DMA events now.
     */
    // @todo will mem system save pending dma?

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.brar);
    SERIALIZE_SCALAR(regs.brdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    rxFifo.serialize("rxFifo", os);
    txFifo.serialize("txFifo", os);

    /*
     * Serialize the various helper variables
     */
    // In-flight packets are checkpointed along with the offset of the
    // partial-transfer cursor into their data buffer.
    bool txPacketExists = txPacket != nullptr;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket->length = txPacketBufPtr - txPacket->data;
        txPacket->serialize("txPacket", os);
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket != nullptr;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        rxPacket->serialize("rxPacket", os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize Cached Descriptors
     */
    SERIALIZE_SCALAR(rxDesc64.link);
    SERIALIZE_SCALAR(rxDesc64.bufptr);
    SERIALIZE_SCALAR(rxDesc64.cmdsts);
    SERIALIZE_SCALAR(rxDesc64.extsts);
    SERIALIZE_SCALAR(txDesc64.link);
    SERIALIZE_SCALAR(txDesc64.bufptr);
    SERIALIZE_SCALAR(txDesc64.cmdsts);
    SERIALIZE_SCALAR(txDesc64.extsts);
    SERIALIZE_SCALAR(rxDesc32.link);
    SERIALIZE_SCALAR(rxDesc32.bufptr);
    SERIALIZE_SCALAR(rxDesc32.cmdsts);
    SERIALIZE_SCALAR(rxDesc32.extsts);
    SERIALIZE_SCALAR(txDesc32.link);
    SERIALIZE_SCALAR(txDesc32.bufptr);
    SERIALIZE_SCALAR(txDesc32.cmdsts);
    SERIALIZE_SCALAR(txDesc32.extsts);
    SERIALIZE_SCALAR(extstsEnable);

    /*
     * Serialize tx state machine
     */
    // Enums are stored as plain ints for checkpoint stability.
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);
    SERIALIZE_SCALAR(txKickTick);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFragPtr);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);
    SERIALIZE_SCALAR(rxKickTick);

    /*
     * Serialize EEPROM state machine
     */
    int eepromState = this->eepromState;
    SERIALIZE_SCALAR(eepromState);
    SERIALIZE_SCALAR(eepromClk);
    SERIALIZE_SCALAR(eepromBitsToRx);
    SERIALIZE_SCALAR(eepromOpcode);
    SERIALIZE_SCALAR(eepromAddress);
    SERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick() : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);
    SERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}

/**
 * Restore the device from a checkpoint.  Must consume fields in the
 * exact order serialize() wrote them, and reschedules any events that
 * were pending when the checkpoint was taken.
 */
void
NSGigE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Unserialize the PciDevice base class
    PciDevice::unserialize(cp, section);

    UNSERIALIZE_SCALAR(regs.command);
    UNSERIALIZE_SCALAR(regs.config);
    UNSERIALIZE_SCALAR(regs.mear);
    UNSERIALIZE_SCALAR(regs.ptscr);
    UNSERIALIZE_SCALAR(regs.isr);
    UNSERIALIZE_SCALAR(regs.imr);
    UNSERIALIZE_SCALAR(regs.ier);
    UNSERIALIZE_SCALAR(regs.ihr);
    UNSERIALIZE_SCALAR(regs.txdp);
    UNSERIALIZE_SCALAR(regs.txdp_hi);
    UNSERIALIZE_SCALAR(regs.txcfg);
    UNSERIALIZE_SCALAR(regs.gpior);
    UNSERIALIZE_SCALAR(regs.rxdp);
    UNSERIALIZE_SCALAR(regs.rxdp_hi);
    UNSERIALIZE_SCALAR(regs.rxcfg);
    UNSERIALIZE_SCALAR(regs.pqcr);
    UNSERIALIZE_SCALAR(regs.wcsr);
    UNSERIALIZE_SCALAR(regs.pcr);
    UNSERIALIZE_SCALAR(regs.rfcr);
    UNSERIALIZE_SCALAR(regs.rfdr);
    UNSERIALIZE_SCALAR(regs.brar);
    UNSERIALIZE_SCALAR(regs.brdr);
    UNSERIALIZE_SCALAR(regs.srr);
    UNSERIALIZE_SCALAR(regs.mibc);
    UNSERIALIZE_SCALAR(regs.vrcr);
    UNSERIALIZE_SCALAR(regs.vtcr);
    UNSERIALIZE_SCALAR(regs.vdr);
    UNSERIALIZE_SCALAR(regs.ccsr);
    UNSERIALIZE_SCALAR(regs.tbicr);
    UNSERIALIZE_SCALAR(regs.tbisr);
    UNSERIALIZE_SCALAR(regs.tanar);
    UNSERIALIZE_SCALAR(regs.tanlpar);
    UNSERIALIZE_SCALAR(regs.taner);
    UNSERIALIZE_SCALAR(regs.tesr);

    UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    UNSERIALIZE_SCALAR(ioEnable);

    /*
     * unserialize the data fifos
     */
    rxFifo.unserialize("rxFifo", cp, section);
    txFifo.unserialize("txFifo", cp, section);

    /*
     * unserialize the various helper variables
     */
    // Rebuild any in-flight packet and restore the partial-transfer
    // cursor as an offset into its data buffer.
    bool txPacketExists;
    UNSERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket = make_shared<EthPacketData>(16384);
        txPacket->unserialize("txPacket", cp, section);
        uint32_t txPktBufPtr;
        UNSERIALIZE_SCALAR(txPktBufPtr);
        txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
    } else
        txPacket = 0;

    bool rxPacketExists;
    UNSERIALIZE_SCALAR(rxPacketExists);
    rxPacket = 0;
    if (rxPacketExists) {
        rxPacket = make_shared<EthPacketData>(16384);
        rxPacket->unserialize("rxPacket", cp, section);
        uint32_t rxPktBufPtr;
        UNSERIALIZE_SCALAR(rxPktBufPtr);
        rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
    } else
        rxPacket = 0;

    UNSERIALIZE_SCALAR(txXferLen);
    UNSERIALIZE_SCALAR(rxXferLen);

    /*
     * Unserialize Cached Descriptors
     */
    UNSERIALIZE_SCALAR(rxDesc64.link);
    UNSERIALIZE_SCALAR(rxDesc64.bufptr);
    UNSERIALIZE_SCALAR(rxDesc64.cmdsts);
    UNSERIALIZE_SCALAR(rxDesc64.extsts);
    UNSERIALIZE_SCALAR(txDesc64.link);
    UNSERIALIZE_SCALAR(txDesc64.bufptr);
    UNSERIALIZE_SCALAR(txDesc64.cmdsts);
    UNSERIALIZE_SCALAR(txDesc64.extsts);
    UNSERIALIZE_SCALAR(rxDesc32.link);
    UNSERIALIZE_SCALAR(rxDesc32.bufptr);
    UNSERIALIZE_SCALAR(rxDesc32.cmdsts);
    UNSERIALIZE_SCALAR(rxDesc32.extsts);
    UNSERIALIZE_SCALAR(txDesc32.link);
    UNSERIALIZE_SCALAR(txDesc32.bufptr);
    UNSERIALIZE_SCALAR(txDesc32.cmdsts);
    UNSERIALIZE_SCALAR(txDesc32.extsts);
    UNSERIALIZE_SCALAR(extstsEnable);

    /*
     * unserialize tx state machine
     */
    // State-machine enums were checkpointed as plain ints.
    int txState;
    UNSERIALIZE_SCALAR(txState);
    this->txState = (TxState) txState;
    UNSERIALIZE_SCALAR(txEnable);
    UNSERIALIZE_SCALAR(CTDD);
    UNSERIALIZE_SCALAR(txFragPtr);
    UNSERIALIZE_SCALAR(txDescCnt);
    int txDmaState;
    UNSERIALIZE_SCALAR(txDmaState);
    this->txDmaState = (DmaState) txDmaState;
    UNSERIALIZE_SCALAR(txKickTick);
    if (txKickTick)
        schedule(txKickEvent, txKickTick);

    /*
     * unserialize rx state machine
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    this->rxState = (RxState) rxState;
    UNSERIALIZE_SCALAR(rxEnable);
    UNSERIALIZE_SCALAR(CRDD);
    UNSERIALIZE_SCALAR(rxPktBytes);
    UNSERIALIZE_SCALAR(rxFragPtr);
    UNSERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState;
    UNSERIALIZE_SCALAR(rxDmaState);
    this->rxDmaState = (DmaState) rxDmaState;
    UNSERIALIZE_SCALAR(rxKickTick);
    if (rxKickTick)
        schedule(rxKickEvent, rxKickTick);

    /*
     * Unserialize EEPROM state machine
     */
    int eepromState;
    UNSERIALIZE_SCALAR(eepromState);
    this->eepromState = (EEPROMState) eepromState;
    UNSERIALIZE_SCALAR(eepromClk);
    UNSERIALIZE_SCALAR(eepromBitsToRx);
    UNSERIALIZE_SCALAR(eepromOpcode);
    UNSERIALIZE_SCALAR(eepromAddress);
    UNSERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, reschedule it now
     */
    // transmitTick was stored relative to the checkpoint's curTick().
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        schedule(txEvent, curTick() + transmitTick);

    /*
     * unserialize receive address filter settings
     */
    UNSERIALIZE_SCALAR(rxFilterEnable);
    UNSERIALIZE_SCALAR(acceptBroadcast);
    UNSERIALIZE_SCALAR(acceptMulticast);
    UNSERIALIZE_SCALAR(acceptUnicast);
    UNSERIALIZE_SCALAR(acceptPerfect);
    UNSERIALIZE_SCALAR(acceptArp);
    UNSERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    UNSERIALIZE_SCALAR(intrTick);
    UNSERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick;
    UNSERIALIZE_SCALAR(intrEventTick);
    if (intrEventTick) {
        intrEvent = new IntrEvent(this, true);
        schedule(intrEvent, intrEventTick);
    }
}

// Python-visible factory: builds the device from its generated params.
NSGigE *
NSGigEParams::create()
{
    return new NSGigE(this);
}