// ns_gige.cc revision 9086
1/* 2 * Copyright (c) 2004-2005 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 * 28 * Authors: Nathan Binkert 29 * Lisa Hsu 30 */ 31 32/** @file 33 * Device module for modelling the National Semiconductor 34 * DP83820 ethernet controller. 
Does not support priority queueing 35 */ 36#include <deque> 37#include <string> 38 39#include "base/debug.hh" 40#include "base/inet.hh" 41#include "base/types.hh" 42#include "config/the_isa.hh" 43#include "cpu/thread_context.hh" 44#include "debug/EthernetAll.hh" 45#include "dev/etherlink.hh" 46#include "dev/ns_gige.hh" 47#include "dev/pciconfigall.hh" 48#include "mem/packet.hh" 49#include "mem/packet_access.hh" 50#include "params/NSGigE.hh" 51#include "sim/system.hh" 52 53// clang complains about std::set being overloaded with Packet::set if 54// we open up the entire namespace std 55using std::min; 56using std::ostream; 57using std::string; 58 59const char *NsRxStateStrings[] = 60{ 61 "rxIdle", 62 "rxDescRefr", 63 "rxDescRead", 64 "rxFifoBlock", 65 "rxFragWrite", 66 "rxDescWrite", 67 "rxAdvance" 68}; 69 70const char *NsTxStateStrings[] = 71{ 72 "txIdle", 73 "txDescRefr", 74 "txDescRead", 75 "txFifoBlock", 76 "txFragRead", 77 "txDescWrite", 78 "txAdvance" 79}; 80 81const char *NsDmaState[] = 82{ 83 "dmaIdle", 84 "dmaReading", 85 "dmaWriting", 86 "dmaReadWaiting", 87 "dmaWriteWaiting" 88}; 89 90using namespace Net; 91using namespace TheISA; 92 93/////////////////////////////////////////////////////////////////////// 94// 95// NSGigE PCI Device 96// 97NSGigE::NSGigE(Params *p) 98 : EtherDevice(p), ioEnable(false), 99 txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size), 100 txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL), 101 txXferLen(0), rxXferLen(0), rxDmaFree(false), txDmaFree(false), 102 clock(p->clock), 103 txState(txIdle), txEnable(false), CTDD(false), txHalt(false), 104 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle), 105 rxEnable(false), CRDD(false), rxPktBytes(0), rxHalt(false), 106 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false), 107 eepromState(eepromStart), eepromClk(false), eepromBitsToRx(0), 108 eepromOpcode(0), eepromAddress(0), eepromData(0), 109 dmaReadDelay(p->dma_read_delay), 
dmaWriteDelay(p->dma_write_delay), 110 dmaReadFactor(p->dma_read_factor), dmaWriteFactor(p->dma_write_factor), 111 rxDmaData(NULL), rxDmaAddr(0), rxDmaLen(0), 112 txDmaData(NULL), txDmaAddr(0), txDmaLen(0), 113 rxDmaReadEvent(this), rxDmaWriteEvent(this), 114 txDmaReadEvent(this), txDmaWriteEvent(this), 115 dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free), 116 txDelay(p->tx_delay), rxDelay(p->rx_delay), 117 rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this), 118 txEvent(this), rxFilterEnable(p->rx_filter), 119 acceptBroadcast(false), acceptMulticast(false), acceptUnicast(false), 120 acceptPerfect(false), acceptArp(false), multicastHashEnable(false), 121 intrDelay(p->intr_delay), intrTick(0), cpuPendingIntr(false), 122 intrEvent(0), interface(0) 123{ 124 125 126 interface = new NSGigEInt(name() + ".int0", this); 127 128 regsReset(); 129 memcpy(&rom.perfectMatch, p->hardware_address.bytes(), ETH_ADDR_LEN); 130 131 memset(&rxDesc32, 0, sizeof(rxDesc32)); 132 memset(&txDesc32, 0, sizeof(txDesc32)); 133 memset(&rxDesc64, 0, sizeof(rxDesc64)); 134 memset(&txDesc64, 0, sizeof(txDesc64)); 135} 136 137NSGigE::~NSGigE() 138{ 139 delete interface; 140} 141 142/** 143 * This is to write to the PCI general configuration registers 144 */ 145Tick 146NSGigE::writeConfig(PacketPtr pkt) 147{ 148 int offset = pkt->getAddr() & PCI_CONFIG_SIZE; 149 if (offset < PCI_DEVICE_SPECIFIC) 150 PciDev::writeConfig(pkt); 151 else 152 panic("Device specific PCI config space not implemented!\n"); 153 154 switch (offset) { 155 // seems to work fine without all these PCI settings, but i 156 // put in the IO to double check, an assertion will fail if we 157 // need to properly implement it 158 case PCI_COMMAND: 159 if (config.data[offset] & PCI_CMD_IOSE) 160 ioEnable = true; 161 else 162 ioEnable = false; 163 break; 164 } 165 166 return configDelay; 167} 168 169EtherInt* 170NSGigE::getEthPort(const std::string &if_name, int idx) 171{ 172 if (if_name == "interface") { 173 
if (interface->getPeer()) 174 panic("interface already connected to\n"); 175 return interface; 176 } 177 return NULL; 178} 179 180/** 181 * This reads the device registers, which are detailed in the NS83820 182 * spec sheet 183 */ 184Tick 185NSGigE::read(PacketPtr pkt) 186{ 187 assert(ioEnable); 188 189 pkt->allocate(); 190 191 //The mask is to give you only the offset into the device register file 192 Addr daddr = pkt->getAddr() & 0xfff; 193 DPRINTF(EthernetPIO, "read da=%#x pa=%#x size=%d\n", 194 daddr, pkt->getAddr(), pkt->getSize()); 195 196 197 // there are some reserved registers, you can see ns_gige_reg.h and 198 // the spec sheet for details 199 if (daddr > LAST && daddr <= RESERVED) { 200 panic("Accessing reserved register"); 201 } else if (daddr > RESERVED && daddr <= 0x3FC) { 202 return readConfig(pkt); 203 } else if (daddr >= MIB_START && daddr <= MIB_END) { 204 // don't implement all the MIB's. hopefully the kernel 205 // doesn't actually DEPEND upon their values 206 // MIB are just hardware stats keepers 207 pkt->set<uint32_t>(0); 208 pkt->makeAtomicResponse(); 209 return pioDelay; 210 } else if (daddr > 0x3FC) 211 panic("Something is messed up!\n"); 212 213 assert(pkt->getSize() == sizeof(uint32_t)); 214 uint32_t ® = *pkt->getPtr<uint32_t>(); 215 uint16_t rfaddr; 216 217 switch (daddr) { 218 case CR: 219 reg = regs.command; 220 //these are supposed to be cleared on a read 221 reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR); 222 break; 223 224 case CFGR: 225 reg = regs.config; 226 break; 227 228 case MEAR: 229 reg = regs.mear; 230 break; 231 232 case PTSCR: 233 reg = regs.ptscr; 234 break; 235 236 case ISR: 237 reg = regs.isr; 238 devIntrClear(ISR_ALL); 239 break; 240 241 case IMR: 242 reg = regs.imr; 243 break; 244 245 case IER: 246 reg = regs.ier; 247 break; 248 249 case IHR: 250 reg = regs.ihr; 251 break; 252 253 case TXDP: 254 reg = regs.txdp; 255 break; 256 257 case TXDP_HI: 258 reg = regs.txdp_hi; 259 break; 260 261 case TX_CFG: 262 reg = 
regs.txcfg; 263 break; 264 265 case GPIOR: 266 reg = regs.gpior; 267 break; 268 269 case RXDP: 270 reg = regs.rxdp; 271 break; 272 273 case RXDP_HI: 274 reg = regs.rxdp_hi; 275 break; 276 277 case RX_CFG: 278 reg = regs.rxcfg; 279 break; 280 281 case PQCR: 282 reg = regs.pqcr; 283 break; 284 285 case WCSR: 286 reg = regs.wcsr; 287 break; 288 289 case PCR: 290 reg = regs.pcr; 291 break; 292 293 // see the spec sheet for how RFCR and RFDR work 294 // basically, you write to RFCR to tell the machine 295 // what you want to do next, then you act upon RFDR, 296 // and the device will be prepared b/c of what you 297 // wrote to RFCR 298 case RFCR: 299 reg = regs.rfcr; 300 break; 301 302 case RFDR: 303 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR); 304 switch (rfaddr) { 305 // Read from perfect match ROM octets 306 case 0x000: 307 reg = rom.perfectMatch[1]; 308 reg = reg << 8; 309 reg += rom.perfectMatch[0]; 310 break; 311 case 0x002: 312 reg = rom.perfectMatch[3] << 8; 313 reg += rom.perfectMatch[2]; 314 break; 315 case 0x004: 316 reg = rom.perfectMatch[5] << 8; 317 reg += rom.perfectMatch[4]; 318 break; 319 default: 320 // Read filter hash table 321 if (rfaddr >= FHASH_ADDR && 322 rfaddr < FHASH_ADDR + FHASH_SIZE) { 323 324 // Only word-aligned reads supported 325 if (rfaddr % 2) 326 panic("unaligned read from filter hash table!"); 327 328 reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8; 329 reg += rom.filterHash[rfaddr - FHASH_ADDR]; 330 break; 331 } 332 333 panic("reading RFDR for something other than pattern" 334 " matching or hashing! 
%#x\n", rfaddr); 335 } 336 break; 337 338 case SRR: 339 reg = regs.srr; 340 break; 341 342 case MIBC: 343 reg = regs.mibc; 344 reg &= ~(MIBC_MIBS | MIBC_ACLR); 345 break; 346 347 case VRCR: 348 reg = regs.vrcr; 349 break; 350 351 case VTCR: 352 reg = regs.vtcr; 353 break; 354 355 case VDR: 356 reg = regs.vdr; 357 break; 358 359 case CCSR: 360 reg = regs.ccsr; 361 break; 362 363 case TBICR: 364 reg = regs.tbicr; 365 break; 366 367 case TBISR: 368 reg = regs.tbisr; 369 break; 370 371 case TANAR: 372 reg = regs.tanar; 373 break; 374 375 case TANLPAR: 376 reg = regs.tanlpar; 377 break; 378 379 case TANER: 380 reg = regs.taner; 381 break; 382 383 case TESR: 384 reg = regs.tesr; 385 break; 386 387 case M5REG: 388 reg = 0; 389 if (params()->rx_thread) 390 reg |= M5REG_RX_THREAD; 391 if (params()->tx_thread) 392 reg |= M5REG_TX_THREAD; 393 if (params()->rss) 394 reg |= M5REG_RSS; 395 break; 396 397 default: 398 panic("reading unimplemented register: addr=%#x", daddr); 399 } 400 401 DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n", 402 daddr, reg, reg); 403 404 pkt->makeAtomicResponse(); 405 return pioDelay; 406} 407 408Tick 409NSGigE::write(PacketPtr pkt) 410{ 411 assert(ioEnable); 412 413 Addr daddr = pkt->getAddr() & 0xfff; 414 DPRINTF(EthernetPIO, "write da=%#x pa=%#x size=%d\n", 415 daddr, pkt->getAddr(), pkt->getSize()); 416 417 if (daddr > LAST && daddr <= RESERVED) { 418 panic("Accessing reserved register"); 419 } else if (daddr > RESERVED && daddr <= 0x3FC) { 420 return writeConfig(pkt); 421 } else if (daddr > 0x3FC) 422 panic("Something is messed up!\n"); 423 424 if (pkt->getSize() == sizeof(uint32_t)) { 425 uint32_t reg = pkt->get<uint32_t>(); 426 uint16_t rfaddr; 427 428 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg); 429 430 switch (daddr) { 431 case CR: 432 regs.command = reg; 433 if (reg & CR_TXD) { 434 txEnable = false; 435 } else if (reg & CR_TXE) { 436 txEnable = true; 437 438 // the kernel is enabling the transmit machine 439 if 
(txState == txIdle) 440 txKick(); 441 } 442 443 if (reg & CR_RXD) { 444 rxEnable = false; 445 } else if (reg & CR_RXE) { 446 rxEnable = true; 447 448 if (rxState == rxIdle) 449 rxKick(); 450 } 451 452 if (reg & CR_TXR) 453 txReset(); 454 455 if (reg & CR_RXR) 456 rxReset(); 457 458 if (reg & CR_SWI) 459 devIntrPost(ISR_SWI); 460 461 if (reg & CR_RST) { 462 txReset(); 463 rxReset(); 464 465 regsReset(); 466 } 467 break; 468 469 case CFGR: 470 if (reg & CFGR_LNKSTS || 471 reg & CFGR_SPDSTS || 472 reg & CFGR_DUPSTS || 473 reg & CFGR_RESERVED || 474 reg & CFGR_T64ADDR || 475 reg & CFGR_PCI64_DET) { 476 // First clear all writable bits 477 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS | 478 CFGR_RESERVED | CFGR_T64ADDR | 479 CFGR_PCI64_DET; 480 // Now set the appropriate writable bits 481 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS | 482 CFGR_RESERVED | CFGR_T64ADDR | 483 CFGR_PCI64_DET); 484 } 485 486// all these #if 0's are because i don't THINK the kernel needs to 487// have these implemented. if there is a problem relating to one of 488// these, you may need to add functionality in. 
489 490// grouped together and #if 0'ed to avoid empty if body and make clang happy 491#if 0 492 if (reg & CFGR_TBI_EN) ; 493 if (reg & CFGR_MODE_1000) ; 494 495 if (reg & CFGR_PINT_DUPSTS || 496 reg & CFGR_PINT_LNKSTS || 497 reg & CFGR_PINT_SPDSTS) 498 ; 499 500 if (reg & CFGR_TMRTEST) ; 501 if (reg & CFGR_MRM_DIS) ; 502 if (reg & CFGR_MWI_DIS) ; 503 504 if (reg & CFGR_DATA64_EN) ; 505 if (reg & CFGR_M64ADDR) ; 506 if (reg & CFGR_PHY_RST) ; 507 if (reg & CFGR_PHY_DIS) ; 508 509 if (reg & CFGR_REQALG) ; 510 if (reg & CFGR_SB) ; 511 if (reg & CFGR_POW) ; 512 if (reg & CFGR_EXD) ; 513 if (reg & CFGR_PESEL) ; 514 if (reg & CFGR_BROM_DIS) ; 515 if (reg & CFGR_EXT_125) ; 516 if (reg & CFGR_BEM) ; 517 518 if (reg & CFGR_T64ADDR) ; 519 // panic("CFGR_T64ADDR is read only register!\n"); 520#endif 521 if (reg & CFGR_AUTO_1000) 522 panic("CFGR_AUTO_1000 not implemented!\n"); 523 524 if (reg & CFGR_PCI64_DET) 525 panic("CFGR_PCI64_DET is read only register!\n"); 526 527 if (reg & CFGR_EXTSTS_EN) 528 extstsEnable = true; 529 else 530 extstsEnable = false; 531 break; 532 533 case MEAR: 534 // Clear writable bits 535 regs.mear &= MEAR_EEDO; 536 // Set appropriate writable bits 537 regs.mear |= reg & ~MEAR_EEDO; 538 539 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address) 540 // even though it could get it through RFDR 541 if (reg & MEAR_EESEL) { 542 // Rising edge of clock 543 if (reg & MEAR_EECLK && !eepromClk) 544 eepromKick(); 545 } 546 else { 547 eepromState = eepromStart; 548 regs.mear &= ~MEAR_EEDI; 549 } 550 551 eepromClk = reg & MEAR_EECLK; 552 553 // since phy is completely faked, MEAR_MD* don't matter 554 555// grouped together and #if 0'ed to avoid empty if body and make clang happy 556#if 0 557 if (reg & MEAR_MDIO) ; 558 if (reg & MEAR_MDDIR) ; 559 if (reg & MEAR_MDC) ; 560#endif 561 break; 562 563 case PTSCR: 564 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY); 565 // these control BISTs for various parts of chip - we 566 // don't care or do just fake that the 
BIST is done 567 if (reg & PTSCR_RBIST_EN) 568 regs.ptscr |= PTSCR_RBIST_DONE; 569 if (reg & PTSCR_EEBIST_EN) 570 regs.ptscr &= ~PTSCR_EEBIST_EN; 571 if (reg & PTSCR_EELOAD_EN) 572 regs.ptscr &= ~PTSCR_EELOAD_EN; 573 break; 574 575 case ISR: /* writing to the ISR has no effect */ 576 panic("ISR is a read only register!\n"); 577 578 case IMR: 579 regs.imr = reg; 580 devIntrChangeMask(); 581 break; 582 583 case IER: 584 regs.ier = reg; 585 break; 586 587 case IHR: 588 regs.ihr = reg; 589 /* not going to implement real interrupt holdoff */ 590 break; 591 592 case TXDP: 593 regs.txdp = (reg & 0xFFFFFFFC); 594 assert(txState == txIdle); 595 CTDD = false; 596 break; 597 598 case TXDP_HI: 599 regs.txdp_hi = reg; 600 break; 601 602 case TX_CFG: 603 regs.txcfg = reg; 604#if 0 605 if (reg & TX_CFG_CSI) ; 606 if (reg & TX_CFG_HBI) ; 607 if (reg & TX_CFG_MLB) ; 608 if (reg & TX_CFG_ATP) ; 609 if (reg & TX_CFG_ECRETRY) { 610 /* 611 * this could easily be implemented, but considering 612 * the network is just a fake pipe, wouldn't make 613 * sense to do this 614 */ 615 } 616 617 if (reg & TX_CFG_BRST_DIS) ; 618#endif 619 620#if 0 621 /* we handle our own DMA, ignore the kernel's exhortations */ 622 if (reg & TX_CFG_MXDMA) ; 623#endif 624 625 // also, we currently don't care about fill/drain 626 // thresholds though this may change in the future with 627 // more realistic networks or a driver which changes it 628 // according to feedback 629 630 break; 631 632 case GPIOR: 633 // Only write writable bits 634 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN 635 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN; 636 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN 637 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN); 638 /* these just control general purpose i/o pins, don't matter */ 639 break; 640 641 case RXDP: 642 regs.rxdp = reg; 643 CRDD = false; 644 break; 645 646 case RXDP_HI: 647 regs.rxdp_hi = reg; 648 break; 649 650 case RX_CFG: 651 regs.rxcfg = reg; 652#if 
0 653 if (reg & RX_CFG_AEP) ; 654 if (reg & RX_CFG_ARP) ; 655 if (reg & RX_CFG_STRIPCRC) ; 656 if (reg & RX_CFG_RX_RD) ; 657 if (reg & RX_CFG_ALP) ; 658 if (reg & RX_CFG_AIRL) ; 659 660 /* we handle our own DMA, ignore what kernel says about it */ 661 if (reg & RX_CFG_MXDMA) ; 662 663 //also, we currently don't care about fill/drain thresholds 664 //though this may change in the future with more realistic 665 //networks or a driver which changes it according to feedback 666 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ; 667#endif 668 break; 669 670 case PQCR: 671 /* there is no priority queueing used in the linux 2.6 driver */ 672 regs.pqcr = reg; 673 break; 674 675 case WCSR: 676 /* not going to implement wake on LAN */ 677 regs.wcsr = reg; 678 break; 679 680 case PCR: 681 /* not going to implement pause control */ 682 regs.pcr = reg; 683 break; 684 685 case RFCR: 686 regs.rfcr = reg; 687 688 rxFilterEnable = (reg & RFCR_RFEN) ? true : false; 689 acceptBroadcast = (reg & RFCR_AAB) ? true : false; 690 acceptMulticast = (reg & RFCR_AAM) ? true : false; 691 acceptUnicast = (reg & RFCR_AAU) ? true : false; 692 acceptPerfect = (reg & RFCR_APM) ? true : false; 693 acceptArp = (reg & RFCR_AARP) ? true : false; 694 multicastHashEnable = (reg & RFCR_MHEN) ? 
true : false; 695 696#if 0 697 if (reg & RFCR_APAT) 698 panic("RFCR_APAT not implemented!\n"); 699#endif 700 if (reg & RFCR_UHEN) 701 panic("Unicast hash filtering not used by drivers!\n"); 702 703 if (reg & RFCR_ULM) 704 panic("RFCR_ULM not implemented!\n"); 705 706 break; 707 708 case RFDR: 709 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR); 710 switch (rfaddr) { 711 case 0x000: 712 rom.perfectMatch[0] = (uint8_t)reg; 713 rom.perfectMatch[1] = (uint8_t)(reg >> 8); 714 break; 715 case 0x002: 716 rom.perfectMatch[2] = (uint8_t)reg; 717 rom.perfectMatch[3] = (uint8_t)(reg >> 8); 718 break; 719 case 0x004: 720 rom.perfectMatch[4] = (uint8_t)reg; 721 rom.perfectMatch[5] = (uint8_t)(reg >> 8); 722 break; 723 default: 724 725 if (rfaddr >= FHASH_ADDR && 726 rfaddr < FHASH_ADDR + FHASH_SIZE) { 727 728 // Only word-aligned writes supported 729 if (rfaddr % 2) 730 panic("unaligned write to filter hash table!"); 731 732 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg; 733 rom.filterHash[rfaddr - FHASH_ADDR + 1] 734 = (uint8_t)(reg >> 8); 735 break; 736 } 737 panic("writing RFDR for something other than pattern matching\ 738 or hashing! 
%#x\n", rfaddr); 739 } 740 741 case BRAR: 742 regs.brar = reg; 743 break; 744 745 case BRDR: 746 panic("the driver never uses BRDR, something is wrong!\n"); 747 748 case SRR: 749 panic("SRR is read only register!\n"); 750 751 case MIBC: 752 panic("the driver never uses MIBC, something is wrong!\n"); 753 754 case VRCR: 755 regs.vrcr = reg; 756 break; 757 758 case VTCR: 759 regs.vtcr = reg; 760 break; 761 762 case VDR: 763 panic("the driver never uses VDR, something is wrong!\n"); 764 765 case CCSR: 766 /* not going to implement clockrun stuff */ 767 regs.ccsr = reg; 768 break; 769 770 case TBICR: 771 regs.tbicr = reg; 772 if (reg & TBICR_MR_LOOPBACK) 773 panic("TBICR_MR_LOOPBACK never used, something wrong!\n"); 774 775 if (reg & TBICR_MR_AN_ENABLE) { 776 regs.tanlpar = regs.tanar; 777 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS); 778 } 779 780#if 0 781 if (reg & TBICR_MR_RESTART_AN) ; 782#endif 783 784 break; 785 786 case TBISR: 787 panic("TBISR is read only register!\n"); 788 789 case TANAR: 790 // Only write the writable bits 791 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED; 792 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED); 793 794 // Pause capability unimplemented 795#if 0 796 if (reg & TANAR_PS2) ; 797 if (reg & TANAR_PS1) ; 798#endif 799 800 break; 801 802 case TANLPAR: 803 panic("this should only be written to by the fake phy!\n"); 804 805 case TANER: 806 panic("TANER is read only register!\n"); 807 808 case TESR: 809 regs.tesr = reg; 810 break; 811 812 default: 813 panic("invalid register access daddr=%#x", daddr); 814 } 815 } else { 816 panic("Invalid Request Size"); 817 } 818 pkt->makeAtomicResponse(); 819 return pioDelay; 820} 821 822void 823NSGigE::devIntrPost(uint32_t interrupts) 824{ 825 if (interrupts & ISR_RESERVE) 826 panic("Cannot set a reserved interrupt"); 827 828 if (interrupts & ISR_NOIMPL) 829 warn("interrupt not implemented %#x\n", interrupts); 830 831 interrupts &= ISR_IMPL; 832 regs.isr |= interrupts; 833 
834 if (interrupts & regs.imr) { 835 if (interrupts & ISR_SWI) { 836 totalSwi++; 837 } 838 if (interrupts & ISR_RXIDLE) { 839 totalRxIdle++; 840 } 841 if (interrupts & ISR_RXOK) { 842 totalRxOk++; 843 } 844 if (interrupts & ISR_RXDESC) { 845 totalRxDesc++; 846 } 847 if (interrupts & ISR_TXOK) { 848 totalTxOk++; 849 } 850 if (interrupts & ISR_TXIDLE) { 851 totalTxIdle++; 852 } 853 if (interrupts & ISR_TXDESC) { 854 totalTxDesc++; 855 } 856 if (interrupts & ISR_RXORN) { 857 totalRxOrn++; 858 } 859 } 860 861 DPRINTF(EthernetIntr, 862 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n", 863 interrupts, regs.isr, regs.imr); 864 865 if ((regs.isr & regs.imr)) { 866 Tick when = curTick(); 867 if ((regs.isr & regs.imr & ISR_NODELAY) == 0) 868 when += intrDelay; 869 postedInterrupts++; 870 cpuIntrPost(when); 871 } 872} 873 874/* writing this interrupt counting stats inside this means that this function 875 is now limited to being used to clear all interrupts upon the kernel 876 reading isr and servicing. just telling you in case you were thinking 877 of expanding use. 
878*/ 879void 880NSGigE::devIntrClear(uint32_t interrupts) 881{ 882 if (interrupts & ISR_RESERVE) 883 panic("Cannot clear a reserved interrupt"); 884 885 if (regs.isr & regs.imr & ISR_SWI) { 886 postedSwi++; 887 } 888 if (regs.isr & regs.imr & ISR_RXIDLE) { 889 postedRxIdle++; 890 } 891 if (regs.isr & regs.imr & ISR_RXOK) { 892 postedRxOk++; 893 } 894 if (regs.isr & regs.imr & ISR_RXDESC) { 895 postedRxDesc++; 896 } 897 if (regs.isr & regs.imr & ISR_TXOK) { 898 postedTxOk++; 899 } 900 if (regs.isr & regs.imr & ISR_TXIDLE) { 901 postedTxIdle++; 902 } 903 if (regs.isr & regs.imr & ISR_TXDESC) { 904 postedTxDesc++; 905 } 906 if (regs.isr & regs.imr & ISR_RXORN) { 907 postedRxOrn++; 908 } 909 910 interrupts &= ~ISR_NOIMPL; 911 regs.isr &= ~interrupts; 912 913 DPRINTF(EthernetIntr, 914 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n", 915 interrupts, regs.isr, regs.imr); 916 917 if (!(regs.isr & regs.imr)) 918 cpuIntrClear(); 919} 920 921void 922NSGigE::devIntrChangeMask() 923{ 924 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n", 925 regs.isr, regs.imr, regs.isr & regs.imr); 926 927 if (regs.isr & regs.imr) 928 cpuIntrPost(curTick()); 929 else 930 cpuIntrClear(); 931} 932 933void 934NSGigE::cpuIntrPost(Tick when) 935{ 936 // If the interrupt you want to post is later than an interrupt 937 // already scheduled, just let it post in the coming one and don't 938 // schedule another. 939 // HOWEVER, must be sure that the scheduled intrTick is in the 940 // future (this was formerly the source of a bug) 941 /** 942 * @todo this warning should be removed and the intrTick code should 943 * be fixed. 
944 */ 945 assert(when >= curTick()); 946 assert(intrTick >= curTick() || intrTick == 0); 947 if (when > intrTick && intrTick != 0) { 948 DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n", 949 intrTick); 950 return; 951 } 952 953 intrTick = when; 954 if (intrTick < curTick()) { 955 Debug::breakpoint(); 956 intrTick = curTick(); 957 } 958 959 DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n", 960 intrTick); 961 962 if (intrEvent) 963 intrEvent->squash(); 964 intrEvent = new IntrEvent(this, true); 965 schedule(intrEvent, intrTick); 966} 967 968void 969NSGigE::cpuInterrupt() 970{ 971 assert(intrTick == curTick()); 972 973 // Whether or not there's a pending interrupt, we don't care about 974 // it anymore 975 intrEvent = 0; 976 intrTick = 0; 977 978 // Don't send an interrupt if there's already one 979 if (cpuPendingIntr) { 980 DPRINTF(EthernetIntr, 981 "would send an interrupt now, but there's already pending\n"); 982 } else { 983 // Send interrupt 984 cpuPendingIntr = true; 985 986 DPRINTF(EthernetIntr, "posting interrupt\n"); 987 intrPost(); 988 } 989} 990 991void 992NSGigE::cpuIntrClear() 993{ 994 if (!cpuPendingIntr) 995 return; 996 997 if (intrEvent) { 998 intrEvent->squash(); 999 intrEvent = 0; 1000 } 1001 1002 intrTick = 0; 1003 1004 cpuPendingIntr = false; 1005 1006 DPRINTF(EthernetIntr, "clearing interrupt\n"); 1007 intrClear(); 1008} 1009 1010bool 1011NSGigE::cpuIntrPending() const 1012{ return cpuPendingIntr; } 1013 1014void 1015NSGigE::txReset() 1016{ 1017 1018 DPRINTF(Ethernet, "transmit reset\n"); 1019 1020 CTDD = false; 1021 txEnable = false;; 1022 txFragPtr = 0; 1023 assert(txDescCnt == 0); 1024 txFifo.clear(); 1025 txState = txIdle; 1026 assert(txDmaState == dmaIdle); 1027} 1028 1029void 1030NSGigE::rxReset() 1031{ 1032 DPRINTF(Ethernet, "receive reset\n"); 1033 1034 CRDD = false; 1035 assert(rxPktBytes == 0); 1036 rxEnable = false; 1037 rxFragPtr = 0; 1038 assert(rxDescCnt == 0); 1039 assert(rxDmaState == 
dmaIdle); 1040 rxFifo.clear(); 1041 rxState = rxIdle; 1042} 1043 1044void 1045NSGigE::regsReset() 1046{ 1047 memset(®s, 0, sizeof(regs)); 1048 regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000); 1049 regs.mear = 0x12; 1050 regs.txcfg = 0x120; // set drain threshold to 1024 bytes and 1051 // fill threshold to 32 bytes 1052 regs.rxcfg = 0x4; // set drain threshold to 16 bytes 1053 regs.srr = 0x0103; // set the silicon revision to rev B or 0x103 1054 regs.mibc = MIBC_FRZ; 1055 regs.vdr = 0x81; // set the vlan tag type to 802.1q 1056 regs.tesr = 0xc000; // TBI capable of both full and half duplex 1057 regs.brar = 0xffffffff; 1058 1059 extstsEnable = false; 1060 acceptBroadcast = false; 1061 acceptMulticast = false; 1062 acceptUnicast = false; 1063 acceptPerfect = false; 1064 acceptArp = false; 1065} 1066 1067bool 1068NSGigE::doRxDmaRead() 1069{ 1070 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting); 1071 rxDmaState = dmaReading; 1072 1073 if (dmaPending() || getState() != Running) 1074 rxDmaState = dmaReadWaiting; 1075 else 1076 dmaRead(rxDmaAddr, rxDmaLen, &rxDmaReadEvent, (uint8_t*)rxDmaData); 1077 1078 return true; 1079} 1080 1081void 1082NSGigE::rxDmaReadDone() 1083{ 1084 assert(rxDmaState == dmaReading); 1085 rxDmaState = dmaIdle; 1086 1087 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n", 1088 rxDmaAddr, rxDmaLen); 1089 DDUMP(EthernetDMA, rxDmaData, rxDmaLen); 1090 1091 // If the transmit state machine has a pending DMA, let it go first 1092 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting) 1093 txKick(); 1094 1095 rxKick(); 1096} 1097 1098bool 1099NSGigE::doRxDmaWrite() 1100{ 1101 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting); 1102 rxDmaState = dmaWriting; 1103 1104 if (dmaPending() || getState() != Running) 1105 rxDmaState = dmaWriteWaiting; 1106 else 1107 dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaWriteEvent, (uint8_t*)rxDmaData); 1108 return true; 1109} 1110 1111void 1112NSGigE::rxDmaWriteDone() 1113{ 1114 
assert(rxDmaState == dmaWriting); 1115 rxDmaState = dmaIdle; 1116 1117 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n", 1118 rxDmaAddr, rxDmaLen); 1119 DDUMP(EthernetDMA, rxDmaData, rxDmaLen); 1120 1121 // If the transmit state machine has a pending DMA, let it go first 1122 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting) 1123 txKick(); 1124 1125 rxKick(); 1126} 1127 1128void 1129NSGigE::rxKick() 1130{ 1131 bool is64bit = (bool)(regs.config & CFGR_M64ADDR); 1132 1133 DPRINTF(EthernetSM, 1134 "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n", 1135 NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32); 1136 1137 Addr link, bufptr; 1138 uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts; 1139 uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts; 1140 1141 next: 1142 if (clock) { 1143 if (rxKickTick > curTick()) { 1144 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n", 1145 rxKickTick); 1146 1147 goto exit; 1148 } 1149 1150 // Go to the next state machine clock tick. 1151 rxKickTick = curTick() + ticks(1); 1152 } 1153 1154 switch(rxDmaState) { 1155 case dmaReadWaiting: 1156 if (doRxDmaRead()) 1157 goto exit; 1158 break; 1159 case dmaWriteWaiting: 1160 if (doRxDmaWrite()) 1161 goto exit; 1162 break; 1163 default: 1164 break; 1165 } 1166 1167 link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link; 1168 bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr; 1169 1170 // see state machine from spec for details 1171 // the way this works is, if you finish work on one state and can 1172 // go directly to another, you do that through jumping to the 1173 // label "next". however, if you have intermediate work, like DMA 1174 // so that you can't go to the next state yet, you go to exit and 1175 // exit the loop. however, when the DMA is done it will trigger 1176 // an event and come back to this loop. 
1177 switch (rxState) { 1178 case rxIdle: 1179 if (!rxEnable) { 1180 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n"); 1181 goto exit; 1182 } 1183 1184 if (CRDD) { 1185 rxState = rxDescRefr; 1186 1187 rxDmaAddr = regs.rxdp & 0x3fffffff; 1188 rxDmaData = 1189 is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link; 1190 rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link); 1191 rxDmaFree = dmaDescFree; 1192 1193 descDmaReads++; 1194 descDmaRdBytes += rxDmaLen; 1195 1196 if (doRxDmaRead()) 1197 goto exit; 1198 } else { 1199 rxState = rxDescRead; 1200 1201 rxDmaAddr = regs.rxdp & 0x3fffffff; 1202 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32; 1203 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32); 1204 rxDmaFree = dmaDescFree; 1205 1206 descDmaReads++; 1207 descDmaRdBytes += rxDmaLen; 1208 1209 if (doRxDmaRead()) 1210 goto exit; 1211 } 1212 break; 1213 1214 case rxDescRefr: 1215 if (rxDmaState != dmaIdle) 1216 goto exit; 1217 1218 rxState = rxAdvance; 1219 break; 1220 1221 case rxDescRead: 1222 if (rxDmaState != dmaIdle) 1223 goto exit; 1224 1225 DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n", 1226 regs.rxdp & 0x3fffffff); 1227 DPRINTF(EthernetDesc, 1228 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n", 1229 link, bufptr, cmdsts, extsts); 1230 1231 if (cmdsts & CMDSTS_OWN) { 1232 devIntrPost(ISR_RXIDLE); 1233 rxState = rxIdle; 1234 goto exit; 1235 } else { 1236 rxState = rxFifoBlock; 1237 rxFragPtr = bufptr; 1238 rxDescCnt = cmdsts & CMDSTS_LEN_MASK; 1239 } 1240 break; 1241 1242 case rxFifoBlock: 1243 if (!rxPacket) { 1244 /** 1245 * @todo in reality, we should be able to start processing 1246 * the packet as it arrives, and not have to wait for the 1247 * full packet ot be in the receive fifo. 1248 */ 1249 if (rxFifo.empty()) 1250 goto exit; 1251 1252 DPRINTF(EthernetSM, "****processing receive of new packet****\n"); 1253 1254 // If we don't have a packet, grab a new one from the fifo. 
            // Stage the fifo's head packet into the current
            // descriptor; track our position in its data buffer.
            rxPacket = rxFifo.front();
            rxPktBytes = rxPacket->length;
            rxPacketBufPtr = rxPacket->data;

#if TRACING_ON
            if (DTRACE(Ethernet)) {
                IpPtr ip(rxPacket);
                if (ip) {
                    DPRINTF(Ethernet, "ID is %d\n", ip->id());
                    TcpPtr tcp(ip);
                    if (tcp) {
                        DPRINTF(Ethernet,
                                "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                                tcp->sport(), tcp->dport(), tcp->seq(),
                                tcp->ack());
                    }
                }
            }
#endif

            // sanity check - i think the driver behaves like this
            assert(rxDescCnt >= rxPktBytes);
            rxFifo.pop();
        }


        // don't need the && rxDescCnt > 0 if driver sanity check
        // above holds
        if (rxPktBytes > 0) {
            rxState = rxFragWrite;
            // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
            // check holds
            rxXferLen = rxPktBytes;

            rxDmaAddr = rxFragPtr & 0x3fffffff;
            rxDmaData = rxPacketBufPtr;
            rxDmaLen = rxXferLen;
            rxDmaFree = dmaDataFree;

            if (doRxDmaWrite())
                goto exit;

        } else {
            // Packet fully copied out; move on to descriptor writeback.
            rxState = rxDescWrite;

            //if (rxPktBytes == 0) {  /* packet is done */
            assert(rxPktBytes == 0);
            DPRINTF(EthernetSM, "done with receiving packet\n");

            // Hand the descriptor back with OK status and the received
            // length in the low 16 bits of cmdsts.
            cmdsts |= CMDSTS_OWN;
            cmdsts &= ~CMDSTS_MORE;
            cmdsts |= CMDSTS_OK;
            cmdsts &= 0xffff0000;
            cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE

#if 0
            /*
             * all the driver uses these are for its own stats keeping
             * which we don't care about, aren't necessary for
             * functionality and doing this would just slow us down.
             * if they end up using this in a later version for
             * functional purposes, just undef
             */
            if (rxFilterEnable) {
                cmdsts &= ~CMDSTS_DEST_MASK;
                const EthAddr &dst = rxFifoFront()->dst();
                if (dst->unicast())
                    cmdsts |= CMDSTS_DEST_SELF;
                if (dst->multicast())
                    cmdsts |= CMDSTS_DEST_MULTI;
                if (dst->broadcast())
                    cmdsts |= CMDSTS_DEST_MASK;
            }
#endif

            // Receive-side checksum offload status: verify IP and
            // TCP/UDP checksums and record any errors in extsts.
            IpPtr ip(rxPacket);
            if (extstsEnable && ip) {
                extsts |= EXTSTS_IPPKT;
                rxIpChecksums++;
                if (cksum(ip) != 0) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    extsts |= EXTSTS_IPERR;
                }
                TcpPtr tcp(ip);
                UdpPtr udp(ip);
                if (tcp) {
                    extsts |= EXTSTS_TCPPKT;
                    rxTcpChecksums++;
                    if (cksum(tcp) != 0) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        extsts |= EXTSTS_TCPERR;

                    }
                } else if (udp) {
                    extsts |= EXTSTS_UDPPKT;
                    rxUdpChecksums++;
                    if (cksum(udp) != 0) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        extsts |= EXTSTS_UDPERR;
                    }
                }
            }
            rxPacket = 0;

            /*
             * the driver seems to always receive into desc buffers
             * of size 1514, so you never have a pkt that is split
             * into multiple descriptors on the receive side, so
             * i don't implement that case, hence the assert above.
             */

            DPRINTF(EthernetDesc,
                    "rxDesc: addr=%08x writeback cmdsts extsts\n",
                    regs.rxdp & 0x3fffffff);
            DPRINTF(EthernetDesc,
                    "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                    link, bufptr, cmdsts, extsts);

            // Write cmdsts and extsts back in one DMA; this assumes
            // extsts immediately follows cmdsts in both ns_desc
            // layouts -- confirm against dev/ns_gige_reg.h.
            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &cmdsts;
            if (is64bit) {
                rxDmaAddr += offsetof(ns_desc64, cmdsts);
                rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
            } else {
                rxDmaAddr += offsetof(ns_desc32, cmdsts);
                rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
            }
            rxDmaFree = dmaDescFree;

            descDmaWrites++;
            descDmaWrBytes += rxDmaLen;

            if (doRxDmaWrite())
                goto exit;
        }
        break;

      case rxFragWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        // The fragment landed in memory; advance the cursors.
        rxPacketBufPtr += rxXferLen;
        rxFragPtr += rxXferLen;
        rxPktBytes -= rxXferLen;

        rxState = rxFifoBlock;
        break;

      case rxDescWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        assert(cmdsts & CMDSTS_OWN);

        assert(rxPacket == 0);
        devIntrPost(ISR_RXOK);

        if (cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_RXDESC);

        if (!rxEnable) {
            DPRINTF(EthernetSM, "Halting the RX state machine\n");
            rxState = rxIdle;
            goto exit;
        } else
            rxState = rxAdvance;
        break;

      case rxAdvance:
        if (link == 0) {
            // End of the descriptor chain: remember the current
            // descriptor is done (CRDD) so it is refreshed on restart.
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            CRDD = true;
            goto exit;
        } else {
            if (rxDmaState != dmaIdle)
                goto exit;
            rxState = rxDescRead;
            regs.rxdp = link;
            CRDD = false;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
            rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            NsRxStateStrings[rxState]);
    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            NsRxStateStrings[rxState]);

    if (clock && !rxKickEvent.scheduled())
        schedule(rxKickEvent, rxKickTick);
}

/**
 * Try to send the packet at the head of txFifo out over the link.
 * On a successful send the packet is popped, tx statistics are
 * updated and ISR_TXOK is posted; if packets remain (or the send
 * was refused) a retry is scheduled.
 */
void
NSGigE::transmit()
{
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "nothing to transmit\n");
        return;
    }

    DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
            txFifo.size());
    if (interface->sendPacket(txFifo.front())) {
#if TRACING_ON
        if (DTRACE(Ethernet)) {
            IpPtr ip(txFifo.front());
            if (ip) {
                DPRINTF(Ethernet, "ID is %d\n", ip->id());
                TcpPtr tcp(ip);
                if (tcp) {
                    DPRINTF(Ethernet,
                            "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                            tcp->sport(), tcp->dport(), tcp->seq(),
                            tcp->ack());
                }
            }
        }
#endif

        DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
        txBytes += txFifo.front()->length;
        txPackets++;

        DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
                txFifo.avail());
        txFifo.pop();

        /*
         * normally do a writeback of the descriptor here, and ONLY
         * after that is done, send this interrupt.  but since our
         * stuff never actually fails, just do this interrupt here,
         * otherwise the code has to stray from this nice format.
         * besides, it's functionally the same.
         */
        devIntrPost(ISR_TXOK);
    }

    // If anything is left in the fifo, make sure another transmit
    // attempt is scheduled.
    if (!txFifo.empty() && !txEvent.scheduled()) {
        DPRINTF(Ethernet, "reschedule transmit\n");
        schedule(txEvent, curTick() + retryTime);
    }
}

/**
 * Kick off a tx-side DMA read.  If the DMA engine is busy or the
 * device isn't in the Running state, the request is parked in
 * dmaReadWaiting and retried later from txKick().  Always returns
 * true; callers use the return value to exit their state machine.
 */
bool
NSGigE::doTxDmaRead()
{
    assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
    txDmaState = dmaReading;

    if (dmaPending() || getState() != Running)
        txDmaState = dmaReadWaiting;
    else
        dmaRead(txDmaAddr, txDmaLen, &txDmaReadEvent, (uint8_t*)txDmaData);

    return true;
}

// Completion callback for a tx-side DMA read.
void
NSGigE::txDmaReadDone()
{
    assert(txDmaState == dmaReading);
    txDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
            txDmaAddr, txDmaLen);
    DDUMP(EthernetDMA, txDmaData, txDmaLen);

    // If the receive state machine has a pending DMA, let it go first
    if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
        rxKick();

    txKick();
}

/**
 * Kick off a tx-side DMA write; same park-and-retry protocol as
 * doTxDmaRead().  Always returns true.
 */
bool
NSGigE::doTxDmaWrite()
{
    assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
    txDmaState = dmaWriting;

    if (dmaPending() || getState() != Running)
        txDmaState = dmaWriteWaiting;
    else
        dmaWrite(txDmaAddr, txDmaLen, &txDmaWriteEvent, (uint8_t*)txDmaData);
    return true;
}

// Completion callback for a tx-side DMA write.
void
NSGigE::txDmaWriteDone()
{
    assert(txDmaState == dmaWriting);
    txDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
            txDmaAddr, txDmaLen);
    DDUMP(EthernetDMA, txDmaData, txDmaLen);

    // If the receive state machine has a pending DMA, let it go first
    if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
        rxKick();

    txKick();
}

/**
 * Transmit-side state machine.  Runs until it can make no further
 * progress (waiting on DMA, out of descriptors, or disabled) and
 * then reschedules itself if the device has a clock.
 */
void
NSGigE::txKick()
{
    bool is64bit = (bool)(regs.config & CFGR_M64ADDR);

    DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n",
            NsTxStateStrings[txState], is64bit ?
            64 : 32);

    Addr link, bufptr;
    // Aliases into whichever cached descriptor layout (32- or 64-bit)
    // is currently in use.
    uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts;
    uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts;

  next:
    if (clock) {
        if (txKickTick > curTick()) {
            DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
                    txKickTick);
            goto exit;
        }

        // Go to the next state machine clock tick.
        txKickTick = curTick() + ticks(1);
    }

    // Retry any DMA that had to wait for the engine to free up.
    switch(txDmaState) {
      case dmaReadWaiting:
        if (doTxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doTxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link;
    bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr;
    switch (txState) {
      case txIdle:
        if (!txEnable) {
            DPRINTF(EthernetSM, "Transmit disabled.  Nothing to do.\n");
            goto exit;
        }

        if (CTDD) {
            // Current tx descriptor done: refresh only its link field.
            txState = txDescRefr;

            txDmaAddr = regs.txdp & 0x3fffffff;
            txDmaData =
                is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link;
            txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;

        } else {
            // Read the whole descriptor pointed to by txdp.
            txState = txDescRead;

            txDmaAddr = regs.txdp & 0x3fffffff;
            txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
            txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      case txDescRefr:
        if (txDmaState != dmaIdle)
            goto exit;

        txState = txAdvance;
        break;

      case txDescRead:
        if (txDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n",
                regs.txdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
                link, bufptr, cmdsts, extsts);

        if (cmdsts & CMDSTS_OWN) {
            // Device owns the descriptor: there is data to send.
            txState = txFifoBlock;
            txFragPtr = bufptr;
            txDescCnt = cmdsts & CMDSTS_LEN_MASK;
        } else {
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        }
        break;

      case txFifoBlock:
        if (!txPacket) {
            // Allocate a fresh staging buffer for the packet being
            // assembled from descriptor fragments.
            DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
            txPacket = new EthPacketData(16384);
            txPacketBufPtr = txPacket->data;
        }

        if (txDescCnt == 0) {
            DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
            if (cmdsts & CMDSTS_MORE) {
                // Multi-descriptor packet: release this descriptor and
                // continue with the next one.
                DPRINTF(EthernetSM, "there are more descriptors to come\n");
                txState = txDescWrite;

                cmdsts &= ~CMDSTS_OWN;

                txDmaAddr = regs.txdp & 0x3fffffff;
                txDmaData = &cmdsts;
                if (is64bit) {
                    txDmaAddr += offsetof(ns_desc64, cmdsts);
                    txDmaLen = sizeof(txDesc64.cmdsts);
                } else {
                    txDmaAddr += offsetof(ns_desc32, cmdsts);
                    txDmaLen = sizeof(txDesc32.cmdsts);
                }
                txDmaFree = dmaDescFree;

                if (doTxDmaWrite())
                    goto exit;

            } else { /* this packet is totally done */
                DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
                /* deal with the packet that just finished */
                if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
                    IpPtr ip(txPacket);
                    if (extsts & EXTSTS_UDPPKT) {
                        UdpPtr udp(ip);
if (udp) { 1719 udp->sum(0); 1720 udp->sum(cksum(udp)); 1721 txUdpChecksums++; 1722 } else { 1723 Debug::breakpoint(); 1724 warn_once("UDPPKT set, but not UDP!\n"); 1725 } 1726 } else if (extsts & EXTSTS_TCPPKT) { 1727 TcpPtr tcp(ip); 1728 if (tcp) { 1729 tcp->sum(0); 1730 tcp->sum(cksum(tcp)); 1731 txTcpChecksums++; 1732 } else { 1733 Debug::breakpoint(); 1734 warn_once("TCPPKT set, but not UDP!\n"); 1735 } 1736 } 1737 if (extsts & EXTSTS_IPPKT) { 1738 if (ip) { 1739 ip->sum(0); 1740 ip->sum(cksum(ip)); 1741 txIpChecksums++; 1742 } else { 1743 Debug::breakpoint(); 1744 warn_once("IPPKT set, but not UDP!\n"); 1745 } 1746 } 1747 } 1748 1749 txPacket->length = txPacketBufPtr - txPacket->data; 1750 // this is just because the receive can't handle a 1751 // packet bigger want to make sure 1752 if (txPacket->length > 1514) 1753 panic("transmit packet too large, %s > 1514\n", 1754 txPacket->length); 1755 1756#ifndef NDEBUG 1757 bool success = 1758#endif 1759 txFifo.push(txPacket); 1760 assert(success); 1761 1762 /* 1763 * this following section is not tqo spec, but 1764 * functionally shouldn't be any different. normally, 1765 * the chip will wait til the transmit has occurred 1766 * before writing back the descriptor because it has 1767 * to wait to see that it was successfully transmitted 1768 * to decide whether to set CMDSTS_OK or not. 
                 * however, in the simulator since it is always
                 * successfully transmitted, and writing it exactly to
                 * spec would complicate the code, we just do it here
                 */

                // Write back ownership/status so the driver can
                // reclaim the descriptor.
                cmdsts &= ~CMDSTS_OWN;
                cmdsts |= CMDSTS_OK;

                DPRINTF(EthernetDesc,
                        "txDesc writeback: cmdsts=%08x extsts=%08x\n",
                        cmdsts, extsts);

                txDmaFree = dmaDescFree;
                txDmaAddr = regs.txdp & 0x3fffffff;
                txDmaData = &cmdsts;
                if (is64bit) {
                    txDmaAddr += offsetof(ns_desc64, cmdsts);
                    txDmaLen =
                        sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts);
                } else {
                    txDmaAddr += offsetof(ns_desc32, cmdsts);
                    txDmaLen =
                        sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts);
                }

                descDmaWrites++;
                descDmaWrBytes += txDmaLen;

                transmit();
                txPacket = 0;

                if (!txEnable) {
                    DPRINTF(EthernetSM, "halting TX state machine\n");
                    txState = txIdle;
                    goto exit;
                } else
                    txState = txAdvance;

                if (doTxDmaWrite())
                    goto exit;
            }
        } else {
            DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
            if (!txFifo.full()) {
                txState = txFragRead;

                /*
                 * The number of bytes transferred is either whatever
                 * is left in the descriptor (txDescCnt), or if there
                 * is not enough room in the fifo, just whatever room
                 * is left in the fifo
                 */
                txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());

                txDmaAddr = txFragPtr & 0x3fffffff;
                txDmaData = txPacketBufPtr;
                txDmaLen = txXferLen;
                txDmaFree = dmaDataFree;

                if (doTxDmaRead())
                    goto exit;
            } else {
                // Fifo is full: drain it by transmitting, then retry.
                txState = txFifoBlock;
                transmit();

                goto exit;
            }

        }
        break;

      case txFragRead:
        if (txDmaState != dmaIdle)
            goto exit;

        // Account for the fragment that just arrived and reserve its
        // space in the tx fifo.
        txPacketBufPtr += txXferLen;
        txFragPtr += txXferLen;
        txDescCnt -= txXferLen;
        txFifo.reserve(txXferLen);

        txState = txFifoBlock;
        break;

      case txDescWrite:
        if (txDmaState != dmaIdle)
            goto exit;

        if (cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_TXDESC);

        if (!txEnable) {
            DPRINTF(EthernetSM, "halting TX state machine\n");
            txState = txIdle;
            goto exit;
        } else
            txState = txAdvance;
        break;

      case txAdvance:
        if (link == 0) {
            // End of the descriptor chain.
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        } else {
            if (txDmaState != dmaIdle)
                goto exit;
            txState = txDescRead;
            regs.txdp = link;
            CTDD = false;

            txDmaAddr = link & 0x3fffffff;
            txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
            txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
            txDmaFree = dmaDescFree;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("invalid state");
    }

    DPRINTF(EthernetSM, "entering next txState=%s\n",
            NsTxStateStrings[txState]);
    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
            NsTxStateStrings[txState]);

    if (clock && !txKickEvent.scheduled())
        schedule(txKickEvent, txKickTick);
}

/**
 * Advance the EEPROM state machine
 * Called on rising edge of EEPROM clock bit in MEAR
 */
void
NSGigE::eepromKick()
{
    switch (eepromState) {

      case eepromStart:

        // Wait for start bit
        if (regs.mear & MEAR_EEDI) {
            // Set up to get 2 opcode bits
            eepromState = eepromGetOpcode;
            eepromBitsToRx = 2;
            eepromOpcode = 0;
        }
        break;

      case eepromGetOpcode:
        // Shift in one opcode bit per clock edge.
        eepromOpcode <<= 1;
        eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting opcode
        if (eepromBitsToRx == 0) {
            if (eepromOpcode != EEPROM_READ)
                panic("only EEPROM reads are implemented!");

            // Set up to get address
            eepromState = eepromGetAddress;
            eepromBitsToRx = 6;
            eepromAddress = 0;
        }
        break;

      case eepromGetAddress:
        // Shift in one address bit per clock edge.
        eepromAddress <<= 1;
        eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting address
        if (eepromBitsToRx == 0) {

            if (eepromAddress >= EEPROM_SIZE)
                panic("EEPROM read access out of range!");

            // Only the three words holding the perfect-match MAC
            // address are modelled.
            switch (eepromAddress) {

              case EEPROM_PMATCH2_ADDR:
                eepromData = rom.perfectMatch[5];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[4];
                break;

              case EEPROM_PMATCH1_ADDR:
                eepromData = rom.perfectMatch[3];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[2];
                break;

              case EEPROM_PMATCH0_ADDR:
                eepromData = rom.perfectMatch[1];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[0];
                break;

              default:
                panic("FreeBSD driver only uses EEPROM to read PMATCH!");
            }
            // Set up to read data
            eepromState = eepromRead;
            eepromBitsToRx = 16;

            // Clear data in bit
            regs.mear &= ~MEAR_EEDI;
        }
        break;

      case eepromRead:
        // Clear Data Out bit
        regs.mear &= ~MEAR_EEDO;
        // Set bit to value of current EEPROM bit
        regs.mear |= (eepromData & 0x8000) ?
MEAR_EEDO : 0x0; 1993 1994 eepromData <<= 1; 1995 --eepromBitsToRx; 1996 1997 // All done 1998 if (eepromBitsToRx == 0) { 1999 eepromState = eepromStart; 2000 } 2001 break; 2002 2003 default: 2004 panic("invalid EEPROM state"); 2005 } 2006 2007} 2008 2009void 2010NSGigE::transferDone() 2011{ 2012 if (txFifo.empty()) { 2013 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n"); 2014 return; 2015 } 2016 2017 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n"); 2018 2019 reschedule(txEvent, curTick() + ticks(1), true); 2020} 2021 2022bool 2023NSGigE::rxFilter(const EthPacketPtr &packet) 2024{ 2025 EthPtr eth = packet; 2026 bool drop = true; 2027 string type; 2028 2029 const EthAddr &dst = eth->dst(); 2030 if (dst.unicast()) { 2031 // If we're accepting all unicast addresses 2032 if (acceptUnicast) 2033 drop = false; 2034 2035 // If we make a perfect match 2036 if (acceptPerfect && dst == rom.perfectMatch) 2037 drop = false; 2038 2039 if (acceptArp && eth->type() == ETH_TYPE_ARP) 2040 drop = false; 2041 2042 } else if (dst.broadcast()) { 2043 // if we're accepting broadcasts 2044 if (acceptBroadcast) 2045 drop = false; 2046 2047 } else if (dst.multicast()) { 2048 // if we're accepting all multicasts 2049 if (acceptMulticast) 2050 drop = false; 2051 2052 // Multicast hashing faked - all packets accepted 2053 if (multicastHashEnable) 2054 drop = false; 2055 } 2056 2057 if (drop) { 2058 DPRINTF(Ethernet, "rxFilter drop\n"); 2059 DDUMP(EthernetData, packet->data, packet->length); 2060 } 2061 2062 return drop; 2063} 2064 2065bool 2066NSGigE::recvPacket(EthPacketPtr packet) 2067{ 2068 rxBytes += packet->length; 2069 rxPackets++; 2070 2071 DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n", 2072 rxFifo.avail()); 2073 2074 if (!rxEnable) { 2075 DPRINTF(Ethernet, "receive disabled...packet dropped\n"); 2076 return true; 2077 } 2078 2079 if (!rxFilterEnable) { 2080 DPRINTF(Ethernet, 2081 "receive packet filtering disabled . . . 
packet dropped\n"); 2082 return true; 2083 } 2084 2085 if (rxFilter(packet)) { 2086 DPRINTF(Ethernet, "packet filtered...dropped\n"); 2087 return true; 2088 } 2089 2090 if (rxFifo.avail() < packet->length) { 2091#if TRACING_ON 2092 IpPtr ip(packet); 2093 TcpPtr tcp(ip); 2094 if (ip) { 2095 DPRINTF(Ethernet, 2096 "packet won't fit in receive buffer...pkt ID %d dropped\n", 2097 ip->id()); 2098 if (tcp) { 2099 DPRINTF(Ethernet, "Seq=%d\n", tcp->seq()); 2100 } 2101 } 2102#endif 2103 droppedPackets++; 2104 devIntrPost(ISR_RXORN); 2105 return false; 2106 } 2107 2108 rxFifo.push(packet); 2109 2110 rxKick(); 2111 return true; 2112} 2113 2114 2115void 2116NSGigE::resume() 2117{ 2118 SimObject::resume(); 2119 2120 // During drain we could have left the state machines in a waiting state and 2121 // they wouldn't get out until some other event occured to kick them. 2122 // This way they'll get out immediately 2123 txKick(); 2124 rxKick(); 2125} 2126 2127 2128//===================================================================== 2129// 2130// 2131void 2132NSGigE::serialize(ostream &os) 2133{ 2134 // Serialize the PciDev base class 2135 PciDev::serialize(os); 2136 2137 /* 2138 * Finalize any DMA events now. 2139 */ 2140 // @todo will mem system save pending dma? 

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.brar);
    SERIALIZE_SCALAR(regs.brdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    rxFifo.serialize("rxFifo", os);
    txFifo.serialize("txFifo", os);

    /*
     * Serialize the various helper variables
     */
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        // Make the packet length consistent with how much of it has
        // actually been assembled before serializing it.
        txPacket->length = txPacketBufPtr - txPacket->data;
        txPacket->serialize("txPacket", os);
        // Store the buffer cursor as an offset so it can be rebuilt
        // against the unserialized packet data.
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        rxPacket->serialize("rxPacket", os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize Cached Descriptors
     */
    SERIALIZE_SCALAR(rxDesc64.link);
    SERIALIZE_SCALAR(rxDesc64.bufptr);
    SERIALIZE_SCALAR(rxDesc64.cmdsts);
    SERIALIZE_SCALAR(rxDesc64.extsts);
    SERIALIZE_SCALAR(txDesc64.link);
    SERIALIZE_SCALAR(txDesc64.bufptr);
    SERIALIZE_SCALAR(txDesc64.cmdsts);
    SERIALIZE_SCALAR(txDesc64.extsts);
    SERIALIZE_SCALAR(rxDesc32.link);
    SERIALIZE_SCALAR(rxDesc32.bufptr);
    SERIALIZE_SCALAR(rxDesc32.cmdsts);
    SERIALIZE_SCALAR(rxDesc32.extsts);
    SERIALIZE_SCALAR(txDesc32.link);
    SERIALIZE_SCALAR(txDesc32.bufptr);
    SERIALIZE_SCALAR(txDesc32.cmdsts);
    SERIALIZE_SCALAR(txDesc32.extsts);
    SERIALIZE_SCALAR(extstsEnable);

    /*
     * Serialize tx state machine
     */
    // The state enums are checkpointed as plain ints.
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);
    SERIALIZE_SCALAR(txKickTick);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFragPtr);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);
    SERIALIZE_SCALAR(rxKickTick);

    /*
     * Serialize EEPROM state machine
     */
    int eepromState = this->eepromState;
    SERIALIZE_SCALAR(eepromState);
    SERIALIZE_SCALAR(eepromClk);
    SERIALIZE_SCALAR(eepromBitsToRx);
    SERIALIZE_SCALAR(eepromOpcode);
    SERIALIZE_SCALAR(eepromAddress);
    SERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick() : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);
    SERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}

/**
 * Restore device state from a checkpoint; mirrors serialize() field
 * for field and reschedules any events that were pending.
 */
void
NSGigE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Unserialize the PciDev base class
    PciDev::unserialize(cp, section);

    UNSERIALIZE_SCALAR(regs.command);
    UNSERIALIZE_SCALAR(regs.config);
    UNSERIALIZE_SCALAR(regs.mear);
    UNSERIALIZE_SCALAR(regs.ptscr);
    UNSERIALIZE_SCALAR(regs.isr);
    UNSERIALIZE_SCALAR(regs.imr);
    UNSERIALIZE_SCALAR(regs.ier);
    UNSERIALIZE_SCALAR(regs.ihr);
    UNSERIALIZE_SCALAR(regs.txdp);
    UNSERIALIZE_SCALAR(regs.txdp_hi);
    UNSERIALIZE_SCALAR(regs.txcfg);
    UNSERIALIZE_SCALAR(regs.gpior);
    UNSERIALIZE_SCALAR(regs.rxdp);
    UNSERIALIZE_SCALAR(regs.rxdp_hi);
    UNSERIALIZE_SCALAR(regs.rxcfg);
    UNSERIALIZE_SCALAR(regs.pqcr);
    UNSERIALIZE_SCALAR(regs.wcsr);
    UNSERIALIZE_SCALAR(regs.pcr);
    UNSERIALIZE_SCALAR(regs.rfcr);
    UNSERIALIZE_SCALAR(regs.rfdr);
    UNSERIALIZE_SCALAR(regs.brar);
    UNSERIALIZE_SCALAR(regs.brdr);
    UNSERIALIZE_SCALAR(regs.srr);
    UNSERIALIZE_SCALAR(regs.mibc);
    UNSERIALIZE_SCALAR(regs.vrcr);
    UNSERIALIZE_SCALAR(regs.vtcr);
    UNSERIALIZE_SCALAR(regs.vdr);
    UNSERIALIZE_SCALAR(regs.ccsr);
    UNSERIALIZE_SCALAR(regs.tbicr);
    UNSERIALIZE_SCALAR(regs.tbisr);
    UNSERIALIZE_SCALAR(regs.tanar);
    UNSERIALIZE_SCALAR(regs.tanlpar);
    UNSERIALIZE_SCALAR(regs.taner);
    UNSERIALIZE_SCALAR(regs.tesr);

    UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    UNSERIALIZE_SCALAR(ioEnable);

    /*
     * unserialize the data fifos
     */
    rxFifo.unserialize("rxFifo", cp, section);
    txFifo.unserialize("txFifo", cp, section);

    /*
     * unserialize the various helper variables
     */
    bool txPacketExists;
    UNSERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        // Recreate the staging packet and restore the buffer cursor
        // from its serialized offset.
        txPacket = new EthPacketData(16384);
        txPacket->unserialize("txPacket", cp, section);
        uint32_t txPktBufPtr;
        UNSERIALIZE_SCALAR(txPktBufPtr);
        txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
    } else
        txPacket = 0;

    bool rxPacketExists;
    UNSERIALIZE_SCALAR(rxPacketExists);
    rxPacket = 0;
    if (rxPacketExists) {
        rxPacket = new EthPacketData(16384);
        rxPacket->unserialize("rxPacket", cp, section);
        uint32_t rxPktBufPtr;
        UNSERIALIZE_SCALAR(rxPktBufPtr);
        rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
    } else
        rxPacket = 0;

    UNSERIALIZE_SCALAR(txXferLen);
    UNSERIALIZE_SCALAR(rxXferLen);

    /*
     * Unserialize Cached Descriptors
     */
    UNSERIALIZE_SCALAR(rxDesc64.link);
    UNSERIALIZE_SCALAR(rxDesc64.bufptr);
    UNSERIALIZE_SCALAR(rxDesc64.cmdsts);
    UNSERIALIZE_SCALAR(rxDesc64.extsts);
    UNSERIALIZE_SCALAR(txDesc64.link);
    UNSERIALIZE_SCALAR(txDesc64.bufptr);
    UNSERIALIZE_SCALAR(txDesc64.cmdsts);
    UNSERIALIZE_SCALAR(txDesc64.extsts);
    UNSERIALIZE_SCALAR(rxDesc32.link);
    UNSERIALIZE_SCALAR(rxDesc32.bufptr);
    UNSERIALIZE_SCALAR(rxDesc32.cmdsts);
    UNSERIALIZE_SCALAR(rxDesc32.extsts);
    UNSERIALIZE_SCALAR(txDesc32.link);
    UNSERIALIZE_SCALAR(txDesc32.bufptr);
    UNSERIALIZE_SCALAR(txDesc32.cmdsts);
    UNSERIALIZE_SCALAR(txDesc32.extsts);
    UNSERIALIZE_SCALAR(extstsEnable);

    /*
     * unserialize tx state machine
     */
    // State enums were checkpointed as ints; cast them back.
    int txState;
    UNSERIALIZE_SCALAR(txState);
    this->txState = (TxState) txState;
    UNSERIALIZE_SCALAR(txEnable);
    UNSERIALIZE_SCALAR(CTDD);
    UNSERIALIZE_SCALAR(txFragPtr);
    UNSERIALIZE_SCALAR(txDescCnt);
    int txDmaState;
    UNSERIALIZE_SCALAR(txDmaState);
    this->txDmaState = (DmaState) txDmaState;
    UNSERIALIZE_SCALAR(txKickTick);
    if (txKickTick)
        schedule(txKickEvent, txKickTick);

    /*
     * unserialize rx state machine
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    this->rxState = (RxState) rxState;
    UNSERIALIZE_SCALAR(rxEnable);
    UNSERIALIZE_SCALAR(CRDD);
    UNSERIALIZE_SCALAR(rxPktBytes);
    UNSERIALIZE_SCALAR(rxFragPtr);
    UNSERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState;
    UNSERIALIZE_SCALAR(rxDmaState);
    this->rxDmaState = (DmaState) rxDmaState;
    UNSERIALIZE_SCALAR(rxKickTick);
    if (rxKickTick)
        schedule(rxKickEvent, rxKickTick);

    /*
     * Unserialize EEPROM state machine
     */
    int eepromState;
    UNSERIALIZE_SCALAR(eepromState);
    this->eepromState = (EEPROMState) eepromState;
    UNSERIALIZE_SCALAR(eepromClk);
    UNSERIALIZE_SCALAR(eepromBitsToRx);
    UNSERIALIZE_SCALAR(eepromOpcode);
    UNSERIALIZE_SCALAR(eepromAddress);
    UNSERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, reschedule it now
     */
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        schedule(txEvent, curTick() + transmitTick);

    /*
     * unserialize receive address filter settings
     */
    UNSERIALIZE_SCALAR(rxFilterEnable);
    UNSERIALIZE_SCALAR(acceptBroadcast);
    UNSERIALIZE_SCALAR(acceptMulticast);
    UNSERIALIZE_SCALAR(acceptUnicast);
    UNSERIALIZE_SCALAR(acceptPerfect);
    UNSERIALIZE_SCALAR(acceptArp);
    UNSERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    UNSERIALIZE_SCALAR(intrTick);
    UNSERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick;
    UNSERIALIZE_SCALAR(intrEventTick);
    if (intrEventTick) {
        // Recreate the interrupt event that was in flight.
        intrEvent = new IntrEvent(this, true);
        schedule(intrEvent, intrEventTick);
    }
}

// Factory method used by the Python configuration system.
NSGigE *
NSGigEParams::create()
{
    return new NSGigE(this);
}