// ns_gige.cc revision 9339
/*
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Nathan Binkert
 *          Lisa Hsu
 */

/** @file
 * Device module for modelling the National Semiconductor
 * DP83820 ethernet controller.
Does not support priority queueing 35 */ 36#include <deque> 37#include <string> 38 39#include "base/debug.hh" 40#include "base/inet.hh" 41#include "base/types.hh" 42#include "config/the_isa.hh" 43#include "cpu/thread_context.hh" 44#include "debug/EthernetAll.hh" 45#include "dev/etherlink.hh" 46#include "dev/ns_gige.hh" 47#include "dev/pciconfigall.hh" 48#include "mem/packet.hh" 49#include "mem/packet_access.hh" 50#include "params/NSGigE.hh" 51#include "sim/system.hh" 52 53// clang complains about std::set being overloaded with Packet::set if 54// we open up the entire namespace std 55using std::min; 56using std::ostream; 57using std::string; 58 59const char *NsRxStateStrings[] = 60{ 61 "rxIdle", 62 "rxDescRefr", 63 "rxDescRead", 64 "rxFifoBlock", 65 "rxFragWrite", 66 "rxDescWrite", 67 "rxAdvance" 68}; 69 70const char *NsTxStateStrings[] = 71{ 72 "txIdle", 73 "txDescRefr", 74 "txDescRead", 75 "txFifoBlock", 76 "txFragRead", 77 "txDescWrite", 78 "txAdvance" 79}; 80 81const char *NsDmaState[] = 82{ 83 "dmaIdle", 84 "dmaReading", 85 "dmaWriting", 86 "dmaReadWaiting", 87 "dmaWriteWaiting" 88}; 89 90using namespace Net; 91using namespace TheISA; 92 93/////////////////////////////////////////////////////////////////////// 94// 95// NSGigE PCI Device 96// 97NSGigE::NSGigE(Params *p) 98 : EtherDevBase(p), ioEnable(false), 99 txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size), 100 txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL), 101 txXferLen(0), rxXferLen(0), rxDmaFree(false), txDmaFree(false), 102 txState(txIdle), txEnable(false), CTDD(false), txHalt(false), 103 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle), 104 rxEnable(false), CRDD(false), rxPktBytes(0), rxHalt(false), 105 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false), 106 eepromState(eepromStart), eepromClk(false), eepromBitsToRx(0), 107 eepromOpcode(0), eepromAddress(0), eepromData(0), 108 dmaReadDelay(p->dma_read_delay), dmaWriteDelay(p->dma_write_delay), 
109 dmaReadFactor(p->dma_read_factor), dmaWriteFactor(p->dma_write_factor), 110 rxDmaData(NULL), rxDmaAddr(0), rxDmaLen(0), 111 txDmaData(NULL), txDmaAddr(0), txDmaLen(0), 112 rxDmaReadEvent(this), rxDmaWriteEvent(this), 113 txDmaReadEvent(this), txDmaWriteEvent(this), 114 dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free), 115 txDelay(p->tx_delay), rxDelay(p->rx_delay), 116 rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this), 117 txEvent(this), rxFilterEnable(p->rx_filter), 118 acceptBroadcast(false), acceptMulticast(false), acceptUnicast(false), 119 acceptPerfect(false), acceptArp(false), multicastHashEnable(false), 120 intrDelay(p->intr_delay), intrTick(0), cpuPendingIntr(false), 121 intrEvent(0), interface(0) 122{ 123 124 125 interface = new NSGigEInt(name() + ".int0", this); 126 127 regsReset(); 128 memcpy(&rom.perfectMatch, p->hardware_address.bytes(), ETH_ADDR_LEN); 129 130 memset(&rxDesc32, 0, sizeof(rxDesc32)); 131 memset(&txDesc32, 0, sizeof(txDesc32)); 132 memset(&rxDesc64, 0, sizeof(rxDesc64)); 133 memset(&txDesc64, 0, sizeof(txDesc64)); 134} 135 136NSGigE::~NSGigE() 137{ 138 delete interface; 139} 140 141/** 142 * This is to write to the PCI general configuration registers 143 */ 144Tick 145NSGigE::writeConfig(PacketPtr pkt) 146{ 147 int offset = pkt->getAddr() & PCI_CONFIG_SIZE; 148 if (offset < PCI_DEVICE_SPECIFIC) 149 PciDev::writeConfig(pkt); 150 else 151 panic("Device specific PCI config space not implemented!\n"); 152 153 switch (offset) { 154 // seems to work fine without all these PCI settings, but i 155 // put in the IO to double check, an assertion will fail if we 156 // need to properly implement it 157 case PCI_COMMAND: 158 if (config.data[offset] & PCI_CMD_IOSE) 159 ioEnable = true; 160 else 161 ioEnable = false; 162 break; 163 } 164 165 return configDelay; 166} 167 168EtherInt* 169NSGigE::getEthPort(const std::string &if_name, int idx) 170{ 171 if (if_name == "interface") { 172 if (interface->getPeer()) 173 
panic("interface already connected to\n"); 174 return interface; 175 } 176 return NULL; 177} 178 179/** 180 * This reads the device registers, which are detailed in the NS83820 181 * spec sheet 182 */ 183Tick 184NSGigE::read(PacketPtr pkt) 185{ 186 assert(ioEnable); 187 188 pkt->allocate(); 189 190 //The mask is to give you only the offset into the device register file 191 Addr daddr = pkt->getAddr() & 0xfff; 192 DPRINTF(EthernetPIO, "read da=%#x pa=%#x size=%d\n", 193 daddr, pkt->getAddr(), pkt->getSize()); 194 195 196 // there are some reserved registers, you can see ns_gige_reg.h and 197 // the spec sheet for details 198 if (daddr > LAST && daddr <= RESERVED) { 199 panic("Accessing reserved register"); 200 } else if (daddr > RESERVED && daddr <= 0x3FC) { 201 return readConfig(pkt); 202 } else if (daddr >= MIB_START && daddr <= MIB_END) { 203 // don't implement all the MIB's. hopefully the kernel 204 // doesn't actually DEPEND upon their values 205 // MIB are just hardware stats keepers 206 pkt->set<uint32_t>(0); 207 pkt->makeAtomicResponse(); 208 return pioDelay; 209 } else if (daddr > 0x3FC) 210 panic("Something is messed up!\n"); 211 212 assert(pkt->getSize() == sizeof(uint32_t)); 213 uint32_t ® = *pkt->getPtr<uint32_t>(); 214 uint16_t rfaddr; 215 216 switch (daddr) { 217 case CR: 218 reg = regs.command; 219 //these are supposed to be cleared on a read 220 reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR); 221 break; 222 223 case CFGR: 224 reg = regs.config; 225 break; 226 227 case MEAR: 228 reg = regs.mear; 229 break; 230 231 case PTSCR: 232 reg = regs.ptscr; 233 break; 234 235 case ISR: 236 reg = regs.isr; 237 devIntrClear(ISR_ALL); 238 break; 239 240 case IMR: 241 reg = regs.imr; 242 break; 243 244 case IER: 245 reg = regs.ier; 246 break; 247 248 case IHR: 249 reg = regs.ihr; 250 break; 251 252 case TXDP: 253 reg = regs.txdp; 254 break; 255 256 case TXDP_HI: 257 reg = regs.txdp_hi; 258 break; 259 260 case TX_CFG: 261 reg = regs.txcfg; 262 break; 263 264 case 
GPIOR: 265 reg = regs.gpior; 266 break; 267 268 case RXDP: 269 reg = regs.rxdp; 270 break; 271 272 case RXDP_HI: 273 reg = regs.rxdp_hi; 274 break; 275 276 case RX_CFG: 277 reg = regs.rxcfg; 278 break; 279 280 case PQCR: 281 reg = regs.pqcr; 282 break; 283 284 case WCSR: 285 reg = regs.wcsr; 286 break; 287 288 case PCR: 289 reg = regs.pcr; 290 break; 291 292 // see the spec sheet for how RFCR and RFDR work 293 // basically, you write to RFCR to tell the machine 294 // what you want to do next, then you act upon RFDR, 295 // and the device will be prepared b/c of what you 296 // wrote to RFCR 297 case RFCR: 298 reg = regs.rfcr; 299 break; 300 301 case RFDR: 302 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR); 303 switch (rfaddr) { 304 // Read from perfect match ROM octets 305 case 0x000: 306 reg = rom.perfectMatch[1]; 307 reg = reg << 8; 308 reg += rom.perfectMatch[0]; 309 break; 310 case 0x002: 311 reg = rom.perfectMatch[3] << 8; 312 reg += rom.perfectMatch[2]; 313 break; 314 case 0x004: 315 reg = rom.perfectMatch[5] << 8; 316 reg += rom.perfectMatch[4]; 317 break; 318 default: 319 // Read filter hash table 320 if (rfaddr >= FHASH_ADDR && 321 rfaddr < FHASH_ADDR + FHASH_SIZE) { 322 323 // Only word-aligned reads supported 324 if (rfaddr % 2) 325 panic("unaligned read from filter hash table!"); 326 327 reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8; 328 reg += rom.filterHash[rfaddr - FHASH_ADDR]; 329 break; 330 } 331 332 panic("reading RFDR for something other than pattern" 333 " matching or hashing! 
%#x\n", rfaddr); 334 } 335 break; 336 337 case SRR: 338 reg = regs.srr; 339 break; 340 341 case MIBC: 342 reg = regs.mibc; 343 reg &= ~(MIBC_MIBS | MIBC_ACLR); 344 break; 345 346 case VRCR: 347 reg = regs.vrcr; 348 break; 349 350 case VTCR: 351 reg = regs.vtcr; 352 break; 353 354 case VDR: 355 reg = regs.vdr; 356 break; 357 358 case CCSR: 359 reg = regs.ccsr; 360 break; 361 362 case TBICR: 363 reg = regs.tbicr; 364 break; 365 366 case TBISR: 367 reg = regs.tbisr; 368 break; 369 370 case TANAR: 371 reg = regs.tanar; 372 break; 373 374 case TANLPAR: 375 reg = regs.tanlpar; 376 break; 377 378 case TANER: 379 reg = regs.taner; 380 break; 381 382 case TESR: 383 reg = regs.tesr; 384 break; 385 386 case M5REG: 387 reg = 0; 388 if (params()->rx_thread) 389 reg |= M5REG_RX_THREAD; 390 if (params()->tx_thread) 391 reg |= M5REG_TX_THREAD; 392 if (params()->rss) 393 reg |= M5REG_RSS; 394 break; 395 396 default: 397 panic("reading unimplemented register: addr=%#x", daddr); 398 } 399 400 DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n", 401 daddr, reg, reg); 402 403 pkt->makeAtomicResponse(); 404 return pioDelay; 405} 406 407Tick 408NSGigE::write(PacketPtr pkt) 409{ 410 assert(ioEnable); 411 412 Addr daddr = pkt->getAddr() & 0xfff; 413 DPRINTF(EthernetPIO, "write da=%#x pa=%#x size=%d\n", 414 daddr, pkt->getAddr(), pkt->getSize()); 415 416 if (daddr > LAST && daddr <= RESERVED) { 417 panic("Accessing reserved register"); 418 } else if (daddr > RESERVED && daddr <= 0x3FC) { 419 return writeConfig(pkt); 420 } else if (daddr > 0x3FC) 421 panic("Something is messed up!\n"); 422 423 if (pkt->getSize() == sizeof(uint32_t)) { 424 uint32_t reg = pkt->get<uint32_t>(); 425 uint16_t rfaddr; 426 427 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg); 428 429 switch (daddr) { 430 case CR: 431 regs.command = reg; 432 if (reg & CR_TXD) { 433 txEnable = false; 434 } else if (reg & CR_TXE) { 435 txEnable = true; 436 437 // the kernel is enabling the transmit machine 438 if 
(txState == txIdle) 439 txKick(); 440 } 441 442 if (reg & CR_RXD) { 443 rxEnable = false; 444 } else if (reg & CR_RXE) { 445 rxEnable = true; 446 447 if (rxState == rxIdle) 448 rxKick(); 449 } 450 451 if (reg & CR_TXR) 452 txReset(); 453 454 if (reg & CR_RXR) 455 rxReset(); 456 457 if (reg & CR_SWI) 458 devIntrPost(ISR_SWI); 459 460 if (reg & CR_RST) { 461 txReset(); 462 rxReset(); 463 464 regsReset(); 465 } 466 break; 467 468 case CFGR: 469 if (reg & CFGR_LNKSTS || 470 reg & CFGR_SPDSTS || 471 reg & CFGR_DUPSTS || 472 reg & CFGR_RESERVED || 473 reg & CFGR_T64ADDR || 474 reg & CFGR_PCI64_DET) { 475 // First clear all writable bits 476 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS | 477 CFGR_RESERVED | CFGR_T64ADDR | 478 CFGR_PCI64_DET; 479 // Now set the appropriate writable bits 480 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS | 481 CFGR_RESERVED | CFGR_T64ADDR | 482 CFGR_PCI64_DET); 483 } 484 485// all these #if 0's are because i don't THINK the kernel needs to 486// have these implemented. if there is a problem relating to one of 487// these, you may need to add functionality in. 
488 489// grouped together and #if 0'ed to avoid empty if body and make clang happy 490#if 0 491 if (reg & CFGR_TBI_EN) ; 492 if (reg & CFGR_MODE_1000) ; 493 494 if (reg & CFGR_PINT_DUPSTS || 495 reg & CFGR_PINT_LNKSTS || 496 reg & CFGR_PINT_SPDSTS) 497 ; 498 499 if (reg & CFGR_TMRTEST) ; 500 if (reg & CFGR_MRM_DIS) ; 501 if (reg & CFGR_MWI_DIS) ; 502 503 if (reg & CFGR_DATA64_EN) ; 504 if (reg & CFGR_M64ADDR) ; 505 if (reg & CFGR_PHY_RST) ; 506 if (reg & CFGR_PHY_DIS) ; 507 508 if (reg & CFGR_REQALG) ; 509 if (reg & CFGR_SB) ; 510 if (reg & CFGR_POW) ; 511 if (reg & CFGR_EXD) ; 512 if (reg & CFGR_PESEL) ; 513 if (reg & CFGR_BROM_DIS) ; 514 if (reg & CFGR_EXT_125) ; 515 if (reg & CFGR_BEM) ; 516 517 if (reg & CFGR_T64ADDR) ; 518 // panic("CFGR_T64ADDR is read only register!\n"); 519#endif 520 if (reg & CFGR_AUTO_1000) 521 panic("CFGR_AUTO_1000 not implemented!\n"); 522 523 if (reg & CFGR_PCI64_DET) 524 panic("CFGR_PCI64_DET is read only register!\n"); 525 526 if (reg & CFGR_EXTSTS_EN) 527 extstsEnable = true; 528 else 529 extstsEnable = false; 530 break; 531 532 case MEAR: 533 // Clear writable bits 534 regs.mear &= MEAR_EEDO; 535 // Set appropriate writable bits 536 regs.mear |= reg & ~MEAR_EEDO; 537 538 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address) 539 // even though it could get it through RFDR 540 if (reg & MEAR_EESEL) { 541 // Rising edge of clock 542 if (reg & MEAR_EECLK && !eepromClk) 543 eepromKick(); 544 } 545 else { 546 eepromState = eepromStart; 547 regs.mear &= ~MEAR_EEDI; 548 } 549 550 eepromClk = reg & MEAR_EECLK; 551 552 // since phy is completely faked, MEAR_MD* don't matter 553 554// grouped together and #if 0'ed to avoid empty if body and make clang happy 555#if 0 556 if (reg & MEAR_MDIO) ; 557 if (reg & MEAR_MDDIR) ; 558 if (reg & MEAR_MDC) ; 559#endif 560 break; 561 562 case PTSCR: 563 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY); 564 // these control BISTs for various parts of chip - we 565 // don't care or do just fake that the 
BIST is done 566 if (reg & PTSCR_RBIST_EN) 567 regs.ptscr |= PTSCR_RBIST_DONE; 568 if (reg & PTSCR_EEBIST_EN) 569 regs.ptscr &= ~PTSCR_EEBIST_EN; 570 if (reg & PTSCR_EELOAD_EN) 571 regs.ptscr &= ~PTSCR_EELOAD_EN; 572 break; 573 574 case ISR: /* writing to the ISR has no effect */ 575 panic("ISR is a read only register!\n"); 576 577 case IMR: 578 regs.imr = reg; 579 devIntrChangeMask(); 580 break; 581 582 case IER: 583 regs.ier = reg; 584 break; 585 586 case IHR: 587 regs.ihr = reg; 588 /* not going to implement real interrupt holdoff */ 589 break; 590 591 case TXDP: 592 regs.txdp = (reg & 0xFFFFFFFC); 593 assert(txState == txIdle); 594 CTDD = false; 595 break; 596 597 case TXDP_HI: 598 regs.txdp_hi = reg; 599 break; 600 601 case TX_CFG: 602 regs.txcfg = reg; 603#if 0 604 if (reg & TX_CFG_CSI) ; 605 if (reg & TX_CFG_HBI) ; 606 if (reg & TX_CFG_MLB) ; 607 if (reg & TX_CFG_ATP) ; 608 if (reg & TX_CFG_ECRETRY) { 609 /* 610 * this could easily be implemented, but considering 611 * the network is just a fake pipe, wouldn't make 612 * sense to do this 613 */ 614 } 615 616 if (reg & TX_CFG_BRST_DIS) ; 617#endif 618 619#if 0 620 /* we handle our own DMA, ignore the kernel's exhortations */ 621 if (reg & TX_CFG_MXDMA) ; 622#endif 623 624 // also, we currently don't care about fill/drain 625 // thresholds though this may change in the future with 626 // more realistic networks or a driver which changes it 627 // according to feedback 628 629 break; 630 631 case GPIOR: 632 // Only write writable bits 633 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN 634 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN; 635 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN 636 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN); 637 /* these just control general purpose i/o pins, don't matter */ 638 break; 639 640 case RXDP: 641 regs.rxdp = reg; 642 CRDD = false; 643 break; 644 645 case RXDP_HI: 646 regs.rxdp_hi = reg; 647 break; 648 649 case RX_CFG: 650 regs.rxcfg = reg; 651#if 
0 652 if (reg & RX_CFG_AEP) ; 653 if (reg & RX_CFG_ARP) ; 654 if (reg & RX_CFG_STRIPCRC) ; 655 if (reg & RX_CFG_RX_RD) ; 656 if (reg & RX_CFG_ALP) ; 657 if (reg & RX_CFG_AIRL) ; 658 659 /* we handle our own DMA, ignore what kernel says about it */ 660 if (reg & RX_CFG_MXDMA) ; 661 662 //also, we currently don't care about fill/drain thresholds 663 //though this may change in the future with more realistic 664 //networks or a driver which changes it according to feedback 665 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ; 666#endif 667 break; 668 669 case PQCR: 670 /* there is no priority queueing used in the linux 2.6 driver */ 671 regs.pqcr = reg; 672 break; 673 674 case WCSR: 675 /* not going to implement wake on LAN */ 676 regs.wcsr = reg; 677 break; 678 679 case PCR: 680 /* not going to implement pause control */ 681 regs.pcr = reg; 682 break; 683 684 case RFCR: 685 regs.rfcr = reg; 686 687 rxFilterEnable = (reg & RFCR_RFEN) ? true : false; 688 acceptBroadcast = (reg & RFCR_AAB) ? true : false; 689 acceptMulticast = (reg & RFCR_AAM) ? true : false; 690 acceptUnicast = (reg & RFCR_AAU) ? true : false; 691 acceptPerfect = (reg & RFCR_APM) ? true : false; 692 acceptArp = (reg & RFCR_AARP) ? true : false; 693 multicastHashEnable = (reg & RFCR_MHEN) ? 
true : false; 694 695#if 0 696 if (reg & RFCR_APAT) 697 panic("RFCR_APAT not implemented!\n"); 698#endif 699 if (reg & RFCR_UHEN) 700 panic("Unicast hash filtering not used by drivers!\n"); 701 702 if (reg & RFCR_ULM) 703 panic("RFCR_ULM not implemented!\n"); 704 705 break; 706 707 case RFDR: 708 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR); 709 switch (rfaddr) { 710 case 0x000: 711 rom.perfectMatch[0] = (uint8_t)reg; 712 rom.perfectMatch[1] = (uint8_t)(reg >> 8); 713 break; 714 case 0x002: 715 rom.perfectMatch[2] = (uint8_t)reg; 716 rom.perfectMatch[3] = (uint8_t)(reg >> 8); 717 break; 718 case 0x004: 719 rom.perfectMatch[4] = (uint8_t)reg; 720 rom.perfectMatch[5] = (uint8_t)(reg >> 8); 721 break; 722 default: 723 724 if (rfaddr >= FHASH_ADDR && 725 rfaddr < FHASH_ADDR + FHASH_SIZE) { 726 727 // Only word-aligned writes supported 728 if (rfaddr % 2) 729 panic("unaligned write to filter hash table!"); 730 731 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg; 732 rom.filterHash[rfaddr - FHASH_ADDR + 1] 733 = (uint8_t)(reg >> 8); 734 break; 735 } 736 panic("writing RFDR for something other than pattern matching\ 737 or hashing! 
%#x\n", rfaddr); 738 } 739 740 case BRAR: 741 regs.brar = reg; 742 break; 743 744 case BRDR: 745 panic("the driver never uses BRDR, something is wrong!\n"); 746 747 case SRR: 748 panic("SRR is read only register!\n"); 749 750 case MIBC: 751 panic("the driver never uses MIBC, something is wrong!\n"); 752 753 case VRCR: 754 regs.vrcr = reg; 755 break; 756 757 case VTCR: 758 regs.vtcr = reg; 759 break; 760 761 case VDR: 762 panic("the driver never uses VDR, something is wrong!\n"); 763 764 case CCSR: 765 /* not going to implement clockrun stuff */ 766 regs.ccsr = reg; 767 break; 768 769 case TBICR: 770 regs.tbicr = reg; 771 if (reg & TBICR_MR_LOOPBACK) 772 panic("TBICR_MR_LOOPBACK never used, something wrong!\n"); 773 774 if (reg & TBICR_MR_AN_ENABLE) { 775 regs.tanlpar = regs.tanar; 776 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS); 777 } 778 779#if 0 780 if (reg & TBICR_MR_RESTART_AN) ; 781#endif 782 783 break; 784 785 case TBISR: 786 panic("TBISR is read only register!\n"); 787 788 case TANAR: 789 // Only write the writable bits 790 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED; 791 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED); 792 793 // Pause capability unimplemented 794#if 0 795 if (reg & TANAR_PS2) ; 796 if (reg & TANAR_PS1) ; 797#endif 798 799 break; 800 801 case TANLPAR: 802 panic("this should only be written to by the fake phy!\n"); 803 804 case TANER: 805 panic("TANER is read only register!\n"); 806 807 case TESR: 808 regs.tesr = reg; 809 break; 810 811 default: 812 panic("invalid register access daddr=%#x", daddr); 813 } 814 } else { 815 panic("Invalid Request Size"); 816 } 817 pkt->makeAtomicResponse(); 818 return pioDelay; 819} 820 821void 822NSGigE::devIntrPost(uint32_t interrupts) 823{ 824 if (interrupts & ISR_RESERVE) 825 panic("Cannot set a reserved interrupt"); 826 827 if (interrupts & ISR_NOIMPL) 828 warn("interrupt not implemented %#x\n", interrupts); 829 830 interrupts &= ISR_IMPL; 831 regs.isr |= interrupts; 832 
833 if (interrupts & regs.imr) { 834 if (interrupts & ISR_SWI) { 835 totalSwi++; 836 } 837 if (interrupts & ISR_RXIDLE) { 838 totalRxIdle++; 839 } 840 if (interrupts & ISR_RXOK) { 841 totalRxOk++; 842 } 843 if (interrupts & ISR_RXDESC) { 844 totalRxDesc++; 845 } 846 if (interrupts & ISR_TXOK) { 847 totalTxOk++; 848 } 849 if (interrupts & ISR_TXIDLE) { 850 totalTxIdle++; 851 } 852 if (interrupts & ISR_TXDESC) { 853 totalTxDesc++; 854 } 855 if (interrupts & ISR_RXORN) { 856 totalRxOrn++; 857 } 858 } 859 860 DPRINTF(EthernetIntr, 861 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n", 862 interrupts, regs.isr, regs.imr); 863 864 if ((regs.isr & regs.imr)) { 865 Tick when = curTick(); 866 if ((regs.isr & regs.imr & ISR_NODELAY) == 0) 867 when += intrDelay; 868 postedInterrupts++; 869 cpuIntrPost(when); 870 } 871} 872 873/* writing this interrupt counting stats inside this means that this function 874 is now limited to being used to clear all interrupts upon the kernel 875 reading isr and servicing. just telling you in case you were thinking 876 of expanding use. 
877*/ 878void 879NSGigE::devIntrClear(uint32_t interrupts) 880{ 881 if (interrupts & ISR_RESERVE) 882 panic("Cannot clear a reserved interrupt"); 883 884 if (regs.isr & regs.imr & ISR_SWI) { 885 postedSwi++; 886 } 887 if (regs.isr & regs.imr & ISR_RXIDLE) { 888 postedRxIdle++; 889 } 890 if (regs.isr & regs.imr & ISR_RXOK) { 891 postedRxOk++; 892 } 893 if (regs.isr & regs.imr & ISR_RXDESC) { 894 postedRxDesc++; 895 } 896 if (regs.isr & regs.imr & ISR_TXOK) { 897 postedTxOk++; 898 } 899 if (regs.isr & regs.imr & ISR_TXIDLE) { 900 postedTxIdle++; 901 } 902 if (regs.isr & regs.imr & ISR_TXDESC) { 903 postedTxDesc++; 904 } 905 if (regs.isr & regs.imr & ISR_RXORN) { 906 postedRxOrn++; 907 } 908 909 interrupts &= ~ISR_NOIMPL; 910 regs.isr &= ~interrupts; 911 912 DPRINTF(EthernetIntr, 913 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n", 914 interrupts, regs.isr, regs.imr); 915 916 if (!(regs.isr & regs.imr)) 917 cpuIntrClear(); 918} 919 920void 921NSGigE::devIntrChangeMask() 922{ 923 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n", 924 regs.isr, regs.imr, regs.isr & regs.imr); 925 926 if (regs.isr & regs.imr) 927 cpuIntrPost(curTick()); 928 else 929 cpuIntrClear(); 930} 931 932void 933NSGigE::cpuIntrPost(Tick when) 934{ 935 // If the interrupt you want to post is later than an interrupt 936 // already scheduled, just let it post in the coming one and don't 937 // schedule another. 938 // HOWEVER, must be sure that the scheduled intrTick is in the 939 // future (this was formerly the source of a bug) 940 /** 941 * @todo this warning should be removed and the intrTick code should 942 * be fixed. 
943 */ 944 assert(when >= curTick()); 945 assert(intrTick >= curTick() || intrTick == 0); 946 if (when > intrTick && intrTick != 0) { 947 DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n", 948 intrTick); 949 return; 950 } 951 952 intrTick = when; 953 if (intrTick < curTick()) { 954 Debug::breakpoint(); 955 intrTick = curTick(); 956 } 957 958 DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n", 959 intrTick); 960 961 if (intrEvent) 962 intrEvent->squash(); 963 intrEvent = new IntrEvent(this, true); 964 schedule(intrEvent, intrTick); 965} 966 967void 968NSGigE::cpuInterrupt() 969{ 970 assert(intrTick == curTick()); 971 972 // Whether or not there's a pending interrupt, we don't care about 973 // it anymore 974 intrEvent = 0; 975 intrTick = 0; 976 977 // Don't send an interrupt if there's already one 978 if (cpuPendingIntr) { 979 DPRINTF(EthernetIntr, 980 "would send an interrupt now, but there's already pending\n"); 981 } else { 982 // Send interrupt 983 cpuPendingIntr = true; 984 985 DPRINTF(EthernetIntr, "posting interrupt\n"); 986 intrPost(); 987 } 988} 989 990void 991NSGigE::cpuIntrClear() 992{ 993 if (!cpuPendingIntr) 994 return; 995 996 if (intrEvent) { 997 intrEvent->squash(); 998 intrEvent = 0; 999 } 1000 1001 intrTick = 0; 1002 1003 cpuPendingIntr = false; 1004 1005 DPRINTF(EthernetIntr, "clearing interrupt\n"); 1006 intrClear(); 1007} 1008 1009bool 1010NSGigE::cpuIntrPending() const 1011{ return cpuPendingIntr; } 1012 1013void 1014NSGigE::txReset() 1015{ 1016 1017 DPRINTF(Ethernet, "transmit reset\n"); 1018 1019 CTDD = false; 1020 txEnable = false;; 1021 txFragPtr = 0; 1022 assert(txDescCnt == 0); 1023 txFifo.clear(); 1024 txState = txIdle; 1025 assert(txDmaState == dmaIdle); 1026} 1027 1028void 1029NSGigE::rxReset() 1030{ 1031 DPRINTF(Ethernet, "receive reset\n"); 1032 1033 CRDD = false; 1034 assert(rxPktBytes == 0); 1035 rxEnable = false; 1036 rxFragPtr = 0; 1037 assert(rxDescCnt == 0); 1038 assert(rxDmaState == 
dmaIdle); 1039 rxFifo.clear(); 1040 rxState = rxIdle; 1041} 1042 1043void 1044NSGigE::regsReset() 1045{ 1046 memset(®s, 0, sizeof(regs)); 1047 regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000); 1048 regs.mear = 0x12; 1049 regs.txcfg = 0x120; // set drain threshold to 1024 bytes and 1050 // fill threshold to 32 bytes 1051 regs.rxcfg = 0x4; // set drain threshold to 16 bytes 1052 regs.srr = 0x0103; // set the silicon revision to rev B or 0x103 1053 regs.mibc = MIBC_FRZ; 1054 regs.vdr = 0x81; // set the vlan tag type to 802.1q 1055 regs.tesr = 0xc000; // TBI capable of both full and half duplex 1056 regs.brar = 0xffffffff; 1057 1058 extstsEnable = false; 1059 acceptBroadcast = false; 1060 acceptMulticast = false; 1061 acceptUnicast = false; 1062 acceptPerfect = false; 1063 acceptArp = false; 1064} 1065 1066bool 1067NSGigE::doRxDmaRead() 1068{ 1069 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting); 1070 rxDmaState = dmaReading; 1071 1072 if (dmaPending() || getState() != Running) 1073 rxDmaState = dmaReadWaiting; 1074 else 1075 dmaRead(rxDmaAddr, rxDmaLen, &rxDmaReadEvent, (uint8_t*)rxDmaData); 1076 1077 return true; 1078} 1079 1080void 1081NSGigE::rxDmaReadDone() 1082{ 1083 assert(rxDmaState == dmaReading); 1084 rxDmaState = dmaIdle; 1085 1086 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n", 1087 rxDmaAddr, rxDmaLen); 1088 DDUMP(EthernetDMA, rxDmaData, rxDmaLen); 1089 1090 // If the transmit state machine has a pending DMA, let it go first 1091 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting) 1092 txKick(); 1093 1094 rxKick(); 1095} 1096 1097bool 1098NSGigE::doRxDmaWrite() 1099{ 1100 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting); 1101 rxDmaState = dmaWriting; 1102 1103 if (dmaPending() || getState() != Running) 1104 rxDmaState = dmaWriteWaiting; 1105 else 1106 dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaWriteEvent, (uint8_t*)rxDmaData); 1107 return true; 1108} 1109 1110void 1111NSGigE::rxDmaWriteDone() 1112{ 1113 
assert(rxDmaState == dmaWriting); 1114 rxDmaState = dmaIdle; 1115 1116 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n", 1117 rxDmaAddr, rxDmaLen); 1118 DDUMP(EthernetDMA, rxDmaData, rxDmaLen); 1119 1120 // If the transmit state machine has a pending DMA, let it go first 1121 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting) 1122 txKick(); 1123 1124 rxKick(); 1125} 1126 1127void 1128NSGigE::rxKick() 1129{ 1130 bool is64bit = (bool)(regs.config & CFGR_M64ADDR); 1131 1132 DPRINTF(EthernetSM, 1133 "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n", 1134 NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32); 1135 1136 Addr link, bufptr; 1137 uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts; 1138 uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts; 1139 1140 next: 1141 if (clock) { 1142 if (rxKickTick > curTick()) { 1143 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n", 1144 rxKickTick); 1145 1146 goto exit; 1147 } 1148 1149 // Go to the next state machine clock tick. 1150 rxKickTick = curTick() + clockPeriod(); 1151 } 1152 1153 switch(rxDmaState) { 1154 case dmaReadWaiting: 1155 if (doRxDmaRead()) 1156 goto exit; 1157 break; 1158 case dmaWriteWaiting: 1159 if (doRxDmaWrite()) 1160 goto exit; 1161 break; 1162 default: 1163 break; 1164 } 1165 1166 link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link; 1167 bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr; 1168 1169 // see state machine from spec for details 1170 // the way this works is, if you finish work on one state and can 1171 // go directly to another, you do that through jumping to the 1172 // label "next". however, if you have intermediate work, like DMA 1173 // so that you can't go to the next state yet, you go to exit and 1174 // exit the loop. however, when the DMA is done it will trigger 1175 // an event and come back to this loop. 
    // RX descriptor state machine. Each case either sets up a DMA
    // transaction and exits (doRxDmaRead/doRxDmaWrite return true when the
    // transaction is outstanding), or advances rxState and falls back to
    // the 'next' label to run the following state.
    switch (rxState) {
      case rxIdle:
        // Nothing to do unless the receiver has been enabled.
        if (!rxEnable) {
            DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
            goto exit;
        }

        if (CRDD) {
            // Current rx descriptor is done: DMA just its link field to
            // locate the next descriptor in the chain.
            rxState = rxDescRefr;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData =
                is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link;
            rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        } else {
            // Fetch the entire descriptor that RXDP points at.
            rxState = rxDescRead;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
            rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      case rxDescRefr:
        // Wait for the link-field DMA read to complete before advancing.
        if (rxDmaState != dmaIdle)
            goto exit;

        rxState = rxAdvance;
        break;

      case rxDescRead:
        // Wait for the full-descriptor DMA read to complete.
        if (rxDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n",
                regs.rxdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                link, bufptr, cmdsts, extsts);

        if (cmdsts & CMDSTS_OWN) {
            // Descriptor still owned by the driver; nothing for the
            // device to fill yet.
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            goto exit;
        } else {
            rxState = rxFifoBlock;
            rxFragPtr = bufptr;
            rxDescCnt = cmdsts & CMDSTS_LEN_MASK;
        }
        break;

      case rxFifoBlock:
        if (!rxPacket) {
            /**
             * @todo in reality, we should be able to start processing
             * the packet as it arrives, and not have to wait for the
             * full packet ot be in the receive fifo.
             */
            if (rxFifo.empty())
                goto exit;

            DPRINTF(EthernetSM, "****processing receive of new packet****\n");

            // If we don't have a packet, grab a new one from the fifo.
            rxPacket = rxFifo.front();
            rxPktBytes = rxPacket->length;
            rxPacketBufPtr = rxPacket->data;

#if TRACING_ON
            if (DTRACE(Ethernet)) {
                IpPtr ip(rxPacket);
                if (ip) {
                    DPRINTF(Ethernet, "ID is %d\n", ip->id());
                    TcpPtr tcp(ip);
                    if (tcp) {
                        DPRINTF(Ethernet,
                                "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                                tcp->sport(), tcp->dport(), tcp->seq(),
                                tcp->ack());
                    }
                }
            }
#endif

            // sanity check - i think the driver behaves like this
            assert(rxDescCnt >= rxPktBytes);
            rxFifo.pop();
        }


        // dont' need the && rxDescCnt > 0 if driver sanity check
        // above holds
        if (rxPktBytes > 0) {
            // Still packet data left to DMA into the descriptor buffer.
            rxState = rxFragWrite;
            // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
            // check holds
            rxXferLen = rxPktBytes;

            rxDmaAddr = rxFragPtr & 0x3fffffff;
            rxDmaData = rxPacketBufPtr;
            rxDmaLen = rxXferLen;
            rxDmaFree = dmaDataFree;

            if (doRxDmaWrite())
                goto exit;

        } else {
            // Whole packet copied: build the cmdsts/extsts writeback.
            rxState = rxDescWrite;

            //if (rxPktBytes == 0) {  /* packet is done */
            assert(rxPktBytes == 0);
            DPRINTF(EthernetSM, "done with receiving packet\n");

            cmdsts |= CMDSTS_OWN;
            cmdsts &= ~CMDSTS_MORE;
            cmdsts |= CMDSTS_OK;
            cmdsts &= 0xffff0000;
            cmdsts += rxPacket->length;   //i.e. set CMDSTS_SIZE

#if 0
            /*
             * all the driver uses these are for its own stats keeping
             * which we don't care about, aren't necessary for
             * functionality and doing this would just slow us down.
             * if they end up using this in a later version for
             * functional purposes, just undef
             */
            if (rxFilterEnable) {
                cmdsts &= ~CMDSTS_DEST_MASK;
                const EthAddr &dst = rxFifoFront()->dst();
                if (dst->unicast())
                    cmdsts |= CMDSTS_DEST_SELF;
                if (dst->multicast())
                    cmdsts |= CMDSTS_DEST_MULTI;
                if (dst->broadcast())
                    cmdsts |= CMDSTS_DEST_MASK;
            }
#endif

            // When extended status is enabled, verify the IP/TCP/UDP
            // checksums of the received packet and record any errors in
            // extsts for the driver.
            IpPtr ip(rxPacket);
            if (extstsEnable && ip) {
                extsts |= EXTSTS_IPPKT;
                rxIpChecksums++;
                if (cksum(ip) != 0) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    extsts |= EXTSTS_IPERR;
                }
                TcpPtr tcp(ip);
                UdpPtr udp(ip);
                if (tcp) {
                    extsts |= EXTSTS_TCPPKT;
                    rxTcpChecksums++;
                    if (cksum(tcp) != 0) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        extsts |= EXTSTS_TCPERR;

                    }
                } else if (udp) {
                    extsts |= EXTSTS_UDPPKT;
                    rxUdpChecksums++;
                    if (cksum(udp) != 0) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        extsts |= EXTSTS_UDPERR;
                    }
                }
            }
            rxPacket = 0;

            /*
             * the driver seems to always receive into desc buffers
             * of size 1514, so you never have a pkt that is split
             * into multiple descriptors on the receive side, so
             * i don't implement that case, hence the assert above.
             */

            DPRINTF(EthernetDesc,
                    "rxDesc: addr=%08x writeback cmdsts extsts\n",
                    regs.rxdp & 0x3fffffff);
            DPRINTF(EthernetDesc,
                    "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                    link, bufptr, cmdsts, extsts);

            // Write back only the cmdsts and extsts fields (adjacent in
            // both the 32-bit and 64-bit descriptor layouts).
            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &cmdsts;
            if (is64bit) {
                rxDmaAddr += offsetof(ns_desc64, cmdsts);
                rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
            } else {
                rxDmaAddr += offsetof(ns_desc32, cmdsts);
                rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
            }
            rxDmaFree = dmaDescFree;

            descDmaWrites++;
            descDmaWrBytes += rxDmaLen;

            if (doRxDmaWrite())
                goto exit;
        }
        break;

      case rxFragWrite:
        // Wait for the data DMA write to complete, then account for the
        // bytes that were transferred.
        if (rxDmaState != dmaIdle)
            goto exit;

        rxPacketBufPtr += rxXferLen;
        rxFragPtr += rxXferLen;
        rxPktBytes -= rxXferLen;

        rxState = rxFifoBlock;
        break;

      case rxDescWrite:
        // Wait for the descriptor writeback to complete, then raise the
        // appropriate receive interrupts.
        if (rxDmaState != dmaIdle)
            goto exit;

        assert(cmdsts & CMDSTS_OWN);

        assert(rxPacket == 0);
        devIntrPost(ISR_RXOK);

        if (cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_RXDESC);

        if (!rxEnable) {
            DPRINTF(EthernetSM, "Halting the RX state machine\n");
            rxState = rxIdle;
            goto exit;
        } else
            rxState = rxAdvance;
        break;

      case rxAdvance:
        // Follow the link field to the next descriptor; a null link means
        // the end of the ring has been reached.
        if (link == 0) {
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            CRDD = true;
            goto exit;
        } else {
            if (rxDmaState != dmaIdle)
                goto exit;
            rxState = rxDescRead;
            regs.rxdp = link;
            CRDD = false;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
            rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            NsRxStateStrings[rxState]);
    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            NsRxStateStrings[rxState]);

    if (clock && !rxKickEvent.scheduled())
        schedule(rxKickEvent, rxKickTick);
}

/**
 * Attempt to send the packet at the head of the transmit fifo out the
 * ethernet link; on success, pop it, update stats, and post ISR_TXOK.
 * Reschedules itself while packets remain and the link is busy.
 */
void
NSGigE::transmit()
{
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "nothing to transmit\n");
        return;
    }

    DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
            txFifo.size());
    if (interface->sendPacket(txFifo.front())) {
#if TRACING_ON
        if (DTRACE(Ethernet)) {
            IpPtr ip(txFifo.front());
            if (ip) {
                DPRINTF(Ethernet, "ID is %d\n", ip->id());
                TcpPtr tcp(ip);
                if (tcp) {
                    DPRINTF(Ethernet,
                            "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                            tcp->sport(), tcp->dport(), tcp->seq(),
                            tcp->ack());
                }
            }
        }
#endif

        DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
        txBytes += txFifo.front()->length;
        txPackets++;

        DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
                txFifo.avail());
        txFifo.pop();

        /*
         * normally do a writeback of the descriptor here, and ONLY
         * after that is done, send this interrupt. but since our
         * stuff never actually fails, just do this interrupt here,
         * otherwise the code has to stray from this nice format.
         * besides, it's functionally the same.
1505 */ 1506 devIntrPost(ISR_TXOK); 1507 } 1508 1509 if (!txFifo.empty() && !txEvent.scheduled()) { 1510 DPRINTF(Ethernet, "reschedule transmit\n"); 1511 schedule(txEvent, curTick() + retryTime); 1512 } 1513} 1514 1515bool 1516NSGigE::doTxDmaRead() 1517{ 1518 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting); 1519 txDmaState = dmaReading; 1520 1521 if (dmaPending() || getState() != Running) 1522 txDmaState = dmaReadWaiting; 1523 else 1524 dmaRead(txDmaAddr, txDmaLen, &txDmaReadEvent, (uint8_t*)txDmaData); 1525 1526 return true; 1527} 1528 1529void 1530NSGigE::txDmaReadDone() 1531{ 1532 assert(txDmaState == dmaReading); 1533 txDmaState = dmaIdle; 1534 1535 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n", 1536 txDmaAddr, txDmaLen); 1537 DDUMP(EthernetDMA, txDmaData, txDmaLen); 1538 1539 // If the receive state machine has a pending DMA, let it go first 1540 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting) 1541 rxKick(); 1542 1543 txKick(); 1544} 1545 1546bool 1547NSGigE::doTxDmaWrite() 1548{ 1549 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting); 1550 txDmaState = dmaWriting; 1551 1552 if (dmaPending() || getState() != Running) 1553 txDmaState = dmaWriteWaiting; 1554 else 1555 dmaWrite(txDmaAddr, txDmaLen, &txDmaWriteEvent, (uint8_t*)txDmaData); 1556 return true; 1557} 1558 1559void 1560NSGigE::txDmaWriteDone() 1561{ 1562 assert(txDmaState == dmaWriting); 1563 txDmaState = dmaIdle; 1564 1565 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n", 1566 txDmaAddr, txDmaLen); 1567 DDUMP(EthernetDMA, txDmaData, txDmaLen); 1568 1569 // If the receive state machine has a pending DMA, let it go first 1570 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting) 1571 rxKick(); 1572 1573 txKick(); 1574} 1575 1576void 1577NSGigE::txKick() 1578{ 1579 bool is64bit = (bool)(regs.config & CFGR_M64ADDR); 1580 1581 DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n", 1582 NsTxStateStrings[txState], is64bit ? 
64 : 32); 1583 1584 Addr link, bufptr; 1585 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts; 1586 uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts; 1587 1588 next: 1589 if (clock) { 1590 if (txKickTick > curTick()) { 1591 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n", 1592 txKickTick); 1593 goto exit; 1594 } 1595 1596 // Go to the next state machine clock tick. 1597 txKickTick = curTick() + clockPeriod(); 1598 } 1599 1600 switch(txDmaState) { 1601 case dmaReadWaiting: 1602 if (doTxDmaRead()) 1603 goto exit; 1604 break; 1605 case dmaWriteWaiting: 1606 if (doTxDmaWrite()) 1607 goto exit; 1608 break; 1609 default: 1610 break; 1611 } 1612 1613 link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link; 1614 bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr; 1615 switch (txState) { 1616 case txIdle: 1617 if (!txEnable) { 1618 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n"); 1619 goto exit; 1620 } 1621 1622 if (CTDD) { 1623 txState = txDescRefr; 1624 1625 txDmaAddr = regs.txdp & 0x3fffffff; 1626 txDmaData = 1627 is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link; 1628 txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link); 1629 txDmaFree = dmaDescFree; 1630 1631 descDmaReads++; 1632 descDmaRdBytes += txDmaLen; 1633 1634 if (doTxDmaRead()) 1635 goto exit; 1636 1637 } else { 1638 txState = txDescRead; 1639 1640 txDmaAddr = regs.txdp & 0x3fffffff; 1641 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32; 1642 txDmaLen = is64bit ? 
sizeof(txDesc64) : sizeof(txDesc32); 1643 txDmaFree = dmaDescFree; 1644 1645 descDmaReads++; 1646 descDmaRdBytes += txDmaLen; 1647 1648 if (doTxDmaRead()) 1649 goto exit; 1650 } 1651 break; 1652 1653 case txDescRefr: 1654 if (txDmaState != dmaIdle) 1655 goto exit; 1656 1657 txState = txAdvance; 1658 break; 1659 1660 case txDescRead: 1661 if (txDmaState != dmaIdle) 1662 goto exit; 1663 1664 DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n", 1665 regs.txdp & 0x3fffffff); 1666 DPRINTF(EthernetDesc, 1667 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n", 1668 link, bufptr, cmdsts, extsts); 1669 1670 if (cmdsts & CMDSTS_OWN) { 1671 txState = txFifoBlock; 1672 txFragPtr = bufptr; 1673 txDescCnt = cmdsts & CMDSTS_LEN_MASK; 1674 } else { 1675 devIntrPost(ISR_TXIDLE); 1676 txState = txIdle; 1677 goto exit; 1678 } 1679 break; 1680 1681 case txFifoBlock: 1682 if (!txPacket) { 1683 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n"); 1684 txPacket = new EthPacketData(16384); 1685 txPacketBufPtr = txPacket->data; 1686 } 1687 1688 if (txDescCnt == 0) { 1689 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n"); 1690 if (cmdsts & CMDSTS_MORE) { 1691 DPRINTF(EthernetSM, "there are more descriptors to come\n"); 1692 txState = txDescWrite; 1693 1694 cmdsts &= ~CMDSTS_OWN; 1695 1696 txDmaAddr = regs.txdp & 0x3fffffff; 1697 txDmaData = &cmdsts; 1698 if (is64bit) { 1699 txDmaAddr += offsetof(ns_desc64, cmdsts); 1700 txDmaLen = sizeof(txDesc64.cmdsts); 1701 } else { 1702 txDmaAddr += offsetof(ns_desc32, cmdsts); 1703 txDmaLen = sizeof(txDesc32.cmdsts); 1704 } 1705 txDmaFree = dmaDescFree; 1706 1707 if (doTxDmaWrite()) 1708 goto exit; 1709 1710 } else { /* this packet is totally done */ 1711 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n"); 1712 /* deal with the the packet that just finished */ 1713 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) { 1714 IpPtr ip(txPacket); 1715 if (extsts & EXTSTS_UDPPKT) { 1716 UdpPtr udp(ip); 1717 
if (udp) { 1718 udp->sum(0); 1719 udp->sum(cksum(udp)); 1720 txUdpChecksums++; 1721 } else { 1722 Debug::breakpoint(); 1723 warn_once("UDPPKT set, but not UDP!\n"); 1724 } 1725 } else if (extsts & EXTSTS_TCPPKT) { 1726 TcpPtr tcp(ip); 1727 if (tcp) { 1728 tcp->sum(0); 1729 tcp->sum(cksum(tcp)); 1730 txTcpChecksums++; 1731 } else { 1732 Debug::breakpoint(); 1733 warn_once("TCPPKT set, but not UDP!\n"); 1734 } 1735 } 1736 if (extsts & EXTSTS_IPPKT) { 1737 if (ip) { 1738 ip->sum(0); 1739 ip->sum(cksum(ip)); 1740 txIpChecksums++; 1741 } else { 1742 Debug::breakpoint(); 1743 warn_once("IPPKT set, but not UDP!\n"); 1744 } 1745 } 1746 } 1747 1748 txPacket->length = txPacketBufPtr - txPacket->data; 1749 // this is just because the receive can't handle a 1750 // packet bigger want to make sure 1751 if (txPacket->length > 1514) 1752 panic("transmit packet too large, %s > 1514\n", 1753 txPacket->length); 1754 1755#ifndef NDEBUG 1756 bool success = 1757#endif 1758 txFifo.push(txPacket); 1759 assert(success); 1760 1761 /* 1762 * this following section is not tqo spec, but 1763 * functionally shouldn't be any different. normally, 1764 * the chip will wait til the transmit has occurred 1765 * before writing back the descriptor because it has 1766 * to wait to see that it was successfully transmitted 1767 * to decide whether to set CMDSTS_OK or not. 
1768 * however, in the simulator since it is always 1769 * successfully transmitted, and writing it exactly to 1770 * spec would complicate the code, we just do it here 1771 */ 1772 1773 cmdsts &= ~CMDSTS_OWN; 1774 cmdsts |= CMDSTS_OK; 1775 1776 DPRINTF(EthernetDesc, 1777 "txDesc writeback: cmdsts=%08x extsts=%08x\n", 1778 cmdsts, extsts); 1779 1780 txDmaFree = dmaDescFree; 1781 txDmaAddr = regs.txdp & 0x3fffffff; 1782 txDmaData = &cmdsts; 1783 if (is64bit) { 1784 txDmaAddr += offsetof(ns_desc64, cmdsts); 1785 txDmaLen = 1786 sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts); 1787 } else { 1788 txDmaAddr += offsetof(ns_desc32, cmdsts); 1789 txDmaLen = 1790 sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts); 1791 } 1792 1793 descDmaWrites++; 1794 descDmaWrBytes += txDmaLen; 1795 1796 transmit(); 1797 txPacket = 0; 1798 1799 if (!txEnable) { 1800 DPRINTF(EthernetSM, "halting TX state machine\n"); 1801 txState = txIdle; 1802 goto exit; 1803 } else 1804 txState = txAdvance; 1805 1806 if (doTxDmaWrite()) 1807 goto exit; 1808 } 1809 } else { 1810 DPRINTF(EthernetSM, "this descriptor isn't done yet\n"); 1811 if (!txFifo.full()) { 1812 txState = txFragRead; 1813 1814 /* 1815 * The number of bytes transferred is either whatever 1816 * is left in the descriptor (txDescCnt), or if there 1817 * is not enough room in the fifo, just whatever room 1818 * is left in the fifo 1819 */ 1820 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail()); 1821 1822 txDmaAddr = txFragPtr & 0x3fffffff; 1823 txDmaData = txPacketBufPtr; 1824 txDmaLen = txXferLen; 1825 txDmaFree = dmaDataFree; 1826 1827 if (doTxDmaRead()) 1828 goto exit; 1829 } else { 1830 txState = txFifoBlock; 1831 transmit(); 1832 1833 goto exit; 1834 } 1835 1836 } 1837 break; 1838 1839 case txFragRead: 1840 if (txDmaState != dmaIdle) 1841 goto exit; 1842 1843 txPacketBufPtr += txXferLen; 1844 txFragPtr += txXferLen; 1845 txDescCnt -= txXferLen; 1846 txFifo.reserve(txXferLen); 1847 1848 txState = txFifoBlock; 1849 break; 1850 1851 
      case txDescWrite:
        // Wait for the cmdsts writeback to complete, then raise the
        // per-descriptor interrupt if requested.
        if (txDmaState != dmaIdle)
            goto exit;

        if (cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_TXDESC);

        if (!txEnable) {
            DPRINTF(EthernetSM, "halting TX state machine\n");
            txState = txIdle;
            goto exit;
        } else
            txState = txAdvance;
        break;

      case txAdvance:
        // Follow the link field to the next descriptor; a null link means
        // the end of the ring has been reached.
        if (link == 0) {
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        } else {
            if (txDmaState != dmaIdle)
                goto exit;
            txState = txDescRead;
            regs.txdp = link;
            CTDD = false;

            txDmaAddr = link & 0x3fffffff;
            txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
            txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
            txDmaFree = dmaDescFree;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("invalid state");
    }

    DPRINTF(EthernetSM, "entering next txState=%s\n",
            NsTxStateStrings[txState]);
    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
            NsTxStateStrings[txState]);

    if (clock && !txKickEvent.scheduled())
        schedule(txKickEvent, txKickTick);
}

/**
 * Advance the EEPROM state machine
 * Called on rising edge of EEPROM clock bit in MEAR
 */
void
NSGigE::eepromKick()
{
    switch (eepromState) {

      case eepromStart:

        // Wait for start bit
        if (regs.mear & MEAR_EEDI) {
            // Set up to get 2 opcode bits
            eepromState = eepromGetOpcode;
            eepromBitsToRx = 2;
            eepromOpcode = 0;
        }
        break;

      case eepromGetOpcode:
        // Shift in one opcode bit per clock from the MEAR data-in bit.
        eepromOpcode <<= 1;
        eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting opcode
        if (eepromBitsToRx == 0) {
            if (eepromOpcode != EEPROM_READ)
                panic("only EEPROM reads are implemented!");

            // Set up to get address
            eepromState = eepromGetAddress;
            eepromBitsToRx = 6;
            eepromAddress = 0;
        }
        break;

      case eepromGetAddress:
        // Shift in one address bit per clock.
        eepromAddress <<= 1;
        eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting address
        if (eepromBitsToRx == 0) {

            if (eepromAddress >= EEPROM_SIZE)
                panic("EEPROM read access out of range!");

            // Only the perfect-match (MAC address) words are modelled;
            // each 16-bit EEPROM word holds two bytes of the address.
            switch (eepromAddress) {

              case EEPROM_PMATCH2_ADDR:
                eepromData = rom.perfectMatch[5];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[4];
                break;

              case EEPROM_PMATCH1_ADDR:
                eepromData = rom.perfectMatch[3];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[2];
                break;

              case EEPROM_PMATCH0_ADDR:
                eepromData = rom.perfectMatch[1];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[0];
                break;

              default:
                panic("FreeBSD driver only uses EEPROM to read PMATCH!");
            }
            // Set up to read data
            eepromState = eepromRead;
            eepromBitsToRx = 16;

            // Clear data in bit
            regs.mear &= ~MEAR_EEDI;
        }
        break;

      case eepromRead:
        // Clear Data Out bit
        regs.mear &= ~MEAR_EEDO;
        // Set bit to value of current EEPROM bit (MSB first)
        regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;

        eepromData <<= 1;
        --eepromBitsToRx;

        // All done
        if (eepromBitsToRx == 0) {
            eepromState = eepromStart;
        }
        break;

      default:
        panic("invalid EEPROM state");
    }

}

/**
 * A transmit on the wire has finished: if more packets are queued,
 * schedule another transmit attempt on the next device clock edge.
 */
void
NSGigE::transferDone()
{
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
        return;
    }

    DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");

    reschedule(txEvent, curTick() + clockPeriod(), true);
}

/**
 * Apply the receive address filter to an incoming packet.
 *
 * @param packet the incoming ethernet frame
 * @return true if the packet should be DROPPED, false if accepted
 */
bool
NSGigE::rxFilter(const EthPacketPtr &packet)
{
    EthPtr eth = packet;
    bool drop = true;
    string type;  // NOTE(review): unused local, kept for byte-compatibility

    const EthAddr &dst = eth->dst();
    if (dst.unicast()) {
        // If we're accepting all unicast addresses
        if (acceptUnicast)
            drop = false;

        // If we make a perfect match
        if (acceptPerfect && dst == rom.perfectMatch)
            drop = false;

        if (acceptArp && eth->type() == ETH_TYPE_ARP)
            drop = false;

    } else if (dst.broadcast()) {
        // if we're accepting broadcasts
        if (acceptBroadcast)
            drop = false;

    } else if (dst.multicast()) {
        // if we're accepting all multicasts
        if (acceptMulticast)
            drop = false;

        // Multicast hashing faked - all packets accepted
        if (multicastHashEnable)
            drop = false;
    }

    if (drop) {
        DPRINTF(Ethernet, "rxFilter drop\n");
        DDUMP(EthernetData, packet->data, packet->length);
    }

    return drop;
}

/**
 * Receive a packet from the wire. Returns true if the packet was
 * consumed (even if dropped by filtering/disable); false only when the
 * receive fifo has no room, which signals the link to retry.
 */
bool
NSGigE::recvPacket(EthPacketPtr packet)
{
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
            rxFifo.avail());

    if (!rxEnable) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        return true;
    }

    if (!rxFilterEnable) {
        DPRINTF(Ethernet,
            "receive packet filtering disabled . . . packet dropped\n");
        return true;
    }

    if (rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        return true;
    }

    if (rxFifo.avail() < packet->length) {
#if TRACING_ON
        IpPtr ip(packet);
        TcpPtr tcp(ip);
        if (ip) {
            DPRINTF(Ethernet,
                    "packet won't fit in receive buffer...pkt ID %d dropped\n",
                    ip->id());
            if (tcp) {
                DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
            }
        }
#endif
        droppedPackets++;
        devIntrPost(ISR_RXORN);
        return false;
    }

    rxFifo.push(packet);

    rxKick();
    return true;
}


/**
 * Resume simulation after a drain: re-kick both state machines so any
 * DMA left in a waiting state makes forward progress immediately.
 */
void
NSGigE::resume()
{
    SimObject::resume();

    // During drain we could have left the state machines in a waiting state and
    // they wouldn't get out until some other event occured to kick them.
    // This way they'll get out immediately
    txKick();
    rxKick();
}


//=====================================================================
//
//
void
NSGigE::serialize(ostream &os)
{
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
     */
    // @todo will mem system save pending dma?
2140 2141 /* 2142 * Serialize the device registers 2143 */ 2144 SERIALIZE_SCALAR(regs.command); 2145 SERIALIZE_SCALAR(regs.config); 2146 SERIALIZE_SCALAR(regs.mear); 2147 SERIALIZE_SCALAR(regs.ptscr); 2148 SERIALIZE_SCALAR(regs.isr); 2149 SERIALIZE_SCALAR(regs.imr); 2150 SERIALIZE_SCALAR(regs.ier); 2151 SERIALIZE_SCALAR(regs.ihr); 2152 SERIALIZE_SCALAR(regs.txdp); 2153 SERIALIZE_SCALAR(regs.txdp_hi); 2154 SERIALIZE_SCALAR(regs.txcfg); 2155 SERIALIZE_SCALAR(regs.gpior); 2156 SERIALIZE_SCALAR(regs.rxdp); 2157 SERIALIZE_SCALAR(regs.rxdp_hi); 2158 SERIALIZE_SCALAR(regs.rxcfg); 2159 SERIALIZE_SCALAR(regs.pqcr); 2160 SERIALIZE_SCALAR(regs.wcsr); 2161 SERIALIZE_SCALAR(regs.pcr); 2162 SERIALIZE_SCALAR(regs.rfcr); 2163 SERIALIZE_SCALAR(regs.rfdr); 2164 SERIALIZE_SCALAR(regs.brar); 2165 SERIALIZE_SCALAR(regs.brdr); 2166 SERIALIZE_SCALAR(regs.srr); 2167 SERIALIZE_SCALAR(regs.mibc); 2168 SERIALIZE_SCALAR(regs.vrcr); 2169 SERIALIZE_SCALAR(regs.vtcr); 2170 SERIALIZE_SCALAR(regs.vdr); 2171 SERIALIZE_SCALAR(regs.ccsr); 2172 SERIALIZE_SCALAR(regs.tbicr); 2173 SERIALIZE_SCALAR(regs.tbisr); 2174 SERIALIZE_SCALAR(regs.tanar); 2175 SERIALIZE_SCALAR(regs.tanlpar); 2176 SERIALIZE_SCALAR(regs.taner); 2177 SERIALIZE_SCALAR(regs.tesr); 2178 2179 SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN); 2180 SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE); 2181 2182 SERIALIZE_SCALAR(ioEnable); 2183 2184 /* 2185 * Serialize the data Fifos 2186 */ 2187 rxFifo.serialize("rxFifo", os); 2188 txFifo.serialize("txFifo", os); 2189 2190 /* 2191 * Serialize the various helper variables 2192 */ 2193 bool txPacketExists = txPacket; 2194 SERIALIZE_SCALAR(txPacketExists); 2195 if (txPacketExists) { 2196 txPacket->length = txPacketBufPtr - txPacket->data; 2197 txPacket->serialize("txPacket", os); 2198 uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data); 2199 SERIALIZE_SCALAR(txPktBufPtr); 2200 } 2201 2202 bool rxPacketExists = rxPacket; 2203 SERIALIZE_SCALAR(rxPacketExists); 2204 if (rxPacketExists) { 
2205 rxPacket->serialize("rxPacket", os); 2206 uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data); 2207 SERIALIZE_SCALAR(rxPktBufPtr); 2208 } 2209 2210 SERIALIZE_SCALAR(txXferLen); 2211 SERIALIZE_SCALAR(rxXferLen); 2212 2213 /* 2214 * Serialize Cached Descriptors 2215 */ 2216 SERIALIZE_SCALAR(rxDesc64.link); 2217 SERIALIZE_SCALAR(rxDesc64.bufptr); 2218 SERIALIZE_SCALAR(rxDesc64.cmdsts); 2219 SERIALIZE_SCALAR(rxDesc64.extsts); 2220 SERIALIZE_SCALAR(txDesc64.link); 2221 SERIALIZE_SCALAR(txDesc64.bufptr); 2222 SERIALIZE_SCALAR(txDesc64.cmdsts); 2223 SERIALIZE_SCALAR(txDesc64.extsts); 2224 SERIALIZE_SCALAR(rxDesc32.link); 2225 SERIALIZE_SCALAR(rxDesc32.bufptr); 2226 SERIALIZE_SCALAR(rxDesc32.cmdsts); 2227 SERIALIZE_SCALAR(rxDesc32.extsts); 2228 SERIALIZE_SCALAR(txDesc32.link); 2229 SERIALIZE_SCALAR(txDesc32.bufptr); 2230 SERIALIZE_SCALAR(txDesc32.cmdsts); 2231 SERIALIZE_SCALAR(txDesc32.extsts); 2232 SERIALIZE_SCALAR(extstsEnable); 2233 2234 /* 2235 * Serialize tx state machine 2236 */ 2237 int txState = this->txState; 2238 SERIALIZE_SCALAR(txState); 2239 SERIALIZE_SCALAR(txEnable); 2240 SERIALIZE_SCALAR(CTDD); 2241 SERIALIZE_SCALAR(txFragPtr); 2242 SERIALIZE_SCALAR(txDescCnt); 2243 int txDmaState = this->txDmaState; 2244 SERIALIZE_SCALAR(txDmaState); 2245 SERIALIZE_SCALAR(txKickTick); 2246 2247 /* 2248 * Serialize rx state machine 2249 */ 2250 int rxState = this->rxState; 2251 SERIALIZE_SCALAR(rxState); 2252 SERIALIZE_SCALAR(rxEnable); 2253 SERIALIZE_SCALAR(CRDD); 2254 SERIALIZE_SCALAR(rxPktBytes); 2255 SERIALIZE_SCALAR(rxFragPtr); 2256 SERIALIZE_SCALAR(rxDescCnt); 2257 int rxDmaState = this->rxDmaState; 2258 SERIALIZE_SCALAR(rxDmaState); 2259 SERIALIZE_SCALAR(rxKickTick); 2260 2261 /* 2262 * Serialize EEPROM state machine 2263 */ 2264 int eepromState = this->eepromState; 2265 SERIALIZE_SCALAR(eepromState); 2266 SERIALIZE_SCALAR(eepromClk); 2267 SERIALIZE_SCALAR(eepromBitsToRx); 2268 SERIALIZE_SCALAR(eepromOpcode); 2269 SERIALIZE_SCALAR(eepromAddress); 
2270 SERIALIZE_SCALAR(eepromData); 2271 2272 /* 2273 * If there's a pending transmit, store the time so we can 2274 * reschedule it later 2275 */ 2276 Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick() : 0; 2277 SERIALIZE_SCALAR(transmitTick); 2278 2279 /* 2280 * receive address filter settings 2281 */ 2282 SERIALIZE_SCALAR(rxFilterEnable); 2283 SERIALIZE_SCALAR(acceptBroadcast); 2284 SERIALIZE_SCALAR(acceptMulticast); 2285 SERIALIZE_SCALAR(acceptUnicast); 2286 SERIALIZE_SCALAR(acceptPerfect); 2287 SERIALIZE_SCALAR(acceptArp); 2288 SERIALIZE_SCALAR(multicastHashEnable); 2289 2290 /* 2291 * Keep track of pending interrupt status. 2292 */ 2293 SERIALIZE_SCALAR(intrTick); 2294 SERIALIZE_SCALAR(cpuPendingIntr); 2295 Tick intrEventTick = 0; 2296 if (intrEvent) 2297 intrEventTick = intrEvent->when(); 2298 SERIALIZE_SCALAR(intrEventTick); 2299 2300} 2301 2302void 2303NSGigE::unserialize(Checkpoint *cp, const std::string §ion) 2304{ 2305 // Unserialize the PciDev base class 2306 PciDev::unserialize(cp, section); 2307 2308 UNSERIALIZE_SCALAR(regs.command); 2309 UNSERIALIZE_SCALAR(regs.config); 2310 UNSERIALIZE_SCALAR(regs.mear); 2311 UNSERIALIZE_SCALAR(regs.ptscr); 2312 UNSERIALIZE_SCALAR(regs.isr); 2313 UNSERIALIZE_SCALAR(regs.imr); 2314 UNSERIALIZE_SCALAR(regs.ier); 2315 UNSERIALIZE_SCALAR(regs.ihr); 2316 UNSERIALIZE_SCALAR(regs.txdp); 2317 UNSERIALIZE_SCALAR(regs.txdp_hi); 2318 UNSERIALIZE_SCALAR(regs.txcfg); 2319 UNSERIALIZE_SCALAR(regs.gpior); 2320 UNSERIALIZE_SCALAR(regs.rxdp); 2321 UNSERIALIZE_SCALAR(regs.rxdp_hi); 2322 UNSERIALIZE_SCALAR(regs.rxcfg); 2323 UNSERIALIZE_SCALAR(regs.pqcr); 2324 UNSERIALIZE_SCALAR(regs.wcsr); 2325 UNSERIALIZE_SCALAR(regs.pcr); 2326 UNSERIALIZE_SCALAR(regs.rfcr); 2327 UNSERIALIZE_SCALAR(regs.rfdr); 2328 UNSERIALIZE_SCALAR(regs.brar); 2329 UNSERIALIZE_SCALAR(regs.brdr); 2330 UNSERIALIZE_SCALAR(regs.srr); 2331 UNSERIALIZE_SCALAR(regs.mibc); 2332 UNSERIALIZE_SCALAR(regs.vrcr); 2333 UNSERIALIZE_SCALAR(regs.vtcr); 2334 
UNSERIALIZE_SCALAR(regs.vdr); 2335 UNSERIALIZE_SCALAR(regs.ccsr); 2336 UNSERIALIZE_SCALAR(regs.tbicr); 2337 UNSERIALIZE_SCALAR(regs.tbisr); 2338 UNSERIALIZE_SCALAR(regs.tanar); 2339 UNSERIALIZE_SCALAR(regs.tanlpar); 2340 UNSERIALIZE_SCALAR(regs.taner); 2341 UNSERIALIZE_SCALAR(regs.tesr); 2342 2343 UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN); 2344 UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE); 2345 2346 UNSERIALIZE_SCALAR(ioEnable); 2347 2348 /* 2349 * unserialize the data fifos 2350 */ 2351 rxFifo.unserialize("rxFifo", cp, section); 2352 txFifo.unserialize("txFifo", cp, section); 2353 2354 /* 2355 * unserialize the various helper variables 2356 */ 2357 bool txPacketExists; 2358 UNSERIALIZE_SCALAR(txPacketExists); 2359 if (txPacketExists) { 2360 txPacket = new EthPacketData(16384); 2361 txPacket->unserialize("txPacket", cp, section); 2362 uint32_t txPktBufPtr; 2363 UNSERIALIZE_SCALAR(txPktBufPtr); 2364 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr; 2365 } else 2366 txPacket = 0; 2367 2368 bool rxPacketExists; 2369 UNSERIALIZE_SCALAR(rxPacketExists); 2370 rxPacket = 0; 2371 if (rxPacketExists) { 2372 rxPacket = new EthPacketData(16384); 2373 rxPacket->unserialize("rxPacket", cp, section); 2374 uint32_t rxPktBufPtr; 2375 UNSERIALIZE_SCALAR(rxPktBufPtr); 2376 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr; 2377 } else 2378 rxPacket = 0; 2379 2380 UNSERIALIZE_SCALAR(txXferLen); 2381 UNSERIALIZE_SCALAR(rxXferLen); 2382 2383 /* 2384 * Unserialize Cached Descriptors 2385 */ 2386 UNSERIALIZE_SCALAR(rxDesc64.link); 2387 UNSERIALIZE_SCALAR(rxDesc64.bufptr); 2388 UNSERIALIZE_SCALAR(rxDesc64.cmdsts); 2389 UNSERIALIZE_SCALAR(rxDesc64.extsts); 2390 UNSERIALIZE_SCALAR(txDesc64.link); 2391 UNSERIALIZE_SCALAR(txDesc64.bufptr); 2392 UNSERIALIZE_SCALAR(txDesc64.cmdsts); 2393 UNSERIALIZE_SCALAR(txDesc64.extsts); 2394 UNSERIALIZE_SCALAR(rxDesc32.link); 2395 UNSERIALIZE_SCALAR(rxDesc32.bufptr); 2396 UNSERIALIZE_SCALAR(rxDesc32.cmdsts); 2397 
UNSERIALIZE_SCALAR(rxDesc32.extsts); 2398 UNSERIALIZE_SCALAR(txDesc32.link); 2399 UNSERIALIZE_SCALAR(txDesc32.bufptr); 2400 UNSERIALIZE_SCALAR(txDesc32.cmdsts); 2401 UNSERIALIZE_SCALAR(txDesc32.extsts); 2402 UNSERIALIZE_SCALAR(extstsEnable); 2403 2404 /* 2405 * unserialize tx state machine 2406 */ 2407 int txState; 2408 UNSERIALIZE_SCALAR(txState); 2409 this->txState = (TxState) txState; 2410 UNSERIALIZE_SCALAR(txEnable); 2411 UNSERIALIZE_SCALAR(CTDD); 2412 UNSERIALIZE_SCALAR(txFragPtr); 2413 UNSERIALIZE_SCALAR(txDescCnt); 2414 int txDmaState; 2415 UNSERIALIZE_SCALAR(txDmaState); 2416 this->txDmaState = (DmaState) txDmaState; 2417 UNSERIALIZE_SCALAR(txKickTick); 2418 if (txKickTick) 2419 schedule(txKickEvent, txKickTick); 2420 2421 /* 2422 * unserialize rx state machine 2423 */ 2424 int rxState; 2425 UNSERIALIZE_SCALAR(rxState); 2426 this->rxState = (RxState) rxState; 2427 UNSERIALIZE_SCALAR(rxEnable); 2428 UNSERIALIZE_SCALAR(CRDD); 2429 UNSERIALIZE_SCALAR(rxPktBytes); 2430 UNSERIALIZE_SCALAR(rxFragPtr); 2431 UNSERIALIZE_SCALAR(rxDescCnt); 2432 int rxDmaState; 2433 UNSERIALIZE_SCALAR(rxDmaState); 2434 this->rxDmaState = (DmaState) rxDmaState; 2435 UNSERIALIZE_SCALAR(rxKickTick); 2436 if (rxKickTick) 2437 schedule(rxKickEvent, rxKickTick); 2438 2439 /* 2440 * Unserialize EEPROM state machine 2441 */ 2442 int eepromState; 2443 UNSERIALIZE_SCALAR(eepromState); 2444 this->eepromState = (EEPROMState) eepromState; 2445 UNSERIALIZE_SCALAR(eepromClk); 2446 UNSERIALIZE_SCALAR(eepromBitsToRx); 2447 UNSERIALIZE_SCALAR(eepromOpcode); 2448 UNSERIALIZE_SCALAR(eepromAddress); 2449 UNSERIALIZE_SCALAR(eepromData); 2450 2451 /* 2452 * If there's a pending transmit, reschedule it now 2453 */ 2454 Tick transmitTick; 2455 UNSERIALIZE_SCALAR(transmitTick); 2456 if (transmitTick) 2457 schedule(txEvent, curTick() + transmitTick); 2458 2459 /* 2460 * unserialize receive address filter settings 2461 */ 2462 UNSERIALIZE_SCALAR(rxFilterEnable); 2463 UNSERIALIZE_SCALAR(acceptBroadcast); 2464 
UNSERIALIZE_SCALAR(acceptMulticast); 2465 UNSERIALIZE_SCALAR(acceptUnicast); 2466 UNSERIALIZE_SCALAR(acceptPerfect); 2467 UNSERIALIZE_SCALAR(acceptArp); 2468 UNSERIALIZE_SCALAR(multicastHashEnable); 2469 2470 /* 2471 * Keep track of pending interrupt status. 2472 */ 2473 UNSERIALIZE_SCALAR(intrTick); 2474 UNSERIALIZE_SCALAR(cpuPendingIntr); 2475 Tick intrEventTick; 2476 UNSERIALIZE_SCALAR(intrEventTick); 2477 if (intrEventTick) { 2478 intrEvent = new IntrEvent(this, true); 2479 schedule(intrEvent, intrEventTick); 2480 } 2481} 2482 2483NSGigE * 2484NSGigEParams::create() 2485{ 2486 return new NSGigE(this); 2487} 2488