1/* 2 * Copyright (c) 2004-2005 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 * 28 * Authors: Nathan Binkert 29 * Lisa Hsu 30 */ 31 32/** @file 33 * Device module for modelling the National Semiconductor 34 * DP83820 ethernet controller. 
Does not support priority queueing 35 */ 36 37#include "dev/net/ns_gige.hh" 38 39#include <deque> 40#include <memory> 41#include <string> 42 43#include "base/debug.hh" 44#include "base/inet.hh" 45#include "base/types.hh" 46#include "config/the_isa.hh" 47#include "debug/EthernetAll.hh" 48#include "dev/net/etherlink.hh" 49#include "mem/packet.hh" 50#include "mem/packet_access.hh" 51#include "params/NSGigE.hh" 52#include "sim/system.hh" 53 54// clang complains about std::set being overloaded with Packet::set if 55// we open up the entire namespace std 56using std::make_shared; 57using std::min; 58using std::ostream; 59using std::string; 60 61const char *NsRxStateStrings[] = 62{ 63 "rxIdle", 64 "rxDescRefr", 65 "rxDescRead", 66 "rxFifoBlock", 67 "rxFragWrite", 68 "rxDescWrite", 69 "rxAdvance" 70}; 71 72const char *NsTxStateStrings[] = 73{ 74 "txIdle", 75 "txDescRefr", 76 "txDescRead", 77 "txFifoBlock", 78 "txFragRead", 79 "txDescWrite", 80 "txAdvance" 81}; 82 83const char *NsDmaState[] = 84{ 85 "dmaIdle", 86 "dmaReading", 87 "dmaWriting", 88 "dmaReadWaiting", 89 "dmaWriteWaiting" 90}; 91 92using namespace Net; 93using namespace TheISA; 94 95/////////////////////////////////////////////////////////////////////// 96// 97// NSGigE PCI Device 98// 99NSGigE::NSGigE(Params *p) 100 : EtherDevBase(p), ioEnable(false), 101 txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size), 102 txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL), 103 txXferLen(0), rxXferLen(0), rxDmaFree(false), txDmaFree(false), 104 txState(txIdle), txEnable(false), CTDD(false), txHalt(false), 105 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle), 106 rxEnable(false), CRDD(false), rxPktBytes(0), rxHalt(false), 107 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false), 108 eepromState(eepromStart), eepromClk(false), eepromBitsToRx(0), 109 eepromOpcode(0), eepromAddress(0), eepromData(0), 110 dmaReadDelay(p->dma_read_delay), dmaWriteDelay(p->dma_write_delay), 111 dmaReadFactor(p->dma_read_factor), dmaWriteFactor(p->dma_write_factor), 112 rxDmaData(NULL), rxDmaAddr(0), rxDmaLen(0), 113 txDmaData(NULL), txDmaAddr(0), txDmaLen(0), 114 rxDmaReadEvent([this]{ rxDmaReadDone(); }, name()), 115 rxDmaWriteEvent([this]{ rxDmaWriteDone(); }, name()), 116 txDmaReadEvent([this]{ txDmaReadDone(); }, name()), 117 txDmaWriteEvent([this]{ txDmaWriteDone(); }, name()), 118 dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free), 119 txDelay(p->tx_delay), rxDelay(p->rx_delay), 120 rxKickTick(0), 121 rxKickEvent([this]{ rxKick(); }, name()), 122 txKickTick(0), 123 txKickEvent([this]{ txKick(); }, name()), 124 txEvent([this]{ txEventTransmit(); }, name()), 125 rxFilterEnable(p->rx_filter), 126 acceptBroadcast(false), acceptMulticast(false), acceptUnicast(false), 127 acceptPerfect(false), acceptArp(false), multicastHashEnable(false), 128 intrDelay(p->intr_delay), intrTick(0), cpuPendingIntr(false), 129 intrEvent(0), interface(0) 130{ 131 132 133 interface = new NSGigEInt(name() + ".int0", this); 134 135 regsReset(); 136 memcpy(&rom.perfectMatch, p->hardware_address.bytes(), ETH_ADDR_LEN); 137 138 memset(&rxDesc32, 0, sizeof(rxDesc32)); 139 memset(&txDesc32, 0, sizeof(txDesc32)); 140 memset(&rxDesc64, 0, sizeof(rxDesc64)); 141 memset(&txDesc64, 0, sizeof(txDesc64)); 142} 143 144NSGigE::~NSGigE() 145{ 146 delete interface; 147} 148 149/** 150 * This is to write to the PCI general configuration registers 151 */ 152Tick 153NSGigE::writeConfig(PacketPtr pkt) 154{ 155 int offset = pkt->getAddr() & PCI_CONFIG_SIZE; 
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDevice::writeConfig(pkt);
    else
        panic("Device specific PCI config space not implemented!\n");

    switch (offset) {
        // Things seem to work fine without all of these PCI settings, but
        // the IO enable bit is tracked here so that an assertion will fail
        // if we ever need to implement it properly.
      case PCI_COMMAND:
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;
        break;
    }

    return configDelay;
}

Port &
NSGigE::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "interface")
        return *interface;
    return EtherDevBase::getPort(if_name, idx);
}

/**
 * This reads the device registers, which are detailed in the NS83820
 * spec sheet
 */
Tick
NSGigE::read(PacketPtr pkt)
{
    assert(ioEnable);

    // The mask gives only the offset into the device register file
    Addr daddr = pkt->getAddr() & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x size=%d\n",
            daddr, pkt->getAddr(), pkt->getSize());

    // There are some reserved registers; see ns_gige_reg.h and
    // the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        return readConfig(pkt);
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // We don't implement the MIBs; they are just hardware statistics
        // counters, and the kernel hopefully doesn't depend on their
        // values, so simply return zero.
        pkt->setLE<uint32_t>(0);
        pkt->makeAtomicResponse();
        return pioDelay;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    assert(pkt->getSize() == sizeof(uint32_t));
    uint32_t &reg = *pkt->getPtr<uint32_t>();
    uint16_t rfaddr;

    switch (daddr) {
      case CR:
        reg = regs.command;
        // these are supposed to be cleared on a read
        reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
        break;

      case CFGR:
        reg = regs.config;
        break;

      case MEAR:
        reg = regs.mear;
        break;

      case PTSCR:
        reg = regs.ptscr;
        break;

      case ISR:
        reg = regs.isr;
        devIntrClear(ISR_ALL);
        break;

      case IMR:
        reg = regs.imr;
        break;

      case IER:
        reg = regs.ier;
        break;

      case IHR:
        reg = regs.ihr;
        break;

      case TXDP:
        reg = regs.txdp;
        break;

      case TXDP_HI:
        reg = regs.txdp_hi;
        break;

      case TX_CFG:
        reg = regs.txcfg;
        break;

      case GPIOR:
        reg = regs.gpior;
        break;

      case RXDP:
        reg = regs.rxdp;
        break;

      case RXDP_HI:
        reg = regs.rxdp_hi;
        break;

      case RX_CFG:
        reg = regs.rxcfg;
        break;

      case PQCR:
        reg = regs.pqcr;
        break;

      case WCSR:
        reg = regs.wcsr;
        break;

      case PCR:
        reg = regs.pcr;
        break;

        // See the spec sheet for how RFCR and RFDR work: software first
        // writes the receive-filter address it wants to access to RFCR,
        // and then reads or writes RFDR, which operates on the location
        // selected by RFCR.
      case RFCR:
        reg = regs.rfcr;
        break;

      case RFDR:
        rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
        switch (rfaddr) {
            // Read from perfect match ROM octets
          case 0x000:
            reg = rom.perfectMatch[1];
            reg = reg << 8;
            reg += rom.perfectMatch[0];
            break;
          case 0x002:
            reg = rom.perfectMatch[3] << 8;
            reg += rom.perfectMatch[2];
break; 317 case 0x004: 318 reg = rom.perfectMatch[5] << 8; 319 reg += rom.perfectMatch[4]; 320 break; 321 default: 322 // Read filter hash table 323 if (rfaddr >= FHASH_ADDR && 324 rfaddr < FHASH_ADDR + FHASH_SIZE) { 325 326 // Only word-aligned reads supported 327 if (rfaddr % 2) 328 panic("unaligned read from filter hash table!"); 329 330 reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8; 331 reg += rom.filterHash[rfaddr - FHASH_ADDR]; 332 break; 333 } 334 335 panic("reading RFDR for something other than pattern" 336 " matching or hashing! %#x\n", rfaddr); 337 } 338 break; 339 340 case SRR: 341 reg = regs.srr; 342 break; 343 344 case MIBC: 345 reg = regs.mibc; 346 reg &= ~(MIBC_MIBS | MIBC_ACLR); 347 break; 348 349 case VRCR: 350 reg = regs.vrcr; 351 break; 352 353 case VTCR: 354 reg = regs.vtcr; 355 break; 356 357 case VDR: 358 reg = regs.vdr; 359 break; 360 361 case CCSR: 362 reg = regs.ccsr; 363 break; 364 365 case TBICR: 366 reg = regs.tbicr; 367 break; 368 369 case TBISR: 370 reg = regs.tbisr; 371 break; 372 373 case TANAR: 374 reg = regs.tanar; 375 break; 376 377 case TANLPAR: 378 reg = regs.tanlpar; 379 break; 380 381 case TANER: 382 reg = regs.taner; 383 break; 384 385 case TESR: 386 reg = regs.tesr; 387 break; 388 389 case M5REG: 390 reg = 0; 391 if (params()->rx_thread) 392 reg |= M5REG_RX_THREAD; 393 if (params()->tx_thread) 394 reg |= M5REG_TX_THREAD; 395 if (params()->rss) 396 reg |= M5REG_RSS; 397 break; 398 399 default: 400 panic("reading unimplemented register: addr=%#x", daddr); 401 } 402 403 DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n", 404 daddr, reg, reg); 405 406 pkt->makeAtomicResponse(); 407 return pioDelay; 408} 409 410Tick 411NSGigE::write(PacketPtr pkt) 412{ 413 assert(ioEnable); 414 415 Addr daddr = pkt->getAddr() & 0xfff; 416 DPRINTF(EthernetPIO, "write da=%#x pa=%#x size=%d\n", 417 daddr, pkt->getAddr(), pkt->getSize()); 418 419 if (daddr > LAST && daddr <= RESERVED) { 420 panic("Accessing reserved register"); 421 } else if (daddr > RESERVED && daddr <= 0x3FC) { 422 return writeConfig(pkt); 423 } else if (daddr > 0x3FC) 424 panic("Something is messed up!\n"); 425 426 if (pkt->getSize() == sizeof(uint32_t)) { 427 uint32_t reg = pkt->getLE<uint32_t>(); 428 uint16_t rfaddr; 429 430 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg); 431 432 switch (daddr) { 433 case CR: 434 regs.command = reg; 435 if (reg & CR_TXD) { 436 txEnable = false; 437 } else if (reg & CR_TXE) { 438 txEnable = true; 439 440 // the kernel is enabling the transmit machine 441 if (txState == txIdle) 442 txKick(); 443 } 444 445 if (reg & CR_RXD) { 446 rxEnable = false; 447 } else if (reg & CR_RXE) { 448 rxEnable = true; 449 450 if (rxState == rxIdle) 451 rxKick(); 452 } 453 454 if (reg & CR_TXR) 455 txReset(); 456 457 if (reg & CR_RXR) 458 rxReset(); 459 460 if (reg & CR_SWI) 461 devIntrPost(ISR_SWI); 462 463 if (reg & CR_RST) { 464 txReset(); 465 rxReset(); 466 467 regsReset(); 468 } 469 break; 470 471 case CFGR: 472 if (reg & CFGR_LNKSTS || 473 reg & CFGR_SPDSTS || 474 reg & CFGR_DUPSTS || 475 reg & CFGR_RESERVED || 476 reg & CFGR_T64ADDR || 477 reg & CFGR_PCI64_DET) { 478 // First clear all writable bits 479 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS | 480 CFGR_RESERVED | CFGR_T64ADDR | 481 CFGR_PCI64_DET; 482 // Now set the appropriate writable bits 483 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS | 484 CFGR_RESERVED | CFGR_T64ADDR | 485 CFGR_PCI64_DET); 486 } 487 488 if (reg & CFGR_AUTO_1000) 489 panic("CFGR_AUTO_1000 not 
implemented!\n"); 490 491 if (reg & CFGR_PCI64_DET) 492 panic("CFGR_PCI64_DET is read only register!\n"); 493 494 if (reg & CFGR_EXTSTS_EN) 495 extstsEnable = true; 496 else 497 extstsEnable = false; 498 break; 499 500 case MEAR: 501 // Clear writable bits 502 regs.mear &= MEAR_EEDO; 503 // Set appropriate writable bits 504 regs.mear |= reg & ~MEAR_EEDO; 505 506 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address) 507 // even though it could get it through RFDR 508 if (reg & MEAR_EESEL) { 509 // Rising edge of clock 510 if (reg & MEAR_EECLK && !eepromClk) 511 eepromKick(); 512 } 513 else { 514 eepromState = eepromStart; 515 regs.mear &= ~MEAR_EEDI; 516 } 517 518 eepromClk = reg & MEAR_EECLK; 519 520 // since phy is completely faked, MEAR_MD* don't matter 521 break; 522 523 case PTSCR: 524 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY); 525 // these control BISTs for various parts of chip - we 526 // don't care or do just fake that the BIST is done 527 if (reg & PTSCR_RBIST_EN) 528 regs.ptscr |= PTSCR_RBIST_DONE; 529 if (reg & PTSCR_EEBIST_EN) 530 regs.ptscr &= ~PTSCR_EEBIST_EN; 531 if (reg & PTSCR_EELOAD_EN) 532 regs.ptscr &= ~PTSCR_EELOAD_EN; 533 break; 534 535 case ISR: /* writing to the ISR has no effect */ 536 panic("ISR is a read only register!\n"); 537 538 case IMR: 539 regs.imr = reg; 540 devIntrChangeMask(); 541 break; 542 543 case IER: 544 regs.ier = reg; 545 break; 546 547 case IHR: 548 regs.ihr = reg; 549 /* not going to implement real interrupt holdoff */ 550 break; 551 552 case TXDP: 553 regs.txdp = (reg & 0xFFFFFFFC); 554 assert(txState == txIdle); 555 CTDD = false; 556 break; 557 558 case TXDP_HI: 559 regs.txdp_hi = reg; 560 break; 561 562 case TX_CFG: 563 regs.txcfg = reg; 564 565 // also, we currently don't care about fill/drain 566 // thresholds though this may change in the future with 567 // more realistic networks or a driver which changes it 568 // according to feedback 569 570 break; 571 572 case GPIOR: 573 // Only write writable bits 574 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN 575 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN; 576 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN 577 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN); 578 /* these just control general purpose i/o pins, don't matter */ 579 break; 580 581 case RXDP: 582 regs.rxdp = reg; 583 CRDD = false; 584 break; 585 586 case RXDP_HI: 587 regs.rxdp_hi = reg; 588 break; 589 590 case RX_CFG: 591 regs.rxcfg = reg; 592 break; 593 594 case PQCR: 595 /* there is no priority queueing used in the linux 2.6 driver */ 596 regs.pqcr = reg; 597 break; 598 599 case WCSR: 600 /* not going to implement wake on LAN */ 601 regs.wcsr = reg; 602 break; 603 604 case PCR: 605 /* not going to implement pause control */ 606 regs.pcr = reg; 607 break; 608 609 case RFCR: 610 regs.rfcr = reg; 611 612 rxFilterEnable = (reg & RFCR_RFEN) ? true : false; 613 acceptBroadcast = (reg & RFCR_AAB) ? true : false; 614 acceptMulticast = (reg & RFCR_AAM) ? true : false; 615 acceptUnicast = (reg & RFCR_AAU) ? true : false; 616 acceptPerfect = (reg & RFCR_APM) ? true : false; 617 acceptArp = (reg & RFCR_AARP) ? true : false; 618 multicastHashEnable = (reg & RFCR_MHEN) ? 
true : false; 619 620 if (reg & RFCR_UHEN) 621 panic("Unicast hash filtering not used by drivers!\n"); 622 623 if (reg & RFCR_ULM) 624 panic("RFCR_ULM not implemented!\n"); 625 626 break; 627 628 case RFDR: 629 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR); 630 switch (rfaddr) { 631 case 0x000: 632 rom.perfectMatch[0] = (uint8_t)reg; 633 rom.perfectMatch[1] = (uint8_t)(reg >> 8); 634 break; 635 case 0x002: 636 rom.perfectMatch[2] = (uint8_t)reg; 637 rom.perfectMatch[3] = (uint8_t)(reg >> 8); 638 break; 639 case 0x004: 640 rom.perfectMatch[4] = (uint8_t)reg; 641 rom.perfectMatch[5] = (uint8_t)(reg >> 8); 642 break; 643 default: 644 645 if (rfaddr >= FHASH_ADDR && 646 rfaddr < FHASH_ADDR + FHASH_SIZE) { 647 648 // Only word-aligned writes supported 649 if (rfaddr % 2) 650 panic("unaligned write to filter hash table!"); 651 652 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg; 653 rom.filterHash[rfaddr - FHASH_ADDR + 1] 654 = (uint8_t)(reg >> 8); 655 break; 656 } 657 panic("writing RFDR for something other than pattern matching " 658 "or hashing! %#x\n", rfaddr); 659 } 660 break; 661 662 case BRAR: 663 regs.brar = reg; 664 break; 665 666 case BRDR: 667 panic("the driver never uses BRDR, something is wrong!\n"); 668 669 case SRR: 670 panic("SRR is read only register!\n"); 671 672 case MIBC: 673 panic("the driver never uses MIBC, something is wrong!\n"); 674 675 case VRCR: 676 regs.vrcr = reg; 677 break; 678 679 case VTCR: 680 regs.vtcr = reg; 681 break; 682 683 case VDR: 684 panic("the driver never uses VDR, something is wrong!\n"); 685 686 case CCSR: 687 /* not going to implement clockrun stuff */ 688 regs.ccsr = reg; 689 break; 690 691 case TBICR: 692 regs.tbicr = reg; 693 if (reg & TBICR_MR_LOOPBACK) 694 panic("TBICR_MR_LOOPBACK never used, something wrong!\n"); 695 696 if (reg & TBICR_MR_AN_ENABLE) { 697 regs.tanlpar = regs.tanar; 698 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS); 699 } 700 701 break; 702 703 case TBISR: 704 panic("TBISR is read only register!\n"); 705 706 case TANAR: 707 // Only write the writable bits 708 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED; 709 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED); 710 711 // Pause capability unimplemented 712 break; 713 714 case TANLPAR: 715 panic("this should only be written to by the fake phy!\n"); 716 717 case TANER: 718 panic("TANER is read only register!\n"); 719 720 case TESR: 721 regs.tesr = reg; 722 break; 723 724 default: 725 panic("invalid register access daddr=%#x", daddr); 726 } 727 } else { 728 panic("Invalid Request Size"); 729 } 730 pkt->makeAtomicResponse(); 731 return pioDelay; 732} 733 734void 735NSGigE::devIntrPost(uint32_t interrupts) 736{ 737 if (interrupts & ISR_RESERVE) 738 panic("Cannot set a reserved interrupt"); 739 740 if (interrupts & ISR_NOIMPL) 741 warn("interrupt not implemented %#x\n", interrupts); 742 743 interrupts &= ISR_IMPL; 744 regs.isr |= interrupts; 745 746 if (interrupts & regs.imr) { 747 if (interrupts & ISR_SWI) { 748 totalSwi++; 749 } 750 if (interrupts & ISR_RXIDLE) { 751 totalRxIdle++; 752 } 753 if (interrupts & ISR_RXOK) { 754 totalRxOk++; 755 } 756 if (interrupts & ISR_RXDESC) { 757 totalRxDesc++; 758 } 759 if (interrupts & ISR_TXOK) { 760 totalTxOk++; 761 } 762 if (interrupts & ISR_TXIDLE) { 763 totalTxIdle++; 764 } 765 if (interrupts & ISR_TXDESC) { 766 totalTxDesc++; 767 } 768 if (interrupts & ISR_RXORN) { 769 totalRxOrn++; 770 } 771 } 772 773 DPRINTF(EthernetIntr, 774 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n", 775 interrupts, regs.isr, 
            regs.imr);

    if ((regs.isr & regs.imr)) {
        Tick when = curTick();
        if ((regs.isr & regs.imr & ISR_NODELAY) == 0)
            when += intrDelay;
        postedInterrupts++;
        cpuIntrPost(when);
    }
}

/* Because the interrupt-counting stats are updated here, this function is
   only suitable for clearing all interrupts when the kernel reads and
   services the ISR; keep that in mind before expanding its use.
*/
void
NSGigE::devIntrClear(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot clear a reserved interrupt");

    if (regs.isr & regs.imr & ISR_SWI) {
        postedSwi++;
    }
    if (regs.isr & regs.imr & ISR_RXIDLE) {
        postedRxIdle++;
    }
    if (regs.isr & regs.imr & ISR_RXOK) {
        postedRxOk++;
    }
    if (regs.isr & regs.imr & ISR_RXDESC) {
        postedRxDesc++;
    }
    if (regs.isr & regs.imr & ISR_TXOK) {
        postedTxOk++;
    }
    if (regs.isr & regs.imr & ISR_TXIDLE) {
        postedTxIdle++;
    }
    if (regs.isr & regs.imr & ISR_TXDESC) {
        postedTxDesc++;
    }
    if (regs.isr & regs.imr & ISR_RXORN) {
        postedRxOrn++;
    }

    interrupts &= ~ISR_NOIMPL;
    regs.isr &= ~interrupts;

    DPRINTF(EthernetIntr,
            "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
            interrupts, regs.isr, regs.imr);

    if (!(regs.isr & regs.imr))
        cpuIntrClear();
}

void
NSGigE::devIntrChangeMask()
{
    DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
            regs.isr, regs.imr, regs.isr & regs.imr);

    if (regs.isr & regs.imr)
        cpuIntrPost(curTick());
    else
        cpuIntrClear();
}

void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick());
    assert(intrTick >= curTick() || intrTick == 0);
    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    if (intrTick < curTick()) {
        intrTick = curTick();
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    if (intrEvent)
        intrEvent->squash();

    intrEvent = new EventFunctionWrapper([this]{ cpuInterrupt(); },
                                         name(), true);
    schedule(intrEvent, intrTick);
}

void
NSGigE::cpuInterrupt()
{
    assert(intrTick == curTick());

    // Whether or not there's a pending interrupt, we don't care about
    // it anymore
    intrEvent = 0;
    intrTick = 0;

    // Don't send an interrupt if there's already one
    if (cpuPendingIntr) {
        DPRINTF(EthernetIntr,
                "would send an interrupt now, but there's already pending\n");
    } else {
        // Send interrupt
        cpuPendingIntr = true;

        DPRINTF(EthernetIntr, "posting interrupt\n");
        intrPost();
    }
}

void
NSGigE::cpuIntrClear()
{
    if (!cpuPendingIntr)
        return;

    if (intrEvent) {
        intrEvent->squash();
        intrEvent = 0;
    }

    intrTick = 0;

    cpuPendingIntr = false;

    DPRINTF(EthernetIntr, "clearing interrupt\n");
    intrClear();
}

bool
NSGigE::cpuIntrPending() const
{ return cpuPendingIntr; }

void
NSGigE::txReset()
{
    DPRINTF(Ethernet, "transmit reset\n");

    CTDD = false;
    txEnable = false;
    txFragPtr = 0;
    assert(txDescCnt == 0);
    txFifo.clear();
    txState = txIdle;
    assert(txDmaState == dmaIdle);
}

void
NSGigE::rxReset()
{
    DPRINTF(Ethernet, "receive reset\n");

    CRDD = false;
    assert(rxPktBytes == 0);
    rxEnable = false;
    rxFragPtr = 0;
    assert(rxDescCnt == 0);
    assert(rxDmaState == dmaIdle);
    rxFifo.clear();
    rxState = rxIdle;
}

void
NSGigE::regsReset()
{
    memset(&regs, 0, sizeof(regs));
    regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
    regs.mear = 0x12;
    regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
                        // fill threshold to 32 bytes
    regs.rxcfg = 0x4;   // set drain threshold to 16 bytes
    regs.srr = 0x0103;  // set the silicon revision to rev B or 0x103
    regs.mibc = MIBC_FRZ;
    regs.vdr = 0x81;    // set the vlan tag type to 802.1q
    regs.tesr = 0xc000; // TBI capable of both full and half duplex
    regs.brar = 0xffffffff;

    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}

bool
NSGigE::doRxDmaRead()
{
    assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
    rxDmaState = dmaReading;

    if (dmaPending() || drainState() != DrainState::Running)
        rxDmaState = dmaReadWaiting;
    else
        dmaRead(rxDmaAddr, rxDmaLen, &rxDmaReadEvent, (uint8_t*)rxDmaData);

    return true;
}

void
NSGigE::rxDmaReadDone()
{
    assert(rxDmaState == dmaReading);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}
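/**
 * The four DMA helpers (doRxDmaRead/doRxDmaWrite here, and
 * doTxDmaRead/doTxDmaWrite further down) all follow the same handshake:
 * mark the channel as reading or writing, fall back to the matching
 * "waiting" state if another DMA is already pending or the device is
 * draining, and otherwise issue the access with a completion event.
 * Each completion handler then re-runs the kick loops, giving the other
 * direction's deferred DMA a chance to go first.
 */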
1011bool 1012NSGigE::doRxDmaWrite() 1013{ 1014 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting); 1015 rxDmaState = dmaWriting; 1016 1017 if (dmaPending() || drainState() != DrainState::Running) 1018 rxDmaState = dmaWriteWaiting; 1019 else 1020 dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaWriteEvent, (uint8_t*)rxDmaData); 1021 return true; 1022} 1023 1024void 1025NSGigE::rxDmaWriteDone() 1026{ 1027 assert(rxDmaState == dmaWriting); 1028 rxDmaState = dmaIdle; 1029 1030 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n", 1031 rxDmaAddr, rxDmaLen); 1032 DDUMP(EthernetDMA, rxDmaData, rxDmaLen); 1033 1034 // If the transmit state machine has a pending DMA, let it go first 1035 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting) 1036 txKick(); 1037 1038 rxKick(); 1039} 1040 1041void 1042NSGigE::rxKick() 1043{ 1044 bool is64bit = (bool)(regs.config & CFGR_M64ADDR); 1045 1046 DPRINTF(EthernetSM, 1047 "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n", 1048 NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32); 1049 1050 Addr link, bufptr; 1051 uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts; 1052 uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts; 1053 1054 next: 1055 if (rxKickTick > curTick()) { 1056 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n", 1057 rxKickTick); 1058 1059 goto exit; 1060 } 1061 1062 // Go to the next state machine clock tick. 1063 rxKickTick = clockEdge(Cycles(1)); 1064 1065 switch(rxDmaState) { 1066 case dmaReadWaiting: 1067 if (doRxDmaRead()) 1068 goto exit; 1069 break; 1070 case dmaWriteWaiting: 1071 if (doRxDmaWrite()) 1072 goto exit; 1073 break; 1074 default: 1075 break; 1076 } 1077 1078 link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link; 1079 bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr; 1080 1081 // see state machine from spec for details 1082 // the way this works is, if you finish work on one state and can 1083 // go directly to another, you do that through jumping to the 1084 // label "next". however, if you have intermediate work, like DMA 1085 // so that you can't go to the next state yet, you go to exit and 1086 // exit the loop. however, when the DMA is done it will trigger 1087 // an event and come back to this loop. 1088 switch (rxState) { 1089 case rxIdle: 1090 if (!rxEnable) { 1091 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n"); 1092 goto exit; 1093 } 1094 1095 if (CRDD) { 1096 rxState = rxDescRefr; 1097 1098 rxDmaAddr = regs.rxdp & 0x3fffffff; 1099 rxDmaData = 1100 is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link; 1101 rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link); 1102 rxDmaFree = dmaDescFree; 1103 1104 descDmaReads++; 1105 descDmaRdBytes += rxDmaLen; 1106 1107 if (doRxDmaRead()) 1108 goto exit; 1109 } else { 1110 rxState = rxDescRead; 1111 1112 rxDmaAddr = regs.rxdp & 0x3fffffff; 1113 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32; 1114 rxDmaLen = is64bit ? 
                sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      case rxDescRefr:
        if (rxDmaState != dmaIdle)
            goto exit;

        rxState = rxAdvance;
        break;

      case rxDescRead:
        if (rxDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n",
                regs.rxdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                link, bufptr, cmdsts, extsts);

        if (cmdsts & CMDSTS_OWN) {
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            goto exit;
        } else {
            rxState = rxFifoBlock;
            rxFragPtr = bufptr;
            rxDescCnt = cmdsts & CMDSTS_LEN_MASK;
        }
        break;

      case rxFifoBlock:
        if (!rxPacket) {
            /**
             * @todo in reality, we should be able to start processing
             * the packet as it arrives, and not have to wait for the
             * full packet to be in the receive fifo.
             */
            if (rxFifo.empty())
                goto exit;

            DPRINTF(EthernetSM, "****processing receive of new packet****\n");

            // If we don't have a packet, grab a new one from the fifo.
            rxPacket = rxFifo.front();
            rxPktBytes = rxPacket->length;
            rxPacketBufPtr = rxPacket->data;

#if TRACING_ON
            if (DTRACE(Ethernet)) {
                IpPtr ip(rxPacket);
                if (ip) {
                    DPRINTF(Ethernet, "ID is %d\n", ip->id());
                    TcpPtr tcp(ip);
                    if (tcp) {
                        DPRINTF(Ethernet,
                            "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                            tcp->sport(), tcp->dport(), tcp->seq(),
                            tcp->ack());
                    }
                }
            }
#endif

            // Sanity check: the driver is expected to behave like this
            assert(rxDescCnt >= rxPktBytes);
            rxFifo.pop();
        }

        // don't need the && rxDescCnt > 0 if driver sanity check
        // above holds
        if (rxPktBytes > 0) {
            rxState = rxFragWrite;
            // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
            // check holds
            rxXferLen = rxPktBytes;

            rxDmaAddr = rxFragPtr & 0x3fffffff;
            rxDmaData = rxPacketBufPtr;
            rxDmaLen = rxXferLen;
            rxDmaFree = dmaDataFree;

            if (doRxDmaWrite())
                goto exit;

        } else {
            rxState = rxDescWrite;

            //if (rxPktBytes == 0) {  /* packet is done */
            assert(rxPktBytes == 0);
            DPRINTF(EthernetSM, "done with receiving packet\n");

            cmdsts |= CMDSTS_OWN;
            cmdsts &= ~CMDSTS_MORE;
            cmdsts |= CMDSTS_OK;
            cmdsts &= 0xffff0000;
            cmdsts += rxPacket->length;   // i.e. set CMDSTS_SIZE

            IpPtr ip(rxPacket);
            if (extstsEnable && ip) {
                extsts |= EXTSTS_IPPKT;
                rxIpChecksums++;
                if (cksum(ip) != 0) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    extsts |= EXTSTS_IPERR;
                }
                TcpPtr tcp(ip);
                UdpPtr udp(ip);
                if (tcp) {
                    extsts |= EXTSTS_TCPPKT;
                    rxTcpChecksums++;
                    if (cksum(tcp) != 0) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        extsts |= EXTSTS_TCPERR;
                    }
                } else if (udp) {
                    extsts |= EXTSTS_UDPPKT;
                    rxUdpChecksums++;
                    if (cksum(udp) != 0) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        extsts |= EXTSTS_UDPERR;
                    }
                }
            }
            rxPacket = 0;

            /*
             * The driver seems to always receive into descriptor buffers
             * of size 1514, so a packet is never split across multiple
             * descriptors on the receive side; that case is not
             * implemented, hence the assert above.
             */

            DPRINTF(EthernetDesc,
                    "rxDesc: addr=%08x writeback cmdsts extsts\n",
                    regs.rxdp & 0x3fffffff);
            DPRINTF(EthernetDesc,
                    "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                    link, bufptr, cmdsts, extsts);

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &cmdsts;
            if (is64bit) {
                rxDmaAddr += offsetof(ns_desc64, cmdsts);
                rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
            } else {
                rxDmaAddr += offsetof(ns_desc32, cmdsts);
                rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
            }
            rxDmaFree = dmaDescFree;

            descDmaWrites++;
            descDmaWrBytes += rxDmaLen;

            if (doRxDmaWrite())
                goto exit;
        }
        break;

      case rxFragWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        rxPacketBufPtr += rxXferLen;
        rxFragPtr += rxXferLen;
        rxPktBytes -= rxXferLen;

        rxState = rxFifoBlock;
        break;

      case rxDescWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        assert(cmdsts & CMDSTS_OWN);

        assert(rxPacket == 0);
        devIntrPost(ISR_RXOK);

        if (cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_RXDESC);

        if (!rxEnable) {
            DPRINTF(EthernetSM, "Halting the RX state machine\n");
            rxState = rxIdle;
            goto exit;
        } else
            rxState = rxAdvance;
        break;

      case rxAdvance:
        if (link == 0) {
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            CRDD = true;
            goto exit;
        } else {
            if (rxDmaState != dmaIdle)
                goto exit;
            rxState = rxDescRead;
            regs.rxdp = link;
            CRDD = false;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
            rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            NsRxStateStrings[rxState]);
    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
1348 */ 1349 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n", 1350 NsRxStateStrings[rxState]); 1351 1352 if (!rxKickEvent.scheduled()) 1353 schedule(rxKickEvent, rxKickTick); 1354} 1355 1356void 1357NSGigE::transmit() 1358{ 1359 if (txFifo.empty()) { 1360 DPRINTF(Ethernet, "nothing to transmit\n"); 1361 return; 1362 } 1363 1364 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n", 1365 txFifo.size()); 1366 if (interface->sendPacket(txFifo.front())) { 1367#if TRACING_ON 1368 if (DTRACE(Ethernet)) { 1369 IpPtr ip(txFifo.front()); 1370 if (ip) { 1371 DPRINTF(Ethernet, "ID is %d\n", ip->id()); 1372 TcpPtr tcp(ip); 1373 if (tcp) { 1374 DPRINTF(Ethernet, 1375 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n", 1376 tcp->sport(), tcp->dport(), tcp->seq(), 1377 tcp->ack()); 1378 } 1379 } 1380 } 1381#endif 1382 1383 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length); 1384 txBytes += txFifo.front()->length; 1385 txPackets++; 1386 1387 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n", 1388 txFifo.avail()); 1389 txFifo.pop(); 1390 1391 /* 1392 * normally do a writeback of the descriptor here, and ONLY 1393 * after that is done, send this interrupt. but since our 1394 * stuff never actually fails, just do this interrupt here, 1395 * otherwise the code has to stray from this nice format. 1396 * besides, it's functionally the same. 1397 */ 1398 devIntrPost(ISR_TXOK); 1399 } 1400 1401 if (!txFifo.empty() && !txEvent.scheduled()) { 1402 DPRINTF(Ethernet, "reschedule transmit\n"); 1403 schedule(txEvent, curTick() + retryTime); 1404 } 1405} 1406 1407bool 1408NSGigE::doTxDmaRead() 1409{ 1410 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting); 1411 txDmaState = dmaReading; 1412 1413 if (dmaPending() || drainState() != DrainState::Running) 1414 txDmaState = dmaReadWaiting; 1415 else 1416 dmaRead(txDmaAddr, txDmaLen, &txDmaReadEvent, (uint8_t*)txDmaData); 1417 1418 return true; 1419} 1420 1421void 1422NSGigE::txDmaReadDone() 1423{ 1424 assert(txDmaState == dmaReading); 1425 txDmaState = dmaIdle; 1426 1427 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n", 1428 txDmaAddr, txDmaLen); 1429 DDUMP(EthernetDMA, txDmaData, txDmaLen); 1430 1431 // If the receive state machine has a pending DMA, let it go first 1432 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting) 1433 rxKick(); 1434 1435 txKick(); 1436} 1437 1438bool 1439NSGigE::doTxDmaWrite() 1440{ 1441 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting); 1442 txDmaState = dmaWriting; 1443 1444 if (dmaPending() || drainState() != DrainState::Running) 1445 txDmaState = dmaWriteWaiting; 1446 else 1447 dmaWrite(txDmaAddr, txDmaLen, &txDmaWriteEvent, (uint8_t*)txDmaData); 1448 return true; 1449} 1450 1451void 1452NSGigE::txDmaWriteDone() 1453{ 1454 assert(txDmaState == dmaWriting); 1455 txDmaState = dmaIdle; 1456 1457 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n", 1458 txDmaAddr, txDmaLen); 1459 DDUMP(EthernetDMA, txDmaData, txDmaLen); 1460 1461 // If the receive state machine has a pending DMA, let it go first 1462 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting) 1463 rxKick(); 1464 1465 txKick(); 1466} 1467 1468void 1469NSGigE::txKick() 1470{ 1471 bool is64bit = (bool)(regs.config & CFGR_M64ADDR); 1472 1473 DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n", 1474 NsTxStateStrings[txState], is64bit ? 64 : 32); 1475 1476 Addr link, bufptr; 1477 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts; 1478 uint32_t &extsts = is64bit ? 
        txDesc64.extsts : txDesc32.extsts;

  next:
    if (txKickTick > curTick()) {
        DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
                txKickTick);
        goto exit;
    }

    // Go to the next state machine clock tick.
    txKickTick = clockEdge(Cycles(1));

    switch(txDmaState) {
      case dmaReadWaiting:
        if (doTxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doTxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link;
    bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr;
    switch (txState) {
      case txIdle:
        if (!txEnable) {
            DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
            goto exit;
        }

        if (CTDD) {
            txState = txDescRefr;

            txDmaAddr = regs.txdp & 0x3fffffff;
            txDmaData =
                is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link;
            txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;

        } else {
            txState = txDescRead;

            txDmaAddr = regs.txdp & 0x3fffffff;
            txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
            txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      case txDescRefr:
        if (txDmaState != dmaIdle)
            goto exit;

        txState = txAdvance;
        break;

      case txDescRead:
        if (txDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n",
                regs.txdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
                link, bufptr, cmdsts, extsts);

        if (cmdsts & CMDSTS_OWN) {
            txState = txFifoBlock;
            txFragPtr = bufptr;
            txDescCnt = cmdsts & CMDSTS_LEN_MASK;
        } else {
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        }
        break;

      case txFifoBlock:
        if (!txPacket) {
            DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
            txPacket = make_shared<EthPacketData>(16384);
            txPacketBufPtr = txPacket->data;
        }

        if (txDescCnt == 0) {
            DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
            if (cmdsts & CMDSTS_MORE) {
                DPRINTF(EthernetSM, "there are more descriptors to come\n");
                txState = txDescWrite;

                cmdsts &= ~CMDSTS_OWN;

                txDmaAddr = regs.txdp & 0x3fffffff;
                txDmaData = &cmdsts;
                if (is64bit) {
                    txDmaAddr += offsetof(ns_desc64, cmdsts);
                    txDmaLen = sizeof(txDesc64.cmdsts);
                } else {
                    txDmaAddr += offsetof(ns_desc32, cmdsts);
                    txDmaLen = sizeof(txDesc32.cmdsts);
                }
                txDmaFree = dmaDescFree;

                if (doTxDmaWrite())
                    goto exit;

            } else { /* this packet is totally done */
                DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
                /* deal with the packet that just finished */
                if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
                    IpPtr ip(txPacket);
                    if (extsts & EXTSTS_UDPPKT) {
                        UdpPtr udp(ip);
                        if (udp) {
                            udp->sum(0);
                            udp->sum(cksum(udp));
                            txUdpChecksums++;
                        } else {
                            Debug::breakpoint();
                            warn_once("UDPPKT set, but not UDP!\n");
                        }
                    } else if (extsts & EXTSTS_TCPPKT) {
                        TcpPtr tcp(ip);
                        if (tcp) {
                            tcp->sum(0);
                            tcp->sum(cksum(tcp));
                            txTcpChecksums++;
                        } else {
                            warn_once("TCPPKT set, but not TCP!\n");
                        }
                    }
                    if (extsts & EXTSTS_IPPKT) {
                        if (ip) {
                            ip->sum(0);
                            ip->sum(cksum(ip));
                            txIpChecksums++;
                        } else {
                            warn_once("IPPKT set, but not IP!\n");
                        }
                    }
                }

                txPacket->simLength = txPacketBufPtr - txPacket->data;
                txPacket->length = txPacketBufPtr - txPacket->data;
                // the receive side can't handle a packet bigger than
                // 1514 bytes, so make sure we never send one
                if (txPacket->length > 1514)
                    panic("transmit packet too large, %d > 1514\n",
                          txPacket->length);

#ifndef NDEBUG
                bool success =
#endif
                    txFifo.push(txPacket);
                assert(success);

                /*
                 * The following section is not to spec, but functionally
                 * it shouldn't make any difference.  Normally the chip
                 * waits until the transmit has occurred before writing
                 * back the descriptor, because it has to see that the
                 * packet was successfully transmitted to decide whether
                 * to set CMDSTS_OK or not.  In the simulator the transmit
                 * always succeeds, and writing it exactly to spec would
                 * complicate the code, so we just do it here.
                 */

                cmdsts &= ~CMDSTS_OWN;
                cmdsts |= CMDSTS_OK;

                DPRINTF(EthernetDesc,
                        "txDesc writeback: cmdsts=%08x extsts=%08x\n",
                        cmdsts, extsts);

                txDmaFree = dmaDescFree;
                txDmaAddr = regs.txdp & 0x3fffffff;
                txDmaData = &cmdsts;
                if (is64bit) {
                    txDmaAddr += offsetof(ns_desc64, cmdsts);
                    txDmaLen =
                        sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts);
                } else {
                    txDmaAddr += offsetof(ns_desc32, cmdsts);
                    txDmaLen =
                        sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts);
                }

                descDmaWrites++;
                descDmaWrBytes += txDmaLen;

                transmit();
                txPacket = 0;

                if (!txEnable) {
                    DPRINTF(EthernetSM, "halting TX state machine\n");
                    txState = txIdle;
                    goto exit;
                } else
                    txState = txAdvance;

                if (doTxDmaWrite())
                    goto exit;
            }
        } else {
            DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
            if (!txFifo.full()) {
                txState = txFragRead;

                /*
                 * The number of bytes transferred is either whatever
                 * is left in the descriptor (txDescCnt), or if there
                 * is not enough room in the fifo, just whatever room
                 * is left in the fifo
                 */
                txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());

                txDmaAddr = txFragPtr & 0x3fffffff;
                txDmaData = txPacketBufPtr;
                txDmaLen = txXferLen;
                txDmaFree = dmaDataFree;

                if (doTxDmaRead())
                    goto exit;
            } else {
                txState = txFifoBlock;
                transmit();

                goto exit;
            }

        }
        break;

      case txFragRead:
        if (txDmaState != dmaIdle)
            goto exit;

        txPacketBufPtr += txXferLen;
        txFragPtr += txXferLen;
        txDescCnt -= txXferLen;
        txFifo.reserve(txXferLen);

        txState = txFifoBlock;
        break;

      case txDescWrite:
        if (txDmaState != dmaIdle)
            goto exit;

        if (cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_TXDESC);

        if (!txEnable) {
            DPRINTF(EthernetSM, "halting TX state machine\n");
            txState = txIdle;
            goto exit;
        } else
            txState = txAdvance;
        break;

      case txAdvance:
        if (link == 0) {
devIntrPost(ISR_TXIDLE); 1758 txState = txIdle; 1759 goto exit; 1760 } else { 1761 if (txDmaState != dmaIdle) 1762 goto exit; 1763 txState = txDescRead; 1764 regs.txdp = link; 1765 CTDD = false; 1766 1767 txDmaAddr = link & 0x3fffffff; 1768 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32; 1769 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32); 1770 txDmaFree = dmaDescFree; 1771 1772 if (doTxDmaRead()) 1773 goto exit; 1774 } 1775 break; 1776 1777 default: 1778 panic("invalid state"); 1779 } 1780 1781 DPRINTF(EthernetSM, "entering next txState=%s\n", 1782 NsTxStateStrings[txState]); 1783 goto next; 1784 1785 exit: 1786 /** 1787 * @todo do we want to schedule a future kick? 1788 */ 1789 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n", 1790 NsTxStateStrings[txState]); 1791 1792 if (!txKickEvent.scheduled()) 1793 schedule(txKickEvent, txKickTick); 1794} 1795 1796/** 1797 * Advance the EEPROM state machine 1798 * Called on rising edge of EEPROM clock bit in MEAR 1799 */ 1800void 1801NSGigE::eepromKick() 1802{ 1803 switch (eepromState) { 1804 1805 case eepromStart: 1806 1807 // Wait for start bit 1808 if (regs.mear & MEAR_EEDI) { 1809 // Set up to get 2 opcode bits 1810 eepromState = eepromGetOpcode; 1811 eepromBitsToRx = 2; 1812 eepromOpcode = 0; 1813 } 1814 break; 1815 1816 case eepromGetOpcode: 1817 eepromOpcode <<= 1; 1818 eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0; 1819 --eepromBitsToRx; 1820 1821 // Done getting opcode 1822 if (eepromBitsToRx == 0) { 1823 if (eepromOpcode != EEPROM_READ) 1824 panic("only EEPROM reads are implemented!"); 1825 1826 // Set up to get address 1827 eepromState = eepromGetAddress; 1828 eepromBitsToRx = 6; 1829 eepromAddress = 0; 1830 } 1831 break; 1832 1833 case eepromGetAddress: 1834 eepromAddress <<= 1; 1835 eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0; 1836 --eepromBitsToRx; 1837 1838 // Done getting address 1839 if (eepromBitsToRx == 0) { 1840 1841 if (eepromAddress >= EEPROM_SIZE) 1842 panic("EEPROM read access out of range!"); 1843 1844 switch (eepromAddress) { 1845 1846 case EEPROM_PMATCH2_ADDR: 1847 eepromData = rom.perfectMatch[5]; 1848 eepromData <<= 8; 1849 eepromData += rom.perfectMatch[4]; 1850 break; 1851 1852 case EEPROM_PMATCH1_ADDR: 1853 eepromData = rom.perfectMatch[3]; 1854 eepromData <<= 8; 1855 eepromData += rom.perfectMatch[2]; 1856 break; 1857 1858 case EEPROM_PMATCH0_ADDR: 1859 eepromData = rom.perfectMatch[1]; 1860 eepromData <<= 8; 1861 eepromData += rom.perfectMatch[0]; 1862 break; 1863 1864 default: 1865 panic("FreeBSD driver only uses EEPROM to read PMATCH!"); 1866 } 1867 // Set up to read data 1868 eepromState = eepromRead; 1869 eepromBitsToRx = 16; 1870 1871 // Clear data in bit 1872 regs.mear &= ~MEAR_EEDI; 1873 } 1874 break; 1875 1876 case eepromRead: 1877 // Clear Data Out bit 1878 regs.mear &= ~MEAR_EEDO; 1879 // Set bit to value of current EEPROM bit 1880 regs.mear |= (eepromData & 0x8000) ? 
            MEAR_EEDO : 0x0;

        eepromData <<= 1;
        --eepromBitsToRx;

        // All done
        if (eepromBitsToRx == 0) {
            eepromState = eepromStart;
        }
        break;

      default:
        panic("invalid EEPROM state");
    }

}

void
NSGigE::transferDone()
{
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
        return;
    }

    DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");

    reschedule(txEvent, clockEdge(Cycles(1)), true);
}

bool
NSGigE::rxFilter(const EthPacketPtr &packet)
{
    EthPtr eth = packet;
    bool drop = true;
    string type;

    const EthAddr &dst = eth->dst();
    if (dst.unicast()) {
        // If we're accepting all unicast addresses
        if (acceptUnicast)
            drop = false;

        // If we make a perfect match
        if (acceptPerfect && dst == rom.perfectMatch)
            drop = false;

        if (acceptArp && eth->type() == ETH_TYPE_ARP)
            drop = false;

    } else if (dst.broadcast()) {
        // if we're accepting broadcasts
        if (acceptBroadcast)
            drop = false;

    } else if (dst.multicast()) {
        // if we're accepting all multicasts
        if (acceptMulticast)
            drop = false;

        // Multicast hashing faked - all packets accepted
        if (multicastHashEnable)
            drop = false;
    }

    if (drop) {
        DPRINTF(Ethernet, "rxFilter drop\n");
        DDUMP(EthernetData, packet->data, packet->length);
    }

    return drop;
}

bool
NSGigE::recvPacket(EthPacketPtr packet)
{
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
            rxFifo.avail());

    if (!rxEnable) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        return true;
    }

    if (!rxFilterEnable) {
        DPRINTF(Ethernet,
                "receive packet filtering disabled . . . packet dropped\n");
        return true;
    }

    if (rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        return true;
    }

    if (rxFifo.avail() < packet->length) {
#if TRACING_ON
        IpPtr ip(packet);
        TcpPtr tcp(ip);
        if (ip) {
            DPRINTF(Ethernet,
                    "packet won't fit in receive buffer...pkt ID %d dropped\n",
                    ip->id());
            if (tcp) {
                DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
            }
        }
#endif
        droppedPackets++;
        devIntrPost(ISR_RXORN);
        return false;
    }

    rxFifo.push(packet);

    rxKick();
    return true;
}


void
NSGigE::drainResume()
{
    Drainable::drainResume();

    // During a drain we could have left the state machines in a waiting
    // state, where they wouldn't get going again until some other event
    // occurred to kick them.  Kick them here so they get out immediately.
    txKick();
    rxKick();
}


//=====================================================================
//
//
void
NSGigE::serialize(CheckpointOut &cp) const
{
    // Serialize the PciDevice base class
    PciDevice::serialize(cp);

    /*
     * Finalize any DMA events now.
     */
    // @todo will mem system save pending dma?
2029 2030 /* 2031 * Serialize the device registers 2032 */ 2033 SERIALIZE_SCALAR(regs.command); 2034 SERIALIZE_SCALAR(regs.config); 2035 SERIALIZE_SCALAR(regs.mear); 2036 SERIALIZE_SCALAR(regs.ptscr); 2037 SERIALIZE_SCALAR(regs.isr); 2038 SERIALIZE_SCALAR(regs.imr); 2039 SERIALIZE_SCALAR(regs.ier); 2040 SERIALIZE_SCALAR(regs.ihr); 2041 SERIALIZE_SCALAR(regs.txdp); 2042 SERIALIZE_SCALAR(regs.txdp_hi); 2043 SERIALIZE_SCALAR(regs.txcfg); 2044 SERIALIZE_SCALAR(regs.gpior); 2045 SERIALIZE_SCALAR(regs.rxdp); 2046 SERIALIZE_SCALAR(regs.rxdp_hi); 2047 SERIALIZE_SCALAR(regs.rxcfg); 2048 SERIALIZE_SCALAR(regs.pqcr); 2049 SERIALIZE_SCALAR(regs.wcsr); 2050 SERIALIZE_SCALAR(regs.pcr); 2051 SERIALIZE_SCALAR(regs.rfcr); 2052 SERIALIZE_SCALAR(regs.rfdr); 2053 SERIALIZE_SCALAR(regs.brar); 2054 SERIALIZE_SCALAR(regs.brdr); 2055 SERIALIZE_SCALAR(regs.srr); 2056 SERIALIZE_SCALAR(regs.mibc); 2057 SERIALIZE_SCALAR(regs.vrcr); 2058 SERIALIZE_SCALAR(regs.vtcr); 2059 SERIALIZE_SCALAR(regs.vdr); 2060 SERIALIZE_SCALAR(regs.ccsr); 2061 SERIALIZE_SCALAR(regs.tbicr); 2062 SERIALIZE_SCALAR(regs.tbisr); 2063 SERIALIZE_SCALAR(regs.tanar); 2064 SERIALIZE_SCALAR(regs.tanlpar); 2065 SERIALIZE_SCALAR(regs.taner); 2066 SERIALIZE_SCALAR(regs.tesr); 2067 2068 SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN); 2069 SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE); 2070 2071 SERIALIZE_SCALAR(ioEnable); 2072 2073 /* 2074 * Serialize the data Fifos 2075 */ 2076 rxFifo.serialize("rxFifo", cp); 2077 txFifo.serialize("txFifo", cp); 2078 2079 /* 2080 * Serialize the various helper variables 2081 */ 2082 bool txPacketExists = txPacket != nullptr; 2083 SERIALIZE_SCALAR(txPacketExists); 2084 if (txPacketExists) { 2085 txPacket->simLength = txPacketBufPtr - txPacket->data; 2086 txPacket->length = txPacketBufPtr - txPacket->data; 2087 txPacket->serialize("txPacket", cp); 2088 uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data); 2089 SERIALIZE_SCALAR(txPktBufPtr); 2090 } 2091 2092 bool rxPacketExists = rxPacket != nullptr; 2093 SERIALIZE_SCALAR(rxPacketExists); 2094 if (rxPacketExists) { 2095 rxPacket->serialize("rxPacket", cp); 2096 uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data); 2097 SERIALIZE_SCALAR(rxPktBufPtr); 2098 } 2099 2100 SERIALIZE_SCALAR(txXferLen); 2101 SERIALIZE_SCALAR(rxXferLen); 2102 2103 /* 2104 * Serialize Cached Descriptors 2105 */ 2106 SERIALIZE_SCALAR(rxDesc64.link); 2107 SERIALIZE_SCALAR(rxDesc64.bufptr); 2108 SERIALIZE_SCALAR(rxDesc64.cmdsts); 2109 SERIALIZE_SCALAR(rxDesc64.extsts); 2110 SERIALIZE_SCALAR(txDesc64.link); 2111 SERIALIZE_SCALAR(txDesc64.bufptr); 2112 SERIALIZE_SCALAR(txDesc64.cmdsts); 2113 SERIALIZE_SCALAR(txDesc64.extsts); 2114 SERIALIZE_SCALAR(rxDesc32.link); 2115 SERIALIZE_SCALAR(rxDesc32.bufptr); 2116 SERIALIZE_SCALAR(rxDesc32.cmdsts); 2117 SERIALIZE_SCALAR(rxDesc32.extsts); 2118 SERIALIZE_SCALAR(txDesc32.link); 2119 SERIALIZE_SCALAR(txDesc32.bufptr); 2120 SERIALIZE_SCALAR(txDesc32.cmdsts); 2121 SERIALIZE_SCALAR(txDesc32.extsts); 2122 SERIALIZE_SCALAR(extstsEnable); 2123 2124 /* 2125 * Serialize tx state machine 2126 */ 2127 int txState = this->txState; 2128 SERIALIZE_SCALAR(txState); 2129 SERIALIZE_SCALAR(txEnable); 2130 SERIALIZE_SCALAR(CTDD); 2131 SERIALIZE_SCALAR(txFragPtr); 2132 SERIALIZE_SCALAR(txDescCnt); 2133 int txDmaState = this->txDmaState; 2134 SERIALIZE_SCALAR(txDmaState); 2135 SERIALIZE_SCALAR(txKickTick); 2136 2137 /* 2138 * Serialize rx state machine 2139 */ 2140 int rxState = this->rxState; 2141 SERIALIZE_SCALAR(rxState); 2142 SERIALIZE_SCALAR(rxEnable); 
2143 SERIALIZE_SCALAR(CRDD); 2144 SERIALIZE_SCALAR(rxPktBytes); 2145 SERIALIZE_SCALAR(rxFragPtr); 2146 SERIALIZE_SCALAR(rxDescCnt); 2147 int rxDmaState = this->rxDmaState; 2148 SERIALIZE_SCALAR(rxDmaState); 2149 SERIALIZE_SCALAR(rxKickTick); 2150 2151 /* 2152 * Serialize EEPROM state machine 2153 */ 2154 int eepromState = this->eepromState; 2155 SERIALIZE_SCALAR(eepromState); 2156 SERIALIZE_SCALAR(eepromClk); 2157 SERIALIZE_SCALAR(eepromBitsToRx); 2158 SERIALIZE_SCALAR(eepromOpcode); 2159 SERIALIZE_SCALAR(eepromAddress); 2160 SERIALIZE_SCALAR(eepromData); 2161 2162 /* 2163 * If there's a pending transmit, store the time so we can 2164 * reschedule it later 2165 */ 2166 Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick() : 0; 2167 SERIALIZE_SCALAR(transmitTick); 2168 2169 /* 2170 * receive address filter settings 2171 */ 2172 SERIALIZE_SCALAR(rxFilterEnable); 2173 SERIALIZE_SCALAR(acceptBroadcast); 2174 SERIALIZE_SCALAR(acceptMulticast); 2175 SERIALIZE_SCALAR(acceptUnicast); 2176 SERIALIZE_SCALAR(acceptPerfect); 2177 SERIALIZE_SCALAR(acceptArp); 2178 SERIALIZE_SCALAR(multicastHashEnable); 2179 2180 /* 2181 * Keep track of pending interrupt status. 2182 */ 2183 SERIALIZE_SCALAR(intrTick); 2184 SERIALIZE_SCALAR(cpuPendingIntr); 2185 Tick intrEventTick = 0; 2186 if (intrEvent) 2187 intrEventTick = intrEvent->when(); 2188 SERIALIZE_SCALAR(intrEventTick); 2189 2190} 2191 2192void 2193NSGigE::unserialize(CheckpointIn &cp) 2194{ 2195 // Unserialize the PciDevice base class 2196 PciDevice::unserialize(cp); 2197 2198 UNSERIALIZE_SCALAR(regs.command); 2199 UNSERIALIZE_SCALAR(regs.config); 2200 UNSERIALIZE_SCALAR(regs.mear); 2201 UNSERIALIZE_SCALAR(regs.ptscr); 2202 UNSERIALIZE_SCALAR(regs.isr); 2203 UNSERIALIZE_SCALAR(regs.imr); 2204 UNSERIALIZE_SCALAR(regs.ier); 2205 UNSERIALIZE_SCALAR(regs.ihr); 2206 UNSERIALIZE_SCALAR(regs.txdp); 2207 UNSERIALIZE_SCALAR(regs.txdp_hi); 2208 UNSERIALIZE_SCALAR(regs.txcfg); 2209 UNSERIALIZE_SCALAR(regs.gpior); 2210 UNSERIALIZE_SCALAR(regs.rxdp); 2211 UNSERIALIZE_SCALAR(regs.rxdp_hi); 2212 UNSERIALIZE_SCALAR(regs.rxcfg); 2213 UNSERIALIZE_SCALAR(regs.pqcr); 2214 UNSERIALIZE_SCALAR(regs.wcsr); 2215 UNSERIALIZE_SCALAR(regs.pcr); 2216 UNSERIALIZE_SCALAR(regs.rfcr); 2217 UNSERIALIZE_SCALAR(regs.rfdr); 2218 UNSERIALIZE_SCALAR(regs.brar); 2219 UNSERIALIZE_SCALAR(regs.brdr); 2220 UNSERIALIZE_SCALAR(regs.srr); 2221 UNSERIALIZE_SCALAR(regs.mibc); 2222 UNSERIALIZE_SCALAR(regs.vrcr); 2223 UNSERIALIZE_SCALAR(regs.vtcr); 2224 UNSERIALIZE_SCALAR(regs.vdr); 2225 UNSERIALIZE_SCALAR(regs.ccsr); 2226 UNSERIALIZE_SCALAR(regs.tbicr); 2227 UNSERIALIZE_SCALAR(regs.tbisr); 2228 UNSERIALIZE_SCALAR(regs.tanar); 2229 UNSERIALIZE_SCALAR(regs.tanlpar); 2230 UNSERIALIZE_SCALAR(regs.taner); 2231 UNSERIALIZE_SCALAR(regs.tesr); 2232 2233 UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN); 2234 UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE); 2235 2236 UNSERIALIZE_SCALAR(ioEnable); 2237 2238 /* 2239 * unserialize the data fifos 2240 */ 2241 rxFifo.unserialize("rxFifo", cp); 2242 txFifo.unserialize("txFifo", cp); 2243 2244 /* 2245 * unserialize the various helper variables 2246 */ 2247 bool txPacketExists; 2248 UNSERIALIZE_SCALAR(txPacketExists); 2249 if (txPacketExists) { 2250 txPacket = make_shared<EthPacketData>(16384); 2251 txPacket->unserialize("txPacket", cp); 2252 uint32_t txPktBufPtr; 2253 UNSERIALIZE_SCALAR(txPktBufPtr); 2254 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr; 2255 } else 2256 txPacket = 0; 2257 2258 bool rxPacketExists; 2259 
UNSERIALIZE_SCALAR(rxPacketExists); 2260 rxPacket = 0; 2261 if (rxPacketExists) { 2262 rxPacket = make_shared<EthPacketData>(); 2263 rxPacket->unserialize("rxPacket", cp); 2264 uint32_t rxPktBufPtr; 2265 UNSERIALIZE_SCALAR(rxPktBufPtr); 2266 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr; 2267 } else 2268 rxPacket = 0; 2269 2270 UNSERIALIZE_SCALAR(txXferLen); 2271 UNSERIALIZE_SCALAR(rxXferLen); 2272 2273 /* 2274 * Unserialize Cached Descriptors 2275 */ 2276 UNSERIALIZE_SCALAR(rxDesc64.link); 2277 UNSERIALIZE_SCALAR(rxDesc64.bufptr); 2278 UNSERIALIZE_SCALAR(rxDesc64.cmdsts); 2279 UNSERIALIZE_SCALAR(rxDesc64.extsts); 2280 UNSERIALIZE_SCALAR(txDesc64.link); 2281 UNSERIALIZE_SCALAR(txDesc64.bufptr); 2282 UNSERIALIZE_SCALAR(txDesc64.cmdsts); 2283 UNSERIALIZE_SCALAR(txDesc64.extsts); 2284 UNSERIALIZE_SCALAR(rxDesc32.link); 2285 UNSERIALIZE_SCALAR(rxDesc32.bufptr); 2286 UNSERIALIZE_SCALAR(rxDesc32.cmdsts); 2287 UNSERIALIZE_SCALAR(rxDesc32.extsts); 2288 UNSERIALIZE_SCALAR(txDesc32.link); 2289 UNSERIALIZE_SCALAR(txDesc32.bufptr); 2290 UNSERIALIZE_SCALAR(txDesc32.cmdsts); 2291 UNSERIALIZE_SCALAR(txDesc32.extsts); 2292 UNSERIALIZE_SCALAR(extstsEnable); 2293 2294 /* 2295 * unserialize tx state machine 2296 */ 2297 int txState; 2298 UNSERIALIZE_SCALAR(txState); 2299 this->txState = (TxState) txState; 2300 UNSERIALIZE_SCALAR(txEnable); 2301 UNSERIALIZE_SCALAR(CTDD); 2302 UNSERIALIZE_SCALAR(txFragPtr); 2303 UNSERIALIZE_SCALAR(txDescCnt); 2304 int txDmaState; 2305 UNSERIALIZE_SCALAR(txDmaState); 2306 this->txDmaState = (DmaState) txDmaState; 2307 UNSERIALIZE_SCALAR(txKickTick); 2308 if (txKickTick) 2309 schedule(txKickEvent, txKickTick); 2310 2311 /* 2312 * unserialize rx state machine 2313 */ 2314 int rxState; 2315 UNSERIALIZE_SCALAR(rxState); 2316 this->rxState = (RxState) rxState; 2317 UNSERIALIZE_SCALAR(rxEnable); 2318 UNSERIALIZE_SCALAR(CRDD); 2319 UNSERIALIZE_SCALAR(rxPktBytes); 2320 UNSERIALIZE_SCALAR(rxFragPtr); 2321 UNSERIALIZE_SCALAR(rxDescCnt); 2322 int rxDmaState; 2323 UNSERIALIZE_SCALAR(rxDmaState); 2324 this->rxDmaState = (DmaState) rxDmaState; 2325 UNSERIALIZE_SCALAR(rxKickTick); 2326 if (rxKickTick) 2327 schedule(rxKickEvent, rxKickTick); 2328 2329 /* 2330 * Unserialize EEPROM state machine 2331 */ 2332 int eepromState; 2333 UNSERIALIZE_SCALAR(eepromState); 2334 this->eepromState = (EEPROMState) eepromState; 2335 UNSERIALIZE_SCALAR(eepromClk); 2336 UNSERIALIZE_SCALAR(eepromBitsToRx); 2337 UNSERIALIZE_SCALAR(eepromOpcode); 2338 UNSERIALIZE_SCALAR(eepromAddress); 2339 UNSERIALIZE_SCALAR(eepromData); 2340 2341 /* 2342 * If there's a pending transmit, reschedule it now 2343 */ 2344 Tick transmitTick; 2345 UNSERIALIZE_SCALAR(transmitTick); 2346 if (transmitTick) 2347 schedule(txEvent, curTick() + transmitTick); 2348 2349 /* 2350 * unserialize receive address filter settings 2351 */ 2352 UNSERIALIZE_SCALAR(rxFilterEnable); 2353 UNSERIALIZE_SCALAR(acceptBroadcast); 2354 UNSERIALIZE_SCALAR(acceptMulticast); 2355 UNSERIALIZE_SCALAR(acceptUnicast); 2356 UNSERIALIZE_SCALAR(acceptPerfect); 2357 UNSERIALIZE_SCALAR(acceptArp); 2358 UNSERIALIZE_SCALAR(multicastHashEnable); 2359 2360 /* 2361 * Keep track of pending interrupt status. 
2362 */ 2363 UNSERIALIZE_SCALAR(intrTick); 2364 UNSERIALIZE_SCALAR(cpuPendingIntr); 2365 Tick intrEventTick; 2366 UNSERIALIZE_SCALAR(intrEventTick); 2367 if (intrEventTick) { 2368 intrEvent = new EventFunctionWrapper([this]{ cpuInterrupt(); }, 2369 name(), true); 2370 schedule(intrEvent, intrEventTick); 2371 } 2372} 2373 2374NSGigE * 2375NSGigEParams::create() 2376{ 2377 return new NSGigE(this); 2378} 2379