/* ns_gige.cc — revision 4762 */
/*
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Nathan Binkert
 *          Lisa Hsu
 */

/** @file
 * Device module for modelling the National Semiconductor
 * DP83820 ethernet controller.
Does not support priority queueing 354120Sgblack@eecs.umich.edu */ 364120Sgblack@eecs.umich.edu#include <deque> 374120Sgblack@eecs.umich.edu#include <string> 384120Sgblack@eecs.umich.edu 394120Sgblack@eecs.umich.edu#include "base/inet.hh" 404120Sgblack@eecs.umich.edu#include "cpu/thread_context.hh" 414120Sgblack@eecs.umich.edu#include "dev/etherlink.hh" 424120Sgblack@eecs.umich.edu#include "dev/ns_gige.hh" 434120Sgblack@eecs.umich.edu#include "dev/pciconfigall.hh" 444120Sgblack@eecs.umich.edu#include "mem/packet.hh" 454120Sgblack@eecs.umich.edu#include "mem/packet_access.hh" 464120Sgblack@eecs.umich.edu#include "params/NSGigE.hh" 474120Sgblack@eecs.umich.edu#include "params/NSGigEInt.hh" 484120Sgblack@eecs.umich.edu#include "sim/debug.hh" 494120Sgblack@eecs.umich.edu#include "sim/host.hh" 504120Sgblack@eecs.umich.edu#include "sim/stats.hh" 514120Sgblack@eecs.umich.edu#include "sim/system.hh" 524120Sgblack@eecs.umich.edu 534120Sgblack@eecs.umich.educonst char *NsRxStateStrings[] = 544120Sgblack@eecs.umich.edu{ 554120Sgblack@eecs.umich.edu "rxIdle", 564120Sgblack@eecs.umich.edu "rxDescRefr", 574120Sgblack@eecs.umich.edu "rxDescRead", 584120Sgblack@eecs.umich.edu "rxFifoBlock", 594120Sgblack@eecs.umich.edu "rxFragWrite", 604120Sgblack@eecs.umich.edu "rxDescWrite", 614166Sgblack@eecs.umich.edu "rxAdvance" 624166Sgblack@eecs.umich.edu}; 634166Sgblack@eecs.umich.edu 644120Sgblack@eecs.umich.educonst char *NsTxStateStrings[] = 654120Sgblack@eecs.umich.edu{ 664120Sgblack@eecs.umich.edu "txIdle", 674166Sgblack@eecs.umich.edu "txDescRefr", 684166Sgblack@eecs.umich.edu "txDescRead", 694166Sgblack@eecs.umich.edu "txFifoBlock", 704166Sgblack@eecs.umich.edu "txFragRead", 714166Sgblack@eecs.umich.edu "txDescWrite", 724166Sgblack@eecs.umich.edu "txAdvance" 734166Sgblack@eecs.umich.edu}; 744166Sgblack@eecs.umich.edu 754166Sgblack@eecs.umich.educonst char *NsDmaState[] = 764166Sgblack@eecs.umich.edu{ 774166Sgblack@eecs.umich.edu "dmaIdle", 784166Sgblack@eecs.umich.edu "dmaReading", 
794166Sgblack@eecs.umich.edu "dmaWriting", 804166Sgblack@eecs.umich.edu "dmaReadWaiting", 814166Sgblack@eecs.umich.edu "dmaWriteWaiting" 824166Sgblack@eecs.umich.edu}; 834166Sgblack@eecs.umich.edu 844166Sgblack@eecs.umich.eduusing namespace std; 854166Sgblack@eecs.umich.eduusing namespace Net; 864166Sgblack@eecs.umich.eduusing namespace TheISA; 874166Sgblack@eecs.umich.edu 884166Sgblack@eecs.umich.edu/////////////////////////////////////////////////////////////////////// 894166Sgblack@eecs.umich.edu// 904166Sgblack@eecs.umich.edu// NSGigE PCI Device 914166Sgblack@eecs.umich.edu// 924166Sgblack@eecs.umich.eduNSGigE::NSGigE(Params *p) 934166Sgblack@eecs.umich.edu : PciDev(p), ioEnable(false), 944166Sgblack@eecs.umich.edu txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size), 954166Sgblack@eecs.umich.edu txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL), 964166Sgblack@eecs.umich.edu txXferLen(0), rxXferLen(0), rxDmaFree(false), txDmaFree(false), 974166Sgblack@eecs.umich.edu clock(p->clock), 984166Sgblack@eecs.umich.edu txState(txIdle), txEnable(false), CTDD(false), txHalt(false), 994166Sgblack@eecs.umich.edu txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle), 1004166Sgblack@eecs.umich.edu rxEnable(false), CRDD(false), rxPktBytes(0), rxHalt(false), 1014166Sgblack@eecs.umich.edu rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false), 1024166Sgblack@eecs.umich.edu eepromState(eepromStart), eepromClk(false), eepromBitsToRx(0), 1034166Sgblack@eecs.umich.edu eepromOpcode(0), eepromAddress(0), eepromData(0), 1044166Sgblack@eecs.umich.edu dmaReadDelay(p->dma_read_delay), dmaWriteDelay(p->dma_write_delay), 1054166Sgblack@eecs.umich.edu dmaReadFactor(p->dma_read_factor), dmaWriteFactor(p->dma_write_factor), 1064166Sgblack@eecs.umich.edu rxDmaData(NULL), rxDmaAddr(0), rxDmaLen(0), 1074120Sgblack@eecs.umich.edu txDmaData(NULL), txDmaAddr(0), txDmaLen(0), 1084120Sgblack@eecs.umich.edu rxDmaReadEvent(this), rxDmaWriteEvent(this), 109 
txDmaReadEvent(this), txDmaWriteEvent(this), 110 dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free), 111 txDelay(p->tx_delay), rxDelay(p->rx_delay), 112 rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this), 113 txEvent(this), rxFilterEnable(p->rx_filter), 114 acceptBroadcast(false), acceptMulticast(false), acceptUnicast(false), 115 acceptPerfect(false), acceptArp(false), multicastHashEnable(false), 116 intrDelay(p->intr_delay), intrTick(0), cpuPendingIntr(false), 117 intrEvent(0), interface(0) 118{ 119 120 121 regsReset(); 122 memcpy(&rom.perfectMatch, p->hardware_address.bytes(), ETH_ADDR_LEN); 123 124 memset(&rxDesc32, 0, sizeof(rxDesc32)); 125 memset(&txDesc32, 0, sizeof(txDesc32)); 126 memset(&rxDesc64, 0, sizeof(rxDesc64)); 127 memset(&txDesc64, 0, sizeof(txDesc64)); 128} 129 130NSGigE::~NSGigE() 131{} 132 133void 134NSGigE::regStats() 135{ 136 txBytes 137 .name(name() + ".txBytes") 138 .desc("Bytes Transmitted") 139 .prereq(txBytes) 140 ; 141 142 rxBytes 143 .name(name() + ".rxBytes") 144 .desc("Bytes Received") 145 .prereq(rxBytes) 146 ; 147 148 txPackets 149 .name(name() + ".txPackets") 150 .desc("Number of Packets Transmitted") 151 .prereq(txBytes) 152 ; 153 154 rxPackets 155 .name(name() + ".rxPackets") 156 .desc("Number of Packets Received") 157 .prereq(rxBytes) 158 ; 159 160 txIpChecksums 161 .name(name() + ".txIpChecksums") 162 .desc("Number of tx IP Checksums done by device") 163 .precision(0) 164 .prereq(txBytes) 165 ; 166 167 rxIpChecksums 168 .name(name() + ".rxIpChecksums") 169 .desc("Number of rx IP Checksums done by device") 170 .precision(0) 171 .prereq(rxBytes) 172 ; 173 174 txTcpChecksums 175 .name(name() + ".txTcpChecksums") 176 .desc("Number of tx TCP Checksums done by device") 177 .precision(0) 178 .prereq(txBytes) 179 ; 180 181 rxTcpChecksums 182 .name(name() + ".rxTcpChecksums") 183 .desc("Number of rx TCP Checksums done by device") 184 .precision(0) 185 .prereq(rxBytes) 186 ; 187 188 txUdpChecksums 189 
.name(name() + ".txUdpChecksums") 190 .desc("Number of tx UDP Checksums done by device") 191 .precision(0) 192 .prereq(txBytes) 193 ; 194 195 rxUdpChecksums 196 .name(name() + ".rxUdpChecksums") 197 .desc("Number of rx UDP Checksums done by device") 198 .precision(0) 199 .prereq(rxBytes) 200 ; 201 202 descDmaReads 203 .name(name() + ".descDMAReads") 204 .desc("Number of descriptors the device read w/ DMA") 205 .precision(0) 206 ; 207 208 descDmaWrites 209 .name(name() + ".descDMAWrites") 210 .desc("Number of descriptors the device wrote w/ DMA") 211 .precision(0) 212 ; 213 214 descDmaRdBytes 215 .name(name() + ".descDmaReadBytes") 216 .desc("number of descriptor bytes read w/ DMA") 217 .precision(0) 218 ; 219 220 descDmaWrBytes 221 .name(name() + ".descDmaWriteBytes") 222 .desc("number of descriptor bytes write w/ DMA") 223 .precision(0) 224 ; 225 226 txBandwidth 227 .name(name() + ".txBandwidth") 228 .desc("Transmit Bandwidth (bits/s)") 229 .precision(0) 230 .prereq(txBytes) 231 ; 232 233 rxBandwidth 234 .name(name() + ".rxBandwidth") 235 .desc("Receive Bandwidth (bits/s)") 236 .precision(0) 237 .prereq(rxBytes) 238 ; 239 240 totBandwidth 241 .name(name() + ".totBandwidth") 242 .desc("Total Bandwidth (bits/s)") 243 .precision(0) 244 .prereq(totBytes) 245 ; 246 247 totPackets 248 .name(name() + ".totPackets") 249 .desc("Total Packets") 250 .precision(0) 251 .prereq(totBytes) 252 ; 253 254 totBytes 255 .name(name() + ".totBytes") 256 .desc("Total Bytes") 257 .precision(0) 258 .prereq(totBytes) 259 ; 260 261 totPacketRate 262 .name(name() + ".totPPS") 263 .desc("Total Tranmission Rate (packets/s)") 264 .precision(0) 265 .prereq(totBytes) 266 ; 267 268 txPacketRate 269 .name(name() + ".txPPS") 270 .desc("Packet Tranmission Rate (packets/s)") 271 .precision(0) 272 .prereq(txBytes) 273 ; 274 275 rxPacketRate 276 .name(name() + ".rxPPS") 277 .desc("Packet Reception Rate (packets/s)") 278 .precision(0) 279 .prereq(rxBytes) 280 ; 281 282 postedSwi 283 .name(name() + 
".postedSwi") 284 .desc("number of software interrupts posted to CPU") 285 .precision(0) 286 ; 287 288 totalSwi 289 .name(name() + ".totalSwi") 290 .desc("total number of Swi written to ISR") 291 .precision(0) 292 ; 293 294 coalescedSwi 295 .name(name() + ".coalescedSwi") 296 .desc("average number of Swi's coalesced into each post") 297 .precision(0) 298 ; 299 300 postedRxIdle 301 .name(name() + ".postedRxIdle") 302 .desc("number of rxIdle interrupts posted to CPU") 303 .precision(0) 304 ; 305 306 totalRxIdle 307 .name(name() + ".totalRxIdle") 308 .desc("total number of RxIdle written to ISR") 309 .precision(0) 310 ; 311 312 coalescedRxIdle 313 .name(name() + ".coalescedRxIdle") 314 .desc("average number of RxIdle's coalesced into each post") 315 .precision(0) 316 ; 317 318 postedRxOk 319 .name(name() + ".postedRxOk") 320 .desc("number of RxOk interrupts posted to CPU") 321 .precision(0) 322 ; 323 324 totalRxOk 325 .name(name() + ".totalRxOk") 326 .desc("total number of RxOk written to ISR") 327 .precision(0) 328 ; 329 330 coalescedRxOk 331 .name(name() + ".coalescedRxOk") 332 .desc("average number of RxOk's coalesced into each post") 333 .precision(0) 334 ; 335 336 postedRxDesc 337 .name(name() + ".postedRxDesc") 338 .desc("number of RxDesc interrupts posted to CPU") 339 .precision(0) 340 ; 341 342 totalRxDesc 343 .name(name() + ".totalRxDesc") 344 .desc("total number of RxDesc written to ISR") 345 .precision(0) 346 ; 347 348 coalescedRxDesc 349 .name(name() + ".coalescedRxDesc") 350 .desc("average number of RxDesc's coalesced into each post") 351 .precision(0) 352 ; 353 354 postedTxOk 355 .name(name() + ".postedTxOk") 356 .desc("number of TxOk interrupts posted to CPU") 357 .precision(0) 358 ; 359 360 totalTxOk 361 .name(name() + ".totalTxOk") 362 .desc("total number of TxOk written to ISR") 363 .precision(0) 364 ; 365 366 coalescedTxOk 367 .name(name() + ".coalescedTxOk") 368 .desc("average number of TxOk's coalesced into each post") 369 .precision(0) 370 ; 371 
372 postedTxIdle 373 .name(name() + ".postedTxIdle") 374 .desc("number of TxIdle interrupts posted to CPU") 375 .precision(0) 376 ; 377 378 totalTxIdle 379 .name(name() + ".totalTxIdle") 380 .desc("total number of TxIdle written to ISR") 381 .precision(0) 382 ; 383 384 coalescedTxIdle 385 .name(name() + ".coalescedTxIdle") 386 .desc("average number of TxIdle's coalesced into each post") 387 .precision(0) 388 ; 389 390 postedTxDesc 391 .name(name() + ".postedTxDesc") 392 .desc("number of TxDesc interrupts posted to CPU") 393 .precision(0) 394 ; 395 396 totalTxDesc 397 .name(name() + ".totalTxDesc") 398 .desc("total number of TxDesc written to ISR") 399 .precision(0) 400 ; 401 402 coalescedTxDesc 403 .name(name() + ".coalescedTxDesc") 404 .desc("average number of TxDesc's coalesced into each post") 405 .precision(0) 406 ; 407 408 postedRxOrn 409 .name(name() + ".postedRxOrn") 410 .desc("number of RxOrn posted to CPU") 411 .precision(0) 412 ; 413 414 totalRxOrn 415 .name(name() + ".totalRxOrn") 416 .desc("total number of RxOrn written to ISR") 417 .precision(0) 418 ; 419 420 coalescedRxOrn 421 .name(name() + ".coalescedRxOrn") 422 .desc("average number of RxOrn's coalesced into each post") 423 .precision(0) 424 ; 425 426 coalescedTotal 427 .name(name() + ".coalescedTotal") 428 .desc("average number of interrupts coalesced into each post") 429 .precision(0) 430 ; 431 432 postedInterrupts 433 .name(name() + ".postedInterrupts") 434 .desc("number of posts to CPU") 435 .precision(0) 436 ; 437 438 droppedPackets 439 .name(name() + ".droppedPackets") 440 .desc("number of packets dropped") 441 .precision(0) 442 ; 443 444 coalescedSwi = totalSwi / postedInterrupts; 445 coalescedRxIdle = totalRxIdle / postedInterrupts; 446 coalescedRxOk = totalRxOk / postedInterrupts; 447 coalescedRxDesc = totalRxDesc / postedInterrupts; 448 coalescedTxOk = totalTxOk / postedInterrupts; 449 coalescedTxIdle = totalTxIdle / postedInterrupts; 450 coalescedTxDesc = totalTxDesc / postedInterrupts; 
451 coalescedRxOrn = totalRxOrn / postedInterrupts; 452 453 coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc + 454 totalTxOk + totalTxIdle + totalTxDesc + 455 totalRxOrn) / postedInterrupts; 456 457 txBandwidth = txBytes * Stats::constant(8) / simSeconds; 458 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds; 459 totBandwidth = txBandwidth + rxBandwidth; 460 totBytes = txBytes + rxBytes; 461 totPackets = txPackets + rxPackets; 462 463 txPacketRate = txPackets / simSeconds; 464 rxPacketRate = rxPackets / simSeconds; 465} 466 467 468/** 469 * This is to write to the PCI general configuration registers 470 */ 471Tick 472NSGigE::writeConfig(PacketPtr pkt) 473{ 474 int offset = pkt->getAddr() & PCI_CONFIG_SIZE; 475 if (offset < PCI_DEVICE_SPECIFIC) 476 PciDev::writeConfig(pkt); 477 else 478 panic("Device specific PCI config space not implemented!\n"); 479 480 switch (offset) { 481 // seems to work fine without all these PCI settings, but i 482 // put in the IO to double check, an assertion will fail if we 483 // need to properly implement it 484 case PCI_COMMAND: 485 if (config.data[offset] & PCI_CMD_IOSE) 486 ioEnable = true; 487 else 488 ioEnable = false; 489 break; 490 } 491 pkt->result = Packet::Success; 492 return configDelay; 493} 494 495/** 496 * This reads the device registers, which are detailed in the NS83820 497 * spec sheet 498 */ 499Tick 500NSGigE::read(PacketPtr pkt) 501{ 502 assert(ioEnable); 503 504 pkt->allocate(); 505 506 //The mask is to give you only the offset into the device register file 507 Addr daddr = pkt->getAddr() & 0xfff; 508 DPRINTF(EthernetPIO, "read da=%#x pa=%#x size=%d\n", 509 daddr, pkt->getAddr(), pkt->getSize()); 510 511 512 // there are some reserved registers, you can see ns_gige_reg.h and 513 // the spec sheet for details 514 if (daddr > LAST && daddr <= RESERVED) { 515 panic("Accessing reserved register"); 516 } else if (daddr > RESERVED && daddr <= 0x3FC) { 517 return readConfig(pkt); 518 } else if (daddr 
>= MIB_START && daddr <= MIB_END) { 519 // don't implement all the MIB's. hopefully the kernel 520 // doesn't actually DEPEND upon their values 521 // MIB are just hardware stats keepers 522 pkt->set<uint32_t>(0); 523 pkt->result = Packet::Success; 524 return pioDelay; 525 } else if (daddr > 0x3FC) 526 panic("Something is messed up!\n"); 527 528 assert(pkt->getSize() == sizeof(uint32_t)); 529 uint32_t ® = *pkt->getPtr<uint32_t>(); 530 uint16_t rfaddr; 531 532 switch (daddr) { 533 case CR: 534 reg = regs.command; 535 //these are supposed to be cleared on a read 536 reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR); 537 break; 538 539 case CFGR: 540 reg = regs.config; 541 break; 542 543 case MEAR: 544 reg = regs.mear; 545 break; 546 547 case PTSCR: 548 reg = regs.ptscr; 549 break; 550 551 case ISR: 552 reg = regs.isr; 553 devIntrClear(ISR_ALL); 554 break; 555 556 case IMR: 557 reg = regs.imr; 558 break; 559 560 case IER: 561 reg = regs.ier; 562 break; 563 564 case IHR: 565 reg = regs.ihr; 566 break; 567 568 case TXDP: 569 reg = regs.txdp; 570 break; 571 572 case TXDP_HI: 573 reg = regs.txdp_hi; 574 break; 575 576 case TX_CFG: 577 reg = regs.txcfg; 578 break; 579 580 case GPIOR: 581 reg = regs.gpior; 582 break; 583 584 case RXDP: 585 reg = regs.rxdp; 586 break; 587 588 case RXDP_HI: 589 reg = regs.rxdp_hi; 590 break; 591 592 case RX_CFG: 593 reg = regs.rxcfg; 594 break; 595 596 case PQCR: 597 reg = regs.pqcr; 598 break; 599 600 case WCSR: 601 reg = regs.wcsr; 602 break; 603 604 case PCR: 605 reg = regs.pcr; 606 break; 607 608 // see the spec sheet for how RFCR and RFDR work 609 // basically, you write to RFCR to tell the machine 610 // what you want to do next, then you act upon RFDR, 611 // and the device will be prepared b/c of what you 612 // wrote to RFCR 613 case RFCR: 614 reg = regs.rfcr; 615 break; 616 617 case RFDR: 618 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR); 619 switch (rfaddr) { 620 // Read from perfect match ROM octets 621 case 0x000: 622 reg = 
rom.perfectMatch[1]; 623 reg = reg << 8; 624 reg += rom.perfectMatch[0]; 625 break; 626 case 0x002: 627 reg = rom.perfectMatch[3] << 8; 628 reg += rom.perfectMatch[2]; 629 break; 630 case 0x004: 631 reg = rom.perfectMatch[5] << 8; 632 reg += rom.perfectMatch[4]; 633 break; 634 default: 635 // Read filter hash table 636 if (rfaddr >= FHASH_ADDR && 637 rfaddr < FHASH_ADDR + FHASH_SIZE) { 638 639 // Only word-aligned reads supported 640 if (rfaddr % 2) 641 panic("unaligned read from filter hash table!"); 642 643 reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8; 644 reg += rom.filterHash[rfaddr - FHASH_ADDR]; 645 break; 646 } 647 648 panic("reading RFDR for something other than pattern" 649 " matching or hashing! %#x\n", rfaddr); 650 } 651 break; 652 653 case SRR: 654 reg = regs.srr; 655 break; 656 657 case MIBC: 658 reg = regs.mibc; 659 reg &= ~(MIBC_MIBS | MIBC_ACLR); 660 break; 661 662 case VRCR: 663 reg = regs.vrcr; 664 break; 665 666 case VTCR: 667 reg = regs.vtcr; 668 break; 669 670 case VDR: 671 reg = regs.vdr; 672 break; 673 674 case CCSR: 675 reg = regs.ccsr; 676 break; 677 678 case TBICR: 679 reg = regs.tbicr; 680 break; 681 682 case TBISR: 683 reg = regs.tbisr; 684 break; 685 686 case TANAR: 687 reg = regs.tanar; 688 break; 689 690 case TANLPAR: 691 reg = regs.tanlpar; 692 break; 693 694 case TANER: 695 reg = regs.taner; 696 break; 697 698 case TESR: 699 reg = regs.tesr; 700 break; 701 702 case M5REG: 703 reg = 0; 704 if (params()->rx_thread) 705 reg |= M5REG_RX_THREAD; 706 if (params()->tx_thread) 707 reg |= M5REG_TX_THREAD; 708 if (params()->rss) 709 reg |= M5REG_RSS; 710 break; 711 712 default: 713 panic("reading unimplemented register: addr=%#x", daddr); 714 } 715 716 DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n", 717 daddr, reg, reg); 718 719 pkt->result = Packet::Success; 720 return pioDelay; 721} 722 723Tick 724NSGigE::write(PacketPtr pkt) 725{ 726 assert(ioEnable); 727 728 Addr daddr = pkt->getAddr() & 0xfff; 729 DPRINTF(EthernetPIO, 
"write da=%#x pa=%#x size=%d\n", 730 daddr, pkt->getAddr(), pkt->getSize()); 731 732 if (daddr > LAST && daddr <= RESERVED) { 733 panic("Accessing reserved register"); 734 } else if (daddr > RESERVED && daddr <= 0x3FC) { 735 return writeConfig(pkt); 736 } else if (daddr > 0x3FC) 737 panic("Something is messed up!\n"); 738 739 if (pkt->getSize() == sizeof(uint32_t)) { 740 uint32_t reg = pkt->get<uint32_t>(); 741 uint16_t rfaddr; 742 743 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg); 744 745 switch (daddr) { 746 case CR: 747 regs.command = reg; 748 if (reg & CR_TXD) { 749 txEnable = false; 750 } else if (reg & CR_TXE) { 751 txEnable = true; 752 753 // the kernel is enabling the transmit machine 754 if (txState == txIdle) 755 txKick(); 756 } 757 758 if (reg & CR_RXD) { 759 rxEnable = false; 760 } else if (reg & CR_RXE) { 761 rxEnable = true; 762 763 if (rxState == rxIdle) 764 rxKick(); 765 } 766 767 if (reg & CR_TXR) 768 txReset(); 769 770 if (reg & CR_RXR) 771 rxReset(); 772 773 if (reg & CR_SWI) 774 devIntrPost(ISR_SWI); 775 776 if (reg & CR_RST) { 777 txReset(); 778 rxReset(); 779 780 regsReset(); 781 } 782 break; 783 784 case CFGR: 785 if (reg & CFGR_LNKSTS || 786 reg & CFGR_SPDSTS || 787 reg & CFGR_DUPSTS || 788 reg & CFGR_RESERVED || 789 reg & CFGR_T64ADDR || 790 reg & CFGR_PCI64_DET) 791 792 // First clear all writable bits 793 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS | 794 CFGR_RESERVED | CFGR_T64ADDR | 795 CFGR_PCI64_DET; 796 // Now set the appropriate writable bits 797 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS | 798 CFGR_RESERVED | CFGR_T64ADDR | 799 CFGR_PCI64_DET); 800 801// all these #if 0's are because i don't THINK the kernel needs to 802// have these implemented. if there is a problem relating to one of 803// these, you may need to add functionality in. 
804 if (reg & CFGR_TBI_EN) ; 805 if (reg & CFGR_MODE_1000) ; 806 807 if (reg & CFGR_AUTO_1000) 808 panic("CFGR_AUTO_1000 not implemented!\n"); 809 810 if (reg & CFGR_PINT_DUPSTS || 811 reg & CFGR_PINT_LNKSTS || 812 reg & CFGR_PINT_SPDSTS) 813 ; 814 815 if (reg & CFGR_TMRTEST) ; 816 if (reg & CFGR_MRM_DIS) ; 817 if (reg & CFGR_MWI_DIS) ; 818 819 if (reg & CFGR_T64ADDR) ; 820 // panic("CFGR_T64ADDR is read only register!\n"); 821 822 if (reg & CFGR_PCI64_DET) 823 panic("CFGR_PCI64_DET is read only register!\n"); 824 825 if (reg & CFGR_DATA64_EN) ; 826 if (reg & CFGR_M64ADDR) ; 827 if (reg & CFGR_PHY_RST) ; 828 if (reg & CFGR_PHY_DIS) ; 829 830 if (reg & CFGR_EXTSTS_EN) 831 extstsEnable = true; 832 else 833 extstsEnable = false; 834 835 if (reg & CFGR_REQALG) ; 836 if (reg & CFGR_SB) ; 837 if (reg & CFGR_POW) ; 838 if (reg & CFGR_EXD) ; 839 if (reg & CFGR_PESEL) ; 840 if (reg & CFGR_BROM_DIS) ; 841 if (reg & CFGR_EXT_125) ; 842 if (reg & CFGR_BEM) ; 843 break; 844 845 case MEAR: 846 // Clear writable bits 847 regs.mear &= MEAR_EEDO; 848 // Set appropriate writable bits 849 regs.mear |= reg & ~MEAR_EEDO; 850 851 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address) 852 // even though it could get it through RFDR 853 if (reg & MEAR_EESEL) { 854 // Rising edge of clock 855 if (reg & MEAR_EECLK && !eepromClk) 856 eepromKick(); 857 } 858 else { 859 eepromState = eepromStart; 860 regs.mear &= ~MEAR_EEDI; 861 } 862 863 eepromClk = reg & MEAR_EECLK; 864 865 // since phy is completely faked, MEAR_MD* don't matter 866 if (reg & MEAR_MDIO) ; 867 if (reg & MEAR_MDDIR) ; 868 if (reg & MEAR_MDC) ; 869 break; 870 871 case PTSCR: 872 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY); 873 // these control BISTs for various parts of chip - we 874 // don't care or do just fake that the BIST is done 875 if (reg & PTSCR_RBIST_EN) 876 regs.ptscr |= PTSCR_RBIST_DONE; 877 if (reg & PTSCR_EEBIST_EN) 878 regs.ptscr &= ~PTSCR_EEBIST_EN; 879 if (reg & PTSCR_EELOAD_EN) 880 regs.ptscr &= 
~PTSCR_EELOAD_EN; 881 break; 882 883 case ISR: /* writing to the ISR has no effect */ 884 panic("ISR is a read only register!\n"); 885 886 case IMR: 887 regs.imr = reg; 888 devIntrChangeMask(); 889 break; 890 891 case IER: 892 regs.ier = reg; 893 break; 894 895 case IHR: 896 regs.ihr = reg; 897 /* not going to implement real interrupt holdoff */ 898 break; 899 900 case TXDP: 901 regs.txdp = (reg & 0xFFFFFFFC); 902 assert(txState == txIdle); 903 CTDD = false; 904 break; 905 906 case TXDP_HI: 907 regs.txdp_hi = reg; 908 break; 909 910 case TX_CFG: 911 regs.txcfg = reg; 912#if 0 913 if (reg & TX_CFG_CSI) ; 914 if (reg & TX_CFG_HBI) ; 915 if (reg & TX_CFG_MLB) ; 916 if (reg & TX_CFG_ATP) ; 917 if (reg & TX_CFG_ECRETRY) { 918 /* 919 * this could easily be implemented, but considering 920 * the network is just a fake pipe, wouldn't make 921 * sense to do this 922 */ 923 } 924 925 if (reg & TX_CFG_BRST_DIS) ; 926#endif 927 928#if 0 929 /* we handle our own DMA, ignore the kernel's exhortations */ 930 if (reg & TX_CFG_MXDMA) ; 931#endif 932 933 // also, we currently don't care about fill/drain 934 // thresholds though this may change in the future with 935 // more realistic networks or a driver which changes it 936 // according to feedback 937 938 break; 939 940 case GPIOR: 941 // Only write writable bits 942 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN 943 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN; 944 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN 945 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN); 946 /* these just control general purpose i/o pins, don't matter */ 947 break; 948 949 case RXDP: 950 regs.rxdp = reg; 951 CRDD = false; 952 break; 953 954 case RXDP_HI: 955 regs.rxdp_hi = reg; 956 break; 957 958 case RX_CFG: 959 regs.rxcfg = reg; 960#if 0 961 if (reg & RX_CFG_AEP) ; 962 if (reg & RX_CFG_ARP) ; 963 if (reg & RX_CFG_STRIPCRC) ; 964 if (reg & RX_CFG_RX_RD) ; 965 if (reg & RX_CFG_ALP) ; 966 if (reg & RX_CFG_AIRL) ; 967 968 /* we 
handle our own DMA, ignore what kernel says about it */ 969 if (reg & RX_CFG_MXDMA) ; 970 971 //also, we currently don't care about fill/drain thresholds 972 //though this may change in the future with more realistic 973 //networks or a driver which changes it according to feedback 974 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ; 975#endif 976 break; 977 978 case PQCR: 979 /* there is no priority queueing used in the linux 2.6 driver */ 980 regs.pqcr = reg; 981 break; 982 983 case WCSR: 984 /* not going to implement wake on LAN */ 985 regs.wcsr = reg; 986 break; 987 988 case PCR: 989 /* not going to implement pause control */ 990 regs.pcr = reg; 991 break; 992 993 case RFCR: 994 regs.rfcr = reg; 995 996 rxFilterEnable = (reg & RFCR_RFEN) ? true : false; 997 acceptBroadcast = (reg & RFCR_AAB) ? true : false; 998 acceptMulticast = (reg & RFCR_AAM) ? true : false; 999 acceptUnicast = (reg & RFCR_AAU) ? true : false; 1000 acceptPerfect = (reg & RFCR_APM) ? true : false; 1001 acceptArp = (reg & RFCR_AARP) ? true : false; 1002 multicastHashEnable = (reg & RFCR_MHEN) ? 
true : false; 1003 1004#if 0 1005 if (reg & RFCR_APAT) 1006 panic("RFCR_APAT not implemented!\n"); 1007#endif 1008 if (reg & RFCR_UHEN) 1009 panic("Unicast hash filtering not used by drivers!\n"); 1010 1011 if (reg & RFCR_ULM) 1012 panic("RFCR_ULM not implemented!\n"); 1013 1014 break; 1015 1016 case RFDR: 1017 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR); 1018 switch (rfaddr) { 1019 case 0x000: 1020 rom.perfectMatch[0] = (uint8_t)reg; 1021 rom.perfectMatch[1] = (uint8_t)(reg >> 8); 1022 break; 1023 case 0x002: 1024 rom.perfectMatch[2] = (uint8_t)reg; 1025 rom.perfectMatch[3] = (uint8_t)(reg >> 8); 1026 break; 1027 case 0x004: 1028 rom.perfectMatch[4] = (uint8_t)reg; 1029 rom.perfectMatch[5] = (uint8_t)(reg >> 8); 1030 break; 1031 default: 1032 1033 if (rfaddr >= FHASH_ADDR && 1034 rfaddr < FHASH_ADDR + FHASH_SIZE) { 1035 1036 // Only word-aligned writes supported 1037 if (rfaddr % 2) 1038 panic("unaligned write to filter hash table!"); 1039 1040 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg; 1041 rom.filterHash[rfaddr - FHASH_ADDR + 1] 1042 = (uint8_t)(reg >> 8); 1043 break; 1044 } 1045 panic("writing RFDR for something other than pattern matching\ 1046 or hashing! 
%#x\n", rfaddr); 1047 } 1048 1049 case BRAR: 1050 regs.brar = reg; 1051 break; 1052 1053 case BRDR: 1054 panic("the driver never uses BRDR, something is wrong!\n"); 1055 1056 case SRR: 1057 panic("SRR is read only register!\n"); 1058 1059 case MIBC: 1060 panic("the driver never uses MIBC, something is wrong!\n"); 1061 1062 case VRCR: 1063 regs.vrcr = reg; 1064 break; 1065 1066 case VTCR: 1067 regs.vtcr = reg; 1068 break; 1069 1070 case VDR: 1071 panic("the driver never uses VDR, something is wrong!\n"); 1072 1073 case CCSR: 1074 /* not going to implement clockrun stuff */ 1075 regs.ccsr = reg; 1076 break; 1077 1078 case TBICR: 1079 regs.tbicr = reg; 1080 if (reg & TBICR_MR_LOOPBACK) 1081 panic("TBICR_MR_LOOPBACK never used, something wrong!\n"); 1082 1083 if (reg & TBICR_MR_AN_ENABLE) { 1084 regs.tanlpar = regs.tanar; 1085 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS); 1086 } 1087 1088#if 0 1089 if (reg & TBICR_MR_RESTART_AN) ; 1090#endif 1091 1092 break; 1093 1094 case TBISR: 1095 panic("TBISR is read only register!\n"); 1096 1097 case TANAR: 1098 // Only write the writable bits 1099 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED; 1100 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED); 1101 1102 // Pause capability unimplemented 1103#if 0 1104 if (reg & TANAR_PS2) ; 1105 if (reg & TANAR_PS1) ; 1106#endif 1107 1108 break; 1109 1110 case TANLPAR: 1111 panic("this should only be written to by the fake phy!\n"); 1112 1113 case TANER: 1114 panic("TANER is read only register!\n"); 1115 1116 case TESR: 1117 regs.tesr = reg; 1118 break; 1119 1120 default: 1121 panic("invalid register access daddr=%#x", daddr); 1122 } 1123 } else { 1124 panic("Invalid Request Size"); 1125 } 1126 pkt->result = Packet::Success; 1127 return pioDelay; 1128} 1129 1130void 1131NSGigE::devIntrPost(uint32_t interrupts) 1132{ 1133 if (interrupts & ISR_RESERVE) 1134 panic("Cannot set a reserved interrupt"); 1135 1136 if (interrupts & ISR_NOIMPL) 1137 warn("interrupt not 
implemented %#x\n", interrupts); 1138 1139 interrupts &= ISR_IMPL; 1140 regs.isr |= interrupts; 1141 1142 if (interrupts & regs.imr) { 1143 if (interrupts & ISR_SWI) { 1144 totalSwi++; 1145 } 1146 if (interrupts & ISR_RXIDLE) { 1147 totalRxIdle++; 1148 } 1149 if (interrupts & ISR_RXOK) { 1150 totalRxOk++; 1151 } 1152 if (interrupts & ISR_RXDESC) { 1153 totalRxDesc++; 1154 } 1155 if (interrupts & ISR_TXOK) { 1156 totalTxOk++; 1157 } 1158 if (interrupts & ISR_TXIDLE) { 1159 totalTxIdle++; 1160 } 1161 if (interrupts & ISR_TXDESC) { 1162 totalTxDesc++; 1163 } 1164 if (interrupts & ISR_RXORN) { 1165 totalRxOrn++; 1166 } 1167 } 1168 1169 DPRINTF(EthernetIntr, 1170 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n", 1171 interrupts, regs.isr, regs.imr); 1172 1173 if ((regs.isr & regs.imr)) { 1174 Tick when = curTick; 1175 if ((regs.isr & regs.imr & ISR_NODELAY) == 0) 1176 when += intrDelay; 1177 cpuIntrPost(when); 1178 } 1179} 1180 1181/* writing this interrupt counting stats inside this means that this function 1182 is now limited to being used to clear all interrupts upon the kernel 1183 reading isr and servicing. just telling you in case you were thinking 1184 of expanding use. 
1185*/ 1186void 1187NSGigE::devIntrClear(uint32_t interrupts) 1188{ 1189 if (interrupts & ISR_RESERVE) 1190 panic("Cannot clear a reserved interrupt"); 1191 1192 if (regs.isr & regs.imr & ISR_SWI) { 1193 postedSwi++; 1194 } 1195 if (regs.isr & regs.imr & ISR_RXIDLE) { 1196 postedRxIdle++; 1197 } 1198 if (regs.isr & regs.imr & ISR_RXOK) { 1199 postedRxOk++; 1200 } 1201 if (regs.isr & regs.imr & ISR_RXDESC) { 1202 postedRxDesc++; 1203 } 1204 if (regs.isr & regs.imr & ISR_TXOK) { 1205 postedTxOk++; 1206 } 1207 if (regs.isr & regs.imr & ISR_TXIDLE) { 1208 postedTxIdle++; 1209 } 1210 if (regs.isr & regs.imr & ISR_TXDESC) { 1211 postedTxDesc++; 1212 } 1213 if (regs.isr & regs.imr & ISR_RXORN) { 1214 postedRxOrn++; 1215 } 1216 1217 if (regs.isr & regs.imr & ISR_IMPL) 1218 postedInterrupts++; 1219 1220 interrupts &= ~ISR_NOIMPL; 1221 regs.isr &= ~interrupts; 1222 1223 DPRINTF(EthernetIntr, 1224 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n", 1225 interrupts, regs.isr, regs.imr); 1226 1227 if (!(regs.isr & regs.imr)) 1228 cpuIntrClear(); 1229} 1230 1231void 1232NSGigE::devIntrChangeMask() 1233{ 1234 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n", 1235 regs.isr, regs.imr, regs.isr & regs.imr); 1236 1237 if (regs.isr & regs.imr) 1238 cpuIntrPost(curTick); 1239 else 1240 cpuIntrClear(); 1241} 1242 1243void 1244NSGigE::cpuIntrPost(Tick when) 1245{ 1246 // If the interrupt you want to post is later than an interrupt 1247 // already scheduled, just let it post in the coming one and don't 1248 // schedule another. 1249 // HOWEVER, must be sure that the scheduled intrTick is in the 1250 // future (this was formerly the source of a bug) 1251 /** 1252 * @todo this warning should be removed and the intrTick code should 1253 * be fixed. 
1254 */ 1255 assert(when >= curTick); 1256 assert(intrTick >= curTick || intrTick == 0); 1257 if (when > intrTick && intrTick != 0) { 1258 DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n", 1259 intrTick); 1260 return; 1261 } 1262 1263 intrTick = when; 1264 if (intrTick < curTick) { 1265 debug_break(); 1266 intrTick = curTick; 1267 } 1268 1269 DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n", 1270 intrTick); 1271 1272 if (intrEvent) 1273 intrEvent->squash(); 1274 intrEvent = new IntrEvent(this, intrTick, true); 1275} 1276 1277void 1278NSGigE::cpuInterrupt() 1279{ 1280 assert(intrTick == curTick); 1281 1282 // Whether or not there's a pending interrupt, we don't care about 1283 // it anymore 1284 intrEvent = 0; 1285 intrTick = 0; 1286 1287 // Don't send an interrupt if there's already one 1288 if (cpuPendingIntr) { 1289 DPRINTF(EthernetIntr, 1290 "would send an interrupt now, but there's already pending\n"); 1291 } else { 1292 // Send interrupt 1293 cpuPendingIntr = true; 1294 1295 DPRINTF(EthernetIntr, "posting interrupt\n"); 1296 intrPost(); 1297 } 1298} 1299 1300void 1301NSGigE::cpuIntrClear() 1302{ 1303 if (!cpuPendingIntr) 1304 return; 1305 1306 if (intrEvent) { 1307 intrEvent->squash(); 1308 intrEvent = 0; 1309 } 1310 1311 intrTick = 0; 1312 1313 cpuPendingIntr = false; 1314 1315 DPRINTF(EthernetIntr, "clearing interrupt\n"); 1316 intrClear(); 1317} 1318 1319bool 1320NSGigE::cpuIntrPending() const 1321{ return cpuPendingIntr; } 1322 1323void 1324NSGigE::txReset() 1325{ 1326 1327 DPRINTF(Ethernet, "transmit reset\n"); 1328 1329 CTDD = false; 1330 txEnable = false;; 1331 txFragPtr = 0; 1332 assert(txDescCnt == 0); 1333 txFifo.clear(); 1334 txState = txIdle; 1335 assert(txDmaState == dmaIdle); 1336} 1337 1338void 1339NSGigE::rxReset() 1340{ 1341 DPRINTF(Ethernet, "receive reset\n"); 1342 1343 CRDD = false; 1344 assert(rxPktBytes == 0); 1345 rxEnable = false; 1346 rxFragPtr = 0; 1347 assert(rxDescCnt == 0); 1348 
assert(rxDmaState == dmaIdle); 1349 rxFifo.clear(); 1350 rxState = rxIdle; 1351} 1352 1353void 1354NSGigE::regsReset() 1355{ 1356 memset(®s, 0, sizeof(regs)); 1357 regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000); 1358 regs.mear = 0x12; 1359 regs.txcfg = 0x120; // set drain threshold to 1024 bytes and 1360 // fill threshold to 32 bytes 1361 regs.rxcfg = 0x4; // set drain threshold to 16 bytes 1362 regs.srr = 0x0103; // set the silicon revision to rev B or 0x103 1363 regs.mibc = MIBC_FRZ; 1364 regs.vdr = 0x81; // set the vlan tag type to 802.1q 1365 regs.tesr = 0xc000; // TBI capable of both full and half duplex 1366 regs.brar = 0xffffffff; 1367 1368 extstsEnable = false; 1369 acceptBroadcast = false; 1370 acceptMulticast = false; 1371 acceptUnicast = false; 1372 acceptPerfect = false; 1373 acceptArp = false; 1374} 1375 1376bool 1377NSGigE::doRxDmaRead() 1378{ 1379 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting); 1380 rxDmaState = dmaReading; 1381 1382 if (dmaPending() || getState() != Running) 1383 rxDmaState = dmaReadWaiting; 1384 else 1385 dmaRead(rxDmaAddr, rxDmaLen, &rxDmaReadEvent, (uint8_t*)rxDmaData); 1386 1387 return true; 1388} 1389 1390void 1391NSGigE::rxDmaReadDone() 1392{ 1393 assert(rxDmaState == dmaReading); 1394 rxDmaState = dmaIdle; 1395 1396 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n", 1397 rxDmaAddr, rxDmaLen); 1398 DDUMP(EthernetDMA, rxDmaData, rxDmaLen); 1399 1400 // If the transmit state machine has a pending DMA, let it go first 1401 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting) 1402 txKick(); 1403 1404 rxKick(); 1405} 1406 1407bool 1408NSGigE::doRxDmaWrite() 1409{ 1410 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting); 1411 rxDmaState = dmaWriting; 1412 1413 if (dmaPending() || getState() != Running) 1414 rxDmaState = dmaWriteWaiting; 1415 else 1416 dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaWriteEvent, (uint8_t*)rxDmaData); 1417 return true; 1418} 1419 1420void 
NSGigE::rxDmaWriteDone()
{
    assert(rxDmaState == dmaWriting);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}

// Run the receive state machine.  Progresses through as many states as
// possible in one call; exits (via the 'exit' label) whenever it must
// wait on a DMA completion, on an empty FIFO, or on the next device
// clock tick.
void
NSGigE::rxKick()
{
    bool is64bit = (bool)(regs.config & CFGR_M64ADDR);

    DPRINTF(EthernetSM,
            "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
            NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32);

    // cmdsts/extsts alias whichever cached descriptor (32- or 64-bit
    // format) is in use, so the state machine below is format-agnostic.
    Addr link, bufptr;
    uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts;
    uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts;

  next:
    if (clock) {
        if (rxKickTick > curTick) {
            DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
                    rxKickTick);

            goto exit;
        }

        // Go to the next state machine clock tick.
        rxKickTick = curTick + cycles(1);
    }

    // Retry any DMA that was deferred because the engine was busy.
    switch(rxDmaState) {
      case dmaReadWaiting:
        if (doRxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doRxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link;
    bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr;

    // see state machine from spec for details
    // the way this works is, if you finish work on one state and can
    // go directly to another, you do that through jumping to the
    // label "next".  however, if you have intermediate work, like DMA
    // so that you can't go to the next state yet, you go to exit and
    // exit the loop.  however, when the DMA is done it will trigger
    // an event and come back to this loop.
    switch (rxState) {
      case rxIdle:
        if (!rxEnable) {
            DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
            goto exit;
        }

        if (CRDD) {
            // Current descriptor already done: refresh only its link
            // field to find the next descriptor.
            rxState = rxDescRefr;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData =
                is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link;
            rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        } else {
            // Fetch the whole descriptor at RXDP.
            rxState = rxDescRead;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
            rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      case rxDescRefr:
        if (rxDmaState != dmaIdle)
            goto exit;

        rxState = rxAdvance;
        break;

      case rxDescRead:
        if (rxDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n",
                regs.rxdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                link, bufptr, cmdsts, extsts);

        if (cmdsts & CMDSTS_OWN) {
            // Descriptor still owned by the driver: nothing to fill.
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            goto exit;
        } else {
            rxState = rxFifoBlock;
            rxFragPtr = bufptr;
            rxDescCnt = cmdsts & CMDSTS_LEN_MASK;
        }
        break;

      case rxFifoBlock:
        if (!rxPacket) {
            /**
             * @todo in reality, we should be able to start processing
             * the packet as it arrives, and not have to wait for the
             * full packet ot be in the receive fifo.
             */
            if (rxFifo.empty())
                goto exit;

            DPRINTF(EthernetSM, "****processing receive of new packet****\n");

            // If we don't have a packet, grab a new one from the fifo.
            rxPacket = rxFifo.front();
            rxPktBytes = rxPacket->length;
            rxPacketBufPtr = rxPacket->data;

#if TRACING_ON
            if (DTRACE(Ethernet)) {
                IpPtr ip(rxPacket);
                if (ip) {
                    DPRINTF(Ethernet, "ID is %d\n", ip->id());
                    TcpPtr tcp(ip);
                    if (tcp) {
                        DPRINTF(Ethernet,
                                "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                                tcp->sport(), tcp->dport(), tcp->seq(),
                                tcp->ack());
                    }
                }
            }
#endif

            // sanity check - i think the driver behaves like this
            assert(rxDescCnt >= rxPktBytes);
            rxFifo.pop();
        }


        // dont' need the && rxDescCnt > 0 if driver sanity check
        // above holds
        if (rxPktBytes > 0) {
            rxState = rxFragWrite;
            // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
            // check holds
            rxXferLen = rxPktBytes;

            rxDmaAddr = rxFragPtr & 0x3fffffff;
            rxDmaData = rxPacketBufPtr;
            rxDmaLen = rxXferLen;
            rxDmaFree = dmaDataFree;

            if (doRxDmaWrite())
                goto exit;

        } else {
            rxState = rxDescWrite;

            //if (rxPktBytes == 0) {  /* packet is done */
            assert(rxPktBytes == 0);
            DPRINTF(EthernetSM, "done with receiving packet\n");

            cmdsts |= CMDSTS_OWN;
            cmdsts &= ~CMDSTS_MORE;
            cmdsts |= CMDSTS_OK;
            cmdsts &= 0xffff0000;
            cmdsts += rxPacket->length;   //i.e. set CMDSTS_SIZE

#if 0
            /*
             * all the driver uses these are for its own stats keeping
             * which we don't care about, aren't necessary for
             * functionality and doing this would just slow us down.
             * if they end up using this in a later version for
             * functional purposes, just undef
             */
            if (rxFilterEnable) {
                cmdsts &= ~CMDSTS_DEST_MASK;
                const EthAddr &dst = rxFifoFront()->dst();
                if (dst->unicast())
                    cmdsts |= CMDSTS_DEST_SELF;
                if (dst->multicast())
                    cmdsts |= CMDSTS_DEST_MULTI;
                if (dst->broadcast())
                    cmdsts |= CMDSTS_DEST_MASK;
            }
#endif

            // Receive-side checksum offload: verify IP/TCP/UDP
            // checksums and report errors through EXTSTS.
            IpPtr ip(rxPacket);
            if (extstsEnable && ip) {
                extsts |= EXTSTS_IPPKT;
                rxIpChecksums++;
                if (cksum(ip) != 0) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    extsts |= EXTSTS_IPERR;
                }
                TcpPtr tcp(ip);
                UdpPtr udp(ip);
                if (tcp) {
                    extsts |= EXTSTS_TCPPKT;
                    rxTcpChecksums++;
                    if (cksum(tcp) != 0) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        extsts |= EXTSTS_TCPERR;

                    }
                } else if (udp) {
                    extsts |= EXTSTS_UDPPKT;
                    rxUdpChecksums++;
                    if (cksum(udp) != 0) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        extsts |= EXTSTS_UDPERR;
                    }
                }
            }
            rxPacket = 0;

            /*
             * the driver seems to always receive into desc buffers
             * of size 1514, so you never have a pkt that is split
             * into multiple descriptors on the receive side, so
             * i don't implement that case, hence the assert above.
             */

            DPRINTF(EthernetDesc,
                    "rxDesc: addr=%08x writeback cmdsts extsts\n",
                    regs.rxdp & 0x3fffffff);
            DPRINTF(EthernetDesc,
                    "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                    link, bufptr, cmdsts, extsts);

            // Write cmdsts+extsts (contiguous in the descriptor) back
            // to memory in one DMA.
            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &cmdsts;
            if (is64bit) {
                rxDmaAddr += offsetof(ns_desc64, cmdsts);
                rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
            } else {
                rxDmaAddr += offsetof(ns_desc32, cmdsts);
                rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
            }
            rxDmaFree = dmaDescFree;

            descDmaWrites++;
            descDmaWrBytes += rxDmaLen;

            if (doRxDmaWrite())
                goto exit;
        }
        break;

      case rxFragWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        rxPacketBufPtr += rxXferLen;
        rxFragPtr += rxXferLen;
        rxPktBytes -= rxXferLen;

        rxState = rxFifoBlock;
        break;

      case rxDescWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        assert(cmdsts & CMDSTS_OWN);

        assert(rxPacket == 0);
        devIntrPost(ISR_RXOK);

        if (cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_RXDESC);

        if (!rxEnable) {
            DPRINTF(EthernetSM, "Halting the RX state machine\n");
            rxState = rxIdle;
            goto exit;
        } else
            rxState = rxAdvance;
        break;

      case rxAdvance:
        if (link == 0) {
            // End of the descriptor chain; remember that the current
            // descriptor is done (CRDD) so we refresh only its link
            // field next time.
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            CRDD = true;
            goto exit;
        } else {
            if (rxDmaState != dmaIdle)
                goto exit;
            rxState = rxDescRead;
            regs.rxdp = link;
            CRDD = false;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
            rxDmaLen = is64bit ?
                sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            NsRxStateStrings[rxState]);
    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            NsRxStateStrings[rxState]);

    if (clock && !rxKickEvent.scheduled())
        rxKickEvent.schedule(rxKickTick);
}

// Try to send the packet at the head of the transmit FIFO out onto the
// link; reschedules itself (via txEvent) if the interface is busy.
void
NSGigE::transmit()
{
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "nothing to transmit\n");
        return;
    }

    DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
            txFifo.size());
    if (interface->sendPacket(txFifo.front())) {
#if TRACING_ON
        if (DTRACE(Ethernet)) {
            IpPtr ip(txFifo.front());
            if (ip) {
                DPRINTF(Ethernet, "ID is %d\n", ip->id());
                TcpPtr tcp(ip);
                if (tcp) {
                    DPRINTF(Ethernet,
                            "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                            tcp->sport(), tcp->dport(), tcp->seq(),
                            tcp->ack());
                }
            }
        }
#endif

        DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
        txBytes += txFifo.front()->length;
        txPackets++;

        DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
                txFifo.avail());
        txFifo.pop();

        /*
         * normally do a writeback of the descriptor here, and ONLY
         * after that is done, send this interrupt.  but since our
         * stuff never actually fails, just do this interrupt here,
         * otherwise the code has to stray from this nice format.
         * besides, it's functionally the same.
         */
        devIntrPost(ISR_TXOK);
    }

    // If the send failed (or more packets remain), retry later.
    if (!txFifo.empty() && !txEvent.scheduled()) {
        DPRINTF(Ethernet, "reschedule transmit\n");
        txEvent.schedule(curTick + retryTime);
    }
}

// Start (or defer) the DMA read described by txDmaAddr/txDmaLen/txDmaData.
// Returns true: the caller's state machine must exit and wait for the
// completion event.
bool
NSGigE::doTxDmaRead()
{
    assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
    txDmaState = dmaReading;

    if (dmaPending() || getState() != Running)
        txDmaState = dmaReadWaiting;
    else
        dmaRead(txDmaAddr, txDmaLen, &txDmaReadEvent, (uint8_t*)txDmaData);

    return true;
}

// Completion callback for a transmit-side DMA read.
void
NSGigE::txDmaReadDone()
{
    assert(txDmaState == dmaReading);
    txDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "tx dma read  paddr=%#x len=%d\n",
            txDmaAddr, txDmaLen);
    DDUMP(EthernetDMA, txDmaData, txDmaLen);

    // If the receive state machine has a pending DMA, let it go first
    if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
        rxKick();

    txKick();
}

// Start (or defer) the DMA write described by txDmaAddr/txDmaLen/txDmaData.
bool
NSGigE::doTxDmaWrite()
{
    assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
    txDmaState = dmaWriting;

    if (dmaPending() || getState() != Running)
        txDmaState = dmaWriteWaiting;
    else
        dmaWrite(txDmaAddr, txDmaLen, &txDmaWriteEvent, (uint8_t*)txDmaData);
    return true;
}

// Completion callback for a transmit-side DMA write.
void
NSGigE::txDmaWriteDone()
{
    assert(txDmaState == dmaWriting);
    txDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
            txDmaAddr, txDmaLen);
    DDUMP(EthernetDMA, txDmaData, txDmaLen);

    // If the receive state machine has a pending DMA, let it go first
    if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
        rxKick();

    txKick();
}

// Run the transmit state machine; same next/exit structure as rxKick().
void
NSGigE::txKick()
{
    bool is64bit = (bool)(regs.config & CFGR_M64ADDR);

    DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n",
            NsTxStateStrings[txState], is64bit ?
64 : 32); 1893 1894 Addr link, bufptr; 1895 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts; 1896 uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts; 1897 1898 next: 1899 if (clock) { 1900 if (txKickTick > curTick) { 1901 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n", 1902 txKickTick); 1903 goto exit; 1904 } 1905 1906 // Go to the next state machine clock tick. 1907 txKickTick = curTick + cycles(1); 1908 } 1909 1910 switch(txDmaState) { 1911 case dmaReadWaiting: 1912 if (doTxDmaRead()) 1913 goto exit; 1914 break; 1915 case dmaWriteWaiting: 1916 if (doTxDmaWrite()) 1917 goto exit; 1918 break; 1919 default: 1920 break; 1921 } 1922 1923 link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link; 1924 bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr; 1925 switch (txState) { 1926 case txIdle: 1927 if (!txEnable) { 1928 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n"); 1929 goto exit; 1930 } 1931 1932 if (CTDD) { 1933 txState = txDescRefr; 1934 1935 txDmaAddr = regs.txdp & 0x3fffffff; 1936 txDmaData = 1937 is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link; 1938 txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link); 1939 txDmaFree = dmaDescFree; 1940 1941 descDmaReads++; 1942 descDmaRdBytes += txDmaLen; 1943 1944 if (doTxDmaRead()) 1945 goto exit; 1946 1947 } else { 1948 txState = txDescRead; 1949 1950 txDmaAddr = regs.txdp & 0x3fffffff; 1951 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32; 1952 txDmaLen = is64bit ? 
sizeof(txDesc64) : sizeof(txDesc32); 1953 txDmaFree = dmaDescFree; 1954 1955 descDmaReads++; 1956 descDmaRdBytes += txDmaLen; 1957 1958 if (doTxDmaRead()) 1959 goto exit; 1960 } 1961 break; 1962 1963 case txDescRefr: 1964 if (txDmaState != dmaIdle) 1965 goto exit; 1966 1967 txState = txAdvance; 1968 break; 1969 1970 case txDescRead: 1971 if (txDmaState != dmaIdle) 1972 goto exit; 1973 1974 DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n", 1975 regs.txdp & 0x3fffffff); 1976 DPRINTF(EthernetDesc, 1977 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n", 1978 link, bufptr, cmdsts, extsts); 1979 1980 if (cmdsts & CMDSTS_OWN) { 1981 txState = txFifoBlock; 1982 txFragPtr = bufptr; 1983 txDescCnt = cmdsts & CMDSTS_LEN_MASK; 1984 } else { 1985 devIntrPost(ISR_TXIDLE); 1986 txState = txIdle; 1987 goto exit; 1988 } 1989 break; 1990 1991 case txFifoBlock: 1992 if (!txPacket) { 1993 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n"); 1994 txPacket = new EthPacketData(16384); 1995 txPacketBufPtr = txPacket->data; 1996 } 1997 1998 if (txDescCnt == 0) { 1999 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n"); 2000 if (cmdsts & CMDSTS_MORE) { 2001 DPRINTF(EthernetSM, "there are more descriptors to come\n"); 2002 txState = txDescWrite; 2003 2004 cmdsts &= ~CMDSTS_OWN; 2005 2006 txDmaAddr = regs.txdp & 0x3fffffff; 2007 txDmaData = &cmdsts; 2008 if (is64bit) { 2009 txDmaAddr += offsetof(ns_desc64, cmdsts); 2010 txDmaLen = sizeof(txDesc64.cmdsts); 2011 } else { 2012 txDmaAddr += offsetof(ns_desc32, cmdsts); 2013 txDmaLen = sizeof(txDesc32.cmdsts); 2014 } 2015 txDmaFree = dmaDescFree; 2016 2017 if (doTxDmaWrite()) 2018 goto exit; 2019 2020 } else { /* this packet is totally done */ 2021 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n"); 2022 /* deal with the the packet that just finished */ 2023 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) { 2024 IpPtr ip(txPacket); 2025 if (extsts & EXTSTS_UDPPKT) { 2026 UdpPtr udp(ip); 2027 
udp->sum(0); 2028 udp->sum(cksum(udp)); 2029 txUdpChecksums++; 2030 } else if (extsts & EXTSTS_TCPPKT) { 2031 TcpPtr tcp(ip); 2032 tcp->sum(0); 2033 tcp->sum(cksum(tcp)); 2034 txTcpChecksums++; 2035 } 2036 if (extsts & EXTSTS_IPPKT) { 2037 ip->sum(0); 2038 ip->sum(cksum(ip)); 2039 txIpChecksums++; 2040 } 2041 } 2042 2043 txPacket->length = txPacketBufPtr - txPacket->data; 2044 // this is just because the receive can't handle a 2045 // packet bigger want to make sure 2046 if (txPacket->length > 1514) 2047 panic("transmit packet too large, %s > 1514\n", 2048 txPacket->length); 2049 2050#ifndef NDEBUG 2051 bool success = 2052#endif 2053 txFifo.push(txPacket); 2054 assert(success); 2055 2056 /* 2057 * this following section is not tqo spec, but 2058 * functionally shouldn't be any different. normally, 2059 * the chip will wait til the transmit has occurred 2060 * before writing back the descriptor because it has 2061 * to wait to see that it was successfully transmitted 2062 * to decide whether to set CMDSTS_OK or not. 
2063 * however, in the simulator since it is always 2064 * successfully transmitted, and writing it exactly to 2065 * spec would complicate the code, we just do it here 2066 */ 2067 2068 cmdsts &= ~CMDSTS_OWN; 2069 cmdsts |= CMDSTS_OK; 2070 2071 DPRINTF(EthernetDesc, 2072 "txDesc writeback: cmdsts=%08x extsts=%08x\n", 2073 cmdsts, extsts); 2074 2075 txDmaFree = dmaDescFree; 2076 txDmaAddr = regs.txdp & 0x3fffffff; 2077 txDmaData = &cmdsts; 2078 if (is64bit) { 2079 txDmaAddr += offsetof(ns_desc64, cmdsts); 2080 txDmaLen = 2081 sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts); 2082 } else { 2083 txDmaAddr += offsetof(ns_desc32, cmdsts); 2084 txDmaLen = 2085 sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts); 2086 } 2087 2088 descDmaWrites++; 2089 descDmaWrBytes += txDmaLen; 2090 2091 transmit(); 2092 txPacket = 0; 2093 2094 if (!txEnable) { 2095 DPRINTF(EthernetSM, "halting TX state machine\n"); 2096 txState = txIdle; 2097 goto exit; 2098 } else 2099 txState = txAdvance; 2100 2101 if (doTxDmaWrite()) 2102 goto exit; 2103 } 2104 } else { 2105 DPRINTF(EthernetSM, "this descriptor isn't done yet\n"); 2106 if (!txFifo.full()) { 2107 txState = txFragRead; 2108 2109 /* 2110 * The number of bytes transferred is either whatever 2111 * is left in the descriptor (txDescCnt), or if there 2112 * is not enough room in the fifo, just whatever room 2113 * is left in the fifo 2114 */ 2115 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail()); 2116 2117 txDmaAddr = txFragPtr & 0x3fffffff; 2118 txDmaData = txPacketBufPtr; 2119 txDmaLen = txXferLen; 2120 txDmaFree = dmaDataFree; 2121 2122 if (doTxDmaRead()) 2123 goto exit; 2124 } else { 2125 txState = txFifoBlock; 2126 transmit(); 2127 2128 goto exit; 2129 } 2130 2131 } 2132 break; 2133 2134 case txFragRead: 2135 if (txDmaState != dmaIdle) 2136 goto exit; 2137 2138 txPacketBufPtr += txXferLen; 2139 txFragPtr += txXferLen; 2140 txDescCnt -= txXferLen; 2141 txFifo.reserve(txXferLen); 2142 2143 txState = txFifoBlock; 2144 break; 2145 2146 
      case txDescWrite:
        if (txDmaState != dmaIdle)
            goto exit;

        if (cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_TXDESC);

        if (!txEnable) {
            DPRINTF(EthernetSM, "halting TX state machine\n");
            txState = txIdle;
            goto exit;
        } else
            txState = txAdvance;
        break;

      case txAdvance:
        if (link == 0) {
            // End of the descriptor chain.
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        } else {
            if (txDmaState != dmaIdle)
                goto exit;
            // Follow the link to the next descriptor and fetch it.
            txState = txDescRead;
            regs.txdp = link;
            CTDD = false;

            txDmaAddr = link & 0x3fffffff;
            txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
            txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
            txDmaFree = dmaDescFree;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("invalid state");
    }

    DPRINTF(EthernetSM, "entering next txState=%s\n",
            NsTxStateStrings[txState]);
    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
            NsTxStateStrings[txState]);

    if (clock && !txKickEvent.scheduled())
        txKickEvent.schedule(txKickTick);
}

/**
 * Advance the EEPROM state machine
 * Called on rising edge of EEPROM clock bit in MEAR
 */
void
NSGigE::eepromKick()
{
    switch (eepromState) {

      case eepromStart:

        // Wait for start bit
        if (regs.mear & MEAR_EEDI) {
            // Set up to get 2 opcode bits
            eepromState = eepromGetOpcode;
            eepromBitsToRx = 2;
            eepromOpcode = 0;
        }
        break;

      case eepromGetOpcode:
        // Shift in one opcode bit per clock, MSB first.
        eepromOpcode <<= 1;
        eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting opcode
        if (eepromBitsToRx == 0) {
            if (eepromOpcode != EEPROM_READ)
                panic("only EEPROM reads are implemented!");

            // Set up to get address
            eepromState = eepromGetAddress;
            eepromBitsToRx = 6;
            eepromAddress = 0;
        }
        break;

      case eepromGetAddress:
        // Shift in one address bit per clock, MSB first.
        eepromAddress <<= 1;
        eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting address
        if (eepromBitsToRx == 0) {

            if (eepromAddress >= EEPROM_SIZE)
                panic("EEPROM read access out of range!");

            // Only the perfect-match MAC address words are modelled.
            switch (eepromAddress) {

              case EEPROM_PMATCH2_ADDR:
                eepromData = rom.perfectMatch[5];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[4];
                break;

              case EEPROM_PMATCH1_ADDR:
                eepromData = rom.perfectMatch[3];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[2];
                break;

              case EEPROM_PMATCH0_ADDR:
                eepromData = rom.perfectMatch[1];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[0];
                break;

              default:
                panic("FreeBSD driver only uses EEPROM to read PMATCH!");
            }
            // Set up to read data
            eepromState = eepromRead;
            eepromBitsToRx = 16;

            // Clear data in bit
            regs.mear &= ~MEAR_EEDI;
        }
        break;

      case eepromRead:
        // Clear Data Out bit
        regs.mear &= ~MEAR_EEDO;
        // Set bit to value of current EEPROM bit
        regs.mear |= (eepromData & 0x8000) ?
            MEAR_EEDO : 0x0;

        // Shift out one data bit per clock, MSB first.
        eepromData <<= 1;
        --eepromBitsToRx;

        // All done
        if (eepromBitsToRx == 0) {
            eepromState = eepromStart;
        }
        break;

      default:
        panic("invalid EEPROM state");
    }

}

// Called by the ethernet interface when the outgoing packet has left
// the device; schedule transmission of the next FIFO entry, if any.
void
NSGigE::transferDone()
{
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
        return;
    }

    DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");

    txEvent.reschedule(curTick + cycles(1), true);
}

// Receive address filter.
// @return true if the packet should be DROPPED, false to accept it.
bool
NSGigE::rxFilter(const EthPacketPtr &packet)
{
    EthPtr eth = packet;
    bool drop = true;
    string type;

    const EthAddr &dst = eth->dst();
    if (dst.unicast()) {
        // If we're accepting all unicast addresses
        if (acceptUnicast)
            drop = false;

        // If we make a perfect match
        if (acceptPerfect && dst == rom.perfectMatch)
            drop = false;

        if (acceptArp && eth->type() == ETH_TYPE_ARP)
            drop = false;

    } else if (dst.broadcast()) {
        // if we're accepting broadcasts
        if (acceptBroadcast)
            drop = false;

    } else if (dst.multicast()) {
        // if we're accepting all multicasts
        if (acceptMulticast)
            drop = false;

        // Multicast hashing faked - all packets accepted
        if (multicastHashEnable)
            drop = false;
    }

    if (drop) {
        DPRINTF(Ethernet, "rxFilter drop\n");
        DDUMP(EthernetData, packet->data, packet->length);
    }

    return drop;
}

// Entry point for packets arriving from the wire.
// @return true if the packet was consumed (even if dropped by policy);
//         false only when the receive FIFO overflowed, which also
//         raises ISR_RXORN.
bool
NSGigE::recvPacket(EthPacketPtr packet)
{
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
            rxFifo.avail());

    if (!rxEnable) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        return true;
    }

    if (!rxFilterEnable) {
        DPRINTF(Ethernet,
            "receive packet filtering disabled . . . packet dropped\n");
        return true;
    }

    if (rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        return true;
    }

    if (rxFifo.avail() < packet->length) {
#if TRACING_ON
        IpPtr ip(packet);
        TcpPtr tcp(ip);
        if (ip) {
            DPRINTF(Ethernet,
                    "packet won't fit in receive buffer...pkt ID %d dropped\n",
                    ip->id());
            if (tcp) {
                DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
            }
        }
#endif
        droppedPackets++;
        devIntrPost(ISR_RXORN);
        return false;
    }

    rxFifo.push(packet);

    rxKick();
    return true;
}


void
NSGigE::resume()
{
    SimObject::resume();

    // During drain we could have left the state machines in a waiting state and
    // they wouldn't get out until some other event occured to kick them.
    // This way they'll get out immediately
    txKick();
    rxKick();
}


//=====================================================================
//
//
void
NSGigE::serialize(ostream &os)
{
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
     */
    // @todo will mem system save pending dma?

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.brar);
    SERIALIZE_SCALAR(regs.brdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    rxFifo.serialize("rxFifo", os);
    txFifo.serialize("txFifo", os);

    /*
     * Serialize the various helper variables
     */
    // In-flight packets are optional: record existence flags so
    // unserialize knows whether packet data follows.
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket->length = txPacketBufPtr - txPacket->data;
        txPacket->serialize("txPacket", os);
        // Buffer pointers are stored as offsets, not raw addresses.
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        rxPacket->serialize("rxPacket", os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize Cached Descriptors
     */
    SERIALIZE_SCALAR(rxDesc64.link);
    SERIALIZE_SCALAR(rxDesc64.bufptr);
    SERIALIZE_SCALAR(rxDesc64.cmdsts);
    SERIALIZE_SCALAR(rxDesc64.extsts);
    SERIALIZE_SCALAR(txDesc64.link);
    SERIALIZE_SCALAR(txDesc64.bufptr);
    SERIALIZE_SCALAR(txDesc64.cmdsts);
    SERIALIZE_SCALAR(txDesc64.extsts);
    SERIALIZE_SCALAR(rxDesc32.link);
    SERIALIZE_SCALAR(rxDesc32.bufptr);
    SERIALIZE_SCALAR(rxDesc32.cmdsts);
    SERIALIZE_SCALAR(rxDesc32.extsts);
    SERIALIZE_SCALAR(txDesc32.link);
    SERIALIZE_SCALAR(txDesc32.bufptr);
    SERIALIZE_SCALAR(txDesc32.cmdsts);
    SERIALIZE_SCALAR(txDesc32.extsts);
    SERIALIZE_SCALAR(extstsEnable);

    /*
     * Serialize tx state machine
     */
    // Enums are widened to int for checkpoint portability.
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);
    SERIALIZE_SCALAR(txKickTick);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFragPtr);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);
    SERIALIZE_SCALAR(rxKickTick);

    /*
     * Serialize EEPROM state machine
     */
    int eepromState = this->eepromState;
    SERIALIZE_SCALAR(eepromState);
    SERIALIZE_SCALAR(eepromClk);
    SERIALIZE_SCALAR(eepromBitsToRx);
    SERIALIZE_SCALAR(eepromOpcode);
    SERIALIZE_SCALAR(eepromAddress);
    SERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later.
     */
    // NOTE(review): transmitTick is stored RELATIVE to curTick, while
    // intrEventTick below is stored as an ABSOLUTE tick -- confirm the
    // corresponding unserialize() paths apply the matching convention.
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);
    SERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    // 0 doubles as the "no pending interrupt event" sentinel.
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}

/**
 * Restore device state from a checkpoint.  The field names and set of
 * values read here must mirror NSGigE::serialize() exactly, since the
 * macros look values up by name in the checkpoint section.
 *
 * @param cp      checkpoint to read from
 * @param section name of this device's section within the checkpoint
 */
void
NSGigE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Unserialize the PciDev base class
    PciDev::unserialize(cp, section);

    // Device register file (DP83820 register set).
    UNSERIALIZE_SCALAR(regs.command);
    UNSERIALIZE_SCALAR(regs.config);
    UNSERIALIZE_SCALAR(regs.mear);
    UNSERIALIZE_SCALAR(regs.ptscr);
    UNSERIALIZE_SCALAR(regs.isr);
    UNSERIALIZE_SCALAR(regs.imr);
    UNSERIALIZE_SCALAR(regs.ier);
    UNSERIALIZE_SCALAR(regs.ihr);
    UNSERIALIZE_SCALAR(regs.txdp);
    UNSERIALIZE_SCALAR(regs.txdp_hi);
    UNSERIALIZE_SCALAR(regs.txcfg);
    UNSERIALIZE_SCALAR(regs.gpior);
    UNSERIALIZE_SCALAR(regs.rxdp);
    UNSERIALIZE_SCALAR(regs.rxdp_hi);
    UNSERIALIZE_SCALAR(regs.rxcfg);
    UNSERIALIZE_SCALAR(regs.pqcr);
    UNSERIALIZE_SCALAR(regs.wcsr);
    UNSERIALIZE_SCALAR(regs.pcr);
    UNSERIALIZE_SCALAR(regs.rfcr);
    UNSERIALIZE_SCALAR(regs.rfdr);
    UNSERIALIZE_SCALAR(regs.brar);
    UNSERIALIZE_SCALAR(regs.brdr);
    UNSERIALIZE_SCALAR(regs.srr);
    UNSERIALIZE_SCALAR(regs.mibc);
    UNSERIALIZE_SCALAR(regs.vrcr);
    UNSERIALIZE_SCALAR(regs.vtcr);

    UNSERIALIZE_SCALAR(regs.vdr);
    UNSERIALIZE_SCALAR(regs.ccsr);
    UNSERIALIZE_SCALAR(regs.tbicr);
    UNSERIALIZE_SCALAR(regs.tbisr);
    UNSERIALIZE_SCALAR(regs.tanar);
    UNSERIALIZE_SCALAR(regs.tanlpar);
    UNSERIALIZE_SCALAR(regs.taner);
    UNSERIALIZE_SCALAR(regs.tesr);

    // EEPROM-backed receive filter state: perfect-match MAC address and
    // multicast hash table.
    UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    UNSERIALIZE_SCALAR(ioEnable);

    /*
     * unserialize the data fifos
     */
    rxFifo.unserialize("rxFifo", cp, section);
    txFifo.unserialize("txFifo", cp, section);

    /*
     * unserialize the various helper variables
     */
    // Rebuild the in-flight tx packet (if one was checkpointed) and
    // recompute the raw buffer pointer from the saved offset.
    // NOTE(review): 16384 appears to be a fixed maximum packet buffer
    // size -- confirm it matches the allocation used on the tx/rx paths.
    bool txPacketExists;
    UNSERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket = new EthPacketData(16384);
        txPacket->unserialize("txPacket", cp, section);
        uint32_t txPktBufPtr;
        UNSERIALIZE_SCALAR(txPktBufPtr);
        txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
    } else
        txPacket = 0;

    // Same reconstruction for the in-flight rx packet.
    bool rxPacketExists;
    UNSERIALIZE_SCALAR(rxPacketExists);
    rxPacket = 0;  // redundant with the else branch below, but harmless
    if (rxPacketExists) {
        rxPacket = new EthPacketData(16384);
        rxPacket->unserialize("rxPacket", cp, section);
        uint32_t rxPktBufPtr;
        UNSERIALIZE_SCALAR(rxPktBufPtr);
        rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
    } else
        rxPacket = 0;

    UNSERIALIZE_SCALAR(txXferLen);
    UNSERIALIZE_SCALAR(rxXferLen);

    /*
     * Unserialize Cached Descriptors
     */
    // Both 64-bit and 32-bit descriptor caches are restored, mirroring
    // serialize(); only the pair matching the addressing mode is live.
    UNSERIALIZE_SCALAR(rxDesc64.link);
    UNSERIALIZE_SCALAR(rxDesc64.bufptr);
    UNSERIALIZE_SCALAR(rxDesc64.cmdsts);
    UNSERIALIZE_SCALAR(rxDesc64.extsts);
    UNSERIALIZE_SCALAR(txDesc64.link);
    UNSERIALIZE_SCALAR(txDesc64.bufptr);
    UNSERIALIZE_SCALAR(txDesc64.cmdsts);
    UNSERIALIZE_SCALAR(txDesc64.extsts);
    UNSERIALIZE_SCALAR(rxDesc32.link);
    UNSERIALIZE_SCALAR(rxDesc32.bufptr);
    UNSERIALIZE_SCALAR(rxDesc32.cmdsts);

    UNSERIALIZE_SCALAR(rxDesc32.extsts);
    UNSERIALIZE_SCALAR(txDesc32.link);
    UNSERIALIZE_SCALAR(txDesc32.bufptr);
    UNSERIALIZE_SCALAR(txDesc32.cmdsts);
    UNSERIALIZE_SCALAR(txDesc32.extsts);
    UNSERIALIZE_SCALAR(extstsEnable);

    /*
     * unserialize tx state machine
     */
    // State-machine enums were checkpointed as plain ints; read into a
    // local and cast back, mirroring serialize().
    int txState;
    UNSERIALIZE_SCALAR(txState);
    this->txState = (TxState) txState;
    UNSERIALIZE_SCALAR(txEnable);
    UNSERIALIZE_SCALAR(CTDD);
    UNSERIALIZE_SCALAR(txFragPtr);
    UNSERIALIZE_SCALAR(txDescCnt);
    int txDmaState;
    UNSERIALIZE_SCALAR(txDmaState);
    this->txDmaState = (DmaState) txDmaState;
    UNSERIALIZE_SCALAR(txKickTick);
    // Kick ticks were stored as-is (0 meaning "not scheduled"), so they
    // are rescheduled at their original absolute tick.
    if (txKickTick)
        txKickEvent.schedule(txKickTick);

    /*
     * unserialize rx state machine
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    this->rxState = (RxState) rxState;
    UNSERIALIZE_SCALAR(rxEnable);
    UNSERIALIZE_SCALAR(CRDD);
    UNSERIALIZE_SCALAR(rxPktBytes);
    UNSERIALIZE_SCALAR(rxFragPtr);
    UNSERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState;
    UNSERIALIZE_SCALAR(rxDmaState);
    this->rxDmaState = (DmaState) rxDmaState;
    UNSERIALIZE_SCALAR(rxKickTick);
    if (rxKickTick)
        rxKickEvent.schedule(rxKickTick);

    /*
     * Unserialize EEPROM state machine
     */
    int eepromState;
    UNSERIALIZE_SCALAR(eepromState);
    this->eepromState = (EEPROMState) eepromState;
    UNSERIALIZE_SCALAR(eepromClk);
    UNSERIALIZE_SCALAR(eepromBitsToRx);
    UNSERIALIZE_SCALAR(eepromOpcode);
    UNSERIALIZE_SCALAR(eepromAddress);
    UNSERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, reschedule it now
     */
    // transmitTick was checkpointed relative to the old curTick, so the
    // event is re-anchored to the current tick on restore.
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        txEvent.schedule(curTick + transmitTick);

    /*
     * unserialize receive address filter settings
     */
    UNSERIALIZE_SCALAR(rxFilterEnable);
    UNSERIALIZE_SCALAR(acceptBroadcast);

    UNSERIALIZE_SCALAR(acceptMulticast);
    UNSERIALIZE_SCALAR(acceptUnicast);
    UNSERIALIZE_SCALAR(acceptPerfect);
    UNSERIALIZE_SCALAR(acceptArp);
    UNSERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    UNSERIALIZE_SCALAR(intrTick);
    UNSERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick;
    UNSERIALIZE_SCALAR(intrEventTick);
    if (intrEventTick) {
        // NOTE(review): nothing here calls schedule() on the new event --
        // presumably the IntrEvent(obj, tick, flag) constructor schedules
        // itself at intrEventTick; confirm against the IntrEvent ctor.
        // intrEventTick was stored as an absolute tick in serialize().
        intrEvent = new IntrEvent(this, intrEventTick, true);
    }
}

/**
 * Factory for the NSGigE ethernet interface object, called by the
 * python-generated params machinery.  Wires this interface and its
 * configured peer together so packets can flow both ways.
 *
 * @return the newly constructed interface (ownership passes to caller)
 */
NSGigEInt *
NSGigEIntParams::create()
{
    // `name` and `device` are fields of the params object.
    NSGigEInt *dev_int = new NSGigEInt(name, device);

    // Peer may be unset (e.g. an unconnected port); only link when present.
    EtherInt *p = (EtherInt *)peer;
    if (p) {
        dev_int->setPeer(p);
        p->setPeer(dev_int);
    }

    return dev_int;
}

/**
 * Factory for the NSGigE device itself; the device constructor pulls all
 * configuration from this params object.
 */
NSGigE *
NSGigEParams::create()
{
    return new NSGigE(this);
}