/*
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 */

/* @file
 * Device model for Intel's 8254x line of gigabit ethernet controllers.
 * In particular an 82547 revision 2 (82547GI) MAC because it seems to have the
 * fewest workarounds in the driver. It will probably work with most of the
 * other MACs with slight modifications.
36 */ 37 38 39/* 40 * @todo really there are multiple dma engines.. we should implement them. 41 */ 42 43#include <algorithm> 44 45#include "base/inet.hh" 46#include "base/trace.hh" 47#include "dev/i8254xGBe.hh" 48#include "mem/packet.hh" 49#include "mem/packet_access.hh" 50#include "params/IGbE.hh" 51#include "sim/stats.hh" 52#include "sim/system.hh" 53 54using namespace iGbReg; 55using namespace Net; 56 57IGbE::IGbE(const Params *p) 58 : EtherDevice(p), etherInt(NULL), drainEvent(NULL), useFlowControl(p->use_flow_control), 59 rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), rxTick(false), 60 txTick(false), txFifoTick(false), rxDmaPacket(false), 61 fetchDelay(p->fetch_delay), wbDelay(p->wb_delay), 62 fetchCompDelay(p->fetch_comp_delay), wbCompDelay(p->wb_comp_delay), 63 rxWriteDelay(p->rx_write_delay), txReadDelay(p->tx_read_delay), 64 rdtrEvent(this), radvEvent(this), 65 tadvEvent(this), tidvEvent(this), tickEvent(this), interEvent(this), 66 rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size), 67 txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size), 68 clock(p->clock), lastInterrupt(0) 69{ 70 etherInt = new IGbEInt(name() + ".int", this); 71 72 // Initialized internal registers per Intel documentation 73 // All registers intialized to 0 by per register constructor 74 regs.ctrl.fd(1); 75 regs.ctrl.lrst(1); 76 regs.ctrl.speed(2); 77 regs.ctrl.frcspd(1); 78 regs.sts.speed(3); // Say we're 1000Mbps 79 regs.sts.fd(1); // full duplex 80 regs.sts.lu(1); // link up 81 regs.eecd.fwe(1); 82 regs.eecd.ee_type(1); 83 regs.imr = 0; 84 regs.iam = 0; 85 regs.rxdctl.gran(1); 86 regs.rxdctl.wthresh(1); 87 regs.fcrth(1); 88 regs.tdwba = 0; 89 regs.rlpml = 0; 90 regs.sw_fw_sync = 0; 91 92 regs.pba.rxa(0x30); 93 regs.pba.txa(0x10); 94 95 eeOpBits = 0; 96 eeAddrBits = 0; 97 eeDataBits = 0; 98 eeOpcode = 0; 99 100 // clear all 64 16 bit words of the eeprom 101 memset(&flash, 0, EEPROM_SIZE*2); 102 103 // Set the MAC address 104 memcpy(flash, p->hardware_address.bytes(), 
ETH_ADDR_LEN); 105 for (int x = 0; x < ETH_ADDR_LEN/2; x++) 106 flash[x] = htobe(flash[x]); 107 108 uint16_t csum = 0; 109 for (int x = 0; x < EEPROM_SIZE; x++) 110 csum += htobe(flash[x]); 111 112 113 // Magic happy checksum value 114 flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum)); 115 116 rxFifo.clear(); 117 txFifo.clear(); 118} 119 120EtherInt* 121IGbE::getEthPort(const std::string &if_name, int idx) 122{ 123 124 if (if_name == "interface") { 125 if (etherInt->getPeer()) 126 panic("Port already connected to\n"); 127 return etherInt; 128 } 129 return NULL; 130} 131 132Tick 133IGbE::writeConfig(PacketPtr pkt) 134{ 135 int offset = pkt->getAddr() & PCI_CONFIG_SIZE; 136 if (offset < PCI_DEVICE_SPECIFIC) 137 PciDev::writeConfig(pkt); 138 else 139 panic("Device specific PCI config space not implemented.\n"); 140 141 /// 142 /// Some work may need to be done here based for the pci COMMAND bits. 143 /// 144 145 return pioDelay; 146} 147 148Tick 149IGbE::read(PacketPtr pkt) 150{ 151 int bar; 152 Addr daddr; 153 154 if (!getBAR(pkt->getAddr(), bar, daddr)) 155 panic("Invalid PCI memory access to unmapped memory.\n"); 156 157 // Only Memory register BAR is allowed 158 assert(bar == 0); 159 160 // Only 32bit accesses allowed 161 assert(pkt->getSize() == 4); 162 163 DPRINTF(Ethernet, "Read device register %#X\n", daddr); 164 165 pkt->allocate(); 166 167 /// 168 /// Handle read of register here 169 /// 170 171 172 switch (daddr) { 173 case REG_CTRL: 174 pkt->set<uint32_t>(regs.ctrl()); 175 break; 176 case REG_STATUS: 177 pkt->set<uint32_t>(regs.sts()); 178 break; 179 case REG_EECD: 180 pkt->set<uint32_t>(regs.eecd()); 181 break; 182 case REG_EERD: 183 pkt->set<uint32_t>(regs.eerd()); 184 break; 185 case REG_CTRL_EXT: 186 pkt->set<uint32_t>(regs.ctrl_ext()); 187 break; 188 case REG_MDIC: 189 pkt->set<uint32_t>(regs.mdic()); 190 break; 191 case REG_ICR: 192 DPRINTF(Ethernet, "Reading ICR. 
ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs.icr(), 193 regs.imr, regs.iam, regs.ctrl_ext.iame()); 194 pkt->set<uint32_t>(regs.icr()); 195 if (regs.icr.int_assert() || regs.imr == 0) { 196 regs.icr = regs.icr() & ~mask(30); 197 DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr()); 198 } 199 if (regs.ctrl_ext.iame() && regs.icr.int_assert()) 200 regs.imr &= ~regs.iam; 201 chkInterrupt(); 202 break; 203 case REG_EICR: 204 // This is only useful for MSI, but the driver reads it every time 205 // Just don't do anything 206 pkt->set<uint32_t>(0); 207 break; 208 case REG_ITR: 209 pkt->set<uint32_t>(regs.itr()); 210 break; 211 case REG_RCTL: 212 pkt->set<uint32_t>(regs.rctl()); 213 break; 214 case REG_FCTTV: 215 pkt->set<uint32_t>(regs.fcttv()); 216 break; 217 case REG_TCTL: 218 pkt->set<uint32_t>(regs.tctl()); 219 break; 220 case REG_PBA: 221 pkt->set<uint32_t>(regs.pba()); 222 break; 223 case REG_WUC: 224 case REG_LEDCTL: 225 pkt->set<uint32_t>(0); // We don't care, so just return 0 226 break; 227 case REG_FCRTL: 228 pkt->set<uint32_t>(regs.fcrtl()); 229 break; 230 case REG_FCRTH: 231 pkt->set<uint32_t>(regs.fcrth()); 232 break; 233 case REG_RDBAL: 234 pkt->set<uint32_t>(regs.rdba.rdbal()); 235 break; 236 case REG_RDBAH: 237 pkt->set<uint32_t>(regs.rdba.rdbah()); 238 break; 239 case REG_RDLEN: 240 pkt->set<uint32_t>(regs.rdlen()); 241 break; 242 case REG_SRRCTL: 243 pkt->set<uint32_t>(regs.srrctl()); 244 break; 245 case REG_RDH: 246 pkt->set<uint32_t>(regs.rdh()); 247 break; 248 case REG_RDT: 249 pkt->set<uint32_t>(regs.rdt()); 250 break; 251 case REG_RDTR: 252 pkt->set<uint32_t>(regs.rdtr()); 253 if (regs.rdtr.fpd()) { 254 rxDescCache.writeback(0); 255 DPRINTF(EthernetIntr, "Posting interrupt because of RDTR.FPD write\n"); 256 postInterrupt(IT_RXT); 257 regs.rdtr.fpd(0); 258 } 259 break; 260 case REG_RXDCTL: 261 pkt->set<uint32_t>(regs.rxdctl()); 262 break; 263 case REG_RADV: 264 pkt->set<uint32_t>(regs.radv()); 265 break; 266 case REG_TDBAL: 267 
pkt->set<uint32_t>(regs.tdba.tdbal()); 268 break; 269 case REG_TDBAH: 270 pkt->set<uint32_t>(regs.tdba.tdbah()); 271 break; 272 case REG_TDLEN: 273 pkt->set<uint32_t>(regs.tdlen()); 274 break; 275 case REG_TDH: 276 pkt->set<uint32_t>(regs.tdh()); 277 break; 278 case REG_TXDCA_CTL: 279 pkt->set<uint32_t>(regs.txdca_ctl()); 280 break; 281 case REG_TDT: 282 pkt->set<uint32_t>(regs.tdt()); 283 break; 284 case REG_TIDV: 285 pkt->set<uint32_t>(regs.tidv()); 286 break; 287 case REG_TXDCTL: 288 pkt->set<uint32_t>(regs.txdctl()); 289 break; 290 case REG_TADV: 291 pkt->set<uint32_t>(regs.tadv()); 292 break; 293 case REG_TDWBAL: 294 pkt->set<uint32_t>(regs.tdwba & mask(32)); 295 break; 296 case REG_TDWBAH: 297 pkt->set<uint32_t>(regs.tdwba >> 32); 298 break; 299 case REG_RXCSUM: 300 pkt->set<uint32_t>(regs.rxcsum()); 301 break; 302 case REG_RLPML: 303 pkt->set<uint32_t>(regs.rlpml); 304 break; 305 case REG_RFCTL: 306 pkt->set<uint32_t>(regs.rfctl()); 307 break; 308 case REG_MANC: 309 pkt->set<uint32_t>(regs.manc()); 310 break; 311 case REG_SWSM: 312 pkt->set<uint32_t>(regs.swsm()); 313 regs.swsm.smbi(1); 314 break; 315 case REG_FWSM: 316 pkt->set<uint32_t>(regs.fwsm()); 317 break; 318 case REG_SWFWSYNC: 319 pkt->set<uint32_t>(regs.sw_fw_sync); 320 break; 321 default: 322 if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) && 323 !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) && 324 !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4)) && 325 !(daddr >= REG_CRCERRS && daddr < (REG_CRCERRS + STATS_REGS_SIZE))) 326 panic("Read request to unknown register number: %#x\n", daddr); 327 else 328 pkt->set<uint32_t>(0); 329 }; 330 331 pkt->makeAtomicResponse(); 332 return pioDelay; 333} 334 335Tick 336IGbE::write(PacketPtr pkt) 337{ 338 int bar; 339 Addr daddr; 340 341 342 if (!getBAR(pkt->getAddr(), bar, daddr)) 343 panic("Invalid PCI memory access to unmapped memory.\n"); 344 345 // Only Memory register BAR is allowed 346 
assert(bar == 0); 347 348 // Only 32bit accesses allowed 349 assert(pkt->getSize() == sizeof(uint32_t)); 350 351 DPRINTF(Ethernet, "Wrote device register %#X value %#X\n", daddr, pkt->get<uint32_t>()); 352 353 /// 354 /// Handle write of register here 355 /// 356 uint32_t val = pkt->get<uint32_t>(); 357 358 Regs::RCTL oldrctl; 359 Regs::TCTL oldtctl; 360 361 switch (daddr) { 362 case REG_CTRL: 363 regs.ctrl = val; 364 if (regs.ctrl.tfce()) 365 warn("TX Flow control enabled, should implement\n"); 366 if (regs.ctrl.rfce()) 367 warn("RX Flow control enabled, should implement\n"); 368 break; 369 case REG_CTRL_EXT: 370 regs.ctrl_ext = val; 371 break; 372 case REG_STATUS: 373 regs.sts = val; 374 break; 375 case REG_EECD: 376 int oldClk; 377 oldClk = regs.eecd.sk(); 378 regs.eecd = val; 379 // See if this is a eeprom access and emulate accordingly 380 if (!oldClk && regs.eecd.sk()) { 381 if (eeOpBits < 8) { 382 eeOpcode = eeOpcode << 1 | regs.eecd.din(); 383 eeOpBits++; 384 } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) { 385 eeAddr = eeAddr << 1 | regs.eecd.din(); 386 eeAddrBits++; 387 } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) { 388 assert(eeAddr>>1 < EEPROM_SIZE); 389 DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n", 390 flash[eeAddr>>1] >> eeDataBits & 0x1, flash[eeAddr>>1]); 391 regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1); 392 eeDataBits++; 393 } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) { 394 regs.eecd.dout(0); 395 eeDataBits++; 396 } else 397 panic("What's going on with eeprom interface? 
opcode:" 398 " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode, 399 (uint32_t)eeOpBits, (uint32_t)eeAddr, 400 (uint32_t)eeAddrBits, (uint32_t)eeDataBits); 401 402 // Reset everything for the next command 403 if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) || 404 (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) { 405 eeOpBits = 0; 406 eeAddrBits = 0; 407 eeDataBits = 0; 408 eeOpcode = 0; 409 eeAddr = 0; 410 } 411 412 DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n", 413 (uint32_t)eeOpcode, (uint32_t) eeOpBits, 414 (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits); 415 if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI || 416 eeOpcode == EEPROM_RDSR_OPCODE_SPI )) 417 panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode, 418 (uint32_t)eeOpBits); 419 420 421 } 422 // If driver requests eeprom access, immediately give it to it 423 regs.eecd.ee_gnt(regs.eecd.ee_req()); 424 break; 425 case REG_EERD: 426 regs.eerd = val; 427 if (regs.eerd.start()) { 428 regs.eerd.done(1); 429 assert(regs.eerd.addr() < EEPROM_SIZE); 430 regs.eerd.data(flash[regs.eerd.addr()]); 431 regs.eerd.start(0); 432 DPRINTF(EthernetEEPROM, "EEPROM: read addr: %#X data %#x\n", 433 regs.eerd.addr(), regs.eerd.data()); 434 } 435 break; 436 case REG_MDIC: 437 regs.mdic = val; 438 if (regs.mdic.i()) 439 panic("No support for interrupt on mdic complete\n"); 440 if (regs.mdic.phyadd() != 1) 441 panic("No support for reading anything but phy\n"); 442 DPRINTF(Ethernet, "%s phy address %x\n", regs.mdic.op() == 1 ? 
"Writing" 443 : "Reading", regs.mdic.regadd()); 444 switch (regs.mdic.regadd()) { 445 case PHY_PSTATUS: 446 regs.mdic.data(0x796D); // link up 447 break; 448 case PHY_PID: 449 if (params()->is8257) 450 regs.mdic.data(0x0141); 451 else 452 regs.mdic.data(0x02A8); 453 break; 454 case PHY_EPID: 455 if (params()->is8257) 456 regs.mdic.data(0x0CC0); 457 else 458 regs.mdic.data(0x0380); 459 break; 460 case PHY_GSTATUS: 461 regs.mdic.data(0x7C00); 462 break; 463 case PHY_EPSTATUS: 464 regs.mdic.data(0x3000); 465 break; 466 case PHY_AGC: 467 regs.mdic.data(0x180); // some random length 468 break; 469 default: 470 regs.mdic.data(0); 471 } 472 regs.mdic.r(1); 473 break; 474 case REG_ICR: 475 DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs.icr(), 476 regs.imr, regs.iam, regs.ctrl_ext.iame()); 477 if (regs.ctrl_ext.iame()) 478 regs.imr &= ~regs.iam; 479 regs.icr = ~bits(val,30,0) & regs.icr(); 480 chkInterrupt(); 481 break; 482 case REG_ITR: 483 regs.itr = val; 484 break; 485 case REG_ICS: 486 DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n"); 487 postInterrupt((IntTypes)val); 488 break; 489 case REG_IMS: 490 regs.imr |= val; 491 chkInterrupt(); 492 break; 493 case REG_IMC: 494 regs.imr &= ~val; 495 chkInterrupt(); 496 break; 497 case REG_IAM: 498 regs.iam = val; 499 break; 500 case REG_RCTL: 501 oldrctl = regs.rctl; 502 regs.rctl = val; 503 if (regs.rctl.rst()) { 504 rxDescCache.reset(); 505 DPRINTF(EthernetSM, "RXS: Got RESET!\n"); 506 rxFifo.clear(); 507 regs.rctl.rst(0); 508 } 509 if (regs.rctl.en()) 510 rxTick = true; 511 restartClock(); 512 break; 513 case REG_FCTTV: 514 regs.fcttv = val; 515 break; 516 case REG_TCTL: 517 regs.tctl = val; 518 oldtctl = regs.tctl; 519 regs.tctl = val; 520 if (regs.tctl.en()) 521 txTick = true; 522 restartClock(); 523 if (regs.tctl.en() && !oldtctl.en()) { 524 txDescCache.reset(); 525 } 526 break; 527 case REG_PBA: 528 regs.pba.rxa(val); 529 regs.pba.txa(64 - regs.pba.rxa()); 530 break; 531 case 
REG_WUC: 532 case REG_LEDCTL: 533 case REG_FCAL: 534 case REG_FCAH: 535 case REG_FCT: 536 case REG_VET: 537 case REG_AIFS: 538 case REG_TIPG: 539 ; // We don't care, so don't store anything 540 break; 541 case REG_IVAR0: 542 warn("Writing to IVAR0, ignoring...\n"); 543 break; 544 case REG_FCRTL: 545 regs.fcrtl = val; 546 break; 547 case REG_FCRTH: 548 regs.fcrth = val; 549 break; 550 case REG_RDBAL: 551 regs.rdba.rdbal( val & ~mask(4)); 552 rxDescCache.areaChanged(); 553 break; 554 case REG_RDBAH: 555 regs.rdba.rdbah(val); 556 rxDescCache.areaChanged(); 557 break; 558 case REG_RDLEN: 559 regs.rdlen = val & ~mask(7); 560 rxDescCache.areaChanged(); 561 break; 562 case REG_SRRCTL: 563 regs.srrctl = val; 564 break; 565 case REG_RDH: 566 regs.rdh = val; 567 rxDescCache.areaChanged(); 568 break; 569 case REG_RDT: 570 regs.rdt = val; 571 DPRINTF(EthernetSM, "RXS: RDT Updated.\n"); 572 if (getState() == SimObject::Running) { 573 DPRINTF(EthernetSM, "RXS: RDT Fetching Descriptors!\n"); 574 rxDescCache.fetchDescriptors(); 575 } else { 576 DPRINTF(EthernetSM, "RXS: RDT NOT Fetching Desc b/c draining!\n"); 577 } 578 break; 579 case REG_RDTR: 580 regs.rdtr = val; 581 break; 582 case REG_RADV: 583 regs.radv = val; 584 break; 585 case REG_RXDCTL: 586 regs.rxdctl = val; 587 break; 588 case REG_TDBAL: 589 regs.tdba.tdbal( val & ~mask(4)); 590 txDescCache.areaChanged(); 591 break; 592 case REG_TDBAH: 593 regs.tdba.tdbah(val); 594 txDescCache.areaChanged(); 595 break; 596 case REG_TDLEN: 597 regs.tdlen = val & ~mask(7); 598 txDescCache.areaChanged(); 599 break; 600 case REG_TDH: 601 regs.tdh = val; 602 txDescCache.areaChanged(); 603 break; 604 case REG_TXDCA_CTL: 605 regs.txdca_ctl = val; 606 if (regs.txdca_ctl.enabled()) 607 panic("No support for DCA\n"); 608 break; 609 case REG_TDT: 610 regs.tdt = val; 611 DPRINTF(EthernetSM, "TXS: TX Tail pointer updated\n"); 612 if (getState() == SimObject::Running) { 613 DPRINTF(EthernetSM, "TXS: TDT Fetching Descriptors!\n"); 614 
txDescCache.fetchDescriptors(); 615 } else { 616 DPRINTF(EthernetSM, "TXS: TDT NOT Fetching Desc b/c draining!\n"); 617 } 618 break; 619 case REG_TIDV: 620 regs.tidv = val; 621 break; 622 case REG_TXDCTL: 623 regs.txdctl = val; 624 break; 625 case REG_TADV: 626 regs.tadv = val; 627 break; 628 case REG_TDWBAL: 629 regs.tdwba &= ~mask(32); 630 regs.tdwba |= val; 631 txDescCache.completionWriteback(regs.tdwba & ~mask(1), regs.tdwba & mask(1)); 632 break; 633 case REG_TDWBAH: 634 regs.tdwba &= mask(32); 635 regs.tdwba |= (uint64_t)val << 32; 636 txDescCache.completionWriteback(regs.tdwba & ~mask(1), regs.tdwba & mask(1)); 637 break; 638 case REG_RXCSUM: 639 regs.rxcsum = val; 640 break; 641 case REG_RLPML: 642 regs.rlpml = val; 643 break; 644 case REG_RFCTL: 645 regs.rfctl = val; 646 if (regs.rfctl.exsten()) 647 panic("Extended RX descriptors not implemented\n"); 648 break; 649 case REG_MANC: 650 regs.manc = val; 651 break; 652 case REG_SWSM: 653 regs.swsm = val; 654 if (regs.fwsm.eep_fw_semaphore()) 655 regs.swsm.swesmbi(0); 656 break; 657 case REG_SWFWSYNC: 658 regs.sw_fw_sync = val; 659 break; 660 default: 661 if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) && 662 !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) && 663 !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4))) 664 panic("Write request to unknown register number: %#x\n", daddr); 665 }; 666 667 pkt->makeAtomicResponse(); 668 return pioDelay; 669} 670 671void 672IGbE::postInterrupt(IntTypes t, bool now) 673{ 674 assert(t); 675 676 // Interrupt is already pending 677 if (t & regs.icr() && !now) 678 return; 679 680 regs.icr = regs.icr() | t; 681 682 Tick itr_interval = Clock::Int::ns * 256 * regs.itr.interval(); 683 DPRINTF(EthernetIntr, "EINT: postInterrupt() curTick: %d itr: %d interval: %d\n", 684 curTick, regs.itr.interval(), itr_interval); 685 686 if (regs.itr.interval() == 0 || now || lastInterrupt + itr_interval <= curTick) { 687 if 
(interEvent.scheduled()) { 688 deschedule(interEvent); 689 } 690 cpuPostInt(); 691 } else { 692 Tick int_time = lastInterrupt + itr_interval; 693 assert(int_time > 0); 694 DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for tick %d\n", 695 int_time); 696 if (!interEvent.scheduled()) { 697 schedule(interEvent, int_time); 698 } 699 } 700} 701 702void 703IGbE::delayIntEvent() 704{ 705 cpuPostInt(); 706} 707 708 709void 710IGbE::cpuPostInt() 711{ 712 713 postedInterrupts++; 714 715 if (!(regs.icr() & regs.imr)) { 716 DPRINTF(Ethernet, "Interrupt Masked. Not Posting\n"); 717 return; 718 } 719 720 DPRINTF(Ethernet, "Posting Interrupt\n"); 721 722 723 if (interEvent.scheduled()) { 724 deschedule(interEvent); 725 } 726 727 if (rdtrEvent.scheduled()) { 728 regs.icr.rxt0(1); 729 deschedule(rdtrEvent); 730 } 731 if (radvEvent.scheduled()) { 732 regs.icr.rxt0(1); 733 deschedule(radvEvent); 734 } 735 if (tadvEvent.scheduled()) { 736 regs.icr.txdw(1); 737 deschedule(tadvEvent); 738 } 739 if (tidvEvent.scheduled()) { 740 regs.icr.txdw(1); 741 deschedule(tidvEvent); 742 } 743 744 regs.icr.int_assert(1); 745 DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n", 746 regs.icr()); 747 748 intrPost(); 749 750 lastInterrupt = curTick; 751} 752 753void 754IGbE::cpuClearInt() 755{ 756 if (regs.icr.int_assert()) { 757 regs.icr.int_assert(0); 758 DPRINTF(EthernetIntr, "EINT: Clearing interrupt to CPU now. 
Vector %#x\n", 759 regs.icr()); 760 intrClear(); 761 } 762} 763 764void 765IGbE::chkInterrupt() 766{ 767 DPRINTF(Ethernet, "Checking interrupts icr: %#x imr: %#x\n", regs.icr(), 768 regs.imr); 769 // Check if we need to clear the cpu interrupt 770 if (!(regs.icr() & regs.imr)) { 771 DPRINTF(Ethernet, "Mask cleaned all interrupts\n"); 772 if (interEvent.scheduled()) 773 deschedule(interEvent); 774 if (regs.icr.int_assert()) 775 cpuClearInt(); 776 } 777 DPRINTF(Ethernet, "ITR = %#X itr.interval = %#X\n", regs.itr(), regs.itr.interval()); 778 779 if (regs.icr() & regs.imr) { 780 if (regs.itr.interval() == 0) { 781 cpuPostInt(); 782 } else { 783 DPRINTF(Ethernet, "Possibly scheduling interrupt because of imr write\n"); 784 if (!interEvent.scheduled()) { 785 DPRINTF(Ethernet, "Scheduling for %d\n", curTick + Clock::Int::ns 786 * 256 * regs.itr.interval()); 787 schedule(interEvent, 788 curTick + Clock::Int::ns * 256 * regs.itr.interval()); 789 } 790 } 791 } 792 793 794} 795 796 797IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s) 798 : DescCache<RxDesc>(i, n, s), pktDone(false), pktEvent(this) 799 800{ 801} 802 803void 804IGbE::RxDescCache::writePacket(EthPacketPtr packet) 805{ 806 assert(unusedCache.size()); 807 //if (!unusedCache.size()) 808 // return false; 809 810 pktPtr = packet; 811 pktDone = false; 812 813 Addr buf; 814 RxDesc *desc = unusedCache.front(); 815 switch (igbe->regs.srrctl.desctype()) { 816 case RXDT_LEGACY: 817 buf = desc->legacy.buf; 818 DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n", 819 packet->length, igbe->regs.rctl.descSize()); 820 assert(packet->length < igbe->regs.rctl.descSize()); 821 break; 822 case RXDT_ADV_ONEBUF: 823 int buf_len; 824 buf_len = igbe->regs.rctl.lpe() ? 
igbe->regs.srrctl.bufLen() : 825 igbe->regs.rctl.descSize(); 826 DPRINTF(EthernetDesc, "Packet Length: %d srrctl: %#x Desc Size: %d\n", 827 packet->length, igbe->regs.srrctl(), buf_len); 828 assert(packet->length < buf_len); 829 buf = desc->adv_read.pkt; 830 break; 831 default: 832 panic("Unimplemnted RX receive buffer type: %d\n", 833 igbe->regs.srrctl.desctype()); 834 } 835 836 igbe->dmaWrite(igbe->platform->pciToDma(buf), packet->length, &pktEvent, 837 packet->data, igbe->rxWriteDelay); 838} 839 840void 841IGbE::RxDescCache::pktComplete() 842{ 843 assert(unusedCache.size()); 844 RxDesc *desc; 845 desc = unusedCache.front(); 846 847 uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4 ; 848 DPRINTF(EthernetDesc, "pktPtr->length: %d stripcrc offset: %d value written: %d %d\n", 849 pktPtr->length, crcfixup, 850 htole((uint16_t)(pktPtr->length + crcfixup)), 851 (uint16_t)(pktPtr->length + crcfixup)); 852 853 // no support for anything but starting at 0 854 assert(igbe->regs.rxcsum.pcss() == 0); 855 856 DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n"); 857 858 uint16_t status = RXDS_DD | RXDS_EOP; 859 uint8_t err = 0; 860 uint16_t ext_err = 0; 861 uint16_t csum = 0; 862 uint16_t ptype = 0; 863 uint16_t ip_id = 0; 864 865 IpPtr ip(pktPtr); 866 867 if (ip) { 868 DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n", ip->id()); 869 ptype |= RXDP_IPV4; 870 ip_id = ip->id(); 871 872 if (igbe->regs.rxcsum.ipofld()) { 873 DPRINTF(EthernetDesc, "Checking IP checksum\n"); 874 status |= RXDS_IPCS; 875 csum = htole(cksum(ip)); 876 igbe->rxIpChecksums++; 877 if (cksum(ip) != 0) { 878 err |= RXDE_IPE; 879 ext_err |= RXDEE_IPE; 880 DPRINTF(EthernetDesc, "Checksum is bad!!\n"); 881 } 882 } 883 TcpPtr tcp(ip); 884 if (tcp && igbe->regs.rxcsum.tuofld()) { 885 DPRINTF(EthernetDesc, "Checking TCP checksum\n"); 886 status |= RXDS_TCPCS; 887 ptype |= RXDP_TCP; 888 csum = htole(cksum(tcp)); 889 igbe->rxTcpChecksums++; 890 if (cksum(tcp) != 0) { 891 
DPRINTF(EthernetDesc, "Checksum is bad!!\n"); 892 err |= RXDE_TCPE; 893 ext_err |= RXDEE_TCPE; 894 } 895 } 896 897 UdpPtr udp(ip); 898 if (udp && igbe->regs.rxcsum.tuofld()) { 899 DPRINTF(EthernetDesc, "Checking UDP checksum\n"); 900 status |= RXDS_UDPCS; 901 ptype |= RXDP_UDP; 902 csum = htole(cksum(udp)); 903 igbe->rxUdpChecksums++; 904 if (cksum(udp) != 0) { 905 DPRINTF(EthernetDesc, "Checksum is bad!!\n"); 906 ext_err |= RXDEE_TCPE; 907 err |= RXDE_TCPE; 908 } 909 } 910 } else { // if ip 911 DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n"); 912 } 913 914 switch (igbe->regs.srrctl.desctype()) { 915 case RXDT_LEGACY: 916 desc->legacy.len = htole((uint16_t)(pktPtr->length + crcfixup)); 917 desc->legacy.status = htole(status); 918 desc->legacy.errors = htole(err); 919 // No vlan support at this point... just set it to 0 920 desc->legacy.vlan = 0; 921 break; 922 case RXDT_ADV_ONEBUF: 923 desc->adv_wb.pkt_len = htole((uint16_t)(pktPtr->length + crcfixup)); 924 desc->adv_wb.rss_type = htole(0); 925 desc->adv_wb.pkt_type = htole(ptype); 926 // no header splititng support yet 927 desc->adv_wb.header_len = htole(0); 928 desc->adv_wb.sph = htole(0); 929 if (igbe->regs.rxcsum.pcsd()) { 930 // no rss support right now 931 desc->adv_wb.rss_hash = htole(0); 932 } else { 933 desc->adv_wb.id = htole(ip_id); 934 desc->adv_wb.csum = htole(csum); 935 } 936 desc->adv_wb.status = htole(status); 937 desc->adv_wb.errors = htole(ext_err); 938 // no vlan support 939 desc->adv_wb.vlan_tag = htole(0); 940 break; 941 default: 942 panic("Unimplemnted RX receive buffer type %d\n", 943 igbe->regs.srrctl.desctype()); 944 } 945 946 947 // Deal with the rx timer interrupts 948 if (igbe->regs.rdtr.delay()) { 949 DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n", 950 igbe->regs.rdtr.delay() * igbe->intClock()); 951 igbe->reschedule(igbe->rdtrEvent, 952 curTick + igbe->regs.rdtr.delay() * igbe->intClock(), true); 953 } 954 955 if (igbe->regs.radv.idv()) { 956 DPRINTF(EthernetSM, "RXS: Scheduling 
ADV for %d\n", 957 igbe->regs.radv.idv() * igbe->intClock()); 958 if (!igbe->radvEvent.scheduled()) { 959 igbe->schedule(igbe->radvEvent, 960 curTick + igbe->regs.radv.idv() * igbe->intClock()); 961 } 962 } 963 964 // if neither radv or rdtr, maybe itr is set... 965 if (!igbe->regs.rdtr.delay() && !igbe->regs.radv.idv()) { 966 DPRINTF(EthernetSM, "RXS: Receive interrupt delay disabled, posting IT_RXT\n"); 967 igbe->postInterrupt(IT_RXT); 968 } 969 970 // If the packet is small enough, interrupt appropriately 971 // I wonder if this is delayed or not?! 972 if (pktPtr->length <= igbe->regs.rsrpd.idv()) { 973 DPRINTF(EthernetSM, "RXS: Posting IT_SRPD beacuse small packet received\n"); 974 igbe->postInterrupt(IT_SRPD); 975 } 976 977 DPRINTF(EthernetDesc, "Processing of this descriptor complete\n"); 978 unusedCache.pop_front(); 979 usedCache.push_back(desc); 980 981 982 pktPtr = NULL; 983 enableSm(); 984 pktDone = true; 985 igbe->checkDrain(); 986 987} 988 989void 990IGbE::RxDescCache::enableSm() 991{ 992 if (!igbe->drainEvent) { 993 igbe->rxTick = true; 994 igbe->restartClock(); 995 } 996} 997 998bool 999IGbE::RxDescCache::packetDone() 1000{ 1001 if (pktDone) { 1002 pktDone = false; 1003 return true; 1004 } 1005 return false; 1006} 1007 1008bool 1009IGbE::RxDescCache::hasOutstandingEvents() 1010{ 1011 return pktEvent.scheduled() || wbEvent.scheduled() || 1012 fetchEvent.scheduled(); 1013} 1014 1015void 1016IGbE::RxDescCache::serialize(std::ostream &os) 1017{ 1018 DescCache<RxDesc>::serialize(os); 1019 SERIALIZE_SCALAR(pktDone); 1020} 1021 1022void 1023IGbE::RxDescCache::unserialize(Checkpoint *cp, const std::string §ion) 1024{ 1025 DescCache<RxDesc>::unserialize(cp, section); 1026 UNSERIALIZE_SCALAR(pktDone); 1027} 1028 1029 1030///////////////////////////////////// IGbE::TxDesc ///////////////////////////////// 1031 1032IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s) 1033 : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false), pktWaiting(false), 
1034 useTso(false), pktEvent(this), headerEvent(this), nullEvent(this) 1035 1036{ 1037} 1038 1039void 1040IGbE::TxDescCache::processContextDesc() 1041{ 1042 assert(unusedCache.size()); 1043 TxDesc *desc; 1044 1045 DPRINTF(EthernetDesc, "Checking and processing context descriptors\n"); 1046 1047 while (!useTso && unusedCache.size() && TxdOp::isContext(unusedCache.front())) { 1048 DPRINTF(EthernetDesc, "Got context descriptor type...\n"); 1049 1050 desc = unusedCache.front(); 1051 DPRINTF(EthernetDesc, "Descriptor upper: %#x lower: %#X\n", 1052 desc->d1, desc->d2); 1053 1054 1055 // is this going to be a tcp or udp packet? 1056 isTcp = TxdOp::tcp(desc) ? true : false; 1057 1058 // setup all the TSO variables, they'll be ignored if we don't use 1059 // tso for this connection 1060 tsoHeaderLen = TxdOp::hdrlen(desc); 1061 tsoMss = TxdOp::mss(desc); 1062 1063 if (TxdOp::isType(desc, TxdOp::TXD_CNXT) && TxdOp::tse(desc)) { 1064 DPRINTF(EthernetDesc, "TCP offload enabled for packet hdrlen: %d mss: %d paylen %d\n", 1065 TxdOp::hdrlen(desc), TxdOp::mss(desc), TxdOp::getLen(desc)); 1066 useTso = true; 1067 tsoTotalLen = TxdOp::getLen(desc); 1068 tsoLoadedHeader = false; 1069 tsoDescBytesUsed = 0; 1070 tsoUsedLen = 0; 1071 tsoPrevSeq = 0; 1072 tsoPktHasHeader = false; 1073 tsoPkts = 0; 1074 1075 } 1076 1077 TxdOp::setDd(desc); 1078 unusedCache.pop_front(); 1079 usedCache.push_back(desc); 1080 } 1081 1082 if (!unusedCache.size()) 1083 return; 1084 1085 desc = unusedCache.front(); 1086 if (!useTso && TxdOp::isType(desc, TxdOp::TXD_ADVDATA) && TxdOp::tse(desc)) { 1087 DPRINTF(EthernetDesc, "TCP offload(adv) enabled for packet hdrlen: %d mss: %d paylen %d\n", 1088 tsoHeaderLen, tsoMss, TxdOp::getTsoLen(desc)); 1089 useTso = true; 1090 tsoTotalLen = TxdOp::getTsoLen(desc); 1091 tsoLoadedHeader = false; 1092 tsoDescBytesUsed = 0; 1093 tsoUsedLen = 0; 1094 tsoPrevSeq = 0; 1095 tsoPktHasHeader = false; 1096 tsoPkts = 0; 1097 } 1098 1099 if (useTso && !tsoLoadedHeader) { 1100 // we 
        // NOTE(review): the start of this function lies before this chunk;
        // the fragment below is the tail of the TSO header-fetch path:
        // need to fetch a header
        DPRINTF(EthernetDesc, "Starting DMA of TSO header\n");
        assert(TxdOp::isData(desc) && TxdOp::getLen(desc) >= tsoHeaderLen);
        pktWaiting = true;
        // tsoHeader is a fixed 256-byte buffer (see serialize()); the header
        // must fit entirely within it
        assert(tsoHeaderLen <= 256);
        igbe->dmaRead(igbe->platform->pciToDma(TxdOp::getBuf(desc)),
                tsoHeaderLen, &headerEvent, tsoHeader, 0);
    }
}

// Called when the DMA read of the TSO header completes. Retires the
// header-only descriptor and restarts the TX state machine.
void
IGbE::TxDescCache::headerComplete()
{
    DPRINTF(EthernetDesc, "TSO: Fetching TSO header complete\n");
    pktWaiting = false;

    assert(unusedCache.size());
    TxDesc *desc = unusedCache.front();
    DPRINTF(EthernetDesc, "TSO: len: %d tsoHeaderLen: %d\n",
            TxdOp::getLen(desc), tsoHeaderLen);

    if (TxdOp::getLen(desc) == tsoHeaderLen) {
        // The descriptor contained exactly the header: consume it whole.
        tsoDescBytesUsed = 0;
        tsoLoadedHeader = true;
        unusedCache.pop_front();
        usedCache.push_back(desc);
    } else {
        // I don't think this case happens, I think the header is always
        // its own packet, if it wasn't it might be as simple as just
        // incrementing descBytesUsed by the header length, but I'm not
        // completely sure
        panic("TSO header part of bigger packet, not implemented\n");
    }
    enableSm();
    igbe->checkDrain();
}

// Return the size in bytes of the next packet (or packet segment) that the
// front descriptor will produce, or -1 if no descriptor is cached.
// For TSO, the size is capped at the MSS (plus the prepended header for the
// first segment of each packet).
int
IGbE::TxDescCache::getPacketSize(EthPacketPtr p)
{
    TxDesc *desc;


    if (!unusedCache.size())
        return -1;

    DPRINTF(EthernetDesc, "Starting processing of descriptor\n");

    // TSO requires the header to have been DMAed in before any payload work
    assert(!useTso || tsoLoadedHeader);
    desc = unusedCache.front();


    if (useTso) {
        DPRINTF(EthernetDesc, "getPacket(): TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
        DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d used: %d loaded hdr: %d\n",
                useTso, tsoHeaderLen, tsoMss, tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
        DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d this descLen: %d\n",
                tsoDescBytesUsed, tsoCopyBytes, TxdOp::getLen(desc));
        DPRINTF(EthernetDesc, "TSO: pktHasHeader: %d\n", tsoPktHasHeader);

        // Copy whichever is smaller: the room left in this segment
        // (mss [+ header] minus what's already in the packet) or the bytes
        // left in this descriptor.
        if (tsoPktHasHeader)
            tsoCopyBytes = std::min((tsoMss + tsoHeaderLen) - p->length, TxdOp::getLen(desc) - tsoDescBytesUsed);
        else
            tsoCopyBytes = std::min(tsoMss, TxdOp::getLen(desc) - tsoDescBytesUsed);
        // First segment of a packet also carries the prepended header
        Addr pkt_size = tsoCopyBytes + (tsoPktHasHeader ? 0 : tsoHeaderLen);
        DPRINTF(EthernetDesc, "TSO: Next packet is %d bytes\n", pkt_size);
        return pkt_size;
    }

    DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
            TxdOp::getLen(unusedCache.front()));
    return TxdOp::getLen(desc);
}

// Begin the DMA of the next packet's payload out of guest memory into the
// EthPacket buffer p (appending at p->length). For TSO, first copies the
// previously-fetched header into the front of a fresh segment.
// pktEvent fires (pktComplete()) when the DMA finishes.
void
IGbE::TxDescCache::getPacketData(EthPacketPtr p)
{
    assert(unusedCache.size());

    TxDesc *desc;
    desc = unusedCache.front();

    DPRINTF(EthernetDesc, "getPacketData(): TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc));

    pktPtr = p;

    pktWaiting = true;

    DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d\n", p->length);

    if (useTso) {
        assert(tsoLoadedHeader);
        if (!tsoPktHasHeader) {
            // New TSO segment: replicate the saved protocol header at the
            // front of the packet before any payload bytes
            DPRINTF(EthernetDesc, "Loading TSO header (%d bytes) into start of packet\n",
                    tsoHeaderLen);
            memcpy(p->data, &tsoHeader,tsoHeaderLen);
            p->length +=tsoHeaderLen;
            tsoPktHasHeader = true;
        }
    }

    if (useTso) {
        tsoDescBytesUsed += tsoCopyBytes;
        assert(tsoDescBytesUsed <= TxdOp::getLen(desc));
        DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d length: %d\n",
                p->length, tsoCopyBytes);
        // Read only the slice of the descriptor buffer this segment needs
        igbe->dmaRead(igbe->platform->pciToDma(TxdOp::getBuf(desc)) + tsoDescBytesUsed,
                tsoCopyBytes, &pktEvent, p->data + p->length, igbe->txReadDelay);
    } else {
        igbe->dmaRead(igbe->platform->pciToDma(TxdOp::getBuf(desc)),
                TxdOp::getLen(desc), &pktEvent, p->data + p->length, igbe->txReadDelay);
    }
}

// Called when a packet-payload DMA completes. Handles multi-descriptor
// packets, TSO header rewriting (IP id/len, TCP seq/flags, UDP len),
// checksum offload, delayed-interrupt timers, descriptor retirement and
// conditional writeback, then hands the packet back to the state machine.
void
IGbE::TxDescCache::pktComplete()
{

    TxDesc *desc;
    assert(unusedCache.size());
    assert(pktPtr);

    DPRINTF(EthernetDesc, "DMA of packet complete\n");


    desc = unusedCache.front();
    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc));

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
    DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d used: %d loaded hdr: %d\n",
            useTso, tsoHeaderLen, tsoMss, tsoTotalLen, tsoUsedLen, tsoLoadedHeader);

    // Set the length of the data in the EtherPacket
    if (useTso) {
        pktPtr->length += tsoCopyBytes;
        tsoUsedLen += tsoCopyBytes;
    } else
        pktPtr->length += TxdOp::getLen(desc);

    DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d\n",
            tsoDescBytesUsed, tsoCopyBytes);


    // Packet not finished yet: either a non-EOP legacy/data descriptor, or a
    // TSO segment that hasn't reached the MSS and still has payload to come.
    // Retire the descriptor but keep accumulating into the same packet.
    if ((!TxdOp::eop(desc) && !useTso) ||
            (pktPtr->length < ( tsoMss + tsoHeaderLen) && tsoTotalLen != tsoUsedLen)) {
        assert(!useTso || (tsoDescBytesUsed == TxdOp::getLen(desc)));
        unusedCache.pop_front();
        usedCache.push_back(desc);

        tsoDescBytesUsed = 0;
        pktDone = true;
        pktWaiting = false;
        pktMultiDesc = true;

        DPRINTF(EthernetDesc, "Partial Packet Descriptor of %d bytes Done\n",
                pktPtr->length);
        pktPtr = NULL;

        enableSm();
        igbe->checkDrain();
        return;
    }


    pktMultiDesc = false;
    // no support for vlans
    assert(!TxdOp::vle(desc));

    // we only support single packet descriptors at this point
    if (!useTso)
        assert(TxdOp::eop(desc));

    // set that this packet is done
    if (TxdOp::rs(desc))
        TxdOp::setDd(desc);

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2);

    if (useTso) {
        // Each emitted segment needs its own IP id, length, and TCP sequence
        // number, derived from the template header we loaded earlier.
        IpPtr ip(pktPtr);
        if (ip) {
            DPRINTF(EthernetDesc, "TSO: Modifying IP header. Id + %d\n",
                    tsoPkts);
            ip->id(ip->id() + tsoPkts++);
            ip->len(pktPtr->length - EthPtr(pktPtr)->size());

            TcpPtr tcp(ip);
            if (tcp) {
                DPRINTF(EthernetDesc, "TSO: Modifying TCP header. old seq %d + %d\n",
                        tcp->seq(), tsoPrevSeq);
                tcp->seq(tcp->seq() + tsoPrevSeq);
                // FIN/PSH may only be set on the final segment
                if (tsoUsedLen != tsoTotalLen)
                    tcp->flags(tcp->flags() & ~9); // clear fin & psh
            }
            UdpPtr udp(ip);
            if (udp) {
                DPRINTF(EthernetDesc, "TSO: Modifying UDP header.\n");
                udp->len(pktPtr->length - EthPtr(pktPtr)->size());
            }
        }
        // Next segment's seq offset is the payload consumed so far
        tsoPrevSeq = tsoUsedLen;
    }

    if (DTRACE(EthernetDesc)) {
        IpPtr ip(pktPtr);
        if (ip)
            DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n",
                    ip->id());
        else
            DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
    }

    // Checksums are only offloaded for new descriptor types
    if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) {
        DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
        IpPtr ip(pktPtr);
        assert(ip);
        if (TxdOp::ixsm(desc)) {
            // Zero the field first so it doesn't pollute the new checksum
            ip->sum(0);
            ip->sum(cksum(ip));
            igbe->txIpChecksums++;
            DPRINTF(EthernetDesc, "Calculated IP checksum\n");
        }
        if (TxdOp::txsm(desc)) {
            TcpPtr tcp(ip);
            UdpPtr udp(ip);
            if (tcp) {
                tcp->sum(0);
                tcp->sum(cksum(tcp));
                igbe->txTcpChecksums++;
                DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
            } else if (udp) {
                assert(udp);
                udp->sum(0);
                udp->sum(cksum(udp));
                igbe->txUdpChecksums++;
                DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
            } else {
                panic("Told to checksum, but don't know how\n");
            }
        }
    }

    if (TxdOp::ide(desc)) {
        // Deal with the rx timer interrupts
        DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
        if (igbe->regs.tidv.idv()) {
            DPRINTF(EthernetDesc, "setting tidv\n");
            // reschedule(..., true): restart the delay timer on each packet
            igbe->reschedule(igbe->tidvEvent,
                    curTick + igbe->regs.tidv.idv() * igbe->intClock(), true);
        }

        if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
            DPRINTF(EthernetDesc, "setting tadv\n");
            // The absolute timer is NOT restarted once scheduled
            if (!igbe->tadvEvent.scheduled()) {
                igbe->schedule(igbe->tadvEvent,
                        curTick + igbe->regs.tadv.idv() * igbe->intClock());
            }
        }
    }


    // Retire the descriptor unless TSO still has payload left in it
    if (!useTso || TxdOp::getLen(desc) == tsoDescBytesUsed) {
        DPRINTF(EthernetDesc, "Descriptor Done\n");
        unusedCache.pop_front();
        usedCache.push_back(desc);
        tsoDescBytesUsed = 0;
    }

    // All segments emitted: leave TSO mode
    if (useTso && tsoUsedLen == tsoTotalLen)
        useTso = false;


    DPRINTF(EthernetDesc, "------Packet of %d bytes ready for transmission-------\n",
            pktPtr->length);
    pktDone = true;
    pktWaiting = false;
    pktPtr = NULL;
    tsoPktHasHeader = false;

    // Write back retired descriptors per TXDCTL WTHRESH policy
    if (igbe->regs.txdctl.wthresh() == 0) {
        DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
        writeback(0);
    } else if (igbe->regs.txdctl.gran() && igbe->regs.txdctl.wthresh() >=
            descInBlock(usedCache.size())) {
        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
        writeback((igbe->cacheBlockSize()-1)>>4);
    } else if (igbe->regs.txdctl.wthresh() >= usedCache.size()) {
        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
        writeback((igbe->cacheBlockSize()-1)>>4);
    }

    enableSm();
    igbe->checkDrain();
}

// After a descriptor writeback: raise the TX-descriptor-written interrupt
// and, if the head writeback feature is enabled, DMA the new head pointer to
// the driver-supplied completion address.
void
IGbE::TxDescCache::actionAfterWb()
{
    DPRINTF(EthernetDesc, "actionAfterWb() completionEnabled: %d\n",
            completionEnabled);
    igbe->postInterrupt(iGbReg::IT_TXDW);
    if (completionEnabled) {
        descEnd = igbe->regs.tdh();
        DPRINTF(EthernetDesc, "Completion writing back value: %d to addr: %#x\n", descEnd,
                completionAddress);
        // Completion address is dword-aligned: mask off the low 2 bits
        igbe->dmaWrite(igbe->platform->pciToDma(mbits(completionAddress, 63, 2)),
                sizeof(descEnd), &nullEvent, (uint8_t*)&descEnd, 0);
    }
}

// Checkpoint the TX descriptor cache state (packet-in-flight flags, all TSO
// bookkeeping including the raw header bytes, and head writeback state).
// Field order must match unserialize() exactly.
void
IGbE::TxDescCache::serialize(std::ostream &os)
{
    DescCache<TxDesc>::serialize(os);
    SERIALIZE_SCALAR(pktDone);
    SERIALIZE_SCALAR(isTcp);
    SERIALIZE_SCALAR(pktWaiting);
    SERIALIZE_SCALAR(pktMultiDesc);

    SERIALIZE_SCALAR(useTso);
    SERIALIZE_SCALAR(tsoHeaderLen);
    SERIALIZE_SCALAR(tsoMss);
    SERIALIZE_SCALAR(tsoTotalLen);
    SERIALIZE_SCALAR(tsoUsedLen);
    SERIALIZE_SCALAR(tsoPrevSeq);;
    SERIALIZE_SCALAR(tsoPktPayloadBytes);
    SERIALIZE_SCALAR(tsoLoadedHeader);
    SERIALIZE_SCALAR(tsoPktHasHeader);
    SERIALIZE_ARRAY(tsoHeader, 256);
    SERIALIZE_SCALAR(tsoDescBytesUsed);
    SERIALIZE_SCALAR(tsoCopyBytes);
    SERIALIZE_SCALAR(tsoPkts);

    SERIALIZE_SCALAR(completionAddress);
    SERIALIZE_SCALAR(completionEnabled);
    SERIALIZE_SCALAR(descEnd);
}

// Restore the TX descriptor cache state; mirror image of serialize().
void
IGbE::TxDescCache::unserialize(Checkpoint *cp, const std::string &section)
{
    DescCache<TxDesc>::unserialize(cp, section);
    UNSERIALIZE_SCALAR(pktDone);
    UNSERIALIZE_SCALAR(isTcp);
    UNSERIALIZE_SCALAR(pktWaiting);
    UNSERIALIZE_SCALAR(pktMultiDesc);

    UNSERIALIZE_SCALAR(useTso);
    UNSERIALIZE_SCALAR(tsoHeaderLen);
    UNSERIALIZE_SCALAR(tsoMss);
    UNSERIALIZE_SCALAR(tsoTotalLen);
    UNSERIALIZE_SCALAR(tsoUsedLen);
    UNSERIALIZE_SCALAR(tsoPrevSeq);;
    UNSERIALIZE_SCALAR(tsoPktPayloadBytes);
    UNSERIALIZE_SCALAR(tsoLoadedHeader);
    UNSERIALIZE_SCALAR(tsoPktHasHeader);
    UNSERIALIZE_ARRAY(tsoHeader, 256);
    UNSERIALIZE_SCALAR(tsoDescBytesUsed);
    UNSERIALIZE_SCALAR(tsoCopyBytes);
    UNSERIALIZE_SCALAR(tsoPkts);

    UNSERIALIZE_SCALAR(completionAddress);
    UNSERIALIZE_SCALAR(completionEnabled);
    UNSERIALIZE_SCALAR(descEnd);
}

// Consume-on-read flag: returns true exactly once per completed packet.
bool
IGbE::TxDescCache::packetAvailable()
{
    if (pktDone) {
        pktDone = false;
        return true;
    }
    return false;
}

// Restart the TX state machine, unless a drain is in progress.
void
IGbE::TxDescCache::enableSm()
{
    if (!igbe->drainEvent) {
        igbe->txTick = true;
        igbe->restartClock();
    }
}

// True while any TX-side DMA/writeback/fetch event is still scheduled;
// used by drain() to decide whether the device is quiescent.
bool
IGbE::TxDescCache::hasOutstandingEvents()
{
    return pktEvent.scheduled() || wbEvent.scheduled() ||
        fetchEvent.scheduled();
}


///////////////////////////////////// IGbE /////////////////////////////////

// (Re)start the main tick event if any state machine wants to run,
// aligned to the next device clock edge.
void
IGbE::restartClock()
{
    if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) &&
            getState() == SimObject::Running)
        schedule(tickEvent, (curTick / ticks(1)) * ticks(1) + ticks(1));
}

// Begin draining: stop all state machines and report how many outstanding
// activities (port drains plus descriptor-cache events) must finish first.
unsigned int
IGbE::drain(Event *de)
{
    unsigned int count;
    count = pioPort->drain(de) + dmaPort->drain(de);
    if (rxDescCache.hasOutstandingEvents() ||
            txDescCache.hasOutstandingEvents()) {
        count++;
        // Remember the drain event so checkDrain() can signal completion
        drainEvent = de;
    }

    txFifoTick = false;
    txTick = false;
    rxTick = false;

    if (tickEvent.scheduled())
        deschedule(tickEvent);

    if (count)
        changeState(Draining);
    else
        changeState(Drained);

    return count;
}

// Resume after a drain/checkpoint: re-enable all state machines.
void
IGbE::resume()
{
    SimObject::resume();

    txFifoTick = true;
    txTick = true;
    rxTick = true;

    restartClock();
}

// If a drain is pending and both descriptor caches have gone quiet,
// complete the drain.
void
IGbE::checkDrain()
{
    if (!drainEvent)
        return;

    txFifoTick = false;
    txTick = false;
    rxTick = false;
    if (!rxDescCache.hasOutstandingEvents() &&
            !txDescCache.hasOutstandingEvents()) {
        drainEvent->process();
        drainEvent = NULL;
    }
}

// One step of the TX state machine: move a finished packet into the TX FIFO,
// post low-threshold interrupts, fetch/process descriptors, and start the
// DMA of the next packet when FIFO space allows. Clears txTick whenever
// there is nothing to do until some event re-enables it.
void
IGbE::txStateMachine()
{
    if (!regs.tctl.en()) {
        txTick = false;
        DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
        return;
    }

    // If we have a packet available and it's length is not 0 (meaning it's not
    // a multidescriptor packet) put it in the fifo, otherwise an the next
    // iteration we'll get the rest of the data
    if (txPacket && txDescCache.packetAvailable()
            && !txDescCache.packetMultiDesc() && txPacket->length) {
        bool success;

        DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
        success = txFifo.push(txPacket);
        txFifoTick = true && !drainEvent;
        assert(success);
        txPacket = NULL;
        txDescCache.writeback((cacheBlockSize()-1)>>4);
        return;
    }

    // Only support descriptor granularity
    if (regs.txdctl.lwthresh() && txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
        DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
        postInterrupt(IT_TXDLOW);
    }

    if (!txPacket) {
        // Fresh maximum-size buffer for the next packet to accumulate into
        txPacket = new EthPacketData(16384);
    }

    if (!txDescCache.packetWaiting()) {
        if (txDescCache.descLeft() == 0) {
            postInterrupt(IT_TXQE);
            txDescCache.writeback(0);
            txDescCache.fetchDescriptors();
            DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
                    "writeback stopping ticking and posting TXQE\n");
            txTick = false;
            return;
        }


        if (!(txDescCache.descUnused())) {
            txDescCache.fetchDescriptors();
            DPRINTF(EthernetSM, "TXS: No descriptors available in cache, fetching and stopping ticking\n");
            txTick = false;
            return;
        }


        txDescCache.processContextDesc();
        if (txDescCache.packetWaiting()) {
            DPRINTF(EthernetSM, "TXS: Fetching TSO header, stopping ticking\n");
            txTick = false;
            return;
        }

        int size;
        size = txDescCache.getPacketSize(txPacket);
        if (size > 0 && txFifo.avail() > size) {
            DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and begining "
                    "DMA of next packet\n", size);
            // Reserve FIFO space up front so the DMA can't overfill it
            txFifo.reserve(size);
            txDescCache.getPacketData(txPacket);
        } else if (size <= 0) {
            DPRINTF(EthernetSM, "TXS: getPacketSize returned: %d\n", size);
            DPRINTF(EthernetSM, "TXS: No packets to get, writing back used descriptors\n");
            txDescCache.writeback(0);
        } else {
            DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
                    "available in FIFO\n");
            txTick = false;
        }


        return;
    }
    DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
    txTick = false;
}

// A packet arrived from the wire: count it, and either queue it into the RX
// FIFO (waking the RX state machine) or drop it, posting RXO on overflow.
// Returns false only when the FIFO was full (packet dropped).
bool
IGbE::ethRxPkt(EthPacketPtr pkt)
{
    rxBytes += pkt->length;
    rxPackets++;

    DPRINTF(Ethernet, "RxFIFO: Receiving pcakte from wire\n");

    if (!regs.rctl.en()) {
        // RX disabled: silently accept and discard
        DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
        return true;
    }

    // restart the state machines if they are stopped
    rxTick = true && !drainEvent;
    if ((rxTick || txTick) && !tickEvent.scheduled()) {
        DPRINTF(EthernetSM, "RXS: received packet into fifo, starting ticking\n");
        restartClock();
    }

    if (!rxFifo.push(pkt)) {
        DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
        postInterrupt(IT_RXO, true);
        return false;
    }

    return true;
}


// One step of the RX state machine: finish a completed packet DMA (posting
// RXDMT per the RDMTS threshold, writing back and refetching descriptors as
// thresholds dictate), or start the DMA of the next FIFO packet into memory.
void
IGbE::rxStateMachine()
{
    if (!regs.rctl.en()) {
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
        return;
    }

    // If the packet is done check for interrupts/descriptors/etc
    if (rxDescCache.packetDone()) {
        rxDmaPacket = false;
        DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
        int descLeft = rxDescCache.descLeft();
        // Deliberate fall-through: each RDMTS setting implies a minimum
        // free-descriptor threshold; if none break, RXDMT is posted
        switch (regs.rctl.rdmts()) {
            case 2: if (descLeft > .125 * regs.rdlen()) break;
            case 1: if (descLeft > .250 * regs.rdlen()) break;
            case 0: if (descLeft > .500 * regs.rdlen()) break;
                DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) because of descriptors left\n");
                postInterrupt(IT_RXDMT);
                break;
        }

        if (descLeft == 0) {
            rxDescCache.writeback(0);
            DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
                    " writeback and stopping ticking\n");
            rxTick = false;
        }

        // only support descriptor granularities
        assert(regs.rxdctl.gran());

        if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
            DPRINTF(EthernetSM, "RXS: Writing back because WTHRESH >= descUsed\n");
            if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
                rxDescCache.writeback(regs.rxdctl.wthresh()-1);
            else
                rxDescCache.writeback((cacheBlockSize()-1)>>4);
        }

        if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
                ((rxDescCache.descLeft() - rxDescCache.descUnused()) > regs.rxdctl.hthresh())) {
            DPRINTF(EthernetSM, "RXS: Fetching descriptors because descUnused < PTHRESH\n");
            rxDescCache.fetchDescriptors();
        }

        if (rxDescCache.descUnused() == 0) {
            rxDescCache.fetchDescriptors();
            DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
                    "fetching descriptors and stopping ticking\n");
            rxTick = false;
        }
        return;
    }

    if (rxDmaPacket) {
        // A packet DMA is in flight; packetDone() will wake us
        DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
        rxTick = false;
        return;
    }

    if (!rxDescCache.descUnused()) {
        rxDescCache.fetchDescriptors();
        DPRINTF(EthernetSM, "RXS: No descriptors available in cache, stopping ticking\n");
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n");
        return;
    }

    if (rxFifo.empty()) {
        DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
        rxTick = false;
        return;
    }

    EthPacketPtr pkt;
    pkt = rxFifo.front();


    rxDescCache.writePacket(pkt);
    DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
    DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
    rxFifo.pop();
    DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
    rxTick = false;
    rxDmaPacket = true;
}

// Try to put the packet at the front of the TX FIFO onto the wire.
// If the interface can't take it now, ethTxDone() will retry later.
void
IGbE::txWire()
{
    if (txFifo.empty()) {
        txFifoTick = false;
        return;
    }


    if (etherInt->sendPacket(txFifo.front())) {
        if (DTRACE(EthernetSM)) {
            IpPtr ip(txFifo.front());
            if (ip)
                DPRINTF(EthernetSM, "Transmitting Ip packet with Id=%d\n",
                        ip->id());
            else
                DPRINTF(EthernetSM, "Transmitting Non-Ip packet\n");
        }
        DPRINTF(EthernetSM, "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
                txFifo.avail());

        txBytes += txFifo.front()->length;
        txPackets++;
        txFifoTick = false;

        txFifo.pop();
    } else {
        // We'll get woken up when the packet ethTxDone() gets called
        txFifoTick = false;
    }
}

// Main device clock tick: run whichever state machines are enabled and
// reschedule ourselves while any of them still wants to run.
void
IGbE::tick()
{
    DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");

    if (rxTick)
        rxStateMachine();

    if (txTick)
        txStateMachine();

    if (txFifoTick)
        txWire();


    if (rxTick || txTick || txFifoTick)
        schedule(tickEvent, curTick + ticks(1));
}

// The wire finished transmitting our packet; wake the TX paths back up
// (unless a drain is in progress) and immediately try the next packet.
void
IGbE::ethTxDone()
{
    // restart the tx state machines if they are stopped
    // fifo to send another packet
    // tx sm to put more data into the fifo
    txFifoTick = true && !drainEvent;
    if (txDescCache.descLeft() != 0 && !drainEvent)
        txTick = true;

    restartClock();
    txWire();
    DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
}

// Checkpoint the whole device: PCI/register state, EEPROM bits, FIFOs, any
// in-flight TX packet, pending timer-event firing times, and both
// descriptor caches (each in its own named section).
void
IGbE::serialize(std::ostream &os)
{
    PciDev::serialize(os);

    regs.serialize(os);
    SERIALIZE_SCALAR(eeOpBits);
    SERIALIZE_SCALAR(eeAddrBits);
    SERIALIZE_SCALAR(eeDataBits);
    SERIALIZE_SCALAR(eeOpcode);
    SERIALIZE_SCALAR(eeAddr);
    SERIALIZE_SCALAR(lastInterrupt);
    SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.serialize("rxfifo", os);
    txFifo.serialize("txfifo", os);

    bool txPktExists = txPacket;
    SERIALIZE_SCALAR(txPktExists);
    if (txPktExists)
        txPacket->serialize("txpacket", os);

    // Record each timer event's firing tick; 0 means "not scheduled"
    Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
        inter_time = 0;

    if (rdtrEvent.scheduled())
        rdtr_time = rdtrEvent.when();
    SERIALIZE_SCALAR(rdtr_time);

    if (radvEvent.scheduled())
        radv_time = radvEvent.when();
    SERIALIZE_SCALAR(radv_time);

    if (tidvEvent.scheduled())
        tidv_time = tidvEvent.when();
    SERIALIZE_SCALAR(tidv_time);

    if (tadvEvent.scheduled())
        tadv_time = tadvEvent.when();
    SERIALIZE_SCALAR(tadv_time);

    if (interEvent.scheduled())
        inter_time = interEvent.when();
    SERIALIZE_SCALAR(inter_time);

    nameOut(os, csprintf("%s.TxDescCache", name()));
    txDescCache.serialize(os);

    nameOut(os, csprintf("%s.RxDescCache", name()));
    rxDescCache.serialize(os);
}

// Restore the whole device from a checkpoint; mirror image of serialize().
// All state machines are re-enabled; timer events are rescheduled only if
// they were pending (non-zero firing time) at checkpoint time.
void
IGbE::unserialize(Checkpoint *cp, const std::string &section)
{
    PciDev::unserialize(cp, section);

    regs.unserialize(cp, section);
    UNSERIALIZE_SCALAR(eeOpBits);
    UNSERIALIZE_SCALAR(eeAddrBits);
    UNSERIALIZE_SCALAR(eeDataBits);
    UNSERIALIZE_SCALAR(eeOpcode);
    UNSERIALIZE_SCALAR(eeAddr);
    UNSERIALIZE_SCALAR(lastInterrupt);
    UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.unserialize("rxfifo", cp, section);
    txFifo.unserialize("txfifo", cp, section);

    bool txPktExists;
    UNSERIALIZE_SCALAR(txPktExists);
    if (txPktExists) {
        txPacket = new EthPacketData(16384);
        txPacket->unserialize("txpacket", cp, section);
    }

    rxTick = true;
    txTick = true;
    txFifoTick = true;

    Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
    UNSERIALIZE_SCALAR(rdtr_time);
    UNSERIALIZE_SCALAR(radv_time);
    UNSERIALIZE_SCALAR(tidv_time);
    UNSERIALIZE_SCALAR(tadv_time);
    UNSERIALIZE_SCALAR(inter_time);

    if (rdtr_time)
        schedule(rdtrEvent, rdtr_time);

    if (radv_time)
        schedule(radvEvent, radv_time);

    if (tidv_time)
        schedule(tidvEvent, tidv_time);

    if (tadv_time)
        schedule(tadvEvent, tadv_time);

    if (inter_time)
        schedule(interEvent, inter_time);

    txDescCache.unserialize(cp, csprintf("%s.TxDescCache", section));

    rxDescCache.unserialize(cp, csprintf("%s.RxDescCache", section));
}

// Python-generated parameter object factory: build the device.
IGbE *
IGbEParams::create()
{
    return new IGbE(this);
}