i8254xGBe.cc revision 5538
/*
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 */

/* @file
 * Device model for Intel's 8254x line of gigabit ethernet controllers.
 * In particular an 82547 revision 2 (82547GI) MAC because it seems to have
 * the fewest workarounds in the driver. It will probably work with most of
 * the other MACs with slight modifications.
 */


/*
 * @todo really there are multiple dma engines.. we should implement them.
 */

#include <algorithm>

#include "base/inet.hh"
#include "base/trace.hh"
#include "dev/i8254xGBe.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/IGbE.hh"
#include "sim/stats.hh"
#include "sim/system.hh"

using namespace iGbReg;
using namespace Net;

IGbE::IGbE(const Params *p)
    : EtherDevice(p), etherInt(NULL), drainEvent(NULL),
      useFlowControl(p->use_flow_control),
      rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), rxTick(false),
      txTick(false), txFifoTick(false), rxDmaPacket(false),
      fetchDelay(p->fetch_delay), wbDelay(p->wb_delay),
      fetchCompDelay(p->fetch_comp_delay), wbCompDelay(p->wb_comp_delay),
      rxWriteDelay(p->rx_write_delay), txReadDelay(p->tx_read_delay),
      rdtrEvent(this), radvEvent(this),
      tadvEvent(this), tidvEvent(this), tickEvent(this), interEvent(this),
      rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size),
      txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size),
      clock(p->clock), lastInterrupt(0)
{
    etherInt = new IGbEInt(name() + ".int", this);

    // Initialize internal registers per Intel documentation
    // All registers are initialized to 0 by the per-register constructor
    regs.ctrl.fd(1);
    regs.ctrl.lrst(1);
    regs.ctrl.speed(2);
    regs.ctrl.frcspd(1);
    regs.sts.speed(3); // Say we're 1000Mbps
    regs.sts.fd(1); // full duplex
    regs.sts.lu(1); // link up
    regs.eecd.fwe(1);
    regs.eecd.ee_type(1);
    regs.imr = 0;
    regs.iam = 0;
    regs.rxdctl.gran(1);
    regs.rxdctl.wthresh(1);
    regs.fcrth(1);

    regs.pba.rxa(0x30);
    regs.pba.txa(0x10);

    eeOpBits = 0;
    eeAddrBits = 0;
    eeDataBits = 0;
    eeOpcode = 0;

    // clear all 64 16-bit words of the eeprom
    memset(&flash, 0, EEPROM_SIZE*2);

    // Set the MAC address
    memcpy(flash, p->hardware_address.bytes(), ETH_ADDR_LEN);
    for (int x = 0; x < ETH_ADDR_LEN/2; x++)
        flash[x] = htobe(flash[x]);

    uint16_t csum = 0;
    for (int x = 0; x < EEPROM_SIZE; x++)
        csum += htobe(flash[x]);


    // Magic happy checksum value
    flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum));

    rxFifo.clear();
    txFifo.clear();
}

EtherInt*
IGbE::getEthPort(const std::string &if_name, int idx)
{

    if (if_name == "interface") {
        if (etherInt->getPeer())
            panic("Port already connected to\n");
        return etherInt;
    }
    return NULL;
}

Tick
IGbE::writeConfig(PacketPtr pkt)
{
    int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::writeConfig(pkt);
    else
        panic("Device specific PCI config space not implemented.\n");

    ///
    /// Some work may need to be done here based on the PCI COMMAND bits.
    ///

    return pioDelay;
}

Tick
IGbE::read(PacketPtr pkt)
{
    int bar;
    Addr daddr;

    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    // Only 32bit accesses allowed
    assert(pkt->getSize() == 4);

    DPRINTF(Ethernet, "Read device register %#X\n", daddr);

    pkt->allocate();

    ///
    /// Handle read of register here
    ///


    switch (daddr) {
      case REG_CTRL:
        pkt->set<uint32_t>(regs.ctrl());
        break;
      case REG_STATUS:
        pkt->set<uint32_t>(regs.sts());
        break;
      case REG_EECD:
        pkt->set<uint32_t>(regs.eecd());
        break;
      case REG_EERD:
        pkt->set<uint32_t>(regs.eerd());
        break;
      case REG_CTRL_EXT:
        pkt->set<uint32_t>(regs.ctrl_ext());
        break;
      case REG_MDIC:
        pkt->set<uint32_t>(regs.mdic());
        break;
      case REG_ICR:
        DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs.icr(),
                regs.imr, regs.iam, regs.ctrl_ext.iame());
        pkt->set<uint32_t>(regs.icr());
        if (regs.icr.int_assert() || regs.imr == 0) {
            regs.icr = regs.icr() & ~mask(30);
            DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr());
        }
        if (regs.ctrl_ext.iame() && regs.icr.int_assert())
            regs.imr &= ~regs.iam;
        chkInterrupt();
        break;
      case REG_ITR:
        pkt->set<uint32_t>(regs.itr());
        break;
      case REG_RCTL:
        pkt->set<uint32_t>(regs.rctl());
        break;
      case REG_FCTTV:
        pkt->set<uint32_t>(regs.fcttv());
        break;
      case REG_TCTL:
        pkt->set<uint32_t>(regs.tctl());
        break;
      case REG_PBA:
        pkt->set<uint32_t>(regs.pba());
        break;
      case REG_WUC:
      case REG_LEDCTL:
        pkt->set<uint32_t>(0); // We don't care, so just return 0
        break;
      case REG_FCRTL:
        pkt->set<uint32_t>(regs.fcrtl());
        break;
      case REG_FCRTH:
        pkt->set<uint32_t>(regs.fcrth());
        break;
      case REG_RDBAL:
        pkt->set<uint32_t>(regs.rdba.rdbal());
        break;
      case REG_RDBAH:
        pkt->set<uint32_t>(regs.rdba.rdbah());
        break;
      case REG_RDLEN:
        pkt->set<uint32_t>(regs.rdlen());
        break;
      case REG_RDH:
        pkt->set<uint32_t>(regs.rdh());
        break;
      case REG_RDT:
        pkt->set<uint32_t>(regs.rdt());
        break;
      case REG_RDTR:
        pkt->set<uint32_t>(regs.rdtr());
        if (regs.rdtr.fpd()) {
            rxDescCache.writeback(0);
            DPRINTF(EthernetIntr, "Posting interrupt because of RDTR.FPD write\n");
            postInterrupt(IT_RXT);
            regs.rdtr.fpd(0);
        }
        break;
      case REG_RADV:
        pkt->set<uint32_t>(regs.radv());
        break;
      case REG_TDBAL:
        pkt->set<uint32_t>(regs.tdba.tdbal());
        break;
      case REG_TDBAH:
        pkt->set<uint32_t>(regs.tdba.tdbah());
        break;
      case REG_TDLEN:
        pkt->set<uint32_t>(regs.tdlen());
        break;
      case REG_TDH:
        pkt->set<uint32_t>(regs.tdh());
        break;
      case REG_TDT:
        pkt->set<uint32_t>(regs.tdt());
        break;
      case REG_TIDV:
        pkt->set<uint32_t>(regs.tidv());
        break;
      case REG_TXDCTL:
        pkt->set<uint32_t>(regs.txdctl());
        break;
      case REG_TADV:
        pkt->set<uint32_t>(regs.tadv());
        break;
      case REG_RXCSUM:
        pkt->set<uint32_t>(regs.rxcsum());
        break;
      case REG_MANC:
        pkt->set<uint32_t>(regs.manc());
        break;
      default:
        if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) &&
            !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) &&
            !(daddr >= REG_MTA && daddr <
              (REG_MTA + MULTICAST_TABLE_SIZE*4)) &&
            !(daddr >= REG_CRCERRS && daddr < (REG_CRCERRS + STATS_REGS_SIZE)))
            panic("Read request to unknown register number: %#x\n", daddr);
        else
            pkt->set<uint32_t>(0);
    };

    pkt->makeAtomicResponse();
    return pioDelay;
}

Tick
IGbE::write(PacketPtr pkt)
{
    int bar;
    Addr daddr;


    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    // Only 32bit accesses allowed
    assert(pkt->getSize() == sizeof(uint32_t));

    DPRINTF(Ethernet, "Wrote device register %#X value %#X\n",
            daddr, pkt->get<uint32_t>());

    ///
    /// Handle write of register here
    ///
    uint32_t val = pkt->get<uint32_t>();

    Regs::RCTL oldrctl;
    Regs::TCTL oldtctl;

    switch (daddr) {
      case REG_CTRL:
        regs.ctrl = val;
        if (regs.ctrl.tfce())
            warn("TX Flow control enabled, should implement\n");
        if (regs.ctrl.rfce())
            warn("RX Flow control enabled, should implement\n");
        break;
      case REG_CTRL_EXT:
        regs.ctrl_ext = val;
        break;
      case REG_STATUS:
        regs.sts = val;
        break;
      case REG_EECD:
        int oldClk;
        oldClk = regs.eecd.sk();
        regs.eecd = val;
        // See if this is an eeprom access and emulate accordingly
        if (!oldClk && regs.eecd.sk()) {
            if (eeOpBits < 8) {
                eeOpcode = eeOpcode << 1 | regs.eecd.din();
                eeOpBits++;
            } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
                eeAddr = eeAddr << 1 | regs.eecd.din();
                eeAddrBits++;
            } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
                assert(eeAddr>>1 < EEPROM_SIZE);
                DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n",
                        flash[eeAddr>>1] >> eeDataBits & 0x1, flash[eeAddr>>1]);
                regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1);
                eeDataBits++;
            } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) {
                regs.eecd.dout(0);
                eeDataBits++;
            } else
                panic("What's going on with eeprom interface? opcode:"
                      " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode,
                      (uint32_t)eeOpBits, (uint32_t)eeAddr,
                      (uint32_t)eeAddrBits, (uint32_t)eeDataBits);

            // Reset everything for the next command
            if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) ||
                (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) {
                eeOpBits = 0;
                eeAddrBits = 0;
                eeDataBits = 0;
                eeOpcode = 0;
                eeAddr = 0;
            }

            DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
                    (uint32_t)eeOpcode, (uint32_t) eeOpBits,
                    (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits);
            if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI ||
                                   eeOpcode == EEPROM_RDSR_OPCODE_SPI))
                panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode,
                      (uint32_t)eeOpBits);


        }
        // If the driver requests eeprom access, immediately grant it
        regs.eecd.ee_gnt(regs.eecd.ee_req());
        break;
      case REG_EERD:
        regs.eerd = val;
        break;
      case REG_MDIC:
        regs.mdic = val;
        if (regs.mdic.i())
            panic("No support for interrupt on mdic complete\n");
        if (regs.mdic.phyadd() != 1)
            panic("No support for reading anything but phy\n");
        DPRINTF(Ethernet, "%s phy address %x\n",
                regs.mdic.op() == 1 ? "Writing" : "Reading",
                regs.mdic.regadd());
        switch (regs.mdic.regadd()) {
          case PHY_PSTATUS:
            regs.mdic.data(0x796D); // link up
            break;
          case PHY_PID:
            regs.mdic.data(0x02A8);
            break;
          case PHY_EPID:
            regs.mdic.data(0x0380);
            break;
          case PHY_GSTATUS:
            regs.mdic.data(0x7C00);
            break;
          case PHY_EPSTATUS:
            regs.mdic.data(0x3000);
            break;
          case PHY_AGC:
            regs.mdic.data(0x180); // some random length
            break;
          default:
            regs.mdic.data(0);
        }
        regs.mdic.r(1);
        break;
      case REG_ICR:
        DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs.icr(),
                regs.imr, regs.iam, regs.ctrl_ext.iame());
        if (regs.ctrl_ext.iame())
            regs.imr &= ~regs.iam;
        regs.icr = ~bits(val,30,0) & regs.icr();
        chkInterrupt();
        break;
      case REG_ITR:
        regs.itr = val;
        break;
      case REG_ICS:
        DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n");
        postInterrupt((IntTypes)val);
        break;
      case REG_IMS:
        regs.imr |= val;
        chkInterrupt();
        break;
      case REG_IMC:
        regs.imr &= ~val;
        chkInterrupt();
        break;
      case REG_IAM:
        regs.iam = val;
        break;
      case REG_RCTL:
        oldrctl = regs.rctl;
        regs.rctl = val;
        if (regs.rctl.rst()) {
            rxDescCache.reset();
            DPRINTF(EthernetSM, "RXS: Got RESET!\n");
            rxFifo.clear();
            regs.rctl.rst(0);
        }
        if (regs.rctl.en())
            rxTick = true;
        restartClock();
        break;
      case REG_FCTTV:
        regs.fcttv = val;
        break;
      case REG_TCTL:
        oldtctl = regs.tctl;
        regs.tctl = val;
        if (regs.tctl.en())
            txTick = true;
        restartClock();
        if (regs.tctl.en() && !oldtctl.en()) {
            txDescCache.reset();
        }
        break;
      case REG_PBA:
        regs.pba.rxa(val);
        regs.pba.txa(64 - regs.pba.rxa());
        break;
      case REG_WUC:
      case REG_LEDCTL:
      case REG_FCAL:
      case REG_FCAH:
      case REG_FCT:
      case REG_VET:
      case REG_AIFS:
      case REG_TIPG:
        ; // We don't care, so don't store anything
        break;
      case REG_FCRTL:
        regs.fcrtl = val;
        break;
      case REG_FCRTH:
        regs.fcrth = val;
        break;
      case REG_RDBAL:
        regs.rdba.rdbal( val & ~mask(4));
        rxDescCache.areaChanged();
        break;
      case REG_RDBAH:
        regs.rdba.rdbah(val);
        rxDescCache.areaChanged();
        break;
      case REG_RDLEN:
        regs.rdlen = val & ~mask(7);
        rxDescCache.areaChanged();
        break;
      case REG_RDH:
        regs.rdh = val;
        rxDescCache.areaChanged();
        break;
      case REG_RDT:
        regs.rdt = val;
        DPRINTF(EthernetSM, "RXS: RDT Updated.\n");
        if (getState() == SimObject::Running) {
            DPRINTF(EthernetSM, "RXS: RDT Fetching Descriptors!\n");
            rxDescCache.fetchDescriptors();
        } else {
            DPRINTF(EthernetSM, "RXS: RDT NOT Fetching Desc b/c draining!\n");
        }
        break;
      case REG_RDTR:
        regs.rdtr = val;
        break;
      case REG_RADV:
        regs.radv = val;
        break;
      case REG_TDBAL:
        regs.tdba.tdbal( val & ~mask(4));
        txDescCache.areaChanged();
        break;
      case REG_TDBAH:
        regs.tdba.tdbah(val);
        txDescCache.areaChanged();
        break;
      case REG_TDLEN:
        regs.tdlen = val & ~mask(7);
        txDescCache.areaChanged();
        break;
      case REG_TDH:
        regs.tdh = val;
        txDescCache.areaChanged();
        break;
      case REG_TDT:
        regs.tdt = val;
        DPRINTF(EthernetSM, "TXS: TX Tail pointer updated\n");
        if (getState() == SimObject::Running) {
            DPRINTF(EthernetSM, "TXS: TDT Fetching Descriptors!\n");
            txDescCache.fetchDescriptors();
        } else {
            DPRINTF(EthernetSM, "TXS: TDT NOT Fetching Desc b/c draining!\n");
        }
        break;
      case REG_TIDV:
        regs.tidv = val;
        break;
      case REG_TXDCTL:
        regs.txdctl = val;
        break;
      case REG_TADV:
        regs.tadv = val;
        break;
      case REG_RXCSUM:
        regs.rxcsum = val;
        break;
      case REG_MANC:
        regs.manc = val;
        break;
      default:
        if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) &&
            !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) &&
            !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4)))
            panic("Write request to unknown register number: %#x\n", daddr);
    };

    pkt->makeAtomicResponse();
    return pioDelay;
}

void
IGbE::postInterrupt(IntTypes t, bool now)
{
    assert(t);

    // Interrupt is already pending
    if (t & regs.icr() && !now)
        return;

    regs.icr = regs.icr() | t;

    Tick itr_interval = Clock::Int::ns * 256 * regs.itr.interval();
    DPRINTF(EthernetIntr, "EINT: postInterrupt() curTick: %d itr: %d interval: %d\n",
            curTick, regs.itr.interval(), itr_interval);

    if (regs.itr.interval() == 0 || now || lastInterrupt + itr_interval <= curTick) {
        if (interEvent.scheduled()) {
            interEvent.deschedule();
        }
        cpuPostInt();
    } else {
        Tick int_time = lastInterrupt + itr_interval;
        assert(int_time > 0);
        DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for tick %d\n",
                int_time);
        if (!interEvent.scheduled()) {
            interEvent.schedule(int_time);
        }
    }
}

void
IGbE::delayIntEvent()
{
    cpuPostInt();
}


void
IGbE::cpuPostInt()
{

    postedInterrupts++;

    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Interrupt Masked. Not Posting\n");
        return;
    }

    DPRINTF(Ethernet, "Posting Interrupt\n");


    if (interEvent.scheduled()) {
        interEvent.deschedule();
    }

    if (rdtrEvent.scheduled()) {
        regs.icr.rxt0(1);
        rdtrEvent.deschedule();
    }
    if (radvEvent.scheduled()) {
        regs.icr.rxt0(1);
        radvEvent.deschedule();
    }
    if (tadvEvent.scheduled()) {
        regs.icr.txdw(1);
        tadvEvent.deschedule();
    }
    if (tidvEvent.scheduled()) {
        regs.icr.txdw(1);
        tidvEvent.deschedule();
    }

    regs.icr.int_assert(1);
    DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n",
            regs.icr());

    intrPost();

    lastInterrupt = curTick;
}

void
IGbE::cpuClearInt()
{
    if (regs.icr.int_assert()) {
        regs.icr.int_assert(0);
        DPRINTF(EthernetIntr, "EINT: Clearing interrupt to CPU now. Vector %#x\n",
                regs.icr());
        intrClear();
    }
}

void
IGbE::chkInterrupt()
{
    DPRINTF(Ethernet, "Checking interrupts icr: %#x imr: %#x\n", regs.icr(),
            regs.imr);
    // Check if we need to clear the cpu interrupt
    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Mask cleared all interrupts\n");
        if (interEvent.scheduled())
            interEvent.deschedule();
        if (regs.icr.int_assert())
            cpuClearInt();
    }
    DPRINTF(Ethernet, "ITR = %#X itr.interval = %#X\n",
            regs.itr(), regs.itr.interval());

    if (regs.icr() & regs.imr) {
        if (regs.itr.interval() == 0) {
            cpuPostInt();
        } else {
            DPRINTF(Ethernet, "Possibly scheduling interrupt because of imr write\n");
            if (!interEvent.scheduled()) {
                DPRINTF(Ethernet, "Scheduling for %d\n", curTick + Clock::Int::ns
                        * 256 * regs.itr.interval());
                interEvent.schedule(curTick + Clock::Int::ns * 256 * regs.itr.interval());
            }
        }
    }


}


IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<RxDesc>(i, n, s), pktDone(false), pktEvent(this)

{
}

void
IGbE::RxDescCache::writePacket(EthPacketPtr packet)
{
    // We shouldn't have to deal with any of these yet
    DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n",
            packet->length, igbe->regs.rctl.descSize());
    assert(packet->length < igbe->regs.rctl.descSize());

    assert(unusedCache.size());
    //if (!unusedCache.size())
    //    return false;

    pktPtr = packet;
    pktDone = false;
    igbe->dmaWrite(igbe->platform->pciToDma(unusedCache.front()->buf),
                   packet->length, &pktEvent, packet->data, igbe->rxWriteDelay);
}

void
IGbE::RxDescCache::pktComplete()
{
    assert(unusedCache.size());
    RxDesc *desc;
    desc = unusedCache.front();

    uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4;
    desc->len = htole((uint16_t)(pktPtr->length + crcfixup));
    DPRINTF(EthernetDesc, "pktPtr->length: %d stripcrc offset: %d value written: %d %d\n",
            pktPtr->length, crcfixup,
            htole((uint16_t)(pktPtr->length + crcfixup)),
            (uint16_t)(pktPtr->length + crcfixup));

    // no support for anything but starting at 0
    assert(igbe->regs.rxcsum.pcss() == 0);

    DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n");

    uint8_t status = RXDS_DD | RXDS_EOP;
    uint8_t err = 0;

    IpPtr ip(pktPtr);

    if (ip) {
        DPRINTF(EthernetDesc, "Processing Ip packet with Id=%d\n", ip->id());

        if (igbe->regs.rxcsum.ipofld()) {
            DPRINTF(EthernetDesc, "Checking IP checksum\n");
            status |= RXDS_IPCS;
            desc->csum = htole(cksum(ip));
            igbe->rxIpChecksums++;
            if (cksum(ip) != 0) {
                err |= RXDE_IPE;
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
            }
        }
        TcpPtr tcp(ip);
        if (tcp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking TCP checksum\n");
            status |= RXDS_TCPCS;
            desc->csum = htole(cksum(tcp));
            igbe->rxTcpChecksums++;
            if (cksum(tcp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                err |= RXDE_TCPE;
            }
        }

        UdpPtr udp(ip);
        if (udp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking UDP checksum\n");
            status |= RXDS_UDPCS;
            desc->csum = htole(cksum(udp));
            igbe->rxUdpChecksums++;
            if (cksum(udp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                err |= RXDE_TCPE;
            }
        }
    } else { // if ip
        DPRINTF(EthernetSM, "Processing Non-Ip packet\n");
    }


    desc->status = htole(status);
    desc->errors = htole(err);

    // No vlan support at this point... just set it to 0
    desc->vlan = 0;

    // Deal with the rx timer interrupts
    if (igbe->regs.rdtr.delay()) {
        DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n",
                igbe->regs.rdtr.delay() * igbe->intClock());
        igbe->rdtrEvent.reschedule(curTick + igbe->regs.rdtr.delay() *
                                   igbe->intClock(), true);
    }

    if (igbe->regs.radv.idv()) {
        DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n",
                igbe->regs.radv.idv() * igbe->intClock());
        if (!igbe->radvEvent.scheduled()) {
            igbe->radvEvent.schedule(curTick + igbe->regs.radv.idv() *
                                     igbe->intClock());
        }
    }

    // if neither radv or rdtr, maybe itr is set...
    if (!igbe->regs.rdtr.delay() && !igbe->regs.radv.idv()) {
        DPRINTF(EthernetSM, "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
        igbe->postInterrupt(IT_RXT);
    }

    // If the packet is small enough, interrupt appropriately
    // I wonder if this is delayed or not?!
    if (pktPtr->length <= igbe->regs.rsrpd.idv()) {
        DPRINTF(EthernetSM, "RXS: Posting IT_SRPD because small packet received\n");
        igbe->postInterrupt(IT_SRPD);
    }

    DPRINTF(EthernetDesc, "Processing of this descriptor complete\n");
    unusedCache.pop_front();
    usedCache.push_back(desc);


    pktPtr = NULL;
    enableSm();
    pktDone = true;
    igbe->checkDrain();

}

void
IGbE::RxDescCache::enableSm()
{
    if (!igbe->drainEvent) {
        igbe->rxTick = true;
        igbe->restartClock();
    }
}

bool
IGbE::RxDescCache::packetDone()
{
    if (pktDone) {
        pktDone = false;
        return true;
    }
    return false;
}

bool
IGbE::RxDescCache::hasOutstandingEvents()
{
    return pktEvent.scheduled() || wbEvent.scheduled() ||
           fetchEvent.scheduled();
}

void
IGbE::RxDescCache::serialize(std::ostream &os)
{
    DescCache<RxDesc>::serialize(os);
    SERIALIZE_SCALAR(pktDone);
}

void
IGbE::RxDescCache::unserialize(Checkpoint *cp, const std::string &section)
{
    DescCache<RxDesc>::unserialize(cp, section);
    UNSERIALIZE_SCALAR(pktDone);
}


///////////////////////////////////// IGbE::TxDesc /////////////////////////////////

IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false), pktWaiting(false),
      pktEvent(this)

{
}

int
IGbE::TxDescCache::getPacketSize()
{
    assert(unusedCache.size());

    TxDesc *desc;

    DPRINTF(EthernetDesc, "Starting processing of descriptor\n");

    while (unusedCache.size() && TxdOp::isContext(unusedCache.front())) {
        DPRINTF(EthernetDesc, "Got context descriptor type... skipping\n");

        // I think we can just ignore these for now?
        desc = unusedCache.front();
        DPRINTF(EthernetDesc, "Descriptor upper: %#x lower: %#X\n", desc->d1,
                desc->d2);
        // is this going to be a tcp or udp packet?
        isTcp = TxdOp::tcp(desc) ? true : false;

        // make sure it's ipv4
        //assert(TxdOp::ip(desc));

        TxdOp::setDd(desc);
        unusedCache.pop_front();
        usedCache.push_back(desc);
    }

    if (!unusedCache.size())
        return -1;

    DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
            TxdOp::getLen(unusedCache.front()));

    return TxdOp::getLen(unusedCache.front());
}

void
IGbE::TxDescCache::getPacketData(EthPacketPtr p)
{
    assert(unusedCache.size());

    TxDesc *desc;
    desc = unusedCache.front();

    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc));

    pktPtr = p;

    pktWaiting = true;

    DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d\n", p->length);
    igbe->dmaRead(igbe->platform->pciToDma(TxdOp::getBuf(desc)),
                  TxdOp::getLen(desc), &pktEvent, p->data + p->length,
                  igbe->txReadDelay);


}

void
IGbE::TxDescCache::pktComplete()
{

    TxDesc *desc;
    assert(unusedCache.size());
    assert(pktPtr);

    DPRINTF(EthernetDesc, "DMA of packet complete\n");


    desc = unusedCache.front();
    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc));

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
            desc->d1, desc->d2);

    if (!TxdOp::eop(desc)) {
        pktPtr->length += TxdOp::getLen(desc);
        unusedCache.pop_front();
        usedCache.push_back(desc);
        pktDone = true;
        pktWaiting = false;
        pktMultiDesc = true;

        DPRINTF(EthernetDesc, "Partial Packet Descriptor of %d bytes Done\n",
                pktPtr->length);
        pktPtr = NULL;

        enableSm();
        igbe->checkDrain();
        return;
    }
    pktMultiDesc = false;

    // Set the length of the data in the EtherPacket
    pktPtr->length += TxdOp::getLen(desc);

    // no support for vlans
    assert(!TxdOp::vle(desc));

    // we always report status
    assert(TxdOp::rs(desc));

    // we only support single packet descriptors at this point
    assert(TxdOp::eop(desc));

    // set that this packet is done
    TxdOp::setDd(desc);

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
            desc->d1, desc->d2);

    if (DTRACE(EthernetDesc)) {
        IpPtr ip(pktPtr);
        if (ip)
            DPRINTF(EthernetDesc, "Processing Ip packet with Id=%d\n",
                    ip->id());
        else
            DPRINTF(EthernetSM, "Processing Non-Ip packet\n");
    }

    // Checksums are only offloaded for new descriptor types
    if (TxdOp::isData(desc) && (TxdOp::ixsm(desc) || TxdOp::txsm(desc))) {
        DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
        IpPtr ip(pktPtr);
        assert(ip);
        if (TxdOp::ixsm(desc)) {
            ip->sum(0);
            ip->sum(cksum(ip));
            igbe->txIpChecksums++;
            DPRINTF(EthernetDesc, "Calculated IP checksum\n");
        }
        if (TxdOp::txsm(desc)) {
            TcpPtr tcp(ip);
            UdpPtr udp(ip);
            if (tcp) {
                tcp->sum(0);
                tcp->sum(cksum(tcp));
                igbe->txTcpChecksums++;
                DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
            } else if (udp) {
                assert(udp);
                udp->sum(0);
                udp->sum(cksum(udp));
                igbe->txUdpChecksums++;
                DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
            } else {
                panic("Told to checksum, but don't know how\n");
            }
        }
    }

    if (TxdOp::ide(desc)) {
        // Deal with the tx timer interrupts
        DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
        if (igbe->regs.tidv.idv()) {
            DPRINTF(EthernetDesc, "setting tidv\n");
            igbe->tidvEvent.reschedule(curTick + igbe->regs.tidv.idv() *
                                       igbe->intClock(), true);
        }

        if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
            DPRINTF(EthernetDesc, "setting tadv\n");
            if (!igbe->tadvEvent.scheduled()) {
                igbe->tadvEvent.schedule(curTick + igbe->regs.tadv.idv() *
                                         igbe->intClock());
            }
        }
    }



    unusedCache.pop_front();
    usedCache.push_back(desc);
    pktDone = true;
    pktWaiting = false;
    pktPtr = NULL;

    DPRINTF(EthernetDesc, "Descriptor Done\n");

    if (igbe->regs.txdctl.wthresh() == 0) {
        DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
        writeback(0);
    } else if (igbe->regs.txdctl.wthresh() >= usedCache.size()) {
        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
        writeback((igbe->cacheBlockSize()-1)>>4);
    }
    enableSm();
    igbe->checkDrain();
}

void
IGbE::TxDescCache::serialize(std::ostream &os)
{
    DescCache<TxDesc>::serialize(os);
    SERIALIZE_SCALAR(pktDone);
    SERIALIZE_SCALAR(isTcp);
    SERIALIZE_SCALAR(pktWaiting);
    SERIALIZE_SCALAR(pktMultiDesc);
}

void
IGbE::TxDescCache::unserialize(Checkpoint *cp, const std::string &section)
{
    DescCache<TxDesc>::unserialize(cp, section);
    UNSERIALIZE_SCALAR(pktDone);
    UNSERIALIZE_SCALAR(isTcp);
    UNSERIALIZE_SCALAR(pktWaiting);
    UNSERIALIZE_SCALAR(pktMultiDesc);
}

bool
IGbE::TxDescCache::packetAvailable()
{
    if (pktDone) {
        pktDone = false;
        return true;
    }
    return false;
}

void
IGbE::TxDescCache::enableSm()
{
    if (!igbe->drainEvent) {
        igbe->txTick = true;
        igbe->restartClock();
    }
}

bool
IGbE::TxDescCache::hasOutstandingEvents()
{
    return pktEvent.scheduled() || wbEvent.scheduled() ||
           fetchEvent.scheduled();
}


///////////////////////////////////// IGbE /////////////////////////////////

void
IGbE::restartClock()
{
    if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) &&
        getState() == SimObject::Running)
        tickEvent.schedule((curTick/ticks(1)) * ticks(1) + ticks(1));
}

unsigned int
IGbE::drain(Event *de)
{
    unsigned int count;
    count = pioPort->drain(de) + dmaPort->drain(de);
    if (rxDescCache.hasOutstandingEvents() ||
        txDescCache.hasOutstandingEvents()) {
        count++;
        drainEvent = de;
    }

    txFifoTick = false;
    txTick = false;
    rxTick = false;

    if (tickEvent.scheduled())
        tickEvent.deschedule();

    if (count)
        changeState(Draining);
    else
        changeState(Drained);

    return count;
}

void
IGbE::resume()
{
    SimObject::resume();

    txFifoTick = true;
    txTick = true;
    rxTick = true;

    restartClock();
}

void
IGbE::checkDrain()
{
    if (!drainEvent)
        return;

    txFifoTick = false;
    txTick = false;
    rxTick = false;
    if (!rxDescCache.hasOutstandingEvents() &&
        !txDescCache.hasOutstandingEvents()) {
        drainEvent->process();
        drainEvent = NULL;
    }
}

void
IGbE::txStateMachine()
{
    if (!regs.tctl.en()) {
        txTick = false;
        DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
        return;
    }

    // If we have a packet available and its length is not 0 (meaning it's not
    // a multidescriptor packet) put it in the fifo, otherwise on the next
    // iteration we'll get the rest of the data
    if (txPacket && txDescCache.packetAvailable()
        && !txDescCache.packetMultiDesc() && txPacket->length) {
        bool success;

        DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
        success = txFifo.push(txPacket);
        txFifoTick = true && !drainEvent;
        assert(success);
        txPacket = NULL;
        txDescCache.writeback((cacheBlockSize()-1)>>4);
        return;
    }

    // Only support descriptor granularity
    assert(regs.txdctl.gran());
    if (regs.txdctl.lwthresh() &&
        txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
        DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
        postInterrupt(IT_TXDLOW);
    }

    if (!txPacket) {
        txPacket = new EthPacketData(16384);
    }

    if (!txDescCache.packetWaiting()) {
        if (txDescCache.descLeft() == 0) {
            postInterrupt(IT_TXQE);
            txDescCache.writeback(0);
            txDescCache.fetchDescriptors();
            DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
                    "writeback stopping ticking and posting TXQE\n");
            txTick = false;
            return;
        }


        if (!(txDescCache.descUnused())) {
            txDescCache.fetchDescriptors();
            DPRINTF(EthernetSM, "TXS: No descriptors available in cache, fetching and stopping ticking\n");
            txTick = false;
            return;
        }


        int size;
        size = txDescCache.getPacketSize();
        if (size > 0 && txFifo.avail() > size) {
            DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and beginning "
                    "DMA of next packet\n", size);
            txFifo.reserve(size);
            txDescCache.getPacketData(txPacket);
        } else if (size <= 0) {
            DPRINTF(EthernetSM, "TXS: getPacketSize returned: %d\n", size);
            DPRINTF(EthernetSM, "TXS: No packets to get, writing back used descriptors\n");
            txDescCache.writeback(0);
        } else {
            DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
                    "available in FIFO\n");
            txTick = false;
        }


        return;
    }
    DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
    txTick = false;
}

bool
IGbE::ethRxPkt(EthPacketPtr pkt)
{
    rxBytes += pkt->length;
    rxPackets++;

    DPRINTF(Ethernet, "RxFIFO: Receiving packet from wire\n");

    if (!regs.rctl.en()) {
        DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
        return true;
    }

    // restart the state machines if they are stopped
    rxTick = true && !drainEvent;
    if ((rxTick || txTick) && !tickEvent.scheduled()) {
        DPRINTF(EthernetSM, "RXS: received packet into fifo, starting ticking\n");
        restartClock();
    }

    if (!rxFifo.push(pkt)) {
        DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
        postInterrupt(IT_RXO, true);
        return false;
    }

    return true;
}


void
IGbE::rxStateMachine()
{
    if (!regs.rctl.en()) {
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
        return;
    }

    // If the packet is done check for interrupts/descriptors/etc
    if (rxDescCache.packetDone()) {
        rxDmaPacket = false;
        DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
        int descLeft = rxDescCache.descLeft();
        switch (regs.rctl.rdmts()) {
          case 2: if (descLeft > .125 * regs.rdlen()) break;
          case 1: if (descLeft > .250 * regs.rdlen()) break;
          case 0: if (descLeft > .500 * regs.rdlen()) break;
            DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) because of descriptors left\n");
            postInterrupt(IT_RXDMT);
            break;
        }

        if (descLeft == 0) {
            rxDescCache.writeback(0);
            DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
                    " writeback and stopping ticking\n");
            rxTick = false;
        }

        // only support descriptor granularities
        assert(regs.rxdctl.gran());

        if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
            DPRINTF(EthernetSM, "RXS: Writing back because WTHRESH >= descUsed\n");
            if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
                rxDescCache.writeback(regs.rxdctl.wthresh()-1);
            else
                rxDescCache.writeback((cacheBlockSize()-1)>>4);
        }

        if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
            ((rxDescCache.descLeft() - rxDescCache.descUnused()) > regs.rxdctl.hthresh())) {
            DPRINTF(EthernetSM, "RXS: Fetching descriptors because descUnused < PTHRESH\n");
            rxDescCache.fetchDescriptors();
        }

        if (rxDescCache.descUnused() == 0) {
            rxDescCache.fetchDescriptors();
            DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
                    "fetching descriptors and stopping ticking\n");
            rxTick = false;
        }
        return;
    }

    if (rxDmaPacket) {
        DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
        rxTick = false;
        return;
    }

    if (!rxDescCache.descUnused()) {
        rxDescCache.fetchDescriptors();
        DPRINTF(EthernetSM, "RXS: No descriptors available in cache, stopping ticking\n");
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n");
        return;
    }

    if (rxFifo.empty()) {
        DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
        rxTick = false;
        return;
    }

    EthPacketPtr pkt;
    pkt = rxFifo.front();


    rxDescCache.writePacket(pkt);
    DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
    DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
    rxFifo.pop();
    DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
    rxTick = false;
    rxDmaPacket = true;
}

void
IGbE::txWire()
{
    if (txFifo.empty()) {
        txFifoTick = false;
        return;
    }


    if (etherInt->sendPacket(txFifo.front())) {
        if (DTRACE(EthernetSM)) {
            IpPtr ip(txFifo.front());
            if (ip)
                DPRINTF(EthernetSM, "Transmitting Ip packet with Id=%d\n",
                        ip->id());
            else
                DPRINTF(EthernetSM, "Transmitting Non-Ip packet\n");
        }
        DPRINTF(EthernetSM, "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
                txFifo.avail());

        txBytes += txFifo.front()->length;
        txPackets++;
        txFifoTick = false;

        txFifo.pop();
    } else {
        // We'll get woken up when ethTxDone() gets called for this packet
        txFifoTick = false;
    }
}

void
IGbE::tick()
{
    DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");

    if (rxTick)
        rxStateMachine();

    if (txTick)
        txStateMachine();

    if (txFifoTick)
        txWire();


    if (rxTick || txTick || txFifoTick)
        tickEvent.schedule(curTick + ticks(1));
}

void
IGbE::ethTxDone()
{
    // restart the tx state machines if they are stopped
    // fifo to send another packet
    // tx sm to put more data into the fifo
    txFifoTick = true && !drainEvent;
    if (txDescCache.descLeft() != 0 && !drainEvent)
        txTick = true;

    restartClock();
    txWire();
    DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
}

void
IGbE::serialize(std::ostream &os)
{
    PciDev::serialize(os);

    regs.serialize(os);
    SERIALIZE_SCALAR(eeOpBits);
    SERIALIZE_SCALAR(eeAddrBits);
    SERIALIZE_SCALAR(eeDataBits);
    SERIALIZE_SCALAR(eeOpcode);
    SERIALIZE_SCALAR(eeAddr);
    SERIALIZE_SCALAR(lastInterrupt);
    SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.serialize("rxfifo", os);
    txFifo.serialize("txfifo", os);

    bool txPktExists = txPacket;
    SERIALIZE_SCALAR(txPktExists);
    if (txPktExists)
        txPacket->serialize("txpacket", os);

    Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
         inter_time = 0;

    if (rdtrEvent.scheduled())
        rdtr_time = rdtrEvent.when();
    SERIALIZE_SCALAR(rdtr_time);

    if (radvEvent.scheduled())
        radv_time = radvEvent.when();
    SERIALIZE_SCALAR(radv_time);

    if (tidvEvent.scheduled())
        tidv_time = tidvEvent.when();
    SERIALIZE_SCALAR(tidv_time);

    if (tadvEvent.scheduled())
        tadv_time = tadvEvent.when();
    SERIALIZE_SCALAR(tadv_time);

    if (interEvent.scheduled())
        inter_time = interEvent.when();
    SERIALIZE_SCALAR(inter_time);

    nameOut(os, csprintf("%s.TxDescCache", name()));
    txDescCache.serialize(os);

    nameOut(os, csprintf("%s.RxDescCache", name()));
    rxDescCache.serialize(os);
}

void
IGbE::unserialize(Checkpoint *cp, const std::string &section)
{
    PciDev::unserialize(cp, section);

    regs.unserialize(cp, section);
    UNSERIALIZE_SCALAR(eeOpBits);
    UNSERIALIZE_SCALAR(eeAddrBits);
    UNSERIALIZE_SCALAR(eeDataBits);
    UNSERIALIZE_SCALAR(eeOpcode);
    UNSERIALIZE_SCALAR(eeAddr);
    UNSERIALIZE_SCALAR(lastInterrupt);
    UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.unserialize("rxfifo", cp, section);
    txFifo.unserialize("txfifo", cp, section);

    bool txPktExists;
    UNSERIALIZE_SCALAR(txPktExists);
    if (txPktExists) {
        txPacket = new EthPacketData(16384);
        txPacket->unserialize("txpacket", cp, section);
    }

    rxTick = true;
    txTick = true;
    txFifoTick = true;

    Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
    UNSERIALIZE_SCALAR(rdtr_time);
    UNSERIALIZE_SCALAR(radv_time);
    UNSERIALIZE_SCALAR(tidv_time);
    UNSERIALIZE_SCALAR(tadv_time);
    UNSERIALIZE_SCALAR(inter_time);

    if (rdtr_time)
        rdtrEvent.schedule(rdtr_time);

    if (radv_time)
        radvEvent.schedule(radv_time);

    if (tidv_time)
        tidvEvent.schedule(tidv_time);

    if (tadv_time)
        tadvEvent.schedule(tadv_time);

    if (inter_time)
        interEvent.schedule(inter_time);

    txDescCache.unserialize(cp, csprintf("%s.TxDescCache", section));

    rxDescCache.unserialize(cp, csprintf("%s.RxDescCache", section));
}

IGbE *
IGbEParams::create()
{
    return new IGbE(this);
}