// i8254xGBe.cc revision 5533
1/* 2 * Copyright (c) 2006 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 * 28 * Authors: Ali Saidi 29 */ 30 31/* @file 32 * Device model for Intel's 8254x line of gigabit ethernet controllers. 33 * In particular an 82547 revision 2 (82547GI) MAC because it seems to have the 34 * fewest workarounds in the driver. It will probably work with most of the 35 * other MACs with slight modifications. 
36 */ 37 38 39/* 40 * @todo really there are multiple dma engines.. we should implement them. 41 */ 42 43#include <algorithm> 44 45#include "base/inet.hh" 46#include "base/trace.hh" 47#include "dev/i8254xGBe.hh" 48#include "mem/packet.hh" 49#include "mem/packet_access.hh" 50#include "params/IGbE.hh" 51#include "sim/stats.hh" 52#include "sim/system.hh" 53 54using namespace iGbReg; 55using namespace Net; 56 57IGbE::IGbE(const Params *p) 58 : EtherDevice(p), etherInt(NULL), drainEvent(NULL), useFlowControl(p->use_flow_control), 59 rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), rxTick(false), 60 txTick(false), txFifoTick(false), rxDmaPacket(false), rdtrEvent(this), radvEvent(this), 61 tadvEvent(this), tidvEvent(this), tickEvent(this), interEvent(this), 62 rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size), 63 txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size), 64 clock(p->clock), lastInterrupt(0) 65{ 66 etherInt = new IGbEInt(name() + ".int", this); 67 68 // Initialized internal registers per Intel documentation 69 // All registers intialized to 0 by per register constructor 70 regs.ctrl.fd(1); 71 regs.ctrl.lrst(1); 72 regs.ctrl.speed(2); 73 regs.ctrl.frcspd(1); 74 regs.sts.speed(3); // Say we're 1000Mbps 75 regs.sts.fd(1); // full duplex 76 regs.sts.lu(1); // link up 77 regs.eecd.fwe(1); 78 regs.eecd.ee_type(1); 79 regs.imr = 0; 80 regs.iam = 0; 81 regs.rxdctl.gran(1); 82 regs.rxdctl.wthresh(1); 83 regs.fcrth(1); 84 85 regs.pba.rxa(0x30); 86 regs.pba.txa(0x10); 87 88 eeOpBits = 0; 89 eeAddrBits = 0; 90 eeDataBits = 0; 91 eeOpcode = 0; 92 93 // clear all 64 16 bit words of the eeprom 94 memset(&flash, 0, EEPROM_SIZE*2); 95 96 // Set the MAC address 97 memcpy(flash, p->hardware_address.bytes(), ETH_ADDR_LEN); 98 for (int x = 0; x < ETH_ADDR_LEN/2; x++) 99 flash[x] = htobe(flash[x]); 100 101 uint16_t csum = 0; 102 for (int x = 0; x < EEPROM_SIZE; x++) 103 csum += htobe(flash[x]); 104 105 106 // Magic happy checksum value 107 flash[EEPROM_SIZE-1] = 
htobe((uint16_t)(EEPROM_CSUM - csum)); 108 109 rxFifo.clear(); 110 txFifo.clear(); 111} 112 113EtherInt* 114IGbE::getEthPort(const std::string &if_name, int idx) 115{ 116 117 if (if_name == "interface") { 118 if (etherInt->getPeer()) 119 panic("Port already connected to\n"); 120 return etherInt; 121 } 122 return NULL; 123} 124 125Tick 126IGbE::writeConfig(PacketPtr pkt) 127{ 128 int offset = pkt->getAddr() & PCI_CONFIG_SIZE; 129 if (offset < PCI_DEVICE_SPECIFIC) 130 PciDev::writeConfig(pkt); 131 else 132 panic("Device specific PCI config space not implemented.\n"); 133 134 /// 135 /// Some work may need to be done here based for the pci COMMAND bits. 136 /// 137 138 return pioDelay; 139} 140 141Tick 142IGbE::read(PacketPtr pkt) 143{ 144 int bar; 145 Addr daddr; 146 147 if (!getBAR(pkt->getAddr(), bar, daddr)) 148 panic("Invalid PCI memory access to unmapped memory.\n"); 149 150 // Only Memory register BAR is allowed 151 assert(bar == 0); 152 153 // Only 32bit accesses allowed 154 assert(pkt->getSize() == 4); 155 156 DPRINTF(Ethernet, "Read device register %#X\n", daddr); 157 158 pkt->allocate(); 159 160 /// 161 /// Handle read of register here 162 /// 163 164 165 switch (daddr) { 166 case REG_CTRL: 167 pkt->set<uint32_t>(regs.ctrl()); 168 break; 169 case REG_STATUS: 170 pkt->set<uint32_t>(regs.sts()); 171 break; 172 case REG_EECD: 173 pkt->set<uint32_t>(regs.eecd()); 174 break; 175 case REG_EERD: 176 pkt->set<uint32_t>(regs.eerd()); 177 break; 178 case REG_CTRL_EXT: 179 pkt->set<uint32_t>(regs.ctrl_ext()); 180 break; 181 case REG_MDIC: 182 pkt->set<uint32_t>(regs.mdic()); 183 break; 184 case REG_ICR: 185 DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs.icr(), 186 regs.imr, regs.iam, regs.ctrl_ext.iame()); 187 pkt->set<uint32_t>(regs.icr()); 188 if (regs.icr.int_assert() || regs.imr == 0) { 189 regs.icr = regs.icr() & ~mask(30); 190 DPRINTF(Ethernet, "Cleared ICR. 
ICR=%#x\n", regs.icr()); 191 } 192 if (regs.ctrl_ext.iame() && regs.icr.int_assert()) 193 regs.imr &= ~regs.iam; 194 chkInterrupt(); 195 break; 196 case REG_ITR: 197 pkt->set<uint32_t>(regs.itr()); 198 break; 199 case REG_RCTL: 200 pkt->set<uint32_t>(regs.rctl()); 201 break; 202 case REG_FCTTV: 203 pkt->set<uint32_t>(regs.fcttv()); 204 break; 205 case REG_TCTL: 206 pkt->set<uint32_t>(regs.tctl()); 207 break; 208 case REG_PBA: 209 pkt->set<uint32_t>(regs.pba()); 210 break; 211 case REG_WUC: 212 case REG_LEDCTL: 213 pkt->set<uint32_t>(0); // We don't care, so just return 0 214 break; 215 case REG_FCRTL: 216 pkt->set<uint32_t>(regs.fcrtl()); 217 break; 218 case REG_FCRTH: 219 pkt->set<uint32_t>(regs.fcrth()); 220 break; 221 case REG_RDBAL: 222 pkt->set<uint32_t>(regs.rdba.rdbal()); 223 break; 224 case REG_RDBAH: 225 pkt->set<uint32_t>(regs.rdba.rdbah()); 226 break; 227 case REG_RDLEN: 228 pkt->set<uint32_t>(regs.rdlen()); 229 break; 230 case REG_RDH: 231 pkt->set<uint32_t>(regs.rdh()); 232 break; 233 case REG_RDT: 234 pkt->set<uint32_t>(regs.rdt()); 235 break; 236 case REG_RDTR: 237 pkt->set<uint32_t>(regs.rdtr()); 238 if (regs.rdtr.fpd()) { 239 rxDescCache.writeback(0); 240 DPRINTF(EthernetIntr, "Posting interrupt because of RDTR.FPD write\n"); 241 postInterrupt(IT_RXT); 242 regs.rdtr.fpd(0); 243 } 244 break; 245 case REG_RADV: 246 pkt->set<uint32_t>(regs.radv()); 247 break; 248 case REG_TDBAL: 249 pkt->set<uint32_t>(regs.tdba.tdbal()); 250 break; 251 case REG_TDBAH: 252 pkt->set<uint32_t>(regs.tdba.tdbah()); 253 break; 254 case REG_TDLEN: 255 pkt->set<uint32_t>(regs.tdlen()); 256 break; 257 case REG_TDH: 258 pkt->set<uint32_t>(regs.tdh()); 259 break; 260 case REG_TDT: 261 pkt->set<uint32_t>(regs.tdt()); 262 break; 263 case REG_TIDV: 264 pkt->set<uint32_t>(regs.tidv()); 265 break; 266 case REG_TXDCTL: 267 pkt->set<uint32_t>(regs.txdctl()); 268 break; 269 case REG_TADV: 270 pkt->set<uint32_t>(regs.tadv()); 271 break; 272 case REG_RXCSUM: 273 
pkt->set<uint32_t>(regs.rxcsum()); 274 break; 275 case REG_MANC: 276 pkt->set<uint32_t>(regs.manc()); 277 break; 278 default: 279 if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) && 280 !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) && 281 !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4)) && 282 !(daddr >= REG_CRCERRS && daddr < (REG_CRCERRS + STATS_REGS_SIZE))) 283 panic("Read request to unknown register number: %#x\n", daddr); 284 else 285 pkt->set<uint32_t>(0); 286 }; 287 288 pkt->makeAtomicResponse(); 289 return pioDelay; 290} 291 292Tick 293IGbE::write(PacketPtr pkt) 294{ 295 int bar; 296 Addr daddr; 297 298 299 if (!getBAR(pkt->getAddr(), bar, daddr)) 300 panic("Invalid PCI memory access to unmapped memory.\n"); 301 302 // Only Memory register BAR is allowed 303 assert(bar == 0); 304 305 // Only 32bit accesses allowed 306 assert(pkt->getSize() == sizeof(uint32_t)); 307 308 DPRINTF(Ethernet, "Wrote device register %#X value %#X\n", daddr, pkt->get<uint32_t>()); 309 310 /// 311 /// Handle write of register here 312 /// 313 uint32_t val = pkt->get<uint32_t>(); 314 315 Regs::RCTL oldrctl; 316 Regs::TCTL oldtctl; 317 318 switch (daddr) { 319 case REG_CTRL: 320 regs.ctrl = val; 321 if (regs.ctrl.tfce()) 322 warn("TX Flow control enabled, should implement\n"); 323 if (regs.ctrl.rfce()) 324 warn("RX Flow control enabled, should implement\n"); 325 break; 326 case REG_CTRL_EXT: 327 regs.ctrl_ext = val; 328 break; 329 case REG_STATUS: 330 regs.sts = val; 331 break; 332 case REG_EECD: 333 int oldClk; 334 oldClk = regs.eecd.sk(); 335 regs.eecd = val; 336 // See if this is a eeprom access and emulate accordingly 337 if (!oldClk && regs.eecd.sk()) { 338 if (eeOpBits < 8) { 339 eeOpcode = eeOpcode << 1 | regs.eecd.din(); 340 eeOpBits++; 341 } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) { 342 eeAddr = eeAddr << 1 | regs.eecd.din(); 343 eeAddrBits++; 344 } else if (eeDataBits < 16 && eeOpcode == 
EEPROM_READ_OPCODE_SPI) { 345 assert(eeAddr>>1 < EEPROM_SIZE); 346 DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n", 347 flash[eeAddr>>1] >> eeDataBits & 0x1, flash[eeAddr>>1]); 348 regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1); 349 eeDataBits++; 350 } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) { 351 regs.eecd.dout(0); 352 eeDataBits++; 353 } else 354 panic("What's going on with eeprom interface? opcode:" 355 " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode, 356 (uint32_t)eeOpBits, (uint32_t)eeAddr, 357 (uint32_t)eeAddrBits, (uint32_t)eeDataBits); 358 359 // Reset everything for the next command 360 if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) || 361 (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) { 362 eeOpBits = 0; 363 eeAddrBits = 0; 364 eeDataBits = 0; 365 eeOpcode = 0; 366 eeAddr = 0; 367 } 368 369 DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n", 370 (uint32_t)eeOpcode, (uint32_t) eeOpBits, 371 (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits); 372 if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI || 373 eeOpcode == EEPROM_RDSR_OPCODE_SPI )) 374 panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode, 375 (uint32_t)eeOpBits); 376 377 378 } 379 // If driver requests eeprom access, immediately give it to it 380 regs.eecd.ee_gnt(regs.eecd.ee_req()); 381 break; 382 case REG_EERD: 383 regs.eerd = val; 384 break; 385 case REG_MDIC: 386 regs.mdic = val; 387 if (regs.mdic.i()) 388 panic("No support for interrupt on mdic complete\n"); 389 if (regs.mdic.phyadd() != 1) 390 panic("No support for reading anything but phy\n"); 391 DPRINTF(Ethernet, "%s phy address %x\n", regs.mdic.op() == 1 ? 
"Writing" 392 : "Reading", regs.mdic.regadd()); 393 switch (regs.mdic.regadd()) { 394 case PHY_PSTATUS: 395 regs.mdic.data(0x796D); // link up 396 break; 397 case PHY_PID: 398 regs.mdic.data(0x02A8); 399 break; 400 case PHY_EPID: 401 regs.mdic.data(0x0380); 402 break; 403 case PHY_GSTATUS: 404 regs.mdic.data(0x7C00); 405 break; 406 case PHY_EPSTATUS: 407 regs.mdic.data(0x3000); 408 break; 409 case PHY_AGC: 410 regs.mdic.data(0x180); // some random length 411 break; 412 default: 413 regs.mdic.data(0); 414 } 415 regs.mdic.r(1); 416 break; 417 case REG_ICR: 418 DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs.icr(), 419 regs.imr, regs.iam, regs.ctrl_ext.iame()); 420 if (regs.ctrl_ext.iame()) 421 regs.imr &= ~regs.iam; 422 regs.icr = ~bits(val,30,0) & regs.icr(); 423 chkInterrupt(); 424 break; 425 case REG_ITR: 426 regs.itr = val; 427 break; 428 case REG_ICS: 429 DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n"); 430 postInterrupt((IntTypes)val); 431 break; 432 case REG_IMS: 433 regs.imr |= val; 434 chkInterrupt(); 435 break; 436 case REG_IMC: 437 regs.imr &= ~val; 438 chkInterrupt(); 439 break; 440 case REG_IAM: 441 regs.iam = val; 442 break; 443 case REG_RCTL: 444 oldrctl = regs.rctl; 445 regs.rctl = val; 446 if (regs.rctl.rst()) { 447 rxDescCache.reset(); 448 DPRINTF(EthernetSM, "RXS: Got RESET!\n"); 449 rxFifo.clear(); 450 regs.rctl.rst(0); 451 } 452 if (regs.rctl.en()) 453 rxTick = true; 454 restartClock(); 455 break; 456 case REG_FCTTV: 457 regs.fcttv = val; 458 break; 459 case REG_TCTL: 460 regs.tctl = val; 461 oldtctl = regs.tctl; 462 regs.tctl = val; 463 if (regs.tctl.en()) 464 txTick = true; 465 restartClock(); 466 if (regs.tctl.en() && !oldtctl.en()) { 467 txDescCache.reset(); 468 } 469 break; 470 case REG_PBA: 471 regs.pba.rxa(val); 472 regs.pba.txa(64 - regs.pba.rxa()); 473 break; 474 case REG_WUC: 475 case REG_LEDCTL: 476 case REG_FCAL: 477 case REG_FCAH: 478 case REG_FCT: 479 case REG_VET: 480 case REG_AIFS: 481 
case REG_TIPG: 482 ; // We don't care, so don't store anything 483 break; 484 case REG_FCRTL: 485 regs.fcrtl = val; 486 break; 487 case REG_FCRTH: 488 regs.fcrth = val; 489 break; 490 case REG_RDBAL: 491 regs.rdba.rdbal( val & ~mask(4)); 492 rxDescCache.areaChanged(); 493 break; 494 case REG_RDBAH: 495 regs.rdba.rdbah(val); 496 rxDescCache.areaChanged(); 497 break; 498 case REG_RDLEN: 499 regs.rdlen = val & ~mask(7); 500 rxDescCache.areaChanged(); 501 break; 502 case REG_RDH: 503 regs.rdh = val; 504 rxDescCache.areaChanged(); 505 break; 506 case REG_RDT: 507 regs.rdt = val; 508 DPRINTF(EthernetSM, "RXS: RDT Updated.\n"); 509 if (getState() == SimObject::Running) { 510 DPRINTF(EthernetSM, "RXS: RDT Fetching Descriptors!\n"); 511 rxDescCache.fetchDescriptors(); 512 } else { 513 DPRINTF(EthernetSM, "RXS: RDT NOT Fetching Desc b/c draining!\n"); 514 } 515 break; 516 case REG_RDTR: 517 regs.rdtr = val; 518 break; 519 case REG_RADV: 520 regs.radv = val; 521 break; 522 case REG_TDBAL: 523 regs.tdba.tdbal( val & ~mask(4)); 524 txDescCache.areaChanged(); 525 break; 526 case REG_TDBAH: 527 regs.tdba.tdbah(val); 528 txDescCache.areaChanged(); 529 break; 530 case REG_TDLEN: 531 regs.tdlen = val & ~mask(7); 532 txDescCache.areaChanged(); 533 break; 534 case REG_TDH: 535 regs.tdh = val; 536 txDescCache.areaChanged(); 537 break; 538 case REG_TDT: 539 regs.tdt = val; 540 DPRINTF(EthernetSM, "TXS: TX Tail pointer updated\n"); 541 if (getState() == SimObject::Running) { 542 DPRINTF(EthernetSM, "TXS: TDT Fetching Descriptors!\n"); 543 txDescCache.fetchDescriptors(); 544 } else { 545 DPRINTF(EthernetSM, "TXS: TDT NOT Fetching Desc b/c draining!\n"); 546 } 547 break; 548 case REG_TIDV: 549 regs.tidv = val; 550 break; 551 case REG_TXDCTL: 552 regs.txdctl = val; 553 break; 554 case REG_TADV: 555 regs.tadv = val; 556 break; 557 case REG_RXCSUM: 558 regs.rxcsum = val; 559 break; 560 case REG_MANC: 561 regs.manc = val; 562 break; 563 default: 564 if (!(daddr >= REG_VFTA && daddr < (REG_VFTA 
+ VLAN_FILTER_TABLE_SIZE*4)) && 565 !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) && 566 !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4))) 567 panic("Write request to unknown register number: %#x\n", daddr); 568 }; 569 570 pkt->makeAtomicResponse(); 571 return pioDelay; 572} 573 574void 575IGbE::postInterrupt(IntTypes t, bool now) 576{ 577 assert(t); 578 579 // Interrupt is already pending 580 if (t & regs.icr() && !now) 581 return; 582 583 regs.icr = regs.icr() | t; 584 585 Tick itr_interval = Clock::Int::ns * 256 * regs.itr.interval(); 586 587 if (regs.itr.interval() == 0 || now || lastInterrupt + itr_interval <= curTick) { 588 if (interEvent.scheduled()) { 589 interEvent.deschedule(); 590 } 591 cpuPostInt(); 592 } else { 593 Tick int_time = lastInterrupt + itr_interval; 594 assert(int_time > 0); 595 DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for tick %d\n", 596 int_time); 597 if (!interEvent.scheduled()) { 598 interEvent.schedule(int_time); 599 } 600 } 601} 602 603void 604IGbE::delayIntEvent() 605{ 606 cpuPostInt(); 607} 608 609 610void 611IGbE::cpuPostInt() 612{ 613 614 postedInterrupts++; 615 616 if (!(regs.icr() & regs.imr)) { 617 DPRINTF(Ethernet, "Interrupt Masked. Not Posting\n"); 618 return; 619 } 620 621 DPRINTF(Ethernet, "Posting Interrupt\n"); 622 623 624 if (interEvent.scheduled()) { 625 interEvent.deschedule(); 626 } 627 628 if (rdtrEvent.scheduled()) { 629 regs.icr.rxt0(1); 630 rdtrEvent.deschedule(); 631 } 632 if (radvEvent.scheduled()) { 633 regs.icr.rxt0(1); 634 radvEvent.deschedule(); 635 } 636 if (tadvEvent.scheduled()) { 637 regs.icr.txdw(1); 638 tadvEvent.deschedule(); 639 } 640 if (tidvEvent.scheduled()) { 641 regs.icr.txdw(1); 642 tidvEvent.deschedule(); 643 } 644 645 regs.icr.int_assert(1); 646 DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. 
Vector %#x\n", 647 regs.icr()); 648 649 intrPost(); 650 651} 652 653void 654IGbE::cpuClearInt() 655{ 656 if (regs.icr.int_assert()) { 657 regs.icr.int_assert(0); 658 DPRINTF(EthernetIntr, "EINT: Clearing interrupt to CPU now. Vector %#x\n", 659 regs.icr()); 660 intrClear(); 661 } 662} 663 664void 665IGbE::chkInterrupt() 666{ 667 DPRINTF(Ethernet, "Checking interrupts icr: %#x imr: %#x\n", regs.icr(), 668 regs.imr); 669 // Check if we need to clear the cpu interrupt 670 if (!(regs.icr() & regs.imr)) { 671 DPRINTF(Ethernet, "Mask cleaned all interrupts\n"); 672 if (interEvent.scheduled()) 673 interEvent.deschedule(); 674 if (regs.icr.int_assert()) 675 cpuClearInt(); 676 } 677 DPRINTF(Ethernet, "ITR = %#X itr.interval = %#X\n", regs.itr(), regs.itr.interval()); 678 679 if (regs.icr() & regs.imr) { 680 if (regs.itr.interval() == 0) { 681 cpuPostInt(); 682 } else { 683 DPRINTF(Ethernet, "Possibly scheduling interrupt because of imr write\n"); 684 if (!interEvent.scheduled()) { 685 DPRINTF(Ethernet, "Scheduling for %d\n", curTick + Clock::Int::ns 686 * 256 * regs.itr.interval()); 687 interEvent.schedule(curTick + Clock::Int::ns * 256 * regs.itr.interval()); 688 } 689 } 690 } 691 692 693} 694 695 696IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s) 697 : DescCache<RxDesc>(i, n, s), pktDone(false), pktEvent(this) 698 699{ 700} 701 702void 703IGbE::RxDescCache::writePacket(EthPacketPtr packet) 704{ 705 // We shouldn't have to deal with any of these yet 706 DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n", 707 packet->length, igbe->regs.rctl.descSize()); 708 assert(packet->length < igbe->regs.rctl.descSize()); 709 710 assert(unusedCache.size()); 711 //if (!unusedCache.size()) 712 // return false; 713 714 pktPtr = packet; 715 pktDone = false; 716 igbe->dmaWrite(igbe->platform->pciToDma(unusedCache.front()->buf), 717 packet->length, &pktEvent, packet->data); 718} 719 720void 721IGbE::RxDescCache::pktComplete() 722{ 723 assert(unusedCache.size()); 724 
RxDesc *desc; 725 desc = unusedCache.front(); 726 727 uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4 ; 728 desc->len = htole((uint16_t)(pktPtr->length + crcfixup)); 729 DPRINTF(EthernetDesc, "pktPtr->length: %d stripcrc offset: %d value written: %d %d\n", 730 pktPtr->length, crcfixup, 731 htole((uint16_t)(pktPtr->length + crcfixup)), 732 (uint16_t)(pktPtr->length + crcfixup)); 733 734 // no support for anything but starting at 0 735 assert(igbe->regs.rxcsum.pcss() == 0); 736 737 DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n"); 738 739 uint8_t status = RXDS_DD | RXDS_EOP; 740 uint8_t err = 0; 741 742 IpPtr ip(pktPtr); 743 744 if (ip) { 745 DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n", ip->id()); 746 747 if (igbe->regs.rxcsum.ipofld()) { 748 DPRINTF(EthernetDesc, "Checking IP checksum\n"); 749 status |= RXDS_IPCS; 750 desc->csum = htole(cksum(ip)); 751 igbe->rxIpChecksums++; 752 if (cksum(ip) != 0) { 753 err |= RXDE_IPE; 754 DPRINTF(EthernetDesc, "Checksum is bad!!\n"); 755 } 756 } 757 TcpPtr tcp(ip); 758 if (tcp && igbe->regs.rxcsum.tuofld()) { 759 DPRINTF(EthernetDesc, "Checking TCP checksum\n"); 760 status |= RXDS_TCPCS; 761 desc->csum = htole(cksum(tcp)); 762 igbe->rxTcpChecksums++; 763 if (cksum(tcp) != 0) { 764 DPRINTF(EthernetDesc, "Checksum is bad!!\n"); 765 err |= RXDE_TCPE; 766 } 767 } 768 769 UdpPtr udp(ip); 770 if (udp && igbe->regs.rxcsum.tuofld()) { 771 DPRINTF(EthernetDesc, "Checking UDP checksum\n"); 772 status |= RXDS_UDPCS; 773 desc->csum = htole(cksum(udp)); 774 igbe->rxUdpChecksums++; 775 if (cksum(udp) != 0) { 776 DPRINTF(EthernetDesc, "Checksum is bad!!\n"); 777 err |= RXDE_TCPE; 778 } 779 } 780 } else { // if ip 781 DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n"); 782 } 783 784 785 desc->status = htole(status); 786 desc->errors = htole(err); 787 788 // No vlan support at this point... 
just set it to 0 789 desc->vlan = 0; 790 791 // Deal with the rx timer interrupts 792 if (igbe->regs.rdtr.delay()) { 793 DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n", 794 igbe->regs.rdtr.delay() * igbe->intClock()); 795 igbe->rdtrEvent.reschedule(curTick + igbe->regs.rdtr.delay() * 796 igbe->intClock(),true); 797 } 798 799 if (igbe->regs.radv.idv()) { 800 DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n", 801 igbe->regs.radv.idv() * igbe->intClock()); 802 if (!igbe->radvEvent.scheduled()) { 803 igbe->radvEvent.schedule(curTick + igbe->regs.radv.idv() * 804 igbe->intClock()); 805 } 806 } 807 808 // if neither radv or rdtr, maybe itr is set... 809 if (!igbe->regs.rdtr.delay() && !igbe->regs.radv.idv()) { 810 DPRINTF(EthernetSM, "RXS: Receive interrupt delay disabled, posting IT_RXT\n"); 811 igbe->postInterrupt(IT_RXT); 812 } 813 814 // If the packet is small enough, interrupt appropriately 815 // I wonder if this is delayed or not?! 816 if (pktPtr->length <= igbe->regs.rsrpd.idv()) { 817 DPRINTF(EthernetSM, "RXS: Posting IT_SRPD beacuse small packet received\n"); 818 igbe->postInterrupt(IT_SRPD); 819 } 820 821 DPRINTF(EthernetDesc, "Processing of this descriptor complete\n"); 822 unusedCache.pop_front(); 823 usedCache.push_back(desc); 824 825 826 pktPtr = NULL; 827 enableSm(); 828 pktDone = true; 829 igbe->checkDrain(); 830 831} 832 833void 834IGbE::RxDescCache::enableSm() 835{ 836 if (!igbe->drainEvent) { 837 igbe->rxTick = true; 838 igbe->restartClock(); 839 } 840} 841 842bool 843IGbE::RxDescCache::packetDone() 844{ 845 if (pktDone) { 846 pktDone = false; 847 return true; 848 } 849 return false; 850} 851 852bool 853IGbE::RxDescCache::hasOutstandingEvents() 854{ 855 return pktEvent.scheduled() || wbEvent.scheduled() || 856 fetchEvent.scheduled(); 857} 858 859void 860IGbE::RxDescCache::serialize(std::ostream &os) 861{ 862 DescCache<RxDesc>::serialize(os); 863 SERIALIZE_SCALAR(pktDone); 864} 865 866void 867IGbE::RxDescCache::unserialize(Checkpoint *cp, const 
std::string §ion) 868{ 869 DescCache<RxDesc>::unserialize(cp, section); 870 UNSERIALIZE_SCALAR(pktDone); 871} 872 873 874///////////////////////////////////// IGbE::TxDesc ///////////////////////////////// 875 876IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s) 877 : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false), pktWaiting(false), 878 pktEvent(this) 879 880{ 881} 882 883int 884IGbE::TxDescCache::getPacketSize() 885{ 886 assert(unusedCache.size()); 887 888 TxDesc *desc; 889 890 DPRINTF(EthernetDesc, "Starting processing of descriptor\n"); 891 892 while (unusedCache.size() && TxdOp::isContext(unusedCache.front())) { 893 DPRINTF(EthernetDesc, "Got context descriptor type... skipping\n"); 894 895 // I think we can just ignore these for now? 896 desc = unusedCache.front(); 897 DPRINTF(EthernetDesc, "Descriptor upper: %#x lower: %#X\n", desc->d1, 898 desc->d2); 899 // is this going to be a tcp or udp packet? 900 isTcp = TxdOp::tcp(desc) ? true : false; 901 902 // make sure it's ipv4 903 //assert(TxdOp::ip(desc)); 904 905 TxdOp::setDd(desc); 906 unusedCache.pop_front(); 907 usedCache.push_back(desc); 908 } 909 910 if (!unusedCache.size()) 911 return -1; 912 913 DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n", 914 TxdOp::getLen(unusedCache.front())); 915 916 return TxdOp::getLen(unusedCache.front()); 917} 918 919void 920IGbE::TxDescCache::getPacketData(EthPacketPtr p) 921{ 922 assert(unusedCache.size()); 923 924 TxDesc *desc; 925 desc = unusedCache.front(); 926 927 assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc)); 928 929 pktPtr = p; 930 931 pktWaiting = true; 932 933 DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d\n", p->length); 934 igbe->dmaRead(igbe->platform->pciToDma(TxdOp::getBuf(desc)), 935 TxdOp::getLen(desc), &pktEvent, p->data + p->length); 936 937 938} 939 940void 941IGbE::TxDescCache::pktComplete() 942{ 943 944 TxDesc *desc; 945 assert(unusedCache.size()); 946 assert(pktPtr); 947 948 
DPRINTF(EthernetDesc, "DMA of packet complete\n"); 949 950 951 desc = unusedCache.front(); 952 assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc)); 953 954 DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2); 955 956 if (!TxdOp::eop(desc)) { 957 pktPtr->length += TxdOp::getLen(desc); 958 unusedCache.pop_front(); 959 usedCache.push_back(desc); 960 pktDone = true; 961 pktWaiting = false; 962 pktMultiDesc = true; 963 964 DPRINTF(EthernetDesc, "Partial Packet Descriptor of %d bytes Done\n", 965 pktPtr->length); 966 pktPtr = NULL; 967 968 enableSm(); 969 igbe->checkDrain(); 970 return; 971 } 972 pktMultiDesc = false; 973 974 // Set the length of the data in the EtherPacket 975 pktPtr->length += TxdOp::getLen(desc); 976 977 // no support for vlans 978 assert(!TxdOp::vle(desc)); 979 980 // we alway report status 981 assert(TxdOp::rs(desc)); 982 983 // we only support single packet descriptors at this point 984 assert(TxdOp::eop(desc)); 985 986 // set that this packet is done 987 TxdOp::setDd(desc); 988 989 DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2); 990 991 if (DTRACE(EthernetDesc)) { 992 IpPtr ip(pktPtr); 993 if (ip) 994 DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n", 995 ip->id()); 996 else 997 DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n"); 998 } 999 1000 // Checksums are only ofloaded for new descriptor types 1001 if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) { 1002 DPRINTF(EthernetDesc, "Calculating checksums for packet\n"); 1003 IpPtr ip(pktPtr); 1004 assert(ip); 1005 if (TxdOp::ixsm(desc)) { 1006 ip->sum(0); 1007 ip->sum(cksum(ip)); 1008 igbe->txIpChecksums++; 1009 DPRINTF(EthernetDesc, "Calculated IP checksum\n"); 1010 } 1011 if (TxdOp::txsm(desc)) { 1012 TcpPtr tcp(ip); 1013 UdpPtr udp(ip); 1014 if (tcp) { 1015 tcp->sum(0); 1016 tcp->sum(cksum(tcp)); 1017 igbe->txTcpChecksums++; 1018 DPRINTF(EthernetDesc, "Calculated TCP 
checksum\n"); 1019 } else if (udp) { 1020 assert(udp); 1021 udp->sum(0); 1022 udp->sum(cksum(udp)); 1023 igbe->txUdpChecksums++; 1024 DPRINTF(EthernetDesc, "Calculated UDP checksum\n"); 1025 } else { 1026 panic("Told to checksum, but don't know how\n"); 1027 } 1028 } 1029 } 1030 1031 if (TxdOp::ide(desc)) { 1032 // Deal with the rx timer interrupts 1033 DPRINTF(EthernetDesc, "Descriptor had IDE set\n"); 1034 if (igbe->regs.tidv.idv()) { 1035 DPRINTF(EthernetDesc, "setting tidv\n"); 1036 igbe->tidvEvent.reschedule(curTick + igbe->regs.tidv.idv() * 1037 igbe->intClock(), true); 1038 } 1039 1040 if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) { 1041 DPRINTF(EthernetDesc, "setting tadv\n"); 1042 if (!igbe->tadvEvent.scheduled()) { 1043 igbe->tadvEvent.schedule(curTick + igbe->regs.tadv.idv() * 1044 igbe->intClock()); 1045 } 1046 } 1047 } 1048 1049 1050 1051 unusedCache.pop_front(); 1052 usedCache.push_back(desc); 1053 pktDone = true; 1054 pktWaiting = false; 1055 pktPtr = NULL; 1056 1057 DPRINTF(EthernetDesc, "Descriptor Done\n"); 1058 1059 if (igbe->regs.txdctl.wthresh() == 0) { 1060 DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n"); 1061 writeback(0); 1062 } else if (igbe->regs.txdctl.wthresh() >= usedCache.size()) { 1063 DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n"); 1064 writeback((igbe->cacheBlockSize()-1)>>4); 1065 } 1066 enableSm(); 1067 igbe->checkDrain(); 1068} 1069 1070void 1071IGbE::TxDescCache::serialize(std::ostream &os) 1072{ 1073 DescCache<TxDesc>::serialize(os); 1074 SERIALIZE_SCALAR(pktDone); 1075 SERIALIZE_SCALAR(isTcp); 1076 SERIALIZE_SCALAR(pktWaiting); 1077 SERIALIZE_SCALAR(pktMultiDesc); 1078} 1079 1080void 1081IGbE::TxDescCache::unserialize(Checkpoint *cp, const std::string §ion) 1082{ 1083 DescCache<TxDesc>::unserialize(cp, section); 1084 UNSERIALIZE_SCALAR(pktDone); 1085 UNSERIALIZE_SCALAR(isTcp); 1086 UNSERIALIZE_SCALAR(pktWaiting); 1087 UNSERIALIZE_SCALAR(pktMultiDesc); 1088} 1089 1090bool 
1091IGbE::TxDescCache::packetAvailable() 1092{ 1093 if (pktDone) { 1094 pktDone = false; 1095 return true; 1096 } 1097 return false; 1098} 1099 1100void 1101IGbE::TxDescCache::enableSm() 1102{ 1103 if (!igbe->drainEvent) { 1104 igbe->txTick = true; 1105 igbe->restartClock(); 1106 } 1107} 1108 1109bool 1110IGbE::TxDescCache::hasOutstandingEvents() 1111{ 1112 return pktEvent.scheduled() || wbEvent.scheduled() || 1113 fetchEvent.scheduled(); 1114} 1115 1116 1117///////////////////////////////////// IGbE ///////////////////////////////// 1118 1119void 1120IGbE::restartClock() 1121{ 1122 if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) && getState() == 1123 SimObject::Running) 1124 tickEvent.schedule((curTick/ticks(1)) * ticks(1) + ticks(1)); 1125} 1126 1127unsigned int 1128IGbE::drain(Event *de) 1129{ 1130 unsigned int count; 1131 count = pioPort->drain(de) + dmaPort->drain(de); 1132 if (rxDescCache.hasOutstandingEvents() || 1133 txDescCache.hasOutstandingEvents()) { 1134 count++; 1135 drainEvent = de; 1136 } 1137 1138 txFifoTick = false; 1139 txTick = false; 1140 rxTick = false; 1141 1142 if (tickEvent.scheduled()) 1143 tickEvent.deschedule(); 1144 1145 if (count) 1146 changeState(Draining); 1147 else 1148 changeState(Drained); 1149 1150 return count; 1151} 1152 1153void 1154IGbE::resume() 1155{ 1156 SimObject::resume(); 1157 1158 txFifoTick = true; 1159 txTick = true; 1160 rxTick = true; 1161 1162 restartClock(); 1163} 1164 1165void 1166IGbE::checkDrain() 1167{ 1168 if (!drainEvent) 1169 return; 1170 1171 txFifoTick = false; 1172 txTick = false; 1173 rxTick = false; 1174 if (!rxDescCache.hasOutstandingEvents() && 1175 !txDescCache.hasOutstandingEvents()) { 1176 drainEvent->process(); 1177 drainEvent = NULL; 1178 } 1179} 1180 1181void 1182IGbE::txStateMachine() 1183{ 1184 if (!regs.tctl.en()) { 1185 txTick = false; 1186 DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n"); 1187 return; 1188 } 1189 1190 // If we have a packet available and it's 
length is not 0 (meaning it's not 1191 // a multidescriptor packet) put it in the fifo, otherwise an the next 1192 // iteration we'll get the rest of the data 1193 if (txPacket && txDescCache.packetAvailable() 1194 && !txDescCache.packetMultiDesc() && txPacket->length) { 1195 bool success; 1196 1197 DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n"); 1198 success = txFifo.push(txPacket); 1199 txFifoTick = true && !drainEvent; 1200 assert(success); 1201 txPacket = NULL; 1202 txDescCache.writeback((cacheBlockSize()-1)>>4); 1203 return; 1204 } 1205 1206 // Only support descriptor granularity 1207 assert(regs.txdctl.gran()); 1208 if (regs.txdctl.lwthresh() && txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) { 1209 DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n"); 1210 postInterrupt(IT_TXDLOW); 1211 } 1212 1213 if (!txPacket) { 1214 txPacket = new EthPacketData(16384); 1215 } 1216 1217 if (!txDescCache.packetWaiting()) { 1218 if (txDescCache.descLeft() == 0) { 1219 postInterrupt(IT_TXQE); 1220 txDescCache.writeback(0); 1221 txDescCache.fetchDescriptors(); 1222 DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing " 1223 "writeback stopping ticking and posting TXQE\n"); 1224 txTick = false; 1225 return; 1226 } 1227 1228 1229 if (!(txDescCache.descUnused())) { 1230 txDescCache.fetchDescriptors(); 1231 DPRINTF(EthernetSM, "TXS: No descriptors available in cache, fetching and stopping ticking\n"); 1232 txTick = false; 1233 return; 1234 } 1235 1236 1237 int size; 1238 size = txDescCache.getPacketSize(); 1239 if (size > 0 && txFifo.avail() > size) { 1240 DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and begining " 1241 "DMA of next packet\n", size); 1242 txFifo.reserve(size); 1243 txDescCache.getPacketData(txPacket); 1244 } else if (size <= 0) { 1245 DPRINTF(EthernetSM, "TXS: getPacketSize returned: %d\n", size); 1246 DPRINTF(EthernetSM, "TXS: No packets to get, writing back used descriptors\n"); 1247 txDescCache.writeback(0); 1248 } 
else { 1249 DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space " 1250 "available in FIFO\n"); 1251 txTick = false; 1252 } 1253 1254 1255 return; 1256 } 1257 DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n"); 1258 txTick = false; 1259} 1260 1261bool 1262IGbE::ethRxPkt(EthPacketPtr pkt) 1263{ 1264 rxBytes += pkt->length; 1265 rxPackets++; 1266 1267 DPRINTF(Ethernet, "RxFIFO: Receiving pcakte from wire\n"); 1268 1269 if (!regs.rctl.en()) { 1270 DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n"); 1271 return true; 1272 } 1273 1274 // restart the state machines if they are stopped 1275 rxTick = true && !drainEvent; 1276 if ((rxTick || txTick) && !tickEvent.scheduled()) { 1277 DPRINTF(EthernetSM, "RXS: received packet into fifo, starting ticking\n"); 1278 restartClock(); 1279 } 1280 1281 if (!rxFifo.push(pkt)) { 1282 DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n"); 1283 postInterrupt(IT_RXO, true); 1284 return false; 1285 } 1286 1287 return true; 1288} 1289 1290 1291void 1292IGbE::rxStateMachine() 1293{ 1294 if (!regs.rctl.en()) { 1295 rxTick = false; 1296 DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n"); 1297 return; 1298 } 1299 1300 // If the packet is done check for interrupts/descriptors/etc 1301 if (rxDescCache.packetDone()) { 1302 rxDmaPacket = false; 1303 DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n"); 1304 int descLeft = rxDescCache.descLeft(); 1305 switch (regs.rctl.rdmts()) { 1306 case 2: if (descLeft > .125 * regs.rdlen()) break; 1307 case 1: if (descLeft > .250 * regs.rdlen()) break; 1308 case 0: if (descLeft > .500 * regs.rdlen()) break; 1309 DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) because of descriptors left\n"); 1310 postInterrupt(IT_RXDMT); 1311 break; 1312 } 1313 1314 if (descLeft == 0) { 1315 rxDescCache.writeback(0); 1316 DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing" 1317 " writeback and stopping ticking\n"); 1318 rxTick = false; 1319 } 1320 1321 // only 
support descriptor granulaties 1322 assert(regs.rxdctl.gran()); 1323 1324 if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) { 1325 DPRINTF(EthernetSM, "RXS: Writing back because WTHRESH >= descUsed\n"); 1326 if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4)) 1327 rxDescCache.writeback(regs.rxdctl.wthresh()-1); 1328 else 1329 rxDescCache.writeback((cacheBlockSize()-1)>>4); 1330 } 1331 1332 if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) && 1333 ((rxDescCache.descLeft() - rxDescCache.descUnused()) > regs.rxdctl.hthresh())) { 1334 DPRINTF(EthernetSM, "RXS: Fetching descriptors because descUnused < PTHRESH\n"); 1335 rxDescCache.fetchDescriptors(); 1336 } 1337 1338 if (rxDescCache.descUnused() == 0) { 1339 rxDescCache.fetchDescriptors(); 1340 DPRINTF(EthernetSM, "RXS: No descriptors available in cache, " 1341 "fetching descriptors and stopping ticking\n"); 1342 rxTick = false; 1343 } 1344 return; 1345 } 1346 1347 if (rxDmaPacket) { 1348 DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n"); 1349 rxTick = false; 1350 return; 1351 } 1352 1353 if (!rxDescCache.descUnused()) { 1354 rxDescCache.fetchDescriptors(); 1355 DPRINTF(EthernetSM, "RXS: No descriptors available in cache, stopping ticking\n"); 1356 rxTick = false; 1357 DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n"); 1358 return; 1359 } 1360 1361 if (rxFifo.empty()) { 1362 DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n"); 1363 rxTick = false; 1364 return; 1365 } 1366 1367 EthPacketPtr pkt; 1368 pkt = rxFifo.front(); 1369 1370 1371 rxDescCache.writePacket(pkt); 1372 DPRINTF(EthernetSM, "RXS: Writing packet into memory\n"); 1373 DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n"); 1374 rxFifo.pop(); 1375 DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n"); 1376 rxTick = false; 1377 rxDmaPacket = true; 1378} 1379 1380void 1381IGbE::txWire() 1382{ 1383 if (txFifo.empty()) { 1384 txFifoTick = false; 1385 return; 1386 } 1387 1388 1389 if 
(etherInt->sendPacket(txFifo.front())) { 1390 if (DTRACE(EthernetSM)) { 1391 IpPtr ip(txFifo.front()); 1392 if (ip) 1393 DPRINTF(EthernetSM, "Transmitting Ip packet with Id=%d\n", 1394 ip->id()); 1395 else 1396 DPRINTF(EthernetSM, "Transmitting Non-Ip packet\n"); 1397 } 1398 DPRINTF(EthernetSM, "TxFIFO: Successful transmit, bytes available in fifo: %d\n", 1399 txFifo.avail()); 1400 1401 txBytes += txFifo.front()->length; 1402 txPackets++; 1403 1404 txFifo.pop(); 1405 } else { 1406 // We'll get woken up when the packet ethTxDone() gets called 1407 txFifoTick = false; 1408 } 1409} 1410 1411void 1412IGbE::tick() 1413{ 1414 DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n"); 1415 1416 if (rxTick) 1417 rxStateMachine(); 1418 1419 if (txTick) 1420 txStateMachine(); 1421 1422 if (txFifoTick) 1423 txWire(); 1424 1425 1426 if (rxTick || txTick || txFifoTick) 1427 tickEvent.schedule(curTick + ticks(1)); 1428} 1429 1430void 1431IGbE::ethTxDone() 1432{ 1433 // restart the tx state machines if they are stopped 1434 // fifo to send another packet 1435 // tx sm to put more data into the fifo 1436 txFifoTick = true && !drainEvent; 1437 if (txDescCache.descLeft() != 0 && !drainEvent) 1438 txTick = true; 1439 1440 restartClock(); 1441 txWire(); 1442 DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n"); 1443} 1444 1445void 1446IGbE::serialize(std::ostream &os) 1447{ 1448 PciDev::serialize(os); 1449 1450 regs.serialize(os); 1451 SERIALIZE_SCALAR(eeOpBits); 1452 SERIALIZE_SCALAR(eeAddrBits); 1453 SERIALIZE_SCALAR(eeDataBits); 1454 SERIALIZE_SCALAR(eeOpcode); 1455 SERIALIZE_SCALAR(eeAddr); 1456 SERIALIZE_SCALAR(lastInterrupt); 1457 SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE); 1458 1459 rxFifo.serialize("rxfifo", os); 1460 txFifo.serialize("txfifo", os); 1461 1462 bool txPktExists = txPacket; 1463 SERIALIZE_SCALAR(txPktExists); 1464 if (txPktExists) 1465 txPacket->serialize("txpacket", os); 1466 1467 Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0, 1468 
inter_time = 0; 1469 1470 if (rdtrEvent.scheduled()) 1471 rdtr_time = rdtrEvent.when(); 1472 SERIALIZE_SCALAR(rdtr_time); 1473 1474 if (radvEvent.scheduled()) 1475 radv_time = radvEvent.when(); 1476 SERIALIZE_SCALAR(radv_time); 1477 1478 if (tidvEvent.scheduled()) 1479 tidv_time = tidvEvent.when(); 1480 SERIALIZE_SCALAR(tidv_time); 1481 1482 if (tadvEvent.scheduled()) 1483 tadv_time = tadvEvent.when(); 1484 SERIALIZE_SCALAR(tadv_time); 1485 1486 if (interEvent.scheduled()) 1487 inter_time = interEvent.when(); 1488 SERIALIZE_SCALAR(inter_time); 1489 1490 nameOut(os, csprintf("%s.TxDescCache", name())); 1491 txDescCache.serialize(os); 1492 1493 nameOut(os, csprintf("%s.RxDescCache", name())); 1494 rxDescCache.serialize(os); 1495} 1496 1497void 1498IGbE::unserialize(Checkpoint *cp, const std::string §ion) 1499{ 1500 PciDev::unserialize(cp, section); 1501 1502 regs.unserialize(cp, section); 1503 UNSERIALIZE_SCALAR(eeOpBits); 1504 UNSERIALIZE_SCALAR(eeAddrBits); 1505 UNSERIALIZE_SCALAR(eeDataBits); 1506 UNSERIALIZE_SCALAR(eeOpcode); 1507 UNSERIALIZE_SCALAR(eeAddr); 1508 UNSERIALIZE_SCALAR(lastInterrupt); 1509 UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE); 1510 1511 rxFifo.unserialize("rxfifo", cp, section); 1512 txFifo.unserialize("txfifo", cp, section); 1513 1514 bool txPktExists; 1515 UNSERIALIZE_SCALAR(txPktExists); 1516 if (txPktExists) { 1517 txPacket = new EthPacketData(16384); 1518 txPacket->unserialize("txpacket", cp, section); 1519 } 1520 1521 rxTick = true; 1522 txTick = true; 1523 txFifoTick = true; 1524 1525 Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time; 1526 UNSERIALIZE_SCALAR(rdtr_time); 1527 UNSERIALIZE_SCALAR(radv_time); 1528 UNSERIALIZE_SCALAR(tidv_time); 1529 UNSERIALIZE_SCALAR(tadv_time); 1530 UNSERIALIZE_SCALAR(inter_time); 1531 1532 if (rdtr_time) 1533 rdtrEvent.schedule(rdtr_time); 1534 1535 if (radv_time) 1536 radvEvent.schedule(radv_time); 1537 1538 if (tidv_time) 1539 tidvEvent.schedule(tidv_time); 1540 1541 if (tadv_time) 1542 
tadvEvent.schedule(tadv_time); 1543 1544 if (inter_time) 1545 interEvent.schedule(inter_time); 1546 1547 txDescCache.unserialize(cp, csprintf("%s.TxDescCache", section)); 1548 1549 rxDescCache.unserialize(cp, csprintf("%s.RxDescCache", section)); 1550} 1551 1552IGbE * 1553IGbEParams::create() 1554{ 1555 return new IGbE(this); 1556} 1557