// i8254xGBe.cc — revision 4263
1/* 2 * Copyright (c) 2006 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 * 28 * Authors: Ali Saidi 29 */ 30 31/* @file 32 * Device model for Intel's 8254x line of gigabit ethernet controllers. 33 * In particular an 82547 revision 2 (82547GI) MAC because it seems to have the 34 * fewest workarounds in the driver. It will probably work with most of the 35 * other MACs with slight modifications. 
36 */ 37 38 39/* 40 * @todo really there are multiple dma engines.. we should implement them. 41 */ 42 43#include "base/inet.hh" 44#include "base/trace.hh" 45#include "dev/i8254xGBe.hh" 46#include "mem/packet.hh" 47#include "mem/packet_access.hh" 48#include "sim/builder.hh" 49#include "sim/stats.hh" 50#include "sim/system.hh" 51 52#include <algorithm> 53 54using namespace iGbReg; 55using namespace Net; 56 57IGbE::IGbE(Params *p) 58 : PciDev(p), etherInt(NULL), useFlowControl(p->use_flow_control), 59 rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), rxTick(false), 60 txTick(false), rdtrEvent(this), radvEvent(this), tadvEvent(this), 61 tidvEvent(this), tickEvent(this), interEvent(this), 62 rxDescCache(this, name()+".TxDesc", p->rx_desc_cache_size), 63 txDescCache(this, name()+".RxDesc", p->tx_desc_cache_size), clock(p->clock) 64{ 65 // Initialized internal registers per Intel documentation 66 // All registers intialized to 0 by per register constructor 67 regs.ctrl.fd(1); 68 regs.ctrl.lrst(1); 69 regs.ctrl.speed(2); 70 regs.ctrl.frcspd(1); 71 regs.sts.speed(3); // Say we're 1000Mbps 72 regs.sts.fd(1); // full duplex 73 regs.eecd.fwe(1); 74 regs.eecd.ee_type(1); 75 regs.imr = 0; 76 regs.iam = 0; 77 regs.rxdctl.gran(1); 78 regs.rxdctl.wthresh(1); 79 regs.fcrth(1); 80 81 regs.pba.rxa(0x30); 82 regs.pba.txa(0x10); 83 84 eeOpBits = 0; 85 eeAddrBits = 0; 86 eeDataBits = 0; 87 eeOpcode = 0; 88 89 // clear all 64 16 bit words of the eeprom 90 memset(&flash, 0, EEPROM_SIZE*2); 91 92 //We'll need to instert the MAC address into the flash 93 flash[0] = 0xA4A4; 94 flash[1] = 0xB6B6; 95 flash[2] = 0xC8C8; 96 97 uint16_t csum = 0; 98 for (int x = 0; x < EEPROM_SIZE; x++) 99 csum += flash[x]; 100 101 // Magic happy checksum value 102 flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum)); 103 104 rxFifo.clear(); 105 txFifo.clear(); 106} 107 108 109Tick 110IGbE::writeConfig(PacketPtr pkt) 111{ 112 int offset = pkt->getAddr() & PCI_CONFIG_SIZE; 113 if (offset < 
PCI_DEVICE_SPECIFIC) 114 PciDev::writeConfig(pkt); 115 else 116 panic("Device specific PCI config space not implemented.\n"); 117 118 /// 119 /// Some work may need to be done here based for the pci COMMAND bits. 120 /// 121 122 return pioDelay; 123} 124 125Tick 126IGbE::read(PacketPtr pkt) 127{ 128 int bar; 129 Addr daddr; 130 131 if (!getBAR(pkt->getAddr(), bar, daddr)) 132 panic("Invalid PCI memory access to unmapped memory.\n"); 133 134 // Only Memory register BAR is allowed 135 assert(bar == 0); 136 137 // Only 32bit accesses allowed 138 assert(pkt->getSize() == 4); 139 140 //DPRINTF(Ethernet, "Read device register %#X\n", daddr); 141 142 pkt->allocate(); 143 144 /// 145 /// Handle read of register here 146 /// 147 148 149 switch (daddr) { 150 case REG_CTRL: 151 pkt->set<uint32_t>(regs.ctrl()); 152 break; 153 case REG_STATUS: 154 pkt->set<uint32_t>(regs.sts()); 155 break; 156 case REG_EECD: 157 pkt->set<uint32_t>(regs.eecd()); 158 break; 159 case REG_EERD: 160 pkt->set<uint32_t>(regs.eerd()); 161 break; 162 case REG_CTRL_EXT: 163 pkt->set<uint32_t>(regs.ctrl_ext()); 164 break; 165 case REG_MDIC: 166 pkt->set<uint32_t>(regs.mdic()); 167 break; 168 case REG_ICR: 169 pkt->set<uint32_t>(regs.icr()); 170 if (regs.icr.int_assert()) 171 regs.imr &= regs.iam; 172 if (regs.imr == 0 || (regs.icr.int_assert() && regs.ctrl_ext.iame())) { 173 regs.icr(0); 174 cpuClearInt(); 175 } 176 break; 177 case REG_ITR: 178 pkt->set<uint32_t>(regs.itr()); 179 break; 180 case REG_RCTL: 181 pkt->set<uint32_t>(regs.rctl()); 182 break; 183 case REG_FCTTV: 184 pkt->set<uint32_t>(regs.fcttv()); 185 break; 186 case REG_TCTL: 187 pkt->set<uint32_t>(regs.tctl()); 188 break; 189 case REG_PBA: 190 pkt->set<uint32_t>(regs.pba()); 191 break; 192 case REG_WUC: 193 case REG_LEDCTL: 194 pkt->set<uint32_t>(0); // We don't care, so just return 0 195 break; 196 case REG_FCRTL: 197 pkt->set<uint32_t>(regs.fcrtl()); 198 break; 199 case REG_FCRTH: 200 pkt->set<uint32_t>(regs.fcrth()); 201 break; 202 case 
REG_RDBAL: 203 pkt->set<uint32_t>(regs.rdba.rdbal()); 204 break; 205 case REG_RDBAH: 206 pkt->set<uint32_t>(regs.rdba.rdbah()); 207 break; 208 case REG_RDLEN: 209 pkt->set<uint32_t>(regs.rdlen()); 210 break; 211 case REG_RDH: 212 pkt->set<uint32_t>(regs.rdh()); 213 break; 214 case REG_RDT: 215 pkt->set<uint32_t>(regs.rdt()); 216 break; 217 case REG_RDTR: 218 pkt->set<uint32_t>(regs.rdtr()); 219 if (regs.rdtr.fpd()) { 220 rxDescCache.writeback(0); 221 postInterrupt(IT_RXT); 222 regs.rdtr.fpd(0); 223 } 224 if (regs.rdtr.delay()) { 225 Tick t = regs.rdtr.delay() * Clock::Int::ns * 1024; 226 if (rdtrEvent.scheduled()) 227 rdtrEvent.reschedule(curTick + t); 228 else 229 rdtrEvent.schedule(curTick + t); 230 } 231 break; 232 case REG_RADV: 233 pkt->set<uint32_t>(regs.radv()); 234 break; 235 case REG_TDBAL: 236 pkt->set<uint32_t>(regs.tdba.tdbal()); 237 break; 238 case REG_TDBAH: 239 pkt->set<uint32_t>(regs.tdba.tdbah()); 240 break; 241 case REG_TDLEN: 242 pkt->set<uint32_t>(regs.tdlen()); 243 break; 244 case REG_TDH: 245 pkt->set<uint32_t>(regs.tdh()); 246 break; 247 case REG_TDT: 248 pkt->set<uint32_t>(regs.tdt()); 249 break; 250 case REG_TIDV: 251 pkt->set<uint32_t>(regs.tidv()); 252 break; 253 case REG_TXDCTL: 254 pkt->set<uint32_t>(regs.txdctl()); 255 break; 256 case REG_TADV: 257 pkt->set<uint32_t>(regs.tadv()); 258 break; 259 case REG_RXCSUM: 260 pkt->set<uint32_t>(regs.rxcsum()); 261 break; 262 case REG_MANC: 263 pkt->set<uint32_t>(regs.manc()); 264 break; 265 default: 266 if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) && 267 !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) && 268 !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4)) && 269 !(daddr >= REG_CRCERRS && daddr < (REG_CRCERRS + STATS_REGS_SIZE))) 270 panic("Read request to unknown register number: %#x\n", daddr); 271 else 272 pkt->set<uint32_t>(0); 273 }; 274 275 pkt->result = Packet::Success; 276 return pioDelay; 277} 278 279Tick 
280IGbE::write(PacketPtr pkt) 281{ 282 int bar; 283 Addr daddr; 284 285 286 if (!getBAR(pkt->getAddr(), bar, daddr)) 287 panic("Invalid PCI memory access to unmapped memory.\n"); 288 289 // Only Memory register BAR is allowed 290 assert(bar == 0); 291 292 // Only 32bit accesses allowed 293 assert(pkt->getSize() == sizeof(uint32_t)); 294 295 //DPRINTF(Ethernet, "Wrote device register %#X value %#X\n", daddr, pkt->get<uint32_t>()); 296 297 /// 298 /// Handle write of register here 299 /// 300 uint32_t val = pkt->get<uint32_t>(); 301 302 Regs::RCTL oldrctl; 303 Regs::TCTL oldtctl; 304 305 switch (daddr) { 306 case REG_CTRL: 307 regs.ctrl = val; 308 if (regs.ctrl.tfce()) 309 warn("TX Flow control enabled, should implement\n"); 310 if (regs.ctrl.rfce()) 311 warn("RX Flow control enabled, should implement\n"); 312 break; 313 case REG_CTRL_EXT: 314 regs.ctrl_ext = val; 315 break; 316 case REG_STATUS: 317 regs.sts = val; 318 break; 319 case REG_EECD: 320 int oldClk; 321 oldClk = regs.eecd.sk(); 322 regs.eecd = val; 323 // See if this is a eeprom access and emulate accordingly 324 if (!oldClk && regs.eecd.sk()) { 325 if (eeOpBits < 8) { 326 eeOpcode = eeOpcode << 1 | regs.eecd.din(); 327 eeOpBits++; 328 } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) { 329 eeAddr = eeAddr << 1 | regs.eecd.din(); 330 eeAddrBits++; 331 } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) { 332 assert(eeAddr>>1 < EEPROM_SIZE); 333 DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n", 334 flash[eeAddr>>1] >> eeDataBits & 0x1, flash[eeAddr>>1]); 335 regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1); 336 eeDataBits++; 337 } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) { 338 regs.eecd.dout(0); 339 eeDataBits++; 340 } else 341 panic("What's going on with eeprom interface? 
opcode:" 342 " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode, 343 (uint32_t)eeOpBits, (uint32_t)eeAddr, 344 (uint32_t)eeAddrBits, (uint32_t)eeDataBits); 345 346 // Reset everything for the next command 347 if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) || 348 (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) { 349 eeOpBits = 0; 350 eeAddrBits = 0; 351 eeDataBits = 0; 352 eeOpcode = 0; 353 eeAddr = 0; 354 } 355 356 DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n", 357 (uint32_t)eeOpcode, (uint32_t) eeOpBits, 358 (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits); 359 if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI || 360 eeOpcode == EEPROM_RDSR_OPCODE_SPI )) 361 panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode, 362 (uint32_t)eeOpBits); 363 364 365 } 366 // If driver requests eeprom access, immediately give it to it 367 regs.eecd.ee_gnt(regs.eecd.ee_req()); 368 break; 369 case REG_EERD: 370 regs.eerd = val; 371 break; 372 case REG_MDIC: 373 regs.mdic = val; 374 if (regs.mdic.i()) 375 panic("No support for interrupt on mdic complete\n"); 376 if (regs.mdic.phyadd() != 1) 377 panic("No support for reading anything but phy\n"); 378 DPRINTF(Ethernet, "%s phy address %x\n", regs.mdic.op() == 1 ? 
"Writing" 379 : "Reading", regs.mdic.regadd()); 380 switch (regs.mdic.regadd()) { 381 case PHY_PSTATUS: 382 regs.mdic.data(0x796D); // link up 383 break; 384 case PHY_PID: 385 regs.mdic.data(0x02A8); 386 break; 387 case PHY_EPID: 388 regs.mdic.data(0x0380); 389 break; 390 case PHY_GSTATUS: 391 regs.mdic.data(0x7C00); 392 break; 393 case PHY_EPSTATUS: 394 regs.mdic.data(0x3000); 395 break; 396 case PHY_AGC: 397 regs.mdic.data(0x180); // some random length 398 break; 399 default: 400 regs.mdic.data(0); 401 warn("Accessing unknown phy register %d\n", regs.mdic.regadd()); 402 } 403 regs.mdic.r(1); 404 break; 405 case REG_ICR: 406 if (regs.icr.int_assert()) 407 regs.imr &= regs.iam; 408 409 regs.icr = ~bits(val,30,0) & regs.icr(); 410 // if no more bits are set clear the int_asserted bit 411 if (!bits(regs.icr(),31,31)) 412 cpuClearInt(); 413 414 break; 415 case REG_ITR: 416 regs.itr = val; 417 break; 418 case REG_ICS: 419 postInterrupt((IntTypes)val); 420 break; 421 case REG_IMS: 422 regs.imr |= val; 423 chkInterrupt(); 424 break; 425 case REG_IMC: 426 regs.imr &= ~val; 427 chkInterrupt(); 428 break; 429 case REG_IAM: 430 regs.iam = val; 431 break; 432 case REG_RCTL: 433 oldrctl = regs.rctl; 434 regs.rctl = val; 435 if (regs.rctl.rst()) { 436 rxDescCache.reset(); 437 rxFifo.clear(); 438 regs.rctl.rst(0); 439 } 440 if (regs.rctl.en()) 441 rxTick = true; 442 if ((rxTick || txTick) && !tickEvent.scheduled()) 443 tickEvent.schedule(curTick + cycles(1)); 444 break; 445 case REG_FCTTV: 446 regs.fcttv = val; 447 break; 448 case REG_TCTL: 449 regs.tctl = val; 450 oldtctl = regs.tctl; 451 regs.tctl = val; 452 if (regs.tctl.en()) 453 txTick = true; 454 if ((rxTick || txTick) && !tickEvent.scheduled()) 455 tickEvent.schedule(curTick + cycles(1)); 456 if (regs.tctl.en() && !oldtctl.en()) { 457 txDescCache.reset(); 458 } 459 break; 460 case REG_PBA: 461 regs.pba.rxa(val); 462 regs.pba.txa(64 - regs.pba.rxa()); 463 break; 464 case REG_WUC: 465 case REG_LEDCTL: 466 case REG_FCAL: 467 
case REG_FCAH: 468 case REG_FCT: 469 case REG_VET: 470 case REG_AIFS: 471 case REG_TIPG: 472 ; // We don't care, so don't store anything 473 break; 474 case REG_FCRTL: 475 regs.fcrtl = val; 476 break; 477 case REG_FCRTH: 478 regs.fcrth = val; 479 break; 480 case REG_RDBAL: 481 regs.rdba.rdbal( val & ~mask(4)); 482 rxDescCache.areaChanged(); 483 break; 484 case REG_RDBAH: 485 regs.rdba.rdbah(val); 486 rxDescCache.areaChanged(); 487 break; 488 case REG_RDLEN: 489 regs.rdlen = val & ~mask(7); 490 rxDescCache.areaChanged(); 491 break; 492 case REG_RDH: 493 regs.rdh = val; 494 rxDescCache.areaChanged(); 495 break; 496 case REG_RDT: 497 regs.rdt = val; 498 rxTick = true; 499 if ((rxTick || txTick) && !tickEvent.scheduled()) 500 tickEvent.schedule(curTick + cycles(1)); 501 break; 502 case REG_RDTR: 503 regs.rdtr = val; 504 break; 505 case REG_RADV: 506 regs.radv = val; 507 break; 508 case REG_TDBAL: 509 regs.tdba.tdbal( val & ~mask(4)); 510 txDescCache.areaChanged(); 511 break; 512 case REG_TDBAH: 513 regs.tdba.tdbah(val); 514 txDescCache.areaChanged(); 515 break; 516 case REG_TDLEN: 517 regs.tdlen = val & ~mask(7); 518 txDescCache.areaChanged(); 519 break; 520 case REG_TDH: 521 regs.tdh = val; 522 txDescCache.areaChanged(); 523 break; 524 case REG_TDT: 525 regs.tdt = val; 526 txTick = true; 527 if ((rxTick || txTick) && !tickEvent.scheduled()) 528 tickEvent.schedule(curTick + cycles(1)); 529 break; 530 case REG_TIDV: 531 regs.tidv = val; 532 break; 533 case REG_TXDCTL: 534 regs.txdctl = val; 535 break; 536 case REG_TADV: 537 regs.tadv = val; 538 break; 539 case REG_RXCSUM: 540 regs.rxcsum = val; 541 break; 542 case REG_MANC: 543 regs.manc = val; 544 break; 545 default: 546 if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) && 547 !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) && 548 !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4))) 549 panic("Write request to unknown register number: %#x\n", daddr); 550 }; 551 
552 pkt->result = Packet::Success; 553 return pioDelay; 554} 555 556void 557IGbE::postInterrupt(IntTypes t, bool now) 558{ 559 // Interrupt is already pending 560 if (t & regs.icr()) 561 return; 562 563 if (regs.icr() & regs.imr) 564 { 565 // already in an interrupt state, set new int and done 566 regs.icr = regs.icr() | t; 567 } else { 568 regs.icr = regs.icr() | t; 569 if (regs.itr.interval() == 0 || now) { 570 if (now) { 571 if (interEvent.scheduled()) 572 interEvent.deschedule(); 573 } 574 cpuPostInt(); 575 } else { 576 DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for %d ticks\n", 577 Clock::Int::ns * 256 * regs.itr.interval()); 578 assert(!interEvent.scheduled()); 579 interEvent.schedule(curTick + Clock::Int::ns * 256 * regs.itr.interval()); 580 } 581 } 582} 583 584void 585IGbE::cpuPostInt() 586{ 587 if (rdtrEvent.scheduled()) { 588 regs.icr.rxt0(1); 589 rdtrEvent.deschedule(); 590 } 591 if (radvEvent.scheduled()) { 592 regs.icr.rxt0(1); 593 radvEvent.deschedule(); 594 } 595 if (tadvEvent.scheduled()) { 596 regs.icr.txdw(1); 597 tadvEvent.deschedule(); 598 } 599 if (tidvEvent.scheduled()) { 600 regs.icr.txdw(1); 601 tidvEvent.deschedule(); 602 } 603 604 regs.icr.int_assert(1); 605 DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n", 606 regs.icr()); 607 intrPost(); 608} 609 610void 611IGbE::cpuClearInt() 612{ 613 regs.icr.int_assert(0); 614 DPRINTF(EthernetIntr, "EINT: Clearing interrupt to CPU now. 
Vector %#x\n", 615 regs.icr()); 616 intrClear(); 617} 618 619void 620IGbE::chkInterrupt() 621{ 622 // Check if we need to clear the cpu interrupt 623 if (!(regs.icr() & regs.imr)) 624 cpuClearInt(); 625 626 // Check if we need to set the cpu interupt 627 postInterrupt(IT_NONE); 628} 629 630 631IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s) 632 : DescCache<RxDesc>(i, n, s), pktDone(false), pktEvent(this) 633 634{ 635} 636 637bool 638IGbE::RxDescCache::writePacket(EthPacketPtr packet) 639{ 640 // We shouldn't have to deal with any of these yet 641 assert(packet->length < igbe->regs.rctl.descSize()); 642 643 if (!unusedCache.size()) 644 return false; 645 646 pktPtr = packet; 647 648 igbe->dmaWrite(unusedCache.front()->buf, packet->length, &pktEvent, packet->data); 649 return true; 650} 651 652void 653IGbE::RxDescCache::pktComplete() 654{ 655 assert(unusedCache.size()); 656 RxDesc *desc; 657 desc = unusedCache.front(); 658 659 desc->len = pktPtr->length; 660 // no support for anything but starting at 0 661 assert(igbe->regs.rxcsum.pcss() == 0); 662 663 DPRINTF(EthernetDesc, "RxDesc: Packet written to memory updating Descriptor\n"); 664 665 uint8_t status = RXDS_DD | RXDS_EOP; 666 uint8_t err = 0; 667 IpPtr ip(pktPtr); 668 if (ip) { 669 if (igbe->regs.rxcsum.ipofld()) { 670 DPRINTF(EthernetDesc, "RxDesc: Checking IP checksum\n"); 671 status |= RXDS_IPCS; 672 desc->csum = cksum(ip); 673 if (cksum(ip) != 0) { 674 err |= RXDE_IPE; 675 DPRINTF(EthernetDesc, "RxDesc: Checksum is bad!!\n"); 676 } 677 } 678 TcpPtr tcp(ip); 679 if (tcp && igbe->regs.rxcsum.tuofld()) { 680 DPRINTF(EthernetDesc, "RxDesc: Checking TCP checksum\n"); 681 status |= RXDS_TCPCS; 682 desc->csum = cksum(tcp); 683 if (cksum(tcp) != 0) { 684 DPRINTF(EthernetDesc, "RxDesc: Checksum is bad!!\n"); 685 err |= RXDE_TCPE; 686 } 687 } 688 689 UdpPtr udp(ip); 690 if (udp && igbe->regs.rxcsum.tuofld()) { 691 DPRINTF(EthernetDesc, "RxDesc: Checking UDP checksum\n"); 692 status |= RXDS_UDPCS; 693 
desc->csum = cksum(udp); 694 if (cksum(tcp) != 0) { 695 DPRINTF(EthernetDesc, "RxDesc: Checksum is bad!!\n"); 696 err |= RXDE_TCPE; 697 } 698 } 699 } // if ip 700 701 desc->status = status; 702 desc->errors = err; 703 704 // No vlan support at this point... just set it to 0 705 desc->vlan = 0; 706 707 // Deal with the rx timer interrupts 708 if (igbe->regs.rdtr.delay()) { 709 DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n", 710 igbe->regs.rdtr.delay() * igbe->intClock()); 711 if (igbe->rdtrEvent.scheduled()) 712 igbe->rdtrEvent.reschedule(curTick + igbe->regs.rdtr.delay() * 713 igbe->intClock()); 714 else 715 igbe->rdtrEvent.schedule(curTick + igbe->regs.rdtr.delay() * 716 igbe->intClock()); 717 } 718 719 if (igbe->regs.radv.idv() && igbe->regs.rdtr.delay()) { 720 DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n", 721 igbe->regs.radv.idv() * igbe->intClock()); 722 if (!igbe->radvEvent.scheduled()) 723 igbe->radvEvent.schedule(curTick + igbe->regs.radv.idv() * 724 igbe->intClock()); 725 } 726 727 // If the packet is small enough, interrupt appropriately 728 if (pktPtr->length <= igbe->regs.rsrpd.idv()) 729 igbe->postInterrupt(IT_SRPD); 730 731 DPRINTF(EthernetDesc, "RxDesc: Processing of this descriptor complete\n"); 732 unusedCache.pop_front(); 733 usedCache.push_back(desc); 734 pktPtr = NULL; 735 enableSm(); 736 pktDone = true; 737} 738 739void 740IGbE::RxDescCache::enableSm() 741{ 742 igbe->rxTick = true; 743 if ((igbe->rxTick || igbe->txTick) && !igbe->tickEvent.scheduled()) 744 igbe->tickEvent.schedule((curTick/igbe->cycles(1)) * igbe->cycles(1) + 745 igbe->cycles(1)); 746} 747 748bool 749IGbE::RxDescCache::packetDone() 750{ 751 if (pktDone) { 752 pktDone = false; 753 return true; 754 } 755 return false; 756} 757 758///////////////////////////////////// IGbE::TxDesc ///////////////////////////////// 759 760IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s) 761 : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false), pktWaiting(false), 762 
pktEvent(this) 763 764{ 765} 766 767int 768IGbE::TxDescCache::getPacketSize() 769{ 770 assert(unusedCache.size()); 771 772 TxDesc *desc; 773 774 DPRINTF(EthernetDesc, "TxDesc: Starting processing of descriptor\n"); 775 776 while (unusedCache.size() && TxdOp::isContext(unusedCache.front())) { 777 DPRINTF(EthernetDesc, "TxDesc: Got context descriptor type... skipping\n"); 778 779 // I think we can just ignore these for now? 780 desc = unusedCache.front(); 781 // is this going to be a tcp or udp packet? 782 isTcp = TxdOp::tcp(desc) ? true : false; 783 784 // make sure it's ipv4 785 assert(TxdOp::ip(desc)); 786 787 TxdOp::setDd(desc); 788 unusedCache.pop_front(); 789 usedCache.push_back(desc); 790 } 791 792 if (!unusedCache.size()) 793 return -1; 794 795 DPRINTF(EthernetDesc, "TxDesc: Next TX packet is %d bytes\n", 796 TxdOp::getLen(unusedCache.front())); 797 798 return TxdOp::getLen(unusedCache.front()); 799} 800 801void 802IGbE::TxDescCache::getPacketData(EthPacketPtr p) 803{ 804 assert(unusedCache.size()); 805 806 TxDesc *desc; 807 desc = unusedCache.front(); 808 809 assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc)); 810 811 pktPtr = p; 812 813 pktWaiting = true; 814 815 DPRINTF(EthernetDesc, "TxDesc: Starting DMA of packet\n"); 816 igbe->dmaRead(TxdOp::getBuf(desc), TxdOp::getLen(desc), &pktEvent, p->data); 817 818 819} 820 821void 822IGbE::TxDescCache::pktComplete() 823{ 824 825 TxDesc *desc; 826 assert(unusedCache.size()); 827 assert(pktPtr); 828 829 DPRINTF(EthernetDesc, "TxDesc: DMA of packet complete\n"); 830 831 desc = unusedCache.front(); 832 assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc)); 833 834 // no support for vlans 835 assert(!TxdOp::vle(desc)); 836 837 // we alway report status 838 assert(TxdOp::rs(desc)); 839 840 // we only support single packet descriptors at this point 841 assert(TxdOp::eop(desc)); 842 843 // set that this packet is done 844 TxdOp::setDd(desc); 845 846 // Checksums are only 
ofloaded for new descriptor types 847 if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) { 848 DPRINTF(EthernetDesc, "TxDesc: Calculating checksums for packet\n"); 849 IpPtr ip(pktPtr); 850 if (TxdOp::ixsm(desc)) { 851 ip->sum(0); 852 ip->sum(cksum(ip)); 853 DPRINTF(EthernetDesc, "TxDesc: Calculated IP checksum\n"); 854 } 855 if (TxdOp::txsm(desc)) { 856 if (isTcp) { 857 TcpPtr tcp(ip); 858 tcp->sum(0); 859 tcp->sum(cksum(tcp)); 860 DPRINTF(EthernetDesc, "TxDesc: Calculated TCP checksum\n"); 861 } else { 862 UdpPtr udp(ip); 863 udp->sum(0); 864 udp->sum(cksum(udp)); 865 DPRINTF(EthernetDesc, "TxDesc: Calculated UDP checksum\n"); 866 } 867 } 868 } 869 870 if (TxdOp::ide(desc)) { 871 // Deal with the rx timer interrupts 872 DPRINTF(EthernetDesc, "TxDesc: Descriptor had IDE set\n"); 873 if (igbe->regs.tidv.idv()) { 874 DPRINTF(EthernetDesc, "TxDesc: setting tidv\n"); 875 if (igbe->tidvEvent.scheduled()) 876 igbe->tidvEvent.reschedule(curTick + igbe->regs.tidv.idv() * 877 igbe->intClock()); 878 else 879 igbe->tidvEvent.schedule(curTick + igbe->regs.tidv.idv() * 880 igbe->intClock()); 881 } 882 883 if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) { 884 DPRINTF(EthernetDesc, "TxDesc: setting tadv\n"); 885 if (!igbe->tadvEvent.scheduled()) 886 igbe->tadvEvent.schedule(curTick + igbe->regs.tadv.idv() * 887 igbe->intClock()); 888 } 889 } 890 891 unusedCache.pop_front(); 892 usedCache.push_back(desc); 893 pktDone = true; 894 pktWaiting = false; 895 pktPtr = NULL; 896 897 DPRINTF(EthernetDesc, "TxDesc: Descriptor Done\n"); 898} 899 900bool 901IGbE::TxDescCache::packetAvailable() 902{ 903 if (pktDone) { 904 pktDone = false; 905 return true; 906 } 907 return false; 908} 909 910void 911IGbE::TxDescCache::enableSm() 912{ 913 igbe->txTick = true; 914 if ((igbe->rxTick || igbe->txTick) && !igbe->tickEvent.scheduled()) 915 igbe->tickEvent.schedule((curTick/igbe->cycles(1)) * igbe->cycles(1) + 916 igbe->cycles(1)); 917} 918 919 920 921 
922///////////////////////////////////// IGbE ///////////////////////////////// 923 924void 925IGbE::txStateMachine() 926{ 927 if (!regs.tctl.en()) { 928 txTick = false; 929 DPRINTF(EthernetSM, "TXS: RX disabled, stopping ticking\n"); 930 return; 931 } 932 933 if (txPacket && txDescCache.packetAvailable()) { 934 bool success; 935 DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n"); 936 success = txFifo.push(txPacket); 937 assert(success); 938 txPacket = NULL; 939 return; 940 } 941 942 // Only support descriptor granularity 943 assert(regs.txdctl.gran()); 944 if (regs.txdctl.lwthresh() && txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) { 945 DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n"); 946 postInterrupt(IT_TXDLOW); 947 } 948 949 if (!txPacket) { 950 txPacket = new EthPacketData(16384); 951 } 952 953 if (!txDescCache.packetWaiting()) { 954 if (txDescCache.descLeft() == 0) { 955 DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing writeback\n"); 956 txDescCache.writeback(0); 957 DPRINTF(EthernetSM, "TXS: No descriptors left, stopping ticking\n"); 958 txTick = false; 959 } 960 961 if (!(txDescCache.descUnused())) { 962 DPRINTF(EthernetSM, "TXS: No descriptors available in cache, stopping ticking\n"); 963 txTick = false; 964 DPRINTF(EthernetSM, "TXS: No descriptors left, fetching\n"); 965 txDescCache.fetchDescriptors(); 966 return; 967 } 968 969 int size; 970 size = txDescCache.getPacketSize(); 971 if (size > 0 && rxFifo.avail() > size) { 972 DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and begining DMA of next packet\n"); 973 rxFifo.reserve(size); 974 txDescCache.getPacketData(txPacket); 975 } else { 976 DPRINTF(EthernetSM, "TXS: No packets to get, writing back used descriptors\n"); 977 txDescCache.writeback(0); 978 } 979 980 return; 981 } 982} 983 984bool 985IGbE::ethRxPkt(EthPacketPtr pkt) 986{ 987 DPRINTF(Ethernet, "RxFIFO: Receiving pcakte from wire\n"); 988 if (!regs.rctl.en()) { 989 DPRINTF(Ethernet, "RxFIFO: RX not 
enabled, dropping\n"); 990 return true; 991 } 992 993 // restart the state machines if they are stopped 994 rxTick = true; 995 if ((rxTick || txTick) && !tickEvent.scheduled()) { 996 DPRINTF(EthernetSM, "RXS: received packet into fifo, starting ticking\n"); 997 tickEvent.schedule(curTick/cycles(1) + cycles(1)); 998 } 999 1000 if (!rxFifo.push(pkt)) { 1001 DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n"); 1002 postInterrupt(IT_RXO, true); 1003 return false; 1004 } 1005 return true; 1006} 1007 1008 1009void 1010IGbE::rxStateMachine() 1011{ 1012 if (!regs.rctl.en()) { 1013 rxTick = false; 1014 DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n"); 1015 return; 1016 } 1017 1018 // If the packet is done check for interrupts/descriptors/etc 1019 if (rxDescCache.packetDone()) { 1020 DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n"); 1021 int descLeft = rxDescCache.descLeft(); 1022 switch (regs.rctl.rdmts()) { 1023 case 2: if (descLeft > .125 * regs.rdlen()) break; 1024 case 1: if (descLeft > .250 * regs.rdlen()) break; 1025 case 0: if (descLeft > .500 * regs.rdlen()) break; 1026 DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) because of descriptors left\n"); 1027 postInterrupt(IT_RXDMT); 1028 break; 1029 } 1030 1031 if (descLeft == 0) { 1032 DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing writeback\n"); 1033 rxDescCache.writeback(0); 1034 DPRINTF(EthernetSM, "RXS: No descriptors left, stopping ticking\n"); 1035 rxTick = false; 1036 } 1037 1038 // only support descriptor granulaties 1039 assert(regs.rxdctl.gran()); 1040 1041 if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) { 1042 DPRINTF(EthernetSM, "RXS: Writing back because WTHRESH >= descUsed\n"); 1043 rxDescCache.writeback(cacheBlockSize()-1); 1044 } 1045 1046 if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) && 1047 ((rxDescCache.descLeft() - rxDescCache.descUnused()) > regs.rxdctl.hthresh())) { 1048 DPRINTF(EthernetSM, "RXS: Fetching descriptors because 
descUnused < PTHRESH\n"); 1049 rxDescCache.fetchDescriptors(); 1050 } 1051 1052 if (rxDescCache.descUnused() == 0) { 1053 DPRINTF(EthernetSM, "RXS: No descriptors available in cache, stopping ticking\n"); 1054 rxTick = false; 1055 DPRINTF(EthernetSM, "RXS: Fetching descriptors because none available\n"); 1056 rxDescCache.fetchDescriptors(); 1057 } 1058 return; 1059 } 1060 1061 if (!rxDescCache.descUnused()) { 1062 DPRINTF(EthernetSM, "RXS: No descriptors available in cache, stopping ticking\n"); 1063 rxTick = false; 1064 DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n"); 1065 rxDescCache.fetchDescriptors(); 1066 return; 1067 } 1068 1069 if (rxFifo.empty()) { 1070 DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n"); 1071 rxTick = false; 1072 return; 1073 } 1074 1075 EthPacketPtr pkt; 1076 pkt = rxFifo.front(); 1077 1078 DPRINTF(EthernetSM, "RXS: Writing packet into memory\n"); 1079 if (!rxDescCache.writePacket(pkt)) { 1080 return; 1081 } 1082 1083 DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n"); 1084 rxFifo.pop(); 1085 DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n"); 1086 rxTick = false; 1087} 1088 1089void 1090IGbE::txWire() 1091{ 1092 if (txFifo.empty()) { 1093 return; 1094 } 1095 1096 txTick = true; 1097 1098 if (etherInt->sendPacket(txFifo.front())) { 1099 DPRINTF(Ethernet, "TxFIFO: Successful transmit, bytes in fifo: %d\n", 1100 txFifo.avail()); 1101 txFifo.pop(); 1102 } 1103 1104 if (txFifo.empty()) { 1105 postInterrupt(IT_TXQE); 1106 DPRINTF(Ethernet, "TxFIFO: Empty, posting interruppt\n"); 1107 } 1108} 1109 1110void 1111IGbE::tick() 1112{ 1113 DPRINTF(EthernetSM, "IGbE: -------------- Cycle -------------- "); 1114 1115 if (rxTick) 1116 rxStateMachine(); 1117 1118 if (txTick) { 1119 txStateMachine(); 1120 txWire(); 1121 } 1122 1123 if (rxTick || txTick) 1124 tickEvent.schedule(curTick + cycles(1)); 1125} 1126 1127void 1128IGbE::ethTxDone() 1129{ 1130 // restart the state machines if they are stopped 
1131 txTick = true; 1132 if ((rxTick || txTick) && !tickEvent.scheduled()) 1133 tickEvent.schedule(curTick/cycles(1) + cycles(1)); 1134 DPRINTF(Ethernet, "TxFIFO: Transmission complete\n"); 1135} 1136 1137void 1138IGbE::serialize(std::ostream &os) 1139{ 1140 panic("Need to implemenet\n"); 1141} 1142 1143void 1144IGbE::unserialize(Checkpoint *cp, const std::string §ion) 1145{ 1146 panic("Need to implemenet\n"); 1147} 1148 1149 1150BEGIN_DECLARE_SIM_OBJECT_PARAMS(IGbEInt) 1151 1152 SimObjectParam<EtherInt *> peer; 1153 SimObjectParam<IGbE *> device; 1154 1155END_DECLARE_SIM_OBJECT_PARAMS(IGbEInt) 1156 1157BEGIN_INIT_SIM_OBJECT_PARAMS(IGbEInt) 1158 1159 INIT_PARAM_DFLT(peer, "peer interface", NULL), 1160 INIT_PARAM(device, "Ethernet device of this interface") 1161 1162END_INIT_SIM_OBJECT_PARAMS(IGbEInt) 1163 1164CREATE_SIM_OBJECT(IGbEInt) 1165{ 1166 IGbEInt *dev_int = new IGbEInt(getInstanceName(), device); 1167 1168 EtherInt *p = (EtherInt *)peer; 1169 if (p) { 1170 dev_int->setPeer(p); 1171 p->setPeer(dev_int); 1172 } 1173 1174 return dev_int; 1175} 1176 1177REGISTER_SIM_OBJECT("IGbEInt", IGbEInt) 1178 1179 1180BEGIN_DECLARE_SIM_OBJECT_PARAMS(IGbE) 1181 1182 SimObjectParam<System *> system; 1183 SimObjectParam<Platform *> platform; 1184 SimObjectParam<PciConfigData *> configdata; 1185 Param<uint32_t> pci_bus; 1186 Param<uint32_t> pci_dev; 1187 Param<uint32_t> pci_func; 1188 Param<Tick> pio_latency; 1189 Param<Tick> config_latency; 1190 1191END_DECLARE_SIM_OBJECT_PARAMS(IGbE) 1192 1193BEGIN_INIT_SIM_OBJECT_PARAMS(IGbE) 1194 1195 INIT_PARAM(system, "System pointer"), 1196 INIT_PARAM(platform, "Platform pointer"), 1197 INIT_PARAM(configdata, "PCI Config data"), 1198 INIT_PARAM(pci_bus, "PCI bus ID"), 1199 INIT_PARAM(pci_dev, "PCI device number"), 1200 INIT_PARAM(pci_func, "PCI function code"), 1201 INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1), 1202 INIT_PARAM(config_latency, "Number of cycles for a config read or write") 1203 
1204END_INIT_SIM_OBJECT_PARAMS(IGbE) 1205 1206 1207CREATE_SIM_OBJECT(IGbE) 1208{ 1209 IGbE::Params *params = new IGbE::Params; 1210 1211 params->name = getInstanceName(); 1212 params->platform = platform; 1213 params->system = system; 1214 params->configData = configdata; 1215 params->busNum = pci_bus; 1216 params->deviceNum = pci_dev; 1217 params->functionNum = pci_func; 1218 params->pio_delay = pio_latency; 1219 params->config_delay = config_latency; 1220 1221 return new IGbE(params); 1222} 1223 1224REGISTER_SIM_OBJECT("IGbE", IGbE) 1225