i8254xGBe.cc revision 4283
1/* 2 * Copyright (c) 2006 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 * 28 * Authors: Ali Saidi 29 */ 30 31/* @file 32 * Device model for Intel's 8254x line of gigabit ethernet controllers. 33 * In particular an 82547 revision 2 (82547GI) MAC because it seems to have the 34 * fewest workarounds in the driver. It will probably work with most of the 35 * other MACs with slight modifications. 36 */ 37 38 39/* 40 * @todo really there are multiple dma engines.. we should implement them. 41 */ 42 43#include "base/inet.hh" 44#include "base/trace.hh" 45#include "dev/i8254xGBe.hh" 46#include "mem/packet.hh" 47#include "mem/packet_access.hh" 48#include "sim/builder.hh" 49#include "sim/stats.hh" 50#include "sim/system.hh" 51 52#include <algorithm> 53 54using namespace iGbReg; 55using namespace Net; 56 57IGbE::IGbE(Params *p) 58 : PciDev(p), etherInt(NULL), useFlowControl(p->use_flow_control), 59 rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), rxTick(false), 60 txTick(false), rdtrEvent(this), radvEvent(this), tadvEvent(this), 61 tidvEvent(this), tickEvent(this), interEvent(this), 62 rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size), 63 txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size), clock(p->clock) 64{ 65 // Initialized internal registers per Intel documentation 66 // All registers intialized to 0 by per register constructor 67 regs.ctrl.fd(1); 68 regs.ctrl.lrst(1); 69 regs.ctrl.speed(2); 70 regs.ctrl.frcspd(1); 71 regs.sts.speed(3); // Say we're 1000Mbps 72 regs.sts.fd(1); // full duplex 73 regs.sts.lu(1); // link up 74 regs.eecd.fwe(1); 75 regs.eecd.ee_type(1); 76 regs.imr = 0; 77 regs.iam = 0; 78 regs.rxdctl.gran(1); 79 regs.rxdctl.wthresh(1); 80 regs.fcrth(1); 81 82 regs.pba.rxa(0x30); 83 regs.pba.txa(0x10); 84 85 eeOpBits = 0; 86 eeAddrBits = 0; 87 eeDataBits = 0; 88 eeOpcode = 0; 89 90 // clear all 64 16 bit words of the eeprom 91 memset(&flash, 0, EEPROM_SIZE*2); 92 93 // Set the MAC address 94 memcpy(flash, p->hardware_address.bytes(), ETH_ADDR_LEN); 95 for (int x = 0; x < ETH_ADDR_LEN/2; x++) 96 flash[x] = htobe(flash[x]); 97 98 uint16_t csum = 0; 99 for (int x = 0; x < EEPROM_SIZE; x++) 100 csum += htobe(flash[x]); 101 102 103 // Magic happy checksum value 104 flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum)); 105 106 rxFifo.clear(); 107 txFifo.clear(); 108} 109 110 111Tick 112IGbE::writeConfig(PacketPtr pkt) 113{ 114 int offset = pkt->getAddr() & PCI_CONFIG_SIZE; 115 if (offset < PCI_DEVICE_SPECIFIC) 116 PciDev::writeConfig(pkt); 117 else 118 panic("Device specific PCI config space not implemented.\n"); 119 120 /// 121 /// Some work may need to be done here based for the pci COMMAND bits. 122 /// 123 124 return pioDelay; 125} 126 127Tick 128IGbE::read(PacketPtr pkt) 129{ 130 int bar; 131 Addr daddr; 132 133 if (!getBAR(pkt->getAddr(), bar, daddr)) 134 panic("Invalid PCI memory access to unmapped memory.\n"); 135 136 // Only Memory register BAR is allowed 137 assert(bar == 0); 138 139 // Only 32bit accesses allowed 140 assert(pkt->getSize() == 4); 141 142 DPRINTF(Ethernet, "Read device register %#X\n", daddr); 143 144 pkt->allocate(); 145 146 /// 147 /// Handle read of register here 148 /// 149 150 151 switch (daddr) { 152 case REG_CTRL: 153 pkt->set<uint32_t>(regs.ctrl()); 154 break; 155 case REG_STATUS: 156 pkt->set<uint32_t>(regs.sts()); 157 break; 158 case REG_EECD: 159 pkt->set<uint32_t>(regs.eecd()); 160 break; 161 case REG_EERD: 162 pkt->set<uint32_t>(regs.eerd()); 163 break; 164 case REG_CTRL_EXT: 165 pkt->set<uint32_t>(regs.ctrl_ext()); 166 break; 167 case REG_MDIC: 168 pkt->set<uint32_t>(regs.mdic()); 169 break; 170 case REG_ICR: 171 DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs.icr(), 172 regs.imr, regs.iam, regs.ctrl_ext.iame()); 173 pkt->set<uint32_t>(regs.icr()); 174 if (regs.icr.int_assert() || regs.imr == 0) { 175 regs.icr = regs.icr() & ~mask(30); 176 DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr()); 177 } 178 if (regs.ctrl_ext.iame() && regs.icr.int_assert()) 179 regs.imr &= ~regs.iam; 180 chkInterrupt(); 181 break; 182 case REG_ITR: 183 pkt->set<uint32_t>(regs.itr()); 184 break; 185 case REG_RCTL: 186 pkt->set<uint32_t>(regs.rctl()); 187 break; 188 case REG_FCTTV: 189 pkt->set<uint32_t>(regs.fcttv()); 190 break; 191 case REG_TCTL: 192 pkt->set<uint32_t>(regs.tctl()); 193 break; 194 case REG_PBA: 195 pkt->set<uint32_t>(regs.pba()); 196 break; 197 case REG_WUC: 198 case REG_LEDCTL: 199 pkt->set<uint32_t>(0); // We don't care, so just return 0 200 break; 201 case REG_FCRTL: 202 pkt->set<uint32_t>(regs.fcrtl()); 203 break; 204 case REG_FCRTH: 205 pkt->set<uint32_t>(regs.fcrth()); 206 break; 207 case REG_RDBAL: 208 pkt->set<uint32_t>(regs.rdba.rdbal()); 209 break; 210 case REG_RDBAH: 211 pkt->set<uint32_t>(regs.rdba.rdbah()); 212 break; 213 case REG_RDLEN: 214 pkt->set<uint32_t>(regs.rdlen()); 215 break; 216 case REG_RDH: 217 pkt->set<uint32_t>(regs.rdh()); 218 break; 219 case REG_RDT: 220 pkt->set<uint32_t>(regs.rdt()); 221 break; 222 case REG_RDTR: 223 pkt->set<uint32_t>(regs.rdtr()); 224 if (regs.rdtr.fpd()) { 225 rxDescCache.writeback(0); 226 postInterrupt(IT_RXT); 227 regs.rdtr.fpd(0); 228 } 229 break; 230 case REG_RADV: 231 pkt->set<uint32_t>(regs.radv()); 232 break; 233 case REG_TDBAL: 234 pkt->set<uint32_t>(regs.tdba.tdbal()); 235 break; 236 case REG_TDBAH: 237 pkt->set<uint32_t>(regs.tdba.tdbah()); 238 break; 239 case REG_TDLEN: 240 pkt->set<uint32_t>(regs.tdlen()); 241 break; 242 case REG_TDH: 243 pkt->set<uint32_t>(regs.tdh()); 244 break; 245 case REG_TDT: 246 pkt->set<uint32_t>(regs.tdt()); 247 break; 248 case REG_TIDV: 249 pkt->set<uint32_t>(regs.tidv()); 250 break; 251 case REG_TXDCTL: 252 pkt->set<uint32_t>(regs.txdctl()); 253 break; 254 case REG_TADV: 255 pkt->set<uint32_t>(regs.tadv()); 256 break; 257 case REG_RXCSUM: 258 pkt->set<uint32_t>(regs.rxcsum()); 259 break; 260 case REG_MANC: 261 pkt->set<uint32_t>(regs.manc()); 262 break; 263 default: 264 if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) && 265 !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) && 266 !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4)) && 267 !(daddr >= REG_CRCERRS && daddr < (REG_CRCERRS + STATS_REGS_SIZE))) 268 panic("Read request to unknown register number: %#x\n", daddr); 269 else 270 pkt->set<uint32_t>(0); 271 }; 272 273 pkt->result = Packet::Success; 274 return pioDelay; 275} 276 277Tick 278IGbE::write(PacketPtr pkt) 279{ 280 int bar; 281 Addr daddr; 282 283 284 if (!getBAR(pkt->getAddr(), bar, daddr)) 285 panic("Invalid PCI memory access to unmapped memory.\n"); 286 287 // Only Memory register BAR is allowed 288 assert(bar == 0); 289 290 // Only 32bit accesses allowed 291 assert(pkt->getSize() == sizeof(uint32_t)); 292 293 DPRINTF(Ethernet, "Wrote device register %#X value %#X\n", daddr, pkt->get<uint32_t>()); 294 295 /// 296 /// Handle write of register here 297 /// 298 uint32_t val = pkt->get<uint32_t>(); 299 300 Regs::RCTL oldrctl; 301 Regs::TCTL oldtctl; 302 303 switch (daddr) { 304 case REG_CTRL: 305 regs.ctrl = val; 306 if (regs.ctrl.tfce()) 307 warn("TX Flow control enabled, should implement\n"); 308 if (regs.ctrl.rfce()) 309 warn("RX Flow control enabled, should implement\n"); 310 break; 311 case REG_CTRL_EXT: 312 regs.ctrl_ext = val; 313 break; 314 case REG_STATUS: 315 regs.sts = val; 316 break; 317 case REG_EECD: 318 int oldClk; 319 oldClk = regs.eecd.sk(); 320 regs.eecd = val; 321 // See if this is a eeprom access and emulate accordingly 322 if (!oldClk && regs.eecd.sk()) { 323 if (eeOpBits < 8) { 324 eeOpcode = eeOpcode << 1 | regs.eecd.din(); 325 eeOpBits++; 326 } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) { 327 eeAddr = eeAddr << 1 | regs.eecd.din(); 328 eeAddrBits++; 329 } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) { 330 assert(eeAddr>>1 < EEPROM_SIZE); 331 DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n", 332 flash[eeAddr>>1] >> eeDataBits & 0x1, flash[eeAddr>>1]); 333 regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1); 334 eeDataBits++; 335 } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) { 336 regs.eecd.dout(0); 337 eeDataBits++; 338 } else 339 panic("What's going on with eeprom interface? opcode:" 340 " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode, 341 (uint32_t)eeOpBits, (uint32_t)eeAddr, 342 (uint32_t)eeAddrBits, (uint32_t)eeDataBits); 343 344 // Reset everything for the next command 345 if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) || 346 (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) { 347 eeOpBits = 0; 348 eeAddrBits = 0; 349 eeDataBits = 0; 350 eeOpcode = 0; 351 eeAddr = 0; 352 } 353 354 DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n", 355 (uint32_t)eeOpcode, (uint32_t) eeOpBits, 356 (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits); 357 if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI || 358 eeOpcode == EEPROM_RDSR_OPCODE_SPI )) 359 panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode, 360 (uint32_t)eeOpBits); 361 362 363 } 364 // If driver requests eeprom access, immediately give it to it 365 regs.eecd.ee_gnt(regs.eecd.ee_req()); 366 break; 367 case REG_EERD: 368 regs.eerd = val; 369 break; 370 case REG_MDIC: 371 regs.mdic = val; 372 if (regs.mdic.i()) 373 panic("No support for interrupt on mdic complete\n"); 374 if (regs.mdic.phyadd() != 1) 375 panic("No support for reading anything but phy\n"); 376 DPRINTF(Ethernet, "%s phy address %x\n", regs.mdic.op() == 1 ? "Writing" 377 : "Reading", regs.mdic.regadd()); 378 switch (regs.mdic.regadd()) { 379 case PHY_PSTATUS: 380 regs.mdic.data(0x796D); // link up 381 break; 382 case PHY_PID: 383 regs.mdic.data(0x02A8); 384 break; 385 case PHY_EPID: 386 regs.mdic.data(0x0380); 387 break; 388 case PHY_GSTATUS: 389 regs.mdic.data(0x7C00); 390 break; 391 case PHY_EPSTATUS: 392 regs.mdic.data(0x3000); 393 break; 394 case PHY_AGC: 395 regs.mdic.data(0x180); // some random length 396 break; 397 default: 398 regs.mdic.data(0); 399 } 400 regs.mdic.r(1); 401 break; 402 case REG_ICR: 403 DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", regs.icr(), 404 regs.imr, regs.iam, regs.ctrl_ext.iame()); 405 if (regs.ctrl_ext.iame()) 406 regs.imr &= ~regs.iam; 407 regs.icr = ~bits(val,30,0) & regs.icr(); 408 chkInterrupt(); 409 break; 410 case REG_ITR: 411 regs.itr = val; 412 break; 413 case REG_ICS: 414 postInterrupt((IntTypes)val); 415 break; 416 case REG_IMS: 417 regs.imr |= val; 418 chkInterrupt(); 419 break; 420 case REG_IMC: 421 regs.imr &= ~val; 422 chkInterrupt(); 423 break; 424 case REG_IAM: 425 regs.iam = val; 426 break; 427 case REG_RCTL: 428 oldrctl = regs.rctl; 429 regs.rctl = val; 430 if (regs.rctl.rst()) { 431 rxDescCache.reset(); 432 rxFifo.clear(); 433 regs.rctl.rst(0); 434 } 435 if (regs.rctl.en()) 436 rxTick = true; 437 restartClock(); 438 break; 439 case REG_FCTTV: 440 regs.fcttv = val; 441 break; 442 case REG_TCTL: 443 regs.tctl = val; 444 oldtctl = regs.tctl; 445 regs.tctl = val; 446 if (regs.tctl.en()) 447 txTick = true; 448 restartClock(); 449 if (regs.tctl.en() && !oldtctl.en()) { 450 txDescCache.reset(); 451 } 452 break; 453 case REG_PBA: 454 regs.pba.rxa(val); 455 regs.pba.txa(64 - regs.pba.rxa()); 456 break; 457 case REG_WUC: 458 case REG_LEDCTL: 459 case REG_FCAL: 460 case REG_FCAH: 461 case REG_FCT: 462 case REG_VET: 463 case REG_AIFS: 464 case REG_TIPG: 465 ; // We don't care, so don't store anything 466 break; 467 case REG_FCRTL: 468 regs.fcrtl = val; 469 break; 470 case REG_FCRTH: 471 regs.fcrth = val; 472 break; 473 case REG_RDBAL: 474 regs.rdba.rdbal( val & ~mask(4)); 475 rxDescCache.areaChanged(); 476 break; 477 case REG_RDBAH: 478 regs.rdba.rdbah(val); 479 rxDescCache.areaChanged(); 480 break; 481 case REG_RDLEN: 482 regs.rdlen = val & ~mask(7); 483 rxDescCache.areaChanged(); 484 break; 485 case REG_RDH: 486 regs.rdh = val; 487 rxDescCache.areaChanged(); 488 break; 489 case REG_RDT: 490 regs.rdt = val; 491 rxTick = true; 492 restartClock(); 493 break; 494 case REG_RDTR: 495 regs.rdtr = val; 496 break; 497 case REG_RADV: 498 regs.radv = val; 499 break; 500 case REG_TDBAL: 501 regs.tdba.tdbal( val & ~mask(4)); 502 txDescCache.areaChanged(); 503 break; 504 case REG_TDBAH: 505 regs.tdba.tdbah(val); 506 txDescCache.areaChanged(); 507 break; 508 case REG_TDLEN: 509 regs.tdlen = val & ~mask(7); 510 txDescCache.areaChanged(); 511 break; 512 case REG_TDH: 513 regs.tdh = val; 514 txDescCache.areaChanged(); 515 break; 516 case REG_TDT: 517 regs.tdt = val; 518 txTick = true; 519 restartClock(); 520 break; 521 case REG_TIDV: 522 regs.tidv = val; 523 break; 524 case REG_TXDCTL: 525 regs.txdctl = val; 526 break; 527 case REG_TADV: 528 regs.tadv = val; 529 break; 530 case REG_RXCSUM: 531 regs.rxcsum = val; 532 break; 533 case REG_MANC: 534 regs.manc = val; 535 break; 536 default: 537 if (!(daddr >= REG_VFTA && daddr < (REG_VFTA + VLAN_FILTER_TABLE_SIZE*4)) && 538 !(daddr >= REG_RAL && daddr < (REG_RAL + RCV_ADDRESS_TABLE_SIZE*8)) && 539 !(daddr >= REG_MTA && daddr < (REG_MTA + MULTICAST_TABLE_SIZE*4))) 540 panic("Write request to unknown register number: %#x\n", daddr); 541 }; 542 543 pkt->result = Packet::Success; 544 return pioDelay; 545} 546 547void 548IGbE::postInterrupt(IntTypes t, bool now) 549{ 550 assert(t); 551 552 // Interrupt is already pending 553 if (t & regs.icr()) 554 return; 555 556 if (regs.icr() & regs.imr) 557 { 558 regs.icr = regs.icr() | t; 559 if (!interEvent.scheduled()) 560 interEvent.schedule(curTick + Clock::Int::ns * 256 * 561 regs.itr.interval()); 562 } else { 563 regs.icr = regs.icr() | t; 564 if (regs.itr.interval() == 0 || now) { 565 if (interEvent.scheduled()) 566 interEvent.deschedule(); 567 cpuPostInt(); 568 } else { 569 DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for %d ticks\n", 570 Clock::Int::ns * 256 * regs.itr.interval()); 571 assert(!interEvent.scheduled()); 572 interEvent.schedule(curTick + Clock::Int::ns * 256 * regs.itr.interval()); 573 } 574 } 575} 576 577void 578IGbE::cpuPostInt() 579{ 580 if (rdtrEvent.scheduled()) { 581 regs.icr.rxt0(1); 582 rdtrEvent.deschedule(); 583 } 584 if (radvEvent.scheduled()) { 585 regs.icr.rxt0(1); 586 radvEvent.deschedule(); 587 } 588 if (tadvEvent.scheduled()) { 589 regs.icr.txdw(1); 590 tadvEvent.deschedule(); 591 } 592 if (tidvEvent.scheduled()) { 593 regs.icr.txdw(1); 594 tidvEvent.deschedule(); 595 } 596 597 regs.icr.int_assert(1); 598 DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n", 599 regs.icr()); 600 intrPost(); 601} 602 603void 604IGbE::cpuClearInt() 605{ 606 if (regs.icr.int_assert()) { 607 regs.icr.int_assert(0); 608 DPRINTF(EthernetIntr, "EINT: Clearing interrupt to CPU now. Vector %#x\n", 609 regs.icr()); 610 intrClear(); 611 } 612} 613 614void 615IGbE::chkInterrupt() 616{ 617 // Check if we need to clear the cpu interrupt 618 if (!(regs.icr() & regs.imr)) { 619 if (interEvent.scheduled()) 620 interEvent.deschedule(); 621 if (regs.icr.int_assert()) 622 cpuClearInt(); 623 } 624 625 if (regs.icr() & regs.imr) { 626 if (regs.itr.interval() == 0) { 627 cpuPostInt(); 628 } else { 629 if (!interEvent.scheduled()) 630 interEvent.schedule(curTick + Clock::Int::ns * 256 * regs.itr.interval()); 631 } 632 } 633 634 635} 636 637 638IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s) 639 : DescCache<RxDesc>(i, n, s), pktDone(false), pktEvent(this) 640 641{ 642} 643 644bool 645IGbE::RxDescCache::writePacket(EthPacketPtr packet) 646{ 647 // We shouldn't have to deal with any of these yet 648 DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n", 649 packet->length, igbe->regs.rctl.descSize()); 650 assert(packet->length < igbe->regs.rctl.descSize()); 651 652 if (!unusedCache.size()) 653 return false; 654 655 pktPtr = packet; 656 657 igbe->dmaWrite(igbe->platform->pciToDma(unusedCache.front()->buf), 658 packet->length, &pktEvent, packet->data); 659 return true; 660} 661 662void 663IGbE::RxDescCache::pktComplete() 664{ 665 assert(unusedCache.size()); 666 RxDesc *desc; 667 desc = unusedCache.front(); 668 669 uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4 ; 670 desc->len = htole((uint16_t)(pktPtr->length + crcfixup)); 671 DPRINTF(EthernetDesc, "pktPtr->length: %d stripcrc offset: %d value written: %d %d\n", 672 pktPtr->length, crcfixup, 673 htole((uint16_t)(pktPtr->length + crcfixup)), 674 (uint16_t)(pktPtr->length + crcfixup)); 675 676 // no support for anything but starting at 0 677 assert(igbe->regs.rxcsum.pcss() == 0); 678 679 DPRINTF(EthernetDesc, "RxDesc: Packet written to memory updating Descriptor\n"); 680 681 uint8_t status = RXDS_DD | RXDS_EOP; 682 uint8_t err = 0; 683 IpPtr ip(pktPtr); 684 if (ip) { 685 if (igbe->regs.rxcsum.ipofld()) { 686 DPRINTF(EthernetDesc, "RxDesc: Checking IP checksum\n"); 687 status |= RXDS_IPCS; 688 desc->csum = htole(cksum(ip)); 689 if (cksum(ip) != 0) { 690 err |= RXDE_IPE; 691 DPRINTF(EthernetDesc, "RxDesc: Checksum is bad!!\n"); 692 } 693 } 694 TcpPtr tcp(ip); 695 if (tcp && igbe->regs.rxcsum.tuofld()) { 696 DPRINTF(EthernetDesc, "RxDesc: Checking TCP checksum\n"); 697 status |= RXDS_TCPCS; 698 desc->csum = htole(cksum(tcp)); 699 if (cksum(tcp) != 0) { 700 DPRINTF(EthernetDesc, "RxDesc: Checksum is bad!!\n"); 701 err |= RXDE_TCPE; 702 } 703 } 704 705 UdpPtr udp(ip); 706 if (udp && igbe->regs.rxcsum.tuofld()) { 707 DPRINTF(EthernetDesc, "RxDesc: Checking UDP checksum\n"); 708 status |= RXDS_UDPCS; 709 desc->csum = htole(cksum(udp)); 710 if (cksum(tcp) != 0) { 711 DPRINTF(EthernetDesc, "RxDesc: Checksum is bad!!\n"); 712 err |= RXDE_TCPE; 713 } 714 } 715 } // if ip 716 717 desc->status = htole(status); 718 desc->errors = htole(err); 719 720 // No vlan support at this point... just set it to 0 721 desc->vlan = 0; 722 723 // Deal with the rx timer interrupts 724 if (igbe->regs.rdtr.delay()) { 725 DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n", 726 igbe->regs.rdtr.delay() * igbe->intClock()); 727 if (igbe->rdtrEvent.scheduled()) 728 igbe->rdtrEvent.reschedule(curTick + igbe->regs.rdtr.delay() * 729 igbe->intClock()); 730 else 731 igbe->rdtrEvent.schedule(curTick + igbe->regs.rdtr.delay() * 732 igbe->intClock()); 733 } 734 735 if (igbe->regs.radv.idv() && igbe->regs.rdtr.delay()) { 736 DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n", 737 igbe->regs.radv.idv() * igbe->intClock()); 738 if (!igbe->radvEvent.scheduled()) 739 igbe->radvEvent.schedule(curTick + igbe->regs.radv.idv() * 740 igbe->intClock()); 741 } 742 743 // if neither radv or rdtr, maybe itr is set... 744 if (!igbe->regs.rdtr.delay()) { 745 DPRINTF(EthernetSM, "RXS: Receive interrupt delay disabled, posting IT_RXT\n"); 746 igbe->postInterrupt(IT_RXT); 747 } 748 749 // If the packet is small enough, interrupt appropriately 750 // I wonder if this is delayed or not?! 751 if (pktPtr->length <= igbe->regs.rsrpd.idv()) 752 igbe->postInterrupt(IT_SRPD); 753 754 DPRINTF(EthernetDesc, "RxDesc: Processing of this descriptor complete\n"); 755 unusedCache.pop_front(); 756 usedCache.push_back(desc); 757 pktPtr = NULL; 758 enableSm(); 759 pktDone = true; 760} 761 762void 763IGbE::RxDescCache::enableSm() 764{ 765 igbe->rxTick = true; 766 igbe->restartClock(); 767} 768 769bool 770IGbE::RxDescCache::packetDone() 771{ 772 if (pktDone) { 773 pktDone = false; 774 return true; 775 } 776 return false; 777} 778 779///////////////////////////////////// IGbE::TxDesc ///////////////////////////////// 780 781IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s) 782 : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false), pktWaiting(false), 783 hLen(0), pktEvent(this) 784 785{ 786} 787 788int 789IGbE::TxDescCache::getPacketSize() 790{ 791 assert(unusedCache.size()); 792 793 TxDesc *desc; 794 795 DPRINTF(EthernetDesc, "TxDesc: Starting processing of descriptor\n"); 796 797 while (unusedCache.size() && TxdOp::isContext(unusedCache.front())) { 798 DPRINTF(EthernetDesc, "TxDesc: Got context descriptor type... skipping\n"); 799 800 // I think we can just ignore these for now? 801 desc = unusedCache.front(); 802 // is this going to be a tcp or udp packet? 803 isTcp = TxdOp::tcp(desc) ? true : false; 804 805 // make sure it's ipv4 806 assert(TxdOp::ip(desc)); 807 808 TxdOp::setDd(desc); 809 unusedCache.pop_front(); 810 usedCache.push_back(desc); 811 } 812 813 if (!unusedCache.size()) 814 return -1; 815 816 DPRINTF(EthernetDesc, "TxDesc: Next TX packet is %d bytes\n", 817 TxdOp::getLen(unusedCache.front())); 818 819 return TxdOp::getLen(unusedCache.front()); 820} 821 822void 823IGbE::TxDescCache::getPacketData(EthPacketPtr p) 824{ 825 assert(unusedCache.size()); 826 827 TxDesc *desc; 828 desc = unusedCache.front(); 829 830 assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc)); 831 832 pktPtr = p; 833 834 pktWaiting = true; 835 836 DPRINTF(EthernetDesc, "TxDesc: Starting DMA of packet\n"); 837 igbe->dmaRead(igbe->platform->pciToDma(TxdOp::getBuf(desc)), 838 TxdOp::getLen(desc), &pktEvent, p->data + hLen); 839 840 841} 842 843void 844IGbE::TxDescCache::pktComplete() 845{ 846 847 TxDesc *desc; 848 assert(unusedCache.size()); 849 assert(pktPtr); 850 851 DPRINTF(EthernetDesc, "TxDesc: DMA of packet complete\n"); 852 853 854 desc = unusedCache.front(); 855 assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc)); 856 857 DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2); 858 859 if (!TxdOp::eop(desc)) { 860 assert(hLen == 0); 861 hLen = TxdOp::getLen(desc); 862 unusedCache.pop_front(); 863 usedCache.push_back(desc); 864 pktDone = true; 865 pktWaiting = false; 866 pktPtr = NULL; 867 868 DPRINTF(EthernetDesc, "TxDesc: Partial Packet Descriptor Done\n"); 869 return; 870 } 871 872 // Set the length of the data in the EtherPacket 873 pktPtr->length = TxdOp::getLen(desc) + hLen; 874 875 // no support for vlans 876 assert(!TxdOp::vle(desc)); 877 878 // we alway report status 879 assert(TxdOp::rs(desc)); 880 881 // we only support single packet descriptors at this point 882 assert(TxdOp::eop(desc)); 883 884 // set that this packet is done 885 TxdOp::setDd(desc); 886 887 DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", desc->d1, desc->d2); 888 889 // Checksums are only ofloaded for new descriptor types 890 if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) { 891 DPRINTF(EthernetDesc, "TxDesc: Calculating checksums for packet\n"); 892 IpPtr ip(pktPtr); 893 if (TxdOp::ixsm(desc)) { 894 ip->sum(0); 895 ip->sum(cksum(ip)); 896 DPRINTF(EthernetDesc, "TxDesc: Calculated IP checksum\n"); 897 } 898 if (TxdOp::txsm(desc)) { 899 if (isTcp) { 900 TcpPtr tcp(ip); 901 tcp->sum(0); 902 tcp->sum(cksum(tcp)); 903 DPRINTF(EthernetDesc, "TxDesc: Calculated TCP checksum\n"); 904 } else { 905 UdpPtr udp(ip); 906 udp->sum(0); 907 udp->sum(cksum(udp)); 908 DPRINTF(EthernetDesc, "TxDesc: Calculated UDP checksum\n"); 909 } 910 } 911 } 912 913 if (TxdOp::ide(desc)) { 914 // Deal with the rx timer interrupts 915 DPRINTF(EthernetDesc, "TxDesc: Descriptor had IDE set\n"); 916 if (igbe->regs.tidv.idv()) { 917 DPRINTF(EthernetDesc, "TxDesc: setting tidv\n"); 918 if (igbe->tidvEvent.scheduled()) 919 igbe->tidvEvent.reschedule(curTick + igbe->regs.tidv.idv() * 920 igbe->intClock()); 921 else 922 igbe->tidvEvent.schedule(curTick + igbe->regs.tidv.idv() * 923 igbe->intClock()); 924 } 925 926 if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) { 927 DPRINTF(EthernetDesc, "TxDesc: setting tadv\n"); 928 if (!igbe->tadvEvent.scheduled()) 929 igbe->tadvEvent.schedule(curTick + igbe->regs.tadv.idv() * 930 igbe->intClock()); 931 } 932 } 933 934 935 936 unusedCache.pop_front(); 937 usedCache.push_back(desc); 938 pktDone = true; 939 pktWaiting = false; 940 pktPtr = NULL; 941 942 hLen = 0; 943 DPRINTF(EthernetDesc, "TxDesc: Descriptor Done\n"); 944 945 if (igbe->regs.txdctl.wthresh() == 0) { 946 DPRINTF(EthernetDesc, "TxDesc: WTHRESH == 0, writing back descriptor\n"); 947 writeback(0); 948 } else if (igbe->regs.txdctl.wthresh() >= usedCache.size()) { 949 DPRINTF(EthernetDesc, "TxDesc: used > WTHRESH, writing back descriptor\n"); 950 writeback((igbe->cacheBlockSize()-1)>>4); 951 } 952 953} 954 955bool 956IGbE::TxDescCache::packetAvailable() 957{ 958 if (pktDone) { 959 pktDone = false; 960 return true; 961 } 962 return false; 963} 964 965void 966IGbE::TxDescCache::enableSm() 967{ 968 igbe->txTick = true; 969 igbe->restartClock(); 970} 971 972 973 974 975///////////////////////////////////// IGbE ///////////////////////////////// 976 977void 978IGbE::restartClock() 979{ 980 if (!tickEvent.scheduled() && (rxTick || txTick)) 981 tickEvent.schedule((curTick/cycles(1)) * cycles(1) + cycles(1)); 982} 983 984 985void 986IGbE::txStateMachine() 987{ 988 if (!regs.tctl.en()) { 989 txTick = false; 990 DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n"); 991 return; 992 } 993 994 // If we have a packet available and it's length is not 0 (meaning it's not 995 // a multidescriptor packet) put it in the fifo, otherwise an the next 996 // iteration we'll get the rest of the data 997 if (txPacket && txDescCache.packetAvailable() && txPacket->length) { 998 bool success; 999 DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n"); 1000 success = txFifo.push(txPacket); 1001 assert(success); 1002 txPacket = NULL; 1003 return; 1004 } 1005 1006 // Only support descriptor granularity 1007 assert(regs.txdctl.gran()); 1008 if (regs.txdctl.lwthresh() && txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) { 1009 DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n"); 1010 postInterrupt(IT_TXDLOW); 1011 } 1012 1013 if (!txPacket) { 1014 txPacket = new EthPacketData(16384); 1015 } 1016 1017 if (!txDescCache.packetWaiting()) { 1018 if (txDescCache.descLeft() == 0) { 1019 DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing " 1020 "writeback stopping ticking and posting TXQE\n"); 1021 txDescCache.writeback(0); 1022 txTick = false; 1023 postInterrupt(IT_TXQE, true); 1024 } 1025 1026 1027 if (!(txDescCache.descUnused())) { 1028 DPRINTF(EthernetSM, "TXS: No descriptors available in cache, fetching and stopping ticking\n"); 1029 txTick = false; 1030 txDescCache.fetchDescriptors(); 1031 return; 1032 } 1033 1034 int size; 1035 size = txDescCache.getPacketSize(); 1036 if (size > 0 && txFifo.avail() > size) { 1037 DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and begining " 1038 "DMA of next packet\n", size); 1039 txFifo.reserve(size); 1040 txDescCache.getPacketData(txPacket); 1041 } else { 1042 DPRINTF(EthernetSM, "TXS: No packets to get, writing back used descriptors\n"); 1043 txDescCache.writeback(0); 1044 } 1045 1046 return; 1047 } 1048} 1049 1050bool 1051IGbE::ethRxPkt(EthPacketPtr pkt) 1052{ 1053 DPRINTF(Ethernet, "RxFIFO: Receiving pcakte from wire\n"); 1054 if (!regs.rctl.en()) { 1055 DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n"); 1056 return true; 1057 } 1058 1059 // restart the state machines if they are stopped 1060 rxTick = true; 1061 if ((rxTick || txTick) && !tickEvent.scheduled()) { 1062 DPRINTF(EthernetSM, "RXS: received packet into fifo, starting ticking\n"); 1063 restartClock(); 1064 } 1065 1066 if (!rxFifo.push(pkt)) { 1067 DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n"); 1068 postInterrupt(IT_RXO, true); 1069 return false; 1070 } 1071 return true; 1072} 1073 1074 1075void 1076IGbE::rxStateMachine() 1077{ 1078 if (!regs.rctl.en()) { 1079 rxTick = false; 1080 DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n"); 1081 return; 1082 } 1083 1084 // If the packet is done check for interrupts/descriptors/etc 1085 if (rxDescCache.packetDone()) { 1086 DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n"); 1087 int descLeft = rxDescCache.descLeft(); 1088 switch (regs.rctl.rdmts()) { 1089 case 2: if (descLeft > .125 * regs.rdlen()) break; 1090 case 1: if (descLeft > .250 * regs.rdlen()) break; 1091 case 0: if (descLeft > .500 * regs.rdlen()) break; 1092 DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) because of descriptors left\n"); 1093 postInterrupt(IT_RXDMT); 1094 break; 1095 } 1096 1097 if (descLeft == 0) { 1098 DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing writeback\n"); 1099 rxDescCache.writeback(0); 1100 DPRINTF(EthernetSM, "RXS: No descriptors left, stopping ticking\n"); 1101 rxTick = false; 1102 } 1103 1104 // only support descriptor granulaties 1105 assert(regs.rxdctl.gran()); 1106 1107 if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) { 1108 DPRINTF(EthernetSM, "RXS: Writing back because WTHRESH >= descUsed\n"); 1109 if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4)) 1110 rxDescCache.writeback(regs.rxdctl.wthresh()-1); 1111 else 1112 rxDescCache.writeback((cacheBlockSize()-1)>>4); 1113 } 1114 1115 if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) && 1116 ((rxDescCache.descLeft() - rxDescCache.descUnused()) > regs.rxdctl.hthresh())) { 1117 DPRINTF(EthernetSM, "RXS: Fetching descriptors because descUnused < PTHRESH\n"); 1118 rxDescCache.fetchDescriptors(); 1119 } 1120 1121 if (rxDescCache.descUnused() == 0) { 1122 DPRINTF(EthernetSM, "RXS: No descriptors available in cache, stopping ticking\n"); 1123 rxTick = false; 1124 DPRINTF(EthernetSM, "RXS: Fetching descriptors because none available\n"); 1125 rxDescCache.fetchDescriptors(); 1126 } 1127 return; 1128 } 1129 1130 if (!rxDescCache.descUnused()) { 1131 DPRINTF(EthernetSM, "RXS: No descriptors available in cache, stopping ticking\n"); 1132 rxTick = false; 1133 DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n"); 1134 rxDescCache.fetchDescriptors(); 1135 return; 1136 } 1137 1138 if (rxFifo.empty()) { 1139 DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n"); 1140 rxTick = false; 1141 return; 1142 } 1143 1144 EthPacketPtr pkt; 1145 pkt = rxFifo.front(); 1146 1147 DPRINTF(EthernetSM, "RXS: Writing packet into memory\n"); 1148 if (!rxDescCache.writePacket(pkt)) { 1149 return; 1150 } 1151 1152 DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n"); 1153 rxFifo.pop(); 1154 DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n"); 1155 rxTick = false; 1156} 1157 1158void 1159IGbE::txWire() 1160{ 1161 if (txFifo.empty()) { 1162 return; 1163 } 1164 1165 txTick = true; 1166 1167 if (etherInt->sendPacket(txFifo.front())) { 1168 DPRINTF(Ethernet, "TxFIFO: Successful transmit, bytes in fifo: %d\n", 1169 txFifo.avail()); 1170 txFifo.pop(); 1171 } 1172 1173} 1174 1175void 1176IGbE::tick() 1177{ 1178 DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n"); 1179 1180 if (rxTick) 1181 rxStateMachine(); 1182 1183 if (txTick) { 1184 txStateMachine(); 1185 txWire(); 1186 } 1187 1188 if (rxTick || txTick) 1189 tickEvent.schedule(curTick + cycles(1)); 1190} 1191 1192void 1193IGbE::ethTxDone() 1194{ 1195 // restart the state machines if they are stopped 1196 txTick = true; 1197 restartClock(); 1198 DPRINTF(Ethernet, "TxFIFO: Transmission complete\n"); 1199} 1200 1201void 1202IGbE::serialize(std::ostream &os) 1203{ 1204 panic("Need to implemenet\n"); 1205} 1206 1207void 1208IGbE::unserialize(Checkpoint *cp, const std::string §ion) 1209{ 1210 panic("Need to implemenet\n"); 1211} 1212 1213 1214BEGIN_DECLARE_SIM_OBJECT_PARAMS(IGbEInt) 1215 1216 SimObjectParam<EtherInt *> peer; 1217 SimObjectParam<IGbE *> device; 1218 1219END_DECLARE_SIM_OBJECT_PARAMS(IGbEInt) 1220 1221BEGIN_INIT_SIM_OBJECT_PARAMS(IGbEInt) 1222 1223 INIT_PARAM_DFLT(peer, "peer interface", NULL), 1224 INIT_PARAM(device, "Ethernet device of this interface") 1225 1226END_INIT_SIM_OBJECT_PARAMS(IGbEInt) 1227 1228CREATE_SIM_OBJECT(IGbEInt) 1229{ 1230 IGbEInt *dev_int = new IGbEInt(getInstanceName(), device); 1231 1232 EtherInt *p = (EtherInt *)peer; 1233 if (p) { 1234 dev_int->setPeer(p); 1235 p->setPeer(dev_int); 1236 } 1237 1238 return dev_int; 1239} 1240 1241REGISTER_SIM_OBJECT("IGbEInt", IGbEInt) 1242 1243 1244BEGIN_DECLARE_SIM_OBJECT_PARAMS(IGbE) 1245 1246 SimObjectParam<System *> system; 1247 SimObjectParam<Platform *> platform; 1248 SimObjectParam<PciConfigData *> configdata; 1249 Param<uint32_t> pci_bus; 1250 Param<uint32_t> pci_dev; 1251 Param<uint32_t> pci_func; 1252 Param<Tick> pio_latency; 1253 Param<Tick> config_latency; 1254 Param<std::string> hardware_address; 1255 Param<bool> use_flow_control; 1256 Param<int> rx_fifo_size; 1257 Param<int> tx_fifo_size; 1258 Param<int> rx_desc_cache_size; 1259 Param<int> tx_desc_cache_size; 1260 Param<Tick> clock; 1261 1262 1263END_DECLARE_SIM_OBJECT_PARAMS(IGbE) 1264 1265BEGIN_INIT_SIM_OBJECT_PARAMS(IGbE) 1266 1267 INIT_PARAM(system, "System pointer"), 1268 INIT_PARAM(platform, "Platform pointer"), 1269 INIT_PARAM(configdata, "PCI Config data"), 1270 INIT_PARAM(pci_bus, "PCI bus ID"), 1271 INIT_PARAM(pci_dev, "PCI device number"), 1272 INIT_PARAM(pci_func, "PCI function code"), 1273 INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1), 1274 INIT_PARAM(config_latency, "Number of cycles for a config read or write"), 1275 INIT_PARAM(hardware_address, "Ethernet Hardware Address"), 1276 INIT_PARAM(use_flow_control,"Should the device use xon/off packets"), 1277 INIT_PARAM(rx_fifo_size,"Size of the RX FIFO"), 1278 INIT_PARAM(tx_fifo_size,"Size of the TX FIFO"), 1279 INIT_PARAM(rx_desc_cache_size,"Size of the RX descriptor cache"), 1280 INIT_PARAM(tx_desc_cache_size,"Size of the TX descriptor cache"), 1281 INIT_PARAM(clock,"Clock rate for the device to tick at") 1282 1283END_INIT_SIM_OBJECT_PARAMS(IGbE) 1284 1285 1286CREATE_SIM_OBJECT(IGbE) 1287{ 1288 IGbE::Params *params = new IGbE::Params; 1289 1290 params->name = getInstanceName(); 1291 params->platform = platform; 1292 params->system = system; 1293 params->configData = configdata; 1294 params->busNum = pci_bus; 1295 params->deviceNum = pci_dev; 1296 params->functionNum = pci_func; 1297 params->pio_delay = pio_latency; 1298 params->config_delay = config_latency; 1299 params->hardware_address = hardware_address; 1300 params->use_flow_control = use_flow_control; 1301 params->rx_fifo_size = rx_fifo_size; 1302 params->tx_fifo_size = tx_fifo_size; 1303 params->rx_desc_cache_size = rx_desc_cache_size; 1304 params->tx_desc_cache_size = tx_desc_cache_size; 1305 params->clock = clock; 1306 1307 1308 return new IGbE(params); 1309} 1310 1311REGISTER_SIM_OBJECT("IGbE", IGbE) 1312