i8254xGBe.cc revision 11263
11689SN/A/* 29444SAndreas.Sandberg@ARM.com * Copyright (c) 2006 The Regents of The University of Michigan 37854SAli.Saidi@ARM.com * All rights reserved. 47854SAli.Saidi@ARM.com * 57854SAli.Saidi@ARM.com * Redistribution and use in source and binary forms, with or without 67854SAli.Saidi@ARM.com * modification, are permitted provided that the following conditions are 77854SAli.Saidi@ARM.com * met: redistributions of source code must retain the above copyright 87854SAli.Saidi@ARM.com * notice, this list of conditions and the following disclaimer; 97854SAli.Saidi@ARM.com * redistributions in binary form must reproduce the above copyright 107854SAli.Saidi@ARM.com * notice, this list of conditions and the following disclaimer in the 117854SAli.Saidi@ARM.com * documentation and/or other materials provided with the distribution; 127854SAli.Saidi@ARM.com * neither the name of the copyright holders nor the names of its 137854SAli.Saidi@ARM.com * contributors may be used to endorse or promote products derived from 142329SN/A * this software without specific prior written permission. 151689SN/A * 161689SN/A * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 171689SN/A * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 181689SN/A * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 191689SN/A * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 201689SN/A * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 211689SN/A * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 221689SN/A * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 231689SN/A * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 241689SN/A * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 251689SN/A * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 261689SN/A * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 271689SN/A * 281689SN/A * Authors: Ali Saidi 291689SN/A */ 301689SN/A 311689SN/A/* @file 321689SN/A * Device model for Intel's 8254x line of gigabit ethernet controllers. 331689SN/A * In particular an 82547 revision 2 (82547GI) MAC because it seems to have the 341689SN/A * fewest workarounds in the driver. It will probably work with most of the 351689SN/A * other MACs with slight modifications. 361689SN/A */ 371689SN/A 381689SN/A#include "dev/net/i8254xGBe.hh" 392665Ssaidi@eecs.umich.edu 402665Ssaidi@eecs.umich.edu/* 412935Sksewell@umich.edu * @todo really there are multiple dma engines.. we should implement them. 
421689SN/A */ 431689SN/A 441060SN/A#include <algorithm> 451060SN/A#include <memory> 463773Sgblack@eecs.umich.edu 476329Sgblack@eecs.umich.edu#include "base/inet.hh" 486658Snate@binkert.org#include "base/trace.hh" 491717SN/A#include "debug/Drain.hh" 508232Snate@binkert.org#include "debug/EthernetAll.hh" 518232Snate@binkert.org#include "mem/packet.hh" 529527SMatt.Horsnell@arm.com#include "mem/packet_access.hh" 535529Snate@binkert.org#include "params/IGbE.hh" 541060SN/A#include "sim/stats.hh" 556221Snate@binkert.org#include "sim/system.hh" 566221Snate@binkert.org 571061SN/Ausing namespace iGbReg; 585529Snate@binkert.orgusing namespace Net; 594329Sktlim@umich.edu 604329Sktlim@umich.eduIGbE::IGbE(const Params *p) 612292SN/A : EtherDevice(p), etherInt(NULL), cpa(NULL), 622292SN/A rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), rxTick(false), 632292SN/A txTick(false), txFifoTick(false), rxDmaPacket(false), pktOffset(0), 642292SN/A fetchDelay(p->fetch_delay), wbDelay(p->wb_delay), 655529Snate@binkert.org fetchCompDelay(p->fetch_comp_delay), wbCompDelay(p->wb_comp_delay), 662361SN/A rxWriteDelay(p->rx_write_delay), txReadDelay(p->tx_read_delay), 671060SN/A rdtrEvent(this), radvEvent(this), 682292SN/A tadvEvent(this), tidvEvent(this), tickEvent(this), interEvent(this), 698907Slukefahr@umich.edu rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size), 702292SN/A txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size), 712292SN/A lastInterrupt(0) 722292SN/A{ 732292SN/A etherInt = new IGbEInt(name() + ".int", this); 742292SN/A 752292SN/A // Initialized internal registers per Intel documentation 762292SN/A // All registers intialized to 0 by per register constructor 771060SN/A regs.ctrl.fd(1); 781060SN/A regs.ctrl.lrst(1); 791061SN/A regs.ctrl.speed(2); 801060SN/A regs.ctrl.frcspd(1); 812292SN/A regs.sts.speed(3); // Say we're 1000Mbps 821062SN/A regs.sts.fd(1); // full duplex 831062SN/A regs.sts.lu(1); // link up 848240Snate@binkert.org regs.eecd.fwe(1); 851062SN/A 
regs.eecd.ee_type(1); 861062SN/A regs.imr = 0; 871062SN/A regs.iam = 0; 888240Snate@binkert.org regs.rxdctl.gran(1); 891062SN/A regs.rxdctl.wthresh(1); 901062SN/A regs.fcrth(1); 911062SN/A regs.tdwba = 0; 928240Snate@binkert.org regs.rlpml = 0; 931062SN/A regs.sw_fw_sync = 0; 941062SN/A 952301SN/A regs.pba.rxa(0x30); 968240Snate@binkert.org regs.pba.txa(0x10); 972301SN/A 982301SN/A eeOpBits = 0; 992292SN/A eeAddrBits = 0; 1008240Snate@binkert.org eeDataBits = 0; 1012292SN/A eeOpcode = 0; 1022292SN/A 1031062SN/A // clear all 64 16 bit words of the eeprom 1048240Snate@binkert.org memset(&flash, 0, EEPROM_SIZE*2); 1051062SN/A 1061062SN/A // Set the MAC address 1071062SN/A memcpy(flash, p->hardware_address.bytes(), ETH_ADDR_LEN); 1088240Snate@binkert.org for (int x = 0; x < ETH_ADDR_LEN/2; x++) 1091062SN/A flash[x] = htobe(flash[x]); 1101062SN/A 1111062SN/A uint16_t csum = 0; 1128240Snate@binkert.org for (int x = 0; x < EEPROM_SIZE; x++) 1131062SN/A csum += htobe(flash[x]); 1141062SN/A 1151062SN/A 1168240Snate@binkert.org // Magic happy checksum value 1172292SN/A flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum)); 1181062SN/A 1191062SN/A // Store the MAC address as queue ID 1208240Snate@binkert.org macAddr = p->hardware_address; 1212292SN/A 1221062SN/A rxFifo.clear(); 1232292SN/A txFifo.clear(); 1248240Snate@binkert.org} 1252292SN/A 1262292SN/AIGbE::~IGbE() 1271062SN/A{ 1288240Snate@binkert.org delete etherInt; 1291062SN/A} 1301062SN/A 1311062SN/Avoid 1328240Snate@binkert.orgIGbE::init() 1331062SN/A{ 1341062SN/A cpa = CPA::cpa(); 1351062SN/A PciDevice::init(); 1368240Snate@binkert.org} 1371062SN/A 1381062SN/AEtherInt* 1391062SN/AIGbE::getEthPort(const std::string &if_name, int idx) 1408240Snate@binkert.org{ 1411062SN/A 1421062SN/A if (if_name == "interface") { 1431062SN/A if (etherInt->getPeer()) 1448240Snate@binkert.org panic("Port already connected to\n"); 1451062SN/A return etherInt; 1461062SN/A } 1472301SN/A return NULL; 1488240Snate@binkert.org} 
1492301SN/A 1502301SN/ATick 1512301SN/AIGbE::writeConfig(PacketPtr pkt) 1522301SN/A{ 1538240Snate@binkert.org int offset = pkt->getAddr() & PCI_CONFIG_SIZE; 1542301SN/A if (offset < PCI_DEVICE_SPECIFIC) 1552301SN/A PciDevice::writeConfig(pkt); 1562301SN/A else 1572307SN/A panic("Device specific PCI config space not implemented.\n"); 1588240Snate@binkert.org 1592307SN/A // 1602307SN/A // Some work may need to be done here based for the pci COMMAND bits. 1612307SN/A // 1627897Shestness@cs.utexas.edu 1638240Snate@binkert.org return configDelay; 1647897Shestness@cs.utexas.edu} 1657897Shestness@cs.utexas.edu 1667897Shestness@cs.utexas.edu// Handy macro for range-testing register access addresses 1678240Snate@binkert.org#define IN_RANGE(val, base, len) (val >= base && val < (base + len)) 1687897Shestness@cs.utexas.edu 1697897Shestness@cs.utexas.eduTick 1701062SN/AIGbE::read(PacketPtr pkt) 1711062SN/A{ 1721062SN/A int bar; 1731062SN/A Addr daddr; 1742292SN/A 1751060SN/A if (!getBAR(pkt->getAddr(), bar, daddr)) 1761060SN/A panic("Invalid PCI memory access to unmapped memory.\n"); 1771060SN/A 1781060SN/A // Only Memory register BAR is allowed 1791060SN/A assert(bar == 0); 1801060SN/A 1811060SN/A // Only 32bit accesses allowed 1821060SN/A assert(pkt->getSize() == 4); 1831060SN/A 1841060SN/A DPRINTF(Ethernet, "Read device register %#X\n", daddr); 1851060SN/A 1861060SN/A // 1871060SN/A // Handle read of register here 1881061SN/A // 1891060SN/A 1902292SN/A 1911060SN/A switch (daddr) { 1921060SN/A case REG_CTRL: 1931060SN/A pkt->set<uint32_t>(regs.ctrl()); 1941060SN/A break; 1951060SN/A case REG_STATUS: 1961060SN/A pkt->set<uint32_t>(regs.sts()); 1971060SN/A break; 1981061SN/A case REG_EECD: 1991060SN/A pkt->set<uint32_t>(regs.eecd()); 2002292SN/A break; 2011060SN/A case REG_EERD: 2021060SN/A pkt->set<uint32_t>(regs.eerd()); 2031060SN/A break; 2041060SN/A case REG_CTRL_EXT: 2051060SN/A pkt->set<uint32_t>(regs.ctrl_ext()); 2061060SN/A break; 2071060SN/A case REG_MDIC: 2081061SN/A 
pkt->set<uint32_t>(regs.mdic()); 2091060SN/A break; 2109427SAndreas.Sandberg@ARM.com case REG_ICR: 2111060SN/A DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", 2129444SAndreas.Sandberg@ARM.com regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame()); 2139444SAndreas.Sandberg@ARM.com pkt->set<uint32_t>(regs.icr()); 2149444SAndreas.Sandberg@ARM.com if (regs.icr.int_assert() || regs.imr == 0) { 2159444SAndreas.Sandberg@ARM.com regs.icr = regs.icr() & ~mask(30); 2169444SAndreas.Sandberg@ARM.com DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr()); 2179444SAndreas.Sandberg@ARM.com } 2189444SAndreas.Sandberg@ARM.com if (regs.ctrl_ext.iame() && regs.icr.int_assert()) 2199444SAndreas.Sandberg@ARM.com regs.imr &= ~regs.iam; 2209444SAndreas.Sandberg@ARM.com chkInterrupt(); 2219444SAndreas.Sandberg@ARM.com break; 2229444SAndreas.Sandberg@ARM.com case REG_EICR: 2239444SAndreas.Sandberg@ARM.com // This is only useful for MSI, but the driver reads it every time 2242329SN/A // Just don't do anything 2256221Snate@binkert.org pkt->set<uint32_t>(0); 2269444SAndreas.Sandberg@ARM.com break; 2279444SAndreas.Sandberg@ARM.com case REG_ITR: 2282292SN/A pkt->set<uint32_t>(regs.itr()); 2292292SN/A break; 2302292SN/A case REG_RCTL: 2312292SN/A pkt->set<uint32_t>(regs.rctl()); 2329444SAndreas.Sandberg@ARM.com break; 2339444SAndreas.Sandberg@ARM.com case REG_FCTTV: 2349444SAndreas.Sandberg@ARM.com pkt->set<uint32_t>(regs.fcttv()); 2359444SAndreas.Sandberg@ARM.com break; 2369444SAndreas.Sandberg@ARM.com case REG_TCTL: 2379444SAndreas.Sandberg@ARM.com pkt->set<uint32_t>(regs.tctl()); 2389444SAndreas.Sandberg@ARM.com break; 2399444SAndreas.Sandberg@ARM.com case REG_PBA: 2402292SN/A pkt->set<uint32_t>(regs.pba()); 2411060SN/A break; 2421060SN/A case REG_WUC: 2432292SN/A case REG_LEDCTL: 2442292SN/A pkt->set<uint32_t>(0); // We don't care, so just return 0 2456221Snate@binkert.org break; 2462292SN/A case REG_FCRTL: 2472292SN/A pkt->set<uint32_t>(regs.fcrtl()); 2482292SN/A break; 
2492292SN/A case REG_FCRTH: 2502292SN/A pkt->set<uint32_t>(regs.fcrth()); 2511061SN/A break; 2521060SN/A case REG_RDBAL: 2532292SN/A pkt->set<uint32_t>(regs.rdba.rdbal()); 2541060SN/A break; 2556221Snate@binkert.org case REG_RDBAH: 2566221Snate@binkert.org pkt->set<uint32_t>(regs.rdba.rdbah()); 2571060SN/A break; 2581060SN/A case REG_RDLEN: 2591061SN/A pkt->set<uint32_t>(regs.rdlen()); 2601060SN/A break; 2612292SN/A case REG_SRRCTL: 2621060SN/A pkt->set<uint32_t>(regs.srrctl()); 2632292SN/A break; 2642292SN/A case REG_RDH: 2651060SN/A pkt->set<uint32_t>(regs.rdh()); 2662292SN/A break; 2672292SN/A case REG_RDT: 2682292SN/A pkt->set<uint32_t>(regs.rdt()); 2692292SN/A break; 2702292SN/A case REG_RDTR: 2711060SN/A pkt->set<uint32_t>(regs.rdtr()); 2721060SN/A if (regs.rdtr.fpd()) { 2731061SN/A rxDescCache.writeback(0); 2742863Sktlim@umich.edu DPRINTF(EthernetIntr, 2759444SAndreas.Sandberg@ARM.com "Posting interrupt because of RDTR.FPD write\n"); 2761060SN/A postInterrupt(IT_RXT); 2779444SAndreas.Sandberg@ARM.com regs.rdtr.fpd(0); 2789444SAndreas.Sandberg@ARM.com } 2799444SAndreas.Sandberg@ARM.com break; 2809444SAndreas.Sandberg@ARM.com case REG_RXDCTL: 2819444SAndreas.Sandberg@ARM.com pkt->set<uint32_t>(regs.rxdctl()); 2829444SAndreas.Sandberg@ARM.com break; 2839444SAndreas.Sandberg@ARM.com case REG_RADV: 2842863Sktlim@umich.edu pkt->set<uint32_t>(regs.radv()); 2852316SN/A break; 2861060SN/A case REG_TDBAL: 2872316SN/A pkt->set<uint32_t>(regs.tdba.tdbal()); 2882316SN/A break; 2892307SN/A case REG_TDBAH: 2901060SN/A pkt->set<uint32_t>(regs.tdba.tdbah()); 2919444SAndreas.Sandberg@ARM.com break; 2929444SAndreas.Sandberg@ARM.com case REG_TDLEN: 2931060SN/A pkt->set<uint32_t>(regs.tdlen()); 2949444SAndreas.Sandberg@ARM.com break; 2959444SAndreas.Sandberg@ARM.com case REG_TDH: 2969444SAndreas.Sandberg@ARM.com pkt->set<uint32_t>(regs.tdh()); 2979444SAndreas.Sandberg@ARM.com break; 2986221Snate@binkert.org case REG_TXDCA_CTL: 2999444SAndreas.Sandberg@ARM.com 
pkt->set<uint32_t>(regs.txdca_ctl()); 3009444SAndreas.Sandberg@ARM.com break; 3019444SAndreas.Sandberg@ARM.com case REG_TDT: 3029444SAndreas.Sandberg@ARM.com pkt->set<uint32_t>(regs.tdt()); 3032307SN/A break; 3042307SN/A case REG_TIDV: 3052307SN/A pkt->set<uint32_t>(regs.tidv()); 3062307SN/A break; 3072307SN/A case REG_TXDCTL: 3086221Snate@binkert.org pkt->set<uint32_t>(regs.txdctl()); 3091858SN/A break; 3102292SN/A case REG_TADV: 3111858SN/A pkt->set<uint32_t>(regs.tadv()); 3122292SN/A break; 3132292SN/A case REG_TDWBAL: 3142292SN/A pkt->set<uint32_t>(regs.tdwba & mask(32)); 3152292SN/A break; 3163788Sgblack@eecs.umich.edu case REG_TDWBAH: 3172292SN/A pkt->set<uint32_t>(regs.tdwba >> 32); 3182698Sktlim@umich.edu break; 3193788Sgblack@eecs.umich.edu case REG_RXCSUM: 3202301SN/A pkt->set<uint32_t>(regs.rxcsum()); 3213788Sgblack@eecs.umich.edu break; 3223788Sgblack@eecs.umich.edu case REG_RLPML: 3233788Sgblack@eecs.umich.edu pkt->set<uint32_t>(regs.rlpml); 3243788Sgblack@eecs.umich.edu break; 3253788Sgblack@eecs.umich.edu case REG_RFCTL: 3263788Sgblack@eecs.umich.edu pkt->set<uint32_t>(regs.rfctl()); 3273788Sgblack@eecs.umich.edu break; 3283788Sgblack@eecs.umich.edu case REG_MANC: 3293788Sgblack@eecs.umich.edu pkt->set<uint32_t>(regs.manc()); 3303788Sgblack@eecs.umich.edu break; 3313788Sgblack@eecs.umich.edu case REG_SWSM: 3322292SN/A pkt->set<uint32_t>(regs.swsm()); 3332292SN/A regs.swsm.smbi(1); 3342292SN/A break; 3352292SN/A case REG_FWSM: 3362292SN/A pkt->set<uint32_t>(regs.fwsm()); 3372329SN/A break; 3382292SN/A case REG_SWFWSYNC: 3392292SN/A pkt->set<uint32_t>(regs.sw_fw_sync); 3402292SN/A break; 3412935Sksewell@umich.edu default: 3422935Sksewell@umich.edu if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) && 3432731Sktlim@umich.edu !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) && 3442292SN/A !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4) && 3452292SN/A !IN_RANGE(daddr, REG_CRCERRS, STATS_REGS_SIZE)) 3462292SN/A panic("Read request to unknown 
register number: %#x\n", daddr); 3472935Sksewell@umich.edu else 3482292SN/A pkt->set<uint32_t>(0); 3492292SN/A }; 3502935Sksewell@umich.edu 3514632Sgblack@eecs.umich.edu pkt->makeAtomicResponse(); 3523093Sksewell@umich.edu return pioDelay; 3532292SN/A} 3542292SN/A 3553093Sksewell@umich.eduTick 3564632Sgblack@eecs.umich.eduIGbE::write(PacketPtr pkt) 3572935Sksewell@umich.edu{ 3582292SN/A int bar; 3592292SN/A Addr daddr; 3602292SN/A 3612292SN/A 3622292SN/A if (!getBAR(pkt->getAddr(), bar, daddr)) 3632292SN/A panic("Invalid PCI memory access to unmapped memory.\n"); 3642292SN/A 3652292SN/A // Only Memory register BAR is allowed 3662292SN/A assert(bar == 0); 3672292SN/A 3682292SN/A // Only 32bit accesses allowed 3692292SN/A assert(pkt->getSize() == sizeof(uint32_t)); 3702292SN/A 3712292SN/A DPRINTF(Ethernet, "Wrote device register %#X value %#X\n", 3722292SN/A daddr, pkt->get<uint32_t>()); 3732292SN/A 3746221Snate@binkert.org // 3756221Snate@binkert.org // Handle write of register here 3762292SN/A // 3772292SN/A uint32_t val = pkt->get<uint32_t>(); 3783867Sbinkertn@umich.edu 3796221Snate@binkert.org Regs::RCTL oldrctl; 3802292SN/A Regs::TCTL oldtctl; 3812292SN/A 3822292SN/A switch (daddr) { 3832292SN/A case REG_CTRL: 3842292SN/A regs.ctrl = val; 3852292SN/A if (regs.ctrl.tfce()) 3862292SN/A warn("TX Flow control enabled, should implement\n"); 3872292SN/A if (regs.ctrl.rfce()) 3882292SN/A warn("RX Flow control enabled, should implement\n"); 3892292SN/A break; 3902292SN/A case REG_CTRL_EXT: 3912292SN/A regs.ctrl_ext = val; 3922292SN/A break; 3932292SN/A case REG_STATUS: 3942292SN/A regs.sts = val; 3952292SN/A break; 3962292SN/A case REG_EECD: 3973867Sbinkertn@umich.edu int oldClk; 3982292SN/A oldClk = regs.eecd.sk(); 3993867Sbinkertn@umich.edu regs.eecd = val; 4006221Snate@binkert.org // See if this is a eeprom access and emulate accordingly 4012292SN/A if (!oldClk && regs.eecd.sk()) { 4022292SN/A if (eeOpBits < 8) { 4032292SN/A eeOpcode = eeOpcode << 1 | 
regs.eecd.din(); 4042292SN/A eeOpBits++; 4052292SN/A } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) { 4062292SN/A eeAddr = eeAddr << 1 | regs.eecd.din(); 4072292SN/A eeAddrBits++; 4082292SN/A } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) { 4092292SN/A assert(eeAddr>>1 < EEPROM_SIZE); 4102292SN/A DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n", 4112292SN/A flash[eeAddr>>1] >> eeDataBits & 0x1, 4122292SN/A flash[eeAddr>>1]); 4136221Snate@binkert.org regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1); 4142292SN/A eeDataBits++; 4152292SN/A } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) { 4162292SN/A regs.eecd.dout(0); 4172292SN/A eeDataBits++; 4182292SN/A } else 4192292SN/A panic("What's going on with eeprom interface? opcode:" 4202292SN/A " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode, 4212292SN/A (uint32_t)eeOpBits, (uint32_t)eeAddr, 4222292SN/A (uint32_t)eeAddrBits, (uint32_t)eeDataBits); 4236221Snate@binkert.org 4242292SN/A // Reset everything for the next command 4252292SN/A if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) || 4262292SN/A (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) { 4272292SN/A eeOpBits = 0; 4282292SN/A eeAddrBits = 0; 4292292SN/A eeDataBits = 0; 4302292SN/A eeOpcode = 0; 4312292SN/A eeAddr = 0; 4322292SN/A } 4332292SN/A 4342292SN/A DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n", 4352292SN/A (uint32_t)eeOpcode, (uint32_t) eeOpBits, 4362301SN/A (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits); 4372301SN/A if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI || 4383788Sgblack@eecs.umich.edu eeOpcode == EEPROM_RDSR_OPCODE_SPI )) 4393788Sgblack@eecs.umich.edu panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode, 4403788Sgblack@eecs.umich.edu (uint32_t)eeOpBits); 4413788Sgblack@eecs.umich.edu 4423788Sgblack@eecs.umich.edu 4433788Sgblack@eecs.umich.edu } 4443788Sgblack@eecs.umich.edu // If driver requests eeprom access, 
immediately give it to it 4453788Sgblack@eecs.umich.edu regs.eecd.ee_gnt(regs.eecd.ee_req()); 4463798Sgblack@eecs.umich.edu break; 4473798Sgblack@eecs.umich.edu case REG_EERD: 4483798Sgblack@eecs.umich.edu regs.eerd = val; 4493798Sgblack@eecs.umich.edu if (regs.eerd.start()) { 4503798Sgblack@eecs.umich.edu regs.eerd.done(1); 4513798Sgblack@eecs.umich.edu assert(regs.eerd.addr() < EEPROM_SIZE); 4522292SN/A regs.eerd.data(flash[regs.eerd.addr()]); 4532292SN/A regs.eerd.start(0); 4542292SN/A DPRINTF(EthernetEEPROM, "EEPROM: read addr: %#X data %#x\n", 4552292SN/A regs.eerd.addr(), regs.eerd.data()); 4562292SN/A } 4572292SN/A break; 4582292SN/A case REG_MDIC: 4592292SN/A regs.mdic = val; 4602292SN/A if (regs.mdic.i()) 4612292SN/A panic("No support for interrupt on mdic complete\n"); 4622292SN/A if (regs.mdic.phyadd() != 1) 4632292SN/A panic("No support for reading anything but phy\n"); 4642292SN/A DPRINTF(Ethernet, "%s phy address %x\n", 4652292SN/A regs.mdic.op() == 1 ? "Writing" : "Reading", 4662292SN/A regs.mdic.regadd()); 4672292SN/A switch (regs.mdic.regadd()) { 4682292SN/A case PHY_PSTATUS: 4692292SN/A regs.mdic.data(0x796D); // link up 4702292SN/A break; 4712292SN/A case PHY_PID: 4721858SN/A regs.mdic.data(params()->phy_pid); 4731858SN/A break; 4741858SN/A case PHY_EPID: 4751858SN/A regs.mdic.data(params()->phy_epid); 4761858SN/A break; 4776221Snate@binkert.org case PHY_GSTATUS: 4781858SN/A regs.mdic.data(0x7C00); 4792292SN/A break; 4802292SN/A case PHY_EPSTATUS: 4812292SN/A regs.mdic.data(0x3000); 4822292SN/A break; 4831858SN/A case PHY_AGC: 4842292SN/A regs.mdic.data(0x180); // some random length 4852292SN/A break; 4862292SN/A default: 4872292SN/A regs.mdic.data(0); 4882292SN/A } 4892292SN/A regs.mdic.r(1); 4902292SN/A break; 4912292SN/A case REG_ICR: 4922292SN/A DPRINTF(Ethernet, "Writing ICR. 
ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", 4932292SN/A regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame()); 4942292SN/A if (regs.ctrl_ext.iame()) 4952292SN/A regs.imr &= ~regs.iam; 4962292SN/A regs.icr = ~bits(val,30,0) & regs.icr(); 4971858SN/A chkInterrupt(); 4982292SN/A break; 4992292SN/A case REG_ITR: 5002292SN/A regs.itr = val; 5012292SN/A break; 5022292SN/A case REG_ICS: 5032292SN/A DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n"); 5042292SN/A postInterrupt((IntTypes)val); 5052292SN/A break; 5062292SN/A case REG_IMS: 5072292SN/A regs.imr |= val; 5082292SN/A chkInterrupt(); 5092292SN/A break; 5102292SN/A case REG_IMC: 5112292SN/A regs.imr &= ~val; 5122292SN/A chkInterrupt(); 5132292SN/A break; 5142292SN/A case REG_IAM: 5152292SN/A regs.iam = val; 5162292SN/A break; 5172292SN/A case REG_RCTL: 5182292SN/A oldrctl = regs.rctl; 5192292SN/A regs.rctl = val; 5202292SN/A if (regs.rctl.rst()) { 5212292SN/A rxDescCache.reset(); 5222292SN/A DPRINTF(EthernetSM, "RXS: Got RESET!\n"); 5232292SN/A rxFifo.clear(); 5242292SN/A regs.rctl.rst(0); 5252292SN/A } 5262292SN/A if (regs.rctl.en()) 5272292SN/A rxTick = true; 5282292SN/A restartClock(); 5292292SN/A break; 5302292SN/A case REG_FCTTV: 5312292SN/A regs.fcttv = val; 5322292SN/A break; 5332292SN/A case REG_TCTL: 5342292SN/A regs.tctl = val; 5352292SN/A oldtctl = regs.tctl; 5362292SN/A regs.tctl = val; 5372292SN/A if (regs.tctl.en()) 5382292SN/A txTick = true; 5392292SN/A restartClock(); 5402292SN/A if (regs.tctl.en() && !oldtctl.en()) { 5412292SN/A txDescCache.reset(); 5422292SN/A } 5432292SN/A break; 5442292SN/A case REG_PBA: 5452292SN/A regs.pba.rxa(val); 5462292SN/A regs.pba.txa(64 - regs.pba.rxa()); 5472292SN/A break; 5482292SN/A case REG_WUC: 5492292SN/A case REG_LEDCTL: 5502292SN/A case REG_FCAL: 5512292SN/A case REG_FCAH: 5522292SN/A case REG_FCT: 5532292SN/A case REG_VET: 5542292SN/A case REG_AIFS: 5552292SN/A case REG_TIPG: 5562292SN/A ; // We don't care, so don't store anything 5572292SN/A break; 
5582292SN/A case REG_IVAR0: 5592292SN/A warn("Writing to IVAR0, ignoring...\n"); 5602292SN/A break; 5612292SN/A case REG_FCRTL: 5622292SN/A regs.fcrtl = val; 5632292SN/A break; 5642292SN/A case REG_FCRTH: 5652292SN/A regs.fcrth = val; 5662292SN/A break; 5672292SN/A case REG_RDBAL: 5682292SN/A regs.rdba.rdbal( val & ~mask(4)); 5692292SN/A rxDescCache.areaChanged(); 5702292SN/A break; 5712292SN/A case REG_RDBAH: 5722292SN/A regs.rdba.rdbah(val); 5732292SN/A rxDescCache.areaChanged(); 5742292SN/A break; 5752292SN/A case REG_RDLEN: 5762292SN/A regs.rdlen = val & ~mask(7); 5772292SN/A rxDescCache.areaChanged(); 5782292SN/A break; 5792292SN/A case REG_SRRCTL: 5802292SN/A regs.srrctl = val; 5812292SN/A break; 5822292SN/A case REG_RDH: 5837720Sgblack@eecs.umich.edu regs.rdh = val; 5847720Sgblack@eecs.umich.edu rxDescCache.areaChanged(); 5852292SN/A break; 5862292SN/A case REG_RDT: 5872292SN/A regs.rdt = val; 5887720Sgblack@eecs.umich.edu DPRINTF(EthernetSM, "RXS: RDT Updated.\n"); 5897720Sgblack@eecs.umich.edu if (drainState() == DrainState::Running) { 5907720Sgblack@eecs.umich.edu DPRINTF(EthernetSM, "RXS: RDT Fetching Descriptors!\n"); 5912292SN/A rxDescCache.fetchDescriptors(); 5922292SN/A } else { 5932292SN/A DPRINTF(EthernetSM, "RXS: RDT NOT Fetching Desc b/c draining!\n"); 5942292SN/A } 5952292SN/A break; 5962292SN/A case REG_RDTR: 5972292SN/A regs.rdtr = val; 5982292SN/A break; 5992292SN/A case REG_RADV: 6002292SN/A regs.radv = val; 6017720Sgblack@eecs.umich.edu break; 6022292SN/A case REG_RXDCTL: 6032292SN/A regs.rxdctl = val; 6042292SN/A break; 6052292SN/A case REG_TDBAL: 6062292SN/A regs.tdba.tdbal( val & ~mask(4)); 6072336SN/A txDescCache.areaChanged(); 6082336SN/A break; 6092336SN/A case REG_TDBAH: 6102336SN/A regs.tdba.tdbah(val); 6112336SN/A txDescCache.areaChanged(); 6122336SN/A break; 6132336SN/A case REG_TDLEN: 6142336SN/A regs.tdlen = val & ~mask(7); 6152292SN/A txDescCache.areaChanged(); 6162292SN/A break; 6172301SN/A case REG_TDH: 6182301SN/A regs.tdh = 
val; 6192292SN/A txDescCache.areaChanged(); 6202301SN/A break; 6212301SN/A case REG_TXDCA_CTL: 6222301SN/A regs.txdca_ctl = val; 6232292SN/A if (regs.txdca_ctl.enabled()) 6242301SN/A panic("No support for DCA\n"); 6252292SN/A break; 6262301SN/A case REG_TDT: 6272292SN/A regs.tdt = val; 6282301SN/A DPRINTF(EthernetSM, "TXS: TX Tail pointer updated\n"); 6292292SN/A if (drainState() == DrainState::Running) { 6302292SN/A DPRINTF(EthernetSM, "TXS: TDT Fetching Descriptors!\n"); 6312292SN/A txDescCache.fetchDescriptors(); 6322292SN/A } else { 6332336SN/A DPRINTF(EthernetSM, "TXS: TDT NOT Fetching Desc b/c draining!\n"); 6342336SN/A } 6352292SN/A break; 6362292SN/A case REG_TIDV: 6372307SN/A regs.tidv = val; 6382307SN/A break; 6392292SN/A case REG_TXDCTL: 6402292SN/A regs.txdctl = val; 6412292SN/A break; 6422292SN/A case REG_TADV: 6432292SN/A regs.tadv = val; 6442292SN/A break; 6452292SN/A case REG_TDWBAL: 6462292SN/A regs.tdwba &= ~mask(32); 6472292SN/A regs.tdwba |= val; 6482292SN/A txDescCache.completionWriteback(regs.tdwba & ~mask(1), 6492292SN/A regs.tdwba & mask(1)); 6504345Sktlim@umich.edu break; 6512292SN/A case REG_TDWBAH: 6522292SN/A regs.tdwba &= mask(32); 6532292SN/A regs.tdwba |= (uint64_t)val << 32; 6542292SN/A txDescCache.completionWriteback(regs.tdwba & ~mask(1), 6552292SN/A regs.tdwba & mask(1)); 6562292SN/A break; 6572292SN/A case REG_RXCSUM: 6582292SN/A regs.rxcsum = val; 6592292SN/A break; 6602292SN/A case REG_RLPML: 6612292SN/A regs.rlpml = val; 6628471SGiacomo.Gabrielli@arm.com break; 6632292SN/A case REG_RFCTL: 6642292SN/A regs.rfctl = val; 6652292SN/A if (regs.rfctl.exsten()) 6662292SN/A panic("Extended RX descriptors not implemented\n"); 6672292SN/A break; 6682292SN/A case REG_MANC: 6692292SN/A regs.manc = val; 6702292SN/A break; 6712292SN/A case REG_SWSM: 6722292SN/A regs.swsm = val; 6732292SN/A if (regs.fwsm.eep_fw_semaphore()) 6742292SN/A regs.swsm.swesmbi(0); 6752307SN/A break; 6762292SN/A case REG_SWFWSYNC: 6772292SN/A regs.sw_fw_sync = val; 
6782292SN/A break; 6792292SN/A default: 6802292SN/A if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) && 6812292SN/A !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) && 6822292SN/A !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4)) 6832292SN/A panic("Write request to unknown register number: %#x\n", daddr); 6842292SN/A }; 6852292SN/A 6862292SN/A pkt->makeAtomicResponse(); 6872292SN/A return pioDelay; 6882292SN/A} 6892292SN/A 6902292SN/Avoid 6912292SN/AIGbE::postInterrupt(IntTypes t, bool now) 6922292SN/A{ 6932292SN/A assert(t); 6942292SN/A 6952292SN/A // Interrupt is already pending 6966221Snate@binkert.org if (t & regs.icr() && !now) 6972292SN/A return; 6982292SN/A 6992292SN/A regs.icr = regs.icr() | t; 7002292SN/A 7012292SN/A Tick itr_interval = SimClock::Int::ns * 256 * regs.itr.interval(); 7022292SN/A DPRINTF(EthernetIntr, 7032292SN/A "EINT: postInterrupt() curTick(): %d itr: %d interval: %d\n", 7042292SN/A curTick(), regs.itr.interval(), itr_interval); 7052292SN/A 7062292SN/A if (regs.itr.interval() == 0 || now || 7077720Sgblack@eecs.umich.edu lastInterrupt + itr_interval <= curTick()) { 7087720Sgblack@eecs.umich.edu if (interEvent.scheduled()) { 7092292SN/A deschedule(interEvent); 7102307SN/A } 7112307SN/A cpuPostInt(); 7122292SN/A } else { 7132292SN/A Tick int_time = lastInterrupt + itr_interval; 7142292SN/A assert(int_time > 0); 7152292SN/A DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for tick %d\n", 7163798Sgblack@eecs.umich.edu int_time); 7173798Sgblack@eecs.umich.edu if (!interEvent.scheduled()) { 7183798Sgblack@eecs.umich.edu schedule(interEvent, int_time); 7193798Sgblack@eecs.umich.edu } 7203798Sgblack@eecs.umich.edu } 7213798Sgblack@eecs.umich.edu} 7227720Sgblack@eecs.umich.edu 7233798Sgblack@eecs.umich.eduvoid 7243798Sgblack@eecs.umich.eduIGbE::delayIntEvent() 7252292SN/A{ 7263798Sgblack@eecs.umich.edu cpuPostInt(); 7272292SN/A} 7282292SN/A 7292292SN/A 7302292SN/Avoid 7312292SN/AIGbE::cpuPostInt() 7322292SN/A{ 7332292SN/A 
7342292SN/A postedInterrupts++; 7352292SN/A 7362292SN/A if (!(regs.icr() & regs.imr)) { 7379527SMatt.Horsnell@arm.com DPRINTF(Ethernet, "Interrupt Masked. Not Posting\n"); 7389527SMatt.Horsnell@arm.com return; 7399527SMatt.Horsnell@arm.com } 7409527SMatt.Horsnell@arm.com 7419527SMatt.Horsnell@arm.com DPRINTF(Ethernet, "Posting Interrupt\n"); 7422292SN/A 7432292SN/A 7442292SN/A if (interEvent.scheduled()) { 7452292SN/A deschedule(interEvent); 7462292SN/A } 7472292SN/A 7482292SN/A if (rdtrEvent.scheduled()) { 7496221Snate@binkert.org regs.icr.rxt0(1); 7506221Snate@binkert.org deschedule(rdtrEvent); 7512292SN/A } 7523867Sbinkertn@umich.edu if (radvEvent.scheduled()) { 7536221Snate@binkert.org regs.icr.rxt0(1); 7543867Sbinkertn@umich.edu deschedule(radvEvent); 7553867Sbinkertn@umich.edu } 7562292SN/A if (tadvEvent.scheduled()) { 7572292SN/A regs.icr.txdw(1); 7582292SN/A deschedule(tadvEvent); 7592292SN/A } 7602292SN/A if (tidvEvent.scheduled()) { 7612292SN/A regs.icr.txdw(1); 7622292SN/A deschedule(tidvEvent); 7632292SN/A } 7642292SN/A 7652292SN/A regs.icr.int_assert(1); 7662292SN/A DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n", 7672292SN/A regs.icr()); 7686221Snate@binkert.org 7696221Snate@binkert.org intrPost(); 7702292SN/A 7713867Sbinkertn@umich.edu lastInterrupt = curTick(); 7726221Snate@binkert.org} 7732292SN/A 7742292SN/Avoid 7752292SN/AIGbE::cpuClearInt() 7762292SN/A{ 7772292SN/A if (regs.icr.int_assert()) { 7782292SN/A regs.icr.int_assert(0); 7792292SN/A DPRINTF(EthernetIntr, 7802292SN/A "EINT: Clearing interrupt to CPU now. 
                Vector %#x\n", regs.icr());
        intrClear();
    }
}

// Re-evaluate the interrupt line after a change to ICR/IMR. If no unmasked
// cause bits remain, cancel any pending throttled post and (if the line is
// asserted) deassert it. Otherwise post immediately when ITR is disabled
// (interval == 0), or schedule a throttled post one ITR interval in the
// future (the interval field is in units of 256 ns, per the multiply below).
void
IGbE::chkInterrupt()
{
    DPRINTF(Ethernet, "Checking interrupts icr: %#x imr: %#x\n", regs.icr(),
            regs.imr);
    // Check if we need to clear the cpu interrupt
    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Mask cleaned all interrupts\n");
        if (interEvent.scheduled())
            deschedule(interEvent);
        if (regs.icr.int_assert())
            cpuClearInt();
    }
    DPRINTF(Ethernet, "ITR = %#X itr.interval = %#X\n",
            regs.itr(), regs.itr.interval());

    if (regs.icr() & regs.imr) {
        if (regs.itr.interval() == 0) {
            cpuPostInt();
        } else {
            DPRINTF(Ethernet,
                    "Possibly scheduling interrupt because of imr write\n");
            if (!interEvent.scheduled()) {
                // Throttle: one ITR tick is 256 ns
                Tick t = curTick() + SimClock::Int::ns * 256 * regs.itr.interval();
                DPRINTF(Ethernet, "Scheduling for %d\n", t);
                schedule(interEvent, t);
            }
        }
    }
}


///////////////////////////// IGbE::DescCache //////////////////////////////

// Construct a descriptor cache of capacity 's' descriptors and allocate the
// DMA staging buffers used for fetch and writeback (sized in descriptors,
// not bytes).
template<class T>
IGbE::DescCache<T>::DescCache(IGbE *i, const std::string n, int s)
    : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0),
      wbOut(0), moreToWb(false), wbAlignment(0), pktPtr(NULL),
      wbDelayEvent(this), fetchDelayEvent(this), fetchEvent(this),
      wbEvent(this)
{
    fetchBuf = new T[size];
    wbBuf = new T[size];
}

// Free all cached descriptors (via reset()) and the DMA staging buffers.
template<class T>
IGbE::DescCache<T>::~DescCache()
{
    reset();
    delete[] fetchBuf;
    delete[] wbBuf;
}

// Called when the guest changes the descriptor ring's base/length/head
// registers. Only legal while the cache is idle: any in-flight fetch,
// writeback, or cached used descriptor means state would be lost.
template<class T>
void
IGbE::DescCache<T>::areaChanged()
{
    if (usedCache.size() > 0 || curFetching || wbOut)
        panic("Descriptor Address, Length or Head changed. Bad\n");
    reset();

}

// Start writing back used descriptors to guest memory. aMask is an
// alignment mask applied to the count of descriptors written back
// (0 = write back everything available). If a writeback is already in
// flight, a less-restrictive request is merged by recording the new mask
// and setting moreToWb so wbComplete() issues a follow-up pass.
template<class T>
void
IGbE::DescCache<T>::writeback(Addr aMask)
{
    int curHead = descHead();
    int max_to_wb = usedCache.size();

    // Check if this writeback is less restrictive that the previous
    // and if so setup another one immediately following it
    if (wbOut) {
        if (aMask < wbAlignment) {
            moreToWb = true;
            wbAlignment = aMask;
        }
        DPRINTF(EthernetDesc,
                "Writing back already in process, returning\n");
        return;
    }

    moreToWb = false;
    wbAlignment = aMask;


    DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
            "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
            curHead, descTail(), descLen(), cachePnt, max_to_wb,
            descLeft());

    // Clamp the writeback at the end of the ring; the wrapped remainder is
    // handled by a second pass (moreToWb).
    if (max_to_wb + curHead >= descLen()) {
        max_to_wb = descLen() - curHead;
        moreToWb = true;
        // this is by definition aligned correctly
    } else if (wbAlignment != 0) {
        // align the wb point to the mask
        max_to_wb = max_to_wb & ~wbAlignment;
    }

    DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);

    if (max_to_wb <= 0) {
        if (usedCache.size())
            igbe->anBegin(annSmWb, "Wait Alignment", CPA::FL_WAIT);
        else
            igbe->anWe(annSmWb, annUsedCacheQ);
        return;
    }

    wbOut = max_to_wb;

    // Model the device's internal latency before the DMA is issued.
    assert(!wbDelayEvent.scheduled());
    igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
    igbe->anBegin(annSmWb, "Prepare Writeback Desc");
}

// Second stage of writeback (runs after wbDelay): copy the used descriptors
// into the staging buffer and issue the DMA write; wbEvent fires
// wbComplete() when it finishes.
template<class T>
void
IGbE::DescCache<T>::writeback1()
{
    // If we're draining delay issuing this DMA
    if (igbe->drainState() != DrainState::Running) {
        igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
        return;
    }

    DPRINTF(EthernetDesc, "Begining DMA of %d descriptors\n", wbOut);

    for (int x = 0; x < wbOut; x++) {
        assert(usedCache.size());
        memcpy(&wbBuf[x], usedCache[x], sizeof(T));
        igbe->anPq(annSmWb, annUsedCacheQ);
        igbe->anPq(annSmWb, annDescQ);
        igbe->anQ(annSmWb, annUsedDescQ);
    }


    igbe->anBegin(annSmWb, "Writeback Desc DMA");

    assert(wbOut);
    igbe->dmaWrite(pciToDma(descBase() + descHead() * sizeof(T)),
                   wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf,
                   igbe->wbCompDelay);
}

// Start fetching descriptors from guest memory into the cache. The fetch is
// limited to the contiguous run between cachePnt and the tail (or the ring
// end, whichever comes first) and to the free space left in the cache.
template<class T>
void
IGbE::DescCache<T>::fetchDescriptors()
{
    size_t max_to_fetch;

    if (curFetching) {
        DPRINTF(EthernetDesc,
                "Currently fetching %d descriptors, returning\n",
                curFetching);
        return;
    }

    // Contiguous descriptors available; the wrapped part is picked up by a
    // later fetch after cachePnt wraps to 0.
    if (descTail() >= cachePnt)
        max_to_fetch = descTail() - cachePnt;
    else
        max_to_fetch = descLen() - cachePnt;

    size_t free_cache = size - usedCache.size() - unusedCache.size();

    if (!max_to_fetch)
        igbe->anWe(annSmFetch, annUnusedDescQ);
    else
        igbe->anPq(annSmFetch, annUnusedDescQ, max_to_fetch);

    if (max_to_fetch) {
        if (!free_cache)
            igbe->anWf(annSmFetch, annDescQ);
        else
            igbe->anRq(annSmFetch, annDescQ, free_cache);
    }

    max_to_fetch = std::min(max_to_fetch, free_cache);


    DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
            "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
            descHead(), descTail(), descLen(), cachePnt,
            max_to_fetch, descLeft());

    // Nothing to do
    if (max_to_fetch == 0)
        return;

    // So we don't have two descriptor fetches going on at once
    curFetching = max_to_fetch;

    // Model the device's internal latency before the DMA is issued.
    assert(!fetchDelayEvent.scheduled());
    igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
    igbe->anBegin(annSmFetch, "Prepare Fetch Desc");
}

// Second stage of the fetch (runs after fetchDelay): issue the DMA read of
// curFetching descriptors into the staging buffer; fetchEvent fires
// fetchComplete() when it finishes.
template<class T>
void
IGbE::DescCache<T>::fetchDescriptors1()
{
    // If we're draining delay issuing this DMA
    if (igbe->drainState() != DrainState::Running) {
        igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
        return;
    }

    igbe->anBegin(annSmFetch, "Fetch Desc");

    DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
            descBase() + cachePnt * sizeof(T),
            pciToDma(descBase() + cachePnt * sizeof(T)),
            curFetching * sizeof(T));
    assert(curFetching);
    igbe->dmaRead(pciToDma(descBase() + cachePnt * sizeof(T)),
                  curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf,
                  igbe->fetchCompDelay);
}

// DMA-read completion: copy each fetched descriptor out of the staging
// buffer into the unused cache, advance cachePnt (wrapping at the ring
// end), and restart the owning state machine.
template<class T>
void
IGbE::DescCache<T>::fetchComplete()
{
    T *newDesc;
    igbe->anBegin(annSmFetch, "Fetch Complete");
    for (int x = 0; x < curFetching; x++) {
        newDesc = new T;
        memcpy(newDesc, &fetchBuf[x], sizeof(T));
        unusedCache.push_back(newDesc);
        igbe->anDq(annSmFetch, annUnusedDescQ);
        igbe->anQ(annSmFetch, annUnusedCacheQ);
        igbe->anQ(annSmFetch, annDescQ);
    }


#ifndef NDEBUG
    int oldCp = cachePnt;
#endif

    cachePnt += curFetching;
    assert(cachePnt <= descLen());
    if (cachePnt == descLen())
        cachePnt = 0;

    curFetching = 0;

    DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
            oldCp, cachePnt);

    // Annotate what we're now waiting for: more descriptors from the guest,
    // free cache space, or just further processing.
    if ((descTail() >= cachePnt ? (descTail() - cachePnt) : (descLen() -
                                                            cachePnt)) == 0)
    {
        igbe->anWe(annSmFetch, annUnusedDescQ);
    } else if (!(size - usedCache.size() - unusedCache.size())) {
        igbe->anWf(annSmFetch, annDescQ);
    } else {
        igbe->anBegin(annSmFetch, "Wait", CPA::FL_WAIT);
    }

    enableSm();
    igbe->checkDrain();
}

// DMA-write completion: retire the written-back descriptors from the used
// cache, advance the ring head (with wrap), then run the subclass hook and
// any merged follow-up writeback requested while this one was in flight.
template<class T>
void
IGbE::DescCache<T>::wbComplete()
{

    igbe->anBegin(annSmWb, "Finish Writeback");

    long curHead = descHead();
#ifndef NDEBUG
    long oldHead = curHead;
#endif

    for (int x = 0; x < wbOut; x++) {
        assert(usedCache.size());
        delete usedCache[0];
        usedCache.pop_front();

        igbe->anDq(annSmWb, annUsedCacheQ);
        igbe->anDq(annSmWb, annDescQ);
    }

    curHead += wbOut;
    wbOut = 0;

    if (curHead >= descLen())
        curHead -= descLen();

    // Update the head
    updateHead(curHead);

    DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
            oldHead, curHead);

    // If we still have more to wb, call wb now
    actionAfterWb();
    if (moreToWb) {
        moreToWb = false;
        DPRINTF(EthernetDesc, "Writeback has more todo\n");
        writeback(wbAlignment);
    }

    if (!wbOut) {
        igbe->checkDrain();
        if (usedCache.size())
            igbe->anBegin(annSmWb, "Wait", CPA::FL_WAIT);
        else
            igbe->anWe(annSmWb, annUsedCacheQ);
    }
    fetchAfterWb();
}

// Drop every cached descriptor (used and unused) and rewind the fetch
// pointer. Does not touch in-flight DMA state; callers ensure idleness.
template<class T>
void
IGbE::DescCache<T>::reset()
{
    DPRINTF(EthernetDesc, "Reseting descriptor cache\n");
    for (typename CacheType::size_type x = 0; x < usedCache.size(); x++)
        delete usedCache[x];
    for (typename CacheType::size_type x = 0; x < unusedCache.size(); x++)
        delete unusedCache[x];

    usedCache.clear();
    unusedCache.clear();

    cachePnt = 0;

}

// Checkpoint the cache: scalar state, raw bytes of every cached descriptor,
// and the absolute ticks of any pending delayed fetch/writeback events
// (0 = not scheduled).
template<class T>
void
IGbE::DescCache<T>::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(cachePnt);
    SERIALIZE_SCALAR(curFetching);
    SERIALIZE_SCALAR(wbOut);
    SERIALIZE_SCALAR(moreToWb);
    SERIALIZE_SCALAR(wbAlignment);

    typename CacheType::size_type usedCacheSize = usedCache.size();
    SERIALIZE_SCALAR(usedCacheSize);
    for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
        arrayParamOut(cp, csprintf("usedCache_%d", x),
                      (uint8_t*)usedCache[x],sizeof(T));
    }

    typename CacheType::size_type unusedCacheSize = unusedCache.size();
    SERIALIZE_SCALAR(unusedCacheSize);
    for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
        arrayParamOut(cp, csprintf("unusedCache_%d", x),
                      (uint8_t*)unusedCache[x],sizeof(T));
    }

    Tick fetch_delay = 0, wb_delay = 0;
    if (fetchDelayEvent.scheduled())
        fetch_delay = fetchDelayEvent.when();
    SERIALIZE_SCALAR(fetch_delay);
    if (wbDelayEvent.scheduled())
        wb_delay = wbDelayEvent.when();
    SERIALIZE_SCALAR(wb_delay);


}

// Restore the state written by serialize(), re-scheduling the delayed
// fetch/writeback events if they were pending at checkpoint time.
template<class T>
void
IGbE::DescCache<T>::unserialize(CheckpointIn &cp)
{
    UNSERIALIZE_SCALAR(cachePnt);
    UNSERIALIZE_SCALAR(curFetching);
    UNSERIALIZE_SCALAR(wbOut);
    UNSERIALIZE_SCALAR(moreToWb);
    UNSERIALIZE_SCALAR(wbAlignment);

    typename CacheType::size_type usedCacheSize;
    UNSERIALIZE_SCALAR(usedCacheSize);
    T *temp;
    for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
        temp = new T;
        arrayParamIn(cp, csprintf("usedCache_%d", x),
                     (uint8_t*)temp,sizeof(T));
        usedCache.push_back(temp);
    }

    typename CacheType::size_type unusedCacheSize;
    UNSERIALIZE_SCALAR(unusedCacheSize);
    for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
        temp = new T;
        arrayParamIn(cp, csprintf("unusedCache_%d", x),
                     (uint8_t*)temp,sizeof(T));
        unusedCache.push_back(temp);
    }
    Tick fetch_delay = 0, wb_delay = 0;
    UNSERIALIZE_SCALAR(fetch_delay);
    UNSERIALIZE_SCALAR(wb_delay);
    if (fetch_delay)
        igbe->schedule(fetchDelayEvent, fetch_delay);
    if (wb_delay)
        igbe->schedule(wbDelayEvent, wb_delay);


}

///////////////////////////// IGbE::RxDescCache //////////////////////////////

11942292SN/AIGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s) 11952292SN/A : DescCache<RxDesc>(i, n, s), pktDone(false), splitCount(0), 11962292SN/A pktEvent(this), pktHdrEvent(this), pktDataEvent(this) 11972292SN/A 11982292SN/A{ 11992292SN/A annSmFetch = "RX Desc Fetch"; 12002292SN/A annSmWb = "RX Desc Writeback"; 12012292SN/A annUnusedDescQ = "RX Unused Descriptors"; 12022292SN/A annUnusedCacheQ = "RX Unused Descriptor Cache"; 12032292SN/A annUsedCacheQ = "RX Used Descriptor Cache"; 12042292SN/A annUsedDescQ = "RX Used Descriptors"; 12056221Snate@binkert.org annDescQ = "RX Descriptors"; 12062292SN/A} 12072292SN/A 12082292SN/Avoid 12092292SN/AIGbE::RxDescCache::pktSplitDone() 12102292SN/A{ 12112292SN/A splitCount++; 12122292SN/A DPRINTF(EthernetDesc, 12132292SN/A "Part of split packet done: splitcount now %d\n", splitCount); 12142301SN/A assert(splitCount <= 2); 12152292SN/A if (splitCount != 2) 12162292SN/A return; 12172292SN/A splitCount = 0; 12182292SN/A DPRINTF(EthernetDesc, 12192292SN/A "Part of split packet done: calling pktComplete()\n"); 12202292SN/A pktComplete(); 12212292SN/A} 12222292SN/A 12232292SN/Aint 12244632Sgblack@eecs.umich.eduIGbE::RxDescCache::writePacket(EthPacketPtr packet, int pkt_offset) 12252292SN/A{ 12262292SN/A assert(unusedCache.size()); 12272292SN/A //if (!unusedCache.size()) 12282292SN/A // return false; 12292292SN/A 12302292SN/A pktPtr = packet; 12312292SN/A pktDone = false; 12322292SN/A unsigned buf_len, hdr_len; 12332292SN/A 12342292SN/A RxDesc *desc = unusedCache.front(); 12352292SN/A switch (igbe->regs.srrctl.desctype()) { 12362292SN/A case RXDT_LEGACY: 12372292SN/A assert(pkt_offset == 0); 12382292SN/A bytesCopied = packet->length; 12392292SN/A DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n", 12402292SN/A packet->length, igbe->regs.rctl.descSize()); 12412292SN/A assert(packet->length < igbe->regs.rctl.descSize()); 12422292SN/A igbe->dmaWrite(pciToDma(desc->legacy.buf), 12432292SN/A packet->length, 
&pktEvent, packet->data, 12442292SN/A igbe->rxWriteDelay); 12452292SN/A break; 12462292SN/A case RXDT_ADV_ONEBUF: 12472292SN/A assert(pkt_offset == 0); 12482292SN/A bytesCopied = packet->length; 12492292SN/A buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() : 12502292SN/A igbe->regs.rctl.descSize(); 12512292SN/A DPRINTF(EthernetDesc, "Packet Length: %d srrctl: %#x Desc Size: %d\n", 12522292SN/A packet->length, igbe->regs.srrctl(), buf_len); 12532292SN/A assert(packet->length < buf_len); 12542292SN/A igbe->dmaWrite(pciToDma(desc->adv_read.pkt), 12553798Sgblack@eecs.umich.edu packet->length, &pktEvent, packet->data, 12563798Sgblack@eecs.umich.edu igbe->rxWriteDelay); 12573798Sgblack@eecs.umich.edu desc->adv_wb.header_len = htole(0); 12582292SN/A desc->adv_wb.sph = htole(0); 12593798Sgblack@eecs.umich.edu desc->adv_wb.pkt_len = htole((uint16_t)(pktPtr->length)); 12603798Sgblack@eecs.umich.edu break; 12613798Sgblack@eecs.umich.edu case RXDT_ADV_SPLIT_A: 12623798Sgblack@eecs.umich.edu int split_point; 12633798Sgblack@eecs.umich.edu 12643798Sgblack@eecs.umich.edu buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() : 12653798Sgblack@eecs.umich.edu igbe->regs.rctl.descSize(); 12663798Sgblack@eecs.umich.edu hdr_len = igbe->regs.rctl.lpe() ? 
igbe->regs.srrctl.hdrLen() : 0; 12673788Sgblack@eecs.umich.edu DPRINTF(EthernetDesc, 12683788Sgblack@eecs.umich.edu "lpe: %d Packet Length: %d offset: %d srrctl: %#x " 12692292SN/A "hdr addr: %#x Hdr Size: %d desc addr: %#x Desc Size: %d\n", 12703788Sgblack@eecs.umich.edu igbe->regs.rctl.lpe(), packet->length, pkt_offset, 12713788Sgblack@eecs.umich.edu igbe->regs.srrctl(), desc->adv_read.hdr, hdr_len, 12723788Sgblack@eecs.umich.edu desc->adv_read.pkt, buf_len); 12732292SN/A 12742292SN/A split_point = hsplit(pktPtr); 12752301SN/A 12762292SN/A if (packet->length <= hdr_len) { 12772301SN/A bytesCopied = packet->length; 12782292SN/A assert(pkt_offset == 0); 12792292SN/A DPRINTF(EthernetDesc, "Hdr split: Entire packet in header\n"); 12802301SN/A igbe->dmaWrite(pciToDma(desc->adv_read.hdr), 12812292SN/A packet->length, &pktEvent, packet->data, 12822292SN/A igbe->rxWriteDelay); 12832292SN/A desc->adv_wb.header_len = htole((uint16_t)packet->length); 12842292SN/A desc->adv_wb.sph = htole(0); 12852292SN/A desc->adv_wb.pkt_len = htole(0); 12862292SN/A } else if (split_point) { 12877720Sgblack@eecs.umich.edu if (pkt_offset) { 12882292SN/A // we are only copying some data, header/data has already been 12892292SN/A // copied 12902301SN/A int max_to_copy = 12912292SN/A std::min(packet->length - pkt_offset, buf_len); 12922292SN/A bytesCopied += max_to_copy; 12932301SN/A DPRINTF(EthernetDesc, 12942292SN/A "Hdr split: Continuing data buffer copy\n"); 12952301SN/A igbe->dmaWrite(pciToDma(desc->adv_read.pkt), 12962292SN/A max_to_copy, &pktEvent, 12972292SN/A packet->data + pkt_offset, igbe->rxWriteDelay); 12982292SN/A desc->adv_wb.header_len = htole(0); 12992703Sktlim@umich.edu desc->adv_wb.pkt_len = htole((uint16_t)max_to_copy); 13002292SN/A desc->adv_wb.sph = htole(0); 13012301SN/A } else { 13022292SN/A int max_to_copy = 13032292SN/A std::min(packet->length - split_point, buf_len); 13042292SN/A bytesCopied += max_to_copy + split_point; 13052292SN/A 13062292SN/A DPRINTF(EthernetDesc, 
"Hdr split: splitting at %d\n", 13072292SN/A split_point); 13082292SN/A igbe->dmaWrite(pciToDma(desc->adv_read.hdr), 13091061SN/A split_point, &pktHdrEvent, 13101061SN/A packet->data, igbe->rxWriteDelay); 13111060SN/A igbe->dmaWrite(pciToDma(desc->adv_read.pkt), 13121060SN/A max_to_copy, &pktDataEvent, 13136221Snate@binkert.org packet->data + split_point, igbe->rxWriteDelay); 13141060SN/A desc->adv_wb.header_len = htole(split_point); 13152292SN/A desc->adv_wb.sph = 1; 13162292SN/A desc->adv_wb.pkt_len = htole((uint16_t)(max_to_copy)); 13172292SN/A } 13181060SN/A } else { 13191060SN/A panic("Header split not fitting within header buffer or " 13201060SN/A "undecodable packet not fitting in header unsupported\n"); 13212292SN/A } 13222292SN/A break; 13232292SN/A default: 13242292SN/A panic("Unimplemnted RX receive buffer type: %d\n", 13252292SN/A igbe->regs.srrctl.desctype()); 13262292SN/A } 13272292SN/A return bytesCopied; 13282292SN/A 13292292SN/A} 13302292SN/A 13312292SN/Avoid 13322292SN/AIGbE::RxDescCache::pktComplete() 13332292SN/A{ 13342292SN/A assert(unusedCache.size()); 13352292SN/A RxDesc *desc; 13362292SN/A desc = unusedCache.front(); 13372292SN/A 13382292SN/A igbe->anBegin("RXS", "Update Desc"); 13392292SN/A 13402292SN/A uint16_t crcfixup = igbe->regs.rctl.secrc() ? 
0 : 4 ; 13412292SN/A DPRINTF(EthernetDesc, "pktPtr->length: %d bytesCopied: %d " 13421060SN/A "stripcrc offset: %d value written: %d %d\n", 13432292SN/A pktPtr->length, bytesCopied, crcfixup, 13441060SN/A htole((uint16_t)(pktPtr->length + crcfixup)), 13452292SN/A (uint16_t)(pktPtr->length + crcfixup)); 13462292SN/A 13472292SN/A // no support for anything but starting at 0 13482292SN/A assert(igbe->regs.rxcsum.pcss() == 0); 13492980Sgblack@eecs.umich.edu 13501060SN/A DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n"); 13516221Snate@binkert.org 13521060SN/A uint16_t status = RXDS_DD; 13536221Snate@binkert.org uint8_t err = 0; 13541060SN/A uint16_t ext_err = 0; 13556221Snate@binkert.org uint16_t csum = 0; 13562292SN/A uint16_t ptype = 0; 13572292SN/A uint16_t ip_id = 0; 13582292SN/A 13591060SN/A assert(bytesCopied <= pktPtr->length); 13602292SN/A if (bytesCopied == pktPtr->length) 13611062SN/A status |= RXDS_EOP; 13621060SN/A 13631060SN/A IpPtr ip(pktPtr); 1364 1365 if (ip) { 1366 DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n", ip->id()); 1367 ptype |= RXDP_IPV4; 1368 ip_id = ip->id(); 1369 1370 if (igbe->regs.rxcsum.ipofld()) { 1371 DPRINTF(EthernetDesc, "Checking IP checksum\n"); 1372 status |= RXDS_IPCS; 1373 csum = htole(cksum(ip)); 1374 igbe->rxIpChecksums++; 1375 if (cksum(ip) != 0) { 1376 err |= RXDE_IPE; 1377 ext_err |= RXDEE_IPE; 1378 DPRINTF(EthernetDesc, "Checksum is bad!!\n"); 1379 } 1380 } 1381 TcpPtr tcp(ip); 1382 if (tcp && igbe->regs.rxcsum.tuofld()) { 1383 DPRINTF(EthernetDesc, "Checking TCP checksum\n"); 1384 status |= RXDS_TCPCS; 1385 ptype |= RXDP_TCP; 1386 csum = htole(cksum(tcp)); 1387 igbe->rxTcpChecksums++; 1388 if (cksum(tcp) != 0) { 1389 DPRINTF(EthernetDesc, "Checksum is bad!!\n"); 1390 err |= RXDE_TCPE; 1391 ext_err |= RXDEE_TCPE; 1392 } 1393 } 1394 1395 UdpPtr udp(ip); 1396 if (udp && igbe->regs.rxcsum.tuofld()) { 1397 DPRINTF(EthernetDesc, "Checking UDP checksum\n"); 1398 status |= RXDS_UDPCS; 1399 ptype |= 
RXDP_UDP; 1400 csum = htole(cksum(udp)); 1401 igbe->rxUdpChecksums++; 1402 if (cksum(udp) != 0) { 1403 DPRINTF(EthernetDesc, "Checksum is bad!!\n"); 1404 ext_err |= RXDEE_TCPE; 1405 err |= RXDE_TCPE; 1406 } 1407 } 1408 } else { // if ip 1409 DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n"); 1410 } 1411 1412 switch (igbe->regs.srrctl.desctype()) { 1413 case RXDT_LEGACY: 1414 desc->legacy.len = htole((uint16_t)(pktPtr->length + crcfixup)); 1415 desc->legacy.status = htole(status); 1416 desc->legacy.errors = htole(err); 1417 // No vlan support at this point... just set it to 0 1418 desc->legacy.vlan = 0; 1419 break; 1420 case RXDT_ADV_SPLIT_A: 1421 case RXDT_ADV_ONEBUF: 1422 desc->adv_wb.rss_type = htole(0); 1423 desc->adv_wb.pkt_type = htole(ptype); 1424 if (igbe->regs.rxcsum.pcsd()) { 1425 // no rss support right now 1426 desc->adv_wb.rss_hash = htole(0); 1427 } else { 1428 desc->adv_wb.id = htole(ip_id); 1429 desc->adv_wb.csum = htole(csum); 1430 } 1431 desc->adv_wb.status = htole(status); 1432 desc->adv_wb.errors = htole(ext_err); 1433 // no vlan support 1434 desc->adv_wb.vlan_tag = htole(0); 1435 break; 1436 default: 1437 panic("Unimplemnted RX receive buffer type %d\n", 1438 igbe->regs.srrctl.desctype()); 1439 } 1440 1441 DPRINTF(EthernetDesc, "Descriptor complete w0: %#x w1: %#x\n", 1442 desc->adv_read.pkt, desc->adv_read.hdr); 1443 1444 if (bytesCopied == pktPtr->length) { 1445 DPRINTF(EthernetDesc, 1446 "Packet completely written to descriptor buffers\n"); 1447 // Deal with the rx timer interrupts 1448 if (igbe->regs.rdtr.delay()) { 1449 Tick delay = igbe->regs.rdtr.delay() * igbe->intClock(); 1450 DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n", delay); 1451 igbe->reschedule(igbe->rdtrEvent, curTick() + delay); 1452 } 1453 1454 if (igbe->regs.radv.idv()) { 1455 Tick delay = igbe->regs.radv.idv() * igbe->intClock(); 1456 DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n", delay); 1457 if (!igbe->radvEvent.scheduled()) { 1458 
igbe->schedule(igbe->radvEvent, curTick() + delay); 1459 } 1460 } 1461 1462 // if neither radv or rdtr, maybe itr is set... 1463 if (!igbe->regs.rdtr.delay() && !igbe->regs.radv.idv()) { 1464 DPRINTF(EthernetSM, 1465 "RXS: Receive interrupt delay disabled, posting IT_RXT\n"); 1466 igbe->postInterrupt(IT_RXT); 1467 } 1468 1469 // If the packet is small enough, interrupt appropriately 1470 // I wonder if this is delayed or not?! 1471 if (pktPtr->length <= igbe->regs.rsrpd.idv()) { 1472 DPRINTF(EthernetSM, 1473 "RXS: Posting IT_SRPD beacuse small packet received\n"); 1474 igbe->postInterrupt(IT_SRPD); 1475 } 1476 bytesCopied = 0; 1477 } 1478 1479 pktPtr = NULL; 1480 igbe->checkDrain(); 1481 enableSm(); 1482 pktDone = true; 1483 1484 igbe->anBegin("RXS", "Done Updating Desc"); 1485 DPRINTF(EthernetDesc, "Processing of this descriptor complete\n"); 1486 igbe->anDq("RXS", annUnusedCacheQ); 1487 unusedCache.pop_front(); 1488 igbe->anQ("RXS", annUsedCacheQ); 1489 usedCache.push_back(desc); 1490} 1491 1492void 1493IGbE::RxDescCache::enableSm() 1494{ 1495 if (igbe->drainState() != DrainState::Draining) { 1496 igbe->rxTick = true; 1497 igbe->restartClock(); 1498 } 1499} 1500 1501bool 1502IGbE::RxDescCache::packetDone() 1503{ 1504 if (pktDone) { 1505 pktDone = false; 1506 return true; 1507 } 1508 return false; 1509} 1510 1511bool 1512IGbE::RxDescCache::hasOutstandingEvents() 1513{ 1514 return pktEvent.scheduled() || wbEvent.scheduled() || 1515 fetchEvent.scheduled() || pktHdrEvent.scheduled() || 1516 pktDataEvent.scheduled(); 1517 1518} 1519 1520void 1521IGbE::RxDescCache::serialize(CheckpointOut &cp) const 1522{ 1523 DescCache<RxDesc>::serialize(cp); 1524 SERIALIZE_SCALAR(pktDone); 1525 SERIALIZE_SCALAR(splitCount); 1526 SERIALIZE_SCALAR(bytesCopied); 1527} 1528 1529void 1530IGbE::RxDescCache::unserialize(CheckpointIn &cp) 1531{ 1532 DescCache<RxDesc>::unserialize(cp); 1533 UNSERIALIZE_SCALAR(pktDone); 1534 UNSERIALIZE_SCALAR(splitCount); 1535 UNSERIALIZE_SCALAR(bytesCopied); 
1536} 1537 1538 1539///////////////////////////// IGbE::TxDescCache ////////////////////////////// 1540 1541IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s) 1542 : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false), 1543 pktWaiting(false), pktMultiDesc(false), 1544 completionAddress(0), completionEnabled(false), 1545 useTso(false), tsoHeaderLen(0), tsoMss(0), tsoTotalLen(0), tsoUsedLen(0), 1546 tsoPrevSeq(0), tsoPktPayloadBytes(0), tsoLoadedHeader(false), 1547 tsoPktHasHeader(false), tsoDescBytesUsed(0), tsoCopyBytes(0), tsoPkts(0), 1548 pktEvent(this), headerEvent(this), nullEvent(this) 1549{ 1550 annSmFetch = "TX Desc Fetch"; 1551 annSmWb = "TX Desc Writeback"; 1552 annUnusedDescQ = "TX Unused Descriptors"; 1553 annUnusedCacheQ = "TX Unused Descriptor Cache"; 1554 annUsedCacheQ = "TX Used Descriptor Cache"; 1555 annUsedDescQ = "TX Used Descriptors"; 1556 annDescQ = "TX Descriptors"; 1557} 1558 1559void 1560IGbE::TxDescCache::processContextDesc() 1561{ 1562 assert(unusedCache.size()); 1563 TxDesc *desc; 1564 1565 DPRINTF(EthernetDesc, "Checking and processing context descriptors\n"); 1566 1567 while (!useTso && unusedCache.size() && 1568 TxdOp::isContext(unusedCache.front())) { 1569 DPRINTF(EthernetDesc, "Got context descriptor type...\n"); 1570 1571 desc = unusedCache.front(); 1572 DPRINTF(EthernetDesc, "Descriptor upper: %#x lower: %#X\n", 1573 desc->d1, desc->d2); 1574 1575 1576 // is this going to be a tcp or udp packet? 1577 isTcp = TxdOp::tcp(desc) ? 
true : false; 1578 1579 // setup all the TSO variables, they'll be ignored if we don't use 1580 // tso for this connection 1581 tsoHeaderLen = TxdOp::hdrlen(desc); 1582 tsoMss = TxdOp::mss(desc); 1583 1584 if (TxdOp::isType(desc, TxdOp::TXD_CNXT) && TxdOp::tse(desc)) { 1585 DPRINTF(EthernetDesc, "TCP offload enabled for packet hdrlen: " 1586 "%d mss: %d paylen %d\n", TxdOp::hdrlen(desc), 1587 TxdOp::mss(desc), TxdOp::getLen(desc)); 1588 useTso = true; 1589 tsoTotalLen = TxdOp::getLen(desc); 1590 tsoLoadedHeader = false; 1591 tsoDescBytesUsed = 0; 1592 tsoUsedLen = 0; 1593 tsoPrevSeq = 0; 1594 tsoPktHasHeader = false; 1595 tsoPkts = 0; 1596 tsoCopyBytes = 0; 1597 } 1598 1599 TxdOp::setDd(desc); 1600 unusedCache.pop_front(); 1601 igbe->anDq("TXS", annUnusedCacheQ); 1602 usedCache.push_back(desc); 1603 igbe->anQ("TXS", annUsedCacheQ); 1604 } 1605 1606 if (!unusedCache.size()) 1607 return; 1608 1609 desc = unusedCache.front(); 1610 if (!useTso && TxdOp::isType(desc, TxdOp::TXD_ADVDATA) && 1611 TxdOp::tse(desc)) { 1612 DPRINTF(EthernetDesc, "TCP offload(adv) enabled for packet " 1613 "hdrlen: %d mss: %d paylen %d\n", 1614 tsoHeaderLen, tsoMss, TxdOp::getTsoLen(desc)); 1615 useTso = true; 1616 tsoTotalLen = TxdOp::getTsoLen(desc); 1617 tsoLoadedHeader = false; 1618 tsoDescBytesUsed = 0; 1619 tsoUsedLen = 0; 1620 tsoPrevSeq = 0; 1621 tsoPktHasHeader = false; 1622 tsoPkts = 0; 1623 } 1624 1625 if (useTso && !tsoLoadedHeader) { 1626 // we need to fetch a header 1627 DPRINTF(EthernetDesc, "Starting DMA of TSO header\n"); 1628 assert(TxdOp::isData(desc) && TxdOp::getLen(desc) >= tsoHeaderLen); 1629 pktWaiting = true; 1630 assert(tsoHeaderLen <= 256); 1631 igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)), 1632 tsoHeaderLen, &headerEvent, tsoHeader, 0); 1633 } 1634} 1635 1636void 1637IGbE::TxDescCache::headerComplete() 1638{ 1639 DPRINTF(EthernetDesc, "TSO: Fetching TSO header complete\n"); 1640 pktWaiting = false; 1641 1642 assert(unusedCache.size()); 1643 TxDesc *desc = 
unusedCache.front(); 1644 DPRINTF(EthernetDesc, "TSO: len: %d tsoHeaderLen: %d\n", 1645 TxdOp::getLen(desc), tsoHeaderLen); 1646 1647 if (TxdOp::getLen(desc) == tsoHeaderLen) { 1648 tsoDescBytesUsed = 0; 1649 tsoLoadedHeader = true; 1650 unusedCache.pop_front(); 1651 usedCache.push_back(desc); 1652 } else { 1653 DPRINTF(EthernetDesc, "TSO: header part of larger payload\n"); 1654 tsoDescBytesUsed = tsoHeaderLen; 1655 tsoLoadedHeader = true; 1656 } 1657 enableSm(); 1658 igbe->checkDrain(); 1659} 1660 1661unsigned 1662IGbE::TxDescCache::getPacketSize(EthPacketPtr p) 1663{ 1664 if (!unusedCache.size()) 1665 return 0; 1666 1667 DPRINTF(EthernetDesc, "Starting processing of descriptor\n"); 1668 1669 assert(!useTso || tsoLoadedHeader); 1670 TxDesc *desc = unusedCache.front(); 1671 1672 if (useTso) { 1673 DPRINTF(EthernetDesc, "getPacket(): TxDescriptor data " 1674 "d1: %#llx d2: %#llx\n", desc->d1, desc->d2); 1675 DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d " 1676 "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss, 1677 tsoTotalLen, tsoUsedLen, tsoLoadedHeader); 1678 1679 if (tsoPktHasHeader) 1680 tsoCopyBytes = std::min((tsoMss + tsoHeaderLen) - p->length, 1681 TxdOp::getLen(desc) - tsoDescBytesUsed); 1682 else 1683 tsoCopyBytes = std::min(tsoMss, 1684 TxdOp::getLen(desc) - tsoDescBytesUsed); 1685 unsigned pkt_size = 1686 tsoCopyBytes + (tsoPktHasHeader ? 
0 : tsoHeaderLen); 1687 1688 DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d " 1689 "this descLen: %d\n", 1690 tsoDescBytesUsed, tsoCopyBytes, TxdOp::getLen(desc)); 1691 DPRINTF(EthernetDesc, "TSO: pktHasHeader: %d\n", tsoPktHasHeader); 1692 DPRINTF(EthernetDesc, "TSO: Next packet is %d bytes\n", pkt_size); 1693 return pkt_size; 1694 } 1695 1696 DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n", 1697 TxdOp::getLen(unusedCache.front())); 1698 return TxdOp::getLen(desc); 1699} 1700 1701void 1702IGbE::TxDescCache::getPacketData(EthPacketPtr p) 1703{ 1704 assert(unusedCache.size()); 1705 1706 TxDesc *desc; 1707 desc = unusedCache.front(); 1708 1709 DPRINTF(EthernetDesc, "getPacketData(): TxDescriptor data " 1710 "d1: %#llx d2: %#llx\n", desc->d1, desc->d2); 1711 assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && 1712 TxdOp::getLen(desc)); 1713 1714 pktPtr = p; 1715 1716 pktWaiting = true; 1717 1718 DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d\n", p->length); 1719 1720 if (useTso) { 1721 assert(tsoLoadedHeader); 1722 if (!tsoPktHasHeader) { 1723 DPRINTF(EthernetDesc, 1724 "Loading TSO header (%d bytes) into start of packet\n", 1725 tsoHeaderLen); 1726 memcpy(p->data, &tsoHeader,tsoHeaderLen); 1727 p->length +=tsoHeaderLen; 1728 tsoPktHasHeader = true; 1729 } 1730 } 1731 1732 if (useTso) { 1733 DPRINTF(EthernetDesc, 1734 "Starting DMA of packet at offset %d length: %d\n", 1735 p->length, tsoCopyBytes); 1736 igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)) 1737 + tsoDescBytesUsed, 1738 tsoCopyBytes, &pktEvent, p->data + p->length, 1739 igbe->txReadDelay); 1740 tsoDescBytesUsed += tsoCopyBytes; 1741 assert(tsoDescBytesUsed <= TxdOp::getLen(desc)); 1742 } else { 1743 igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)), 1744 TxdOp::getLen(desc), &pktEvent, p->data + p->length, 1745 igbe->txReadDelay); 1746 } 1747} 1748 1749void 1750IGbE::TxDescCache::pktComplete() 1751{ 1752 1753 TxDesc *desc; 1754 assert(unusedCache.size()); 1755 assert(pktPtr); 1756 
    igbe->anBegin("TXS", "Update Desc");

    DPRINTF(EthernetDesc, "DMA of packet complete\n");


    desc = unusedCache.front();
    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
           TxdOp::getLen(desc));

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
            desc->d1, desc->d2);

    // Set the length of the data in the EtherPacket
    if (useTso) {
        DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
                "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
                tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
        // Only the bytes actually DMAed for this segment count, not the
        // full descriptor length.
        pktPtr->length += tsoCopyBytes;
        tsoUsedLen += tsoCopyBytes;
        DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d\n",
                tsoDescBytesUsed, tsoCopyBytes);
    } else
        pktPtr->length += TxdOp::getLen(desc);



    // Multi-descriptor packet that is not yet complete: either a non-TSO
    // descriptor without EOP, or a TSO segment that still needs more payload
    // (hasn't reached MSS+header and the total isn't exhausted). Retire this
    // descriptor and come back for the rest on a later state-machine tick.
    if ((!TxdOp::eop(desc) && !useTso) ||
        (pktPtr->length < ( tsoMss + tsoHeaderLen) &&
         tsoTotalLen != tsoUsedLen && useTso)) {
        assert(!useTso || (tsoDescBytesUsed == TxdOp::getLen(desc)));
        igbe->anDq("TXS", annUnusedCacheQ);
        unusedCache.pop_front();
        igbe->anQ("TXS", annUsedCacheQ);
        usedCache.push_back(desc);

        tsoDescBytesUsed = 0;
        pktDone = true;
        pktWaiting = false;
        pktMultiDesc = true;

        DPRINTF(EthernetDesc, "Partial Packet Descriptor of %d bytes Done\n",
                pktPtr->length);
        pktPtr = NULL;

        enableSm();
        igbe->checkDrain();
        return;
    }


    pktMultiDesc = false;
    // no support for vlans
    assert(!TxdOp::vle(desc));

    // we only support single packet descriptors at this point
    if (!useTso)
        assert(TxdOp::eop(desc));

    // set that this packet is done
    if (TxdOp::rs(desc))
        TxdOp::setDd(desc);

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
            desc->d1, desc->d2);

    if (useTso) {
        // Each TSO segment gets a fresh IP id, corrected total length, and
        // an adjusted TCP sequence number so the segments look like the
        // hardware split them.
        IpPtr ip(pktPtr);
        if (ip) {
            DPRINTF(EthernetDesc, "TSO: Modifying IP header. Id + %d\n",
                    tsoPkts);
            ip->id(ip->id() + tsoPkts++);
            ip->len(pktPtr->length - EthPtr(pktPtr)->size());

            TcpPtr tcp(ip);
            if (tcp) {
                DPRINTF(EthernetDesc,
                        "TSO: Modifying TCP header. old seq %d + %d\n",
                        tcp->seq(), tsoPrevSeq);
                tcp->seq(tcp->seq() + tsoPrevSeq);
                // FIN/PSH may only be set on the final segment.
                if (tsoUsedLen != tsoTotalLen)
                    tcp->flags(tcp->flags() & ~9); // clear fin & psh
            }
            UdpPtr udp(ip);
            if (udp) {
                DPRINTF(EthernetDesc, "TSO: Modifying UDP header.\n");
                udp->len(pktPtr->length - EthPtr(pktPtr)->size());
            }
        }
        tsoPrevSeq = tsoUsedLen;
    }

    if (DTRACE(EthernetDesc)) {
        IpPtr ip(pktPtr);
        if (ip)
            DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n",
                    ip->id());
        else
            DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
    }

    // Checksums are only offloaded for the newer (advanced data) descriptor
    // types; legacy descriptors never request them here.
    if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) {
        DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
        IpPtr ip(pktPtr);
        assert(ip);
        if (TxdOp::ixsm(desc)) {
            ip->sum(0);
            ip->sum(cksum(ip));
            igbe->txIpChecksums++;
            DPRINTF(EthernetDesc, "Calculated IP checksum\n");
        }
        if (TxdOp::txsm(desc)) {
            TcpPtr tcp(ip);
            UdpPtr udp(ip);
            if (tcp) {
                tcp->sum(0);
                tcp->sum(cksum(tcp));
                igbe->txTcpChecksums++;
                DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
            } else if (udp) {
                assert(udp);
                udp->sum(0);
                udp->sum(cksum(udp));
                igbe->txUdpChecksums++;
                DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
            } else {
                panic("Told to checksum, but don't know how\n");
            }
        }
    }

    if (TxdOp::ide(desc)) {
        // Descriptor asked for a delayed interrupt: arm the TIDV/TADV
        // transmit interrupt delay timers.
        DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
        if (igbe->regs.tidv.idv()) {
            Tick delay = igbe->regs.tidv.idv() * igbe->intClock();
DPRINTF(EthernetDesc, "setting tidv\n"); 1894 igbe->reschedule(igbe->tidvEvent, curTick() + delay, true); 1895 } 1896 1897 if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) { 1898 Tick delay = igbe->regs.tadv.idv() * igbe->intClock(); 1899 DPRINTF(EthernetDesc, "setting tadv\n"); 1900 if (!igbe->tadvEvent.scheduled()) { 1901 igbe->schedule(igbe->tadvEvent, curTick() + delay); 1902 } 1903 } 1904 } 1905 1906 1907 if (!useTso || TxdOp::getLen(desc) == tsoDescBytesUsed) { 1908 DPRINTF(EthernetDesc, "Descriptor Done\n"); 1909 igbe->anDq("TXS", annUnusedCacheQ); 1910 unusedCache.pop_front(); 1911 igbe->anQ("TXS", annUsedCacheQ); 1912 usedCache.push_back(desc); 1913 tsoDescBytesUsed = 0; 1914 } 1915 1916 if (useTso && tsoUsedLen == tsoTotalLen) 1917 useTso = false; 1918 1919 1920 DPRINTF(EthernetDesc, 1921 "------Packet of %d bytes ready for transmission-------\n", 1922 pktPtr->length); 1923 pktDone = true; 1924 pktWaiting = false; 1925 pktPtr = NULL; 1926 tsoPktHasHeader = false; 1927 1928 if (igbe->regs.txdctl.wthresh() == 0) { 1929 igbe->anBegin("TXS", "Desc Writeback"); 1930 DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n"); 1931 writeback(0); 1932 } else if (!igbe->regs.txdctl.gran() && igbe->regs.txdctl.wthresh() <= 1933 descInBlock(usedCache.size())) { 1934 DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n"); 1935 igbe->anBegin("TXS", "Desc Writeback"); 1936 writeback((igbe->cacheBlockSize()-1)>>4); 1937 } else if (igbe->regs.txdctl.wthresh() <= usedCache.size()) { 1938 DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n"); 1939 igbe->anBegin("TXS", "Desc Writeback"); 1940 writeback((igbe->cacheBlockSize()-1)>>4); 1941 } 1942 1943 enableSm(); 1944 igbe->checkDrain(); 1945} 1946 1947void 1948IGbE::TxDescCache::actionAfterWb() 1949{ 1950 DPRINTF(EthernetDesc, "actionAfterWb() completionEnabled: %d\n", 1951 completionEnabled); 1952 igbe->postInterrupt(iGbReg::IT_TXDW); 1953 if (completionEnabled) { 1954 descEnd = 
igbe->regs.tdh(); 1955 DPRINTF(EthernetDesc, 1956 "Completion writing back value: %d to addr: %#x\n", descEnd, 1957 completionAddress); 1958 igbe->dmaWrite(pciToDma(mbits(completionAddress, 63, 2)), 1959 sizeof(descEnd), &nullEvent, (uint8_t*)&descEnd, 0); 1960 } 1961} 1962 1963void 1964IGbE::TxDescCache::serialize(CheckpointOut &cp) const 1965{ 1966 DescCache<TxDesc>::serialize(cp); 1967 1968 SERIALIZE_SCALAR(pktDone); 1969 SERIALIZE_SCALAR(isTcp); 1970 SERIALIZE_SCALAR(pktWaiting); 1971 SERIALIZE_SCALAR(pktMultiDesc); 1972 1973 SERIALIZE_SCALAR(useTso); 1974 SERIALIZE_SCALAR(tsoHeaderLen); 1975 SERIALIZE_SCALAR(tsoMss); 1976 SERIALIZE_SCALAR(tsoTotalLen); 1977 SERIALIZE_SCALAR(tsoUsedLen); 1978 SERIALIZE_SCALAR(tsoPrevSeq);; 1979 SERIALIZE_SCALAR(tsoPktPayloadBytes); 1980 SERIALIZE_SCALAR(tsoLoadedHeader); 1981 SERIALIZE_SCALAR(tsoPktHasHeader); 1982 SERIALIZE_ARRAY(tsoHeader, 256); 1983 SERIALIZE_SCALAR(tsoDescBytesUsed); 1984 SERIALIZE_SCALAR(tsoCopyBytes); 1985 SERIALIZE_SCALAR(tsoPkts); 1986 1987 SERIALIZE_SCALAR(completionAddress); 1988 SERIALIZE_SCALAR(completionEnabled); 1989 SERIALIZE_SCALAR(descEnd); 1990} 1991 1992void 1993IGbE::TxDescCache::unserialize(CheckpointIn &cp) 1994{ 1995 DescCache<TxDesc>::unserialize(cp); 1996 1997 UNSERIALIZE_SCALAR(pktDone); 1998 UNSERIALIZE_SCALAR(isTcp); 1999 UNSERIALIZE_SCALAR(pktWaiting); 2000 UNSERIALIZE_SCALAR(pktMultiDesc); 2001 2002 UNSERIALIZE_SCALAR(useTso); 2003 UNSERIALIZE_SCALAR(tsoHeaderLen); 2004 UNSERIALIZE_SCALAR(tsoMss); 2005 UNSERIALIZE_SCALAR(tsoTotalLen); 2006 UNSERIALIZE_SCALAR(tsoUsedLen); 2007 UNSERIALIZE_SCALAR(tsoPrevSeq);; 2008 UNSERIALIZE_SCALAR(tsoPktPayloadBytes); 2009 UNSERIALIZE_SCALAR(tsoLoadedHeader); 2010 UNSERIALIZE_SCALAR(tsoPktHasHeader); 2011 UNSERIALIZE_ARRAY(tsoHeader, 256); 2012 UNSERIALIZE_SCALAR(tsoDescBytesUsed); 2013 UNSERIALIZE_SCALAR(tsoCopyBytes); 2014 UNSERIALIZE_SCALAR(tsoPkts); 2015 2016 UNSERIALIZE_SCALAR(completionAddress); 2017 UNSERIALIZE_SCALAR(completionEnabled); 
2018 UNSERIALIZE_SCALAR(descEnd); 2019} 2020 2021bool 2022IGbE::TxDescCache::packetAvailable() 2023{ 2024 if (pktDone) { 2025 pktDone = false; 2026 return true; 2027 } 2028 return false; 2029} 2030 2031void 2032IGbE::TxDescCache::enableSm() 2033{ 2034 if (igbe->drainState() != DrainState::Draining) { 2035 igbe->txTick = true; 2036 igbe->restartClock(); 2037 } 2038} 2039 2040bool 2041IGbE::TxDescCache::hasOutstandingEvents() 2042{ 2043 return pktEvent.scheduled() || wbEvent.scheduled() || 2044 fetchEvent.scheduled(); 2045} 2046 2047 2048///////////////////////////////////// IGbE ///////////////////////////////// 2049 2050void 2051IGbE::restartClock() 2052{ 2053 if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) && 2054 drainState() == DrainState::Running) 2055 schedule(tickEvent, clockEdge(Cycles(1))); 2056} 2057 2058DrainState 2059IGbE::drain() 2060{ 2061 unsigned int count(0); 2062 if (rxDescCache.hasOutstandingEvents() || 2063 txDescCache.hasOutstandingEvents()) { 2064 count++; 2065 } 2066 2067 txFifoTick = false; 2068 txTick = false; 2069 rxTick = false; 2070 2071 if (tickEvent.scheduled()) 2072 deschedule(tickEvent); 2073 2074 if (count) { 2075 DPRINTF(Drain, "IGbE not drained\n"); 2076 return DrainState::Draining; 2077 } else 2078 return DrainState::Drained; 2079} 2080 2081void 2082IGbE::drainResume() 2083{ 2084 Drainable::drainResume(); 2085 2086 txFifoTick = true; 2087 txTick = true; 2088 rxTick = true; 2089 2090 restartClock(); 2091 DPRINTF(EthernetSM, "resuming from drain"); 2092} 2093 2094void 2095IGbE::checkDrain() 2096{ 2097 if (drainState() != DrainState::Draining) 2098 return; 2099 2100 txFifoTick = false; 2101 txTick = false; 2102 rxTick = false; 2103 if (!rxDescCache.hasOutstandingEvents() && 2104 !txDescCache.hasOutstandingEvents()) { 2105 DPRINTF(Drain, "IGbE done draining, processing drain event\n"); 2106 signalDrainDone(); 2107 } 2108} 2109 2110void 2111IGbE::txStateMachine() 2112{ 2113 if (!regs.tctl.en()) { 2114 txTick = false; 
        DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
        return;
    }

    // If a completed packet is available and its length is non-zero (i.e.
    // it is not the partial piece of a multi-descriptor packet), push it
    // into the TX FIFO; otherwise the next iteration gathers the rest.
    if (txPacket && txDescCache.packetAvailable()
        && !txDescCache.packetMultiDesc() && txPacket->length) {
        anQ("TXS", "TX FIFO Q");
        DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
        // The push must always execute; 'success' only exists for the
        // assert, so it is compiled away entirely in NDEBUG builds.
#ifndef NDEBUG
        bool success =
#endif
            txFifo.push(txPacket);
        txFifoTick = true && drainState() != DrainState::Draining;
        assert(success);
        txPacket = NULL;
        anBegin("TXS", "Desc Writeback");
        txDescCache.writeback((cacheBlockSize()-1)>>4);
        return;
    }

    // Only support descriptor granularity
    // LWTHRESH is in units of 8 descriptors; falling below it posts TXDLOW.
    if (regs.txdctl.lwthresh() &&
        txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
        DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
        postInterrupt(IT_TXDLOW);
    }

    // Allocate a fresh (maximum-size) packet buffer if we don't have one.
    if (!txPacket) {
        txPacket = std::make_shared<EthPacketData>(16384);
    }

    if (!txDescCache.packetWaiting()) {
        // Ring empty: force a writeback, refetch, and stop until the
        // driver gives us more descriptors.
        if (txDescCache.descLeft() == 0) {
            postInterrupt(IT_TXQE);
            anBegin("TXS", "Desc Writeback");
            txDescCache.writeback(0);
            anBegin("TXS", "Desc Fetch");
            anWe("TXS", txDescCache.annUnusedCacheQ);
            txDescCache.fetchDescriptors();
            DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
                    "writeback stopping ticking and posting TXQE\n");
            txTick = false;
            return;
        }


        // Cache empty (but ring isn't): start a fetch and wait for it.
        if (!(txDescCache.descUnused())) {
            anBegin("TXS", "Desc Fetch");
            txDescCache.fetchDescriptors();
            anWe("TXS", txDescCache.annUnusedCacheQ);
            DPRINTF(EthernetSM, "TXS: No descriptors available in cache, "
                    "fetching and stopping ticking\n");
            txTick = false;
            return;
        }
        anPq("TXS", txDescCache.annUnusedCacheQ);


        // Consume any context descriptor (may start a TSO header DMA).
        txDescCache.processContextDesc();
        if (txDescCache.packetWaiting()) {
            DPRINTF(EthernetSM,
                    "TXS: Fetching TSO header, stopping ticking\n");
            txTick = false;
            return;
        }

        unsigned size = txDescCache.getPacketSize(txPacket);
        if (size > 0 && txFifo.avail() > size) {
            // Room in the FIFO: reserve it up front and start the payload
            // DMA for the next packet.
            anRq("TXS", "TX FIFO Q");
            anBegin("TXS", "DMA Packet");
            DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and "
                    "beginning DMA of next packet\n", size);
            txFifo.reserve(size);
            txDescCache.getPacketData(txPacket);
        } else if (size == 0) {
            // Nothing to send; flush any used descriptors back to memory.
            DPRINTF(EthernetSM, "TXS: getPacketSize returned: %d\n", size);
            DPRINTF(EthernetSM,
                    "TXS: No packets to get, writing back used descriptors\n");
            anBegin("TXS", "Desc Writeback");
            txDescCache.writeback(0);
        } else {
            // FIFO full: stall until ethTxDone()/txWire() frees space.
            anWf("TXS", "TX FIFO Q");
            DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
                    "available in FIFO\n");
            txTick = false;
        }


        return;
    }
    DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
    txTick = false;
}

// Called by the ethernet interface when a packet arrives from the wire.
// Returns true if the packet was accepted (or deliberately dropped while
// RX is disabled); false if the RX FIFO overflowed, which also posts RXO.
bool
IGbE::ethRxPkt(EthPacketPtr pkt)
{
    rxBytes += pkt->length;
    rxPackets++;

    DPRINTF(Ethernet, "RxFIFO: Receiving pcakte from wire\n");
    anBegin("RXQ", "Wire Recv");


    // Receiver disabled: silently drop but still report success to the link.
    if (!regs.rctl.en()) {
        DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
        anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
        return true;
    }

    // restart the state machines if they are stopped
    rxTick = true && drainState() != DrainState::Draining;
    if ((rxTick || txTick) && !tickEvent.scheduled()) {
        DPRINTF(EthernetSM,
                "RXS: received packet into fifo, starting ticking\n");
        restartClock();
    }

    // FIFO overflow: drop the packet and raise the receiver-overrun
    // interrupt.
    if (!rxFifo.push(pkt)) {
        DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
        postInterrupt(IT_RXO, true);
        anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
        return false;
    }

    // Annotation bookkeeping when CPA tracing is on: in a dual-system
    // setup, record the hand-off from the peer system's wire queue.
    if (CPA::available() && cpa->enabled()) {
        assert(sys->numSystemsRunning <= 2);
        System *other_sys;
        if (sys->systemList[0] == sys)
            other_sys = sys->systemList[1];
        else
            other_sys = sys->systemList[0];

        cpa->hwDq(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
        anQ("RXQ", "RX FIFO Q");
        cpa->hwWe(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
    }

    return true;
}


// Receive-side state machine: moves packets from the RX FIFO into memory
// via the RX descriptor cache, and manages descriptor fetch/writeback and
// the RXDMT threshold interrupt.
void
IGbE::rxStateMachine()
{
    if (!regs.rctl.en()) {
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
        return;
    }

    // If the packet is done check for interrupts/descriptors/etc
    if (rxDescCache.packetDone()) {
        rxDmaPacket = false;
        DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
        int descLeft = rxDescCache.descLeft();
        DPRINTF(EthernetSM, "RXS: descLeft: %d rdmts: %d rdlen: %d\n",
                descLeft, regs.rctl.rdmts(), regs.rdlen());
        // RDMTS selects the free-descriptor fraction (1/8, 1/4, 1/2) below
        // which RXDMT fires. Cases intentionally fall through: each level
        // breaks out early if its threshold isn't yet crossed.
        switch (regs.rctl.rdmts()) {
        case 2: if (descLeft > .125 * regs.rdlen()) break;
        case 1: if (descLeft > .250 * regs.rdlen()) break;
        case 0: if (descLeft > .500 * regs.rdlen()) break;
            DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) "
                    "because of descriptors left\n");
            postInterrupt(IT_RXDMT);
            break;
        }

        if (rxFifo.empty())
            rxDescCache.writeback(0);

        if (descLeft == 0) {
            anBegin("RXS", "Writeback Descriptors");
            rxDescCache.writeback(0);
            DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
                    " writeback and stopping ticking\n");
            rxTick = false;
        }

        // only support descriptor granularities
        assert(regs.rxdctl.gran());

        if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
            DPRINTF(EthernetSM,
                    "RXS: Writing back because WTHRESH >= descUsed\n");
            anBegin("RXS", "Writeback Descriptors");
            if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
                rxDescCache.writeback(regs.rxdctl.wthresh()-1);
            else
                rxDescCache.writeback((cacheBlockSize()-1)>>4);
        }

        // Prefetch more descriptors per PTHRESH/HTHRESH.
        if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
            ((rxDescCache.descLeft() - rxDescCache.descUnused()) >
             regs.rxdctl.hthresh())) {
            DPRINTF(EthernetSM, "RXS: Fetching descriptors because "
                    "descUnused < PTHRESH\n");
            anBegin("RXS", "Fetch Descriptors");
            rxDescCache.fetchDescriptors();
        }

        if (rxDescCache.descUnused() == 0) {
            anBegin("RXS", "Fetch Descriptors");
            rxDescCache.fetchDescriptors();
            anWe("RXS", rxDescCache.annUnusedCacheQ);
            DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
                    "fetching descriptors and stopping ticking\n");
            rxTick = false;
        }
        return;
    }

    // A packet DMA is already in flight; wait for its completion event.
    if (rxDmaPacket) {
        DPRINTF(EthernetSM,
                "RXS: stopping ticking until packet DMA completes\n");
        rxTick = false;
        return;
    }

    if (!rxDescCache.descUnused()) {
        anBegin("RXS", "Fetch Descriptors");
        rxDescCache.fetchDescriptors();
        anWe("RXS", rxDescCache.annUnusedCacheQ);
        DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
                "stopping ticking\n");
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n");
        return;
    }
    anPq("RXS", rxDescCache.annUnusedCacheQ);

    if (rxFifo.empty()) {
        anWe("RXS", "RX FIFO Q");
        DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
        rxTick = false;
        return;
    }
    anPq("RXS", "RX FIFO Q");
    anBegin("RXS", "Get Desc");

    EthPacketPtr pkt;
    pkt = rxFifo.front();


    // Start writing the packet into memory; pktOffset tracks how much of
    // it has been consumed across multiple receive descriptors.
    pktOffset = rxDescCache.writePacket(pkt, pktOffset);
    DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
    if (pktOffset == pkt->length) {
        anBegin( "RXS", "FIFO Dequeue");
        // Whole packet consumed: drop it from the FIFO and reset the offset.
        DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
        pktOffset = 0;
        anDq("RXS", "RX FIFO Q");
        rxFifo.pop();
    }

    DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
    rxTick = false;
    rxDmaPacket = true;
    anBegin("RXS", "DMA Packet");
}

// Try to put the packet at the head of the TX FIFO onto the wire. If the
// link is busy, ethTxDone() will re-invoke us when the send completes.
void
IGbE::txWire()
{
    if (txFifo.empty()) {
        anWe("TXQ", "TX FIFO Q");
        txFifoTick = false;
        return;
    }


    anPq("TXQ", "TX FIFO Q");
    if (etherInt->sendPacket(txFifo.front())) {
        anQ("TXQ", "WireQ");
        if (DTRACE(EthernetSM)) {
            IpPtr ip(txFifo.front());
            if (ip)
                DPRINTF(EthernetSM, "Transmitting Ip packet with Id=%d\n",
                        ip->id());
            else
                DPRINTF(EthernetSM, "Transmitting Non-Ip packet\n");
        }
        anDq("TXQ", "TX FIFO Q");
        anBegin("TXQ", "Wire Send");
        DPRINTF(EthernetSM,
                "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
                txFifo.avail());

        txBytes += txFifo.front()->length;
        txPackets++;
        txFifoTick = false;

        txFifo.pop();
    } else {
        // We'll get woken up when the packet ethTxDone() gets called
        txFifoTick = false;
    }
}

// Main device tick: run whichever state machines are enabled, then
// reschedule ourselves as long as any of them still wants to run.
void
IGbE::tick()
{
    DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");

    if (rxTick)
        rxStateMachine();

    if (txTick)
        txStateMachine();

    if (txFifoTick)
        txWire();


    if (rxTick || txTick || txFifoTick)
        schedule(tickEvent, curTick() + clockPeriod());
}

// Called by the ethernet interface when the wire finishes sending a packet.
void
IGbE::ethTxDone()
{
    anBegin("TXQ", "Send Done");
    // restart the tx state machines if they are stopped
    // fifo to send another packet
    // tx sm to put more data into the fifo
    txFifoTick = true && drainState() != DrainState::Draining;
    if (txDescCache.descLeft() != 0 && drainState() != DrainState::Draining)
        txTick = true;

    restartClock();
    txWire();
    DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
}

// Checkpoint the whole device: PCI state, registers, EEPROM, both FIFOs,
// any in-flight TX packet, pending timer events, and both descriptor
// caches.
void
IGbE::serialize(CheckpointOut &cp) const
{
    PciDevice::serialize(cp);

    regs.serialize(cp);
    SERIALIZE_SCALAR(eeOpBits);
    SERIALIZE_SCALAR(eeAddrBits);
    SERIALIZE_SCALAR(eeDataBits);
    SERIALIZE_SCALAR(eeOpcode);
    SERIALIZE_SCALAR(eeAddr);
    SERIALIZE_SCALAR(lastInterrupt);
    SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.serialize("rxfifo", cp);
    txFifo.serialize("txfifo", cp);

    // txPacket may be null; record whether one exists before its payload.
    bool txPktExists = txPacket != nullptr;
    SERIALIZE_SCALAR(txPktExists);
    if (txPktExists)
        txPacket->serialize("txpacket", cp);

    // Scheduled-event times are saved as absolute ticks; 0 means the
    // event was not scheduled at checkpoint time.
    Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
        inter_time = 0;

    if (rdtrEvent.scheduled())
        rdtr_time = rdtrEvent.when();
    SERIALIZE_SCALAR(rdtr_time);

    if (radvEvent.scheduled())
        radv_time = radvEvent.when();
    SERIALIZE_SCALAR(radv_time);

    if (tidvEvent.scheduled())
        tidv_time = tidvEvent.when();
    SERIALIZE_SCALAR(tidv_time);

    if (tadvEvent.scheduled())
        tadv_time = tadvEvent.when();
    SERIALIZE_SCALAR(tadv_time);

    if (interEvent.scheduled())
        inter_time = interEvent.when();
    SERIALIZE_SCALAR(inter_time);

    SERIALIZE_SCALAR(pktOffset);

    txDescCache.serializeSection(cp, "TxDescCache");
    rxDescCache.serializeSection(cp, "RxDescCache");
}

// Restore the device from a checkpoint written by serialize() above and
// re-arm any timer events that were pending.
void
IGbE::unserialize(CheckpointIn &cp)
{
    PciDevice::unserialize(cp);

    regs.unserialize(cp);
    UNSERIALIZE_SCALAR(eeOpBits);
    UNSERIALIZE_SCALAR(eeAddrBits);
    UNSERIALIZE_SCALAR(eeDataBits);
    UNSERIALIZE_SCALAR(eeOpcode);
    UNSERIALIZE_SCALAR(eeAddr);
    UNSERIALIZE_SCALAR(lastInterrupt);
    UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.unserialize("rxfifo", cp);
    txFifo.unserialize("txfifo", cp);

    bool txPktExists;
    UNSERIALIZE_SCALAR(txPktExists);
    if (txPktExists) {
        txPacket = std::make_shared<EthPacketData>(16384);
        txPacket->unserialize("txpacket", cp);
    }

    // Conservatively re-enable all state machines after restore; they will
    // stop themselves again on the first tick if there is nothing to do.
    rxTick = true;
    txTick = true;
    txFifoTick = true;

    Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
    UNSERIALIZE_SCALAR(rdtr_time);
    UNSERIALIZE_SCALAR(radv_time);
    UNSERIALIZE_SCALAR(tidv_time);
    UNSERIALIZE_SCALAR(tadv_time);
    UNSERIALIZE_SCALAR(inter_time);

    // A zero time means the event was not scheduled at checkpoint time.
    if (rdtr_time)
        schedule(rdtrEvent, rdtr_time);

    if (radv_time)
        schedule(radvEvent, radv_time);

    if (tidv_time)
        schedule(tidvEvent, tidv_time);

    if (tadv_time)
        schedule(tadvEvent, tadv_time);

    if (inter_time)
        schedule(interEvent, inter_time);

    UNSERIALIZE_SCALAR(pktOffset);

    txDescCache.unserializeSection(cp, "TxDescCache");
    rxDescCache.unserializeSection(cp, "RxDescCache");
}

// Python-config factory hook: construct the device from its params object.
IGbE *
IGbEParams::create()
{
    return new IGbE(this);
}