// i8254xGBe.cc (revision 10702)
/*
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 */

/* @file
 * Device model for Intel's 8254x line of gigabit ethernet controllers.
 * In particular an 82547 revision 2 (82547GI) MAC because it seems to have the
 * fewest workarounds in the driver. It will probably work with most of the
 * other MACs with slight modifications.
 */


/*
 * @todo really there are multiple dma engines.. we should implement them.
412665Ssaidi@eecs.umich.edu */ 422935Sksewell@umich.edu 431689SN/A#include <algorithm> 441689SN/A#include <memory> 459944Smatt.horsnell@ARM.com 469944Smatt.horsnell@ARM.com#include "base/inet.hh" 479944Smatt.horsnell@ARM.com#include "base/trace.hh" 481060SN/A#include "debug/Drain.hh" 491060SN/A#include "debug/EthernetAll.hh" 503773Sgblack@eecs.umich.edu#include "dev/i8254xGBe.hh" 516329Sgblack@eecs.umich.edu#include "mem/packet.hh" 526658Snate@binkert.org#include "mem/packet_access.hh" 531717SN/A#include "params/IGbE.hh" 549913Ssteve.reinhardt@amd.com#include "sim/stats.hh" 558232Snate@binkert.org#include "sim/system.hh" 568232Snate@binkert.org 579527SMatt.Horsnell@arm.comusing namespace iGbReg; 585529Snate@binkert.orgusing namespace Net; 591060SN/A 606221Snate@binkert.orgIGbE::IGbE(const Params *p) 616221Snate@binkert.org : EtherDevice(p), etherInt(NULL), cpa(NULL), drainManager(NULL), 621061SN/A rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), rxTick(false), 635529Snate@binkert.org txTick(false), txFifoTick(false), rxDmaPacket(false), pktOffset(0), 644329Sktlim@umich.edu fetchDelay(p->fetch_delay), wbDelay(p->wb_delay), 654329Sktlim@umich.edu fetchCompDelay(p->fetch_comp_delay), wbCompDelay(p->wb_comp_delay), 662292SN/A rxWriteDelay(p->rx_write_delay), txReadDelay(p->tx_read_delay), 672292SN/A rdtrEvent(this), radvEvent(this), 682292SN/A tadvEvent(this), tidvEvent(this), tickEvent(this), interEvent(this), 692292SN/A rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size), 7012109SRekai.GonzalezAlberquilla@arm.com txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size), 711060SN/A lastInterrupt(0) 7210172Sdam.sunwoo@arm.com{ 7310172Sdam.sunwoo@arm.com etherInt = new IGbEInt(name() + ".int", this); 7410172Sdam.sunwoo@arm.com 7510172Sdam.sunwoo@arm.com // Initialized internal registers per Intel documentation 7610172Sdam.sunwoo@arm.com // All registers intialized to 0 by per register constructor 772292SN/A regs.ctrl.fd(1); 7810328Smitch.hayenga@arm.com 
regs.ctrl.lrst(1); 7913453Srekai.gonzalezalberquilla@arm.com regs.ctrl.speed(2); 8013453Srekai.gonzalezalberquilla@arm.com regs.ctrl.frcspd(1); 8113453Srekai.gonzalezalberquilla@arm.com regs.sts.speed(3); // Say we're 1000Mbps 8213453Srekai.gonzalezalberquilla@arm.com regs.sts.fd(1); // full duplex 8313453Srekai.gonzalezalberquilla@arm.com regs.sts.lu(1); // link up 8413453Srekai.gonzalezalberquilla@arm.com regs.eecd.fwe(1); 8513453Srekai.gonzalezalberquilla@arm.com regs.eecd.ee_type(1); 8613453Srekai.gonzalezalberquilla@arm.com regs.imr = 0; 8713453Srekai.gonzalezalberquilla@arm.com regs.iam = 0; 8813453Srekai.gonzalezalberquilla@arm.com regs.rxdctl.gran(1); 8913453Srekai.gonzalezalberquilla@arm.com regs.rxdctl.wthresh(1); 9013453Srekai.gonzalezalberquilla@arm.com regs.fcrth(1); 912292SN/A regs.tdwba = 0; 922292SN/A regs.rlpml = 0; 932292SN/A regs.sw_fw_sync = 0; 942292SN/A 952292SN/A regs.pba.rxa(0x30); 962292SN/A regs.pba.txa(0x10); 972292SN/A 981060SN/A eeOpBits = 0; 991060SN/A eeAddrBits = 0; 1001061SN/A eeDataBits = 0; 1011060SN/A eeOpcode = 0; 1022292SN/A 1031062SN/A // clear all 64 16 bit words of the eeprom 1041062SN/A memset(&flash, 0, EEPROM_SIZE*2); 1058240Snate@binkert.org 1061062SN/A // Set the MAC address 1071062SN/A memcpy(flash, p->hardware_address.bytes(), ETH_ADDR_LEN); 1081062SN/A for (int x = 0; x < ETH_ADDR_LEN/2; x++) 1098240Snate@binkert.org flash[x] = htobe(flash[x]); 1101062SN/A 1111062SN/A uint16_t csum = 0; 1121062SN/A for (int x = 0; x < EEPROM_SIZE; x++) 1138240Snate@binkert.org csum += htobe(flash[x]); 1141062SN/A 1151062SN/A 1162301SN/A // Magic happy checksum value 1178240Snate@binkert.org flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum)); 1182301SN/A 1192301SN/A // Store the MAC address as queue ID 1202292SN/A macAddr = p->hardware_address; 1218240Snate@binkert.org 1222292SN/A rxFifo.clear(); 1232292SN/A txFifo.clear(); 1241062SN/A} 1258240Snate@binkert.org 1261062SN/AIGbE::~IGbE() 1271062SN/A{ 1281062SN/A delete 
etherInt; 1298240Snate@binkert.org} 1301062SN/A 1311062SN/Avoid 1321062SN/AIGbE::init() 1338240Snate@binkert.org{ 1341062SN/A cpa = CPA::cpa(); 1351062SN/A PciDevice::init(); 1361062SN/A} 1378240Snate@binkert.org 1382292SN/AEtherInt* 1391062SN/AIGbE::getEthPort(const std::string &if_name, int idx) 1401062SN/A{ 1418240Snate@binkert.org 1422292SN/A if (if_name == "interface") { 1431062SN/A if (etherInt->getPeer()) 14410239Sbinhpham@cs.rutgers.edu panic("Port already connected to\n"); 14510239Sbinhpham@cs.rutgers.edu return etherInt; 14610239Sbinhpham@cs.rutgers.edu } 14710239Sbinhpham@cs.rutgers.edu return NULL; 14810239Sbinhpham@cs.rutgers.edu} 14910239Sbinhpham@cs.rutgers.edu 15010239Sbinhpham@cs.rutgers.eduTick 15110239Sbinhpham@cs.rutgers.eduIGbE::writeConfig(PacketPtr pkt) 1521062SN/A{ 1538240Snate@binkert.org int offset = pkt->getAddr() & PCI_CONFIG_SIZE; 1541062SN/A if (offset < PCI_DEVICE_SPECIFIC) 1551062SN/A PciDevice::writeConfig(pkt); 1561062SN/A else 1578240Snate@binkert.org panic("Device specific PCI config space not implemented.\n"); 1581062SN/A 1591062SN/A // 1601062SN/A // Some work may need to be done here based for the pci COMMAND bits. 
1618240Snate@binkert.org // 1621062SN/A 1631062SN/A return configDelay; 1641062SN/A} 1658240Snate@binkert.org 1661062SN/A// Handy macro for range-testing register access addresses 1671062SN/A#define IN_RANGE(val, base, len) (val >= base && val < (base + len)) 1681062SN/A 1698240Snate@binkert.orgTick 1701062SN/AIGbE::read(PacketPtr pkt) 1711062SN/A{ 1722301SN/A int bar; 1738240Snate@binkert.org Addr daddr; 1742301SN/A 1752301SN/A if (!getBAR(pkt->getAddr(), bar, daddr)) 1762301SN/A panic("Invalid PCI memory access to unmapped memory.\n"); 1772301SN/A 1788240Snate@binkert.org // Only Memory register BAR is allowed 1792301SN/A assert(bar == 0); 1802301SN/A 1812301SN/A // Only 32bit accesses allowed 1822307SN/A assert(pkt->getSize() == 4); 1838240Snate@binkert.org 1842307SN/A DPRINTF(Ethernet, "Read device register %#X\n", daddr); 1852307SN/A 1862307SN/A // 1877897Shestness@cs.utexas.edu // Handle read of register here 1888240Snate@binkert.org // 1897897Shestness@cs.utexas.edu 1907897Shestness@cs.utexas.edu 1917897Shestness@cs.utexas.edu switch (daddr) { 1928240Snate@binkert.org case REG_CTRL: 1937897Shestness@cs.utexas.edu pkt->set<uint32_t>(regs.ctrl()); 1947897Shestness@cs.utexas.edu break; 19512109SRekai.GonzalezAlberquilla@arm.com case REG_STATUS: 19612109SRekai.GonzalezAlberquilla@arm.com pkt->set<uint32_t>(regs.sts()); 19712109SRekai.GonzalezAlberquilla@arm.com break; 19812109SRekai.GonzalezAlberquilla@arm.com case REG_EECD: 19913610Sgiacomo.gabrielli@arm.com pkt->set<uint32_t>(regs.eecd()); 20013610Sgiacomo.gabrielli@arm.com break; 20113610Sgiacomo.gabrielli@arm.com case REG_EERD: 20213610Sgiacomo.gabrielli@arm.com pkt->set<uint32_t>(regs.eerd()); 2031062SN/A break; 2041062SN/A case REG_CTRL_EXT: 2051062SN/A pkt->set<uint32_t>(regs.ctrl_ext()); 2061062SN/A break; 20711246Sradhika.jagtap@ARM.com case REG_MDIC: 20811246Sradhika.jagtap@ARM.com pkt->set<uint32_t>(regs.mdic()); 20911246Sradhika.jagtap@ARM.com break; 21011246Sradhika.jagtap@ARM.com case REG_ICR: 
21111246Sradhika.jagtap@ARM.com DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", 21211246Sradhika.jagtap@ARM.com regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame()); 21311246Sradhika.jagtap@ARM.com pkt->set<uint32_t>(regs.icr()); 21411246Sradhika.jagtap@ARM.com if (regs.icr.int_assert() || regs.imr == 0) { 21511246Sradhika.jagtap@ARM.com regs.icr = regs.icr() & ~mask(30); 2162292SN/A DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr()); 2171060SN/A } 2181060SN/A if (regs.ctrl_ext.iame() && regs.icr.int_assert()) 2191060SN/A regs.imr &= ~regs.iam; 2201060SN/A chkInterrupt(); 2211060SN/A break; 2221060SN/A case REG_EICR: 2231060SN/A // This is only useful for MSI, but the driver reads it every time 2241060SN/A // Just don't do anything 2251060SN/A pkt->set<uint32_t>(0); 2261060SN/A break; 2271060SN/A case REG_ITR: 2281060SN/A pkt->set<uint32_t>(regs.itr()); 2291060SN/A break; 2301061SN/A case REG_RCTL: 2311060SN/A pkt->set<uint32_t>(regs.rctl()); 2322292SN/A break; 2331060SN/A case REG_FCTTV: 2341060SN/A pkt->set<uint32_t>(regs.fcttv()); 2351060SN/A break; 2361060SN/A case REG_TCTL: 2371060SN/A pkt->set<uint32_t>(regs.tctl()); 2381060SN/A break; 2391060SN/A case REG_PBA: 2401061SN/A pkt->set<uint32_t>(regs.pba()); 2411060SN/A break; 2422292SN/A case REG_WUC: 2431060SN/A case REG_LEDCTL: 2441060SN/A pkt->set<uint32_t>(0); // We don't care, so just return 0 2451060SN/A break; 2461060SN/A case REG_FCRTL: 2471060SN/A pkt->set<uint32_t>(regs.fcrtl()); 2481060SN/A break; 2491060SN/A case REG_FCRTH: 2501061SN/A pkt->set<uint32_t>(regs.fcrth()); 2511060SN/A break; 2529427SAndreas.Sandberg@ARM.com case REG_RDBAL: 2531060SN/A pkt->set<uint32_t>(regs.rdba.rdbal()); 2549444SAndreas.Sandberg@ARM.com break; 2559444SAndreas.Sandberg@ARM.com case REG_RDBAH: 2569444SAndreas.Sandberg@ARM.com pkt->set<uint32_t>(regs.rdba.rdbah()); 2579444SAndreas.Sandberg@ARM.com break; 2589444SAndreas.Sandberg@ARM.com case REG_RDLEN: 25913641Sqtt2@cornell.edu 
pkt->set<uint32_t>(regs.rdlen()); 26013641Sqtt2@cornell.edu break; 26113641Sqtt2@cornell.edu case REG_SRRCTL: 26213641Sqtt2@cornell.edu pkt->set<uint32_t>(regs.srrctl()); 26313641Sqtt2@cornell.edu break; 26413641Sqtt2@cornell.edu case REG_RDH: 26513641Sqtt2@cornell.edu pkt->set<uint32_t>(regs.rdh()); 26613641Sqtt2@cornell.edu break; 26713641Sqtt2@cornell.edu case REG_RDT: 26813641Sqtt2@cornell.edu pkt->set<uint32_t>(regs.rdt()); 26913641Sqtt2@cornell.edu break; 27013641Sqtt2@cornell.edu case REG_RDTR: 27113641Sqtt2@cornell.edu pkt->set<uint32_t>(regs.rdtr()); 27213641Sqtt2@cornell.edu if (regs.rdtr.fpd()) { 27313641Sqtt2@cornell.edu rxDescCache.writeback(0); 27413641Sqtt2@cornell.edu DPRINTF(EthernetIntr, 27513641Sqtt2@cornell.edu "Posting interrupt because of RDTR.FPD write\n"); 27613641Sqtt2@cornell.edu postInterrupt(IT_RXT); 27713641Sqtt2@cornell.edu regs.rdtr.fpd(0); 27813641Sqtt2@cornell.edu } 27913641Sqtt2@cornell.edu break; 28013641Sqtt2@cornell.edu case REG_RXDCTL: 2819444SAndreas.Sandberg@ARM.com pkt->set<uint32_t>(regs.rxdctl()); 2829444SAndreas.Sandberg@ARM.com break; 2839444SAndreas.Sandberg@ARM.com case REG_RADV: 2849444SAndreas.Sandberg@ARM.com pkt->set<uint32_t>(regs.radv()); 2859444SAndreas.Sandberg@ARM.com break; 2869444SAndreas.Sandberg@ARM.com case REG_TDBAL: 2879444SAndreas.Sandberg@ARM.com pkt->set<uint32_t>(regs.tdba.tdbal()); 2882329SN/A break; 2896221Snate@binkert.org case REG_TDBAH: 2909444SAndreas.Sandberg@ARM.com pkt->set<uint32_t>(regs.tdba.tdbah()); 2919444SAndreas.Sandberg@ARM.com break; 2922292SN/A case REG_TDLEN: 29310239Sbinhpham@cs.rutgers.edu pkt->set<uint32_t>(regs.tdlen()); 29410239Sbinhpham@cs.rutgers.edu break; 2952292SN/A case REG_TDH: 2962292SN/A pkt->set<uint32_t>(regs.tdh()); 2979444SAndreas.Sandberg@ARM.com break; 2989444SAndreas.Sandberg@ARM.com case REG_TXDCA_CTL: 2999444SAndreas.Sandberg@ARM.com pkt->set<uint32_t>(regs.txdca_ctl()); 3009444SAndreas.Sandberg@ARM.com break; 3019444SAndreas.Sandberg@ARM.com case REG_TDT: 
30210239Sbinhpham@cs.rutgers.edu pkt->set<uint32_t>(regs.tdt()); 30310239Sbinhpham@cs.rutgers.edu break; 3049444SAndreas.Sandberg@ARM.com case REG_TIDV: 3059444SAndreas.Sandberg@ARM.com pkt->set<uint32_t>(regs.tidv()); 3062292SN/A break; 3071060SN/A case REG_TXDCTL: 3081060SN/A pkt->set<uint32_t>(regs.txdctl()); 3092292SN/A break; 3102292SN/A case REG_TADV: 3116221Snate@binkert.org pkt->set<uint32_t>(regs.tadv()); 3122292SN/A break; 3132292SN/A case REG_TDWBAL: 3142292SN/A pkt->set<uint32_t>(regs.tdwba & mask(32)); 3152292SN/A break; 3162292SN/A case REG_TDWBAH: 3171061SN/A pkt->set<uint32_t>(regs.tdwba >> 32); 3181060SN/A break; 3192292SN/A case REG_RXCSUM: 3201060SN/A pkt->set<uint32_t>(regs.rxcsum()); 3216221Snate@binkert.org break; 3226221Snate@binkert.org case REG_RLPML: 3231060SN/A pkt->set<uint32_t>(regs.rlpml); 3241060SN/A break; 3251061SN/A case REG_RFCTL: 3261060SN/A pkt->set<uint32_t>(regs.rfctl()); 3272292SN/A break; 3281060SN/A case REG_MANC: 3292292SN/A pkt->set<uint32_t>(regs.manc()); 3302292SN/A break; 3311060SN/A case REG_SWSM: 3322292SN/A pkt->set<uint32_t>(regs.swsm()); 3332292SN/A regs.swsm.smbi(1); 3342292SN/A break; 3352292SN/A case REG_FWSM: 3362292SN/A pkt->set<uint32_t>(regs.fwsm()); 3371060SN/A break; 3381060SN/A case REG_SWFWSYNC: 3391061SN/A pkt->set<uint32_t>(regs.sw_fw_sync); 3402863Sktlim@umich.edu break; 3419444SAndreas.Sandberg@ARM.com default: 3421060SN/A if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) && 3439444SAndreas.Sandberg@ARM.com !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) && 3449444SAndreas.Sandberg@ARM.com !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4) && 3459444SAndreas.Sandberg@ARM.com !IN_RANGE(daddr, REG_CRCERRS, STATS_REGS_SIZE)) 3469444SAndreas.Sandberg@ARM.com panic("Read request to unknown register number: %#x\n", daddr); 34711650Srekai.gonzalezalberquilla@arm.com else 34811650Srekai.gonzalezalberquilla@arm.com pkt->set<uint32_t>(0); 3499444SAndreas.Sandberg@ARM.com }; 
3509444SAndreas.Sandberg@ARM.com 3512863Sktlim@umich.edu pkt->makeAtomicResponse(); 3522316SN/A return pioDelay; 3531060SN/A} 3542316SN/A 3552316SN/ATick 3562307SN/AIGbE::write(PacketPtr pkt) 3571060SN/A{ 3589444SAndreas.Sandberg@ARM.com int bar; 3599444SAndreas.Sandberg@ARM.com Addr daddr; 3601060SN/A 3619444SAndreas.Sandberg@ARM.com 3629444SAndreas.Sandberg@ARM.com if (!getBAR(pkt->getAddr(), bar, daddr)) 3639444SAndreas.Sandberg@ARM.com panic("Invalid PCI memory access to unmapped memory.\n"); 3649444SAndreas.Sandberg@ARM.com 3656221Snate@binkert.org // Only Memory register BAR is allowed 3669444SAndreas.Sandberg@ARM.com assert(bar == 0); 3679444SAndreas.Sandberg@ARM.com 3689444SAndreas.Sandberg@ARM.com // Only 32bit accesses allowed 3699444SAndreas.Sandberg@ARM.com assert(pkt->getSize() == sizeof(uint32_t)); 3702307SN/A 3712307SN/A DPRINTF(Ethernet, "Wrote device register %#X value %#X\n", 3722307SN/A daddr, pkt->get<uint32_t>()); 3732307SN/A 3742307SN/A // 3756221Snate@binkert.org // Handle write of register here 3761858SN/A // 3772292SN/A uint32_t val = pkt->get<uint32_t>(); 3781858SN/A 3792292SN/A Regs::RCTL oldrctl; 3802292SN/A Regs::TCTL oldtctl; 3812292SN/A 3822292SN/A switch (daddr) { 3833788Sgblack@eecs.umich.edu case REG_CTRL: 3842292SN/A regs.ctrl = val; 3852698Sktlim@umich.edu if (regs.ctrl.tfce()) 3863788Sgblack@eecs.umich.edu warn("TX Flow control enabled, should implement\n"); 3872301SN/A if (regs.ctrl.rfce()) 3883788Sgblack@eecs.umich.edu warn("RX Flow control enabled, should implement\n"); 3893788Sgblack@eecs.umich.edu break; 3903788Sgblack@eecs.umich.edu case REG_CTRL_EXT: 3913788Sgblack@eecs.umich.edu regs.ctrl_ext = val; 3923788Sgblack@eecs.umich.edu break; 3933788Sgblack@eecs.umich.edu case REG_STATUS: 3943788Sgblack@eecs.umich.edu regs.sts = val; 3953788Sgblack@eecs.umich.edu break; 3963788Sgblack@eecs.umich.edu case REG_EECD: 3973788Sgblack@eecs.umich.edu int oldClk; 3983788Sgblack@eecs.umich.edu oldClk = regs.eecd.sk(); 3992292SN/A 
regs.eecd = val; 4002292SN/A // See if this is a eeprom access and emulate accordingly 4012292SN/A if (!oldClk && regs.eecd.sk()) { 4022292SN/A if (eeOpBits < 8) { 4032292SN/A eeOpcode = eeOpcode << 1 | regs.eecd.din(); 4042329SN/A eeOpBits++; 4052292SN/A } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) { 4062935Sksewell@umich.edu eeAddr = eeAddr << 1 | regs.eecd.din(); 4072935Sksewell@umich.edu eeAddrBits++; 4082731Sktlim@umich.edu } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) { 4092292SN/A assert(eeAddr>>1 < EEPROM_SIZE); 4102292SN/A DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n", 4112935Sksewell@umich.edu flash[eeAddr>>1] >> eeDataBits & 0x1, 4122292SN/A flash[eeAddr>>1]); 4132292SN/A regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1); 4142935Sksewell@umich.edu eeDataBits++; 4154632Sgblack@eecs.umich.edu } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) { 4163093Sksewell@umich.edu regs.eecd.dout(0); 4172292SN/A eeDataBits++; 4182292SN/A } else 4193093Sksewell@umich.edu panic("What's going on with eeprom interface? 
opcode:" 4204632Sgblack@eecs.umich.edu " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode, 4212935Sksewell@umich.edu (uint32_t)eeOpBits, (uint32_t)eeAddr, 4222292SN/A (uint32_t)eeAddrBits, (uint32_t)eeDataBits); 4232292SN/A 4242292SN/A // Reset everything for the next command 4252292SN/A if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) || 4262292SN/A (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) { 4272292SN/A eeOpBits = 0; 4282292SN/A eeAddrBits = 0; 4292292SN/A eeDataBits = 0; 4302292SN/A eeOpcode = 0; 4312292SN/A eeAddr = 0; 4322292SN/A } 4332292SN/A 4342292SN/A DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n", 4352292SN/A (uint32_t)eeOpcode, (uint32_t) eeOpBits, 4362292SN/A (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits); 4372292SN/A if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI || 4386221Snate@binkert.org eeOpcode == EEPROM_RDSR_OPCODE_SPI )) 4396221Snate@binkert.org panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode, 4402292SN/A (uint32_t)eeOpBits); 4412292SN/A 4423867Sbinkertn@umich.edu 4436221Snate@binkert.org } 4442292SN/A // If driver requests eeprom access, immediately give it to it 4452292SN/A regs.eecd.ee_gnt(regs.eecd.ee_req()); 4462292SN/A break; 4472292SN/A case REG_EERD: 4482292SN/A regs.eerd = val; 4492292SN/A if (regs.eerd.start()) { 4502292SN/A regs.eerd.done(1); 4512292SN/A assert(regs.eerd.addr() < EEPROM_SIZE); 4522292SN/A regs.eerd.data(flash[regs.eerd.addr()]); 4532292SN/A regs.eerd.start(0); 4542292SN/A DPRINTF(EthernetEEPROM, "EEPROM: read addr: %#X data %#x\n", 4552292SN/A regs.eerd.addr(), regs.eerd.data()); 4562292SN/A } 4572292SN/A break; 4582292SN/A case REG_MDIC: 4592292SN/A regs.mdic = val; 4602292SN/A if (regs.mdic.i()) 4613867Sbinkertn@umich.edu panic("No support for interrupt on mdic complete\n"); 4622292SN/A if (regs.mdic.phyadd() != 1) 4633867Sbinkertn@umich.edu panic("No support for reading anything but phy\n"); 4646221Snate@binkert.org DPRINTF(Ethernet, "%s phy address 
%x\n", 4652292SN/A regs.mdic.op() == 1 ? "Writing" : "Reading", 4662292SN/A regs.mdic.regadd()); 4672292SN/A switch (regs.mdic.regadd()) { 4682292SN/A case PHY_PSTATUS: 4692292SN/A regs.mdic.data(0x796D); // link up 4702292SN/A break; 4712292SN/A case PHY_PID: 4722292SN/A regs.mdic.data(params()->phy_pid); 4732292SN/A break; 4742292SN/A case PHY_EPID: 4752292SN/A regs.mdic.data(params()->phy_epid); 4762292SN/A break; 4776221Snate@binkert.org case PHY_GSTATUS: 4782292SN/A regs.mdic.data(0x7C00); 47910239Sbinhpham@cs.rutgers.edu break; 48010239Sbinhpham@cs.rutgers.edu case PHY_EPSTATUS: 48110239Sbinhpham@cs.rutgers.edu regs.mdic.data(0x3000); 48210239Sbinhpham@cs.rutgers.edu break; 4832292SN/A case PHY_AGC: 4842292SN/A regs.mdic.data(0x180); // some random length 4852292SN/A break; 4862292SN/A default: 4872292SN/A regs.mdic.data(0); 4882292SN/A } 4892292SN/A regs.mdic.r(1); 4906221Snate@binkert.org break; 4912292SN/A case REG_ICR: 4922292SN/A DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n", 4932292SN/A regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame()); 4942292SN/A if (regs.ctrl_ext.iame()) 4952292SN/A regs.imr &= ~regs.iam; 4962292SN/A regs.icr = ~bits(val,30,0) & regs.icr(); 4972292SN/A chkInterrupt(); 4982292SN/A break; 4992292SN/A case REG_ITR: 5002292SN/A regs.itr = val; 5012292SN/A break; 5022292SN/A case REG_ICS: 5032301SN/A DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n"); 5042301SN/A postInterrupt((IntTypes)val); 5053788Sgblack@eecs.umich.edu break; 5063788Sgblack@eecs.umich.edu case REG_IMS: 5073788Sgblack@eecs.umich.edu regs.imr |= val; 5083788Sgblack@eecs.umich.edu chkInterrupt(); 5093788Sgblack@eecs.umich.edu break; 5103788Sgblack@eecs.umich.edu case REG_IMC: 5113788Sgblack@eecs.umich.edu regs.imr &= ~val; 5123788Sgblack@eecs.umich.edu chkInterrupt(); 5133798Sgblack@eecs.umich.edu break; 5143798Sgblack@eecs.umich.edu case REG_IAM: 5153798Sgblack@eecs.umich.edu regs.iam = val; 5163798Sgblack@eecs.umich.edu break; 
5173798Sgblack@eecs.umich.edu case REG_RCTL: 5183798Sgblack@eecs.umich.edu oldrctl = regs.rctl; 5192292SN/A regs.rctl = val; 5202292SN/A if (regs.rctl.rst()) { 5212292SN/A rxDescCache.reset(); 5222292SN/A DPRINTF(EthernetSM, "RXS: Got RESET!\n"); 5232292SN/A rxFifo.clear(); 5242292SN/A regs.rctl.rst(0); 5252292SN/A } 5262292SN/A if (regs.rctl.en()) 5272292SN/A rxTick = true; 5282292SN/A restartClock(); 5292292SN/A break; 5302292SN/A case REG_FCTTV: 5312292SN/A regs.fcttv = val; 5322292SN/A break; 5332292SN/A case REG_TCTL: 5342292SN/A regs.tctl = val; 5352292SN/A oldtctl = regs.tctl; 5362292SN/A regs.tctl = val; 5372292SN/A if (regs.tctl.en()) 5382292SN/A txTick = true; 5391858SN/A restartClock(); 5401858SN/A if (regs.tctl.en() && !oldtctl.en()) { 5411858SN/A txDescCache.reset(); 5421858SN/A } 5431858SN/A break; 5446221Snate@binkert.org case REG_PBA: 5451858SN/A regs.pba.rxa(val); 5462292SN/A regs.pba.txa(64 - regs.pba.rxa()); 5472292SN/A break; 5482292SN/A case REG_WUC: 5492292SN/A case REG_LEDCTL: 5501858SN/A case REG_FCAL: 5512292SN/A case REG_FCAH: 5522292SN/A case REG_FCT: 5532292SN/A case REG_VET: 5542292SN/A case REG_AIFS: 5552292SN/A case REG_TIPG: 5562292SN/A ; // We don't care, so don't store anything 5572292SN/A break; 5582292SN/A case REG_IVAR0: 5592292SN/A warn("Writing to IVAR0, ignoring...\n"); 5602292SN/A break; 5612292SN/A case REG_FCRTL: 5622292SN/A regs.fcrtl = val; 5632292SN/A break; 5641858SN/A case REG_FCRTH: 5652292SN/A regs.fcrth = val; 5662292SN/A break; 5672292SN/A case REG_RDBAL: 5682292SN/A regs.rdba.rdbal( val & ~mask(4)); 5692292SN/A rxDescCache.areaChanged(); 5702292SN/A break; 5712292SN/A case REG_RDBAH: 5722292SN/A regs.rdba.rdbah(val); 5732292SN/A rxDescCache.areaChanged(); 5742292SN/A break; 5752292SN/A case REG_RDLEN: 5762292SN/A regs.rdlen = val & ~mask(7); 5772292SN/A rxDescCache.areaChanged(); 5782292SN/A break; 5792292SN/A case REG_SRRCTL: 58010239Sbinhpham@cs.rutgers.edu regs.srrctl = val; 5812292SN/A break; 5822292SN/A case 
REG_RDH: 58310239Sbinhpham@cs.rutgers.edu regs.rdh = val; 5842292SN/A rxDescCache.areaChanged(); 5852292SN/A break; 58610239Sbinhpham@cs.rutgers.edu case REG_RDT: 5872292SN/A regs.rdt = val; 5882292SN/A DPRINTF(EthernetSM, "RXS: RDT Updated.\n"); 5892292SN/A if (getDrainState() == Drainable::Running) { 5902292SN/A DPRINTF(EthernetSM, "RXS: RDT Fetching Descriptors!\n"); 5912292SN/A rxDescCache.fetchDescriptors(); 5922292SN/A } else { 5932292SN/A DPRINTF(EthernetSM, "RXS: RDT NOT Fetching Desc b/c draining!\n"); 5942292SN/A } 5952292SN/A break; 5962292SN/A case REG_RDTR: 5972292SN/A regs.rdtr = val; 5982292SN/A break; 5992292SN/A case REG_RADV: 6002292SN/A regs.radv = val; 6012292SN/A break; 6022292SN/A case REG_RXDCTL: 6032292SN/A regs.rxdctl = val; 6042292SN/A break; 6052292SN/A case REG_TDBAL: 6062292SN/A regs.tdba.tdbal( val & ~mask(4)); 6072292SN/A txDescCache.areaChanged(); 6082292SN/A break; 6092292SN/A case REG_TDBAH: 6102292SN/A regs.tdba.tdbah(val); 6112292SN/A txDescCache.areaChanged(); 6122292SN/A break; 6132292SN/A case REG_TDLEN: 6142292SN/A regs.tdlen = val & ~mask(7); 6152292SN/A txDescCache.areaChanged(); 6162292SN/A break; 6172292SN/A case REG_TDH: 6182292SN/A regs.tdh = val; 6192292SN/A txDescCache.areaChanged(); 6202292SN/A break; 6212292SN/A case REG_TXDCA_CTL: 6222292SN/A regs.txdca_ctl = val; 6232292SN/A if (regs.txdca_ctl.enabled()) 6242292SN/A panic("No support for DCA\n"); 6252292SN/A break; 6262292SN/A case REG_TDT: 6272292SN/A regs.tdt = val; 6282292SN/A DPRINTF(EthernetSM, "TXS: TX Tail pointer updated\n"); 6292292SN/A if (getDrainState() == Drainable::Running) { 6302292SN/A DPRINTF(EthernetSM, "TXS: TDT Fetching Descriptors!\n"); 6312292SN/A txDescCache.fetchDescriptors(); 6322292SN/A } else { 6332292SN/A DPRINTF(EthernetSM, "TXS: TDT NOT Fetching Desc b/c draining!\n"); 6342292SN/A } 63513429Srekai.gonzalezalberquilla@arm.com break; 6362292SN/A case REG_TIDV: 63710239Sbinhpham@cs.rutgers.edu regs.tidv = val; 
63810239Sbinhpham@cs.rutgers.edu break; 63910239Sbinhpham@cs.rutgers.edu case REG_TXDCTL: 64010239Sbinhpham@cs.rutgers.edu regs.txdctl = val; 64110239Sbinhpham@cs.rutgers.edu break; 64210933Snilay@cs.wisc.edu case REG_TADV: 64310933Snilay@cs.wisc.edu regs.tadv = val; 64410933Snilay@cs.wisc.edu break; 64510933Snilay@cs.wisc.edu case REG_TDWBAL: 64610933Snilay@cs.wisc.edu regs.tdwba &= ~mask(32); 64710933Snilay@cs.wisc.edu regs.tdwba |= val; 64810239Sbinhpham@cs.rutgers.edu txDescCache.completionWriteback(regs.tdwba & ~mask(1), 64910239Sbinhpham@cs.rutgers.edu regs.tdwba & mask(1)); 65010239Sbinhpham@cs.rutgers.edu break; 65110933Snilay@cs.wisc.edu case REG_TDWBAH: 65210933Snilay@cs.wisc.edu regs.tdwba &= mask(32); 65310933Snilay@cs.wisc.edu regs.tdwba |= (uint64_t)val << 32; 65410933Snilay@cs.wisc.edu txDescCache.completionWriteback(regs.tdwba & ~mask(1), 65510933Snilay@cs.wisc.edu regs.tdwba & mask(1)); 65610933Snilay@cs.wisc.edu break; 65710239Sbinhpham@cs.rutgers.edu case REG_RXCSUM: 65810239Sbinhpham@cs.rutgers.edu regs.rxcsum = val; 6592292SN/A break; 6602292SN/A case REG_RLPML: 6612292SN/A regs.rlpml = val; 6627720Sgblack@eecs.umich.edu break; 6637720Sgblack@eecs.umich.edu case REG_RFCTL: 6642292SN/A regs.rfctl = val; 6652292SN/A if (regs.rfctl.exsten()) 6662292SN/A panic("Extended RX descriptors not implemented\n"); 6677720Sgblack@eecs.umich.edu break; 6687720Sgblack@eecs.umich.edu case REG_MANC: 6697720Sgblack@eecs.umich.edu regs.manc = val; 6702292SN/A break; 6712292SN/A case REG_SWSM: 6722292SN/A regs.swsm = val; 6732292SN/A if (regs.fwsm.eep_fw_semaphore()) 6742292SN/A regs.swsm.swesmbi(0); 6752292SN/A break; 6762292SN/A case REG_SWFWSYNC: 6772292SN/A regs.sw_fw_sync = val; 6782292SN/A break; 6792292SN/A default: 6807720Sgblack@eecs.umich.edu if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) && 6812292SN/A !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) && 6829531Sgeoffrey.blake@arm.com !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4)) 
6839531Sgeoffrey.blake@arm.com panic("Write request to unknown register number: %#x\n", daddr); 68410715SRekai.GonzalezAlberquilla@arm.com }; 68510715SRekai.GonzalezAlberquilla@arm.com 68612109SRekai.GonzalezAlberquilla@arm.com pkt->makeAtomicResponse(); 68712109SRekai.GonzalezAlberquilla@arm.com return pioDelay; 68813610Sgiacomo.gabrielli@arm.com} 68910935Snilay@cs.wisc.edu 6909531Sgeoffrey.blake@arm.comvoid 6919531Sgeoffrey.blake@arm.comIGbE::postInterrupt(IntTypes t, bool now) 6929531Sgeoffrey.blake@arm.com{ 6939531Sgeoffrey.blake@arm.com assert(t); 6949531Sgeoffrey.blake@arm.com 6959531Sgeoffrey.blake@arm.com // Interrupt is already pending 6969531Sgeoffrey.blake@arm.com if (t & regs.icr() && !now) 6979531Sgeoffrey.blake@arm.com return; 6989531Sgeoffrey.blake@arm.com 6992292SN/A regs.icr = regs.icr() | t; 7002292SN/A 7012292SN/A Tick itr_interval = SimClock::Int::ns * 256 * regs.itr.interval(); 7022292SN/A DPRINTF(EthernetIntr, 7032336SN/A "EINT: postInterrupt() curTick(): %d itr: %d interval: %d\n", 7042336SN/A curTick(), regs.itr.interval(), itr_interval); 7052336SN/A 7062336SN/A if (regs.itr.interval() == 0 || now || 7072336SN/A lastInterrupt + itr_interval <= curTick()) { 7082336SN/A if (interEvent.scheduled()) { 7092336SN/A deschedule(interEvent); 7102336SN/A } 7112292SN/A cpuPostInt(); 7122292SN/A } else { 7132301SN/A Tick int_time = lastInterrupt + itr_interval; 7142301SN/A assert(int_time > 0); 7152292SN/A DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for tick %d\n", 7162301SN/A int_time); 7172301SN/A if (!interEvent.scheduled()) { 7182301SN/A schedule(interEvent, int_time); 7192292SN/A } 7202301SN/A } 7212292SN/A} 7222301SN/A 7232292SN/Avoid 7242301SN/AIGbE::delayIntEvent() 7252292SN/A{ 7262292SN/A cpuPostInt(); 7272292SN/A} 7282292SN/A 7292336SN/A 7302336SN/Avoid 7312292SN/AIGbE::cpuPostInt() 7322292SN/A{ 7332307SN/A 7342307SN/A postedInterrupts++; 7352292SN/A 7362292SN/A if (!(regs.icr() & regs.imr)) { 7372292SN/A DPRINTF(Ethernet, 
"Interrupt Masked. Not Posting\n"); 7382292SN/A return; 7392292SN/A } 7402292SN/A 7412292SN/A DPRINTF(Ethernet, "Posting Interrupt\n"); 7422292SN/A 7432292SN/A 74410239Sbinhpham@cs.rutgers.edu if (interEvent.scheduled()) { 74510239Sbinhpham@cs.rutgers.edu deschedule(interEvent); 74610239Sbinhpham@cs.rutgers.edu } 74710239Sbinhpham@cs.rutgers.edu 74810239Sbinhpham@cs.rutgers.edu if (rdtrEvent.scheduled()) { 74910239Sbinhpham@cs.rutgers.edu regs.icr.rxt0(1); 7502292SN/A deschedule(rdtrEvent); 75111246Sradhika.jagtap@ARM.com } 75211246Sradhika.jagtap@ARM.com if (radvEvent.scheduled()) { 75311246Sradhika.jagtap@ARM.com regs.icr.rxt0(1); 7548471SGiacomo.Gabrielli@arm.com deschedule(radvEvent); 7552292SN/A } 7562292SN/A if (tadvEvent.scheduled()) { 7572292SN/A regs.icr.txdw(1); 7582292SN/A deschedule(tadvEvent); 7592292SN/A } 7602292SN/A if (tidvEvent.scheduled()) { 7612292SN/A regs.icr.txdw(1); 7622292SN/A deschedule(tidvEvent); 7632292SN/A } 7642292SN/A 7652292SN/A regs.icr.int_assert(1); 7662292SN/A DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n", 7672307SN/A regs.icr()); 7682292SN/A 7692292SN/A intrPost(); 7702292SN/A 7712292SN/A lastInterrupt = curTick(); 7722292SN/A} 7732292SN/A 7742292SN/Avoid 7752292SN/AIGbE::cpuClearInt() 7762292SN/A{ 7772292SN/A if (regs.icr.int_assert()) { 7782292SN/A regs.icr.int_assert(0); 7792292SN/A DPRINTF(EthernetIntr, 7802292SN/A "EINT: Clearing interrupt to CPU now. 
Vector %#x\n", 7812292SN/A regs.icr()); 7822292SN/A intrClear(); 7832292SN/A } 7842292SN/A} 7852292SN/A 7862292SN/Avoid 7872292SN/AIGbE::chkInterrupt() 7886221Snate@binkert.org{ 7892292SN/A DPRINTF(Ethernet, "Checking interrupts icr: %#x imr: %#x\n", regs.icr(), 7902292SN/A regs.imr); 7912292SN/A // Check if we need to clear the cpu interrupt 7922292SN/A if (!(regs.icr() & regs.imr)) { 7932292SN/A DPRINTF(Ethernet, "Mask cleaned all interrupts\n"); 7942292SN/A if (interEvent.scheduled()) 7952292SN/A deschedule(interEvent); 7962292SN/A if (regs.icr.int_assert()) 7972292SN/A cpuClearInt(); 7982292SN/A } 7997720Sgblack@eecs.umich.edu DPRINTF(Ethernet, "ITR = %#X itr.interval = %#X\n", 8007720Sgblack@eecs.umich.edu regs.itr(), regs.itr.interval()); 8012292SN/A 8022307SN/A if (regs.icr() & regs.imr) { 8032307SN/A if (regs.itr.interval() == 0) { 8042292SN/A cpuPostInt(); 8052292SN/A } else { 8062292SN/A DPRINTF(Ethernet, 8072292SN/A "Possibly scheduling interrupt because of imr write\n"); 8083798Sgblack@eecs.umich.edu if (!interEvent.scheduled()) { 8093798Sgblack@eecs.umich.edu Tick t = curTick() + SimClock::Int::ns * 256 * regs.itr.interval(); 8103798Sgblack@eecs.umich.edu DPRINTF(Ethernet, "Scheduling for %d\n", t); 81111321Ssteve.reinhardt@amd.com schedule(interEvent, t); 8123798Sgblack@eecs.umich.edu } 8133798Sgblack@eecs.umich.edu } 8147720Sgblack@eecs.umich.edu } 8153798Sgblack@eecs.umich.edu} 8163798Sgblack@eecs.umich.edu 8172292SN/A 8183798Sgblack@eecs.umich.edu///////////////////////////// IGbE::DescCache ////////////////////////////// 8192292SN/A 8202292SN/Atemplate<class T> 8212292SN/AIGbE::DescCache<T>::DescCache(IGbE *i, const std::string n, int s) 8222292SN/A : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0), 8232292SN/A wbOut(0), moreToWb(false), wbAlignment(0), pktPtr(NULL), 8242292SN/A wbDelayEvent(this), fetchDelayEvent(this), fetchEvent(this), 8252292SN/A wbEvent(this) 8262292SN/A{ 82713429Srekai.gonzalezalberquilla@arm.com fetchBuf = new 
T[size]; 8282292SN/A wbBuf = new T[size]; 8299527SMatt.Horsnell@arm.com} 8309527SMatt.Horsnell@arm.com 8319527SMatt.Horsnell@arm.comtemplate<class T> 8329527SMatt.Horsnell@arm.comIGbE::DescCache<T>::~DescCache() 8339527SMatt.Horsnell@arm.com{ 8342292SN/A reset(); 8352292SN/A delete[] fetchBuf; 8362292SN/A delete[] wbBuf; 8372292SN/A} 8382292SN/A 8392292SN/Atemplate<class T> 8402292SN/Avoid 8416221Snate@binkert.orgIGbE::DescCache<T>::areaChanged() 8426221Snate@binkert.org{ 8432292SN/A if (usedCache.size() > 0 || curFetching || wbOut) 8443867Sbinkertn@umich.edu panic("Descriptor Address, Length or Head changed. Bad\n"); 8456221Snate@binkert.org reset(); 8463867Sbinkertn@umich.edu 8473867Sbinkertn@umich.edu} 8482292SN/A 8492292SN/Atemplate<class T> 8502292SN/Avoid 8512292SN/AIGbE::DescCache<T>::writeback(Addr aMask) 8522292SN/A{ 8532292SN/A int curHead = descHead(); 8542292SN/A int max_to_wb = usedCache.size(); 8552292SN/A 8562292SN/A // Check if this writeback is less restrictive that the previous 8572292SN/A // and if so setup another one immediately following it 8582292SN/A if (wbOut) { 8592292SN/A if (aMask < wbAlignment) { 8606221Snate@binkert.org moreToWb = true; 8616221Snate@binkert.org wbAlignment = aMask; 8622292SN/A } 8633867Sbinkertn@umich.edu DPRINTF(EthernetDesc, 8646221Snate@binkert.org "Writing back already in process, returning\n"); 8652292SN/A return; 8662292SN/A } 8672292SN/A 8682292SN/A moreToWb = false; 8692292SN/A wbAlignment = aMask; 8702292SN/A 8712292SN/A 8722292SN/A DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: " 8732292SN/A "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n", 8742292SN/A curHead, descTail(), descLen(), cachePnt, max_to_wb, 8752292SN/A descLeft()); 8762292SN/A 8772292SN/A if (max_to_wb + curHead >= descLen()) { 8782292SN/A max_to_wb = descLen() - curHead; 8792733Sktlim@umich.edu moreToWb = true; 8802292SN/A // this is by definition aligned correctly 8812292SN/A } else if (wbAlignment != 0) { 8822292SN/A // 
align the wb point to the mask 8832292SN/A max_to_wb = max_to_wb & ~wbAlignment; 8842292SN/A } 8852292SN/A 8862292SN/A DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb); 8872292SN/A 8882733Sktlim@umich.edu if (max_to_wb <= 0) { 8892292SN/A if (usedCache.size()) 8902292SN/A igbe->anBegin(annSmWb, "Wait Alignment", CPA::FL_WAIT); 8912292SN/A else 8922292SN/A igbe->anWe(annSmWb, annUsedCacheQ); 8932292SN/A return; 8942292SN/A } 8956221Snate@binkert.org 8962292SN/A wbOut = max_to_wb; 8972292SN/A 8982292SN/A assert(!wbDelayEvent.scheduled()); 8992292SN/A igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay); 9002292SN/A igbe->anBegin(annSmWb, "Prepare Writeback Desc"); 9012292SN/A} 9022292SN/A 9032292SN/Atemplate<class T> 9042292SN/Avoid 9052292SN/AIGbE::DescCache<T>::writeback1() 9063798Sgblack@eecs.umich.edu{ 9073798Sgblack@eecs.umich.edu // If we're draining delay issuing this DMA 9083798Sgblack@eecs.umich.edu if (igbe->getDrainState() != Drainable::Running) { 9093798Sgblack@eecs.umich.edu igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay); 9102292SN/A return; 9112292SN/A } 9122292SN/A 9132292SN/A DPRINTF(EthernetDesc, "Begining DMA of %d descriptors\n", wbOut); 9142292SN/A 9152329SN/A for (int x = 0; x < wbOut; x++) { 9162329SN/A assert(usedCache.size()); 9172301SN/A memcpy(&wbBuf[x], usedCache[x], sizeof(T)); 9182292SN/A igbe->anPq(annSmWb, annUsedCacheQ); 9192292SN/A igbe->anPq(annSmWb, annDescQ); 9202292SN/A igbe->anQ(annSmWb, annUsedDescQ); 9212292SN/A } 9222292SN/A 9232292SN/A 9242292SN/A igbe->anBegin(annSmWb, "Writeback Desc DMA"); 9252292SN/A 9262292SN/A assert(wbOut); 9272292SN/A igbe->dmaWrite(pciToDma(descBase() + descHead() * sizeof(T)), 9282292SN/A wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf, 9296221Snate@binkert.org igbe->wbCompDelay); 9302292SN/A} 9312292SN/A 9322292SN/Atemplate<class T> 9332292SN/Avoid 9342301SN/AIGbE::DescCache<T>::fetchDescriptors() 9352292SN/A{ 9362292SN/A size_t max_to_fetch; 9372292SN/A 9382292SN/A if 
(curFetching) { 9392292SN/A DPRINTF(EthernetDesc, 9402292SN/A "Currently fetching %d descriptors, returning\n", 9412292SN/A curFetching); 9422292SN/A return; 9432292SN/A } 9442292SN/A 9452292SN/A if (descTail() >= cachePnt) 9462292SN/A max_to_fetch = descTail() - cachePnt; 9472292SN/A else 9482292SN/A max_to_fetch = descLen() - cachePnt; 9492292SN/A 9506221Snate@binkert.org size_t free_cache = size - usedCache.size() - unusedCache.size(); 9512292SN/A 9522980Sgblack@eecs.umich.edu if (!max_to_fetch) 9532980Sgblack@eecs.umich.edu igbe->anWe(annSmFetch, annUnusedDescQ); 9542292SN/A else 9551060SN/A igbe->anPq(annSmFetch, annUnusedDescQ, max_to_fetch); 9561060SN/A 9571060SN/A if (max_to_fetch) { 9581060SN/A if (!free_cache) 9592292SN/A igbe->anWf(annSmFetch, annDescQ); 9609919Ssteve.reinhardt@amd.com else 9612292SN/A igbe->anRq(annSmFetch, annDescQ, free_cache); 9621062SN/A } 9632292SN/A 9649919Ssteve.reinhardt@amd.com max_to_fetch = std::min(max_to_fetch, free_cache); 9651060SN/A 9669919Ssteve.reinhardt@amd.com 9679919Ssteve.reinhardt@amd.com DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: " 9689919Ssteve.reinhardt@amd.com "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n", 9699919Ssteve.reinhardt@amd.com descHead(), descTail(), descLen(), cachePnt, 9709919Ssteve.reinhardt@amd.com max_to_fetch, descLeft()); 9719919Ssteve.reinhardt@amd.com 9729919Ssteve.reinhardt@amd.com // Nothing to do 9739919Ssteve.reinhardt@amd.com if (max_to_fetch == 0) 9749919Ssteve.reinhardt@amd.com return; 9759919Ssteve.reinhardt@amd.com 9761060SN/A // So we don't have two descriptor fetches going on at once 9779919Ssteve.reinhardt@amd.com curFetching = max_to_fetch; 9789919Ssteve.reinhardt@amd.com 9799919Ssteve.reinhardt@amd.com assert(!fetchDelayEvent.scheduled()); 9801062SN/A igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay); 98111246Sradhika.jagtap@ARM.com igbe->anBegin(annSmFetch, "Prepare Fetch Desc"); 98211246Sradhika.jagtap@ARM.com} 
98311246Sradhika.jagtap@ARM.com 98411246Sradhika.jagtap@ARM.comtemplate<class T> 98511246Sradhika.jagtap@ARM.comvoid 98611246Sradhika.jagtap@ARM.comIGbE::DescCache<T>::fetchDescriptors1() 9872292SN/A{ 9881061SN/A // If we're draining delay issuing this DMA 9891062SN/A if (igbe->getDrainState() != Drainable::Running) { 9901060SN/A igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay); 99113601Sgiacomo.travaglini@arm.com return; 99213601Sgiacomo.travaglini@arm.com } 99313601Sgiacomo.travaglini@arm.com 9941060SN/A igbe->anBegin(annSmFetch, "Fetch Desc"); 9951060SN/A 9961060SN/A DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n", 9971060SN/A descBase() + cachePnt * sizeof(T), 9986221Snate@binkert.org pciToDma(descBase() + cachePnt * sizeof(T)), 9991060SN/A curFetching * sizeof(T)); 10002292SN/A assert(curFetching); 10012292SN/A igbe->dmaRead(pciToDma(descBase() + cachePnt * sizeof(T)), 10022292SN/A curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf, 10032292SN/A igbe->fetchCompDelay); 10042980Sgblack@eecs.umich.edu} 10052980Sgblack@eecs.umich.edu 10061060SN/Atemplate<class T> 10071061SN/Avoid 10081060SN/AIGbE::DescCache<T>::fetchComplete() 10092292SN/A{ 10102292SN/A T *newDesc; 10112292SN/A igbe->anBegin(annSmFetch, "Fetch Complete"); 10122292SN/A for (int x = 0; x < curFetching; x++) { 10132292SN/A newDesc = new T; 10142292SN/A memcpy(newDesc, &fetchBuf[x], sizeof(T)); 10151060SN/A unusedCache.push_back(newDesc); 10161060SN/A igbe->anDq(annSmFetch, annUnusedDescQ); 10171060SN/A igbe->anQ(annSmFetch, annUnusedCacheQ); 10182292SN/A igbe->anQ(annSmFetch, annDescQ); 10192292SN/A } 10202292SN/A 10212292SN/A 10222292SN/A#ifndef NDEBUG 10232292SN/A int oldCp = cachePnt; 10249919Ssteve.reinhardt@amd.com#endif 10251060SN/A 102612105Snathanael.premillieu@arm.com cachePnt += curFetching; 10272329SN/A assert(cachePnt <= descLen()); 102812106SRekai.GonzalezAlberquilla@arm.com if (cachePnt == descLen()) 102912106SRekai.GonzalezAlberquilla@arm.com 
cachePnt = 0; 103012105Snathanael.premillieu@arm.com 10311061SN/A curFetching = 0; 10329919Ssteve.reinhardt@amd.com 10339919Ssteve.reinhardt@amd.com DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n", 10349919Ssteve.reinhardt@amd.com oldCp, cachePnt); 10359919Ssteve.reinhardt@amd.com 10369919Ssteve.reinhardt@amd.com if ((descTail() >= cachePnt ? (descTail() - cachePnt) : (descLen() - 10379919Ssteve.reinhardt@amd.com cachePnt)) == 0) 10389919Ssteve.reinhardt@amd.com { 10392292SN/A igbe->anWe(annSmFetch, annUnusedDescQ); 10401061SN/A } else if (!(size - usedCache.size() - unusedCache.size())) { 10412292SN/A igbe->anWf(annSmFetch, annDescQ); 10421060SN/A } else { 10431060SN/A igbe->anBegin(annSmFetch, "Wait", CPA::FL_WAIT); 10441060SN/A } 10451061SN/A 10461061SN/A enableSm(); 104713429Srekai.gonzalezalberquilla@arm.com igbe->checkDrain(); 10481061SN/A} 10499919Ssteve.reinhardt@amd.com 10509919Ssteve.reinhardt@amd.comtemplate<class T> 10511061SN/Avoid 10521061SN/AIGbE::DescCache<T>::wbComplete() 10531061SN/A{ 10549919Ssteve.reinhardt@amd.com 10552292SN/A igbe->anBegin(annSmWb, "Finish Writeback"); 105612106SRekai.GonzalezAlberquilla@arm.com 105712105Snathanael.premillieu@arm.com long curHead = descHead(); 10589919Ssteve.reinhardt@amd.com#ifndef NDEBUG 105912106SRekai.GonzalezAlberquilla@arm.com long oldHead = curHead; 106012106SRekai.GonzalezAlberquilla@arm.com#endif 10619913Ssteve.reinhardt@amd.com 10629919Ssteve.reinhardt@amd.com for (int x = 0; x < wbOut; x++) { 10639913Ssteve.reinhardt@amd.com assert(usedCache.size()); 10649913Ssteve.reinhardt@amd.com delete usedCache[0]; 10659919Ssteve.reinhardt@amd.com usedCache.pop_front(); 10669913Ssteve.reinhardt@amd.com 106712144Srekai.gonzalezalberquilla@arm.com igbe->anDq(annSmWb, annUsedCacheQ); 106813598Sgiacomo.travaglini@arm.com igbe->anDq(annSmWb, annDescQ); 106912144Srekai.gonzalezalberquilla@arm.com } 107012144Srekai.gonzalezalberquilla@arm.com 107113610Sgiacomo.gabrielli@arm.com curHead += wbOut; 
107213610Sgiacomo.gabrielli@arm.com wbOut = 0; 107313610Sgiacomo.gabrielli@arm.com 10749920Syasuko.eckert@amd.com if (curHead >= descLen()) 10759913Ssteve.reinhardt@amd.com curHead -= descLen(); 10769913Ssteve.reinhardt@amd.com 10779913Ssteve.reinhardt@amd.com // Update the head 10789913Ssteve.reinhardt@amd.com updateHead(curHead); 107912106SRekai.GonzalezAlberquilla@arm.com 10803773Sgblack@eecs.umich.edu DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n", 10814352Sgblack@eecs.umich.edu oldHead, curHead); 108212105Snathanael.premillieu@arm.com 108312106SRekai.GonzalezAlberquilla@arm.com // If we still have more to wb, call wb now 108412106SRekai.GonzalezAlberquilla@arm.com actionAfterWb(); 108512106SRekai.GonzalezAlberquilla@arm.com if (moreToWb) { 108612106SRekai.GonzalezAlberquilla@arm.com moreToWb = false; 10871061SN/A DPRINTF(EthernetDesc, "Writeback has more todo\n"); 10881061SN/A writeback(wbAlignment); 10891061SN/A } 10902292SN/A 10919919Ssteve.reinhardt@amd.com if (!wbOut) { 109212105Snathanael.premillieu@arm.com igbe->checkDrain(); 109312106SRekai.GonzalezAlberquilla@arm.com if (usedCache.size()) 109412106SRekai.GonzalezAlberquilla@arm.com igbe->anBegin(annSmWb, "Wait", CPA::FL_WAIT); 109512106SRekai.GonzalezAlberquilla@arm.com else 10961061SN/A igbe->anWe(annSmWb, annUsedCacheQ); 10971061SN/A } 10984636Sgblack@eecs.umich.edu fetchAfterWb(); 109912105Snathanael.premillieu@arm.com} 110012106SRekai.GonzalezAlberquilla@arm.com 110112106SRekai.GonzalezAlberquilla@arm.comtemplate<class T> 110212106SRekai.GonzalezAlberquilla@arm.comvoid 11031061SN/AIGbE::DescCache<T>::reset() 11041062SN/A{ 11051062SN/A DPRINTF(EthernetDesc, "Reseting descriptor cache\n"); 11061061SN/A for (typename CacheType::size_type x = 0; x < usedCache.size(); x++) 11071061SN/A delete usedCache[x]; 11081061SN/A for (typename CacheType::size_type x = 0; x < unusedCache.size(); x++) 11091061SN/A delete unusedCache[x]; 11101061SN/A 111113429Srekai.gonzalezalberquilla@arm.com 
usedCache.clear(); 11121061SN/A unusedCache.clear(); 11139919Ssteve.reinhardt@amd.com 11149919Ssteve.reinhardt@amd.com cachePnt = 0; 11151061SN/A 11161061SN/A} 11172292SN/A 11182292SN/Atemplate<class T> 111912106SRekai.GonzalezAlberquilla@arm.comvoid 11209919Ssteve.reinhardt@amd.comIGbE::DescCache<T>::serialize(std::ostream &os) 11219919Ssteve.reinhardt@amd.com{ 112212106SRekai.GonzalezAlberquilla@arm.com SERIALIZE_SCALAR(cachePnt); 11239913Ssteve.reinhardt@amd.com SERIALIZE_SCALAR(curFetching); 112412106SRekai.GonzalezAlberquilla@arm.com SERIALIZE_SCALAR(wbOut); 11259913Ssteve.reinhardt@amd.com SERIALIZE_SCALAR(moreToWb); 112612106SRekai.GonzalezAlberquilla@arm.com SERIALIZE_SCALAR(wbAlignment); 11271061SN/A 11289919Ssteve.reinhardt@amd.com typename CacheType::size_type usedCacheSize = usedCache.size(); 11299916Ssteve.reinhardt@amd.com SERIALIZE_SCALAR(usedCacheSize); 11301062SN/A for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) { 113112105Snathanael.premillieu@arm.com arrayParamOut(os, csprintf("usedCache_%d", x), 113212106SRekai.GonzalezAlberquilla@arm.com (uint8_t*)usedCache[x],sizeof(T)); 113312106SRekai.GonzalezAlberquilla@arm.com } 113412106SRekai.GonzalezAlberquilla@arm.com 113512106SRekai.GonzalezAlberquilla@arm.com typename CacheType::size_type unusedCacheSize = unusedCache.size(); 11361062SN/A SERIALIZE_SCALAR(unusedCacheSize); 11372292SN/A for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) { 113812106SRekai.GonzalezAlberquilla@arm.com arrayParamOut(os, csprintf("unusedCache_%d", x), 11392292SN/A (uint8_t*)unusedCache[x],sizeof(T)); 11402292SN/A } 11411062SN/A 11422292SN/A Tick fetch_delay = 0, wb_delay = 0; 11431062SN/A if (fetchDelayEvent.scheduled()) 11442935Sksewell@umich.edu fetch_delay = fetchDelayEvent.when(); 11452935Sksewell@umich.edu SERIALIZE_SCALAR(fetch_delay); 11462935Sksewell@umich.edu if (wbDelayEvent.scheduled()) 11472292SN/A wb_delay = wbDelayEvent.when(); 11481062SN/A SERIALIZE_SCALAR(wb_delay); 
11492292SN/A 11502292SN/A 11512292SN/A} 11522292SN/A 11532292SN/Atemplate<class T> 11542292SN/Avoid 11552292SN/AIGbE::DescCache<T>::unserialize(Checkpoint *cp, const std::string §ion) 11562292SN/A{ 11571062SN/A UNSERIALIZE_SCALAR(cachePnt); 11582292SN/A UNSERIALIZE_SCALAR(curFetching); 11591061SN/A UNSERIALIZE_SCALAR(wbOut); 11601061SN/A UNSERIALIZE_SCALAR(moreToWb); 11611061SN/A UNSERIALIZE_SCALAR(wbAlignment); 11621061SN/A 11631061SN/A typename CacheType::size_type usedCacheSize; 11646221Snate@binkert.org UNSERIALIZE_SCALAR(usedCacheSize); 11651061SN/A T *temp; 11662292SN/A for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) { 11672292SN/A temp = new T; 11682292SN/A arrayParamIn(cp, section, csprintf("usedCache_%d", x), 11692292SN/A (uint8_t*)temp,sizeof(T)); 11702292SN/A usedCache.push_back(temp); 11712292SN/A } 11721061SN/A 11731061SN/A typename CacheType::size_type unusedCacheSize; 11741061SN/A UNSERIALIZE_SCALAR(unusedCacheSize); 11751061SN/A for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) { 11766221Snate@binkert.org temp = new T; 11771061SN/A arrayParamIn(cp, section, csprintf("unusedCache_%d", x), 11782292SN/A (uint8_t*)temp,sizeof(T)); 11792292SN/A unusedCache.push_back(temp); 11802292SN/A } 11812292SN/A Tick fetch_delay = 0, wb_delay = 0; 11822292SN/A UNSERIALIZE_SCALAR(fetch_delay); 11832292SN/A UNSERIALIZE_SCALAR(wb_delay); 11842292SN/A if (fetch_delay) 11852292SN/A igbe->schedule(fetchDelayEvent, fetch_delay); 11862292SN/A if (wb_delay) 11872292SN/A igbe->schedule(wbDelayEvent, wb_delay); 118810239Sbinhpham@cs.rutgers.edu 11892292SN/A 119010239Sbinhpham@cs.rutgers.edu} 119110935Snilay@cs.wisc.edu 119210239Sbinhpham@cs.rutgers.edu///////////////////////////// IGbE::RxDescCache ////////////////////////////// 119310239Sbinhpham@cs.rutgers.edu 119410239Sbinhpham@cs.rutgers.eduIGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s) 119510239Sbinhpham@cs.rutgers.edu : DescCache<RxDesc>(i, n, s), pktDone(false), 
splitCount(0), 119610239Sbinhpham@cs.rutgers.edu pktEvent(this), pktHdrEvent(this), pktDataEvent(this) 11972292SN/A 119810239Sbinhpham@cs.rutgers.edu{ 119910239Sbinhpham@cs.rutgers.edu annSmFetch = "RX Desc Fetch"; 120010239Sbinhpham@cs.rutgers.edu annSmWb = "RX Desc Writeback"; 120110239Sbinhpham@cs.rutgers.edu annUnusedDescQ = "RX Unused Descriptors"; 120210239Sbinhpham@cs.rutgers.edu annUnusedCacheQ = "RX Unused Descriptor Cache"; 120310935Snilay@cs.wisc.edu annUsedCacheQ = "RX Used Descriptor Cache"; 120410239Sbinhpham@cs.rutgers.edu annUsedDescQ = "RX Used Descriptors"; 120510239Sbinhpham@cs.rutgers.edu annDescQ = "RX Descriptors"; 120610239Sbinhpham@cs.rutgers.edu} 120710239Sbinhpham@cs.rutgers.edu 12082292SN/Avoid 12092292SN/AIGbE::RxDescCache::pktSplitDone() 12102292SN/A{ 12112292SN/A splitCount++; 12122292SN/A DPRINTF(EthernetDesc, 12132292SN/A "Part of split packet done: splitcount now %d\n", splitCount); 12142292SN/A assert(splitCount <= 2); 12152292SN/A if (splitCount != 2) 12162292SN/A return; 12172731Sktlim@umich.edu splitCount = 0; 12182292SN/A DPRINTF(EthernetDesc, 12192292SN/A "Part of split packet done: calling pktComplete()\n"); 12202292SN/A pktComplete(); 12212292SN/A} 12222292SN/A 12232292SN/Aint 12242292SN/AIGbE::RxDescCache::writePacket(EthPacketPtr packet, int pkt_offset) 12252292SN/A{ 12266221Snate@binkert.org assert(unusedCache.size()); 12272292SN/A //if (!unusedCache.size()) 12282292SN/A // return false; 12292292SN/A 12302292SN/A pktPtr = packet; 12312292SN/A pktDone = false; 12322292SN/A unsigned buf_len, hdr_len; 12332292SN/A 12342292SN/A RxDesc *desc = unusedCache.front(); 12352292SN/A switch (igbe->regs.srrctl.desctype()) { 12362292SN/A case RXDT_LEGACY: 12372292SN/A assert(pkt_offset == 0); 12382292SN/A bytesCopied = packet->length; 12392292SN/A DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n", 12406221Snate@binkert.org packet->length, igbe->regs.rctl.descSize()); 12412292SN/A assert(packet->length < 
igbe->regs.rctl.descSize()); 12422292SN/A igbe->dmaWrite(pciToDma(desc->legacy.buf), 12432292SN/A packet->length, &pktEvent, packet->data, 12442292SN/A igbe->rxWriteDelay); 12452292SN/A break; 12462292SN/A case RXDT_ADV_ONEBUF: 12472292SN/A assert(pkt_offset == 0); 12482292SN/A bytesCopied = packet->length; 12492292SN/A buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() : 12502292SN/A igbe->regs.rctl.descSize(); 12512292SN/A DPRINTF(EthernetDesc, "Packet Length: %d srrctl: %#x Desc Size: %d\n", 12522292SN/A packet->length, igbe->regs.srrctl(), buf_len); 125310239Sbinhpham@cs.rutgers.edu assert(packet->length < buf_len); 12542292SN/A igbe->dmaWrite(pciToDma(desc->adv_read.pkt), 12552292SN/A packet->length, &pktEvent, packet->data, 12562292SN/A igbe->rxWriteDelay); 12572292SN/A desc->adv_wb.header_len = htole(0); 12582292SN/A desc->adv_wb.sph = htole(0); 12592301SN/A desc->adv_wb.pkt_len = htole((uint16_t)(pktPtr->length)); 12602292SN/A break; 12612301SN/A case RXDT_ADV_SPLIT_A: 12622292SN/A int split_point; 12632292SN/A 12642292SN/A buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() : 12652292SN/A igbe->regs.rctl.descSize(); 12662292SN/A hdr_len = igbe->regs.rctl.lpe() ? 
igbe->regs.srrctl.hdrLen() : 0; 12672292SN/A DPRINTF(EthernetDesc, 12682292SN/A "lpe: %d Packet Length: %d offset: %d srrctl: %#x " 12692292SN/A "hdr addr: %#x Hdr Size: %d desc addr: %#x Desc Size: %d\n", 12702292SN/A igbe->regs.rctl.lpe(), packet->length, pkt_offset, 12712292SN/A igbe->regs.srrctl(), desc->adv_read.hdr, hdr_len, 12726221Snate@binkert.org desc->adv_read.pkt, buf_len); 12732292SN/A 12748607Sgblack@eecs.umich.edu split_point = hsplit(pktPtr); 12758607Sgblack@eecs.umich.edu 12762292SN/A if (packet->length <= hdr_len) { 127710239Sbinhpham@cs.rutgers.edu bytesCopied = packet->length; 127810239Sbinhpham@cs.rutgers.edu assert(pkt_offset == 0); 127910239Sbinhpham@cs.rutgers.edu DPRINTF(EthernetDesc, "Hdr split: Entire packet in header\n"); 128010239Sbinhpham@cs.rutgers.edu igbe->dmaWrite(pciToDma(desc->adv_read.hdr), 12812292SN/A packet->length, &pktEvent, packet->data, 12822292SN/A igbe->rxWriteDelay); 12832292SN/A desc->adv_wb.header_len = htole((uint16_t)packet->length); 12842292SN/A desc->adv_wb.sph = htole(0); 12852292SN/A desc->adv_wb.pkt_len = htole(0); 12862292SN/A } else if (split_point) { 12872292SN/A if (pkt_offset) { 128810239Sbinhpham@cs.rutgers.edu // we are only copying some data, header/data has already been 128913610Sgiacomo.gabrielli@arm.com // copied 12902292SN/A int max_to_copy = 12912292SN/A std::min(packet->length - pkt_offset, buf_len); 12922292SN/A bytesCopied += max_to_copy; 129310239Sbinhpham@cs.rutgers.edu DPRINTF(EthernetDesc, 129412109SRekai.GonzalezAlberquilla@arm.com "Hdr split: Continuing data buffer copy\n"); 129512109SRekai.GonzalezAlberquilla@arm.com igbe->dmaWrite(pciToDma(desc->adv_read.pkt), 129612109SRekai.GonzalezAlberquilla@arm.com max_to_copy, &pktEvent, 129712109SRekai.GonzalezAlberquilla@arm.com packet->data + pkt_offset, igbe->rxWriteDelay); 129812109SRekai.GonzalezAlberquilla@arm.com desc->adv_wb.header_len = htole(0); 129913610Sgiacomo.gabrielli@arm.com desc->adv_wb.pkt_len = htole((uint16_t)max_to_copy); 
130012109SRekai.GonzalezAlberquilla@arm.com desc->adv_wb.sph = htole(0); 13012292SN/A } else { 13022292SN/A int max_to_copy = 13032292SN/A std::min(packet->length - split_point, buf_len); 13042292SN/A bytesCopied += max_to_copy + split_point; 13052292SN/A 13062292SN/A DPRINTF(EthernetDesc, "Hdr split: splitting at %d\n", 13072292SN/A split_point); 13086221Snate@binkert.org igbe->dmaWrite(pciToDma(desc->adv_read.hdr), 13092292SN/A split_point, &pktHdrEvent, 13102292SN/A packet->data, igbe->rxWriteDelay); 13112292SN/A igbe->dmaWrite(pciToDma(desc->adv_read.pkt), 13122292SN/A max_to_copy, &pktDataEvent, 13132292SN/A packet->data + split_point, igbe->rxWriteDelay); 13142292SN/A desc->adv_wb.header_len = htole(split_point); 13152292SN/A desc->adv_wb.sph = 1; 13162292SN/A desc->adv_wb.pkt_len = htole((uint16_t)(max_to_copy)); 13172301SN/A } 13182292SN/A } else { 13192292SN/A panic("Header split not fitting within header buffer or " 13202292SN/A "undecodable packet not fitting in header unsupported\n"); 13212292SN/A } 13222292SN/A break; 13232292SN/A default: 13242292SN/A panic("Unimplemnted RX receive buffer type: %d\n", 13252292SN/A igbe->regs.srrctl.desctype()); 13262292SN/A } 13274632Sgblack@eecs.umich.edu return bytesCopied; 13282292SN/A 13292292SN/A} 13302292SN/A 13312292SN/Avoid 13322292SN/AIGbE::RxDescCache::pktComplete() 13332292SN/A{ 13342292SN/A assert(unusedCache.size()); 13352292SN/A RxDesc *desc; 13362292SN/A desc = unusedCache.front(); 13372292SN/A 13382292SN/A igbe->anBegin("RXS", "Update Desc"); 13392292SN/A 13402292SN/A uint16_t crcfixup = igbe->regs.rctl.secrc() ? 
0 : 4 ; 13412292SN/A DPRINTF(EthernetDesc, "pktPtr->length: %d bytesCopied: %d " 13422292SN/A "stripcrc offset: %d value written: %d %d\n", 13432292SN/A pktPtr->length, bytesCopied, crcfixup, 13442292SN/A htole((uint16_t)(pktPtr->length + crcfixup)), 13452292SN/A (uint16_t)(pktPtr->length + crcfixup)); 13462292SN/A 13472292SN/A // no support for anything but starting at 0 13482292SN/A assert(igbe->regs.rxcsum.pcss() == 0); 13492292SN/A 13503798Sgblack@eecs.umich.edu DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n"); 13513798Sgblack@eecs.umich.edu 13523798Sgblack@eecs.umich.edu uint16_t status = RXDS_DD; 13532292SN/A uint8_t err = 0; 13543798Sgblack@eecs.umich.edu uint16_t ext_err = 0; 13553798Sgblack@eecs.umich.edu uint16_t csum = 0; 13563798Sgblack@eecs.umich.edu uint16_t ptype = 0; 13573798Sgblack@eecs.umich.edu uint16_t ip_id = 0; 13583798Sgblack@eecs.umich.edu 13593798Sgblack@eecs.umich.edu assert(bytesCopied <= pktPtr->length); 13603798Sgblack@eecs.umich.edu if (bytesCopied == pktPtr->length) 13613798Sgblack@eecs.umich.edu status |= RXDS_EOP; 13623788Sgblack@eecs.umich.edu 13633788Sgblack@eecs.umich.edu IpPtr ip(pktPtr); 13642292SN/A 13653788Sgblack@eecs.umich.edu if (ip) { 13663788Sgblack@eecs.umich.edu DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n", ip->id()); 13673788Sgblack@eecs.umich.edu ptype |= RXDP_IPV4; 13682292SN/A ip_id = ip->id(); 13692292SN/A 13702301SN/A if (igbe->regs.rxcsum.ipofld()) { 13712292SN/A DPRINTF(EthernetDesc, "Checking IP checksum\n"); 13722301SN/A status |= RXDS_IPCS; 13732292SN/A csum = htole(cksum(ip)); 13742292SN/A igbe->rxIpChecksums++; 13752301SN/A if (cksum(ip) != 0) { 13762292SN/A err |= RXDE_IPE; 13772292SN/A ext_err |= RXDEE_IPE; 13782292SN/A DPRINTF(EthernetDesc, "Checksum is bad!!\n"); 13792292SN/A } 13802292SN/A } 13812292SN/A TcpPtr tcp(ip); 13827720Sgblack@eecs.umich.edu if (tcp && igbe->regs.rxcsum.tuofld()) { 13832292SN/A DPRINTF(EthernetDesc, "Checking TCP checksum\n"); 13842292SN/A 
status |= RXDS_TCPCS; 13852301SN/A ptype |= RXDP_TCP; 13862292SN/A csum = htole(cksum(tcp)); 13872292SN/A igbe->rxTcpChecksums++; 13882301SN/A if (cksum(tcp) != 0) { 13892292SN/A DPRINTF(EthernetDesc, "Checksum is bad!!\n"); 13902301SN/A err |= RXDE_TCPE; 13912292SN/A ext_err |= RXDEE_TCPE; 13922292SN/A } 13932292SN/A } 13942703Sktlim@umich.edu 13952292SN/A UdpPtr udp(ip); 13962301SN/A if (udp && igbe->regs.rxcsum.tuofld()) { 13972292SN/A DPRINTF(EthernetDesc, "Checking UDP checksum\n"); 13982292SN/A status |= RXDS_UDPCS; 13992292SN/A ptype |= RXDP_UDP; 14002292SN/A csum = htole(cksum(udp)); 14012292SN/A igbe->rxUdpChecksums++; 14022292SN/A if (cksum(udp) != 0) { 14032292SN/A DPRINTF(EthernetDesc, "Checksum is bad!!\n"); 14041061SN/A ext_err |= RXDEE_TCPE; 14051061SN/A err |= RXDE_TCPE; 14061060SN/A } 14071060SN/A } 14086221Snate@binkert.org } else { // if ip 14091060SN/A DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n"); 14102292SN/A } 14112292SN/A 14122292SN/A switch (igbe->regs.srrctl.desctype()) { 14131060SN/A case RXDT_LEGACY: 14141060SN/A desc->legacy.len = htole((uint16_t)(pktPtr->length + crcfixup)); 14151060SN/A desc->legacy.status = htole(status); 14162292SN/A desc->legacy.errors = htole(err); 14172292SN/A // No vlan support at this point... 
just set it to 0 14182292SN/A desc->legacy.vlan = 0; 14192292SN/A break; 14202292SN/A case RXDT_ADV_SPLIT_A: 14212292SN/A case RXDT_ADV_ONEBUF: 14222292SN/A desc->adv_wb.rss_type = htole(0); 14232292SN/A desc->adv_wb.pkt_type = htole(ptype); 14242292SN/A if (igbe->regs.rxcsum.pcsd()) { 14252292SN/A // no rss support right now 14262292SN/A desc->adv_wb.rss_hash = htole(0); 14272292SN/A } else { 14282292SN/A desc->adv_wb.id = htole(ip_id); 14292292SN/A desc->adv_wb.csum = htole(csum); 14302292SN/A } 143110239Sbinhpham@cs.rutgers.edu desc->adv_wb.status = htole(status); 143210239Sbinhpham@cs.rutgers.edu desc->adv_wb.errors = htole(ext_err); 143310239Sbinhpham@cs.rutgers.edu // no vlan support 143410239Sbinhpham@cs.rutgers.edu desc->adv_wb.vlan_tag = htole(0); 143510239Sbinhpham@cs.rutgers.edu break; 14362292SN/A default: 14372292SN/A panic("Unimplemnted RX receive buffer type %d\n", 14382292SN/A igbe->regs.srrctl.desctype()); 14392292SN/A } 14401060SN/A 14412292SN/A DPRINTF(EthernetDesc, "Descriptor complete w0: %#x w1: %#x\n", 14421060SN/A desc->adv_read.pkt, desc->adv_read.hdr); 14432292SN/A 14442292SN/A if (bytesCopied == pktPtr->length) { 14452292SN/A DPRINTF(EthernetDesc, 14462292SN/A "Packet completely written to descriptor buffers\n"); 14472980Sgblack@eecs.umich.edu // Deal with the rx timer interrupts 14481060SN/A if (igbe->regs.rdtr.delay()) { 14496221Snate@binkert.org Tick delay = igbe->regs.rdtr.delay() * igbe->intClock(); 14501060SN/A DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n", delay); 14516221Snate@binkert.org igbe->reschedule(igbe->rdtrEvent, curTick() + delay); 14521060SN/A } 14536221Snate@binkert.org 145412105Snathanael.premillieu@arm.com if (igbe->regs.radv.idv()) { 145512105Snathanael.premillieu@arm.com Tick delay = igbe->regs.radv.idv() * igbe->intClock(); 145612105Snathanael.premillieu@arm.com DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n", delay); 145712106SRekai.GonzalezAlberquilla@arm.com if (!igbe->radvEvent.scheduled()) { 
145812106SRekai.GonzalezAlberquilla@arm.com igbe->schedule(igbe->radvEvent, curTick() + delay); 145912106SRekai.GonzalezAlberquilla@arm.com } 146012106SRekai.GonzalezAlberquilla@arm.com } 146112106SRekai.GonzalezAlberquilla@arm.com 146212106SRekai.GonzalezAlberquilla@arm.com // if neither radv or rdtr, maybe itr is set... 14631060SN/A if (!igbe->regs.rdtr.delay() && !igbe->regs.radv.idv()) { 14642292SN/A DPRINTF(EthernetSM, 14651062SN/A "RXS: Receive interrupt delay disabled, posting IT_RXT\n"); 14661060SN/A igbe->postInterrupt(IT_RXT); 14671060SN/A } 14689944Smatt.horsnell@ARM.com 14699944Smatt.horsnell@ARM.com // If the packet is small enough, interrupt appropriately 1470 // I wonder if this is delayed or not?! 1471 if (pktPtr->length <= igbe->regs.rsrpd.idv()) { 1472 DPRINTF(EthernetSM, 1473 "RXS: Posting IT_SRPD beacuse small packet received\n"); 1474 igbe->postInterrupt(IT_SRPD); 1475 } 1476 bytesCopied = 0; 1477 } 1478 1479 pktPtr = NULL; 1480 igbe->checkDrain(); 1481 enableSm(); 1482 pktDone = true; 1483 1484 igbe->anBegin("RXS", "Done Updating Desc"); 1485 DPRINTF(EthernetDesc, "Processing of this descriptor complete\n"); 1486 igbe->anDq("RXS", annUnusedCacheQ); 1487 unusedCache.pop_front(); 1488 igbe->anQ("RXS", annUsedCacheQ); 1489 usedCache.push_back(desc); 1490} 1491 1492void 1493IGbE::RxDescCache::enableSm() 1494{ 1495 if (!igbe->drainManager) { 1496 igbe->rxTick = true; 1497 igbe->restartClock(); 1498 } 1499} 1500 1501bool 1502IGbE::RxDescCache::packetDone() 1503{ 1504 if (pktDone) { 1505 pktDone = false; 1506 return true; 1507 } 1508 return false; 1509} 1510 1511bool 1512IGbE::RxDescCache::hasOutstandingEvents() 1513{ 1514 return pktEvent.scheduled() || wbEvent.scheduled() || 1515 fetchEvent.scheduled() || pktHdrEvent.scheduled() || 1516 pktDataEvent.scheduled(); 1517 1518} 1519 1520void 1521IGbE::RxDescCache::serialize(std::ostream &os) 1522{ 1523 DescCache<RxDesc>::serialize(os); 1524 SERIALIZE_SCALAR(pktDone); 1525 SERIALIZE_SCALAR(splitCount); 
1526 SERIALIZE_SCALAR(bytesCopied); 1527} 1528 1529void 1530IGbE::RxDescCache::unserialize(Checkpoint *cp, const std::string §ion) 1531{ 1532 DescCache<RxDesc>::unserialize(cp, section); 1533 UNSERIALIZE_SCALAR(pktDone); 1534 UNSERIALIZE_SCALAR(splitCount); 1535 UNSERIALIZE_SCALAR(bytesCopied); 1536} 1537 1538 1539///////////////////////////// IGbE::TxDescCache ////////////////////////////// 1540 1541IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s) 1542 : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false), 1543 pktWaiting(false), pktMultiDesc(false), 1544 completionAddress(0), completionEnabled(false), 1545 useTso(false), tsoHeaderLen(0), tsoMss(0), tsoTotalLen(0), tsoUsedLen(0), 1546 tsoPrevSeq(0), tsoPktPayloadBytes(0), tsoLoadedHeader(false), 1547 tsoPktHasHeader(false), tsoDescBytesUsed(0), tsoCopyBytes(0), tsoPkts(0), 1548 pktEvent(this), headerEvent(this), nullEvent(this) 1549{ 1550 annSmFetch = "TX Desc Fetch"; 1551 annSmWb = "TX Desc Writeback"; 1552 annUnusedDescQ = "TX Unused Descriptors"; 1553 annUnusedCacheQ = "TX Unused Descriptor Cache"; 1554 annUsedCacheQ = "TX Used Descriptor Cache"; 1555 annUsedDescQ = "TX Used Descriptors"; 1556 annDescQ = "TX Descriptors"; 1557} 1558 1559void 1560IGbE::TxDescCache::processContextDesc() 1561{ 1562 assert(unusedCache.size()); 1563 TxDesc *desc; 1564 1565 DPRINTF(EthernetDesc, "Checking and processing context descriptors\n"); 1566 1567 while (!useTso && unusedCache.size() && 1568 TxdOp::isContext(unusedCache.front())) { 1569 DPRINTF(EthernetDesc, "Got context descriptor type...\n"); 1570 1571 desc = unusedCache.front(); 1572 DPRINTF(EthernetDesc, "Descriptor upper: %#x lower: %#X\n", 1573 desc->d1, desc->d2); 1574 1575 1576 // is this going to be a tcp or udp packet? 1577 isTcp = TxdOp::tcp(desc) ? 
true : false; 1578 1579 // setup all the TSO variables, they'll be ignored if we don't use 1580 // tso for this connection 1581 tsoHeaderLen = TxdOp::hdrlen(desc); 1582 tsoMss = TxdOp::mss(desc); 1583 1584 if (TxdOp::isType(desc, TxdOp::TXD_CNXT) && TxdOp::tse(desc)) { 1585 DPRINTF(EthernetDesc, "TCP offload enabled for packet hdrlen: " 1586 "%d mss: %d paylen %d\n", TxdOp::hdrlen(desc), 1587 TxdOp::mss(desc), TxdOp::getLen(desc)); 1588 useTso = true; 1589 tsoTotalLen = TxdOp::getLen(desc); 1590 tsoLoadedHeader = false; 1591 tsoDescBytesUsed = 0; 1592 tsoUsedLen = 0; 1593 tsoPrevSeq = 0; 1594 tsoPktHasHeader = false; 1595 tsoPkts = 0; 1596 tsoCopyBytes = 0; 1597 } 1598 1599 TxdOp::setDd(desc); 1600 unusedCache.pop_front(); 1601 igbe->anDq("TXS", annUnusedCacheQ); 1602 usedCache.push_back(desc); 1603 igbe->anQ("TXS", annUsedCacheQ); 1604 } 1605 1606 if (!unusedCache.size()) 1607 return; 1608 1609 desc = unusedCache.front(); 1610 if (!useTso && TxdOp::isType(desc, TxdOp::TXD_ADVDATA) && 1611 TxdOp::tse(desc)) { 1612 DPRINTF(EthernetDesc, "TCP offload(adv) enabled for packet " 1613 "hdrlen: %d mss: %d paylen %d\n", 1614 tsoHeaderLen, tsoMss, TxdOp::getTsoLen(desc)); 1615 useTso = true; 1616 tsoTotalLen = TxdOp::getTsoLen(desc); 1617 tsoLoadedHeader = false; 1618 tsoDescBytesUsed = 0; 1619 tsoUsedLen = 0; 1620 tsoPrevSeq = 0; 1621 tsoPktHasHeader = false; 1622 tsoPkts = 0; 1623 } 1624 1625 if (useTso && !tsoLoadedHeader) { 1626 // we need to fetch a header 1627 DPRINTF(EthernetDesc, "Starting DMA of TSO header\n"); 1628 assert(TxdOp::isData(desc) && TxdOp::getLen(desc) >= tsoHeaderLen); 1629 pktWaiting = true; 1630 assert(tsoHeaderLen <= 256); 1631 igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)), 1632 tsoHeaderLen, &headerEvent, tsoHeader, 0); 1633 } 1634} 1635 1636void 1637IGbE::TxDescCache::headerComplete() 1638{ 1639 DPRINTF(EthernetDesc, "TSO: Fetching TSO header complete\n"); 1640 pktWaiting = false; 1641 1642 assert(unusedCache.size()); 1643 TxDesc *desc = 
unusedCache.front(); 1644 DPRINTF(EthernetDesc, "TSO: len: %d tsoHeaderLen: %d\n", 1645 TxdOp::getLen(desc), tsoHeaderLen); 1646 1647 if (TxdOp::getLen(desc) == tsoHeaderLen) { 1648 tsoDescBytesUsed = 0; 1649 tsoLoadedHeader = true; 1650 unusedCache.pop_front(); 1651 usedCache.push_back(desc); 1652 } else { 1653 DPRINTF(EthernetDesc, "TSO: header part of larger payload\n"); 1654 tsoDescBytesUsed = tsoHeaderLen; 1655 tsoLoadedHeader = true; 1656 } 1657 enableSm(); 1658 igbe->checkDrain(); 1659} 1660 1661unsigned 1662IGbE::TxDescCache::getPacketSize(EthPacketPtr p) 1663{ 1664 if (!unusedCache.size()) 1665 return 0; 1666 1667 DPRINTF(EthernetDesc, "Starting processing of descriptor\n"); 1668 1669 assert(!useTso || tsoLoadedHeader); 1670 TxDesc *desc = unusedCache.front(); 1671 1672 if (useTso) { 1673 DPRINTF(EthernetDesc, "getPacket(): TxDescriptor data " 1674 "d1: %#llx d2: %#llx\n", desc->d1, desc->d2); 1675 DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d " 1676 "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss, 1677 tsoTotalLen, tsoUsedLen, tsoLoadedHeader); 1678 1679 if (tsoPktHasHeader) 1680 tsoCopyBytes = std::min((tsoMss + tsoHeaderLen) - p->length, 1681 TxdOp::getLen(desc) - tsoDescBytesUsed); 1682 else 1683 tsoCopyBytes = std::min(tsoMss, 1684 TxdOp::getLen(desc) - tsoDescBytesUsed); 1685 unsigned pkt_size = 1686 tsoCopyBytes + (tsoPktHasHeader ? 
0 : tsoHeaderLen); 1687 1688 DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d " 1689 "this descLen: %d\n", 1690 tsoDescBytesUsed, tsoCopyBytes, TxdOp::getLen(desc)); 1691 DPRINTF(EthernetDesc, "TSO: pktHasHeader: %d\n", tsoPktHasHeader); 1692 DPRINTF(EthernetDesc, "TSO: Next packet is %d bytes\n", pkt_size); 1693 return pkt_size; 1694 } 1695 1696 DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n", 1697 TxdOp::getLen(unusedCache.front())); 1698 return TxdOp::getLen(desc); 1699} 1700 1701void 1702IGbE::TxDescCache::getPacketData(EthPacketPtr p) 1703{ 1704 assert(unusedCache.size()); 1705 1706 TxDesc *desc; 1707 desc = unusedCache.front(); 1708 1709 DPRINTF(EthernetDesc, "getPacketData(): TxDescriptor data " 1710 "d1: %#llx d2: %#llx\n", desc->d1, desc->d2); 1711 assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && 1712 TxdOp::getLen(desc)); 1713 1714 pktPtr = p; 1715 1716 pktWaiting = true; 1717 1718 DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d\n", p->length); 1719 1720 if (useTso) { 1721 assert(tsoLoadedHeader); 1722 if (!tsoPktHasHeader) { 1723 DPRINTF(EthernetDesc, 1724 "Loading TSO header (%d bytes) into start of packet\n", 1725 tsoHeaderLen); 1726 memcpy(p->data, &tsoHeader,tsoHeaderLen); 1727 p->length +=tsoHeaderLen; 1728 tsoPktHasHeader = true; 1729 } 1730 } 1731 1732 if (useTso) { 1733 DPRINTF(EthernetDesc, 1734 "Starting DMA of packet at offset %d length: %d\n", 1735 p->length, tsoCopyBytes); 1736 igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)) 1737 + tsoDescBytesUsed, 1738 tsoCopyBytes, &pktEvent, p->data + p->length, 1739 igbe->txReadDelay); 1740 tsoDescBytesUsed += tsoCopyBytes; 1741 assert(tsoDescBytesUsed <= TxdOp::getLen(desc)); 1742 } else { 1743 igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)), 1744 TxdOp::getLen(desc), &pktEvent, p->data + p->length, 1745 igbe->txReadDelay); 1746 } 1747} 1748 1749void 1750IGbE::TxDescCache::pktComplete() 1751{ 1752 1753 TxDesc *desc; 1754 assert(unusedCache.size()); 1755 assert(pktPtr); 1756 
1757 igbe->anBegin("TXS", "Update Desc"); 1758 1759 DPRINTF(EthernetDesc, "DMA of packet complete\n"); 1760 1761 1762 desc = unusedCache.front(); 1763 assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && 1764 TxdOp::getLen(desc)); 1765 1766 DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", 1767 desc->d1, desc->d2); 1768 1769 // Set the length of the data in the EtherPacket 1770 if (useTso) { 1771 DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d " 1772 "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss, 1773 tsoTotalLen, tsoUsedLen, tsoLoadedHeader); 1774 pktPtr->length += tsoCopyBytes; 1775 tsoUsedLen += tsoCopyBytes; 1776 DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d\n", 1777 tsoDescBytesUsed, tsoCopyBytes); 1778 } else 1779 pktPtr->length += TxdOp::getLen(desc); 1780 1781 1782 1783 if ((!TxdOp::eop(desc) && !useTso) || 1784 (pktPtr->length < ( tsoMss + tsoHeaderLen) && 1785 tsoTotalLen != tsoUsedLen && useTso)) { 1786 assert(!useTso || (tsoDescBytesUsed == TxdOp::getLen(desc))); 1787 igbe->anDq("TXS", annUnusedCacheQ); 1788 unusedCache.pop_front(); 1789 igbe->anQ("TXS", annUsedCacheQ); 1790 usedCache.push_back(desc); 1791 1792 tsoDescBytesUsed = 0; 1793 pktDone = true; 1794 pktWaiting = false; 1795 pktMultiDesc = true; 1796 1797 DPRINTF(EthernetDesc, "Partial Packet Descriptor of %d bytes Done\n", 1798 pktPtr->length); 1799 pktPtr = NULL; 1800 1801 enableSm(); 1802 igbe->checkDrain(); 1803 return; 1804 } 1805 1806 1807 pktMultiDesc = false; 1808 // no support for vlans 1809 assert(!TxdOp::vle(desc)); 1810 1811 // we only support single packet descriptors at this point 1812 if (!useTso) 1813 assert(TxdOp::eop(desc)); 1814 1815 // set that this packet is done 1816 if (TxdOp::rs(desc)) 1817 TxdOp::setDd(desc); 1818 1819 DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n", 1820 desc->d1, desc->d2); 1821 1822 if (useTso) { 1823 IpPtr ip(pktPtr); 1824 if (ip) { 1825 DPRINTF(EthernetDesc, "TSO: 
Modifying IP header. Id + %d\n", 1826 tsoPkts); 1827 ip->id(ip->id() + tsoPkts++); 1828 ip->len(pktPtr->length - EthPtr(pktPtr)->size()); 1829 1830 TcpPtr tcp(ip); 1831 if (tcp) { 1832 DPRINTF(EthernetDesc, 1833 "TSO: Modifying TCP header. old seq %d + %d\n", 1834 tcp->seq(), tsoPrevSeq); 1835 tcp->seq(tcp->seq() + tsoPrevSeq); 1836 if (tsoUsedLen != tsoTotalLen) 1837 tcp->flags(tcp->flags() & ~9); // clear fin & psh 1838 } 1839 UdpPtr udp(ip); 1840 if (udp) { 1841 DPRINTF(EthernetDesc, "TSO: Modifying UDP header.\n"); 1842 udp->len(pktPtr->length - EthPtr(pktPtr)->size()); 1843 } 1844 } 1845 tsoPrevSeq = tsoUsedLen; 1846 } 1847 1848 if (DTRACE(EthernetDesc)) { 1849 IpPtr ip(pktPtr); 1850 if (ip) 1851 DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n", 1852 ip->id()); 1853 else 1854 DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n"); 1855 } 1856 1857 // Checksums are only ofloaded for new descriptor types 1858 if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) { 1859 DPRINTF(EthernetDesc, "Calculating checksums for packet\n"); 1860 IpPtr ip(pktPtr); 1861 assert(ip); 1862 if (TxdOp::ixsm(desc)) { 1863 ip->sum(0); 1864 ip->sum(cksum(ip)); 1865 igbe->txIpChecksums++; 1866 DPRINTF(EthernetDesc, "Calculated IP checksum\n"); 1867 } 1868 if (TxdOp::txsm(desc)) { 1869 TcpPtr tcp(ip); 1870 UdpPtr udp(ip); 1871 if (tcp) { 1872 tcp->sum(0); 1873 tcp->sum(cksum(tcp)); 1874 igbe->txTcpChecksums++; 1875 DPRINTF(EthernetDesc, "Calculated TCP checksum\n"); 1876 } else if (udp) { 1877 assert(udp); 1878 udp->sum(0); 1879 udp->sum(cksum(udp)); 1880 igbe->txUdpChecksums++; 1881 DPRINTF(EthernetDesc, "Calculated UDP checksum\n"); 1882 } else { 1883 panic("Told to checksum, but don't know how\n"); 1884 } 1885 } 1886 } 1887 1888 if (TxdOp::ide(desc)) { 1889 // Deal with the rx timer interrupts 1890 DPRINTF(EthernetDesc, "Descriptor had IDE set\n"); 1891 if (igbe->regs.tidv.idv()) { 1892 Tick delay = igbe->regs.tidv.idv() * igbe->intClock(); 1893 
DPRINTF(EthernetDesc, "setting tidv\n"); 1894 igbe->reschedule(igbe->tidvEvent, curTick() + delay, true); 1895 } 1896 1897 if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) { 1898 Tick delay = igbe->regs.tadv.idv() * igbe->intClock(); 1899 DPRINTF(EthernetDesc, "setting tadv\n"); 1900 if (!igbe->tadvEvent.scheduled()) { 1901 igbe->schedule(igbe->tadvEvent, curTick() + delay); 1902 } 1903 } 1904 } 1905 1906 1907 if (!useTso || TxdOp::getLen(desc) == tsoDescBytesUsed) { 1908 DPRINTF(EthernetDesc, "Descriptor Done\n"); 1909 igbe->anDq("TXS", annUnusedCacheQ); 1910 unusedCache.pop_front(); 1911 igbe->anQ("TXS", annUsedCacheQ); 1912 usedCache.push_back(desc); 1913 tsoDescBytesUsed = 0; 1914 } 1915 1916 if (useTso && tsoUsedLen == tsoTotalLen) 1917 useTso = false; 1918 1919 1920 DPRINTF(EthernetDesc, 1921 "------Packet of %d bytes ready for transmission-------\n", 1922 pktPtr->length); 1923 pktDone = true; 1924 pktWaiting = false; 1925 pktPtr = NULL; 1926 tsoPktHasHeader = false; 1927 1928 if (igbe->regs.txdctl.wthresh() == 0) { 1929 igbe->anBegin("TXS", "Desc Writeback"); 1930 DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n"); 1931 writeback(0); 1932 } else if (!igbe->regs.txdctl.gran() && igbe->regs.txdctl.wthresh() <= 1933 descInBlock(usedCache.size())) { 1934 DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n"); 1935 igbe->anBegin("TXS", "Desc Writeback"); 1936 writeback((igbe->cacheBlockSize()-1)>>4); 1937 } else if (igbe->regs.txdctl.wthresh() <= usedCache.size()) { 1938 DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n"); 1939 igbe->anBegin("TXS", "Desc Writeback"); 1940 writeback((igbe->cacheBlockSize()-1)>>4); 1941 } 1942 1943 enableSm(); 1944 igbe->checkDrain(); 1945} 1946 1947void 1948IGbE::TxDescCache::actionAfterWb() 1949{ 1950 DPRINTF(EthernetDesc, "actionAfterWb() completionEnabled: %d\n", 1951 completionEnabled); 1952 igbe->postInterrupt(iGbReg::IT_TXDW); 1953 if (completionEnabled) { 1954 descEnd = 
igbe->regs.tdh(); 1955 DPRINTF(EthernetDesc, 1956 "Completion writing back value: %d to addr: %#x\n", descEnd, 1957 completionAddress); 1958 igbe->dmaWrite(pciToDma(mbits(completionAddress, 63, 2)), 1959 sizeof(descEnd), &nullEvent, (uint8_t*)&descEnd, 0); 1960 } 1961} 1962 1963void 1964IGbE::TxDescCache::serialize(std::ostream &os) 1965{ 1966 DescCache<TxDesc>::serialize(os); 1967 SERIALIZE_SCALAR(pktDone); 1968 SERIALIZE_SCALAR(isTcp); 1969 SERIALIZE_SCALAR(pktWaiting); 1970 SERIALIZE_SCALAR(pktMultiDesc); 1971 1972 SERIALIZE_SCALAR(useTso); 1973 SERIALIZE_SCALAR(tsoHeaderLen); 1974 SERIALIZE_SCALAR(tsoMss); 1975 SERIALIZE_SCALAR(tsoTotalLen); 1976 SERIALIZE_SCALAR(tsoUsedLen); 1977 SERIALIZE_SCALAR(tsoPrevSeq);; 1978 SERIALIZE_SCALAR(tsoPktPayloadBytes); 1979 SERIALIZE_SCALAR(tsoLoadedHeader); 1980 SERIALIZE_SCALAR(tsoPktHasHeader); 1981 SERIALIZE_ARRAY(tsoHeader, 256); 1982 SERIALIZE_SCALAR(tsoDescBytesUsed); 1983 SERIALIZE_SCALAR(tsoCopyBytes); 1984 SERIALIZE_SCALAR(tsoPkts); 1985 1986 SERIALIZE_SCALAR(completionAddress); 1987 SERIALIZE_SCALAR(completionEnabled); 1988 SERIALIZE_SCALAR(descEnd); 1989} 1990 1991void 1992IGbE::TxDescCache::unserialize(Checkpoint *cp, const std::string §ion) 1993{ 1994 DescCache<TxDesc>::unserialize(cp, section); 1995 UNSERIALIZE_SCALAR(pktDone); 1996 UNSERIALIZE_SCALAR(isTcp); 1997 UNSERIALIZE_SCALAR(pktWaiting); 1998 UNSERIALIZE_SCALAR(pktMultiDesc); 1999 2000 UNSERIALIZE_SCALAR(useTso); 2001 UNSERIALIZE_SCALAR(tsoHeaderLen); 2002 UNSERIALIZE_SCALAR(tsoMss); 2003 UNSERIALIZE_SCALAR(tsoTotalLen); 2004 UNSERIALIZE_SCALAR(tsoUsedLen); 2005 UNSERIALIZE_SCALAR(tsoPrevSeq);; 2006 UNSERIALIZE_SCALAR(tsoPktPayloadBytes); 2007 UNSERIALIZE_SCALAR(tsoLoadedHeader); 2008 UNSERIALIZE_SCALAR(tsoPktHasHeader); 2009 UNSERIALIZE_ARRAY(tsoHeader, 256); 2010 UNSERIALIZE_SCALAR(tsoDescBytesUsed); 2011 UNSERIALIZE_SCALAR(tsoCopyBytes); 2012 UNSERIALIZE_SCALAR(tsoPkts); 2013 2014 UNSERIALIZE_SCALAR(completionAddress); 2015 
UNSERIALIZE_SCALAR(completionEnabled); 2016 UNSERIALIZE_SCALAR(descEnd); 2017} 2018 2019bool 2020IGbE::TxDescCache::packetAvailable() 2021{ 2022 if (pktDone) { 2023 pktDone = false; 2024 return true; 2025 } 2026 return false; 2027} 2028 2029void 2030IGbE::TxDescCache::enableSm() 2031{ 2032 if (!igbe->drainManager) { 2033 igbe->txTick = true; 2034 igbe->restartClock(); 2035 } 2036} 2037 2038bool 2039IGbE::TxDescCache::hasOutstandingEvents() 2040{ 2041 return pktEvent.scheduled() || wbEvent.scheduled() || 2042 fetchEvent.scheduled(); 2043} 2044 2045 2046///////////////////////////////////// IGbE ///////////////////////////////// 2047 2048void 2049IGbE::restartClock() 2050{ 2051 if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) && 2052 getDrainState() == Drainable::Running) 2053 schedule(tickEvent, clockEdge(Cycles(1))); 2054} 2055 2056unsigned int 2057IGbE::drain(DrainManager *dm) 2058{ 2059 unsigned int count; 2060 count = pioPort.drain(dm) + dmaPort.drain(dm); 2061 if (rxDescCache.hasOutstandingEvents() || 2062 txDescCache.hasOutstandingEvents()) { 2063 count++; 2064 drainManager = dm; 2065 } 2066 2067 txFifoTick = false; 2068 txTick = false; 2069 rxTick = false; 2070 2071 if (tickEvent.scheduled()) 2072 deschedule(tickEvent); 2073 2074 if (count) { 2075 DPRINTF(Drain, "IGbE not drained\n"); 2076 setDrainState(Drainable::Draining); 2077 } else 2078 setDrainState(Drainable::Drained); 2079 2080 return count; 2081} 2082 2083void 2084IGbE::drainResume() 2085{ 2086 Drainable::drainResume(); 2087 2088 txFifoTick = true; 2089 txTick = true; 2090 rxTick = true; 2091 2092 restartClock(); 2093 DPRINTF(EthernetSM, "resuming from drain"); 2094} 2095 2096void 2097IGbE::checkDrain() 2098{ 2099 if (!drainManager) 2100 return; 2101 2102 txFifoTick = false; 2103 txTick = false; 2104 rxTick = false; 2105 if (!rxDescCache.hasOutstandingEvents() && 2106 !txDescCache.hasOutstandingEvents()) { 2107 DPRINTF(Drain, "IGbE done draining, processing drain event\n"); 2108 
drainManager->signalDrainDone(); 2109 drainManager = NULL; 2110 } 2111} 2112 2113void 2114IGbE::txStateMachine() 2115{ 2116 if (!regs.tctl.en()) { 2117 txTick = false; 2118 DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n"); 2119 return; 2120 } 2121 2122 // If we have a packet available and it's length is not 0 (meaning it's not 2123 // a multidescriptor packet) put it in the fifo, otherwise an the next 2124 // iteration we'll get the rest of the data 2125 if (txPacket && txDescCache.packetAvailable() 2126 && !txDescCache.packetMultiDesc() && txPacket->length) { 2127 anQ("TXS", "TX FIFO Q"); 2128 DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n"); 2129#ifndef NDEBUG 2130 bool success = 2131#endif 2132 txFifo.push(txPacket); 2133 txFifoTick = true && !drainManager; 2134 assert(success); 2135 txPacket = NULL; 2136 anBegin("TXS", "Desc Writeback"); 2137 txDescCache.writeback((cacheBlockSize()-1)>>4); 2138 return; 2139 } 2140 2141 // Only support descriptor granularity 2142 if (regs.txdctl.lwthresh() && 2143 txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) { 2144 DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n"); 2145 postInterrupt(IT_TXDLOW); 2146 } 2147 2148 if (!txPacket) { 2149 txPacket = std::make_shared<EthPacketData>(16384); 2150 } 2151 2152 if (!txDescCache.packetWaiting()) { 2153 if (txDescCache.descLeft() == 0) { 2154 postInterrupt(IT_TXQE); 2155 anBegin("TXS", "Desc Writeback"); 2156 txDescCache.writeback(0); 2157 anBegin("TXS", "Desc Fetch"); 2158 anWe("TXS", txDescCache.annUnusedCacheQ); 2159 txDescCache.fetchDescriptors(); 2160 DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing " 2161 "writeback stopping ticking and posting TXQE\n"); 2162 txTick = false; 2163 return; 2164 } 2165 2166 2167 if (!(txDescCache.descUnused())) { 2168 anBegin("TXS", "Desc Fetch"); 2169 txDescCache.fetchDescriptors(); 2170 anWe("TXS", txDescCache.annUnusedCacheQ); 2171 DPRINTF(EthernetSM, "TXS: No descriptors available in cache, " 2172 
"fetching and stopping ticking\n"); 2173 txTick = false; 2174 return; 2175 } 2176 anPq("TXS", txDescCache.annUnusedCacheQ); 2177 2178 2179 txDescCache.processContextDesc(); 2180 if (txDescCache.packetWaiting()) { 2181 DPRINTF(EthernetSM, 2182 "TXS: Fetching TSO header, stopping ticking\n"); 2183 txTick = false; 2184 return; 2185 } 2186 2187 unsigned size = txDescCache.getPacketSize(txPacket); 2188 if (size > 0 && txFifo.avail() > size) { 2189 anRq("TXS", "TX FIFO Q"); 2190 anBegin("TXS", "DMA Packet"); 2191 DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and " 2192 "beginning DMA of next packet\n", size); 2193 txFifo.reserve(size); 2194 txDescCache.getPacketData(txPacket); 2195 } else if (size == 0) { 2196 DPRINTF(EthernetSM, "TXS: getPacketSize returned: %d\n", size); 2197 DPRINTF(EthernetSM, 2198 "TXS: No packets to get, writing back used descriptors\n"); 2199 anBegin("TXS", "Desc Writeback"); 2200 txDescCache.writeback(0); 2201 } else { 2202 anWf("TXS", "TX FIFO Q"); 2203 DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space " 2204 "available in FIFO\n"); 2205 txTick = false; 2206 } 2207 2208 2209 return; 2210 } 2211 DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n"); 2212 txTick = false; 2213} 2214 2215bool 2216IGbE::ethRxPkt(EthPacketPtr pkt) 2217{ 2218 rxBytes += pkt->length; 2219 rxPackets++; 2220 2221 DPRINTF(Ethernet, "RxFIFO: Receiving pcakte from wire\n"); 2222 anBegin("RXQ", "Wire Recv"); 2223 2224 2225 if (!regs.rctl.en()) { 2226 DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n"); 2227 anBegin("RXQ", "FIFO Drop", CPA::FL_BAD); 2228 return true; 2229 } 2230 2231 // restart the state machines if they are stopped 2232 rxTick = true && !drainManager; 2233 if ((rxTick || txTick) && !tickEvent.scheduled()) { 2234 DPRINTF(EthernetSM, 2235 "RXS: received packet into fifo, starting ticking\n"); 2236 restartClock(); 2237 } 2238 2239 if (!rxFifo.push(pkt)) { 2240 DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... 
dropped\n"); 2241 postInterrupt(IT_RXO, true); 2242 anBegin("RXQ", "FIFO Drop", CPA::FL_BAD); 2243 return false; 2244 } 2245 2246 if (CPA::available() && cpa->enabled()) { 2247 assert(sys->numSystemsRunning <= 2); 2248 System *other_sys; 2249 if (sys->systemList[0] == sys) 2250 other_sys = sys->systemList[1]; 2251 else 2252 other_sys = sys->systemList[0]; 2253 2254 cpa->hwDq(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys); 2255 anQ("RXQ", "RX FIFO Q"); 2256 cpa->hwWe(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys); 2257 } 2258 2259 return true; 2260} 2261 2262 2263void 2264IGbE::rxStateMachine() 2265{ 2266 if (!regs.rctl.en()) { 2267 rxTick = false; 2268 DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n"); 2269 return; 2270 } 2271 2272 // If the packet is done check for interrupts/descriptors/etc 2273 if (rxDescCache.packetDone()) { 2274 rxDmaPacket = false; 2275 DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n"); 2276 int descLeft = rxDescCache.descLeft(); 2277 DPRINTF(EthernetSM, "RXS: descLeft: %d rdmts: %d rdlen: %d\n", 2278 descLeft, regs.rctl.rdmts(), regs.rdlen()); 2279 switch (regs.rctl.rdmts()) { 2280 case 2: if (descLeft > .125 * regs.rdlen()) break; 2281 case 1: if (descLeft > .250 * regs.rdlen()) break; 2282 case 0: if (descLeft > .500 * regs.rdlen()) break; 2283 DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) " 2284 "because of descriptors left\n"); 2285 postInterrupt(IT_RXDMT); 2286 break; 2287 } 2288 2289 if (rxFifo.empty()) 2290 rxDescCache.writeback(0); 2291 2292 if (descLeft == 0) { 2293 anBegin("RXS", "Writeback Descriptors"); 2294 rxDescCache.writeback(0); 2295 DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing" 2296 " writeback and stopping ticking\n"); 2297 rxTick = false; 2298 } 2299 2300 // only support descriptor granulaties 2301 assert(regs.rxdctl.gran()); 2302 2303 if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) { 2304 DPRINTF(EthernetSM, 2305 "RXS: Writing back because WTHRESH >= 
descUsed\n"); 2306 anBegin("RXS", "Writeback Descriptors"); 2307 if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4)) 2308 rxDescCache.writeback(regs.rxdctl.wthresh()-1); 2309 else 2310 rxDescCache.writeback((cacheBlockSize()-1)>>4); 2311 } 2312 2313 if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) && 2314 ((rxDescCache.descLeft() - rxDescCache.descUnused()) > 2315 regs.rxdctl.hthresh())) { 2316 DPRINTF(EthernetSM, "RXS: Fetching descriptors because " 2317 "descUnused < PTHRESH\n"); 2318 anBegin("RXS", "Fetch Descriptors"); 2319 rxDescCache.fetchDescriptors(); 2320 } 2321 2322 if (rxDescCache.descUnused() == 0) { 2323 anBegin("RXS", "Fetch Descriptors"); 2324 rxDescCache.fetchDescriptors(); 2325 anWe("RXS", rxDescCache.annUnusedCacheQ); 2326 DPRINTF(EthernetSM, "RXS: No descriptors available in cache, " 2327 "fetching descriptors and stopping ticking\n"); 2328 rxTick = false; 2329 } 2330 return; 2331 } 2332 2333 if (rxDmaPacket) { 2334 DPRINTF(EthernetSM, 2335 "RXS: stopping ticking until packet DMA completes\n"); 2336 rxTick = false; 2337 return; 2338 } 2339 2340 if (!rxDescCache.descUnused()) { 2341 anBegin("RXS", "Fetch Descriptors"); 2342 rxDescCache.fetchDescriptors(); 2343 anWe("RXS", rxDescCache.annUnusedCacheQ); 2344 DPRINTF(EthernetSM, "RXS: No descriptors available in cache, " 2345 "stopping ticking\n"); 2346 rxTick = false; 2347 DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n"); 2348 return; 2349 } 2350 anPq("RXS", rxDescCache.annUnusedCacheQ); 2351 2352 if (rxFifo.empty()) { 2353 anWe("RXS", "RX FIFO Q"); 2354 DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n"); 2355 rxTick = false; 2356 return; 2357 } 2358 anPq("RXS", "RX FIFO Q"); 2359 anBegin("RXS", "Get Desc"); 2360 2361 EthPacketPtr pkt; 2362 pkt = rxFifo.front(); 2363 2364 2365 pktOffset = rxDescCache.writePacket(pkt, pktOffset); 2366 DPRINTF(EthernetSM, "RXS: Writing packet into memory\n"); 2367 if (pktOffset == pkt->length) { 2368 anBegin( "RXS", "FIFO Dequeue"); 2369 
DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n"); 2370 pktOffset = 0; 2371 anDq("RXS", "RX FIFO Q"); 2372 rxFifo.pop(); 2373 } 2374 2375 DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n"); 2376 rxTick = false; 2377 rxDmaPacket = true; 2378 anBegin("RXS", "DMA Packet"); 2379} 2380 2381void 2382IGbE::txWire() 2383{ 2384 if (txFifo.empty()) { 2385 anWe("TXQ", "TX FIFO Q"); 2386 txFifoTick = false; 2387 return; 2388 } 2389 2390 2391 anPq("TXQ", "TX FIFO Q"); 2392 if (etherInt->sendPacket(txFifo.front())) { 2393 anQ("TXQ", "WireQ"); 2394 if (DTRACE(EthernetSM)) { 2395 IpPtr ip(txFifo.front()); 2396 if (ip) 2397 DPRINTF(EthernetSM, "Transmitting Ip packet with Id=%d\n", 2398 ip->id()); 2399 else 2400 DPRINTF(EthernetSM, "Transmitting Non-Ip packet\n"); 2401 } 2402 anDq("TXQ", "TX FIFO Q"); 2403 anBegin("TXQ", "Wire Send"); 2404 DPRINTF(EthernetSM, 2405 "TxFIFO: Successful transmit, bytes available in fifo: %d\n", 2406 txFifo.avail()); 2407 2408 txBytes += txFifo.front()->length; 2409 txPackets++; 2410 txFifoTick = false; 2411 2412 txFifo.pop(); 2413 } else { 2414 // We'll get woken up when the packet ethTxDone() gets called 2415 txFifoTick = false; 2416 } 2417} 2418 2419void 2420IGbE::tick() 2421{ 2422 DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n"); 2423 2424 if (rxTick) 2425 rxStateMachine(); 2426 2427 if (txTick) 2428 txStateMachine(); 2429 2430 if (txFifoTick) 2431 txWire(); 2432 2433 2434 if (rxTick || txTick || txFifoTick) 2435 schedule(tickEvent, curTick() + clockPeriod()); 2436} 2437 2438void 2439IGbE::ethTxDone() 2440{ 2441 anBegin("TXQ", "Send Done"); 2442 // restart the tx state machines if they are stopped 2443 // fifo to send another packet 2444 // tx sm to put more data into the fifo 2445 txFifoTick = true && !drainManager; 2446 if (txDescCache.descLeft() != 0 && !drainManager) 2447 txTick = true; 2448 2449 restartClock(); 2450 txWire(); 2451 DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n"); 2452} 2453 
/**
 * Checkpoint the top-level device state: PCI config, registers, EEPROM
 * interface state, FIFOs, the in-flight TX packet, the absolute firing
 * times of all pending delay-interrupt events (0 = not scheduled), and
 * both descriptor caches.  Order must match unserialize() exactly.
 */
void
IGbE::serialize(std::ostream &os)
{
    PciDevice::serialize(os);

    regs.serialize(os);
    SERIALIZE_SCALAR(eeOpBits);
    SERIALIZE_SCALAR(eeAddrBits);
    SERIALIZE_SCALAR(eeDataBits);
    SERIALIZE_SCALAR(eeOpcode);
    SERIALIZE_SCALAR(eeAddr);
    SERIALIZE_SCALAR(lastInterrupt);
    SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.serialize("rxfifo", os);
    txFifo.serialize("txfifo", os);

    bool txPktExists = txPacket != nullptr;
    SERIALIZE_SCALAR(txPktExists);
    if (txPktExists)
        txPacket->serialize("txpacket", os);

    // Record when each pending event would fire; 0 means unscheduled.
    Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
        inter_time = 0;

    if (rdtrEvent.scheduled())
        rdtr_time = rdtrEvent.when();
    SERIALIZE_SCALAR(rdtr_time);

    if (radvEvent.scheduled())
        radv_time = radvEvent.when();
    SERIALIZE_SCALAR(radv_time);

    if (tidvEvent.scheduled())
        tidv_time = tidvEvent.when();
    SERIALIZE_SCALAR(tidv_time);

    if (tadvEvent.scheduled())
        tadv_time = tadvEvent.when();
    SERIALIZE_SCALAR(tadv_time);

    if (interEvent.scheduled())
        inter_time = interEvent.when();
    SERIALIZE_SCALAR(inter_time);

    SERIALIZE_SCALAR(pktOffset);

    nameOut(os, csprintf("%s.TxDescCache", name()));
    txDescCache.serialize(os);

    nameOut(os, csprintf("%s.RxDescCache", name()));
    rxDescCache.serialize(os);
}

/**
 * Restore the top-level device state from a checkpoint, rescheduling
 * any delay-interrupt events whose saved firing time was nonzero and
 * re-enabling all state machine ticks.
 */
void
IGbE::unserialize(Checkpoint *cp, const std::string &section)
{
    PciDevice::unserialize(cp, section);

    regs.unserialize(cp, section);
    UNSERIALIZE_SCALAR(eeOpBits);
    UNSERIALIZE_SCALAR(eeAddrBits);
    UNSERIALIZE_SCALAR(eeDataBits);
    UNSERIALIZE_SCALAR(eeOpcode);
    UNSERIALIZE_SCALAR(eeAddr);
    UNSERIALIZE_SCALAR(lastInterrupt);
    UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.unserialize("rxfifo", cp, section);
    txFifo.unserialize("txfifo", cp, section);

    bool txPktExists;
    UNSERIALIZE_SCALAR(txPktExists);
    if (txPktExists) {
        // Same 16384-byte buffer size used by txStateMachine().
        txPacket = std::make_shared<EthPacketData>(16384);
        txPacket->unserialize("txpacket", cp, section);
    }

    // Conservatively wake all state machines; they will stop themselves
    // on the first tick if there is nothing to do.
    rxTick = true;
    txTick = true;
    txFifoTick = true;

    Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
    UNSERIALIZE_SCALAR(rdtr_time);
    UNSERIALIZE_SCALAR(radv_time);
    UNSERIALIZE_SCALAR(tidv_time);
    UNSERIALIZE_SCALAR(tadv_time);
    UNSERIALIZE_SCALAR(inter_time);

    if (rdtr_time)
        schedule(rdtrEvent, rdtr_time);

    if (radv_time)
        schedule(radvEvent, radv_time);

    if (tidv_time)
        schedule(tidvEvent, tidv_time);

    if (tadv_time)
        schedule(tadvEvent, tadv_time);

    if (inter_time)
        schedule(interEvent, inter_time);

    UNSERIALIZE_SCALAR(pktOffset);

    txDescCache.unserialize(cp, csprintf("%s.TxDescCache", section));

    rxDescCache.unserialize(cp, csprintf("%s.RxDescCache", section));
}

/**
 * Python-config factory hook: build the device from its parameter
 * struct.
 */
IGbE *
IGbEParams::create()
{
    return new IGbE(this);
}