ns_gige.cc revision 1561
16657Snate@binkert.org/* 26657Snate@binkert.org * Copyright (c) 2004 The Regents of The University of Michigan 310972Sdavid.hashe@amd.com * All rights reserved. 46657Snate@binkert.org * 56657Snate@binkert.org * Redistribution and use in source and binary forms, with or without 66657Snate@binkert.org * modification, are permitted provided that the following conditions are 76657Snate@binkert.org * met: redistributions of source code must retain the above copyright 86657Snate@binkert.org * notice, this list of conditions and the following disclaimer; 96657Snate@binkert.org * redistributions in binary form must reproduce the above copyright 106657Snate@binkert.org * notice, this list of conditions and the following disclaimer in the 116657Snate@binkert.org * documentation and/or other materials provided with the distribution; 126657Snate@binkert.org * neither the name of the copyright holders nor the names of its 136657Snate@binkert.org * contributors may be used to endorse or promote products derived from 146657Snate@binkert.org * this software without specific prior written permission. 156657Snate@binkert.org * 166657Snate@binkert.org * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 176657Snate@binkert.org * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 186657Snate@binkert.org * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 196657Snate@binkert.org * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 206657Snate@binkert.org * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 216657Snate@binkert.org * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 226657Snate@binkert.org * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 236657Snate@binkert.org * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 246657Snate@binkert.org * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 256657Snate@binkert.org * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 266657Snate@binkert.org * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 276657Snate@binkert.org */ 286657Snate@binkert.org 296999Snate@binkert.org/* @file 306657Snate@binkert.org * Device module for modelling the National Semiconductor 316657Snate@binkert.org * DP83820 ethernet controller. Does not support priority queueing 326657Snate@binkert.org */ 336657Snate@binkert.org#include <cstdio> 348189SLisa.Hsu@amd.com#include <deque> 356657Snate@binkert.org#include <string> 369499Snilay@cs.wisc.edu 379499Snilay@cs.wisc.edu#include "base/inet.hh" 3811308Santhony.gutierrez@amd.com#include "cpu/exec_context.hh" 399364Snilay@cs.wisc.edu#include "dev/dma.hh" 407055Snate@binkert.org#include "dev/etherlink.hh" 416882SBrad.Beckmann@amd.com#include "dev/ns_gige.hh" 426882SBrad.Beckmann@amd.com#include "dev/pciconfigall.hh" 438191SLisa.Hsu@amd.com#include "mem/bus/bus.hh" 446882SBrad.Beckmann@amd.com#include "mem/bus/dma_interface.hh" 4511308Santhony.gutierrez@amd.com#include "mem/bus/pio_interface.hh" 4611308Santhony.gutierrez@amd.com#include "mem/bus/pio_interface_impl.hh" 476882SBrad.Beckmann@amd.com#include "mem/functional_mem/memory_control.hh" 4811308Santhony.gutierrez@amd.com#include "mem/functional_mem/physical_memory.hh" 499102SNuwan.Jayasena@amd.com#include "sim/builder.hh" 5011084Snilay@cs.wisc.edu#include "sim/debug.hh" 519366Snilay@cs.wisc.edu#include 
"sim/host.hh" 529499Snilay@cs.wisc.edu#include "sim/stats.hh" 539499Snilay@cs.wisc.edu#include "targetarch/vtophys.hh" 549499Snilay@cs.wisc.edu 556882SBrad.Beckmann@amd.comconst char *NsRxStateStrings[] = 566657Snate@binkert.org{ 576657Snate@binkert.org "rxIdle", 586657Snate@binkert.org "rxDescRefr", 596657Snate@binkert.org "rxDescRead", 6010311Snilay@cs.wisc.edu "rxFifoBlock", 6110311Snilay@cs.wisc.edu "rxFragWrite", 6210311Snilay@cs.wisc.edu "rxDescWrite", 6310311Snilay@cs.wisc.edu "rxAdvance" 646657Snate@binkert.org}; 6510311Snilay@cs.wisc.edu 669366Snilay@cs.wisc.educonst char *NsTxStateStrings[] = 677839Snilay@cs.wisc.edu{ 686657Snate@binkert.org "txIdle", 696882SBrad.Beckmann@amd.com "txDescRefr", 7010308Snilay@cs.wisc.edu "txDescRead", 7110308Snilay@cs.wisc.edu "txFifoBlock", 726882SBrad.Beckmann@amd.com "txFragRead", 7310308Snilay@cs.wisc.edu "txDescWrite", 7410308Snilay@cs.wisc.edu "txAdvance" 7510308Snilay@cs.wisc.edu}; 7610308Snilay@cs.wisc.edu 7710308Snilay@cs.wisc.educonst char *NsDmaState[] = 789366Snilay@cs.wisc.edu{ 799366Snilay@cs.wisc.edu "dmaIdle", 806657Snate@binkert.org "dmaReading", 816657Snate@binkert.org "dmaWriting", 826657Snate@binkert.org "dmaReadWaiting", 836657Snate@binkert.org "dmaWriteWaiting" 849104Shestness@cs.utexas.edu}; 856657Snate@binkert.org 866657Snate@binkert.orgusing namespace std; 876657Snate@binkert.orgusing namespace Net; 8810311Snilay@cs.wisc.edu 8910311Snilay@cs.wisc.edu/////////////////////////////////////////////////////////////////////// 9010311Snilay@cs.wisc.edu// 9110311Snilay@cs.wisc.edu// NSGigE PCI Device 926657Snate@binkert.org// 937839Snilay@cs.wisc.eduNSGigE::NSGigE(Params *p) 947839Snilay@cs.wisc.edu : PciDev(p), ioEnable(false), 9510972Sdavid.hashe@amd.com txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size), 9610972Sdavid.hashe@amd.com txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL), 9710972Sdavid.hashe@amd.com txXferLen(0), rxXferLen(0), txState(txIdle), txEnable(false), 
986657Snate@binkert.org CTDD(false), 996657Snate@binkert.org txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle), 1006657Snate@binkert.org rxEnable(false), CRDD(false), rxPktBytes(0), 1016657Snate@binkert.org rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false), 1026657Snate@binkert.org rxDmaReadEvent(this), rxDmaWriteEvent(this), 1036657Snate@binkert.org txDmaReadEvent(this), txDmaWriteEvent(this), 1046657Snate@binkert.org dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free), 1056657Snate@binkert.org txDelay(p->tx_delay), rxDelay(p->rx_delay), 1066657Snate@binkert.org rxKickTick(0), txKickTick(0), 1076657Snate@binkert.org txEvent(this), rxFilterEnable(p->rx_filter), acceptBroadcast(false), 1086657Snate@binkert.org acceptMulticast(false), acceptUnicast(false), 1096657Snate@binkert.org acceptPerfect(false), acceptArp(false), 1106657Snate@binkert.org physmem(p->pmem), intrTick(0), cpuPendingIntr(false), 1116657Snate@binkert.org intrEvent(0), interface(0) 1126657Snate@binkert.org{ 1136657Snate@binkert.org if (p->header_bus) { 1146657Snate@binkert.org pioInterface = newPioInterface(name(), p->hier, 1156657Snate@binkert.org p->header_bus, this, 1166779SBrad.Beckmann@amd.com &NSGigE::cacheAccess); 1176657Snate@binkert.org 1186657Snate@binkert.org pioLatency = p->pio_latency * p->header_bus->clockRatio; 1196657Snate@binkert.org 1206657Snate@binkert.org if (p->payload_bus) 1216657Snate@binkert.org dmaInterface = new DMAInterface<Bus>(name() + ".dma", 1226657Snate@binkert.org p->header_bus, 1236657Snate@binkert.org p->payload_bus, 1); 1246657Snate@binkert.org else 1256657Snate@binkert.org dmaInterface = new DMAInterface<Bus>(name() + ".dma", 12610972Sdavid.hashe@amd.com p->header_bus, 12710972Sdavid.hashe@amd.com p->header_bus, 1); 12810972Sdavid.hashe@amd.com } else if (p->payload_bus) { 1299104Shestness@cs.utexas.edu pioInterface = newPioInterface(name(), p->hier, 1309104Shestness@cs.utexas.edu p->payload_bus, this, 
1319104Shestness@cs.utexas.edu &NSGigE::cacheAccess); 1329104Shestness@cs.utexas.edu 1336657Snate@binkert.org pioLatency = p->pio_latency * p->payload_bus->clockRatio; 1346657Snate@binkert.org 1356657Snate@binkert.org dmaInterface = new DMAInterface<Bus>(name() + ".dma", 1366657Snate@binkert.org p->payload_bus, 1376657Snate@binkert.org p->payload_bus, 1); 1386657Snate@binkert.org } 1396657Snate@binkert.org 1406657Snate@binkert.org 1416657Snate@binkert.org intrDelay = US2Ticks(p->intr_delay); 1426657Snate@binkert.org dmaReadDelay = p->dma_read_delay; 1436657Snate@binkert.org dmaWriteDelay = p->dma_write_delay; 1446657Snate@binkert.org dmaReadFactor = p->dma_read_factor; 1456657Snate@binkert.org dmaWriteFactor = p->dma_write_factor; 14610307Snilay@cs.wisc.edu 1476657Snate@binkert.org regsReset(); 1486657Snate@binkert.org memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN); 1497839Snilay@cs.wisc.edu} 1507839Snilay@cs.wisc.edu 1517839Snilay@cs.wisc.eduNSGigE::~NSGigE() 1527839Snilay@cs.wisc.edu{} 1537839Snilay@cs.wisc.edu 1547839Snilay@cs.wisc.eduvoid 1557839Snilay@cs.wisc.eduNSGigE::regStats() 1567839Snilay@cs.wisc.edu{ 1577839Snilay@cs.wisc.edu txBytes 1587839Snilay@cs.wisc.edu .name(name() + ".txBytes") 15910968Sdavid.hashe@amd.com .desc("Bytes Transmitted") 16010968Sdavid.hashe@amd.com .prereq(txBytes) 16110968Sdavid.hashe@amd.com ; 16210968Sdavid.hashe@amd.com 16310968Sdavid.hashe@amd.com rxBytes 16410968Sdavid.hashe@amd.com .name(name() + ".rxBytes") 16510968Sdavid.hashe@amd.com .desc("Bytes Received") 1667839Snilay@cs.wisc.edu .prereq(rxBytes) 1676657Snate@binkert.org ; 1686657Snate@binkert.org 1696657Snate@binkert.org txPackets 1706657Snate@binkert.org .name(name() + ".txPackets") 1716657Snate@binkert.org .desc("Number of Packets Transmitted") 1726657Snate@binkert.org .prereq(txBytes) 1736657Snate@binkert.org ; 1746657Snate@binkert.org 1756657Snate@binkert.org rxPackets 1766657Snate@binkert.org .name(name() + ".rxPackets") 1776657Snate@binkert.org 
.desc("Number of Packets Received") 1786657Snate@binkert.org .prereq(rxBytes) 1796657Snate@binkert.org ; 1806657Snate@binkert.org 1816657Snate@binkert.org txIpChecksums 1826657Snate@binkert.org .name(name() + ".txIpChecksums") 1836657Snate@binkert.org .desc("Number of tx IP Checksums done by device") 1846657Snate@binkert.org .precision(0) 1856657Snate@binkert.org .prereq(txBytes) 1866657Snate@binkert.org ; 1876657Snate@binkert.org 1886657Snate@binkert.org rxIpChecksums 1896657Snate@binkert.org .name(name() + ".rxIpChecksums") 1906657Snate@binkert.org .desc("Number of rx IP Checksums done by device") 1916657Snate@binkert.org .precision(0) 1926657Snate@binkert.org .prereq(rxBytes) 1936657Snate@binkert.org ; 1946657Snate@binkert.org 1956657Snate@binkert.org txTcpChecksums 1966657Snate@binkert.org .name(name() + ".txTcpChecksums") 19710963Sdavid.hashe@amd.com .desc("Number of tx TCP Checksums done by device") 19810963Sdavid.hashe@amd.com .precision(0) 19910963Sdavid.hashe@amd.com .prereq(txBytes) 20010963Sdavid.hashe@amd.com ; 20110963Sdavid.hashe@amd.com 20210963Sdavid.hashe@amd.com rxTcpChecksums 20311095Snilay@cs.wisc.edu .name(name() + ".rxTcpChecksums") 20410963Sdavid.hashe@amd.com .desc("Number of rx TCP Checksums done by device") 20510963Sdavid.hashe@amd.com .precision(0) 20610963Sdavid.hashe@amd.com .prereq(rxBytes) 20710963Sdavid.hashe@amd.com ; 20810963Sdavid.hashe@amd.com 20910963Sdavid.hashe@amd.com txUdpChecksums 21010963Sdavid.hashe@amd.com .name(name() + ".txUdpChecksums") 21110963Sdavid.hashe@amd.com .desc("Number of tx UDP Checksums done by device") 2129219Spower.jg@gmail.com .precision(0) 2136877Ssteve.reinhardt@amd.com .prereq(txBytes) 2146657Snate@binkert.org ; 2159219Spower.jg@gmail.com 2166657Snate@binkert.org rxUdpChecksums 2179219Spower.jg@gmail.com .name(name() + ".rxUdpChecksums") 2186657Snate@binkert.org .desc("Number of rx UDP Checksums done by device") 2196877Ssteve.reinhardt@amd.com .precision(0) 2206999Snate@binkert.org .prereq(rxBytes) 
2216877Ssteve.reinhardt@amd.com ; 22210308Snilay@cs.wisc.edu 2236877Ssteve.reinhardt@amd.com descDmaReads 2246877Ssteve.reinhardt@amd.com .name(name() + ".descDMAReads") 22510308Snilay@cs.wisc.edu .desc("Number of descriptors the device read w/ DMA") 2266877Ssteve.reinhardt@amd.com .precision(0) 2276877Ssteve.reinhardt@amd.com ; 2286877Ssteve.reinhardt@amd.com 2296877Ssteve.reinhardt@amd.com descDmaWrites 2306877Ssteve.reinhardt@amd.com .name(name() + ".descDMAWrites") 2316877Ssteve.reinhardt@amd.com .desc("Number of descriptors the device wrote w/ DMA") 2326877Ssteve.reinhardt@amd.com .precision(0) 2339338SAndreas.Sandberg@arm.com ; 2346877Ssteve.reinhardt@amd.com 2356877Ssteve.reinhardt@amd.com descDmaRdBytes 2366877Ssteve.reinhardt@amd.com .name(name() + ".descDmaReadBytes") 2376877Ssteve.reinhardt@amd.com .desc("number of descriptor bytes read w/ DMA") 23810308Snilay@cs.wisc.edu .precision(0) 23910308Snilay@cs.wisc.edu ; 24010308Snilay@cs.wisc.edu 24110308Snilay@cs.wisc.edu descDmaWrBytes 24211084Snilay@cs.wisc.edu .name(name() + ".descDmaWriteBytes") 2436882SBrad.Beckmann@amd.com .desc("number of descriptor bytes write w/ DMA") 24410308Snilay@cs.wisc.edu .precision(0) 24510308Snilay@cs.wisc.edu ; 2466882SBrad.Beckmann@amd.com 2476882SBrad.Beckmann@amd.com txBandwidth 2486882SBrad.Beckmann@amd.com .name(name() + ".txBandwidth") 2496882SBrad.Beckmann@amd.com .desc("Transmit Bandwidth (bits/s)") 25011021Sjthestness@gmail.com .precision(0) 2516877Ssteve.reinhardt@amd.com .prereq(txBytes) 2526877Ssteve.reinhardt@amd.com ; 25310917Sbrandon.potter@amd.com 2546877Ssteve.reinhardt@amd.com rxBandwidth 2556657Snate@binkert.org .name(name() + ".rxBandwidth") 2566657Snate@binkert.org .desc("Receive Bandwidth (bits/s)") 2576999Snate@binkert.org .precision(0) 2586657Snate@binkert.org .prereq(rxBytes) 2596657Snate@binkert.org ; 2606657Snate@binkert.org 2616657Snate@binkert.org totBandwidth 2627007Snate@binkert.org .name(name() + ".totBandwidth") 2636657Snate@binkert.org 
.desc("Total Bandwidth (bits/s)") 2646657Snate@binkert.org .precision(0) 2656657Snate@binkert.org .prereq(totBytes) 2666657Snate@binkert.org ; 2676657Snate@binkert.org 2687007Snate@binkert.org totPackets 2697007Snate@binkert.org .name(name() + ".totPackets") 2706657Snate@binkert.org .desc("Total Packets") 2717002Snate@binkert.org .precision(0) 2727002Snate@binkert.org .prereq(totBytes) 2737002Snate@binkert.org ; 2747002Snate@binkert.org 2756657Snate@binkert.org totBytes 2766657Snate@binkert.org .name(name() + ".totBytes") 2778229Snate@binkert.org .desc("Total Bytes") 2788229Snate@binkert.org .precision(0) 2798229Snate@binkert.org .prereq(totBytes) 28010972Sdavid.hashe@amd.com ; 2816657Snate@binkert.org 2826657Snate@binkert.org totPacketRate 2836657Snate@binkert.org .name(name() + ".totPPS") 2846657Snate@binkert.org .desc("Total Tranmission Rate (packets/s)") 2856793SBrad.Beckmann@amd.com .precision(0) 2866657Snate@binkert.org .prereq(totBytes) 28710311Snilay@cs.wisc.edu ; 2886657Snate@binkert.org 2896657Snate@binkert.org txPacketRate 2906657Snate@binkert.org .name(name() + ".txPPS") 2917002Snate@binkert.org .desc("Packet Tranmission Rate (packets/s)") 2926657Snate@binkert.org .precision(0) 2937007Snate@binkert.org .prereq(txBytes) 2947007Snate@binkert.org ; 2959271Snilay@cs.wisc.edu 2966877Ssteve.reinhardt@amd.com rxPacketRate 2976877Ssteve.reinhardt@amd.com .name(name() + ".rxPPS") 2986657Snate@binkert.org .desc("Packet Reception Rate (packets/s)") 2996877Ssteve.reinhardt@amd.com .precision(0) 30010311Snilay@cs.wisc.edu .prereq(rxBytes) 30111084Snilay@cs.wisc.edu ; 30211084Snilay@cs.wisc.edu 30311021Sjthestness@gmail.com postedSwi 3049745Snilay@cs.wisc.edu .name(name() + ".postedSwi") 3057002Snate@binkert.org .desc("number of software interrupts posted to CPU") 3066657Snate@binkert.org .precision(0) 30710012Snilay@cs.wisc.edu ; 3089745Snilay@cs.wisc.edu 3099745Snilay@cs.wisc.edu totalSwi 3109745Snilay@cs.wisc.edu .name(name() + ".totalSwi") 
3118683Snilay@cs.wisc.edu .desc("number of total Swi written to ISR") 31211308Santhony.gutierrez@amd.com .precision(0) 31311309Sdavid.hashe@amd.com ; 3147007Snate@binkert.org 31510524Snilay@cs.wisc.edu coalescedSwi 3169302Snilay@cs.wisc.edu .name(name() + ".coalescedSwi") 3179745Snilay@cs.wisc.edu .desc("average number of Swi's coalesced into each post") 3189745Snilay@cs.wisc.edu .precision(0) 31911061Snilay@cs.wisc.edu ; 3209745Snilay@cs.wisc.edu 32111061Snilay@cs.wisc.edu postedRxIdle 3229745Snilay@cs.wisc.edu .name(name() + ".postedRxIdle") 3236657Snate@binkert.org .desc("number of rxIdle interrupts posted to CPU") 3246657Snate@binkert.org .precision(0) 3256657Snate@binkert.org ; 3266657Snate@binkert.org 3276657Snate@binkert.org totalRxIdle 3286657Snate@binkert.org .name(name() + ".totalRxIdle") 3296882SBrad.Beckmann@amd.com .desc("number of total RxIdle written to ISR") 3306882SBrad.Beckmann@amd.com .precision(0) 3316882SBrad.Beckmann@amd.com ; 3326882SBrad.Beckmann@amd.com 3336657Snate@binkert.org coalescedRxIdle 3346657Snate@binkert.org .name(name() + ".coalescedRxIdle") 3357007Snate@binkert.org .desc("average number of RxIdle's coalesced into each post") 3367839Snilay@cs.wisc.edu .precision(0) 3377839Snilay@cs.wisc.edu ; 3387839Snilay@cs.wisc.edu 3397839Snilay@cs.wisc.edu postedRxOk 3407839Snilay@cs.wisc.edu .name(name() + ".postedRxOk") 3417839Snilay@cs.wisc.edu .desc("number of RxOk interrupts posted to CPU") 3427839Snilay@cs.wisc.edu .precision(0) 3437839Snilay@cs.wisc.edu ; 3447839Snilay@cs.wisc.edu 3457839Snilay@cs.wisc.edu totalRxOk 3467839Snilay@cs.wisc.edu .name(name() + ".totalRxOk") 3477839Snilay@cs.wisc.edu .desc("number of total RxOk written to ISR") 34811025Snilay@cs.wisc.edu .precision(0) 3497007Snate@binkert.org ; 3507007Snate@binkert.org 3517007Snate@binkert.org coalescedRxOk 3527007Snate@binkert.org .name(name() + ".coalescedRxOk") 3537839Snilay@cs.wisc.edu .desc("average number of RxOk's coalesced into each post") 3547839Snilay@cs.wisc.edu 
.precision(0) 3557839Snilay@cs.wisc.edu ; 3567839Snilay@cs.wisc.edu 3577839Snilay@cs.wisc.edu postedRxDesc 3587839Snilay@cs.wisc.edu .name(name() + ".postedRxDesc") 3597839Snilay@cs.wisc.edu .desc("number of RxDesc interrupts posted to CPU") 3607839Snilay@cs.wisc.edu .precision(0) 3617839Snilay@cs.wisc.edu ; 3627839Snilay@cs.wisc.edu 3637839Snilay@cs.wisc.edu totalRxDesc 3647839Snilay@cs.wisc.edu .name(name() + ".totalRxDesc") 36511025Snilay@cs.wisc.edu .desc("number of total RxDesc written to ISR") 3667007Snate@binkert.org .precision(0) 3679745Snilay@cs.wisc.edu ; 3689745Snilay@cs.wisc.edu 3699745Snilay@cs.wisc.edu coalescedRxDesc 3709745Snilay@cs.wisc.edu .name(name() + ".coalescedRxDesc") 3719745Snilay@cs.wisc.edu .desc("average number of RxDesc's coalesced into each post") 3729745Snilay@cs.wisc.edu .precision(0) 3736657Snate@binkert.org ; 3747007Snate@binkert.org 3756657Snate@binkert.org postedTxOk 3766657Snate@binkert.org .name(name() + ".postedTxOk") 3776657Snate@binkert.org .desc("number of TxOk interrupts posted to CPU") 3786657Snate@binkert.org .precision(0) 3796657Snate@binkert.org ; 3806657Snate@binkert.org 3816657Snate@binkert.org totalTxOk 3826657Snate@binkert.org .name(name() + ".totalTxOk") 3837839Snilay@cs.wisc.edu .desc("number of total TxOk written to ISR") 3847839Snilay@cs.wisc.edu .precision(0) 3857839Snilay@cs.wisc.edu ; 3867839Snilay@cs.wisc.edu 3877839Snilay@cs.wisc.edu coalescedTxOk 3887839Snilay@cs.wisc.edu .name(name() + ".coalescedTxOk") 3897839Snilay@cs.wisc.edu .desc("average number of TxOk's coalesced into each post") 3907839Snilay@cs.wisc.edu .precision(0) 3917839Snilay@cs.wisc.edu ; 3927839Snilay@cs.wisc.edu 3937839Snilay@cs.wisc.edu postedTxIdle 3947839Snilay@cs.wisc.edu .name(name() + ".postedTxIdle") 3957839Snilay@cs.wisc.edu .desc("number of TxIdle interrupts posted to CPU") 3967839Snilay@cs.wisc.edu .precision(0) 3977839Snilay@cs.wisc.edu ; 3987839Snilay@cs.wisc.edu 39910121Snilay@cs.wisc.edu totalTxIdle 4006657Snate@binkert.org 
.name(name() + ".totalTxIdle") 4016657Snate@binkert.org .desc("number of total TxIdle written to ISR") 4026657Snate@binkert.org .precision(0) 4036657Snate@binkert.org ; 4047839Snilay@cs.wisc.edu 4057839Snilay@cs.wisc.edu coalescedTxIdle 4067839Snilay@cs.wisc.edu .name(name() + ".coalescedTxIdle") 40710121Snilay@cs.wisc.edu .desc("average number of TxIdle's coalesced into each post") 40810121Snilay@cs.wisc.edu .precision(0) 40911025Snilay@cs.wisc.edu ; 4107839Snilay@cs.wisc.edu 4117839Snilay@cs.wisc.edu postedTxDesc 4127839Snilay@cs.wisc.edu .name(name() + ".postedTxDesc") 41310121Snilay@cs.wisc.edu .desc("number of TxDesc interrupts posted to CPU") 41411025Snilay@cs.wisc.edu .precision(0) 4157839Snilay@cs.wisc.edu ; 4167839Snilay@cs.wisc.edu 4177839Snilay@cs.wisc.edu totalTxDesc 41810121Snilay@cs.wisc.edu .name(name() + ".totalTxDesc") 41911025Snilay@cs.wisc.edu .desc("number of total TxDesc written to ISR") 4207839Snilay@cs.wisc.edu .precision(0) 4217839Snilay@cs.wisc.edu ; 4227839Snilay@cs.wisc.edu 42311025Snilay@cs.wisc.edu coalescedTxDesc 4246657Snate@binkert.org .name(name() + ".coalescedTxDesc") 4256657Snate@binkert.org .desc("average number of TxDesc's coalesced into each post") 4266657Snate@binkert.org .precision(0) 4276657Snate@binkert.org ; 4287007Snate@binkert.org 4296657Snate@binkert.org postedRxOrn 4306657Snate@binkert.org .name(name() + ".postedRxOrn") 4319273Snilay@cs.wisc.edu .desc("number of RxOrn posted to CPU") 43210305Snilay@cs.wisc.edu .precision(0) 4336657Snate@binkert.org ; 4346657Snate@binkert.org 4356657Snate@binkert.org totalRxOrn 4367007Snate@binkert.org .name(name() + ".totalRxOrn") 4376657Snate@binkert.org .desc("number of total RxOrn written to ISR") 4386657Snate@binkert.org .precision(0) 4399219Spower.jg@gmail.com ; 4406657Snate@binkert.org 4416657Snate@binkert.org coalescedRxOrn 4426999Snate@binkert.org .name(name() + ".coalescedRxOrn") 4436657Snate@binkert.org .desc("average number of RxOrn's coalesced into each post") 
4446657Snate@binkert.org .precision(0) 4456657Snate@binkert.org ; 4466657Snate@binkert.org 4477007Snate@binkert.org coalescedTotal 4486657Snate@binkert.org .name(name() + ".coalescedTotal") 4496657Snate@binkert.org .desc("average number of interrupts coalesced into each post") 4506657Snate@binkert.org .precision(0) 4516657Snate@binkert.org ; 4526657Snate@binkert.org 4538946Sandreas.hansson@arm.com postedInterrupts 4548946Sandreas.hansson@arm.com .name(name() + ".postedInterrupts") 4558946Sandreas.hansson@arm.com .desc("number of posts to CPU") 4567832Snate@binkert.org .precision(0) 4577002Snate@binkert.org ; 4587002Snate@binkert.org 45910972Sdavid.hashe@amd.com droppedPackets 4607002Snate@binkert.org .name(name() + ".droppedPackets") 4618641Snate@binkert.org .desc("number of packets dropped") 46211704Santhony.gutierrez@amd.com .precision(0) 4637056Snate@binkert.org ; 46410972Sdavid.hashe@amd.com 46510972Sdavid.hashe@amd.com coalescedSwi = totalSwi / postedInterrupts; 46610972Sdavid.hashe@amd.com coalescedRxIdle = totalRxIdle / postedInterrupts; 46710972Sdavid.hashe@amd.com coalescedRxOk = totalRxOk / postedInterrupts; 46810972Sdavid.hashe@amd.com coalescedRxDesc = totalRxDesc / postedInterrupts; 4696657Snate@binkert.org coalescedTxOk = totalTxOk / postedInterrupts; 4708229Snate@binkert.org coalescedTxIdle = totalTxIdle / postedInterrupts; 4716657Snate@binkert.org coalescedTxDesc = totalTxDesc / postedInterrupts; 4726657Snate@binkert.org coalescedRxOrn = totalRxOrn / postedInterrupts; 47311793Sbrandon.potter@amd.com 47411108Sdavid.hashe@amd.com coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc + totalTxOk 47510972Sdavid.hashe@amd.com + totalTxIdle + totalTxDesc + totalRxOrn) / postedInterrupts; 4769219Spower.jg@gmail.com 4779219Spower.jg@gmail.com txBandwidth = txBytes * Stats::constant(8) / simSeconds; 4789219Spower.jg@gmail.com rxBandwidth = rxBytes * Stats::constant(8) / simSeconds; 4799219Spower.jg@gmail.com totBandwidth = txBandwidth + 
rxBandwidth; 4809219Spower.jg@gmail.com totBytes = txBytes + rxBytes; 4817002Snate@binkert.org totPackets = txPackets + rxPackets; 4827002Snate@binkert.org 4836657Snate@binkert.org txPacketRate = txPackets / simSeconds; 4846657Snate@binkert.org rxPacketRate = rxPackets / simSeconds; 4856657Snate@binkert.org} 4866657Snate@binkert.org 4876657Snate@binkert.org/** 4886793SBrad.Beckmann@amd.com * This is to read the PCI general configuration registers 4896657Snate@binkert.org */ 4906657Snate@binkert.orgvoid 4916657Snate@binkert.orgNSGigE::ReadConfig(int offset, int size, uint8_t *data) 49210121Snilay@cs.wisc.edu{ 49310121Snilay@cs.wisc.edu if (offset < PCI_DEVICE_SPECIFIC) 4946657Snate@binkert.org PciDev::ReadConfig(offset, size, data); 4956877Ssteve.reinhardt@amd.com else 4966877Ssteve.reinhardt@amd.com panic("Device specific PCI config space not implemented!\n"); 4976877Ssteve.reinhardt@amd.com} 4986877Ssteve.reinhardt@amd.com 4996877Ssteve.reinhardt@amd.com/** 5006877Ssteve.reinhardt@amd.com * This is to write to the PCI general configuration registers 5016657Snate@binkert.org */ 5029745Snilay@cs.wisc.eduvoid 5039745Snilay@cs.wisc.eduNSGigE::WriteConfig(int offset, int size, uint32_t data) 5046657Snate@binkert.org{ 5057007Snate@binkert.org if (offset < PCI_DEVICE_SPECIFIC) 5066657Snate@binkert.org PciDev::WriteConfig(offset, size, data); 5079801Snilay@cs.wisc.edu else 5089801Snilay@cs.wisc.edu panic("Device specific PCI config space not implemented!\n"); 5096657Snate@binkert.org 5109801Snilay@cs.wisc.edu // Need to catch writes to BARs to update the PIO interface 5119801Snilay@cs.wisc.edu switch (offset) { 5129801Snilay@cs.wisc.edu // seems to work fine without all these PCI settings, but i 5137007Snate@binkert.org // put in the IO to double check, an assertion will fail if we 5146657Snate@binkert.org // need to properly implement it 5156877Ssteve.reinhardt@amd.com case PCI_COMMAND: 5166877Ssteve.reinhardt@amd.com if (config.data[offset] & PCI_CMD_IOSE) 
5176657Snate@binkert.org ioEnable = true; 51810078Snilay@cs.wisc.edu else 51910078Snilay@cs.wisc.edu ioEnable = false; 52010121Snilay@cs.wisc.edu 52110121Snilay@cs.wisc.edu#if 0 52210121Snilay@cs.wisc.edu if (config.data[offset] & PCI_CMD_BME) { 5236657Snate@binkert.org bmEnabled = true; 5246657Snate@binkert.org } 5256882SBrad.Beckmann@amd.com else { 5266882SBrad.Beckmann@amd.com bmEnabled = false; 5276882SBrad.Beckmann@amd.com } 52810121Snilay@cs.wisc.edu 52910121Snilay@cs.wisc.edu if (config.data[offset] & PCI_CMD_MSE) { 5306882SBrad.Beckmann@amd.com memEnable = true; 5316877Ssteve.reinhardt@amd.com } 5326882SBrad.Beckmann@amd.com else { 53310308Snilay@cs.wisc.edu memEnable = false; 5346882SBrad.Beckmann@amd.com } 53510308Snilay@cs.wisc.edu#endif 53610311Snilay@cs.wisc.edu break; 53711308Santhony.gutierrez@amd.com 53811308Santhony.gutierrez@amd.com case PCI0_BASE_ADDR0: 53911308Santhony.gutierrez@amd.com if (BARAddrs[0] != 0) { 54011308Santhony.gutierrez@amd.com if (pioInterface) 54111308Santhony.gutierrez@amd.com pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0])); 54211308Santhony.gutierrez@amd.com 54311308Santhony.gutierrez@amd.com BARAddrs[0] &= EV5::PAddrUncachedMask; 54411308Santhony.gutierrez@amd.com } 54510917Sbrandon.potter@amd.com break; 5469595Snilay@cs.wisc.edu case PCI0_BASE_ADDR1: 5479745Snilay@cs.wisc.edu if (BARAddrs[1] != 0) { 5489745Snilay@cs.wisc.edu if (pioInterface) 5499745Snilay@cs.wisc.edu pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1])); 5509745Snilay@cs.wisc.edu 5519745Snilay@cs.wisc.edu BARAddrs[1] &= EV5::PAddrUncachedMask; 5529745Snilay@cs.wisc.edu } 5539745Snilay@cs.wisc.edu break; 5549745Snilay@cs.wisc.edu } 5559745Snilay@cs.wisc.edu} 5569745Snilay@cs.wisc.edu 5579595Snilay@cs.wisc.edu/** 5586657Snate@binkert.org * This reads the device registers, which are detailed in the NS83820 5596657Snate@binkert.org * spec sheet 5606657Snate@binkert.org */ 5616657Snate@binkert.orgFault 
5627007Snate@binkert.orgNSGigE::read(MemReqPtr &req, uint8_t *data) 56311021Sjthestness@gmail.com{ 56410311Snilay@cs.wisc.edu assert(ioEnable); 56510311Snilay@cs.wisc.edu 56610311Snilay@cs.wisc.edu //The mask is to give you only the offset into the device register file 56710311Snilay@cs.wisc.edu Addr daddr = req->paddr & 0xfff; 56810311Snilay@cs.wisc.edu DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n", 56910311Snilay@cs.wisc.edu daddr, req->paddr, req->vaddr, req->size); 57010311Snilay@cs.wisc.edu 57110311Snilay@cs.wisc.edu 57210311Snilay@cs.wisc.edu // there are some reserved registers, you can see ns_gige_reg.h and 57310311Snilay@cs.wisc.edu // the spec sheet for details 57410311Snilay@cs.wisc.edu if (daddr > LAST && daddr <= RESERVED) { 57510311Snilay@cs.wisc.edu panic("Accessing reserved register"); 57610311Snilay@cs.wisc.edu } else if (daddr > RESERVED && daddr <= 0x3FC) { 57711084Snilay@cs.wisc.edu ReadConfig(daddr & 0xff, req->size, data); 57810311Snilay@cs.wisc.edu return No_Fault; 57910311Snilay@cs.wisc.edu } else if (daddr >= MIB_START && daddr <= MIB_END) { 58011021Sjthestness@gmail.com // don't implement all the MIB's. 
hopefully the kernel 58111021Sjthestness@gmail.com // doesn't actually DEPEND upon their values 58210311Snilay@cs.wisc.edu // MIB are just hardware stats keepers 58310311Snilay@cs.wisc.edu uint32_t ® = *(uint32_t *) data; 58410311Snilay@cs.wisc.edu reg = 0; 58510311Snilay@cs.wisc.edu return No_Fault; 58610311Snilay@cs.wisc.edu } else if (daddr > 0x3FC) 58710311Snilay@cs.wisc.edu panic("Something is messed up!\n"); 58810311Snilay@cs.wisc.edu 58910311Snilay@cs.wisc.edu switch (req->size) { 59010311Snilay@cs.wisc.edu case sizeof(uint32_t): 59110311Snilay@cs.wisc.edu { 59210311Snilay@cs.wisc.edu uint32_t ® = *(uint32_t *)data; 59311021Sjthestness@gmail.com 59411021Sjthestness@gmail.com switch (daddr) { 59510311Snilay@cs.wisc.edu case CR: 59610311Snilay@cs.wisc.edu reg = regs.command; 59710311Snilay@cs.wisc.edu //these are supposed to be cleared on a read 59810311Snilay@cs.wisc.edu reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR); 59910311Snilay@cs.wisc.edu break; 60010311Snilay@cs.wisc.edu 60110311Snilay@cs.wisc.edu case CFG: 60210311Snilay@cs.wisc.edu reg = regs.config; 60310311Snilay@cs.wisc.edu break; 60410311Snilay@cs.wisc.edu 6057007Snate@binkert.org case MEAR: 6066657Snate@binkert.org reg = regs.mear; 6077007Snate@binkert.org break; 6086657Snate@binkert.org 6096657Snate@binkert.org case PTSCR: 6106657Snate@binkert.org reg = regs.ptscr; 61110311Snilay@cs.wisc.edu break; 6126657Snate@binkert.org 6136657Snate@binkert.org case ISR: 61410305Snilay@cs.wisc.edu reg = regs.isr; 6156657Snate@binkert.org devIntrClear(ISR_ALL); 6166657Snate@binkert.org break; 6176657Snate@binkert.org 6186657Snate@binkert.org case IMR: 6196657Snate@binkert.org reg = regs.imr; 6206657Snate@binkert.org break; 6216657Snate@binkert.org 6226657Snate@binkert.org case IER: 62311084Snilay@cs.wisc.edu reg = regs.ier; 62411084Snilay@cs.wisc.edu break; 62511084Snilay@cs.wisc.edu 62611084Snilay@cs.wisc.edu case IHR: 62711084Snilay@cs.wisc.edu reg = regs.ihr; 6286657Snate@binkert.org break; 
62911084Snilay@cs.wisc.edu 6306657Snate@binkert.org case TXDP: 6316657Snate@binkert.org reg = regs.txdp; 6326657Snate@binkert.org break; 6337007Snate@binkert.org 6346657Snate@binkert.org case TXDP_HI: 6357007Snate@binkert.org reg = regs.txdp_hi; 6367007Snate@binkert.org break; 6376657Snate@binkert.org 6389366Snilay@cs.wisc.edu case TXCFG: 6399366Snilay@cs.wisc.edu reg = regs.txcfg; 6409366Snilay@cs.wisc.edu break; 6419366Snilay@cs.wisc.edu 6427566SBrad.Beckmann@amd.com case GPIOR: 6437672Snate@binkert.org reg = regs.gpior; 6446657Snate@binkert.org break; 6459465Snilay@cs.wisc.edu 6466657Snate@binkert.org case RXDP: 6476657Snate@binkert.org reg = regs.rxdp; 6486657Snate@binkert.org break; 6497672Snate@binkert.org 6506657Snate@binkert.org case RXDP_HI: 6516657Snate@binkert.org reg = regs.rxdp_hi; 6526657Snate@binkert.org break; 6536657Snate@binkert.org 6546657Snate@binkert.org case RXCFG: 6556657Snate@binkert.org reg = regs.rxcfg; 6566657Snate@binkert.org break; 6576657Snate@binkert.org 6586657Snate@binkert.org case PQCR: 6596657Snate@binkert.org reg = regs.pqcr; 6606657Snate@binkert.org break; 6619745Snilay@cs.wisc.edu 6626657Snate@binkert.org case WCSR: 6636657Snate@binkert.org reg = regs.wcsr; 6649496Snilay@cs.wisc.edu break; 6659496Snilay@cs.wisc.edu 66610012Snilay@cs.wisc.edu case PCR: 6679496Snilay@cs.wisc.edu reg = regs.pcr; 6689496Snilay@cs.wisc.edu break; 6696657Snate@binkert.org 67010121Snilay@cs.wisc.edu // see the spec sheet for how RFCR and RFDR work 6716657Snate@binkert.org // basically, you write to RFCR to tell the machine 6726657Snate@binkert.org // what you want to do next, then you act upon RFDR, 67310305Snilay@cs.wisc.edu // and the device will be prepared b/c of what you 6746657Snate@binkert.org // wrote to RFCR 67511021Sjthestness@gmail.com case RFCR: 67611021Sjthestness@gmail.com reg = regs.rfcr; 67711021Sjthestness@gmail.com break; 67811021Sjthestness@gmail.com 67911021Sjthestness@gmail.com case RFDR: 6808683Snilay@cs.wisc.edu switch 
(regs.rfcr & RFCR_RFADDR) { 6818683Snilay@cs.wisc.edu case 0x000: 68210308Snilay@cs.wisc.edu reg = rom.perfectMatch[1]; 6838683Snilay@cs.wisc.edu reg = reg << 8; 68410308Snilay@cs.wisc.edu reg += rom.perfectMatch[0]; 6858683Snilay@cs.wisc.edu break; 68611309Sdavid.hashe@amd.com case 0x002: 68711309Sdavid.hashe@amd.com reg = rom.perfectMatch[3] << 8; 68811309Sdavid.hashe@amd.com reg += rom.perfectMatch[2]; 68911309Sdavid.hashe@amd.com break; 69011309Sdavid.hashe@amd.com case 0x004: 69111309Sdavid.hashe@amd.com reg = rom.perfectMatch[5] << 8; 69211308Santhony.gutierrez@amd.com reg += rom.perfectMatch[4]; 69311308Santhony.gutierrez@amd.com break; 69411308Santhony.gutierrez@amd.com default: 69511308Santhony.gutierrez@amd.com panic("reading RFDR for something other than PMATCH!\n"); 69611308Santhony.gutierrez@amd.com // didn't implement other RFDR functionality b/c 69711308Santhony.gutierrez@amd.com // driver didn't use it 69811308Santhony.gutierrez@amd.com } 69911308Santhony.gutierrez@amd.com break; 70011308Santhony.gutierrez@amd.com 70111308Santhony.gutierrez@amd.com case SRR: 70211308Santhony.gutierrez@amd.com reg = regs.srr; 70311308Santhony.gutierrez@amd.com break; 70411308Santhony.gutierrez@amd.com 70511308Santhony.gutierrez@amd.com case MIBC: 70611308Santhony.gutierrez@amd.com reg = regs.mibc; 70711308Santhony.gutierrez@amd.com reg &= ~(MIBC_MIBS | MIBC_ACLR); 70811308Santhony.gutierrez@amd.com break; 70911308Santhony.gutierrez@amd.com 71011308Santhony.gutierrez@amd.com case VRCR: 71111308Santhony.gutierrez@amd.com reg = regs.vrcr; 71211308Santhony.gutierrez@amd.com break; 71311308Santhony.gutierrez@amd.com 71411309Sdavid.hashe@amd.com case VTCR: 71511309Sdavid.hashe@amd.com reg = regs.vtcr; 71611309Sdavid.hashe@amd.com break; 71711309Sdavid.hashe@amd.com 71811309Sdavid.hashe@amd.com case VDR: 71911309Sdavid.hashe@amd.com reg = regs.vdr; 72011309Sdavid.hashe@amd.com break; 72111309Sdavid.hashe@amd.com 72211309Sdavid.hashe@amd.com case CCSR: 
72311309Sdavid.hashe@amd.com reg = regs.ccsr; 72411309Sdavid.hashe@amd.com break; 72511309Sdavid.hashe@amd.com 72611309Sdavid.hashe@amd.com case TBICR: 72711309Sdavid.hashe@amd.com reg = regs.tbicr; 72811309Sdavid.hashe@amd.com break; 72911309Sdavid.hashe@amd.com 73011309Sdavid.hashe@amd.com case TBISR: 73111309Sdavid.hashe@amd.com reg = regs.tbisr; 73211309Sdavid.hashe@amd.com break; 73311309Sdavid.hashe@amd.com 73411309Sdavid.hashe@amd.com case TANAR: 73511309Sdavid.hashe@amd.com reg = regs.tanar; 7366657Snate@binkert.org break; 7379745Snilay@cs.wisc.edu 7389745Snilay@cs.wisc.edu case TANLPAR: 7399745Snilay@cs.wisc.edu reg = regs.tanlpar; 7409745Snilay@cs.wisc.edu break; 74110012Snilay@cs.wisc.edu 74210012Snilay@cs.wisc.edu case TANER: 7439745Snilay@cs.wisc.edu reg = regs.taner; 7449745Snilay@cs.wisc.edu break; 7459745Snilay@cs.wisc.edu 7469745Snilay@cs.wisc.edu case TESR: 7479745Snilay@cs.wisc.edu reg = regs.tesr; 74810919Sbrandon.potter@amd.com break; 74910012Snilay@cs.wisc.edu 7509745Snilay@cs.wisc.edu default: 7519745Snilay@cs.wisc.edu panic("reading unimplemented register: addr=%#x", daddr); 7529745Snilay@cs.wisc.edu } 7539745Snilay@cs.wisc.edu 7549745Snilay@cs.wisc.edu DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n", 7559745Snilay@cs.wisc.edu daddr, reg, reg); 7569745Snilay@cs.wisc.edu } 7579745Snilay@cs.wisc.edu break; 7589745Snilay@cs.wisc.edu 7599745Snilay@cs.wisc.edu default: 7609745Snilay@cs.wisc.edu panic("accessing register with invalid size: addr=%#x, size=%d", 7619745Snilay@cs.wisc.edu daddr, req->size); 7629745Snilay@cs.wisc.edu } 7639745Snilay@cs.wisc.edu 7649745Snilay@cs.wisc.edu return No_Fault; 7659745Snilay@cs.wisc.edu} 76610919Sbrandon.potter@amd.com 76710012Snilay@cs.wisc.eduFault 7689745Snilay@cs.wisc.eduNSGigE::write(MemReqPtr &req, const uint8_t *data) 7699745Snilay@cs.wisc.edu{ 7709745Snilay@cs.wisc.edu assert(ioEnable); 7719745Snilay@cs.wisc.edu 7729745Snilay@cs.wisc.edu Addr daddr = req->paddr & 0xfff; 
    DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);

    // (LAST, RESERVED] is an unmapped hole in the register space;
    // (RESERVED, 0x3FC] is forwarded to the PCI config machinery.
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        WriteConfig(daddr & 0xff, req->size, *(uint32_t *)data);
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    // only full 32-bit register writes are modelled; any other access
    // size falls through to the panic at the bottom
    if (req->size == sizeof(uint32_t)) {
        uint32_t reg = *(uint32_t *)data;
        DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);

        switch (daddr) {
          case CR:
            regs.command = reg;
            // disable takes priority over enable for each direction
            if (reg & CR_TXD) {
                txEnable = false;
            } else if (reg & CR_TXE) {
                txEnable = true;

                // the kernel is enabling the transmit machine
                if (txState == txIdle)
                    txKick();
            }

            if (reg & CR_RXD) {
                rxEnable = false;
            } else if (reg & CR_RXE) {
                rxEnable = true;

                if (rxState == rxIdle)
                    rxKick();
            }

            if (reg & CR_TXR)
                txReset();

            if (reg & CR_RXR)
                rxReset();

            if (reg & CR_SWI)
                devIntrPost(ISR_SWI);

            // full chip reset: both state machines plus the register file
            if (reg & CR_RST) {
                txReset();
                rxReset();

                regsReset();
            }
            break;

          case CFG:
            if (reg & CFG_LNKSTS ||
                reg & CFG_SPDSTS ||
                reg & CFG_DUPSTS ||
                reg & CFG_RESERVED ||
                reg & CFG_T64ADDR ||
                reg & CFG_PCI64_DET)
                panic("writing to read-only or reserved CFG bits!\n");

            // NOTE(review): |= means software can set writable CFG bits but
            // never clear them in this model — confirm against the DP83820
            // data sheet if a driver ever relies on clearing one
            regs.config |= reg & ~(CFG_LNKSTS | CFG_SPDSTS | CFG_DUPSTS |
                                   CFG_RESERVED | CFG_T64ADDR | CFG_PCI64_DET);

// all these #if 0's are because i don't THINK the kernel needs to
// have these implemented. if there is a problem relating to one of
// these, you may need to add functionality in.
#if 0
              if (reg & CFG_TBI_EN) ;
              if (reg & CFG_MODE_1000) ;
#endif

            if (reg & CFG_AUTO_1000)
                panic("CFG_AUTO_1000 not implemented!\n");

#if 0
            if (reg & CFG_PINT_DUPSTS ||
                reg & CFG_PINT_LNKSTS ||
                reg & CFG_PINT_SPDSTS)
                ;

            if (reg & CFG_TMRTEST) ;
            if (reg & CFG_MRM_DIS) ;
            if (reg & CFG_MWI_DIS) ;

            if (reg & CFG_T64ADDR)
                panic("CFG_T64ADDR is read only register!\n");

            if (reg & CFG_PCI64_DET)
                panic("CFG_PCI64_DET is read only register!\n");

            if (reg & CFG_DATA64_EN) ;
            if (reg & CFG_M64ADDR) ;
            if (reg & CFG_PHY_RST) ;
            if (reg & CFG_PHY_DIS) ;
#endif

            if (reg & CFG_EXTSTS_EN)
                extstsEnable = true;
            else
                extstsEnable = false;

#if 0
              if (reg & CFG_REQALG) ;
              if (reg & CFG_SB) ;
              if (reg & CFG_POW) ;
              if (reg & CFG_EXD) ;
              if (reg & CFG_PESEL) ;
              if (reg & CFG_BROM_DIS) ;
              if (reg & CFG_EXT_125) ;
              if (reg & CFG_BEM) ;
#endif
            break;

          case MEAR:
            regs.mear = reg;
            // since phy is completely faked, MEAR_MD* don't matter
            // and since the driver never uses MEAR_EE*, they don't
            // matter
#if 0
            if (reg & MEAR_EEDI) ;
            if (reg & MEAR_EEDO) ; // this one is read only
            if (reg & MEAR_EECLK) ;
            if (reg & MEAR_EESEL) ;
            if (reg & MEAR_MDIO) ;
            if (reg & MEAR_MDDIR) ;
            if (reg & MEAR_MDC) ;
#endif
            break;

          case PTSCR:
            regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
            // these control BISTs for various parts of chip - we
            // don't care or do just fake that the BIST is done
            if (reg & PTSCR_RBIST_EN)
                regs.ptscr |= PTSCR_RBIST_DONE;
            if (reg & PTSCR_EEBIST_EN)
                regs.ptscr &= ~PTSCR_EEBIST_EN;
            if (reg & PTSCR_EELOAD_EN)
                regs.ptscr &= ~PTSCR_EELOAD_EN;
            break;

          case ISR: /* writing to the ISR has no effect */
            panic("ISR is a read only register!\n");

          case IMR:
            regs.imr = reg;
            // the mask change may unmask an already-pending interrupt
            devIntrChangeMask();
            break;

          case IER:
            regs.ier = reg;
            break;

          case IHR:
            regs.ihr = reg;
            /* not going to implement real interrupt holdoff */
            break;

          case TXDP:
            // descriptor pointers are dword-aligned; low bits ignored
            regs.txdp = (reg & 0xFFFFFFFC);
            assert(txState == txIdle);
            CTDD = false;
            break;

          case TXDP_HI:
            regs.txdp_hi = reg;
            break;

          case TXCFG:
            regs.txcfg = reg;
#if 0
            if (reg & TXCFG_CSI) ;
            if (reg & TXCFG_HBI) ;
            if (reg & TXCFG_MLB) ;
            if (reg & TXCFG_ATP) ;
            if (reg & TXCFG_ECRETRY) {
                /*
                 * this could easily be implemented, but considering
                 * the network is just a fake pipe, wouldn't make
                 * sense to do this
                 */
            }

            if (reg & TXCFG_BRST_DIS) ;
#endif

#if 0
            /* we handle our own DMA, ignore the kernel's exhortations */
            if (reg & TXCFG_MXDMA) ;
#endif

            // also, we currently don't care about fill/drain
            // thresholds though this may change in the future with
            // more realistic networks or a driver which changes it
            // according to feedback

            break;

          case GPIOR:
            regs.gpior = reg;
            /* these just control general purpose i/o pins, don't matter */
            break;

          case RXDP:
            regs.rxdp = reg;
            CRDD = false;
            break;

          case RXDP_HI:
            regs.rxdp_hi = reg;
            break;

          case RXCFG:
            regs.rxcfg = reg;
#if 0
            if (reg & RXCFG_AEP) ;
            if (reg & RXCFG_ARP) ;
            if (reg & RXCFG_STRIPCRC) ;
            if (reg & RXCFG_RX_RD) ;
            if (reg & RXCFG_ALP) ;
            if (reg & RXCFG_AIRL) ;

            /* we handle our own DMA, ignore what kernel says about it */
            if (reg & RXCFG_MXDMA) ;

            //also, we currently don't care about fill/drain thresholds
            //though this may change in the future with more realistic
            //networks or a driver which changes it according to feedback
            if (reg & (RXCFG_DRTH | RXCFG_DRTH0)) ;
#endif
            break;

          case PQCR:
            /* there is no priority queueing used in the linux 2.6 driver */
            regs.pqcr = reg;
            break;

          case WCSR:
            /* not going to implement wake on LAN */
            regs.wcsr = reg;
            break;

          case PCR:
            /* not going to implement pause control */
            regs.pcr = reg;
            break;

          case RFCR:
            regs.rfcr = reg;

            // latch the filter-control bits into booleans used by the
            // rx filter on every incoming packet
            rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
            acceptBroadcast = (reg & RFCR_AAB) ? true : false;
            acceptMulticast = (reg & RFCR_AAM) ? true : false;
            acceptUnicast = (reg & RFCR_AAU) ? true : false;
            acceptPerfect = (reg & RFCR_APM) ? true : false;
            acceptArp = (reg & RFCR_AARP) ? true : false;

#if 0
            if (reg & RFCR_APAT)
                panic("RFCR_APAT not implemented!\n");
#endif

            if (reg & RFCR_MHEN || reg & RFCR_UHEN)
                panic("hash filtering not implemented!\n");

            if (reg & RFCR_ULM)
                panic("RFCR_ULM not implemented!\n");

            break;

          case RFDR:
            panic("the driver never writes to RFDR, something is wrong!\n");

          case BRAR:
            panic("the driver never uses BRAR, something is wrong!\n");

          case BRDR:
            panic("the driver never uses BRDR, something is wrong!\n");

          case SRR:
            panic("SRR is read only register!\n");

          case MIBC:
            panic("the driver never uses MIBC, something is wrong!\n");

          case VRCR:
            regs.vrcr = reg;
            break;

          case VTCR:
            regs.vtcr = reg;
            break;

          case VDR:
            panic("the driver never uses VDR, something is wrong!\n");
            break;

          case CCSR:
            /* not going to implement clockrun stuff */
            regs.ccsr = reg;
            break;

          case TBICR:
            regs.tbicr = reg;
            if (reg & TBICR_MR_LOOPBACK)
                panic("TBICR_MR_LOOPBACK never used, something wrong!\n");

            // fake phy: auto-negotiation completes instantly, link partner
            // simply mirrors our advertised abilities
            if (reg & TBICR_MR_AN_ENABLE) {
                regs.tanlpar = regs.tanar;
                regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
            }

#if 0
            if (reg & TBICR_MR_RESTART_AN) ;
#endif

            break;

          case TBISR:
            panic("TBISR is read only register!\n");

          case TANAR:
            regs.tanar = reg;
            if (reg & TANAR_PS2)
                panic("this isn't used in driver, something wrong!\n");

            if (reg & TANAR_PS1)
                panic("this isn't used in driver, something wrong!\n");
            break;

          case TANLPAR:
            panic("this should only be written to by the fake phy!\n");

          case TANER:
            panic("TANER is read only register!\n");

          case TESR:
            regs.tesr = reg;
            break;

          default:
            panic("invalid register access daddr=%#x", daddr);
        }
    } else {
        panic("Invalid Request Size");
    }

    return No_Fault;
}

/**
 * Post device-level interrupts: merge the given bits into ISR and, if any
 * of them are unmasked, schedule a CPU interrupt (delayed by intrDelay
 * unless an ISR_NODELAY bit is pending). Unimplemented bits are warned
 * about and dropped; reserved bits are fatal.
 */
void
NSGigE::devIntrPost(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot set a reserved interrupt");

    if (interrupts & ISR_NOIMPL)
        warn("interrupt not implemented %#x\n", interrupts);

    interrupts &= ~ISR_NOIMPL;
    regs.isr |= interrupts;

    // per-cause statistics, counted only for unmasked interrupts
    if (interrupts & regs.imr) {
        if (interrupts & ISR_SWI) {
            totalSwi++;
        }
        if (interrupts & ISR_RXIDLE) {
            totalRxIdle++;
        }
        if (interrupts & ISR_RXOK) {
            totalRxOk++;
        }
        if (interrupts & ISR_RXDESC) {
            totalRxDesc++;
        }
        if (interrupts & ISR_TXOK) {
            totalTxOk++;
        }
        if (interrupts & ISR_TXIDLE) {
            totalTxIdle++;
        }
        if (interrupts & ISR_TXDESC) {
            totalTxDesc++;
        }
        if (interrupts & ISR_RXORN) {
            totalRxOrn++;
        }
    }

    DPRINTF(EthernetIntr,
            "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
            interrupts, regs.isr, regs.imr);

    // anything pending and unmasked? schedule the CPU-side interrupt
    if ((regs.isr & regs.imr)) {
        Tick when = curTick;
        if (!(regs.isr & regs.imr & ISR_NODELAY))
            when += intrDelay;
        cpuIntrPost(when);
    }
}

/* writing this interrupt counting stats inside this means that this function
   is now limited to being used to clear all interrupts upon the kernel
   reading isr and servicing. just telling you in case you were thinking
   of expanding use.
*/
void
NSGigE::devIntrClear(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot clear a reserved interrupt");

    // count the interrupt causes being serviced; this must happen BEFORE
    // regs.isr is modified below, since we test the still-pending bits
    if (regs.isr & regs.imr & ISR_SWI) {
        postedSwi++;
    }
    if (regs.isr & regs.imr & ISR_RXIDLE) {
        postedRxIdle++;
    }
    if (regs.isr & regs.imr & ISR_RXOK) {
        postedRxOk++;
    }
    if (regs.isr & regs.imr & ISR_RXDESC) {
        postedRxDesc++;
    }
    if (regs.isr & regs.imr & ISR_TXOK) {
        postedTxOk++;
    }
    if (regs.isr & regs.imr & ISR_TXIDLE) {
        postedTxIdle++;
    }
    if (regs.isr & regs.imr & ISR_TXDESC) {
        postedTxDesc++;
    }
    if (regs.isr & regs.imr & ISR_RXORN) {
        postedRxOrn++;
    }

    // one "posted interrupt" event regardless of how many causes fired
    if (regs.isr & regs.imr & (ISR_SWI | ISR_RXIDLE | ISR_RXOK | ISR_RXDESC |
                               ISR_TXOK | ISR_TXIDLE | ISR_TXDESC | ISR_RXORN) )
        postedInterrupts++;

    interrupts &= ~ISR_NOIMPL;
    regs.isr &= ~interrupts;

    DPRINTF(EthernetIntr,
            "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
            interrupts, regs.isr,
            regs.imr);

    // drop the CPU interrupt line once nothing unmasked remains pending
    if (!(regs.isr & regs.imr))
        cpuIntrClear();
}

/**
 * React to a change of the interrupt mask: post a CPU interrupt
 * immediately if something unmasked is now pending, otherwise clear it.
 */
void
NSGigE::devIntrChangeMask()
{
    DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
            regs.isr, regs.imr, regs.isr & regs.imr);

    if (regs.isr & regs.imr)
        cpuIntrPost(curTick);
    else
        cpuIntrClear();
}

/**
 * Schedule the event that raises the CPU interrupt line at tick @p when.
 * An earlier request supersedes a later already-scheduled one; a later
 * request is absorbed by the pending earlier one.
 */
void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick);
    assert(intrTick >= curTick || intrTick == 0);
    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    // defensive clamp: never schedule in the past (see @todo above)
    if (intrTick < curTick) {
        debug_break();
        intrTick = curTick;
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    // replace any already-scheduled (later) interrupt event
    if (intrEvent)
        intrEvent->squash();
    intrEvent = new IntrEvent(this, true);
    intrEvent->schedule(intrTick);
}

/**
 * Fired by the scheduled IntrEvent: actually assert the CPU interrupt
 * line, unless one is already asserted.
 */
void
NSGigE::cpuInterrupt()
{
    assert(intrTick == curTick);

    // Whether or not there's a pending interrupt, we don't care about
    // it anymore
    intrEvent = 0;
    intrTick = 0;

    // Don't send an interrupt if there's already one
    if (cpuPendingIntr) {
        DPRINTF(EthernetIntr,
                "would send an interrupt now, but there's already pending\n");
    } else {
        // Send interrupt
        cpuPendingIntr = true;
12937839Snilay@cs.wisc.edu DPRINTF(EthernetIntr, "posting interrupt\n"); 12947839Snilay@cs.wisc.edu intrPost(); 12957839Snilay@cs.wisc.edu } 12967839Snilay@cs.wisc.edu} 12977839Snilay@cs.wisc.edu 12987839Snilay@cs.wisc.eduvoid 12997839Snilay@cs.wisc.eduNSGigE::cpuIntrClear() 130011025Snilay@cs.wisc.edu{ 13016657Snate@binkert.org if (!cpuPendingIntr) 13026657Snate@binkert.org return; 13036657Snate@binkert.org 13046657Snate@binkert.org if (intrEvent) { 13056657Snate@binkert.org intrEvent->squash(); 13066657Snate@binkert.org intrEvent = 0; 13076657Snate@binkert.org } 13086657Snate@binkert.org 13096657Snate@binkert.org intrTick = 0; 13106657Snate@binkert.org 13116657Snate@binkert.org cpuPendingIntr = false; 13126999Snate@binkert.org 13136657Snate@binkert.org DPRINTF(EthernetIntr, "clearing interrupt\n"); 13146657Snate@binkert.org intrClear(); 131510964Sdavid.hashe@amd.com} 131610964Sdavid.hashe@amd.com 131710964Sdavid.hashe@amd.combool 131810964Sdavid.hashe@amd.comNSGigE::cpuIntrPending() const 131910964Sdavid.hashe@amd.com{ return cpuPendingIntr; } 132010964Sdavid.hashe@amd.com 132110964Sdavid.hashe@amd.comvoid 132210964Sdavid.hashe@amd.comNSGigE::txReset() 132310964Sdavid.hashe@amd.com{ 132410964Sdavid.hashe@amd.com 132510964Sdavid.hashe@amd.com DPRINTF(Ethernet, "transmit reset\n"); 13266657Snate@binkert.org 13276657Snate@binkert.org CTDD = false; 13289104Shestness@cs.utexas.edu txEnable = false;; 13296657Snate@binkert.org txFragPtr = 0; 13306657Snate@binkert.org assert(txDescCnt == 0); 13316657Snate@binkert.org txFifo.clear(); 13326657Snate@binkert.org txState = txIdle; 13336657Snate@binkert.org assert(txDmaState == dmaIdle); 133410228Snilay@cs.wisc.edu} 133511111Snilay@cs.wisc.edu 13366657Snate@binkert.orgvoid 13376657Snate@binkert.orgNSGigE::rxReset() 13386657Snate@binkert.org{ 13396657Snate@binkert.org DPRINTF(Ethernet, "receive reset\n"); 13409105SBrad.Beckmann@amd.com 13419105SBrad.Beckmann@amd.com CRDD = false; 13429105SBrad.Beckmann@amd.com assert(rxPktBytes 
 == 0);
    rxEnable = false;
    rxFragPtr = 0;
    assert(rxDescCnt == 0);
    assert(rxDmaState == dmaIdle);
    rxFifo.clear();
    rxState = rxIdle;
}

/**
 * Restore the whole register file to its hardware power-on defaults and
 * clear the cached rx-filter / extended-status booleans derived from it.
 */
void
NSGigE::regsReset()
{
    memset(&regs, 0, sizeof(regs));
    regs.config = CFG_LNKSTS;
    regs.mear = MEAR_MDDIR | MEAR_EEDO;
    regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
                        // fill threshold to 32 bytes
    regs.rxcfg = 0x4; // set drain threshold to 16 bytes
    regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
    regs.mibc = MIBC_FRZ;
    regs.vdr = 0x81; // set the vlan tag type to 802.1q
    regs.tesr = 0xc000; // TBI capable of both full and half duplex

    // shadow booleans that mirror RFCR/CFG bits (all zero after memset)
    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}

/**
 * Perform the actual rx DMA read from physical memory into rxDmaData
 * and return the DMA machinery to idle.
 */
void
NSGigE::rxDmaReadCopy()
{
    assert(rxDmaState == dmaReading);

    physmem->dma_read((uint8_t *)rxDmaData, rxDmaAddr, rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}

/**
 * Start an rx DMA read.
 * @return true if the read completes asynchronously (caller must wait
 *         for rxDmaReadDone / the dma interface callback), false if the
 *         copy happened immediately.
 */
bool
NSGigE::doRxDmaRead()
{
    assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
    rxDmaState = dmaReading;

    // go through the modelled DMA interface unless it is bypassed
    if (dmaInterface && !rxDmaFree) {
        if (dmaInterface->busy())
            rxDmaState = dmaReadWaiting;
        else
            dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
                                &rxDmaReadEvent, true);
        return true;
    }

    // no modelled latency configured: copy right now
    if (dmaReadDelay == 0 && dmaReadFactor == 0) {
        rxDmaReadCopy();
        return false;
    }

    // fixed delay plus a per-64-byte-cacheline factor
    Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
    Tick start = curTick + dmaReadDelay + factor;
    rxDmaReadEvent.schedule(start);
    return true;
}

/**
 * Completion handler for a delayed rx DMA read: do the copy, then kick
 * the state machines that may have been waiting on the DMA unit.
 */
void
NSGigE::rxDmaReadDone()
{
    assert(rxDmaState == dmaReading);
    rxDmaReadCopy();

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}
14256657Snate@binkert.orgvoid 14266657Snate@binkert.orgNSGigE::rxDmaWriteCopy() 14276657Snate@binkert.org{ 14286657Snate@binkert.org assert(rxDmaState == dmaWriting); 14296657Snate@binkert.org 14306657Snate@binkert.org physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen); 14316657Snate@binkert.org rxDmaState = dmaIdle; 14326657Snate@binkert.org 14336657Snate@binkert.org DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n", 14346657Snate@binkert.org rxDmaAddr, rxDmaLen); 14356657Snate@binkert.org DDUMP(EthernetDMA, rxDmaData, rxDmaLen); 14366657Snate@binkert.org} 14376657Snate@binkert.org 14386657Snate@binkert.orgbool 14396657Snate@binkert.orgNSGigE::doRxDmaWrite() 14406657Snate@binkert.org{ 14416657Snate@binkert.org assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting); 14426657Snate@binkert.org rxDmaState = dmaWriting; 14436657Snate@binkert.org 14446657Snate@binkert.org if (dmaInterface && !rxDmaFree) { 14456657Snate@binkert.org if (dmaInterface->busy()) 14466657Snate@binkert.org rxDmaState = dmaWriteWaiting; 14476657Snate@binkert.org else 14486657Snate@binkert.org dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick, 14496657Snate@binkert.org &rxDmaWriteEvent, true); 14506657Snate@binkert.org return true; 14516657Snate@binkert.org } 14526657Snate@binkert.org 14536999Snate@binkert.org if (dmaWriteDelay == 0 && dmaWriteFactor == 0) { 14546657Snate@binkert.org rxDmaWriteCopy(); 14556657Snate@binkert.org return false; 14567007Snate@binkert.org } 14577007Snate@binkert.org 14586657Snate@binkert.org Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor; 14596657Snate@binkert.org Tick start = curTick + dmaWriteDelay + factor; 14606657Snate@binkert.org rxDmaWriteEvent.schedule(start); 14616657Snate@binkert.org return true; 14626657Snate@binkert.org} 14636657Snate@binkert.org 14646657Snate@binkert.orgvoid 14656657Snate@binkert.orgNSGigE::rxDmaWriteDone() 14666657Snate@binkert.org{ 14676657Snate@binkert.org assert(rxDmaState == 
dmaWriting); 14686657Snate@binkert.org rxDmaWriteCopy(); 14696657Snate@binkert.org 14706657Snate@binkert.org // If the transmit state machine has a pending DMA, let it go first 14716657Snate@binkert.org if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting) 14726657Snate@binkert.org txKick(); 14736657Snate@binkert.org 14746657Snate@binkert.org rxKick(); 14756657Snate@binkert.org} 14766657Snate@binkert.org 14776657Snate@binkert.orgvoid 14786657Snate@binkert.orgNSGigE::rxKick() 14796657Snate@binkert.org{ 14806657Snate@binkert.org DPRINTF(EthernetSM, "receive kick rxState=%s (rxBuf.size=%d)\n", 14816657Snate@binkert.org NsRxStateStrings[rxState], rxFifo.size()); 14826657Snate@binkert.org 14836657Snate@binkert.org if (rxKickTick > curTick) { 14846657Snate@binkert.org DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n", 14856657Snate@binkert.org rxKickTick); 14866657Snate@binkert.org return; 14876657Snate@binkert.org } 14886657Snate@binkert.org 14896657Snate@binkert.org next: 14906657Snate@binkert.org switch(rxDmaState) { 14916657Snate@binkert.org case dmaReadWaiting: 14926657Snate@binkert.org if (doRxDmaRead()) 14936657Snate@binkert.org goto exit; 14946657Snate@binkert.org break; 14956657Snate@binkert.org case dmaWriteWaiting: 14966657Snate@binkert.org if (doRxDmaWrite()) 14976657Snate@binkert.org goto exit; 14986657Snate@binkert.org break; 14996657Snate@binkert.org default: 15006657Snate@binkert.org break; 15016657Snate@binkert.org } 15026657Snate@binkert.org 15036657Snate@binkert.org // see state machine from spec for details 15046657Snate@binkert.org // the way this works is, if you finish work on one state and can 15056657Snate@binkert.org // go directly to another, you do that through jumping to the 15066657Snate@binkert.org // label "next". however, if you have intermediate work, like DMA 15076657Snate@binkert.org // so that you can't go to the next state yet, you go to exit and 15086657Snate@binkert.org // exit the loop. 
however, when the DMA is done it will trigger 15096657Snate@binkert.org // an event and come back to this loop. 15106657Snate@binkert.org switch (rxState) { 15116657Snate@binkert.org case rxIdle: 15126657Snate@binkert.org if (!rxEnable) { 15136657Snate@binkert.org DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n"); 15146657Snate@binkert.org goto exit; 15156657Snate@binkert.org } 15166657Snate@binkert.org 15176657Snate@binkert.org if (CRDD) { 15186657Snate@binkert.org rxState = rxDescRefr; 15196657Snate@binkert.org 15206657Snate@binkert.org rxDmaAddr = regs.rxdp & 0x3fffffff; 15216657Snate@binkert.org rxDmaData = &rxDescCache + offsetof(ns_desc, link); 15226657Snate@binkert.org rxDmaLen = sizeof(rxDescCache.link); 15236657Snate@binkert.org rxDmaFree = dmaDescFree; 15246657Snate@binkert.org 15256657Snate@binkert.org descDmaReads++; 15266657Snate@binkert.org descDmaRdBytes += rxDmaLen; 15276657Snate@binkert.org 15286657Snate@binkert.org if (doRxDmaRead()) 15296657Snate@binkert.org goto exit; 15306657Snate@binkert.org } else { 15316657Snate@binkert.org rxState = rxDescRead; 15326657Snate@binkert.org 15336657Snate@binkert.org rxDmaAddr = regs.rxdp & 0x3fffffff; 15346657Snate@binkert.org rxDmaData = &rxDescCache; 15356657Snate@binkert.org rxDmaLen = sizeof(ns_desc); 15366657Snate@binkert.org rxDmaFree = dmaDescFree; 15376657Snate@binkert.org 15386657Snate@binkert.org descDmaReads++; 15396657Snate@binkert.org descDmaRdBytes += rxDmaLen; 15406657Snate@binkert.org 15416657Snate@binkert.org if (doRxDmaRead()) 15426657Snate@binkert.org goto exit; 15436657Snate@binkert.org } 15446657Snate@binkert.org break; 15456657Snate@binkert.org 15467007Snate@binkert.org case rxDescRefr: 15476657Snate@binkert.org if (rxDmaState != dmaIdle) 15486657Snate@binkert.org goto exit; 15496657Snate@binkert.org 15506657Snate@binkert.org rxState = rxAdvance; 15516657Snate@binkert.org break; 15526657Snate@binkert.org 15536657Snate@binkert.org case rxDescRead: 15547007Snate@binkert.org if 
(rxDmaState != dmaIdle) 15556657Snate@binkert.org goto exit; 15566657Snate@binkert.org 15576657Snate@binkert.org DPRINTF(EthernetDesc, 15586657Snate@binkert.org "rxDescCache: addr=%08x read descriptor\n", 15596657Snate@binkert.org regs.rxdp & 0x3fffffff); 15606657Snate@binkert.org DPRINTF(EthernetDesc, 15616657Snate@binkert.org "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n", 15626657Snate@binkert.org rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts, 15636657Snate@binkert.org rxDescCache.extsts); 15646657Snate@binkert.org 15656657Snate@binkert.org if (rxDescCache.cmdsts & CMDSTS_OWN) { 15666657Snate@binkert.org devIntrPost(ISR_RXIDLE); 15676657Snate@binkert.org rxState = rxIdle; 15686657Snate@binkert.org goto exit; 15696657Snate@binkert.org } else { 157010917Sbrandon.potter@amd.com rxState = rxFifoBlock; 15716657Snate@binkert.org rxFragPtr = rxDescCache.bufptr; 15726657Snate@binkert.org rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK; 15736657Snate@binkert.org } 15746657Snate@binkert.org break; 15756657Snate@binkert.org 15766657Snate@binkert.org case rxFifoBlock: 15776657Snate@binkert.org if (!rxPacket) { 15786657Snate@binkert.org /** 15796657Snate@binkert.org * @todo in reality, we should be able to start processing 15806657Snate@binkert.org * the packet as it arrives, and not have to wait for the 15816657Snate@binkert.org * full packet ot be in the receive fifo. 15826657Snate@binkert.org */ 15836657Snate@binkert.org if (rxFifo.empty()) 15846657Snate@binkert.org goto exit; 15856657Snate@binkert.org 15866657Snate@binkert.org DPRINTF(EthernetSM, "****processing receive of new packet****\n"); 15876657Snate@binkert.org 15886657Snate@binkert.org // If we don't have a packet, grab a new one from the fifo. 
15896657Snate@binkert.org rxPacket = rxFifo.front(); 15906657Snate@binkert.org rxPktBytes = rxPacket->length; 15916657Snate@binkert.org rxPacketBufPtr = rxPacket->data; 15926657Snate@binkert.org 1593#if TRACING_ON 1594 if (DTRACE(Ethernet)) { 1595 IpPtr ip(rxPacket); 1596 if (ip) { 1597 DPRINTF(Ethernet, "ID is %d\n", ip->id()); 1598 TcpPtr tcp(ip); 1599 if (tcp) { 1600 DPRINTF(Ethernet, 1601 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n", 1602 tcp->sport(), tcp->dport(), tcp->seq(), 1603 tcp->ack()); 1604 } 1605 } 1606 } 1607#endif 1608 1609 // sanity check - i think the driver behaves like this 1610 assert(rxDescCnt >= rxPktBytes); 1611 rxFifo.pop(); 1612 } 1613 1614 1615 // dont' need the && rxDescCnt > 0 if driver sanity check 1616 // above holds 1617 if (rxPktBytes > 0) { 1618 rxState = rxFragWrite; 1619 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity 1620 // check holds 1621 rxXferLen = rxPktBytes; 1622 1623 rxDmaAddr = rxFragPtr & 0x3fffffff; 1624 rxDmaData = rxPacketBufPtr; 1625 rxDmaLen = rxXferLen; 1626 rxDmaFree = dmaDataFree; 1627 1628 if (doRxDmaWrite()) 1629 goto exit; 1630 1631 } else { 1632 rxState = rxDescWrite; 1633 1634 //if (rxPktBytes == 0) { /* packet is done */ 1635 assert(rxPktBytes == 0); 1636 DPRINTF(EthernetSM, "done with receiving packet\n"); 1637 1638 rxDescCache.cmdsts |= CMDSTS_OWN; 1639 rxDescCache.cmdsts &= ~CMDSTS_MORE; 1640 rxDescCache.cmdsts |= CMDSTS_OK; 1641 rxDescCache.cmdsts &= 0xffff0000; 1642 rxDescCache.cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE 1643 1644#if 0 1645 /* 1646 * all the driver uses these are for its own stats keeping 1647 * which we don't care about, aren't necessary for 1648 * functionality and doing this would just slow us down. 
1649 * if they end up using this in a later version for 1650 * functional purposes, just undef 1651 */ 1652 if (rxFilterEnable) { 1653 rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK; 1654 const EthAddr &dst = rxFifoFront()->dst(); 1655 if (dst->unicast()) 1656 rxDescCache.cmdsts |= CMDSTS_DEST_SELF; 1657 if (dst->multicast()) 1658 rxDescCache.cmdsts |= CMDSTS_DEST_MULTI; 1659 if (dst->broadcast()) 1660 rxDescCache.cmdsts |= CMDSTS_DEST_MASK; 1661 } 1662#endif 1663 1664 IpPtr ip(rxPacket); 1665 if (extstsEnable && ip) { 1666 rxDescCache.extsts |= EXTSTS_IPPKT; 1667 rxIpChecksums++; 1668 if (cksum(ip) != 0) { 1669 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n"); 1670 rxDescCache.extsts |= EXTSTS_IPERR; 1671 } 1672 TcpPtr tcp(ip); 1673 UdpPtr udp(ip); 1674 if (tcp) { 1675 rxDescCache.extsts |= EXTSTS_TCPPKT; 1676 rxTcpChecksums++; 1677 if (cksum(tcp) != 0) { 1678 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n"); 1679 rxDescCache.extsts |= EXTSTS_TCPERR; 1680 1681 } 1682 } else if (udp) { 1683 rxDescCache.extsts |= EXTSTS_UDPPKT; 1684 rxUdpChecksums++; 1685 if (cksum(udp) != 0) { 1686 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n"); 1687 rxDescCache.extsts |= EXTSTS_UDPERR; 1688 } 1689 } 1690 } 1691 rxPacket = 0; 1692 1693 /* 1694 * the driver seems to always receive into desc buffers 1695 * of size 1514, so you never have a pkt that is split 1696 * into multiple descriptors on the receive side, so 1697 * i don't implement that case, hence the assert above. 
1698 */ 1699 1700 DPRINTF(EthernetDesc, 1701 "rxDescCache: addr=%08x writeback cmdsts extsts\n", 1702 regs.rxdp & 0x3fffffff); 1703 DPRINTF(EthernetDesc, 1704 "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n", 1705 rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts, 1706 rxDescCache.extsts); 1707 1708 rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff; 1709 rxDmaData = &(rxDescCache.cmdsts); 1710 rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts); 1711 rxDmaFree = dmaDescFree; 1712 1713 descDmaWrites++; 1714 descDmaWrBytes += rxDmaLen; 1715 1716 if (doRxDmaWrite()) 1717 goto exit; 1718 } 1719 break; 1720 1721 case rxFragWrite: 1722 if (rxDmaState != dmaIdle) 1723 goto exit; 1724 1725 rxPacketBufPtr += rxXferLen; 1726 rxFragPtr += rxXferLen; 1727 rxPktBytes -= rxXferLen; 1728 1729 rxState = rxFifoBlock; 1730 break; 1731 1732 case rxDescWrite: 1733 if (rxDmaState != dmaIdle) 1734 goto exit; 1735 1736 assert(rxDescCache.cmdsts & CMDSTS_OWN); 1737 1738 assert(rxPacket == 0); 1739 devIntrPost(ISR_RXOK); 1740 1741 if (rxDescCache.cmdsts & CMDSTS_INTR) 1742 devIntrPost(ISR_RXDESC); 1743 1744 if (!rxEnable) { 1745 DPRINTF(EthernetSM, "Halting the RX state machine\n"); 1746 rxState = rxIdle; 1747 goto exit; 1748 } else 1749 rxState = rxAdvance; 1750 break; 1751 1752 case rxAdvance: 1753 if (rxDescCache.link == 0) { 1754 devIntrPost(ISR_RXIDLE); 1755 rxState = rxIdle; 1756 CRDD = true; 1757 goto exit; 1758 } else { 1759 rxState = rxDescRead; 1760 regs.rxdp = rxDescCache.link; 1761 CRDD = false; 1762 1763 rxDmaAddr = regs.rxdp & 0x3fffffff; 1764 rxDmaData = &rxDescCache; 1765 rxDmaLen = sizeof(ns_desc); 1766 rxDmaFree = dmaDescFree; 1767 1768 if (doRxDmaRead()) 1769 goto exit; 1770 } 1771 break; 1772 1773 default: 1774 panic("Invalid rxState!"); 1775 } 1776 1777 DPRINTF(EthernetSM, "entering next rxState=%s\n", 1778 NsRxStateStrings[rxState]); 1779 1780 goto next; 1781 1782 exit: 1783 /** 1784 * @todo do we want to schedule a 
future kick? 1785 */ 1786 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n", 1787 NsRxStateStrings[rxState]); 1788} 1789 1790void 1791NSGigE::transmit() 1792{ 1793 if (txFifo.empty()) { 1794 DPRINTF(Ethernet, "nothing to transmit\n"); 1795 return; 1796 } 1797 1798 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n", 1799 txFifo.size()); 1800 if (interface->sendPacket(txFifo.front())) { 1801#if TRACING_ON 1802 if (DTRACE(Ethernet)) { 1803 IpPtr ip(txFifo.front()); 1804 if (ip) { 1805 DPRINTF(Ethernet, "ID is %d\n", ip->id()); 1806 TcpPtr tcp(ip); 1807 if (tcp) { 1808 DPRINTF(Ethernet, 1809 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n", 1810 tcp->sport(), tcp->dport(), tcp->seq(), tcp->ack()); 1811 } 1812 } 1813 } 1814#endif 1815 1816 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length); 1817 txBytes += txFifo.front()->length; 1818 txPackets++; 1819 1820 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n", 1821 txFifo.avail()); 1822 txFifo.pop(); 1823 1824 /* 1825 * normally do a writeback of the descriptor here, and ONLY 1826 * after that is done, send this interrupt. but since our 1827 * stuff never actually fails, just do this interrupt here, 1828 * otherwise the code has to stray from this nice format. 1829 * besides, it's functionally the same. 
1830 */ 1831 devIntrPost(ISR_TXOK); 1832 } 1833 1834 if (!txFifo.empty() && !txEvent.scheduled()) { 1835 DPRINTF(Ethernet, "reschedule transmit\n"); 1836 txEvent.schedule(curTick + 1000); 1837 } 1838} 1839 1840void 1841NSGigE::txDmaReadCopy() 1842{ 1843 assert(txDmaState == dmaReading); 1844 1845 physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen); 1846 txDmaState = dmaIdle; 1847 1848 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n", 1849 txDmaAddr, txDmaLen); 1850 DDUMP(EthernetDMA, txDmaData, txDmaLen); 1851} 1852 1853bool 1854NSGigE::doTxDmaRead() 1855{ 1856 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting); 1857 txDmaState = dmaReading; 1858 1859 if (dmaInterface && !txDmaFree) { 1860 if (dmaInterface->busy()) 1861 txDmaState = dmaReadWaiting; 1862 else 1863 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick, 1864 &txDmaReadEvent, true); 1865 return true; 1866 } 1867 1868 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) { 1869 txDmaReadCopy(); 1870 return false; 1871 } 1872 1873 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor; 1874 Tick start = curTick + dmaReadDelay + factor; 1875 txDmaReadEvent.schedule(start); 1876 return true; 1877} 1878 1879void 1880NSGigE::txDmaReadDone() 1881{ 1882 assert(txDmaState == dmaReading); 1883 txDmaReadCopy(); 1884 1885 // If the receive state machine has a pending DMA, let it go first 1886 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting) 1887 rxKick(); 1888 1889 txKick(); 1890} 1891 1892void 1893NSGigE::txDmaWriteCopy() 1894{ 1895 assert(txDmaState == dmaWriting); 1896 1897 physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen); 1898 txDmaState = dmaIdle; 1899 1900 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n", 1901 txDmaAddr, txDmaLen); 1902 DDUMP(EthernetDMA, txDmaData, txDmaLen); 1903} 1904 1905bool 1906NSGigE::doTxDmaWrite() 1907{ 1908 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting); 1909 txDmaState = dmaWriting; 1910 1911 if 
(dmaInterface && !txDmaFree) { 1912 if (dmaInterface->busy()) 1913 txDmaState = dmaWriteWaiting; 1914 else 1915 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick, 1916 &txDmaWriteEvent, true); 1917 return true; 1918 } 1919 1920 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) { 1921 txDmaWriteCopy(); 1922 return false; 1923 } 1924 1925 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor; 1926 Tick start = curTick + dmaWriteDelay + factor; 1927 txDmaWriteEvent.schedule(start); 1928 return true; 1929} 1930 1931void 1932NSGigE::txDmaWriteDone() 1933{ 1934 assert(txDmaState == dmaWriting); 1935 txDmaWriteCopy(); 1936 1937 // If the receive state machine has a pending DMA, let it go first 1938 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting) 1939 rxKick(); 1940 1941 txKick(); 1942} 1943 1944void 1945NSGigE::txKick() 1946{ 1947 DPRINTF(EthernetSM, "transmit kick txState=%s\n", 1948 NsTxStateStrings[txState]); 1949 1950 if (txKickTick > curTick) { 1951 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n", 1952 txKickTick); 1953 1954 return; 1955 } 1956 1957 next: 1958 switch(txDmaState) { 1959 case dmaReadWaiting: 1960 if (doTxDmaRead()) 1961 goto exit; 1962 break; 1963 case dmaWriteWaiting: 1964 if (doTxDmaWrite()) 1965 goto exit; 1966 break; 1967 default: 1968 break; 1969 } 1970 1971 switch (txState) { 1972 case txIdle: 1973 if (!txEnable) { 1974 DPRINTF(EthernetSM, "Transmit disabled. 
Nothing to do.\n"); 1975 goto exit; 1976 } 1977 1978 if (CTDD) { 1979 txState = txDescRefr; 1980 1981 txDmaAddr = regs.txdp & 0x3fffffff; 1982 txDmaData = &txDescCache + offsetof(ns_desc, link); 1983 txDmaLen = sizeof(txDescCache.link); 1984 txDmaFree = dmaDescFree; 1985 1986 descDmaReads++; 1987 descDmaRdBytes += txDmaLen; 1988 1989 if (doTxDmaRead()) 1990 goto exit; 1991 1992 } else { 1993 txState = txDescRead; 1994 1995 txDmaAddr = regs.txdp & 0x3fffffff; 1996 txDmaData = &txDescCache; 1997 txDmaLen = sizeof(ns_desc); 1998 txDmaFree = dmaDescFree; 1999 2000 descDmaReads++; 2001 descDmaRdBytes += txDmaLen; 2002 2003 if (doTxDmaRead()) 2004 goto exit; 2005 } 2006 break; 2007 2008 case txDescRefr: 2009 if (txDmaState != dmaIdle) 2010 goto exit; 2011 2012 txState = txAdvance; 2013 break; 2014 2015 case txDescRead: 2016 if (txDmaState != dmaIdle) 2017 goto exit; 2018 2019 DPRINTF(EthernetDesc, 2020 "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n", 2021 txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts, 2022 txDescCache.extsts); 2023 2024 if (txDescCache.cmdsts & CMDSTS_OWN) { 2025 txState = txFifoBlock; 2026 txFragPtr = txDescCache.bufptr; 2027 txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK; 2028 } else { 2029 devIntrPost(ISR_TXIDLE); 2030 txState = txIdle; 2031 goto exit; 2032 } 2033 break; 2034 2035 case txFifoBlock: 2036 if (!txPacket) { 2037 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n"); 2038 txPacket = new PacketData(16384); 2039 txPacketBufPtr = txPacket->data; 2040 } 2041 2042 if (txDescCnt == 0) { 2043 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n"); 2044 if (txDescCache.cmdsts & CMDSTS_MORE) { 2045 DPRINTF(EthernetSM, "there are more descriptors to come\n"); 2046 txState = txDescWrite; 2047 2048 txDescCache.cmdsts &= ~CMDSTS_OWN; 2049 2050 txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts); 2051 txDmaAddr &= 0x3fffffff; 2052 txDmaData = &(txDescCache.cmdsts); 2053 txDmaLen = 
sizeof(txDescCache.cmdsts); 2054 txDmaFree = dmaDescFree; 2055 2056 if (doTxDmaWrite()) 2057 goto exit; 2058 2059 } else { /* this packet is totally done */ 2060 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n"); 2061 /* deal with the the packet that just finished */ 2062 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) { 2063 IpPtr ip(txPacket); 2064 if (txDescCache.extsts & EXTSTS_UDPPKT) { 2065 UdpPtr udp(ip); 2066 udp->sum(0); 2067 udp->sum(cksum(udp)); 2068 txUdpChecksums++; 2069 } else if (txDescCache.extsts & EXTSTS_TCPPKT) { 2070 TcpPtr tcp(ip); 2071 tcp->sum(0); 2072 tcp->sum(cksum(tcp)); 2073 txTcpChecksums++; 2074 } 2075 if (txDescCache.extsts & EXTSTS_IPPKT) { 2076 ip->sum(0); 2077 ip->sum(cksum(ip)); 2078 txIpChecksums++; 2079 } 2080 } 2081 2082 txPacket->length = txPacketBufPtr - txPacket->data; 2083 // this is just because the receive can't handle a 2084 // packet bigger want to make sure 2085 assert(txPacket->length <= 1514); 2086#ifndef NDEBUG 2087 bool success = 2088#endif 2089 txFifo.push(txPacket); 2090 assert(success); 2091 2092 /* 2093 * this following section is not tqo spec, but 2094 * functionally shouldn't be any different. normally, 2095 * the chip will wait til the transmit has occurred 2096 * before writing back the descriptor because it has 2097 * to wait to see that it was successfully transmitted 2098 * to decide whether to set CMDSTS_OK or not. 
2099 * however, in the simulator since it is always 2100 * successfully transmitted, and writing it exactly to 2101 * spec would complicate the code, we just do it here 2102 */ 2103 2104 txDescCache.cmdsts &= ~CMDSTS_OWN; 2105 txDescCache.cmdsts |= CMDSTS_OK; 2106 2107 DPRINTF(EthernetDesc, 2108 "txDesc writeback: cmdsts=%08x extsts=%08x\n", 2109 txDescCache.cmdsts, txDescCache.extsts); 2110 2111 txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts); 2112 txDmaAddr &= 0x3fffffff; 2113 txDmaData = &(txDescCache.cmdsts); 2114 txDmaLen = sizeof(txDescCache.cmdsts) + 2115 sizeof(txDescCache.extsts); 2116 txDmaFree = dmaDescFree; 2117 2118 descDmaWrites++; 2119 descDmaWrBytes += txDmaLen; 2120 2121 transmit(); 2122 txPacket = 0; 2123 2124 if (!txEnable) { 2125 DPRINTF(EthernetSM, "halting TX state machine\n"); 2126 txState = txIdle; 2127 goto exit; 2128 } else 2129 txState = txAdvance; 2130 2131 if (doTxDmaWrite()) 2132 goto exit; 2133 } 2134 } else { 2135 DPRINTF(EthernetSM, "this descriptor isn't done yet\n"); 2136 if (!txFifo.full()) { 2137 txState = txFragRead; 2138 2139 /* 2140 * The number of bytes transferred is either whatever 2141 * is left in the descriptor (txDescCnt), or if there 2142 * is not enough room in the fifo, just whatever room 2143 * is left in the fifo 2144 */ 2145 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail()); 2146 2147 txDmaAddr = txFragPtr & 0x3fffffff; 2148 txDmaData = txPacketBufPtr; 2149 txDmaLen = txXferLen; 2150 txDmaFree = dmaDataFree; 2151 2152 if (doTxDmaRead()) 2153 goto exit; 2154 } else { 2155 txState = txFifoBlock; 2156 transmit(); 2157 2158 goto exit; 2159 } 2160 2161 } 2162 break; 2163 2164 case txFragRead: 2165 if (txDmaState != dmaIdle) 2166 goto exit; 2167 2168 txPacketBufPtr += txXferLen; 2169 txFragPtr += txXferLen; 2170 txDescCnt -= txXferLen; 2171 txFifo.reserve(txXferLen); 2172 2173 txState = txFifoBlock; 2174 break; 2175 2176 case txDescWrite: 2177 if (txDmaState != dmaIdle) 2178 goto exit; 2179 2180 if 
(txDescCache.cmdsts & CMDSTS_INTR) 2181 devIntrPost(ISR_TXDESC); 2182 2183 txState = txAdvance; 2184 break; 2185 2186 case txAdvance: 2187 if (txDescCache.link == 0) { 2188 devIntrPost(ISR_TXIDLE); 2189 txState = txIdle; 2190 goto exit; 2191 } else { 2192 txState = txDescRead; 2193 regs.txdp = txDescCache.link; 2194 CTDD = false; 2195 2196 txDmaAddr = txDescCache.link & 0x3fffffff; 2197 txDmaData = &txDescCache; 2198 txDmaLen = sizeof(ns_desc); 2199 txDmaFree = dmaDescFree; 2200 2201 if (doTxDmaRead()) 2202 goto exit; 2203 } 2204 break; 2205 2206 default: 2207 panic("invalid state"); 2208 } 2209 2210 DPRINTF(EthernetSM, "entering next txState=%s\n", 2211 NsTxStateStrings[txState]); 2212 2213 goto next; 2214 2215 exit: 2216 /** 2217 * @todo do we want to schedule a future kick? 2218 */ 2219 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n", 2220 NsTxStateStrings[txState]); 2221} 2222 2223void 2224NSGigE::transferDone() 2225{ 2226 if (txFifo.empty()) { 2227 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n"); 2228 return; 2229 } 2230 2231 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n"); 2232 2233 if (txEvent.scheduled()) 2234 txEvent.reschedule(curTick + 1); 2235 else 2236 txEvent.schedule(curTick + 1); 2237} 2238 2239bool 2240NSGigE::rxFilter(const PacketPtr &packet) 2241{ 2242 EthPtr eth = packet; 2243 bool drop = true; 2244 string type; 2245 2246 const EthAddr &dst = eth->dst(); 2247 if (dst.unicast()) { 2248 // If we're accepting all unicast addresses 2249 if (acceptUnicast) 2250 drop = false; 2251 2252 // If we make a perfect match 2253 if (acceptPerfect && dst == rom.perfectMatch) 2254 drop = false; 2255 2256 if (acceptArp && eth->type() == ETH_TYPE_ARP) 2257 drop = false; 2258 2259 } else if (dst.broadcast()) { 2260 // if we're accepting broadcasts 2261 if (acceptBroadcast) 2262 drop = false; 2263 2264 } else if (dst.multicast()) { 2265 // if we're accepting all multicasts 2266 if (acceptMulticast) 2267 drop 
= false; 2268 2269 } 2270 2271 if (drop) { 2272 DPRINTF(Ethernet, "rxFilter drop\n"); 2273 DDUMP(EthernetData, packet->data, packet->length); 2274 } 2275 2276 return drop; 2277} 2278 2279bool 2280NSGigE::recvPacket(PacketPtr packet) 2281{ 2282 rxBytes += packet->length; 2283 rxPackets++; 2284 2285 DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n", 2286 rxFifo.avail()); 2287 2288 if (!rxEnable) { 2289 DPRINTF(Ethernet, "receive disabled...packet dropped\n"); 2290 debug_break(); 2291 interface->recvDone(); 2292 return true; 2293 } 2294 2295 if (rxFilterEnable && rxFilter(packet)) { 2296 DPRINTF(Ethernet, "packet filtered...dropped\n"); 2297 interface->recvDone(); 2298 return true; 2299 } 2300 2301 if (rxFifo.avail() < packet->length) { 2302#if TRACING_ON 2303 IpPtr ip(packet); 2304 TcpPtr tcp(ip); 2305 if (ip) { 2306 DPRINTF(Ethernet, 2307 "packet won't fit in receive buffer...pkt ID %d dropped\n", 2308 ip->id()); 2309 if (tcp) { 2310 DPRINTF(Ethernet, "Seq=%d\n", tcp->seq()); 2311 } 2312 } 2313#endif 2314 droppedPackets++; 2315 devIntrPost(ISR_RXORN); 2316 return false; 2317 } 2318 2319 rxFifo.push(packet); 2320 interface->recvDone(); 2321 2322 rxKick(); 2323 return true; 2324} 2325 2326//===================================================================== 2327// 2328// 2329void 2330NSGigE::serialize(ostream &os) 2331{ 2332 // Serialize the PciDev base class 2333 PciDev::serialize(os); 2334 2335 /* 2336 * Finalize any DMA events now. 
 */
    // Complete any in-flight DMA copy immediately so the checkpoint
    // captures a quiescent device; the copy-completion events themselves
    // are not serialized.
    if (rxDmaReadEvent.scheduled())
        rxDmaReadCopy();
    if (rxDmaWriteEvent.scheduled())
        rxDmaWriteCopy();
    if (txDmaReadEvent.scheduled())
        txDmaReadCopy();
    if (txDmaWriteEvent.scheduled())
        txDmaWriteCopy();

    /*
     * Serialize the device registers.  unserialize() must read these
     * back in exactly this order.
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    // Receive-filter perfect-match station address from the EEPROM/ROM.
    SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    rxFifo.serialize("rxFifo", os);
    txFifo.serialize("txFifo", os);

    /*
     * Serialize the various helper variables
     */
    // Packets in flight are optional; a bool flag records whether the
    // corresponding packet entry exists in the checkpoint at all.
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        // Trim length to the bytes copied in so far, so only the valid
        // prefix of the in-progress tx packet is written out.
        txPacket->length = txPacketBufPtr - txPacket->data;
        txPacket->serialize("txPacket", os);
        // Buffer pointers are stored as offsets from the packet base,
        // since raw pointers are meaningless across a restore.
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        rxPacket->serialize("rxPacket", os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize DescCaches
     */
    SERIALIZE_SCALAR(txDescCache.link);
    SERIALIZE_SCALAR(txDescCache.bufptr);
    SERIALIZE_SCALAR(txDescCache.cmdsts);
    SERIALIZE_SCALAR(txDescCache.extsts);
    SERIALIZE_SCALAR(rxDescCache.link);
    SERIALIZE_SCALAR(rxDescCache.bufptr);
    SERIALIZE_SCALAR(rxDescCache.cmdsts);
    SERIALIZE_SCALAR(rxDescCache.extsts);

    /*
     * Serialize tx state machine
     */
    // Enum states are widened to int for the checkpoint; unserialize()
    // casts them back to the enum types.
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFragPtr);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);

    SERIALIZE_SCALAR(extstsEnable);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later.  Stored relative to curTick; 0 doubles as
     * the "no pending transmit" sentinel.
     */
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);

    /*
     * Keep track of pending interrupt status.
     * NOTE(review): intrEventTick is stored as an absolute tick,
     * unlike transmitTick above which is curTick-relative -- confirm
     * restore is only expected at the same curTick.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}

/**
 * Restore device state from a checkpoint.  Reads every key in exactly
 * the order serialize() wrote it, then re-creates the side effects that
 * are not stored directly (pending events, PIO address ranges).
 *
 * @param cp      checkpoint to read from
 * @param section checkpoint section holding this device's state
 */
void
NSGigE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Unserialize the PciDev base class
    PciDev::unserialize(cp, section);

    // Device registers, same order as serialize().
    UNSERIALIZE_SCALAR(regs.command);
    UNSERIALIZE_SCALAR(regs.config);
    UNSERIALIZE_SCALAR(regs.mear);
    UNSERIALIZE_SCALAR(regs.ptscr);
    UNSERIALIZE_SCALAR(regs.isr);
    UNSERIALIZE_SCALAR(regs.imr);
    UNSERIALIZE_SCALAR(regs.ier);
    UNSERIALIZE_SCALAR(regs.ihr);
    UNSERIALIZE_SCALAR(regs.txdp);
    UNSERIALIZE_SCALAR(regs.txdp_hi);
    UNSERIALIZE_SCALAR(regs.txcfg);
    UNSERIALIZE_SCALAR(regs.gpior);
    UNSERIALIZE_SCALAR(regs.rxdp);
    UNSERIALIZE_SCALAR(regs.rxdp_hi);
    UNSERIALIZE_SCALAR(regs.rxcfg);
    UNSERIALIZE_SCALAR(regs.pqcr);
    UNSERIALIZE_SCALAR(regs.wcsr);
    UNSERIALIZE_SCALAR(regs.pcr);
    UNSERIALIZE_SCALAR(regs.rfcr);
    UNSERIALIZE_SCALAR(regs.rfdr);
    UNSERIALIZE_SCALAR(regs.srr);
    UNSERIALIZE_SCALAR(regs.mibc);
    UNSERIALIZE_SCALAR(regs.vrcr);
    UNSERIALIZE_SCALAR(regs.vtcr);
    UNSERIALIZE_SCALAR(regs.vdr);
    UNSERIALIZE_SCALAR(regs.ccsr);
    UNSERIALIZE_SCALAR(regs.tbicr);
    UNSERIALIZE_SCALAR(regs.tbisr);
    UNSERIALIZE_SCALAR(regs.tanar);
    UNSERIALIZE_SCALAR(regs.tanlpar);
    UNSERIALIZE_SCALAR(regs.taner);
    UNSERIALIZE_SCALAR(regs.tesr);

UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN); 2524 2525 UNSERIALIZE_SCALAR(ioEnable); 2526 2527 /* 2528 * unserialize the data fifos 2529 */ 2530 rxFifo.unserialize("rxFifo", cp, section); 2531 txFifo.unserialize("txFifo", cp, section); 2532 2533 /* 2534 * unserialize the various helper variables 2535 */ 2536 bool txPacketExists; 2537 UNSERIALIZE_SCALAR(txPacketExists); 2538 if (txPacketExists) { 2539 txPacket = new PacketData(16384); 2540 txPacket->unserialize("txPacket", cp, section); 2541 uint32_t txPktBufPtr; 2542 UNSERIALIZE_SCALAR(txPktBufPtr); 2543 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr; 2544 } else 2545 txPacket = 0; 2546 2547 bool rxPacketExists; 2548 UNSERIALIZE_SCALAR(rxPacketExists); 2549 rxPacket = 0; 2550 if (rxPacketExists) { 2551 rxPacket = new PacketData(16384); 2552 rxPacket->unserialize("rxPacket", cp, section); 2553 uint32_t rxPktBufPtr; 2554 UNSERIALIZE_SCALAR(rxPktBufPtr); 2555 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr; 2556 } else 2557 rxPacket = 0; 2558 2559 UNSERIALIZE_SCALAR(txXferLen); 2560 UNSERIALIZE_SCALAR(rxXferLen); 2561 2562 /* 2563 * Unserialize DescCaches 2564 */ 2565 UNSERIALIZE_SCALAR(txDescCache.link); 2566 UNSERIALIZE_SCALAR(txDescCache.bufptr); 2567 UNSERIALIZE_SCALAR(txDescCache.cmdsts); 2568 UNSERIALIZE_SCALAR(txDescCache.extsts); 2569 UNSERIALIZE_SCALAR(rxDescCache.link); 2570 UNSERIALIZE_SCALAR(rxDescCache.bufptr); 2571 UNSERIALIZE_SCALAR(rxDescCache.cmdsts); 2572 UNSERIALIZE_SCALAR(rxDescCache.extsts); 2573 2574 /* 2575 * unserialize tx state machine 2576 */ 2577 int txState; 2578 UNSERIALIZE_SCALAR(txState); 2579 this->txState = (TxState) txState; 2580 UNSERIALIZE_SCALAR(txEnable); 2581 UNSERIALIZE_SCALAR(CTDD); 2582 UNSERIALIZE_SCALAR(txFragPtr); 2583 UNSERIALIZE_SCALAR(txDescCnt); 2584 int txDmaState; 2585 UNSERIALIZE_SCALAR(txDmaState); 2586 this->txDmaState = (DmaState) txDmaState; 2587 2588 /* 2589 * unserialize rx state machine 2590 */ 2591 int rxState; 2592 
    UNSERIALIZE_SCALAR(rxState);
    this->rxState = (RxState) rxState;
    UNSERIALIZE_SCALAR(rxEnable);
    UNSERIALIZE_SCALAR(CRDD);
    UNSERIALIZE_SCALAR(rxPktBytes);
    UNSERIALIZE_SCALAR(rxFragPtr);
    UNSERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState;
    UNSERIALIZE_SCALAR(rxDmaState);
    this->rxDmaState = (DmaState) rxDmaState;

    UNSERIALIZE_SCALAR(extstsEnable);

    /*
     * If there's a pending transmit, reschedule it now.
     * transmitTick was stored relative to the checkpoint's curTick;
     * 0 means no transmit was pending (a transmit due exactly at
     * curTick would also read as 0 and be dropped).
     */
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        txEvent.schedule(curTick + transmitTick);

    /*
     * unserialize receive address filter settings
     */
    UNSERIALIZE_SCALAR(rxFilterEnable);
    UNSERIALIZE_SCALAR(acceptBroadcast);
    UNSERIALIZE_SCALAR(acceptMulticast);
    UNSERIALIZE_SCALAR(acceptUnicast);
    UNSERIALIZE_SCALAR(acceptPerfect);
    UNSERIALIZE_SCALAR(acceptArp);

    /*
     * Keep track of pending interrupt status.
     * NOTE(review): intrEventTick is an absolute tick (not
     * curTick-relative like transmitTick) -- confirm restore happens
     * at the same curTick the checkpoint was taken.
     */
    UNSERIALIZE_SCALAR(intrTick);
    UNSERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick;
    UNSERIALIZE_SCALAR(intrEventTick);
    if (intrEventTick) {
        intrEvent = new IntrEvent(this, true);
        intrEvent->schedule(intrEventTick);
    }

    /*
     * re-add addrRanges to bus bridges -- the bus-side registration is
     * a side effect of startup, not checkpointed state, so it must be
     * redone here from the restored BAR values.
     */
    if (pioInterface) {
        pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
        pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));
    }
}

/**
 * Latency callback for timing-mode programmed I/O: logs the access and
 * reports completion one pioLatency after the current tick.
 *
 * @param req the memory request being timed
 * @return tick at which the PIO access completes
 */
Tick
NSGigE::cacheAccess(MemReqPtr &req)
{
    DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
            req->paddr, req->paddr - addr);
    return curTick + pioLatency;
}

//
// Simulator configuration glue for the NSGigEInt ethernet interface
// object: parameter declarations, defaults, and factory.
//
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    SimObjectParam<EtherInt *> peer;
    SimObjectParam<NSGigE *> device;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

CREATE_SIM_OBJECT(NSGigEInt)
{
    NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);

    // If a peer interface was configured, wire the link up in both
    // directions now; otherwise the peer is attached elsewhere.
    EtherInt *p = (EtherInt *)peer;
    if (p) {
        dev_int->setPeer(p);
        p->setPeer(dev_int);
    }

    return dev_int;
}

REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)


//
// Simulator configuration glue for the NSGigE device itself.
//
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    Param<Addr> addr;
    Param<Tick> tx_delay;
    Param<Tick> rx_delay;
    Param<Tick> intr_delay;
    SimObjectParam<MemoryController *> mmu;
    SimObjectParam<PhysicalMemory *> physmem;
    Param<bool> rx_filter;
    Param<string> hardware_address;
    SimObjectParam<Bus*> io_bus;
    SimObjectParam<Bus*> payload_bus;
    SimObjectParam<HierParams *> hier;
    Param<Tick> pio_latency;
    Param<bool> dma_desc_free;
    Param<bool> dma_data_free;
    Param<Tick> dma_read_delay;
    Param<Tick> dma_write_delay;
    Param<Tick> dma_read_factor;
    Param<Tick> dma_write_factor;
    SimObjectParam<PciConfigAll *> configspace;
    SimObjectParam<PciConfigData *> configdata;
    SimObjectParam<Platform *> platform;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;
    Param<uint32_t> tx_fifo_size;
    Param<uint32_t> rx_fifo_size;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)

    INIT_PARAM(addr, "Device Address"),
    INIT_PARAM_DFLT(tx_delay, "Transmit Delay", 1000),
    INIT_PARAM_DFLT(rx_delay, "Receive Delay", 1000),
    INIT_PARAM_DFLT(intr_delay, "Interrupt Delay in microseconds", 0),
    INIT_PARAM(mmu, "Memory Controller"),
    INIT_PARAM(physmem, "Physical Memory"),
    INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true),
    INIT_PARAM_DFLT(hardware_address, "Ethernet Hardware Address",
                    "00:99:00:00:00:01"),
    INIT_PARAM_DFLT(io_bus, "The IO Bus to attach to for headers", NULL),
    INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL),
    INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams),
    INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
    INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false),
    INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false),
    INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0),
    INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0),
    INIT_PARAM(configspace, "PCI Configspace"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(platform, "Platform"),
    INIT_PARAM(pci_bus, "PCI bus"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),
    INIT_PARAM_DFLT(tx_fifo_size, "max size in bytes of txFifo", 131072),
    INIT_PARAM_DFLT(rx_fifo_size, "max size in bytes of rxFifo", 131072)

END_INIT_SIM_OBJECT_PARAMS(NSGigE)


// Factory: copies every configuration parameter into an NSGigE::Params
// struct and constructs the device from it.
// NOTE(review): the 'addr' parameter is declared and initialized above
// but never copied into params here -- confirm whether the device
// address is instead derived from PCI BARs, or whether this is an
// omission.
CREATE_SIM_OBJECT(NSGigE)
{
    NSGigE::Params *params = new NSGigE::Params;

    // Identity / PCI placement.
    params->name = getInstanceName();
    params->mmu = mmu;
    params->configSpace = configspace;
    params->configData = configdata;
    params->plat = platform;
    params->busNum = pci_bus;
    params->deviceNum = pci_dev;
    params->functionNum = pci_func;

    // Timing, memory hierarchy, and DMA modelling knobs.
    params->intr_delay = intr_delay;
    params->pmem = physmem;
    params->tx_delay = tx_delay;
    params->rx_delay = rx_delay;
    params->hier = hier;
    params->header_bus = io_bus;
    params->payload_bus = payload_bus;
    params->pio_latency = pio_latency;
    params->dma_desc_free = dma_desc_free;
    params->dma_data_free = dma_data_free;
    params->dma_read_delay = dma_read_delay;
    params->dma_write_delay = dma_write_delay;
    params->dma_read_factor = dma_read_factor;
    params->dma_write_factor = dma_write_factor;

    // Ethernet-level configuration.
    params->rx_filter = rx_filter;
    params->eaddr = hardware_address;
    params->tx_fifo_size = tx_fifo_size;
    params->rx_fifo_size = rx_fifo_size;
    return new NSGigE(params);
}

REGISTER_SIM_OBJECT("NSGigE", NSGigE)