/*
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 */

/* @file
 * Device model for Intel's 8254x line of gigabit Ethernet controllers.
 * In particular, it models an 82547 revision 2 (82547GI) MAC, because that
 * part seems to need the fewest workarounds in the driver. It will probably
 * work with most of the other MACs with slight modifications.
 */


/*
 * @todo Really there are multiple DMA engines; we should implement them.
 */
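
/*
 * Rough structure of this model (orientation summary):
 *  - read()/write() implement the memory-mapped device registers, including
 *    the bit-banged EEPROM and MDIC accesses the driver makes at boot.
 *  - DescCache<T> mirrors a ring of RX or TX descriptors living in guest
 *    memory, DMAing descriptors in as the tail pointer advances and writing
 *    them back once they have been consumed.
 *  - RxDescCache and TxDescCache specialize DescCache to move packet data
 *    between guest memory and the device's rxFifo/txFifo.
 */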

#include <algorithm>
#include <memory>

#include "base/inet.hh"
#include "base/trace.hh"
#include "debug/Drain.hh"
#include "debug/EthernetAll.hh"
#include "dev/i8254xGBe.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/IGbE.hh"
#include "sim/stats.hh"
#include "sim/system.hh"

using namespace iGbReg;
using namespace Net;

IGbE::IGbE(const Params *p)
    : EtherDevice(p), etherInt(NULL),  drainManager(NULL),
      rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), rxTick(false),
      txTick(false), txFifoTick(false), rxDmaPacket(false), pktOffset(0),
      fetchDelay(p->fetch_delay), wbDelay(p->wb_delay),
      fetchCompDelay(p->fetch_comp_delay), wbCompDelay(p->wb_comp_delay),
      rxWriteDelay(p->rx_write_delay), txReadDelay(p->tx_read_delay),
      rdtrEvent(this), radvEvent(this),
      tadvEvent(this), tidvEvent(this), tickEvent(this), interEvent(this),
      rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size),
      txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size),
      lastInterrupt(0)
{
    etherInt = new IGbEInt(name() + ".int", this);

    // Initialize internal registers per Intel documentation
    // All registers are initialized to 0 by the per-register constructors
    regs.ctrl.fd(1);
    regs.ctrl.lrst(1);
    regs.ctrl.speed(2);
    regs.ctrl.frcspd(1);
    regs.sts.speed(3); // Say we're 1000Mbps
    regs.sts.fd(1); // full duplex
    regs.sts.lu(1); // link up
    regs.eecd.fwe(1);
    regs.eecd.ee_type(1);
    regs.imr = 0;
    regs.iam = 0;
    regs.rxdctl.gran(1);
    regs.rxdctl.wthresh(1);
    regs.fcrth(1);
    regs.tdwba = 0;
    regs.rlpml = 0;
    regs.sw_fw_sync = 0;

    regs.pba.rxa(0x30);
    regs.pba.txa(0x10);

    eeOpBits            = 0;
    eeAddrBits          = 0;
    eeDataBits          = 0;
    eeOpcode            = 0;

    // clear all 64 16-bit words of the EEPROM
    memset(&flash, 0, EEPROM_SIZE*2);

    // Set the MAC address
    memcpy(flash, p->hardware_address.bytes(), ETH_ADDR_LEN);
    for (int x = 0; x < ETH_ADDR_LEN/2; x++)
        flash[x] = htobe(flash[x]);

    uint16_t csum = 0;
    for (int x = 0; x < EEPROM_SIZE; x++)
        csum += htobe(flash[x]);


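    // The EEPROM is considered valid when its 64 16-bit words sum to
    // EEPROM_CSUM, so back-compute the final word from the words already
    // populated (the MAC address) to make the checksum work out.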
    // Magic happy checksum value
    flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum));

    // Store the MAC address as queue ID
    macAddr = p->hardware_address;

    rxFifo.clear();
    txFifo.clear();
}

IGbE::~IGbE()
{
    delete etherInt;
}

void
IGbE::init()
{
    cpa = CPA::cpa();
    PciDevice::init();
}

EtherInt*
IGbE::getEthPort(const std::string &if_name, int idx)
{

    if (if_name == "interface") {
        if (etherInt->getPeer())
            panic("Port already connected to\n");
        return etherInt;
    }
    return NULL;
}

Tick
IGbE::writeConfig(PacketPtr pkt)
{
    int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDevice::writeConfig(pkt);
    else
        panic("Device specific PCI config space not implemented.\n");

    //
    // Some work may need to be done here based on the PCI COMMAND bits.
    //

    return configDelay;
}

// Handy macro for range-testing register access addresses
#define IN_RANGE(val, base, len) (val >= base && val < (base + len))

Tick
IGbE::read(PacketPtr pkt)
{
    int bar;
    Addr daddr;

    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    // Only 32bit accesses allowed
    assert(pkt->getSize() == 4);

    DPRINTF(Ethernet, "Read device register %#X\n", daddr);

    pkt->allocate();

    //
    // Handle read of register here
    //


    switch (daddr) {
      case REG_CTRL:
        pkt->set<uint32_t>(regs.ctrl());
        break;
      case REG_STATUS:
        pkt->set<uint32_t>(regs.sts());
        break;
      case REG_EECD:
        pkt->set<uint32_t>(regs.eecd());
        break;
      case REG_EERD:
        pkt->set<uint32_t>(regs.eerd());
        break;
      case REG_CTRL_EXT:
        pkt->set<uint32_t>(regs.ctrl_ext());
        break;
      case REG_MDIC:
        pkt->set<uint32_t>(regs.mdic());
        break;
      case REG_ICR:
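        // Reading ICR returns the pending interrupt causes. Per the
        // read-clear behavior modeled below, the causes are cleared if the
        // interrupt is currently asserted or all interrupts are masked, and
        // with IAME set an asserted read also applies the IAM mask to IMR.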
213        DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
214                regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
215        pkt->set<uint32_t>(regs.icr());
216        if (regs.icr.int_assert() || regs.imr == 0) {
217            regs.icr = regs.icr() & ~mask(30);
218            DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr());
219        }
220        if (regs.ctrl_ext.iame() && regs.icr.int_assert())
221            regs.imr &= ~regs.iam;
222        chkInterrupt();
223        break;
224      case REG_EICR:
225        // This is only useful for MSI, but the driver reads it every time
226        // Just don't do anything
227        pkt->set<uint32_t>(0);
228        break;
229      case REG_ITR:
230        pkt->set<uint32_t>(regs.itr());
231        break;
232      case REG_RCTL:
233        pkt->set<uint32_t>(regs.rctl());
234        break;
235      case REG_FCTTV:
236        pkt->set<uint32_t>(regs.fcttv());
237        break;
238      case REG_TCTL:
239        pkt->set<uint32_t>(regs.tctl());
240        break;
241      case REG_PBA:
242        pkt->set<uint32_t>(regs.pba());
243        break;
244      case REG_WUC:
245      case REG_LEDCTL:
246        pkt->set<uint32_t>(0); // We don't care, so just return 0
247        break;
248      case REG_FCRTL:
249        pkt->set<uint32_t>(regs.fcrtl());
250        break;
251      case REG_FCRTH:
252        pkt->set<uint32_t>(regs.fcrth());
253        break;
254      case REG_RDBAL:
255        pkt->set<uint32_t>(regs.rdba.rdbal());
256        break;
257      case REG_RDBAH:
258        pkt->set<uint32_t>(regs.rdba.rdbah());
259        break;
260      case REG_RDLEN:
261        pkt->set<uint32_t>(regs.rdlen());
262        break;
263      case REG_SRRCTL:
264        pkt->set<uint32_t>(regs.srrctl());
265        break;
266      case REG_RDH:
267        pkt->set<uint32_t>(regs.rdh());
268        break;
269      case REG_RDT:
270        pkt->set<uint32_t>(regs.rdt());
271        break;
272      case REG_RDTR:
273        pkt->set<uint32_t>(regs.rdtr());
274        if (regs.rdtr.fpd()) {
275            rxDescCache.writeback(0);
276            DPRINTF(EthernetIntr,
277                    "Posting interrupt because of RDTR.FPD write\n");
278            postInterrupt(IT_RXT);
279            regs.rdtr.fpd(0);
280        }
281        break;
282      case REG_RXDCTL:
283        pkt->set<uint32_t>(regs.rxdctl());
284        break;
285      case REG_RADV:
286        pkt->set<uint32_t>(regs.radv());
287        break;
288      case REG_TDBAL:
289        pkt->set<uint32_t>(regs.tdba.tdbal());
290        break;
291      case REG_TDBAH:
292        pkt->set<uint32_t>(regs.tdba.tdbah());
293        break;
294      case REG_TDLEN:
295        pkt->set<uint32_t>(regs.tdlen());
296        break;
297      case REG_TDH:
298        pkt->set<uint32_t>(regs.tdh());
299        break;
300      case REG_TXDCA_CTL:
301        pkt->set<uint32_t>(regs.txdca_ctl());
302        break;
303      case REG_TDT:
304        pkt->set<uint32_t>(regs.tdt());
305        break;
306      case REG_TIDV:
307        pkt->set<uint32_t>(regs.tidv());
308        break;
309      case REG_TXDCTL:
310        pkt->set<uint32_t>(regs.txdctl());
311        break;
312      case REG_TADV:
313        pkt->set<uint32_t>(regs.tadv());
314        break;
315      case REG_TDWBAL:
316        pkt->set<uint32_t>(regs.tdwba & mask(32));
317        break;
318      case REG_TDWBAH:
319        pkt->set<uint32_t>(regs.tdwba >> 32);
320        break;
321      case REG_RXCSUM:
322        pkt->set<uint32_t>(regs.rxcsum());
323        break;
324      case REG_RLPML:
325        pkt->set<uint32_t>(regs.rlpml);
326        break;
327      case REG_RFCTL:
328        pkt->set<uint32_t>(regs.rfctl());
329        break;
330      case REG_MANC:
331        pkt->set<uint32_t>(regs.manc());
332        break;
333      case REG_SWSM:
334        pkt->set<uint32_t>(regs.swsm());
335        regs.swsm.smbi(1);
336        break;
337      case REG_FWSM:
338        pkt->set<uint32_t>(regs.fwsm());
339        break;
340      case REG_SWFWSYNC:
341        pkt->set<uint32_t>(regs.sw_fw_sync);
342        break;
343      default:
344        if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) &&
345            !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) &&
346            !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4) &&
347            !IN_RANGE(daddr, REG_CRCERRS, STATS_REGS_SIZE))
348            panic("Read request to unknown register number: %#x\n", daddr);
349        else
350            pkt->set<uint32_t>(0);
351    };
352
353    pkt->makeAtomicResponse();
354    return pioDelay;
355}
356
357Tick
358IGbE::write(PacketPtr pkt)
359{
360    int bar;
361    Addr daddr;
362
363
364    if (!getBAR(pkt->getAddr(), bar, daddr))
365        panic("Invalid PCI memory access to unmapped memory.\n");
366
367    // Only Memory register BAR is allowed
368    assert(bar == 0);
369
370    // Only 32bit accesses allowed
371    assert(pkt->getSize() == sizeof(uint32_t));
372
373    DPRINTF(Ethernet, "Wrote device register %#X value %#X\n",
374            daddr, pkt->get<uint32_t>());
375
376    //
377    // Handle write of register here
378    //
379    uint32_t val = pkt->get<uint32_t>();
380
381    Regs::RCTL oldrctl;
382    Regs::TCTL oldtctl;
383
384    switch (daddr) {
385      case REG_CTRL:
386        regs.ctrl = val;
387        if (regs.ctrl.tfce())
388            warn("TX Flow control enabled, should implement\n");
389        if (regs.ctrl.rfce())
390            warn("RX Flow control enabled, should implement\n");
391        break;
392      case REG_CTRL_EXT:
393        regs.ctrl_ext = val;
394        break;
395      case REG_STATUS:
396        regs.sts = val;
397        break;
398      case REG_EECD:
399        int oldClk;
400        oldClk = regs.eecd.sk();
401        regs.eecd = val;
402        // See if this is a eeprom access and emulate accordingly
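        // The driver bit-bangs a serial (SPI-style) EEPROM through EECD: on
        // each rising edge of the SK clock one bit is shifted in from DI.
        // The first 8 bits form an opcode; a READ opcode is followed by 8
        // address bits, after which 16 data bits are shifted out MSB-first
        // on DO. An RDSR (read status register) opcode just returns zeros.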
        if (!oldClk && regs.eecd.sk()) {
            if (eeOpBits < 8) {
                eeOpcode = eeOpcode << 1 | regs.eecd.din();
                eeOpBits++;
            } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
                eeAddr = eeAddr << 1 | regs.eecd.din();
                eeAddrBits++;
            } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
                assert(eeAddr>>1 < EEPROM_SIZE);
                DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n",
                        flash[eeAddr>>1] >> eeDataBits & 0x1,
                        flash[eeAddr>>1]);
                regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1);
                eeDataBits++;
            } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) {
                regs.eecd.dout(0);
                eeDataBits++;
            } else
                panic("What's going on with eeprom interface? opcode:"
                      " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode,
                      (uint32_t)eeOpBits, (uint32_t)eeAddr,
                      (uint32_t)eeAddrBits, (uint32_t)eeDataBits);

            // Reset everything for the next command
            if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) ||
                (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) {
                eeOpBits = 0;
                eeAddrBits = 0;
                eeDataBits = 0;
                eeOpcode = 0;
                eeAddr = 0;
            }

            DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
                    (uint32_t)eeOpcode, (uint32_t) eeOpBits,
                    (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits);
            if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI ||
                                   eeOpcode == EEPROM_RDSR_OPCODE_SPI ))
                panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode,
                      (uint32_t)eeOpBits);


        }
        // If the driver requests EEPROM access, grant it immediately
        regs.eecd.ee_gnt(regs.eecd.ee_req());
        break;
      case REG_EERD:
        regs.eerd = val;
        if (regs.eerd.start()) {
            regs.eerd.done(1);
            assert(regs.eerd.addr() < EEPROM_SIZE);
            regs.eerd.data(flash[regs.eerd.addr()]);
            regs.eerd.start(0);
            DPRINTF(EthernetEEPROM, "EEPROM: read addr: %#X data %#x\n",
                    regs.eerd.addr(), regs.eerd.data());
        }
        break;
      case REG_MDIC:
        regs.mdic = val;
        if (regs.mdic.i())
            panic("No support for interrupt on mdic complete\n");
        if (regs.mdic.phyadd() != 1)
            panic("No support for reading anything but phy\n");
        DPRINTF(Ethernet, "%s phy address %x\n",
                regs.mdic.op() == 1 ? "Writing" : "Reading",
                regs.mdic.regadd());
        switch (regs.mdic.regadd()) {
          case PHY_PSTATUS:
            regs.mdic.data(0x796D); // link up
            break;
          case PHY_PID:
            regs.mdic.data(params()->phy_pid);
            break;
          case PHY_EPID:
            regs.mdic.data(params()->phy_epid);
            break;
          case PHY_GSTATUS:
            regs.mdic.data(0x7C00);
            break;
          case PHY_EPSTATUS:
            regs.mdic.data(0x3000);
            break;
          case PHY_AGC:
            regs.mdic.data(0x180); // some random length
            break;
          default:
            regs.mdic.data(0);
        }
        regs.mdic.r(1);
        break;
      case REG_ICR:
        DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
                regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
        if (regs.ctrl_ext.iame())
            regs.imr &= ~regs.iam;
        regs.icr = ~bits(val,30,0) & regs.icr();
        chkInterrupt();
        break;
      case REG_ITR:
        regs.itr = val;
        break;
      case REG_ICS:
        DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n");
        postInterrupt((IntTypes)val);
        break;
      case REG_IMS:
        regs.imr |= val;
        chkInterrupt();
        break;
      case REG_IMC:
        regs.imr &= ~val;
        chkInterrupt();
        break;
      case REG_IAM:
        regs.iam = val;
        break;
      case REG_RCTL:
        oldrctl = regs.rctl;
        regs.rctl = val;
        if (regs.rctl.rst()) {
            rxDescCache.reset();
            DPRINTF(EthernetSM, "RXS: Got RESET!\n");
            rxFifo.clear();
            regs.rctl.rst(0);
        }
        if (regs.rctl.en())
            rxTick = true;
        restartClock();
        break;
      case REG_FCTTV:
        regs.fcttv = val;
        break;
      case REG_TCTL:
        oldtctl = regs.tctl;
        regs.tctl = val;
        if (regs.tctl.en())
            txTick = true;
        restartClock();
        if (regs.tctl.en() && !oldtctl.en()) {
            txDescCache.reset();
        }
        break;
      case REG_PBA:
        regs.pba.rxa(val);
        regs.pba.txa(64 - regs.pba.rxa());
        break;
      case REG_WUC:
      case REG_LEDCTL:
      case REG_FCAL:
      case REG_FCAH:
      case REG_FCT:
      case REG_VET:
      case REG_AIFS:
      case REG_TIPG:
        ; // We don't care, so don't store anything
        break;
      case REG_IVAR0:
        warn("Writing to IVAR0, ignoring...\n");
        break;
      case REG_FCRTL:
        regs.fcrtl = val;
        break;
      case REG_FCRTH:
        regs.fcrth = val;
        break;
      case REG_RDBAL:
        regs.rdba.rdbal( val & ~mask(4));
        rxDescCache.areaChanged();
        break;
      case REG_RDBAH:
        regs.rdba.rdbah(val);
        rxDescCache.areaChanged();
        break;
      case REG_RDLEN:
        regs.rdlen = val & ~mask(7);
        rxDescCache.areaChanged();
        break;
      case REG_SRRCTL:
        regs.srrctl = val;
        break;
      case REG_RDH:
        regs.rdh = val;
        rxDescCache.areaChanged();
        break;
      case REG_RDT:
        regs.rdt = val;
        DPRINTF(EthernetSM, "RXS: RDT Updated.\n");
        if (getDrainState() == Drainable::Running) {
            DPRINTF(EthernetSM, "RXS: RDT Fetching Descriptors!\n");
            rxDescCache.fetchDescriptors();
        } else {
            DPRINTF(EthernetSM, "RXS: RDT NOT Fetching Desc b/c draining!\n");
        }
        break;
      case REG_RDTR:
        regs.rdtr = val;
        break;
      case REG_RADV:
        regs.radv = val;
        break;
      case REG_RXDCTL:
        regs.rxdctl = val;
        break;
      case REG_TDBAL:
        regs.tdba.tdbal( val & ~mask(4));
        txDescCache.areaChanged();
        break;
      case REG_TDBAH:
        regs.tdba.tdbah(val);
        txDescCache.areaChanged();
        break;
      case REG_TDLEN:
        regs.tdlen = val & ~mask(7);
        txDescCache.areaChanged();
        break;
      case REG_TDH:
        regs.tdh = val;
        txDescCache.areaChanged();
        break;
      case REG_TXDCA_CTL:
        regs.txdca_ctl = val;
        if (regs.txdca_ctl.enabled())
            panic("No support for DCA\n");
        break;
      case REG_TDT:
        regs.tdt = val;
        DPRINTF(EthernetSM, "TXS: TX Tail pointer updated\n");
        if (getDrainState() == Drainable::Running) {
            DPRINTF(EthernetSM, "TXS: TDT Fetching Descriptors!\n");
            txDescCache.fetchDescriptors();
        } else {
            DPRINTF(EthernetSM, "TXS: TDT NOT Fetching Desc b/c draining!\n");
        }
        break;
      case REG_TIDV:
        regs.tidv = val;
        break;
      case REG_TXDCTL:
        regs.txdctl = val;
        break;
      case REG_TADV:
        regs.tadv = val;
        break;
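      // TDWBAL/TDWBAH together form TDWBA: bit 0 enables TX descriptor
      // completion head writeback and the remaining bits hold the address
      // the head value is written back to.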
      case REG_TDWBAL:
        regs.tdwba &= ~mask(32);
        regs.tdwba |= val;
        txDescCache.completionWriteback(regs.tdwba & ~mask(1),
                                        regs.tdwba & mask(1));
        break;
      case REG_TDWBAH:
        regs.tdwba &= mask(32);
        regs.tdwba |= (uint64_t)val << 32;
        txDescCache.completionWriteback(regs.tdwba & ~mask(1),
                                        regs.tdwba & mask(1));
        break;
      case REG_RXCSUM:
        regs.rxcsum = val;
        break;
      case REG_RLPML:
        regs.rlpml = val;
        break;
      case REG_RFCTL:
        regs.rfctl = val;
        if (regs.rfctl.exsten())
            panic("Extended RX descriptors not implemented\n");
        break;
      case REG_MANC:
        regs.manc = val;
        break;
      case REG_SWSM:
        regs.swsm = val;
        if (regs.fwsm.eep_fw_semaphore())
            regs.swsm.swesmbi(0);
        break;
      case REG_SWFWSYNC:
        regs.sw_fw_sync = val;
        break;
      default:
        if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) &&
            !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) &&
            !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4))
            panic("Write request to unknown register number: %#x\n", daddr);
    };

    pkt->makeAtomicResponse();
    return pioDelay;
}

void
IGbE::postInterrupt(IntTypes t, bool now)
{
    assert(t);

    // Interrupt is already pending
    if (t & regs.icr() && !now)
        return;

    regs.icr = regs.icr() | t;

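    // Interrupt throttling: the ITR interval is expressed in units of 256 ns.
    // The interrupt is delivered immediately if throttling is disabled, was
    // explicitly requested, or the interval has already elapsed since the
    // last interrupt; otherwise delivery is deferred until the interval ends.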
    Tick itr_interval = SimClock::Int::ns * 256 * regs.itr.interval();
    DPRINTF(EthernetIntr,
            "EINT: postInterrupt() curTick(): %d itr: %d interval: %d\n",
            curTick(), regs.itr.interval(), itr_interval);

    if (regs.itr.interval() == 0 || now ||
        lastInterrupt + itr_interval <= curTick()) {
        if (interEvent.scheduled()) {
            deschedule(interEvent);
        }
        cpuPostInt();
    } else {
        Tick int_time = lastInterrupt + itr_interval;
        assert(int_time > 0);
        DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for tick %d\n",
                int_time);
        if (!interEvent.scheduled()) {
            schedule(interEvent, int_time);
        }
    }
}

void
IGbE::delayIntEvent()
{
    cpuPostInt();
}


void
IGbE::cpuPostInt()
{

    postedInterrupts++;

    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Interrupt Masked. Not Posting\n");
        return;
    }

    DPRINTF(Ethernet, "Posting Interrupt\n");


    if (interEvent.scheduled()) {
        deschedule(interEvent);
    }

    if (rdtrEvent.scheduled()) {
        regs.icr.rxt0(1);
        deschedule(rdtrEvent);
    }
    if (radvEvent.scheduled()) {
        regs.icr.rxt0(1);
        deschedule(radvEvent);
    }
    if (tadvEvent.scheduled()) {
        regs.icr.txdw(1);
        deschedule(tadvEvent);
    }
    if (tidvEvent.scheduled()) {
        regs.icr.txdw(1);
        deschedule(tidvEvent);
    }

    regs.icr.int_assert(1);
    DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n",
            regs.icr());

    intrPost();

    lastInterrupt = curTick();
}

void
IGbE::cpuClearInt()
{
    if (regs.icr.int_assert()) {
        regs.icr.int_assert(0);
        DPRINTF(EthernetIntr,
                "EINT: Clearing interrupt to CPU now. Vector %#x\n",
                regs.icr());
        intrClear();
    }
}

void
IGbE::chkInterrupt()
{
    DPRINTF(Ethernet, "Checking interrupts icr: %#x imr: %#x\n", regs.icr(),
            regs.imr);
    // Check if we need to clear the cpu interrupt
    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Mask cleared all interrupts\n");
        if (interEvent.scheduled())
            deschedule(interEvent);
        if (regs.icr.int_assert())
            cpuClearInt();
    }
    DPRINTF(Ethernet, "ITR = %#X itr.interval = %#X\n",
            regs.itr(), regs.itr.interval());

    if (regs.icr() & regs.imr) {
        if (regs.itr.interval() == 0)  {
            cpuPostInt();
        } else {
            DPRINTF(Ethernet,
                    "Possibly scheduling interrupt because of imr write\n");
            if (!interEvent.scheduled()) {
                Tick t = curTick() + SimClock::Int::ns * 256 * regs.itr.interval();
                DPRINTF(Ethernet, "Scheduling for %d\n", t);
                schedule(interEvent, t);
            }
        }
    }
}


///////////////////////////// IGbE::DescCache //////////////////////////////
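
/*
 * DescCache mirrors a ring of descriptors that lives in guest memory. New
 * descriptors between the cache pointer and the tail register are DMAed in
 * (fetchDescriptors/fetchComplete) and held in unusedCache until the device
 * consumes them; consumed descriptors move to usedCache and are eventually
 * DMAed back to the ring (writeback/wbComplete), at which point the head
 * register is advanced.
 */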

template<class T>
IGbE::DescCache<T>::DescCache(IGbE *i, const std::string n, int s)
    : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0),
      wbOut(0), moreToWb(false), wbAlignment(0), pktPtr(NULL),
      wbDelayEvent(this), fetchDelayEvent(this), fetchEvent(this),
      wbEvent(this)
{
    fetchBuf = new T[size];
    wbBuf = new T[size];
}

template<class T>
IGbE::DescCache<T>::~DescCache()
{
    reset();
    delete[] fetchBuf;
    delete[] wbBuf;
}

template<class T>
void
IGbE::DescCache<T>::areaChanged()
{
    if (usedCache.size() > 0 || curFetching || wbOut)
        panic("Descriptor Address, Length or Head changed. Bad\n");
    reset();

}

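/*
 * Write back used descriptors to the ring in guest memory. aMask is an
 * alignment mask: the count of descriptors written back is rounded down to a
 * multiple of (aMask + 1), except when wrapping at the end of the ring. If a
 * less restrictive (smaller) mask arrives while a writeback is already in
 * flight, moreToWb is recorded so another pass is made afterwards.
 */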
template<class T>
void
IGbE::DescCache<T>::writeback(Addr aMask)
{
    int curHead = descHead();
    int max_to_wb = usedCache.size();

    // Check if this writeback is less restrictive than the previous one,
    // and if so set up another one immediately following it
    if (wbOut) {
        if (aMask < wbAlignment) {
            moreToWb = true;
            wbAlignment = aMask;
        }
        DPRINTF(EthernetDesc,
                "Writing back already in process, returning\n");
        return;
    }

    moreToWb = false;
    wbAlignment = aMask;


    DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
            "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
            curHead, descTail(), descLen(), cachePnt, max_to_wb,
            descLeft());

    if (max_to_wb + curHead >= descLen()) {
        max_to_wb = descLen() - curHead;
        moreToWb = true;
        // this is by definition aligned correctly
    } else if (wbAlignment != 0) {
        // align the wb point to the mask
        max_to_wb = max_to_wb & ~wbAlignment;
    }

    DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);

    if (max_to_wb <= 0) {
        if (usedCache.size())
            igbe->anBegin(annSmWb, "Wait Alignment", CPA::FL_WAIT);
        else
            igbe->anWe(annSmWb, annUsedCacheQ);
        return;
    }

    wbOut = max_to_wb;

    assert(!wbDelayEvent.scheduled());
    igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
    igbe->anBegin(annSmWb, "Prepare Writeback Desc");
}

template<class T>
void
IGbE::DescCache<T>::writeback1()
{
    // If we're draining, delay issuing this DMA
    if (igbe->getDrainState() != Drainable::Running) {
        igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
        return;
    }

    DPRINTF(EthernetDesc, "Beginning DMA of %d descriptors\n", wbOut);

    for (int x = 0; x < wbOut; x++) {
        assert(usedCache.size());
        memcpy(&wbBuf[x], usedCache[x], sizeof(T));
        igbe->anPq(annSmWb, annUsedCacheQ);
        igbe->anPq(annSmWb, annDescQ);
        igbe->anQ(annSmWb, annUsedDescQ);
    }


    igbe->anBegin(annSmWb, "Writeback Desc DMA");

    assert(wbOut);
    igbe->dmaWrite(pciToDma(descBase() + descHead() * sizeof(T)),
                   wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf,
                   igbe->wbCompDelay);
}

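/*
 * Fetch descriptors from the ring into the local cache. The number fetched
 * is what the driver has made available (between cachePnt and the tail
 * register, without wrapping past the end of the ring in one go), further
 * limited by the free space in the cache. The actual DMA is issued after
 * fetchDelay by fetchDescriptors1().
 */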
template<class T>
void
IGbE::DescCache<T>::fetchDescriptors()
{
    size_t max_to_fetch;

    if (curFetching) {
        DPRINTF(EthernetDesc,
                "Currently fetching %d descriptors, returning\n",
                curFetching);
        return;
    }

    if (descTail() >= cachePnt)
        max_to_fetch = descTail() - cachePnt;
    else
        max_to_fetch = descLen() - cachePnt;

    size_t free_cache = size - usedCache.size() - unusedCache.size();

    if (!max_to_fetch)
        igbe->anWe(annSmFetch, annUnusedDescQ);
    else
        igbe->anPq(annSmFetch, annUnusedDescQ, max_to_fetch);

    if (max_to_fetch) {
        if (!free_cache)
            igbe->anWf(annSmFetch, annDescQ);
        else
            igbe->anRq(annSmFetch, annDescQ, free_cache);
    }

    max_to_fetch = std::min(max_to_fetch, free_cache);


    DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
            "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
            descHead(), descTail(), descLen(), cachePnt,
            max_to_fetch, descLeft());

    // Nothing to do
    if (max_to_fetch == 0)
        return;

    // So we don't have two descriptor fetches going on at once
    curFetching = max_to_fetch;

    assert(!fetchDelayEvent.scheduled());
    igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
    igbe->anBegin(annSmFetch, "Prepare Fetch Desc");
}

template<class T>
void
IGbE::DescCache<T>::fetchDescriptors1()
{
    // If we're draining, delay issuing this DMA
    if (igbe->getDrainState() != Drainable::Running) {
        igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
        return;
    }

    igbe->anBegin(annSmFetch, "Fetch Desc");

    DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
            descBase() + cachePnt * sizeof(T),
            pciToDma(descBase() + cachePnt * sizeof(T)),
            curFetching * sizeof(T));
    assert(curFetching);
    igbe->dmaRead(pciToDma(descBase() + cachePnt * sizeof(T)),
                  curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf,
                  igbe->fetchCompDelay);
}

template<class T>
void
IGbE::DescCache<T>::fetchComplete()
{
    T *newDesc;
    igbe->anBegin(annSmFetch, "Fetch Complete");
    for (int x = 0; x < curFetching; x++) {
        newDesc = new T;
        memcpy(newDesc, &fetchBuf[x], sizeof(T));
        unusedCache.push_back(newDesc);
        igbe->anDq(annSmFetch, annUnusedDescQ);
        igbe->anQ(annSmFetch, annUnusedCacheQ);
        igbe->anQ(annSmFetch, annDescQ);
    }


#ifndef NDEBUG
    int oldCp = cachePnt;
#endif

    cachePnt += curFetching;
    assert(cachePnt <= descLen());
    if (cachePnt == descLen())
        cachePnt = 0;

    curFetching = 0;

    DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
            oldCp, cachePnt);

    if ((descTail() >= cachePnt ? (descTail() - cachePnt) : (descLen() -
                                                             cachePnt)) == 0)
    {
        igbe->anWe(annSmFetch, annUnusedDescQ);
    } else if (!(size - usedCache.size() - unusedCache.size())) {
        igbe->anWf(annSmFetch, annDescQ);
    } else {
        igbe->anBegin(annSmFetch, "Wait", CPA::FL_WAIT);
    }

    enableSm();
    igbe->checkDrain();
}

template<class T>
void
IGbE::DescCache<T>::wbComplete()
{

    igbe->anBegin(annSmWb, "Finish Writeback");

    long  curHead = descHead();
#ifndef NDEBUG
    long oldHead = curHead;
#endif

    for (int x = 0; x < wbOut; x++) {
        assert(usedCache.size());
        delete usedCache[0];
        usedCache.pop_front();

        igbe->anDq(annSmWb, annUsedCacheQ);
        igbe->anDq(annSmWb, annDescQ);
    }

    curHead += wbOut;
    wbOut = 0;

    if (curHead >= descLen())
        curHead -= descLen();

    // Update the head
    updateHead(curHead);

    DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
            oldHead, curHead);

    // If we still have more to wb, call wb now
    actionAfterWb();
    if (moreToWb) {
        moreToWb = false;
        DPRINTF(EthernetDesc, "Writeback has more to do\n");
        writeback(wbAlignment);
    }

    if (!wbOut) {
        igbe->checkDrain();
        if (usedCache.size())
            igbe->anBegin(annSmWb, "Wait", CPA::FL_WAIT);
        else
            igbe->anWe(annSmWb, annUsedCacheQ);
    }
    fetchAfterWb();
}

template<class T>
void
IGbE::DescCache<T>::reset()
{
    DPRINTF(EthernetDesc, "Resetting descriptor cache\n");
    for (typename CacheType::size_type x = 0; x < usedCache.size(); x++)
        delete usedCache[x];
    for (typename CacheType::size_type x = 0; x < unusedCache.size(); x++)
        delete unusedCache[x];

    usedCache.clear();
    unusedCache.clear();

    cachePnt = 0;

}

template<class T>
void
IGbE::DescCache<T>::serialize(std::ostream &os)
{
    SERIALIZE_SCALAR(cachePnt);
    SERIALIZE_SCALAR(curFetching);
    SERIALIZE_SCALAR(wbOut);
    SERIALIZE_SCALAR(moreToWb);
    SERIALIZE_SCALAR(wbAlignment);

    typename CacheType::size_type usedCacheSize = usedCache.size();
    SERIALIZE_SCALAR(usedCacheSize);
    for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
        arrayParamOut(os, csprintf("usedCache_%d", x),
                      (uint8_t*)usedCache[x],sizeof(T));
    }

    typename CacheType::size_type unusedCacheSize = unusedCache.size();
    SERIALIZE_SCALAR(unusedCacheSize);
    for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
        arrayParamOut(os, csprintf("unusedCache_%d", x),
                      (uint8_t*)unusedCache[x],sizeof(T));
    }

    Tick fetch_delay = 0, wb_delay = 0;
    if (fetchDelayEvent.scheduled())
        fetch_delay = fetchDelayEvent.when();
    SERIALIZE_SCALAR(fetch_delay);
    if (wbDelayEvent.scheduled())
        wb_delay = wbDelayEvent.when();
    SERIALIZE_SCALAR(wb_delay);


}

template<class T>
void
IGbE::DescCache<T>::unserialize(Checkpoint *cp, const std::string &section)
{
    UNSERIALIZE_SCALAR(cachePnt);
    UNSERIALIZE_SCALAR(curFetching);
    UNSERIALIZE_SCALAR(wbOut);
    UNSERIALIZE_SCALAR(moreToWb);
    UNSERIALIZE_SCALAR(wbAlignment);

    typename CacheType::size_type usedCacheSize;
    UNSERIALIZE_SCALAR(usedCacheSize);
    T *temp;
    for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
        temp = new T;
        arrayParamIn(cp, section, csprintf("usedCache_%d", x),
                     (uint8_t*)temp,sizeof(T));
        usedCache.push_back(temp);
    }

    typename CacheType::size_type unusedCacheSize;
    UNSERIALIZE_SCALAR(unusedCacheSize);
    for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
        temp = new T;
        arrayParamIn(cp, section, csprintf("unusedCache_%d", x),
                     (uint8_t*)temp,sizeof(T));
        unusedCache.push_back(temp);
    }
    Tick fetch_delay = 0, wb_delay = 0;
    UNSERIALIZE_SCALAR(fetch_delay);
    UNSERIALIZE_SCALAR(wb_delay);
    if (fetch_delay)
        igbe->schedule(fetchDelayEvent, fetch_delay);
    if (wb_delay)
        igbe->schedule(wbDelayEvent, wb_delay);


}

///////////////////////////// IGbE::RxDescCache //////////////////////////////

IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<RxDesc>(i, n, s), pktDone(false), splitCount(0),
      pktEvent(this), pktHdrEvent(this), pktDataEvent(this)

{
    annSmFetch = "RX Desc Fetch";
    annSmWb = "RX Desc Writeback";
    annUnusedDescQ = "RX Unused Descriptors";
    annUnusedCacheQ = "RX Unused Descriptor Cache";
    annUsedCacheQ = "RX Used Descriptor Cache";
    annUsedDescQ = "RX Used Descriptors";
    annDescQ = "RX Descriptors";
}

void
IGbE::RxDescCache::pktSplitDone()
{
    splitCount++;
    DPRINTF(EthernetDesc,
            "Part of split packet done: splitcount now %d\n", splitCount);
    assert(splitCount <= 2);
    if (splitCount != 2)
        return;
    splitCount = 0;
    DPRINTF(EthernetDesc,
            "Part of split packet done: calling pktComplete()\n");
    pktComplete();
}

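/*
 * Copy (part of) a received packet into the buffer(s) described by the next
 * RX descriptor. Three descriptor formats are handled: legacy, advanced
 * one-buffer, and advanced header-split, where the latter DMAs the protocol
 * headers and the payload to separate buffers. Returns the total number of
 * bytes of this packet copied so far, so the caller can use further
 * descriptors for packets that span more than one buffer.
 */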
int
IGbE::RxDescCache::writePacket(EthPacketPtr packet, int pkt_offset)
{
    assert(unusedCache.size());
    //if (!unusedCache.size())
    //    return false;

    pktPtr = packet;
    pktDone = false;
    unsigned buf_len, hdr_len;

    RxDesc *desc = unusedCache.front();
    switch (igbe->regs.srrctl.desctype()) {
      case RXDT_LEGACY:
        assert(pkt_offset == 0);
        bytesCopied = packet->length;
        DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n",
                packet->length, igbe->regs.rctl.descSize());
        assert(packet->length < igbe->regs.rctl.descSize());
        igbe->dmaWrite(pciToDma(desc->legacy.buf),
                       packet->length, &pktEvent, packet->data,
                       igbe->rxWriteDelay);
        break;
      case RXDT_ADV_ONEBUF:
        assert(pkt_offset == 0);
        bytesCopied = packet->length;
        buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
            igbe->regs.rctl.descSize();
        DPRINTF(EthernetDesc, "Packet Length: %d srrctl: %#x Desc Size: %d\n",
                packet->length, igbe->regs.srrctl(), buf_len);
        assert(packet->length < buf_len);
        igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
                       packet->length, &pktEvent, packet->data,
                       igbe->rxWriteDelay);
        desc->adv_wb.header_len = htole(0);
        desc->adv_wb.sph = htole(0);
        desc->adv_wb.pkt_len = htole((uint16_t)(pktPtr->length));
        break;
      case RXDT_ADV_SPLIT_A:
        int split_point;

        buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
            igbe->regs.rctl.descSize();
        hdr_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.hdrLen() : 0;
        DPRINTF(EthernetDesc,
                "lpe: %d Packet Length: %d offset: %d srrctl: %#x "
                "hdr addr: %#x Hdr Size: %d desc addr: %#x Desc Size: %d\n",
                igbe->regs.rctl.lpe(), packet->length, pkt_offset,
                igbe->regs.srrctl(), desc->adv_read.hdr, hdr_len,
                desc->adv_read.pkt, buf_len);

        split_point = hsplit(pktPtr);

        if (packet->length <= hdr_len) {
            bytesCopied = packet->length;
            assert(pkt_offset == 0);
            DPRINTF(EthernetDesc, "Hdr split: Entire packet in header\n");
            igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
                           packet->length, &pktEvent, packet->data,
                           igbe->rxWriteDelay);
            desc->adv_wb.header_len = htole((uint16_t)packet->length);
            desc->adv_wb.sph = htole(0);
            desc->adv_wb.pkt_len = htole(0);
        } else if (split_point) {
            if (pkt_offset) {
                // we are only copying some data, header/data has already been
                // copied
                int max_to_copy =
                    std::min(packet->length - pkt_offset, buf_len);
                bytesCopied += max_to_copy;
                DPRINTF(EthernetDesc,
                        "Hdr split: Continuing data buffer copy\n");
                igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
                               max_to_copy, &pktEvent,
                               packet->data + pkt_offset, igbe->rxWriteDelay);
                desc->adv_wb.header_len = htole(0);
                desc->adv_wb.pkt_len = htole((uint16_t)max_to_copy);
                desc->adv_wb.sph = htole(0);
            } else {
                int max_to_copy =
                    std::min(packet->length - split_point, buf_len);
                bytesCopied += max_to_copy + split_point;

                DPRINTF(EthernetDesc, "Hdr split: splitting at %d\n",
                        split_point);
                igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
                               split_point, &pktHdrEvent,
                               packet->data, igbe->rxWriteDelay);
                igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
                               max_to_copy, &pktDataEvent,
                               packet->data + split_point, igbe->rxWriteDelay);
                desc->adv_wb.header_len = htole(split_point);
                desc->adv_wb.sph = 1;
                desc->adv_wb.pkt_len = htole((uint16_t)(max_to_copy));
            }
        } else {
            panic("Header split not fitting within header buffer or "
                  "undecodable packet not fitting in header unsupported\n");
        }
        break;
      default:
        panic("Unimplemented RX receive buffer type: %d\n",
              igbe->regs.srrctl.desctype());
    }
    return bytesCopied;

}

void
IGbE::RxDescCache::pktComplete()
{
    assert(unusedCache.size());
    RxDesc *desc;
    desc = unusedCache.front();

    igbe->anBegin("RXS", "Update Desc");

    uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4;
    DPRINTF(EthernetDesc, "pktPtr->length: %d bytesCopied: %d "
            "stripcrc offset: %d value written: %d %d\n",
            pktPtr->length, bytesCopied, crcfixup,
            htole((uint16_t)(pktPtr->length + crcfixup)),
            (uint16_t)(pktPtr->length + crcfixup));

    // no support for anything but starting at 0
    assert(igbe->regs.rxcsum.pcss() == 0);

    DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n");

    uint16_t status = RXDS_DD;
    uint8_t err = 0;
    uint16_t ext_err = 0;
    uint16_t csum = 0;
    uint16_t ptype = 0;
    uint16_t ip_id = 0;

    assert(bytesCopied <= pktPtr->length);
    if (bytesCopied == pktPtr->length)
        status |= RXDS_EOP;

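    // Emulate receive checksum offload: when enabled in RXCSUM, verify the
    // IP/TCP/UDP checksums of the received packet and record the outcome in
    // the descriptor's status/error fields (a nonzero residual means the
    // checksum is bad).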
    IpPtr ip(pktPtr);

    if (ip) {
        DPRINTF(EthernetDesc, "Processing IP packet with Id=%d\n", ip->id());
        ptype |= RXDP_IPV4;
        ip_id = ip->id();

        if (igbe->regs.rxcsum.ipofld()) {
            DPRINTF(EthernetDesc, "Checking IP checksum\n");
            status |= RXDS_IPCS;
            csum = htole(cksum(ip));
            igbe->rxIpChecksums++;
            if (cksum(ip) != 0) {
                err |= RXDE_IPE;
                ext_err |= RXDEE_IPE;
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
            }
        }
        TcpPtr tcp(ip);
        if (tcp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking TCP checksum\n");
            status |= RXDS_TCPCS;
            ptype |= RXDP_TCP;
            csum = htole(cksum(tcp));
            igbe->rxTcpChecksums++;
            if (cksum(tcp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                err |= RXDE_TCPE;
                ext_err |= RXDEE_TCPE;
            }
        }

        UdpPtr udp(ip);
        if (udp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking UDP checksum\n");
            status |= RXDS_UDPCS;
            ptype |= RXDP_UDP;
            csum = htole(cksum(udp));
            igbe->rxUdpChecksums++;
            if (cksum(udp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                ext_err |= RXDEE_TCPE;
                err |= RXDE_TCPE;
            }
        }
    } else { // if ip
        DPRINTF(EthernetSM, "Processing non-IP packet\n");
    }

    switch (igbe->regs.srrctl.desctype()) {
      case RXDT_LEGACY:
        desc->legacy.len = htole((uint16_t)(pktPtr->length + crcfixup));
        desc->legacy.status = htole(status);
        desc->legacy.errors = htole(err);
        // No vlan support at this point... just set it to 0
        desc->legacy.vlan = 0;
        break;
      case RXDT_ADV_SPLIT_A:
      case RXDT_ADV_ONEBUF:
        desc->adv_wb.rss_type = htole(0);
        desc->adv_wb.pkt_type = htole(ptype);
        if (igbe->regs.rxcsum.pcsd()) {
            // no rss support right now
            desc->adv_wb.rss_hash = htole(0);
        } else {
            desc->adv_wb.id = htole(ip_id);
            desc->adv_wb.csum = htole(csum);
        }
        desc->adv_wb.status = htole(status);
        desc->adv_wb.errors = htole(ext_err);
        // no vlan support
        desc->adv_wb.vlan_tag = htole(0);
        break;
      default:
        panic("Unimplemented RX receive buffer type %d\n",
              igbe->regs.srrctl.desctype());
    }

    DPRINTF(EthernetDesc, "Descriptor complete w0: %#x w1: %#x\n",
            desc->adv_read.pkt, desc->adv_read.hdr);

    if (bytesCopied == pktPtr->length) {
        DPRINTF(EthernetDesc,
                "Packet completely written to descriptor buffers\n");
        // Deal with the rx timer interrupts
        if (igbe->regs.rdtr.delay()) {
            Tick delay = igbe->regs.rdtr.delay() * igbe->intClock();
            DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n", delay);
            igbe->reschedule(igbe->rdtrEvent, curTick() + delay);
        }

        if (igbe->regs.radv.idv()) {
            Tick delay = igbe->regs.radv.idv() * igbe->intClock();
            DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n", delay);
            if (!igbe->radvEvent.scheduled()) {
                igbe->schedule(igbe->radvEvent, curTick() + delay);
            }
        }

        // if neither radv nor rdtr, maybe itr is set...
        if (!igbe->regs.rdtr.delay() && !igbe->regs.radv.idv()) {
            DPRINTF(EthernetSM,
                    "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
            igbe->postInterrupt(IT_RXT);
        }

        // If the packet is small enough, interrupt appropriately
        // I wonder if this is delayed or not?!
        if (pktPtr->length <= igbe->regs.rsrpd.idv()) {
            DPRINTF(EthernetSM,
                    "RXS: Posting IT_SRPD because small packet received\n");
            igbe->postInterrupt(IT_SRPD);
        }
        bytesCopied = 0;
    }

    pktPtr = NULL;
    igbe->checkDrain();
    enableSm();
    pktDone = true;

    igbe->anBegin("RXS", "Done Updating Desc");
    DPRINTF(EthernetDesc, "Processing of this descriptor complete\n");
    igbe->anDq("RXS", annUnusedCacheQ);
    unusedCache.pop_front();
    igbe->anQ("RXS", annUsedCacheQ);
    usedCache.push_back(desc);
}

void
IGbE::RxDescCache::enableSm()
{
    if (!igbe->drainManager) {
        igbe->rxTick = true;
        igbe->restartClock();
    }
}

bool
IGbE::RxDescCache::packetDone()
{
    if (pktDone) {
        pktDone = false;
        return true;
    }
    return false;
}

bool
IGbE::RxDescCache::hasOutstandingEvents()
{
    return pktEvent.scheduled() || wbEvent.scheduled() ||
        fetchEvent.scheduled() || pktHdrEvent.scheduled() ||
        pktDataEvent.scheduled();

}

void
IGbE::RxDescCache::serialize(std::ostream &os)
{
    DescCache<RxDesc>::serialize(os);
    SERIALIZE_SCALAR(pktDone);
    SERIALIZE_SCALAR(splitCount);
    SERIALIZE_SCALAR(bytesCopied);
}

void
IGbE::RxDescCache::unserialize(Checkpoint *cp, const std::string &section)
{
    DescCache<RxDesc>::unserialize(cp, section);
    UNSERIALIZE_SCALAR(pktDone);
    UNSERIALIZE_SCALAR(splitCount);
    UNSERIALIZE_SCALAR(bytesCopied);
}


///////////////////////////// IGbE::TxDescCache //////////////////////////////

IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false),
      pktWaiting(false), pktMultiDesc(false),
      completionAddress(0), completionEnabled(false),
      useTso(false), tsoHeaderLen(0), tsoMss(0), tsoTotalLen(0), tsoUsedLen(0),
      tsoPrevSeq(0), tsoPktPayloadBytes(0), tsoLoadedHeader(false),
      tsoPktHasHeader(false), tsoDescBytesUsed(0), tsoCopyBytes(0), tsoPkts(0),
      pktEvent(this), headerEvent(this), nullEvent(this)
{
    annSmFetch = "TX Desc Fetch";
    annSmWb = "TX Desc Writeback";
    annUnusedDescQ = "TX Unused Descriptors";
    annUnusedCacheQ = "TX Unused Descriptor Cache";
    annUsedCacheQ = "TX Used Descriptor Cache";
    annUsedDescQ = "TX Used Descriptors";
    annDescQ = "TX Descriptors";
}

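/*
 * Walk any context descriptors at the front of the cache before the data
 * descriptors. A context descriptor with TSE set enables TCP segmentation
 * offload: it supplies the header length and MSS, and the device then emits
 * a sequence of packets, each carrying a copy of the header plus up to MSS
 * bytes of payload gathered from the following data descriptors.
 */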
void
IGbE::TxDescCache::processContextDesc()
{
    assert(unusedCache.size());
    TxDesc *desc;

    DPRINTF(EthernetDesc, "Checking and processing context descriptors\n");

    while (!useTso && unusedCache.size() &&
           TxdOp::isContext(unusedCache.front())) {
        DPRINTF(EthernetDesc, "Got context descriptor type...\n");

        desc = unusedCache.front();
        DPRINTF(EthernetDesc, "Descriptor upper: %#x lower: %#X\n",
                desc->d1, desc->d2);


        // is this going to be a tcp or udp packet?
        isTcp = TxdOp::tcp(desc) ? true : false;

        // set up all the TSO variables; they'll be ignored if we don't use
        // TSO for this connection
        tsoHeaderLen = TxdOp::hdrlen(desc);
        tsoMss  = TxdOp::mss(desc);

        if (TxdOp::isType(desc, TxdOp::TXD_CNXT) && TxdOp::tse(desc)) {
            DPRINTF(EthernetDesc, "TCP offload enabled for packet hdrlen: "
                    "%d mss: %d paylen %d\n", TxdOp::hdrlen(desc),
                    TxdOp::mss(desc), TxdOp::getLen(desc));
            useTso = true;
            tsoTotalLen = TxdOp::getLen(desc);
            tsoLoadedHeader = false;
            tsoDescBytesUsed = 0;
            tsoUsedLen = 0;
            tsoPrevSeq = 0;
            tsoPktHasHeader = false;
            tsoPkts = 0;
            tsoCopyBytes = 0;
        }

        TxdOp::setDd(desc);
        unusedCache.pop_front();
        igbe->anDq("TXS", annUnusedCacheQ);
        usedCache.push_back(desc);
        igbe->anQ("TXS", annUsedCacheQ);
    }

    if (!unusedCache.size())
        return;

    desc = unusedCache.front();
    if (!useTso && TxdOp::isType(desc, TxdOp::TXD_ADVDATA) &&
        TxdOp::tse(desc)) {
        DPRINTF(EthernetDesc, "TCP offload(adv) enabled for packet "
                "hdrlen: %d mss: %d paylen %d\n",
                tsoHeaderLen, tsoMss, TxdOp::getTsoLen(desc));
        useTso = true;
        tsoTotalLen = TxdOp::getTsoLen(desc);
        tsoLoadedHeader = false;
        tsoDescBytesUsed = 0;
        tsoUsedLen = 0;
        tsoPrevSeq = 0;
        tsoPktHasHeader = false;
        tsoPkts = 0;
    }

    if (useTso && !tsoLoadedHeader) {
        // we need to fetch a header
        DPRINTF(EthernetDesc, "Starting DMA of TSO header\n");
        assert(TxdOp::isData(desc) && TxdOp::getLen(desc) >= tsoHeaderLen);
        pktWaiting = true;
        assert(tsoHeaderLen <= 256);
        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
                      tsoHeaderLen, &headerEvent, tsoHeader, 0);
    }
}

void
IGbE::TxDescCache::headerComplete()
{
    DPRINTF(EthernetDesc, "TSO: Fetching TSO header complete\n");
    pktWaiting = false;

    assert(unusedCache.size());
    TxDesc *desc = unusedCache.front();
    DPRINTF(EthernetDesc, "TSO: len: %d tsoHeaderLen: %d\n",
            TxdOp::getLen(desc), tsoHeaderLen);

    if (TxdOp::getLen(desc) == tsoHeaderLen) {
        tsoDescBytesUsed = 0;
        tsoLoadedHeader = true;
        unusedCache.pop_front();
        usedCache.push_back(desc);
    } else {
        DPRINTF(EthernetDesc, "TSO: header part of larger payload\n");
        tsoDescBytesUsed = tsoHeaderLen;
        tsoLoadedHeader = true;
    }
    enableSm();
    igbe->checkDrain();
}

unsigned
IGbE::TxDescCache::getPacketSize(EthPacketPtr p)
{
    if (!unusedCache.size())
        return 0;

    DPRINTF(EthernetDesc, "Starting processing of descriptor\n");

    assert(!useTso || tsoLoadedHeader);
    TxDesc *desc = unusedCache.front();

    if (useTso) {
        DPRINTF(EthernetDesc, "getPacket(): TxDescriptor data "
                "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
        DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
                "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
                tsoTotalLen, tsoUsedLen, tsoLoadedHeader);

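        // Each TSO segment carries at most MSS payload bytes plus the header.
        // Limit this copy to what is left in the current descriptor and, if
        // the header has already been placed in the packet, to the space
        // remaining in this segment.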
1681        if (tsoPktHasHeader)
1682            tsoCopyBytes =  std::min((tsoMss + tsoHeaderLen) - p->length,
1683                                     TxdOp::getLen(desc) - tsoDescBytesUsed);
1684        else
1685            tsoCopyBytes =  std::min(tsoMss,
1686                                     TxdOp::getLen(desc) - tsoDescBytesUsed);
1687        unsigned pkt_size =
1688            tsoCopyBytes + (tsoPktHasHeader ? 0 : tsoHeaderLen);
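            // Worked example (hypothetical numbers): with tsoMss = 1460 and
            // tsoHeaderLen = 54, the first chunk of a new segment copies at
            // most 1460 payload bytes from this descriptor and the reported
            // size includes the 54-byte header (up to 1514 bytes); once the
            // header is in the packet, later descriptors only top the
            // segment up to tsoMss + tsoHeaderLen bytes.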
1689
1690        DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d "
1691                "this descLen: %d\n",
1692                tsoDescBytesUsed, tsoCopyBytes, TxdOp::getLen(desc));
1693        DPRINTF(EthernetDesc, "TSO: pktHasHeader: %d\n", tsoPktHasHeader);
1694        DPRINTF(EthernetDesc, "TSO: Next packet is %d bytes\n", pkt_size);
1695        return pkt_size;
1696    }
1697
1698    DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
1699            TxdOp::getLen(unusedCache.front()));
1700    return TxdOp::getLen(desc);
1701}
1702
1703void
1704IGbE::TxDescCache::getPacketData(EthPacketPtr p)
1705{
1706    assert(unusedCache.size());
1707
1708    TxDesc *desc;
1709    desc = unusedCache.front();
1710
1711    DPRINTF(EthernetDesc, "getPacketData(): TxDescriptor data "
1712            "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
1713    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
1714           TxdOp::getLen(desc));
1715
1716    pktPtr = p;
1717
1718    pktWaiting = true;
1719
1720    DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d\n", p->length);
1721
1722    if (useTso) {
1723        assert(tsoLoadedHeader);
1724        if (!tsoPktHasHeader) {
1725            DPRINTF(EthernetDesc,
1726                    "Loading TSO header (%d bytes) into start of packet\n",
1727                    tsoHeaderLen);
1728            memcpy(p->data, &tsoHeader, tsoHeaderLen);
1729            p->length += tsoHeaderLen;
1730            tsoPktHasHeader = true;
1731        }
1732    }
1733
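        // For TSO, DMA only the next chunk of this descriptor's buffer,
        // starting past the bytes already consumed by earlier segments
        // (tsoDescBytesUsed), rather than the whole descriptor at once.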
1734    if (useTso) {
1735        DPRINTF(EthernetDesc,
1736                "Starting DMA of packet at offset %d length: %d\n",
1737                p->length, tsoCopyBytes);
1738        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc))
1739                      + tsoDescBytesUsed,
1740                      tsoCopyBytes, &pktEvent, p->data + p->length,
1741                      igbe->txReadDelay);
1742        tsoDescBytesUsed += tsoCopyBytes;
1743        assert(tsoDescBytesUsed <= TxdOp::getLen(desc));
1744    } else {
1745        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
1746                      TxdOp::getLen(desc), &pktEvent, p->data + p->length,
1747                      igbe->txReadDelay);
1748    }
1749}
1750
1751void
1752IGbE::TxDescCache::pktComplete()
1753{
1754
1755    TxDesc *desc;
1756    assert(unusedCache.size());
1757    assert(pktPtr);
1758
1759    igbe->anBegin("TXS", "Update Desc");
1760
1761    DPRINTF(EthernetDesc, "DMA of packet complete\n");
1762
1763
1764    desc = unusedCache.front();
1765    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
1766           TxdOp::getLen(desc));
1767
1768    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
1769            desc->d1, desc->d2);
1770
1771    // Set the length of the data in the EtherPacket
1772    if (useTso) {
1773        DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
1774            "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
1775            tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
1776        pktPtr->length += tsoCopyBytes;
1777        tsoUsedLen += tsoCopyBytes;
1778        DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d\n",
1779            tsoDescBytesUsed, tsoCopyBytes);
1780    } else
1781        pktPtr->length += TxdOp::getLen(desc);
1782
1783
1784
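        // The packet isn't finished yet if either (a) this is a non-TSO
        // descriptor chain that hasn't reached EOP, or (b) a TSO segment
        // still needs more payload from later descriptors. In that case
        // retire this descriptor and wait for the next one.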
1785    if ((!TxdOp::eop(desc) && !useTso) ||
1786        (pktPtr->length < ( tsoMss + tsoHeaderLen) &&
1787         tsoTotalLen != tsoUsedLen && useTso)) {
1788        assert(!useTso || (tsoDescBytesUsed == TxdOp::getLen(desc)));
1789        igbe->anDq("TXS", annUnusedCacheQ);
1790        unusedCache.pop_front();
1791        igbe->anQ("TXS", annUsedCacheQ);
1792        usedCache.push_back(desc);
1793
1794        tsoDescBytesUsed = 0;
1795        pktDone = true;
1796        pktWaiting = false;
1797        pktMultiDesc = true;
1798
1799        DPRINTF(EthernetDesc, "Partial Packet Descriptor of %d bytes Done\n",
1800                pktPtr->length);
1801        pktPtr = NULL;
1802
1803        enableSm();
1804        igbe->checkDrain();
1805        return;
1806    }
1807
1808
1809    pktMultiDesc = false;
1810    // no support for vlans
1811    assert(!TxdOp::vle(desc));
1812
1813    // we only support single packet descriptors at this point
1814    if (!useTso)
1815        assert(TxdOp::eop(desc));
1816
1817    // mark the descriptor done (DD) if report status (RS) was requested
1818    if (TxdOp::rs(desc))
1819        TxdOp::setDd(desc);
1820
1821    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
1822            desc->d1, desc->d2);
1823
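        // Each TSO segment reuses the prototype header: bump the IP id,
        // rewrite the IP/UDP lengths for this segment, advance the TCP
        // sequence number by the payload already sent, and clear FIN/PSH on
        // all but the last segment.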
1824    if (useTso) {
1825        IpPtr ip(pktPtr);
1826        if (ip) {
1827            DPRINTF(EthernetDesc, "TSO: Modifying IP header. Id + %d\n",
1828                    tsoPkts);
1829            ip->id(ip->id() + tsoPkts++);
1830            ip->len(pktPtr->length - EthPtr(pktPtr)->size());
1831
1832            TcpPtr tcp(ip);
1833            if (tcp) {
1834                DPRINTF(EthernetDesc,
1835                        "TSO: Modifying TCP header. old seq %d + %d\n",
1836                        tcp->seq(), tsoPrevSeq);
1837                tcp->seq(tcp->seq() + tsoPrevSeq);
1838                if (tsoUsedLen != tsoTotalLen)
1839                    tcp->flags(tcp->flags() & ~9); // clear fin & psh
1840            }
1841            UdpPtr udp(ip);
1842            if (udp) {
1843                DPRINTF(EthernetDesc, "TSO: Modifying UDP header.\n");
1844                udp->len(pktPtr->length - EthPtr(pktPtr)->size());
1845            }
1846        }
1847        tsoPrevSeq = tsoUsedLen;
1848    }
1849
1850    if (DTRACE(EthernetDesc)) {
1851        IpPtr ip(pktPtr);
1852        if (ip)
1853            DPRINTF(EthernetDesc, "Processing Ip packet with Id=%d\n",
1854                    ip->id());
1855        else
1856            DPRINTF(EthernetDesc, "Processing Non-Ip packet\n");
1857    }
1858
1859    // Checksums are only offloaded for new descriptor types
1860    if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) {
1861        DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
1862        IpPtr ip(pktPtr);
1863        assert(ip);
1864        if (TxdOp::ixsm(desc)) {
1865            ip->sum(0);
1866            ip->sum(cksum(ip));
1867            igbe->txIpChecksums++;
1868            DPRINTF(EthernetDesc, "Calculated IP checksum\n");
1869        }
1870        if (TxdOp::txsm(desc)) {
1871            TcpPtr tcp(ip);
1872            UdpPtr udp(ip);
1873            if (tcp) {
1874                tcp->sum(0);
1875                tcp->sum(cksum(tcp));
1876                igbe->txTcpChecksums++;
1877                DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
1878            } else if (udp) {
1879                assert(udp);
1880                udp->sum(0);
1881                udp->sum(cksum(udp));
1882                igbe->txUdpChecksums++;
1883                DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
1884            } else {
1885                panic("Told to checksum, but don't know how\n");
1886            }
1887        }
1888    }
1889
1890    if (TxdOp::ide(desc)) {
1891        // Deal with the transmit interrupt delay timers (TIDV/TADV)
1892        DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
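            // TIDV is restarted on every descriptor with IDE set (reschedule
            // with the reuse flag), while TADV acts as an upper bound and is
            // only scheduled if it isn't already pending.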
1893        if (igbe->regs.tidv.idv()) {
1894            Tick delay = igbe->regs.tidv.idv() * igbe->intClock();
1895            DPRINTF(EthernetDesc, "setting tidv\n");
1896            igbe->reschedule(igbe->tidvEvent, curTick() + delay, true);
1897        }
1898
1899        if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
1900            Tick delay = igbe->regs.tadv.idv() * igbe->intClock();
1901            DPRINTF(EthernetDesc, "setting tadv\n");
1902            if (!igbe->tadvEvent.scheduled()) {
1903                igbe->schedule(igbe->tadvEvent, curTick() + delay);
1904            }
1905        }
1906    }
1907
1908
1909    if (!useTso ||  TxdOp::getLen(desc) == tsoDescBytesUsed) {
1910        DPRINTF(EthernetDesc, "Descriptor Done\n");
1911        igbe->anDq("TXS", annUnusedCacheQ);
1912        unusedCache.pop_front();
1913        igbe->anQ("TXS", annUsedCacheQ);
1914        usedCache.push_back(desc);
1915        tsoDescBytesUsed = 0;
1916    }
1917
1918    if (useTso && tsoUsedLen == tsoTotalLen)
1919        useTso = false;
1920
1921
1922    DPRINTF(EthernetDesc,
1923            "------Packet of %d bytes ready for transmission-------\n",
1924            pktPtr->length);
1925    pktDone = true;
1926    pktWaiting = false;
1927    pktPtr = NULL;
1928    tsoPktHasHeader = false;
1929
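        // Writeback policy: with WTHRESH == 0 every completed descriptor is
        // written back immediately; otherwise writeback is deferred until at
        // least WTHRESH descriptors (counted per cache block when GRAN is
        // clear) have been used, and the (cacheBlockSize() - 1) >> 4 argument
        // aligns the writeback to cache-block boundaries in 16-byte
        // descriptor units.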
1930    if (igbe->regs.txdctl.wthresh() == 0) {
1931        igbe->anBegin("TXS", "Desc Writeback");
1932        DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
1933        writeback(0);
1934    } else if (!igbe->regs.txdctl.gran() && igbe->regs.txdctl.wthresh() <=
1935               descInBlock(usedCache.size())) {
1936        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
1937        igbe->anBegin("TXS", "Desc Writeback");
1938        writeback((igbe->cacheBlockSize()-1)>>4);
1939    } else if (igbe->regs.txdctl.wthresh() <= usedCache.size()) {
1940        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
1941        igbe->anBegin("TXS", "Desc Writeback");
1942        writeback((igbe->cacheBlockSize()-1)>>4);
1943    }
1944
1945    enableSm();
1946    igbe->checkDrain();
1947}
1948
1949void
1950IGbE::TxDescCache::actionAfterWb()
1951{
1952    DPRINTF(EthernetDesc, "actionAfterWb() completionEnabled: %d\n",
1953            completionEnabled);
1954    igbe->postInterrupt(iGbReg::IT_TXDW);
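        // When TXD completion is enabled, mirror the current head pointer to
        // the (4-byte aligned) completion address so software can track
        // progress without reading the descriptors themselves.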
1955    if (completionEnabled) {
1956        descEnd = igbe->regs.tdh();
1957        DPRINTF(EthernetDesc,
1958                "Completion writing back value: %d to addr: %#x\n", descEnd,
1959                completionAddress);
1960        igbe->dmaWrite(pciToDma(mbits(completionAddress, 63, 2)),
1961                       sizeof(descEnd), &nullEvent, (uint8_t*)&descEnd, 0);
1962    }
1963}
1964
1965void
1966IGbE::TxDescCache::serialize(std::ostream &os)
1967{
1968    DescCache<TxDesc>::serialize(os);
1969    SERIALIZE_SCALAR(pktDone);
1970    SERIALIZE_SCALAR(isTcp);
1971    SERIALIZE_SCALAR(pktWaiting);
1972    SERIALIZE_SCALAR(pktMultiDesc);
1973
1974    SERIALIZE_SCALAR(useTso);
1975    SERIALIZE_SCALAR(tsoHeaderLen);
1976    SERIALIZE_SCALAR(tsoMss);
1977    SERIALIZE_SCALAR(tsoTotalLen);
1978    SERIALIZE_SCALAR(tsoUsedLen);
1979    SERIALIZE_SCALAR(tsoPrevSeq);
1980    SERIALIZE_SCALAR(tsoPktPayloadBytes);
1981    SERIALIZE_SCALAR(tsoLoadedHeader);
1982    SERIALIZE_SCALAR(tsoPktHasHeader);
1983    SERIALIZE_ARRAY(tsoHeader, 256);
1984    SERIALIZE_SCALAR(tsoDescBytesUsed);
1985    SERIALIZE_SCALAR(tsoCopyBytes);
1986    SERIALIZE_SCALAR(tsoPkts);
1987
1988    SERIALIZE_SCALAR(completionAddress);
1989    SERIALIZE_SCALAR(completionEnabled);
1990    SERIALIZE_SCALAR(descEnd);
1991}
1992
1993void
1994IGbE::TxDescCache::unserialize(Checkpoint *cp, const std::string &section)
1995{
1996    DescCache<TxDesc>::unserialize(cp, section);
1997    UNSERIALIZE_SCALAR(pktDone);
1998    UNSERIALIZE_SCALAR(isTcp);
1999    UNSERIALIZE_SCALAR(pktWaiting);
2000    UNSERIALIZE_SCALAR(pktMultiDesc);
2001
2002    UNSERIALIZE_SCALAR(useTso);
2003    UNSERIALIZE_SCALAR(tsoHeaderLen);
2004    UNSERIALIZE_SCALAR(tsoMss);
2005    UNSERIALIZE_SCALAR(tsoTotalLen);
2006    UNSERIALIZE_SCALAR(tsoUsedLen);
2007    UNSERIALIZE_SCALAR(tsoPrevSeq);
2008    UNSERIALIZE_SCALAR(tsoPktPayloadBytes);
2009    UNSERIALIZE_SCALAR(tsoLoadedHeader);
2010    UNSERIALIZE_SCALAR(tsoPktHasHeader);
2011    UNSERIALIZE_ARRAY(tsoHeader, 256);
2012    UNSERIALIZE_SCALAR(tsoDescBytesUsed);
2013    UNSERIALIZE_SCALAR(tsoCopyBytes);
2014    UNSERIALIZE_SCALAR(tsoPkts);
2015
2016    UNSERIALIZE_SCALAR(completionAddress);
2017    UNSERIALIZE_SCALAR(completionEnabled);
2018    UNSERIALIZE_SCALAR(descEnd);
2019}
2020
2021bool
2022IGbE::TxDescCache::packetAvailable()
2023{
2024    if (pktDone) {
2025        pktDone = false;
2026        return true;
2027    }
2028    return false;
2029}
2030
2031void
2032IGbE::TxDescCache::enableSm()
2033{
2034    if (!igbe->drainManager) {
2035        igbe->txTick = true;
2036        igbe->restartClock();
2037    }
2038}
2039
2040bool
2041IGbE::TxDescCache::hasOutstandingEvents()
2042{
2043    return pktEvent.scheduled() || wbEvent.scheduled() ||
2044        fetchEvent.scheduled();
2045}
2046
2047
2048///////////////////////////////////// IGbE /////////////////////////////////
2049
2050void
2051IGbE::restartClock()
2052{
2053    if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) &&
2054        getDrainState() == Drainable::Running)
2055        schedule(tickEvent, clockEdge(Cycles(1)));
2056}
2057
2058unsigned int
2059IGbE::drain(DrainManager *dm)
2060{
2061    unsigned int count;
2062    count = pioPort.drain(dm) + dmaPort.drain(dm);
2063    if (rxDescCache.hasOutstandingEvents() ||
2064        txDescCache.hasOutstandingEvents()) {
2065        count++;
2066        drainManager = dm;
2067    }
2068
2069    txFifoTick = false;
2070    txTick = false;
2071    rxTick = false;
2072
2073    if (tickEvent.scheduled())
2074        deschedule(tickEvent);
2075
2076    if (count) {
2077        DPRINTF(Drain, "IGbE not drained\n");
2078        setDrainState(Drainable::Draining);
2079    } else
2080        setDrainState(Drainable::Drained);
2081
2082    return count;
2083}
2084
2085void
2086IGbE::drainResume()
2087{
2088    Drainable::drainResume();
2089
2090    txFifoTick = true;
2091    txTick = true;
2092    rxTick = true;
2093
2094    restartClock();
2095    DPRINTF(EthernetSM, "resuming from drain\n");
2096}
2097
2098void
2099IGbE::checkDrain()
2100{
2101    if (!drainManager)
2102        return;
2103
2104    txFifoTick = false;
2105    txTick = false;
2106    rxTick = false;
2107    if (!rxDescCache.hasOutstandingEvents() &&
2108        !txDescCache.hasOutstandingEvents()) {
2109        DPRINTF(Drain, "IGbE done draining, processing drain event\n");
2110        drainManager->signalDrainDone();
2111        drainManager = NULL;
2112    }
2113}
2114
2115void
2116IGbE::txStateMachine()
2117{
2118    if (!regs.tctl.en()) {
2119        txTick = false;
2120        DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
2121        return;
2122    }
2123
2124    // If we have a packet available and its length is not 0 (meaning it's not
2125    // a partially built multi-descriptor packet), put it in the FIFO;
2126    // otherwise on the next iteration we'll get the rest of the data
2127    if (txPacket && txDescCache.packetAvailable()
2128        && !txDescCache.packetMultiDesc() && txPacket->length) {
2129        anQ("TXS", "TX FIFO Q");
2130        DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
2131#ifndef NDEBUG
2132        bool success =
2133#endif
2134            txFifo.push(txPacket);
2135        txFifoTick = true && !drainManager;
2136        assert(success);
2137        txPacket = NULL;
2138        anBegin("TXS", "Desc Writeback");
2139        txDescCache.writeback((cacheBlockSize()-1)>>4);
2140        return;
2141    }
2142
2143    // Only support descriptor granularity
2144    if (regs.txdctl.lwthresh() &&
2145        txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
2146        DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
2147        postInterrupt(IT_TXDLOW);
2148    }
2149
2150    if (!txPacket) {
2151        txPacket = std::make_shared<EthPacketData>(16384);
2152    }
2153
2154    if (!txDescCache.packetWaiting()) {
2155        if (txDescCache.descLeft() == 0) {
2156            postInterrupt(IT_TXQE);
2157            anBegin("TXS", "Desc Writeback");
2158            txDescCache.writeback(0);
2159            anBegin("TXS", "Desc Fetch");
2160            anWe("TXS", txDescCache.annUnusedCacheQ);
2161            txDescCache.fetchDescriptors();
2162            DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
2163                    "writeback, stopping ticking, and posting TXQE\n");
2164            txTick = false;
2165            return;
2166        }
2167
2168
2169        if (!(txDescCache.descUnused())) {
2170            anBegin("TXS", "Desc Fetch");
2171            txDescCache.fetchDescriptors();
2172            anWe("TXS", txDescCache.annUnusedCacheQ);
2173            DPRINTF(EthernetSM, "TXS: No descriptors available in cache, "
2174                    "fetching and stopping ticking\n");
2175            txTick = false;
2176            return;
2177        }
2178        anPq("TXS", txDescCache.annUnusedCacheQ);
2179
2180
2181        txDescCache.processContextDesc();
2182        if (txDescCache.packetWaiting()) {
2183            DPRINTF(EthernetSM,
2184                    "TXS: Fetching TSO header, stopping ticking\n");
2185            txTick = false;
2186            return;
2187        }
2188
2189        unsigned size = txDescCache.getPacketSize(txPacket);
2190        if (size > 0 && txFifo.avail() > size) {
2191            anRq("TXS", "TX FIFO Q");
2192            anBegin("TXS", "DMA Packet");
2193            DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and "
2194                    "beginning DMA of next packet\n", size);
2195            txFifo.reserve(size);
2196            txDescCache.getPacketData(txPacket);
2197        } else if (size == 0) {
2198            DPRINTF(EthernetSM, "TXS: getPacketSize returned: %d\n", size);
2199            DPRINTF(EthernetSM,
2200                    "TXS: No packets to get, writing back used descriptors\n");
2201            anBegin("TXS", "Desc Writeback");
2202            txDescCache.writeback(0);
2203        } else {
2204            anWf("TXS", "TX FIFO Q");
2205            DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
2206                    "available in FIFO\n");
2207            txTick = false;
2208        }
2209
2210
2211        return;
2212    }
2213    DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
2214    txTick = false;
2215}
2216
2217bool
2218IGbE::ethRxPkt(EthPacketPtr pkt)
2219{
2220    rxBytes += pkt->length;
2221    rxPackets++;
2222
2223    DPRINTF(Ethernet, "RxFIFO: Receiving packet from wire\n");
2224    anBegin("RXQ", "Wire Recv");
2225
2226
2227    if (!regs.rctl.en()) {
2228        DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
2229        anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
2230        return true;
2231    }
2232
2233    // restart the state machines if they are stopped
2234    rxTick = true && !drainManager;
2235    if ((rxTick || txTick) && !tickEvent.scheduled()) {
2236        DPRINTF(EthernetSM,
2237                "RXS: received packet into fifo, starting ticking\n");
2238        restartClock();
2239    }
2240
2241    if (!rxFifo.push(pkt)) {
2242        DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
2243        postInterrupt(IT_RXO, true);
2244        anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
2245        return false;
2246    }
2247
2248    if (CPA::available() && cpa->enabled()) {
2249        assert(sys->numSystemsRunning <= 2);
2250        System *other_sys;
2251        if (sys->systemList[0] == sys)
2252            other_sys = sys->systemList[1];
2253        else
2254            other_sys = sys->systemList[0];
2255
2256        cpa->hwDq(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
2257        anQ("RXQ", "RX FIFO Q");
2258        cpa->hwWe(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
2259    }
2260
2261    return true;
2262}
2263
2264
2265void
2266IGbE::rxStateMachine()
2267{
2268    if (!regs.rctl.en()) {
2269        rxTick = false;
2270        DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
2271        return;
2272    }
2273
2274    // If the packet is done check for interrupts/descriptors/etc
2275    if (rxDescCache.packetDone()) {
2276        rxDmaPacket = false;
2277        DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
2278        int descLeft = rxDescCache.descLeft();
2279        DPRINTF(EthernetSM, "RXS: descLeft: %d rdmts: %d rdlen: %d\n",
2280                descLeft, regs.rctl.rdmts(), regs.rdlen());
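            // RDMTS selects the free-descriptor threshold (1/2, 1/4 or 1/8
            // of the ring) below which RXDMT is raised; the cases
            // intentionally fall through so the interrupt posts once
            // descLeft is at or below the selected fraction.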
2281        switch (regs.rctl.rdmts()) {
2282          case 2: if (descLeft > .125 * regs.rdlen()) break;
2283          case 1: if (descLeft > .250 * regs.rdlen()) break;
2284          case 0: if (descLeft > .500 * regs.rdlen())  break;
2285            DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) "
2286                    "because of descriptors left\n");
2287            postInterrupt(IT_RXDMT);
2288            break;
2289        }
2290
2291        if (rxFifo.empty())
2292            rxDescCache.writeback(0);
2293
2294        if (descLeft == 0) {
2295            anBegin("RXS", "Writeback Descriptors");
2296            rxDescCache.writeback(0);
2297            DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
2298                    " writeback and stopping ticking\n");
2299            rxTick = false;
2300        }
2301
2302        // only support descriptor granularity
2303        assert(regs.rxdctl.gran());
2304
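            // Write back used RX descriptors; the mask argument (in 16-byte
            // descriptor units) limits the writeback alignment to WTHRESH or
            // to one cache block, whichever is smaller.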
2305        if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
2306            DPRINTF(EthernetSM,
2307                    "RXS: Writing back because WTHRESH >= descUsed\n");
2308            anBegin("RXS", "Writeback Descriptors");
2309            if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
2310                rxDescCache.writeback(regs.rxdctl.wthresh()-1);
2311            else
2312                rxDescCache.writeback((cacheBlockSize()-1)>>4);
2313        }
2314
2315        if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
2316            ((rxDescCache.descLeft() - rxDescCache.descUnused()) >
2317             regs.rxdctl.hthresh())) {
2318            DPRINTF(EthernetSM, "RXS: Fetching descriptors because "
2319                    "descUnused < PTHRESH\n");
2320            anBegin("RXS", "Fetch Descriptors");
2321            rxDescCache.fetchDescriptors();
2322        }
2323
2324        if (rxDescCache.descUnused() == 0) {
2325            anBegin("RXS", "Fetch Descriptors");
2326            rxDescCache.fetchDescriptors();
2327            anWe("RXS", rxDescCache.annUnusedCacheQ);
2328            DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
2329                    "fetching descriptors and stopping ticking\n");
2330            rxTick = false;
2331        }
2332        return;
2333    }
2334
2335    if (rxDmaPacket) {
2336        DPRINTF(EthernetSM,
2337                "RXS: stopping ticking until packet DMA completes\n");
2338        rxTick = false;
2339        return;
2340    }
2341
2342    if (!rxDescCache.descUnused()) {
2343        anBegin("RXS", "Fetch Descriptors");
2344        rxDescCache.fetchDescriptors();
2345        anWe("RXS", rxDescCache.annUnusedCacheQ);
2346        DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
2347                "fetching and stopping ticking\n");
2348        rxTick = false;
2350        return;
2351    }
2352    anPq("RXS", rxDescCache.annUnusedCacheQ);
2353
2354    if (rxFifo.empty()) {
2355        anWe("RXS", "RX FIFO Q");
2356        DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
2357        rxTick = false;
2358        return;
2359    }
2360    anPq("RXS", "RX FIFO Q");
2361    anBegin("RXS", "Get Desc");
2362
2363    EthPacketPtr pkt;
2364    pkt = rxFifo.front();
2365
2366
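        // A received frame can span several RX descriptors; writePacket()
        // returns how far into the frame we have DMA'd so far, and the
        // packet is only popped from the FIFO once the whole frame has been
        // placed in memory.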
2367    pktOffset = rxDescCache.writePacket(pkt, pktOffset);
2368    DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
2369    if (pktOffset == pkt->length) {
2370        anBegin("RXS", "FIFO Dequeue");
2371        DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
2372        pktOffset = 0;
2373        anDq("RXS", "RX FIFO Q");
2374        rxFifo.pop();
2375    }
2376
2377    DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
2378    rxTick = false;
2379    rxDmaPacket = true;
2380    anBegin("RXS", "DMA Packet");
2381}
2382
2383void
2384IGbE::txWire()
2385{
2386    if (txFifo.empty()) {
2387        anWe("TXQ", "TX FIFO Q");
2388        txFifoTick = false;
2389        return;
2390    }
2391
2392
2393    anPq("TXQ", "TX FIFO Q");
2394    if (etherInt->sendPacket(txFifo.front())) {
2395        cpa->hwQ(CPA::FL_NONE, sys, macAddr, "TXQ", "WireQ", 0);
2396        if (DTRACE(EthernetSM)) {
2397            IpPtr ip(txFifo.front());
2398            if (ip)
2399                DPRINTF(EthernetSM, "Transmitting Ip packet with Id=%d\n",
2400                        ip->id());
2401            else
2402                DPRINTF(EthernetSM, "Transmitting Non-Ip packet\n");
2403        }
2404        anDq("TXQ", "TX FIFO Q");
2405        anBegin("TXQ", "Wire Send");
2406        DPRINTF(EthernetSM,
2407                "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
2408                txFifo.avail());
2409
2410        txBytes += txFifo.front()->length;
2411        txPackets++;
2412        txFifoTick = false;
2413
2414        txFifo.pop();
2415    } else {
2416        // We'll get woken up when ethTxDone() gets called for this packet
2417        txFifoTick = false;
2418    }
2419}
2420
2421void
2422IGbE::tick()
2423{
2424    DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");
2425
2426    if (rxTick)
2427        rxStateMachine();
2428
2429    if (txTick)
2430        txStateMachine();
2431
2432    if (txFifoTick)
2433        txWire();
2434
2435
2436    if (rxTick || txTick || txFifoTick)
2437        schedule(tickEvent, curTick() + clockPeriod());
2438}
2439
2440void
2441IGbE::ethTxDone()
2442{
2443    anBegin("TXQ", "Send Done");
2444    // restart the tx state machines if they are stopped
2445    // fifo to send another packet
2446    // tx sm to put more data into the fifo
2447    txFifoTick = true && !drainManager;
2448    if (txDescCache.descLeft() != 0 && !drainManager)
2449        txTick = true;
2450
2451    restartClock();
2452    txWire();
2453    DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
2454}
2455
2456void
2457IGbE::serialize(std::ostream &os)
2458{
2459    PciDevice::serialize(os);
2460
2461    regs.serialize(os);
2462    SERIALIZE_SCALAR(eeOpBits);
2463    SERIALIZE_SCALAR(eeAddrBits);
2464    SERIALIZE_SCALAR(eeDataBits);
2465    SERIALIZE_SCALAR(eeOpcode);
2466    SERIALIZE_SCALAR(eeAddr);
2467    SERIALIZE_SCALAR(lastInterrupt);
2468    SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);
2469
2470    rxFifo.serialize("rxfifo", os);
2471    txFifo.serialize("txfifo", os);
2472
2473    bool txPktExists = txPacket != nullptr;
2474    SERIALIZE_SCALAR(txPktExists);
2475    if (txPktExists)
2476        txPacket->serialize("txpacket", os);
2477
2478    Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
2479        inter_time = 0;
2480
2481    if (rdtrEvent.scheduled())
2482        rdtr_time = rdtrEvent.when();
2483    SERIALIZE_SCALAR(rdtr_time);
2484
2485    if (radvEvent.scheduled())
2486        radv_time = radvEvent.when();
2487    SERIALIZE_SCALAR(radv_time);
2488
2489    if (tidvEvent.scheduled())
2490        tidv_time = tidvEvent.when();
2491    SERIALIZE_SCALAR(tidv_time);
2492
2493    if (tadvEvent.scheduled())
2494        tadv_time = tadvEvent.when();
2495    SERIALIZE_SCALAR(tadv_time);
2496
2497    if (interEvent.scheduled())
2498        inter_time = interEvent.when();
2499    SERIALIZE_SCALAR(inter_time);
2500
2501    SERIALIZE_SCALAR(pktOffset);
2502
2503    nameOut(os, csprintf("%s.TxDescCache", name()));
2504    txDescCache.serialize(os);
2505
2506    nameOut(os, csprintf("%s.RxDescCache", name()));
2507    rxDescCache.serialize(os);
2508}
2509
2510void
2511IGbE::unserialize(Checkpoint *cp, const std::string &section)
2512{
2513    PciDevice::unserialize(cp, section);
2514
2515    regs.unserialize(cp, section);
2516    UNSERIALIZE_SCALAR(eeOpBits);
2517    UNSERIALIZE_SCALAR(eeAddrBits);
2518    UNSERIALIZE_SCALAR(eeDataBits);
2519    UNSERIALIZE_SCALAR(eeOpcode);
2520    UNSERIALIZE_SCALAR(eeAddr);
2521    UNSERIALIZE_SCALAR(lastInterrupt);
2522    UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);
2523
2524    rxFifo.unserialize("rxfifo", cp, section);
2525    txFifo.unserialize("txfifo", cp, section);
2526
2527    bool txPktExists;
2528    UNSERIALIZE_SCALAR(txPktExists);
2529    if (txPktExists) {
2530        txPacket = std::make_shared<EthPacketData>(16384);
2531        txPacket->unserialize("txpacket", cp, section);
2532    }
2533
2534    rxTick = true;
2535    txTick = true;
2536    txFifoTick = true;
2537
2538    Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
2539    UNSERIALIZE_SCALAR(rdtr_time);
2540    UNSERIALIZE_SCALAR(radv_time);
2541    UNSERIALIZE_SCALAR(tidv_time);
2542    UNSERIALIZE_SCALAR(tadv_time);
2543    UNSERIALIZE_SCALAR(inter_time);
2544
2545    if (rdtr_time)
2546        schedule(rdtrEvent, rdtr_time);
2547
2548    if (radv_time)
2549        schedule(radvEvent, radv_time);
2550
2551    if (tidv_time)
2552        schedule(tidvEvent, tidv_time);
2553
2554    if (tadv_time)
2555        schedule(tadvEvent, tadv_time);
2556
2557    if (inter_time)
2558        schedule(interEvent, inter_time);
2559
2560    UNSERIALIZE_SCALAR(pktOffset);
2561
2562    txDescCache.unserialize(cp, csprintf("%s.TxDescCache", section));
2563
2564    rxDescCache.unserialize(cp, csprintf("%s.RxDescCache", section));
2565}
2566
2567IGbE *
2568IGbEParams::create()
2569{
2570    return new IGbE(this);
2571}
2572