1/*
2 * Copyright (c) 2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Ali Saidi
29 */
30
31/* @file
32 * Device model for Intel's 8254x line of gigabit ethernet controllers.
33 * In particular an 82547 revision 2 (82547GI) MAC because it seems to have the
34 * fewest workarounds in the driver. It will probably work with most of the
35 * other MACs with slight modifications.
36 */
37
38#include "dev/net/i8254xGBe.hh"
39
40/*
41 * @todo really there are multiple dma engines.. we should implement them.
42 */
43
44#include <algorithm>
45#include <memory>
46
47#include "base/inet.hh"
48#include "base/trace.hh"
49#include "debug/Drain.hh"
50#include "debug/EthernetAll.hh"
51#include "mem/packet.hh"
52#include "mem/packet_access.hh"
53#include "params/IGbE.hh"
54#include "sim/stats.hh"
55#include "sim/system.hh"
56
57using namespace iGbReg;
58using namespace Net;
59
60IGbE::IGbE(const Params *p)
61    : EtherDevice(p), etherInt(NULL), cpa(NULL),
62      rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), inTick(false),
63      rxTick(false), txTick(false), txFifoTick(false), rxDmaPacket(false),
64      pktOffset(0), fetchDelay(p->fetch_delay), wbDelay(p->wb_delay),
65      fetchCompDelay(p->fetch_comp_delay), wbCompDelay(p->wb_comp_delay),
66      rxWriteDelay(p->rx_write_delay), txReadDelay(p->tx_read_delay),
67      rdtrEvent([this]{ rdtrProcess(); }, name()),
68      radvEvent([this]{ radvProcess(); }, name()),
69      tadvEvent([this]{ tadvProcess(); }, name()),
70      tidvEvent([this]{ tidvProcess(); }, name()),
71      tickEvent([this]{ tick(); }, name()),
72      interEvent([this]{ delayIntEvent(); }, name()),
73      rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size),
74      txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size),
75      lastInterrupt(0)
76{
77    etherInt = new IGbEInt(name() + ".int", this);
78
79    // Initialized internal registers per Intel documentation
80    // All registers intialized to 0 by per register constructor
81    regs.ctrl.fd(1);
82    regs.ctrl.lrst(1);
83    regs.ctrl.speed(2);
84    regs.ctrl.frcspd(1);
85    regs.sts.speed(3); // Say we're 1000Mbps
86    regs.sts.fd(1); // full duplex
87    regs.sts.lu(1); // link up
88    regs.eecd.fwe(1);
89    regs.eecd.ee_type(1);
90    regs.imr = 0;
91    regs.iam = 0;
92    regs.rxdctl.gran(1);
93    regs.rxdctl.wthresh(1);
94    regs.fcrth(1);
95    regs.tdwba = 0;
96    regs.rlpml = 0;
97    regs.sw_fw_sync = 0;
98
99    regs.pba.rxa(0x30);
100    regs.pba.txa(0x10);
101
102    eeOpBits            = 0;
103    eeAddrBits          = 0;
104    eeDataBits          = 0;
105    eeOpcode            = 0;
106
107    // clear all 64 16 bit words of the eeprom
108    memset(&flash, 0, EEPROM_SIZE*2);
109
110    // Set the MAC address
111    memcpy(flash, p->hardware_address.bytes(), ETH_ADDR_LEN);
112    for (int x = 0; x < ETH_ADDR_LEN/2; x++)
113        flash[x] = htobe(flash[x]);
114
115    uint16_t csum = 0;
116    for (int x = 0; x < EEPROM_SIZE; x++)
117        csum += htobe(flash[x]);
118
119
120    // Magic happy checksum value
121    flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum));
122
123    // Store the MAC address as queue ID
124    macAddr = p->hardware_address;
125
126    rxFifo.clear();
127    txFifo.clear();
128}
129
130IGbE::~IGbE()
131{
132    delete etherInt;
133}
134
135void
136IGbE::init()
137{
138    cpa = CPA::cpa();
139    PciDevice::init();
140}
141
142Port &
143IGbE::getPort(const std::string &if_name, PortID idx)
144{
145    if (if_name == "interface")
146        return *etherInt;
147    return EtherDevice::getPort(if_name, idx);
148}
149
150Tick
151IGbE::writeConfig(PacketPtr pkt)
152{
153    int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
154    if (offset < PCI_DEVICE_SPECIFIC)
155        PciDevice::writeConfig(pkt);
156    else
157        panic("Device specific PCI config space not implemented.\n");
158
159    //
160    // Some work may need to be done here based for the pci COMMAND bits.
161    //
162
163    return configDelay;
164}
165
166// Handy macro for range-testing register access addresses
167#define IN_RANGE(val, base, len) (val >= base && val < (base + len))
168
169Tick
170IGbE::read(PacketPtr pkt)
171{
172    int bar;
173    Addr daddr;
174
175    if (!getBAR(pkt->getAddr(), bar, daddr))
176        panic("Invalid PCI memory access to unmapped memory.\n");
177
178    // Only Memory register BAR is allowed
179    assert(bar == 0);
180
181    // Only 32bit accesses allowed
182    assert(pkt->getSize() == 4);
183
184    DPRINTF(Ethernet, "Read device register %#X\n", daddr);
185
186    //
187    // Handle read of register here
188    //
189
190
191    switch (daddr) {
192      case REG_CTRL:
193        pkt->setLE<uint32_t>(regs.ctrl());
194        break;
195      case REG_STATUS:
196        pkt->setLE<uint32_t>(regs.sts());
197        break;
198      case REG_EECD:
199        pkt->setLE<uint32_t>(regs.eecd());
200        break;
201      case REG_EERD:
202        pkt->setLE<uint32_t>(regs.eerd());
203        break;
204      case REG_CTRL_EXT:
205        pkt->setLE<uint32_t>(regs.ctrl_ext());
206        break;
207      case REG_MDIC:
208        pkt->setLE<uint32_t>(regs.mdic());
209        break;
210      case REG_ICR:
211        DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
212                regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
213        pkt->setLE<uint32_t>(regs.icr());
214        if (regs.icr.int_assert() || regs.imr == 0) {
215            regs.icr = regs.icr() & ~mask(30);
216            DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr());
217        }
218        if (regs.ctrl_ext.iame() && regs.icr.int_assert())
219            regs.imr &= ~regs.iam;
220        chkInterrupt();
221        break;
222      case REG_EICR:
223        // This is only useful for MSI, but the driver reads it every time
224        // Just don't do anything
225        pkt->setLE<uint32_t>(0);
226        break;
227      case REG_ITR:
228        pkt->setLE<uint32_t>(regs.itr());
229        break;
230      case REG_RCTL:
231        pkt->setLE<uint32_t>(regs.rctl());
232        break;
233      case REG_FCTTV:
234        pkt->setLE<uint32_t>(regs.fcttv());
235        break;
236      case REG_TCTL:
237        pkt->setLE<uint32_t>(regs.tctl());
238        break;
239      case REG_PBA:
240        pkt->setLE<uint32_t>(regs.pba());
241        break;
242      case REG_WUC:
243      case REG_WUFC:
244      case REG_WUS:
245      case REG_LEDCTL:
246        pkt->setLE<uint32_t>(0); // We don't care, so just return 0
247        break;
248      case REG_FCRTL:
249        pkt->setLE<uint32_t>(regs.fcrtl());
250        break;
251      case REG_FCRTH:
252        pkt->setLE<uint32_t>(regs.fcrth());
253        break;
254      case REG_RDBAL:
255        pkt->setLE<uint32_t>(regs.rdba.rdbal());
256        break;
257      case REG_RDBAH:
258        pkt->setLE<uint32_t>(regs.rdba.rdbah());
259        break;
260      case REG_RDLEN:
261        pkt->setLE<uint32_t>(regs.rdlen());
262        break;
263      case REG_SRRCTL:
264        pkt->setLE<uint32_t>(regs.srrctl());
265        break;
266      case REG_RDH:
267        pkt->setLE<uint32_t>(regs.rdh());
268        break;
269      case REG_RDT:
270        pkt->setLE<uint32_t>(regs.rdt());
271        break;
272      case REG_RDTR:
273        pkt->setLE<uint32_t>(regs.rdtr());
274        if (regs.rdtr.fpd()) {
275            rxDescCache.writeback(0);
276            DPRINTF(EthernetIntr,
277                    "Posting interrupt because of RDTR.FPD write\n");
278            postInterrupt(IT_RXT);
279            regs.rdtr.fpd(0);
280        }
281        break;
282      case REG_RXDCTL:
283        pkt->setLE<uint32_t>(regs.rxdctl());
284        break;
285      case REG_RADV:
286        pkt->setLE<uint32_t>(regs.radv());
287        break;
288      case REG_TDBAL:
289        pkt->setLE<uint32_t>(regs.tdba.tdbal());
290        break;
291      case REG_TDBAH:
292        pkt->setLE<uint32_t>(regs.tdba.tdbah());
293        break;
294      case REG_TDLEN:
295        pkt->setLE<uint32_t>(regs.tdlen());
296        break;
297      case REG_TDH:
298        pkt->setLE<uint32_t>(regs.tdh());
299        break;
300      case REG_TXDCA_CTL:
301        pkt->setLE<uint32_t>(regs.txdca_ctl());
302        break;
303      case REG_TDT:
304        pkt->setLE<uint32_t>(regs.tdt());
305        break;
306      case REG_TIDV:
307        pkt->setLE<uint32_t>(regs.tidv());
308        break;
309      case REG_TXDCTL:
310        pkt->setLE<uint32_t>(regs.txdctl());
311        break;
312      case REG_TADV:
313        pkt->setLE<uint32_t>(regs.tadv());
314        break;
315      case REG_TDWBAL:
316        pkt->setLE<uint32_t>(regs.tdwba & mask(32));
317        break;
318      case REG_TDWBAH:
319        pkt->setLE<uint32_t>(regs.tdwba >> 32);
320        break;
321      case REG_RXCSUM:
322        pkt->setLE<uint32_t>(regs.rxcsum());
323        break;
324      case REG_RLPML:
325        pkt->setLE<uint32_t>(regs.rlpml);
326        break;
327      case REG_RFCTL:
328        pkt->setLE<uint32_t>(regs.rfctl());
329        break;
330      case REG_MANC:
331        pkt->setLE<uint32_t>(regs.manc());
332        break;
333      case REG_SWSM:
334        pkt->setLE<uint32_t>(regs.swsm());
335        regs.swsm.smbi(1);
336        break;
337      case REG_FWSM:
338        pkt->setLE<uint32_t>(regs.fwsm());
339        break;
340      case REG_SWFWSYNC:
341        pkt->setLE<uint32_t>(regs.sw_fw_sync);
342        break;
343      default:
344        if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) &&
345            !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) &&
346            !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4) &&
347            !IN_RANGE(daddr, REG_CRCERRS, STATS_REGS_SIZE))
348            panic("Read request to unknown register number: %#x\n", daddr);
349        else
350            pkt->setLE<uint32_t>(0);
351    };
352
353    pkt->makeAtomicResponse();
354    return pioDelay;
355}
356
357Tick
358IGbE::write(PacketPtr pkt)
359{
360    int bar;
361    Addr daddr;
362
363
364    if (!getBAR(pkt->getAddr(), bar, daddr))
365        panic("Invalid PCI memory access to unmapped memory.\n");
366
367    // Only Memory register BAR is allowed
368    assert(bar == 0);
369
370    // Only 32bit accesses allowed
371    assert(pkt->getSize() == sizeof(uint32_t));
372
373    DPRINTF(Ethernet, "Wrote device register %#X value %#X\n",
374            daddr, pkt->getLE<uint32_t>());
375
376    //
377    // Handle write of register here
378    //
379    uint32_t val = pkt->getLE<uint32_t>();
380
381    Regs::RCTL oldrctl;
382    Regs::TCTL oldtctl;
383
384    switch (daddr) {
385      case REG_CTRL:
386        regs.ctrl = val;
387        if (regs.ctrl.tfce())
388            warn("TX Flow control enabled, should implement\n");
389        if (regs.ctrl.rfce())
390            warn("RX Flow control enabled, should implement\n");
391        break;
392      case REG_CTRL_EXT:
393        regs.ctrl_ext = val;
394        break;
395      case REG_STATUS:
396        regs.sts = val;
397        break;
398      case REG_EECD:
399        int oldClk;
400        oldClk = regs.eecd.sk();
401        regs.eecd = val;
402        // See if this is a eeprom access and emulate accordingly
403        if (!oldClk && regs.eecd.sk()) {
404            if (eeOpBits < 8) {
405                eeOpcode = eeOpcode << 1 | regs.eecd.din();
406                eeOpBits++;
407            } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
408                eeAddr = eeAddr << 1 | regs.eecd.din();
409                eeAddrBits++;
410            } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
411                assert(eeAddr>>1 < EEPROM_SIZE);
412                DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n",
413                        flash[eeAddr>>1] >> eeDataBits & 0x1,
414                        flash[eeAddr>>1]);
415                regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1);
416                eeDataBits++;
417            } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) {
418                regs.eecd.dout(0);
419                eeDataBits++;
420            } else
421                panic("What's going on with eeprom interface? opcode:"
422                      " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode,
423                      (uint32_t)eeOpBits, (uint32_t)eeAddr,
424                      (uint32_t)eeAddrBits, (uint32_t)eeDataBits);
425
426            // Reset everything for the next command
427            if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) ||
428                (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) {
429                eeOpBits = 0;
430                eeAddrBits = 0;
431                eeDataBits = 0;
432                eeOpcode = 0;
433                eeAddr = 0;
434            }
435
436            DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
437                    (uint32_t)eeOpcode, (uint32_t) eeOpBits,
438                    (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits);
439            if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI ||
440                                   eeOpcode == EEPROM_RDSR_OPCODE_SPI ))
441                panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode,
442                      (uint32_t)eeOpBits);
443
444
445        }
446        // If driver requests eeprom access, immediately give it to it
447        regs.eecd.ee_gnt(regs.eecd.ee_req());
448        break;
449      case REG_EERD:
450        regs.eerd = val;
451        if (regs.eerd.start()) {
452            regs.eerd.done(1);
453            assert(regs.eerd.addr() < EEPROM_SIZE);
454            regs.eerd.data(flash[regs.eerd.addr()]);
455            regs.eerd.start(0);
456            DPRINTF(EthernetEEPROM, "EEPROM: read addr: %#X data %#x\n",
457                    regs.eerd.addr(), regs.eerd.data());
458        }
459        break;
460      case REG_MDIC:
461        regs.mdic = val;
462        if (regs.mdic.i())
463            panic("No support for interrupt on mdic complete\n");
464        if (regs.mdic.phyadd() != 1)
465            panic("No support for reading anything but phy\n");
466        DPRINTF(Ethernet, "%s phy address %x\n",
467                regs.mdic.op() == 1 ? "Writing" : "Reading",
468                regs.mdic.regadd());
469        switch (regs.mdic.regadd()) {
470          case PHY_PSTATUS:
471            regs.mdic.data(0x796D); // link up
472            break;
473          case PHY_PID:
474            regs.mdic.data(params()->phy_pid);
475            break;
476          case PHY_EPID:
477            regs.mdic.data(params()->phy_epid);
478            break;
479          case PHY_GSTATUS:
480            regs.mdic.data(0x7C00);
481            break;
482          case PHY_EPSTATUS:
483            regs.mdic.data(0x3000);
484            break;
485          case PHY_AGC:
486            regs.mdic.data(0x180); // some random length
487            break;
488          default:
489            regs.mdic.data(0);
490        }
491        regs.mdic.r(1);
492        break;
493      case REG_ICR:
494        DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
495                regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
496        if (regs.ctrl_ext.iame())
497            regs.imr &= ~regs.iam;
498        regs.icr = ~bits(val,30,0) & regs.icr();
499        chkInterrupt();
500        break;
501      case REG_ITR:
502        regs.itr = val;
503        break;
504      case REG_ICS:
505        DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n");
506        postInterrupt((IntTypes)val);
507        break;
508      case REG_IMS:
509        regs.imr |= val;
510        chkInterrupt();
511        break;
512      case REG_IMC:
513        regs.imr &= ~val;
514        chkInterrupt();
515        break;
516      case REG_IAM:
517        regs.iam = val;
518        break;
519      case REG_RCTL:
520        oldrctl = regs.rctl;
521        regs.rctl = val;
522        if (regs.rctl.rst()) {
523            rxDescCache.reset();
524            DPRINTF(EthernetSM, "RXS: Got RESET!\n");
525            rxFifo.clear();
526            regs.rctl.rst(0);
527        }
528        if (regs.rctl.en())
529            rxTick = true;
530        restartClock();
531        break;
532      case REG_FCTTV:
533        regs.fcttv = val;
534        break;
535      case REG_TCTL:
536        regs.tctl = val;
537        oldtctl = regs.tctl;
538        regs.tctl = val;
539        if (regs.tctl.en())
540            txTick = true;
541        restartClock();
542        if (regs.tctl.en() && !oldtctl.en()) {
543            txDescCache.reset();
544        }
545        break;
546      case REG_PBA:
547        regs.pba.rxa(val);
548        regs.pba.txa(64 - regs.pba.rxa());
549        break;
550      case REG_WUC:
551      case REG_WUFC:
552      case REG_WUS:
553      case REG_LEDCTL:
554      case REG_FCAL:
555      case REG_FCAH:
556      case REG_FCT:
557      case REG_VET:
558      case REG_AIFS:
559      case REG_TIPG:
560        ; // We don't care, so don't store anything
561        break;
562      case REG_IVAR0:
563        warn("Writing to IVAR0, ignoring...\n");
564        break;
565      case REG_FCRTL:
566        regs.fcrtl = val;
567        break;
568      case REG_FCRTH:
569        regs.fcrth = val;
570        break;
571      case REG_RDBAL:
572        regs.rdba.rdbal( val & ~mask(4));
573        rxDescCache.areaChanged();
574        break;
575      case REG_RDBAH:
576        regs.rdba.rdbah(val);
577        rxDescCache.areaChanged();
578        break;
579      case REG_RDLEN:
580        regs.rdlen = val & ~mask(7);
581        rxDescCache.areaChanged();
582        break;
583      case REG_SRRCTL:
584        regs.srrctl = val;
585        break;
586      case REG_RDH:
587        regs.rdh = val;
588        rxDescCache.areaChanged();
589        break;
590      case REG_RDT:
591        regs.rdt = val;
592        DPRINTF(EthernetSM, "RXS: RDT Updated.\n");
593        if (drainState() == DrainState::Running) {
594            DPRINTF(EthernetSM, "RXS: RDT Fetching Descriptors!\n");
595            rxDescCache.fetchDescriptors();
596        } else {
597            DPRINTF(EthernetSM, "RXS: RDT NOT Fetching Desc b/c draining!\n");
598        }
599        break;
600      case REG_RDTR:
601        regs.rdtr = val;
602        break;
603      case REG_RADV:
604        regs.radv = val;
605        break;
606      case REG_RXDCTL:
607        regs.rxdctl = val;
608        break;
609      case REG_TDBAL:
610        regs.tdba.tdbal( val & ~mask(4));
611        txDescCache.areaChanged();
612        break;
613      case REG_TDBAH:
614        regs.tdba.tdbah(val);
615        txDescCache.areaChanged();
616        break;
617      case REG_TDLEN:
618        regs.tdlen = val & ~mask(7);
619        txDescCache.areaChanged();
620        break;
621      case REG_TDH:
622        regs.tdh = val;
623        txDescCache.areaChanged();
624        break;
625      case REG_TXDCA_CTL:
626        regs.txdca_ctl = val;
627        if (regs.txdca_ctl.enabled())
628            panic("No support for DCA\n");
629        break;
630      case REG_TDT:
631        regs.tdt = val;
632        DPRINTF(EthernetSM, "TXS: TX Tail pointer updated\n");
633        if (drainState() == DrainState::Running) {
634            DPRINTF(EthernetSM, "TXS: TDT Fetching Descriptors!\n");
635            txDescCache.fetchDescriptors();
636        } else {
637            DPRINTF(EthernetSM, "TXS: TDT NOT Fetching Desc b/c draining!\n");
638        }
639        break;
640      case REG_TIDV:
641        regs.tidv = val;
642        break;
643      case REG_TXDCTL:
644        regs.txdctl = val;
645        break;
646      case REG_TADV:
647        regs.tadv = val;
648        break;
649      case REG_TDWBAL:
650        regs.tdwba &= ~mask(32);
651        regs.tdwba |= val;
652        txDescCache.completionWriteback(regs.tdwba & ~mask(1),
653                                        regs.tdwba & mask(1));
654        break;
655      case REG_TDWBAH:
656        regs.tdwba &= mask(32);
657        regs.tdwba |= (uint64_t)val << 32;
658        txDescCache.completionWriteback(regs.tdwba & ~mask(1),
659                                        regs.tdwba & mask(1));
660        break;
661      case REG_RXCSUM:
662        regs.rxcsum = val;
663        break;
664      case REG_RLPML:
665        regs.rlpml = val;
666        break;
667      case REG_RFCTL:
668        regs.rfctl = val;
669        if (regs.rfctl.exsten())
670            panic("Extended RX descriptors not implemented\n");
671        break;
672      case REG_MANC:
673        regs.manc = val;
674        break;
675      case REG_SWSM:
676        regs.swsm = val;
677        if (regs.fwsm.eep_fw_semaphore())
678            regs.swsm.swesmbi(0);
679        break;
680      case REG_SWFWSYNC:
681        regs.sw_fw_sync = val;
682        break;
683      default:
684        if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) &&
685            !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) &&
686            !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4))
687            panic("Write request to unknown register number: %#x\n", daddr);
688    };
689
690    pkt->makeAtomicResponse();
691    return pioDelay;
692}
693
694void
695IGbE::postInterrupt(IntTypes t, bool now)
696{
697    assert(t);
698
699    // Interrupt is already pending
700    if (t & regs.icr() && !now)
701        return;
702
703    regs.icr = regs.icr() | t;
704
705    Tick itr_interval = SimClock::Int::ns * 256 * regs.itr.interval();
706    DPRINTF(EthernetIntr,
707            "EINT: postInterrupt() curTick(): %d itr: %d interval: %d\n",
708            curTick(), regs.itr.interval(), itr_interval);
709
710    if (regs.itr.interval() == 0 || now ||
711        lastInterrupt + itr_interval <= curTick()) {
712        if (interEvent.scheduled()) {
713            deschedule(interEvent);
714        }
715        cpuPostInt();
716    } else {
717        Tick int_time = lastInterrupt + itr_interval;
718        assert(int_time > 0);
719        DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for tick %d\n",
720                int_time);
721        if (!interEvent.scheduled()) {
722            schedule(interEvent, int_time);
723        }
724    }
725}
726
727void
728IGbE::delayIntEvent()
729{
730    cpuPostInt();
731}
732
733
734void
735IGbE::cpuPostInt()
736{
737
738    postedInterrupts++;
739
740    if (!(regs.icr() & regs.imr)) {
741        DPRINTF(Ethernet, "Interrupt Masked. Not Posting\n");
742        return;
743    }
744
745    DPRINTF(Ethernet, "Posting Interrupt\n");
746
747
748    if (interEvent.scheduled()) {
749        deschedule(interEvent);
750    }
751
752    if (rdtrEvent.scheduled()) {
753        regs.icr.rxt0(1);
754        deschedule(rdtrEvent);
755    }
756    if (radvEvent.scheduled()) {
757        regs.icr.rxt0(1);
758        deschedule(radvEvent);
759    }
760    if (tadvEvent.scheduled()) {
761        regs.icr.txdw(1);
762        deschedule(tadvEvent);
763    }
764    if (tidvEvent.scheduled()) {
765        regs.icr.txdw(1);
766        deschedule(tidvEvent);
767    }
768
769    regs.icr.int_assert(1);
770    DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n",
771            regs.icr());
772
773    intrPost();
774
775    lastInterrupt = curTick();
776}
777
778void
779IGbE::cpuClearInt()
780{
781    if (regs.icr.int_assert()) {
782        regs.icr.int_assert(0);
783        DPRINTF(EthernetIntr,
784                "EINT: Clearing interrupt to CPU now. Vector %#x\n",
785                regs.icr());
786        intrClear();
787    }
788}
789
790void
791IGbE::chkInterrupt()
792{
793    DPRINTF(Ethernet, "Checking interrupts icr: %#x imr: %#x\n", regs.icr(),
794            regs.imr);
795    // Check if we need to clear the cpu interrupt
796    if (!(regs.icr() & regs.imr)) {
797        DPRINTF(Ethernet, "Mask cleaned all interrupts\n");
798        if (interEvent.scheduled())
799            deschedule(interEvent);
800        if (regs.icr.int_assert())
801            cpuClearInt();
802    }
803    DPRINTF(Ethernet, "ITR = %#X itr.interval = %#X\n",
804            regs.itr(), regs.itr.interval());
805
806    if (regs.icr() & regs.imr) {
807        if (regs.itr.interval() == 0)  {
808            cpuPostInt();
809        } else {
810            DPRINTF(Ethernet,
811                    "Possibly scheduling interrupt because of imr write\n");
812            if (!interEvent.scheduled()) {
813                Tick t = curTick() + SimClock::Int::ns * 256 * regs.itr.interval();
814                DPRINTF(Ethernet, "Scheduling for %d\n", t);
815                schedule(interEvent, t);
816            }
817        }
818    }
819}
820
821
822///////////////////////////// IGbE::DescCache //////////////////////////////
823
824template<class T>
825IGbE::DescCache<T>::DescCache(IGbE *i, const std::string n, int s)
826    : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0),
827      wbOut(0), moreToWb(false), wbAlignment(0), pktPtr(NULL),
828      wbDelayEvent([this]{ writeback1(); }, n),
829      fetchDelayEvent([this]{ fetchDescriptors1(); }, n),
830      fetchEvent([this]{ fetchComplete(); }, n),
831      wbEvent([this]{ wbComplete(); }, n)
832{
833    fetchBuf = new T[size];
834    wbBuf = new T[size];
835}
836
837template<class T>
838IGbE::DescCache<T>::~DescCache()
839{
840    reset();
841    delete[] fetchBuf;
842    delete[] wbBuf;
843}
844
845template<class T>
846void
847IGbE::DescCache<T>::areaChanged()
848{
849    if (usedCache.size() > 0 || curFetching || wbOut)
850        panic("Descriptor Address, Length or Head changed. Bad\n");
851    reset();
852
853}
854
855template<class T>
856void
857IGbE::DescCache<T>::writeback(Addr aMask)
858{
859    int curHead = descHead();
860    int max_to_wb = usedCache.size();
861
862    // Check if this writeback is less restrictive that the previous
863    // and if so setup another one immediately following it
864    if (wbOut) {
865        if (aMask < wbAlignment) {
866            moreToWb = true;
867            wbAlignment = aMask;
868        }
869        DPRINTF(EthernetDesc,
870                "Writing back already in process, returning\n");
871        return;
872    }
873
874    moreToWb = false;
875    wbAlignment = aMask;
876
877
878    DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
879            "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
880            curHead, descTail(), descLen(), cachePnt, max_to_wb,
881            descLeft());
882
883    if (max_to_wb + curHead >= descLen()) {
884        max_to_wb = descLen() - curHead;
885        moreToWb = true;
886        // this is by definition aligned correctly
887    } else if (wbAlignment != 0) {
888        // align the wb point to the mask
889        max_to_wb = max_to_wb & ~wbAlignment;
890    }
891
892    DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);
893
894    if (max_to_wb <= 0) {
895        if (usedCache.size())
896            igbe->anBegin(annSmWb, "Wait Alignment", CPA::FL_WAIT);
897        else
898            igbe->anWe(annSmWb, annUsedCacheQ);
899        return;
900    }
901
902    wbOut = max_to_wb;
903
904    assert(!wbDelayEvent.scheduled());
905    igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
906    igbe->anBegin(annSmWb, "Prepare Writeback Desc");
907}
908
909template<class T>
910void
911IGbE::DescCache<T>::writeback1()
912{
913    // If we're draining delay issuing this DMA
914    if (igbe->drainState() != DrainState::Running) {
915        igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
916        return;
917    }
918
919    DPRINTF(EthernetDesc, "Begining DMA of %d descriptors\n", wbOut);
920
921    for (int x = 0; x < wbOut; x++) {
922        assert(usedCache.size());
923        memcpy(&wbBuf[x], usedCache[x], sizeof(T));
924        igbe->anPq(annSmWb, annUsedCacheQ);
925        igbe->anPq(annSmWb, annDescQ);
926        igbe->anQ(annSmWb, annUsedDescQ);
927    }
928
929
930    igbe->anBegin(annSmWb, "Writeback Desc DMA");
931
932    assert(wbOut);
933    igbe->dmaWrite(pciToDma(descBase() + descHead() * sizeof(T)),
934                   wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf,
935                   igbe->wbCompDelay);
936}
937
938template<class T>
939void
940IGbE::DescCache<T>::fetchDescriptors()
941{
942    size_t max_to_fetch;
943
944    if (curFetching) {
945        DPRINTF(EthernetDesc,
946                "Currently fetching %d descriptors, returning\n",
947                curFetching);
948        return;
949    }
950
951    if (descTail() >= cachePnt)
952        max_to_fetch = descTail() - cachePnt;
953    else
954        max_to_fetch = descLen() - cachePnt;
955
956    size_t free_cache = size - usedCache.size() - unusedCache.size();
957
958    if (!max_to_fetch)
959        igbe->anWe(annSmFetch, annUnusedDescQ);
960    else
961        igbe->anPq(annSmFetch, annUnusedDescQ, max_to_fetch);
962
963    if (max_to_fetch) {
964        if (!free_cache)
965            igbe->anWf(annSmFetch, annDescQ);
966        else
967            igbe->anRq(annSmFetch, annDescQ, free_cache);
968    }
969
970    max_to_fetch = std::min(max_to_fetch, free_cache);
971
972
973    DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
974            "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
975            descHead(), descTail(), descLen(), cachePnt,
976            max_to_fetch, descLeft());
977
978    // Nothing to do
979    if (max_to_fetch == 0)
980        return;
981
982    // So we don't have two descriptor fetches going on at once
983    curFetching = max_to_fetch;
984
985    assert(!fetchDelayEvent.scheduled());
986    igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
987    igbe->anBegin(annSmFetch, "Prepare Fetch Desc");
988}
989
990template<class T>
991void
992IGbE::DescCache<T>::fetchDescriptors1()
993{
994    // If we're draining delay issuing this DMA
995    if (igbe->drainState() != DrainState::Running) {
996        igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
997        return;
998    }
999
1000    igbe->anBegin(annSmFetch, "Fetch Desc");
1001
1002    DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
1003            descBase() + cachePnt * sizeof(T),
1004            pciToDma(descBase() + cachePnt * sizeof(T)),
1005            curFetching * sizeof(T));
1006    assert(curFetching);
1007    igbe->dmaRead(pciToDma(descBase() + cachePnt * sizeof(T)),
1008                  curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf,
1009                  igbe->fetchCompDelay);
1010}
1011
1012template<class T>
1013void
1014IGbE::DescCache<T>::fetchComplete()
1015{
1016    T *newDesc;
1017    igbe->anBegin(annSmFetch, "Fetch Complete");
1018    for (int x = 0; x < curFetching; x++) {
1019        newDesc = new T;
1020        memcpy(newDesc, &fetchBuf[x], sizeof(T));
1021        unusedCache.push_back(newDesc);
1022        igbe->anDq(annSmFetch, annUnusedDescQ);
1023        igbe->anQ(annSmFetch, annUnusedCacheQ);
1024        igbe->anQ(annSmFetch, annDescQ);
1025    }
1026
1027
1028#ifndef NDEBUG
1029    int oldCp = cachePnt;
1030#endif
1031
1032    cachePnt += curFetching;
1033    assert(cachePnt <= descLen());
1034    if (cachePnt == descLen())
1035        cachePnt = 0;
1036
1037    curFetching = 0;
1038
1039    DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
1040            oldCp, cachePnt);
1041
1042    if ((descTail() >= cachePnt ? (descTail() - cachePnt) : (descLen() -
1043                                                             cachePnt)) == 0)
1044    {
1045        igbe->anWe(annSmFetch, annUnusedDescQ);
1046    } else if (!(size - usedCache.size() - unusedCache.size())) {
1047        igbe->anWf(annSmFetch, annDescQ);
1048    } else {
1049        igbe->anBegin(annSmFetch, "Wait", CPA::FL_WAIT);
1050    }
1051
1052    enableSm();
1053    igbe->checkDrain();
1054}
1055
1056template<class T>
1057void
1058IGbE::DescCache<T>::wbComplete()
1059{
1060
1061    igbe->anBegin(annSmWb, "Finish Writeback");
1062
1063    long  curHead = descHead();
1064#ifndef NDEBUG
1065    long oldHead = curHead;
1066#endif
1067
1068    for (int x = 0; x < wbOut; x++) {
1069        assert(usedCache.size());
1070        delete usedCache[0];
1071        usedCache.pop_front();
1072
1073        igbe->anDq(annSmWb, annUsedCacheQ);
1074        igbe->anDq(annSmWb, annDescQ);
1075    }
1076
1077    curHead += wbOut;
1078    wbOut = 0;
1079
1080    if (curHead >= descLen())
1081        curHead -= descLen();
1082
1083    // Update the head
1084    updateHead(curHead);
1085
1086    DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
1087            oldHead, curHead);
1088
1089    // If we still have more to wb, call wb now
1090    actionAfterWb();
1091    if (moreToWb) {
1092        moreToWb = false;
1093        DPRINTF(EthernetDesc, "Writeback has more todo\n");
1094        writeback(wbAlignment);
1095    }
1096
1097    if (!wbOut) {
1098        igbe->checkDrain();
1099        if (usedCache.size())
1100            igbe->anBegin(annSmWb, "Wait", CPA::FL_WAIT);
1101        else
1102            igbe->anWe(annSmWb, annUsedCacheQ);
1103    }
1104    fetchAfterWb();
1105}
1106
1107template<class T>
1108void
1109IGbE::DescCache<T>::reset()
1110{
1111    DPRINTF(EthernetDesc, "Reseting descriptor cache\n");
1112    for (typename CacheType::size_type x = 0; x < usedCache.size(); x++)
1113        delete usedCache[x];
1114    for (typename CacheType::size_type x = 0; x < unusedCache.size(); x++)
1115        delete unusedCache[x];
1116
1117    usedCache.clear();
1118    unusedCache.clear();
1119
1120    cachePnt = 0;
1121
1122}
1123
1124template<class T>
1125void
1126IGbE::DescCache<T>::serialize(CheckpointOut &cp) const
1127{
1128    SERIALIZE_SCALAR(cachePnt);
1129    SERIALIZE_SCALAR(curFetching);
1130    SERIALIZE_SCALAR(wbOut);
1131    SERIALIZE_SCALAR(moreToWb);
1132    SERIALIZE_SCALAR(wbAlignment);
1133
1134    typename CacheType::size_type usedCacheSize = usedCache.size();
1135    SERIALIZE_SCALAR(usedCacheSize);
1136    for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
1137        arrayParamOut(cp, csprintf("usedCache_%d", x),
1138                      (uint8_t*)usedCache[x],sizeof(T));
1139    }
1140
1141    typename CacheType::size_type unusedCacheSize = unusedCache.size();
1142    SERIALIZE_SCALAR(unusedCacheSize);
1143    for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
1144        arrayParamOut(cp, csprintf("unusedCache_%d", x),
1145                      (uint8_t*)unusedCache[x],sizeof(T));
1146    }
1147
1148    Tick fetch_delay = 0, wb_delay = 0;
1149    if (fetchDelayEvent.scheduled())
1150        fetch_delay = fetchDelayEvent.when();
1151    SERIALIZE_SCALAR(fetch_delay);
1152    if (wbDelayEvent.scheduled())
1153        wb_delay = wbDelayEvent.when();
1154    SERIALIZE_SCALAR(wb_delay);
1155
1156
1157}
1158
1159template<class T>
1160void
1161IGbE::DescCache<T>::unserialize(CheckpointIn &cp)
1162{
1163    UNSERIALIZE_SCALAR(cachePnt);
1164    UNSERIALIZE_SCALAR(curFetching);
1165    UNSERIALIZE_SCALAR(wbOut);
1166    UNSERIALIZE_SCALAR(moreToWb);
1167    UNSERIALIZE_SCALAR(wbAlignment);
1168
1169    typename CacheType::size_type usedCacheSize;
1170    UNSERIALIZE_SCALAR(usedCacheSize);
1171    T *temp;
1172    for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
1173        temp = new T;
1174        arrayParamIn(cp, csprintf("usedCache_%d", x),
1175                     (uint8_t*)temp,sizeof(T));
1176        usedCache.push_back(temp);
1177    }
1178
1179    typename CacheType::size_type unusedCacheSize;
1180    UNSERIALIZE_SCALAR(unusedCacheSize);
1181    for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
1182        temp = new T;
1183        arrayParamIn(cp, csprintf("unusedCache_%d", x),
1184                     (uint8_t*)temp,sizeof(T));
1185        unusedCache.push_back(temp);
1186    }
1187    Tick fetch_delay = 0, wb_delay = 0;
1188    UNSERIALIZE_SCALAR(fetch_delay);
1189    UNSERIALIZE_SCALAR(wb_delay);
1190    if (fetch_delay)
1191        igbe->schedule(fetchDelayEvent, fetch_delay);
1192    if (wb_delay)
1193        igbe->schedule(wbDelayEvent, wb_delay);
1194
1195
1196}
1197
1198///////////////////////////// IGbE::RxDescCache //////////////////////////////
1199
1200IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
1201    : DescCache<RxDesc>(i, n, s), pktDone(false), splitCount(0),
1202    pktEvent([this]{ pktComplete(); }, n),
1203    pktHdrEvent([this]{ pktSplitDone(); }, n),
1204    pktDataEvent([this]{ pktSplitDone(); }, n)
1205
1206{
1207    annSmFetch = "RX Desc Fetch";
1208    annSmWb = "RX Desc Writeback";
1209    annUnusedDescQ = "RX Unused Descriptors";
1210    annUnusedCacheQ = "RX Unused Descriptor Cache";
1211    annUsedCacheQ = "RX Used Descriptor Cache";
1212    annUsedDescQ = "RX Used Descriptors";
1213    annDescQ = "RX Descriptors";
1214}
1215
1216void
1217IGbE::RxDescCache::pktSplitDone()
1218{
1219    splitCount++;
1220    DPRINTF(EthernetDesc,
1221            "Part of split packet done: splitcount now %d\n", splitCount);
1222    assert(splitCount <= 2);
1223    if (splitCount != 2)
1224        return;
1225    splitCount = 0;
1226    DPRINTF(EthernetDesc,
1227            "Part of split packet done: calling pktComplete()\n");
1228    pktComplete();
1229}
1230
1231int
1232IGbE::RxDescCache::writePacket(EthPacketPtr packet, int pkt_offset)
1233{
1234    assert(unusedCache.size());
1235    //if (!unusedCache.size())
1236    //    return false;
1237
1238    pktPtr = packet;
1239    pktDone = false;
1240    unsigned buf_len, hdr_len;
1241
1242    RxDesc *desc = unusedCache.front();
1243    switch (igbe->regs.srrctl.desctype()) {
1244      case RXDT_LEGACY:
1245        assert(pkt_offset == 0);
1246        bytesCopied = packet->length;
1247        DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n",
1248                packet->length, igbe->regs.rctl.descSize());
1249        assert(packet->length < igbe->regs.rctl.descSize());
1250        igbe->dmaWrite(pciToDma(desc->legacy.buf),
1251                       packet->length, &pktEvent, packet->data,
1252                       igbe->rxWriteDelay);
1253        break;
1254      case RXDT_ADV_ONEBUF:
1255        assert(pkt_offset == 0);
1256        bytesCopied = packet->length;
1257        buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
1258            igbe->regs.rctl.descSize();
1259        DPRINTF(EthernetDesc, "Packet Length: %d srrctl: %#x Desc Size: %d\n",
1260                packet->length, igbe->regs.srrctl(), buf_len);
1261        assert(packet->length < buf_len);
1262        igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
1263                       packet->length, &pktEvent, packet->data,
1264                       igbe->rxWriteDelay);
1265        desc->adv_wb.header_len = htole(0);
1266        desc->adv_wb.sph = htole(0);
1267        desc->adv_wb.pkt_len = htole((uint16_t)(pktPtr->length));
1268        break;
1269      case RXDT_ADV_SPLIT_A:
1270        int split_point;
1271
1272        buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
1273            igbe->regs.rctl.descSize();
1274        hdr_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.hdrLen() : 0;
1275        DPRINTF(EthernetDesc,
1276                "lpe: %d Packet Length: %d offset: %d srrctl: %#x "
1277                "hdr addr: %#x Hdr Size: %d desc addr: %#x Desc Size: %d\n",
1278                igbe->regs.rctl.lpe(), packet->length, pkt_offset,
1279                igbe->regs.srrctl(), desc->adv_read.hdr, hdr_len,
1280                desc->adv_read.pkt, buf_len);
1281
1282        split_point = hsplit(pktPtr);
1283
1284        if (packet->length <= hdr_len) {
1285            bytesCopied = packet->length;
1286            assert(pkt_offset == 0);
1287            DPRINTF(EthernetDesc, "Hdr split: Entire packet in header\n");
1288            igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
1289                           packet->length, &pktEvent, packet->data,
1290                           igbe->rxWriteDelay);
1291            desc->adv_wb.header_len = htole((uint16_t)packet->length);
1292            desc->adv_wb.sph = htole(0);
1293            desc->adv_wb.pkt_len = htole(0);
1294        } else if (split_point) {
1295            if (pkt_offset) {
1296                // we are only copying some data, header/data has already been
1297                // copied
1298                int max_to_copy =
1299                    std::min(packet->length - pkt_offset, buf_len);
1300                bytesCopied += max_to_copy;
1301                DPRINTF(EthernetDesc,
1302                        "Hdr split: Continuing data buffer copy\n");
1303                igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
1304                               max_to_copy, &pktEvent,
1305                               packet->data + pkt_offset, igbe->rxWriteDelay);
1306                desc->adv_wb.header_len = htole(0);
1307                desc->adv_wb.pkt_len = htole((uint16_t)max_to_copy);
1308                desc->adv_wb.sph = htole(0);
1309            } else {
1310                int max_to_copy =
1311                    std::min(packet->length - split_point, buf_len);
1312                bytesCopied += max_to_copy + split_point;
1313
1314                DPRINTF(EthernetDesc, "Hdr split: splitting at %d\n",
1315                        split_point);
1316                igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
1317                               split_point, &pktHdrEvent,
1318                               packet->data, igbe->rxWriteDelay);
1319                igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
1320                               max_to_copy, &pktDataEvent,
1321                               packet->data + split_point, igbe->rxWriteDelay);
1322                desc->adv_wb.header_len = htole(split_point);
1323                desc->adv_wb.sph = 1;
1324                desc->adv_wb.pkt_len = htole((uint16_t)(max_to_copy));
1325            }
1326        } else {
1327            panic("Header split not fitting within header buffer or "
1328                  "undecodable packet not fitting in header unsupported\n");
1329        }
1330        break;
1331      default:
1332        panic("Unimplemnted RX receive buffer type: %d\n",
1333              igbe->regs.srrctl.desctype());
1334    }
1335    return bytesCopied;
1336
1337}
1338
1339void
1340IGbE::RxDescCache::pktComplete()
1341{
1342    assert(unusedCache.size());
1343    RxDesc *desc;
1344    desc = unusedCache.front();
1345
1346    igbe->anBegin("RXS", "Update Desc");
1347
1348    uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4 ;
1349    DPRINTF(EthernetDesc, "pktPtr->length: %d bytesCopied: %d "
1350            "stripcrc offset: %d value written: %d %d\n",
1351            pktPtr->length, bytesCopied, crcfixup,
1352            htole((uint16_t)(pktPtr->length + crcfixup)),
1353            (uint16_t)(pktPtr->length + crcfixup));
1354
1355    // no support for anything but starting at 0
1356    assert(igbe->regs.rxcsum.pcss() == 0);
1357
1358    DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n");
1359
1360    uint16_t status = RXDS_DD;
1361    uint8_t err = 0;
1362    uint16_t ext_err = 0;
1363    uint16_t csum = 0;
1364    uint16_t ptype = 0;
1365    uint16_t ip_id = 0;
1366
1367    assert(bytesCopied <= pktPtr->length);
1368    if (bytesCopied == pktPtr->length)
1369        status |= RXDS_EOP;
1370
1371    IpPtr ip(pktPtr);
1372    Ip6Ptr ip6(pktPtr);
1373
1374    if (ip || ip6) {
1375        if (ip) {
1376            DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n",
1377                    ip->id());
1378            ptype |= RXDP_IPV4;
1379            ip_id = ip->id();
1380        }
1381        if (ip6)
1382            ptype |= RXDP_IPV6;
1383
1384        if (ip && igbe->regs.rxcsum.ipofld()) {
1385            DPRINTF(EthernetDesc, "Checking IP checksum\n");
1386            status |= RXDS_IPCS;
1387            csum = htole(cksum(ip));
1388            igbe->rxIpChecksums++;
1389            if (cksum(ip) != 0) {
1390                err |= RXDE_IPE;
1391                ext_err |= RXDEE_IPE;
1392                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
1393            }
1394        }
1395        TcpPtr tcp = ip ? TcpPtr(ip) : TcpPtr(ip6);
1396        if (tcp && igbe->regs.rxcsum.tuofld()) {
1397            DPRINTF(EthernetDesc, "Checking TCP checksum\n");
1398            status |= RXDS_TCPCS;
1399            ptype |= RXDP_TCP;
1400            csum = htole(cksum(tcp));
1401            igbe->rxTcpChecksums++;
1402            if (cksum(tcp) != 0) {
1403                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
1404                err |= RXDE_TCPE;
1405                ext_err |= RXDEE_TCPE;
1406            }
1407        }
1408
1409        UdpPtr udp = ip ? UdpPtr(ip) : UdpPtr(ip6);
1410        if (udp && igbe->regs.rxcsum.tuofld()) {
1411            DPRINTF(EthernetDesc, "Checking UDP checksum\n");
1412            status |= RXDS_UDPCS;
1413            ptype |= RXDP_UDP;
1414            csum = htole(cksum(udp));
1415            igbe->rxUdpChecksums++;
1416            if (cksum(udp) != 0) {
1417                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
1418                ext_err |= RXDEE_TCPE;
1419                err |= RXDE_TCPE;
1420            }
1421        }
1422    } else { // if ip
1423        DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
1424    }
1425
1426    switch (igbe->regs.srrctl.desctype()) {
1427      case RXDT_LEGACY:
1428        desc->legacy.len = htole((uint16_t)(pktPtr->length + crcfixup));
1429        desc->legacy.status = htole(status);
1430        desc->legacy.errors = htole(err);
1431        // No vlan support at this point... just set it to 0
1432        desc->legacy.vlan = 0;
1433        break;
1434      case RXDT_ADV_SPLIT_A:
1435      case RXDT_ADV_ONEBUF:
1436        desc->adv_wb.rss_type = htole(0);
1437        desc->adv_wb.pkt_type = htole(ptype);
1438        if (igbe->regs.rxcsum.pcsd()) {
1439            // no rss support right now
1440            desc->adv_wb.rss_hash = htole(0);
1441        } else {
1442            desc->adv_wb.id = htole(ip_id);
1443            desc->adv_wb.csum = htole(csum);
1444        }
1445        desc->adv_wb.status = htole(status);
1446        desc->adv_wb.errors = htole(ext_err);
1447        // no vlan support
1448        desc->adv_wb.vlan_tag = htole(0);
1449        break;
1450      default:
1451        panic("Unimplemnted RX receive buffer type %d\n",
1452              igbe->regs.srrctl.desctype());
1453    }
1454
1455    DPRINTF(EthernetDesc, "Descriptor complete w0: %#x w1: %#x\n",
1456            desc->adv_read.pkt, desc->adv_read.hdr);
1457
1458    if (bytesCopied == pktPtr->length) {
1459        DPRINTF(EthernetDesc,
1460                "Packet completely written to descriptor buffers\n");
1461        // Deal with the rx timer interrupts
1462        if (igbe->regs.rdtr.delay()) {
1463            Tick delay = igbe->regs.rdtr.delay() * igbe->intClock();
1464            DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n", delay);
1465            igbe->reschedule(igbe->rdtrEvent, curTick() + delay);
1466        }
1467
1468        if (igbe->regs.radv.idv()) {
1469            Tick delay = igbe->regs.radv.idv() * igbe->intClock();
1470            DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n", delay);
1471            if (!igbe->radvEvent.scheduled()) {
1472                igbe->schedule(igbe->radvEvent, curTick() + delay);
1473            }
1474        }
1475
1476        // if neither radv or rdtr, maybe itr is set...
1477        if (!igbe->regs.rdtr.delay() && !igbe->regs.radv.idv()) {
1478            DPRINTF(EthernetSM,
1479                    "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
1480            igbe->postInterrupt(IT_RXT);
1481        }
1482
1483        // If the packet is small enough, interrupt appropriately
1484        // I wonder if this is delayed or not?!
1485        if (pktPtr->length <= igbe->regs.rsrpd.idv()) {
1486            DPRINTF(EthernetSM,
1487                    "RXS: Posting IT_SRPD beacuse small packet received\n");
1488            igbe->postInterrupt(IT_SRPD);
1489        }
1490        bytesCopied = 0;
1491    }
1492
1493    pktPtr = NULL;
1494    igbe->checkDrain();
1495    enableSm();
1496    pktDone = true;
1497
1498    igbe->anBegin("RXS", "Done Updating Desc");
1499    DPRINTF(EthernetDesc, "Processing of this descriptor complete\n");
1500    igbe->anDq("RXS", annUnusedCacheQ);
1501    unusedCache.pop_front();
1502    igbe->anQ("RXS", annUsedCacheQ);
1503    usedCache.push_back(desc);
1504}
1505
1506void
1507IGbE::RxDescCache::enableSm()
1508{
1509    if (igbe->drainState() != DrainState::Draining) {
1510        igbe->rxTick = true;
1511        igbe->restartClock();
1512    }
1513}
1514
1515bool
1516IGbE::RxDescCache::packetDone()
1517{
1518    if (pktDone) {
1519        pktDone = false;
1520        return true;
1521    }
1522    return false;
1523}
1524
1525bool
1526IGbE::RxDescCache::hasOutstandingEvents()
1527{
1528    return pktEvent.scheduled() || wbEvent.scheduled() ||
1529        fetchEvent.scheduled() || pktHdrEvent.scheduled() ||
1530        pktDataEvent.scheduled();
1531
1532}
1533
1534void
1535IGbE::RxDescCache::serialize(CheckpointOut &cp) const
1536{
1537    DescCache<RxDesc>::serialize(cp);
1538    SERIALIZE_SCALAR(pktDone);
1539    SERIALIZE_SCALAR(splitCount);
1540    SERIALIZE_SCALAR(bytesCopied);
1541}
1542
1543void
1544IGbE::RxDescCache::unserialize(CheckpointIn &cp)
1545{
1546    DescCache<RxDesc>::unserialize(cp);
1547    UNSERIALIZE_SCALAR(pktDone);
1548    UNSERIALIZE_SCALAR(splitCount);
1549    UNSERIALIZE_SCALAR(bytesCopied);
1550}
1551
1552
1553///////////////////////////// IGbE::TxDescCache //////////////////////////////
1554
1555IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s)
1556    : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false),
1557      pktWaiting(false), pktMultiDesc(false),
1558      completionAddress(0), completionEnabled(false),
1559      useTso(false), tsoHeaderLen(0), tsoMss(0), tsoTotalLen(0), tsoUsedLen(0),
1560      tsoPrevSeq(0), tsoPktPayloadBytes(0), tsoLoadedHeader(false),
1561      tsoPktHasHeader(false), tsoDescBytesUsed(0), tsoCopyBytes(0), tsoPkts(0),
1562    pktEvent([this]{ pktComplete(); }, n),
1563    headerEvent([this]{ headerComplete(); }, n),
1564    nullEvent([this]{ nullCallback(); }, n)
1565{
1566    annSmFetch = "TX Desc Fetch";
1567    annSmWb = "TX Desc Writeback";
1568    annUnusedDescQ = "TX Unused Descriptors";
1569    annUnusedCacheQ = "TX Unused Descriptor Cache";
1570    annUsedCacheQ = "TX Used Descriptor Cache";
1571    annUsedDescQ = "TX Used Descriptors";
1572    annDescQ = "TX Descriptors";
1573}
1574
1575void
1576IGbE::TxDescCache::processContextDesc()
1577{
1578    assert(unusedCache.size());
1579    TxDesc *desc;
1580
1581    DPRINTF(EthernetDesc, "Checking and  processing context descriptors\n");
1582
1583    while (!useTso && unusedCache.size() &&
1584           TxdOp::isContext(unusedCache.front())) {
1585        DPRINTF(EthernetDesc, "Got context descriptor type...\n");
1586
1587        desc = unusedCache.front();
1588        DPRINTF(EthernetDesc, "Descriptor upper: %#x lower: %#X\n",
1589                desc->d1, desc->d2);
1590
1591
1592        // is this going to be a tcp or udp packet?
1593        isTcp = TxdOp::tcp(desc) ? true : false;
1594
1595        // setup all the TSO variables, they'll be ignored if we don't use
1596        // tso for this connection
1597        tsoHeaderLen = TxdOp::hdrlen(desc);
1598        tsoMss  = TxdOp::mss(desc);
1599
1600        if (TxdOp::isType(desc, TxdOp::TXD_CNXT) && TxdOp::tse(desc)) {
1601            DPRINTF(EthernetDesc, "TCP offload enabled for packet hdrlen: "
1602                    "%d mss: %d paylen %d\n", TxdOp::hdrlen(desc),
1603                    TxdOp::mss(desc), TxdOp::getLen(desc));
1604            useTso = true;
1605            tsoTotalLen = TxdOp::getLen(desc);
1606            tsoLoadedHeader = false;
1607            tsoDescBytesUsed = 0;
1608            tsoUsedLen = 0;
1609            tsoPrevSeq = 0;
1610            tsoPktHasHeader = false;
1611            tsoPkts = 0;
1612            tsoCopyBytes = 0;
1613        }
1614
1615        TxdOp::setDd(desc);
1616        unusedCache.pop_front();
1617        igbe->anDq("TXS", annUnusedCacheQ);
1618        usedCache.push_back(desc);
1619        igbe->anQ("TXS", annUsedCacheQ);
1620    }
1621
1622    if (!unusedCache.size())
1623        return;
1624
1625    desc = unusedCache.front();
1626    if (!useTso && TxdOp::isType(desc, TxdOp::TXD_ADVDATA) &&
1627        TxdOp::tse(desc)) {
1628        DPRINTF(EthernetDesc, "TCP offload(adv) enabled for packet "
1629                "hdrlen: %d mss: %d paylen %d\n",
1630                tsoHeaderLen, tsoMss, TxdOp::getTsoLen(desc));
1631        useTso = true;
1632        tsoTotalLen = TxdOp::getTsoLen(desc);
1633        tsoLoadedHeader = false;
1634        tsoDescBytesUsed = 0;
1635        tsoUsedLen = 0;
1636        tsoPrevSeq = 0;
1637        tsoPktHasHeader = false;
1638        tsoPkts = 0;
1639    }
1640
1641    if (useTso && !tsoLoadedHeader) {
1642        // we need to fetch a header
1643        DPRINTF(EthernetDesc, "Starting DMA of TSO header\n");
1644        assert(TxdOp::isData(desc) && TxdOp::getLen(desc) >= tsoHeaderLen);
1645        pktWaiting = true;
1646        assert(tsoHeaderLen <= 256);
1647        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
1648                      tsoHeaderLen, &headerEvent, tsoHeader, 0);
1649    }
1650}
1651
1652void
1653IGbE::TxDescCache::headerComplete()
1654{
1655    DPRINTF(EthernetDesc, "TSO: Fetching TSO header complete\n");
1656    pktWaiting = false;
1657
1658    assert(unusedCache.size());
1659    TxDesc *desc = unusedCache.front();
1660    DPRINTF(EthernetDesc, "TSO: len: %d tsoHeaderLen: %d\n",
1661            TxdOp::getLen(desc), tsoHeaderLen);
1662
1663    if (TxdOp::getLen(desc) == tsoHeaderLen) {
1664        tsoDescBytesUsed = 0;
1665        tsoLoadedHeader = true;
1666        unusedCache.pop_front();
1667        usedCache.push_back(desc);
1668    } else {
1669        DPRINTF(EthernetDesc, "TSO: header part of larger payload\n");
1670        tsoDescBytesUsed = tsoHeaderLen;
1671        tsoLoadedHeader = true;
1672    }
1673    enableSm();
1674    igbe->checkDrain();
1675}
1676
1677unsigned
1678IGbE::TxDescCache::getPacketSize(EthPacketPtr p)
1679{
1680    if (!unusedCache.size())
1681        return 0;
1682
1683    DPRINTF(EthernetDesc, "Starting processing of descriptor\n");
1684
1685    assert(!useTso || tsoLoadedHeader);
1686    TxDesc *desc = unusedCache.front();
1687
1688    if (useTso) {
1689        DPRINTF(EthernetDesc, "getPacket(): TxDescriptor data "
1690                "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
1691        DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
1692                "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
1693                tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
1694
1695        if (tsoPktHasHeader)
1696            tsoCopyBytes =  std::min((tsoMss + tsoHeaderLen) - p->length,
1697                                     TxdOp::getLen(desc) - tsoDescBytesUsed);
1698        else
1699            tsoCopyBytes =  std::min(tsoMss,
1700                                     TxdOp::getLen(desc) - tsoDescBytesUsed);
1701        unsigned pkt_size =
1702            tsoCopyBytes + (tsoPktHasHeader ? 0 : tsoHeaderLen);
1703
1704        DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d "
1705                "this descLen: %d\n",
1706                tsoDescBytesUsed, tsoCopyBytes, TxdOp::getLen(desc));
1707        DPRINTF(EthernetDesc, "TSO: pktHasHeader: %d\n", tsoPktHasHeader);
1708        DPRINTF(EthernetDesc, "TSO: Next packet is %d bytes\n", pkt_size);
1709        return pkt_size;
1710    }
1711
1712    DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
1713            TxdOp::getLen(unusedCache.front()));
1714    return TxdOp::getLen(desc);
1715}
1716
1717void
1718IGbE::TxDescCache::getPacketData(EthPacketPtr p)
1719{
1720    assert(unusedCache.size());
1721
1722    TxDesc *desc;
1723    desc = unusedCache.front();
1724
1725    DPRINTF(EthernetDesc, "getPacketData(): TxDescriptor data "
1726            "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
1727    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
1728           TxdOp::getLen(desc));
1729
1730    pktPtr = p;
1731
1732    pktWaiting = true;
1733
1734    DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d\n", p->length);
1735
1736    if (useTso) {
1737        assert(tsoLoadedHeader);
1738        if (!tsoPktHasHeader) {
1739            DPRINTF(EthernetDesc,
1740                    "Loading TSO header (%d bytes) into start of packet\n",
1741                    tsoHeaderLen);
1742            memcpy(p->data, &tsoHeader,tsoHeaderLen);
1743            p->length +=tsoHeaderLen;
1744            tsoPktHasHeader = true;
1745        }
1746    }
1747
1748    if (useTso) {
1749        DPRINTF(EthernetDesc,
1750                "Starting DMA of packet at offset %d length: %d\n",
1751                p->length, tsoCopyBytes);
1752        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc))
1753                      + tsoDescBytesUsed,
1754                      tsoCopyBytes, &pktEvent, p->data + p->length,
1755                      igbe->txReadDelay);
1756        tsoDescBytesUsed += tsoCopyBytes;
1757        assert(tsoDescBytesUsed <= TxdOp::getLen(desc));
1758    } else {
1759        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
1760                      TxdOp::getLen(desc), &pktEvent, p->data + p->length,
1761                      igbe->txReadDelay);
1762    }
1763}
1764
1765void
1766IGbE::TxDescCache::pktComplete()
1767{
1768
1769    TxDesc *desc;
1770    assert(unusedCache.size());
1771    assert(pktPtr);
1772
1773    igbe->anBegin("TXS", "Update Desc");
1774
1775    DPRINTF(EthernetDesc, "DMA of packet complete\n");
1776
1777
1778    desc = unusedCache.front();
1779    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
1780           TxdOp::getLen(desc));
1781
1782    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
1783            desc->d1, desc->d2);
1784
1785    // Set the length of the data in the EtherPacket
1786    if (useTso) {
1787        DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
1788            "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
1789            tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
1790        pktPtr->simLength += tsoCopyBytes;
1791        pktPtr->length += tsoCopyBytes;
1792        tsoUsedLen += tsoCopyBytes;
1793        DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d\n",
1794            tsoDescBytesUsed, tsoCopyBytes);
1795    } else {
1796        pktPtr->simLength += TxdOp::getLen(desc);
1797        pktPtr->length += TxdOp::getLen(desc);
1798    }
1799
1800
1801
1802    if ((!TxdOp::eop(desc) && !useTso) ||
1803        (pktPtr->length < ( tsoMss + tsoHeaderLen) &&
1804         tsoTotalLen != tsoUsedLen && useTso)) {
1805        assert(!useTso || (tsoDescBytesUsed == TxdOp::getLen(desc)));
1806        igbe->anDq("TXS", annUnusedCacheQ);
1807        unusedCache.pop_front();
1808        igbe->anQ("TXS", annUsedCacheQ);
1809        usedCache.push_back(desc);
1810
1811        tsoDescBytesUsed = 0;
1812        pktDone = true;
1813        pktWaiting = false;
1814        pktMultiDesc = true;
1815
1816        DPRINTF(EthernetDesc, "Partial Packet Descriptor of %d bytes Done\n",
1817                pktPtr->length);
1818        pktPtr = NULL;
1819
1820        enableSm();
1821        igbe->checkDrain();
1822        return;
1823    }
1824
1825
1826    pktMultiDesc = false;
1827    // no support for vlans
1828    assert(!TxdOp::vle(desc));
1829
1830    // we only support single packet descriptors at this point
1831    if (!useTso)
1832        assert(TxdOp::eop(desc));
1833
1834    // set that this packet is done
1835    if (TxdOp::rs(desc))
1836        TxdOp::setDd(desc);
1837
1838    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
1839            desc->d1, desc->d2);
1840
1841    if (useTso) {
1842        IpPtr ip(pktPtr);
1843        Ip6Ptr ip6(pktPtr);
1844        if (ip) {
1845            DPRINTF(EthernetDesc, "TSO: Modifying IP header. Id + %d\n",
1846                    tsoPkts);
1847            ip->id(ip->id() + tsoPkts++);
1848            ip->len(pktPtr->length - EthPtr(pktPtr)->size());
1849        }
1850        if (ip6)
1851            ip6->plen(pktPtr->length - EthPtr(pktPtr)->size());
1852        TcpPtr tcp = ip ? TcpPtr(ip) : TcpPtr(ip6);
1853        if (tcp) {
1854            DPRINTF(EthernetDesc,
1855                    "TSO: Modifying TCP header. old seq %d + %d\n",
1856                    tcp->seq(), tsoPrevSeq);
1857            tcp->seq(tcp->seq() + tsoPrevSeq);
1858            if (tsoUsedLen != tsoTotalLen)
1859                tcp->flags(tcp->flags() & ~9); // clear fin & psh
1860        }
1861        UdpPtr udp = ip ? UdpPtr(ip) : UdpPtr(ip6);
1862        if (udp) {
1863            DPRINTF(EthernetDesc, "TSO: Modifying UDP header.\n");
1864            udp->len(pktPtr->length - EthPtr(pktPtr)->size());
1865        }
1866        tsoPrevSeq = tsoUsedLen;
1867    }
1868
1869    if (DTRACE(EthernetDesc)) {
1870        IpPtr ip(pktPtr);
1871        if (ip)
1872            DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n",
1873                    ip->id());
1874        else
1875            DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
1876    }
1877
1878    // Checksums are only ofloaded for new descriptor types
1879    if (TxdOp::isData(desc) && (TxdOp::ixsm(desc) || TxdOp::txsm(desc))) {
1880        DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
1881        IpPtr ip(pktPtr);
1882        Ip6Ptr ip6(pktPtr);
1883        assert(ip || ip6);
1884        if (ip && TxdOp::ixsm(desc)) {
1885            ip->sum(0);
1886            ip->sum(cksum(ip));
1887            igbe->txIpChecksums++;
1888            DPRINTF(EthernetDesc, "Calculated IP checksum\n");
1889        }
1890        if (TxdOp::txsm(desc)) {
1891            TcpPtr tcp = ip ? TcpPtr(ip) : TcpPtr(ip6);
1892            UdpPtr udp = ip ? UdpPtr(ip) : UdpPtr(ip6);
1893            if (tcp) {
1894                tcp->sum(0);
1895                tcp->sum(cksum(tcp));
1896                igbe->txTcpChecksums++;
1897                DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
1898            } else if (udp) {
1899                assert(udp);
1900                udp->sum(0);
1901                udp->sum(cksum(udp));
1902                igbe->txUdpChecksums++;
1903                DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
1904            } else {
1905                panic("Told to checksum, but don't know how\n");
1906            }
1907        }
1908    }
1909
1910    if (TxdOp::ide(desc)) {
1911        // Deal with the rx timer interrupts
1912        DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
1913        if (igbe->regs.tidv.idv()) {
1914            Tick delay = igbe->regs.tidv.idv() * igbe->intClock();
1915            DPRINTF(EthernetDesc, "setting tidv\n");
1916            igbe->reschedule(igbe->tidvEvent, curTick() + delay, true);
1917        }
1918
1919        if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
1920            Tick delay = igbe->regs.tadv.idv() * igbe->intClock();
1921            DPRINTF(EthernetDesc, "setting tadv\n");
1922            if (!igbe->tadvEvent.scheduled()) {
1923                igbe->schedule(igbe->tadvEvent, curTick() + delay);
1924            }
1925        }
1926    }
1927
1928
1929    if (!useTso ||  TxdOp::getLen(desc) == tsoDescBytesUsed) {
1930        DPRINTF(EthernetDesc, "Descriptor Done\n");
1931        igbe->anDq("TXS", annUnusedCacheQ);
1932        unusedCache.pop_front();
1933        igbe->anQ("TXS", annUsedCacheQ);
1934        usedCache.push_back(desc);
1935        tsoDescBytesUsed = 0;
1936    }
1937
1938    if (useTso && tsoUsedLen == tsoTotalLen)
1939        useTso = false;
1940
1941
1942    DPRINTF(EthernetDesc,
1943            "------Packet of %d bytes ready for transmission-------\n",
1944            pktPtr->length);
1945    pktDone = true;
1946    pktWaiting = false;
1947    pktPtr = NULL;
1948    tsoPktHasHeader = false;
1949
1950    if (igbe->regs.txdctl.wthresh() == 0) {
1951        igbe->anBegin("TXS", "Desc Writeback");
1952        DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
1953        writeback(0);
1954    } else if (!igbe->regs.txdctl.gran() && igbe->regs.txdctl.wthresh() <=
1955               descInBlock(usedCache.size())) {
1956        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
1957        igbe->anBegin("TXS", "Desc Writeback");
1958        writeback((igbe->cacheBlockSize()-1)>>4);
1959    } else if (igbe->regs.txdctl.wthresh() <= usedCache.size()) {
1960        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
1961        igbe->anBegin("TXS", "Desc Writeback");
1962        writeback((igbe->cacheBlockSize()-1)>>4);
1963    }
1964
1965    enableSm();
1966    igbe->checkDrain();
1967}
1968
1969void
1970IGbE::TxDescCache::actionAfterWb()
1971{
1972    DPRINTF(EthernetDesc, "actionAfterWb() completionEnabled: %d\n",
1973            completionEnabled);
1974    igbe->postInterrupt(iGbReg::IT_TXDW);
1975    if (completionEnabled) {
1976        descEnd = igbe->regs.tdh();
1977        DPRINTF(EthernetDesc,
1978                "Completion writing back value: %d to addr: %#x\n", descEnd,
1979                completionAddress);
1980        igbe->dmaWrite(pciToDma(mbits(completionAddress, 63, 2)),
1981                       sizeof(descEnd), &nullEvent, (uint8_t*)&descEnd, 0);
1982    }
1983}
1984
1985void
1986IGbE::TxDescCache::serialize(CheckpointOut &cp) const
1987{
1988    DescCache<TxDesc>::serialize(cp);
1989
1990    SERIALIZE_SCALAR(pktDone);
1991    SERIALIZE_SCALAR(isTcp);
1992    SERIALIZE_SCALAR(pktWaiting);
1993    SERIALIZE_SCALAR(pktMultiDesc);
1994
1995    SERIALIZE_SCALAR(useTso);
1996    SERIALIZE_SCALAR(tsoHeaderLen);
1997    SERIALIZE_SCALAR(tsoMss);
1998    SERIALIZE_SCALAR(tsoTotalLen);
1999    SERIALIZE_SCALAR(tsoUsedLen);
2000    SERIALIZE_SCALAR(tsoPrevSeq);;
2001    SERIALIZE_SCALAR(tsoPktPayloadBytes);
2002    SERIALIZE_SCALAR(tsoLoadedHeader);
2003    SERIALIZE_SCALAR(tsoPktHasHeader);
2004    SERIALIZE_ARRAY(tsoHeader, 256);
2005    SERIALIZE_SCALAR(tsoDescBytesUsed);
2006    SERIALIZE_SCALAR(tsoCopyBytes);
2007    SERIALIZE_SCALAR(tsoPkts);
2008
2009    SERIALIZE_SCALAR(completionAddress);
2010    SERIALIZE_SCALAR(completionEnabled);
2011    SERIALIZE_SCALAR(descEnd);
2012}
2013
2014void
2015IGbE::TxDescCache::unserialize(CheckpointIn &cp)
2016{
2017    DescCache<TxDesc>::unserialize(cp);
2018
2019    UNSERIALIZE_SCALAR(pktDone);
2020    UNSERIALIZE_SCALAR(isTcp);
2021    UNSERIALIZE_SCALAR(pktWaiting);
2022    UNSERIALIZE_SCALAR(pktMultiDesc);
2023
2024    UNSERIALIZE_SCALAR(useTso);
2025    UNSERIALIZE_SCALAR(tsoHeaderLen);
2026    UNSERIALIZE_SCALAR(tsoMss);
2027    UNSERIALIZE_SCALAR(tsoTotalLen);
2028    UNSERIALIZE_SCALAR(tsoUsedLen);
2029    UNSERIALIZE_SCALAR(tsoPrevSeq);;
2030    UNSERIALIZE_SCALAR(tsoPktPayloadBytes);
2031    UNSERIALIZE_SCALAR(tsoLoadedHeader);
2032    UNSERIALIZE_SCALAR(tsoPktHasHeader);
2033    UNSERIALIZE_ARRAY(tsoHeader, 256);
2034    UNSERIALIZE_SCALAR(tsoDescBytesUsed);
2035    UNSERIALIZE_SCALAR(tsoCopyBytes);
2036    UNSERIALIZE_SCALAR(tsoPkts);
2037
2038    UNSERIALIZE_SCALAR(completionAddress);
2039    UNSERIALIZE_SCALAR(completionEnabled);
2040    UNSERIALIZE_SCALAR(descEnd);
2041}
2042
2043bool
2044IGbE::TxDescCache::packetAvailable()
2045{
2046    if (pktDone) {
2047        pktDone = false;
2048        return true;
2049    }
2050    return false;
2051}
2052
2053void
2054IGbE::TxDescCache::enableSm()
2055{
2056    if (igbe->drainState() != DrainState::Draining) {
2057        igbe->txTick = true;
2058        igbe->restartClock();
2059    }
2060}
2061
2062bool
2063IGbE::TxDescCache::hasOutstandingEvents()
2064{
2065    return pktEvent.scheduled() || wbEvent.scheduled() ||
2066        fetchEvent.scheduled();
2067}
2068
2069
2070///////////////////////////////////// IGbE /////////////////////////////////
2071
2072void
2073IGbE::restartClock()
2074{
2075    if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) &&
2076        drainState() == DrainState::Running)
2077        schedule(tickEvent, clockEdge(Cycles(1)));
2078}
2079
2080DrainState
2081IGbE::drain()
2082{
2083    unsigned int count(0);
2084    if (rxDescCache.hasOutstandingEvents() ||
2085        txDescCache.hasOutstandingEvents()) {
2086        count++;
2087    }
2088
2089    txFifoTick = false;
2090    txTick = false;
2091    rxTick = false;
2092
2093    if (tickEvent.scheduled())
2094        deschedule(tickEvent);
2095
2096    if (count) {
2097        DPRINTF(Drain, "IGbE not drained\n");
2098        return DrainState::Draining;
2099    } else
2100        return DrainState::Drained;
2101}
2102
2103void
2104IGbE::drainResume()
2105{
2106    Drainable::drainResume();
2107
2108    txFifoTick = true;
2109    txTick = true;
2110    rxTick = true;
2111
2112    restartClock();
2113    DPRINTF(EthernetSM, "resuming from drain");
2114}
2115
2116void
2117IGbE::checkDrain()
2118{
2119    if (drainState() != DrainState::Draining)
2120        return;
2121
2122    txFifoTick = false;
2123    txTick = false;
2124    rxTick = false;
2125    if (!rxDescCache.hasOutstandingEvents() &&
2126        !txDescCache.hasOutstandingEvents()) {
2127        DPRINTF(Drain, "IGbE done draining, processing drain event\n");
2128        signalDrainDone();
2129    }
2130}
2131
2132void
2133IGbE::txStateMachine()
2134{
2135    if (!regs.tctl.en()) {
2136        txTick = false;
2137        DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
2138        return;
2139    }
2140
2141    // If we have a packet available and it's length is not 0 (meaning it's not
2142    // a multidescriptor packet) put it in the fifo, otherwise an the next
2143    // iteration we'll get the rest of the data
2144    if (txPacket && txDescCache.packetAvailable()
2145        && !txDescCache.packetMultiDesc() && txPacket->length) {
2146        anQ("TXS", "TX FIFO Q");
2147        DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
2148#ifndef NDEBUG
2149        bool success =
2150#endif
2151            txFifo.push(txPacket);
2152        txFifoTick = true && drainState() != DrainState::Draining;
2153        assert(success);
2154        txPacket = NULL;
2155        anBegin("TXS", "Desc Writeback");
2156        txDescCache.writeback((cacheBlockSize()-1)>>4);
2157        return;
2158    }
2159
2160    // Only support descriptor granularity
2161    if (regs.txdctl.lwthresh() &&
2162        txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
2163        DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
2164        postInterrupt(IT_TXDLOW);
2165    }
2166
2167    if (!txPacket) {
2168        txPacket = std::make_shared<EthPacketData>(16384);
2169    }
2170
2171    if (!txDescCache.packetWaiting()) {
2172        if (txDescCache.descLeft() == 0) {
2173            postInterrupt(IT_TXQE);
2174            anBegin("TXS", "Desc Writeback");
2175            txDescCache.writeback(0);
2176            anBegin("TXS", "Desc Fetch");
2177            anWe("TXS", txDescCache.annUnusedCacheQ);
2178            txDescCache.fetchDescriptors();
2179            DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
2180                    "writeback stopping ticking and posting TXQE\n");
2181            txTick = false;
2182            return;
2183        }
2184
2185
2186        if (!(txDescCache.descUnused())) {
2187            anBegin("TXS", "Desc Fetch");
2188            txDescCache.fetchDescriptors();
2189            anWe("TXS", txDescCache.annUnusedCacheQ);
2190            DPRINTF(EthernetSM, "TXS: No descriptors available in cache, "
2191                    "fetching and stopping ticking\n");
2192            txTick = false;
2193            return;
2194        }
2195        anPq("TXS", txDescCache.annUnusedCacheQ);
2196
2197
2198        txDescCache.processContextDesc();
2199        if (txDescCache.packetWaiting()) {
2200            DPRINTF(EthernetSM,
2201                    "TXS: Fetching TSO header, stopping ticking\n");
2202            txTick = false;
2203            return;
2204        }
2205
2206        unsigned size = txDescCache.getPacketSize(txPacket);
2207        if (size > 0 && txFifo.avail() > size) {
2208            anRq("TXS", "TX FIFO Q");
2209            anBegin("TXS", "DMA Packet");
2210            DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and "
2211                    "beginning DMA of next packet\n", size);
2212            txFifo.reserve(size);
2213            txDescCache.getPacketData(txPacket);
2214        } else if (size == 0) {
2215            DPRINTF(EthernetSM, "TXS: getPacketSize returned: %d\n", size);
2216            DPRINTF(EthernetSM,
2217                    "TXS: No packets to get, writing back used descriptors\n");
2218            anBegin("TXS", "Desc Writeback");
2219            txDescCache.writeback(0);
2220        } else {
2221            anWf("TXS", "TX FIFO Q");
2222            DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
2223                    "available in FIFO\n");
2224            txTick = false;
2225        }
2226
2227
2228        return;
2229    }
2230    DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
2231    txTick = false;
2232}
2233
2234bool
2235IGbE::ethRxPkt(EthPacketPtr pkt)
2236{
2237    rxBytes += pkt->length;
2238    rxPackets++;
2239
2240    DPRINTF(Ethernet, "RxFIFO: Receiving pcakte from wire\n");
2241    anBegin("RXQ", "Wire Recv");
2242
2243
2244    if (!regs.rctl.en()) {
2245        DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
2246        anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
2247        return true;
2248    }
2249
2250    // restart the state machines if they are stopped
2251    rxTick = true && drainState() != DrainState::Draining;
2252    if ((rxTick || txTick) && !tickEvent.scheduled()) {
2253        DPRINTF(EthernetSM,
2254                "RXS: received packet into fifo, starting ticking\n");
2255        restartClock();
2256    }
2257
2258    if (!rxFifo.push(pkt)) {
2259        DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
2260        postInterrupt(IT_RXO, true);
2261        anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
2262        return false;
2263    }
2264
2265    if (CPA::available() && cpa->enabled()) {
2266        assert(sys->numSystemsRunning <= 2);
2267        System *other_sys;
2268        if (sys->systemList[0] == sys)
2269            other_sys = sys->systemList[1];
2270        else
2271            other_sys = sys->systemList[0];
2272
2273        cpa->hwDq(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
2274        anQ("RXQ", "RX FIFO Q");
2275        cpa->hwWe(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
2276    }
2277
2278    return true;
2279}
2280
2281
2282void
2283IGbE::rxStateMachine()
2284{
2285    if (!regs.rctl.en()) {
2286        rxTick = false;
2287        DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
2288        return;
2289    }
2290
2291    // If the packet is done check for interrupts/descriptors/etc
2292    if (rxDescCache.packetDone()) {
2293        rxDmaPacket = false;
2294        DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
2295        int descLeft = rxDescCache.descLeft();
2296        DPRINTF(EthernetSM, "RXS: descLeft: %d rdmts: %d rdlen: %d\n",
2297                descLeft, regs.rctl.rdmts(), regs.rdlen());
2298
2299        // rdmts 2->1/8, 1->1/4, 0->1/2
2300        int ratio = (1ULL << (regs.rctl.rdmts() + 1));
2301        if (descLeft * ratio <= regs.rdlen()) {
2302            DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) "
2303                    "because of descriptors left\n");
2304            postInterrupt(IT_RXDMT);
2305        }
2306
2307        if (rxFifo.empty())
2308            rxDescCache.writeback(0);
2309
2310        if (descLeft == 0) {
2311            anBegin("RXS", "Writeback Descriptors");
2312            rxDescCache.writeback(0);
2313            DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
2314                    " writeback and stopping ticking\n");
2315            rxTick = false;
2316        }
2317
2318        // only support descriptor granulaties
2319        assert(regs.rxdctl.gran());
2320
2321        if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
2322            DPRINTF(EthernetSM,
2323                    "RXS: Writing back because WTHRESH >= descUsed\n");
2324            anBegin("RXS", "Writeback Descriptors");
2325            if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
2326                rxDescCache.writeback(regs.rxdctl.wthresh()-1);
2327            else
2328                rxDescCache.writeback((cacheBlockSize()-1)>>4);
2329        }
2330
2331        if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
2332            ((rxDescCache.descLeft() - rxDescCache.descUnused()) >
2333             regs.rxdctl.hthresh())) {
2334            DPRINTF(EthernetSM, "RXS: Fetching descriptors because "
2335                    "descUnused < PTHRESH\n");
2336            anBegin("RXS", "Fetch Descriptors");
2337            rxDescCache.fetchDescriptors();
2338        }
2339
2340        if (rxDescCache.descUnused() == 0) {
2341            anBegin("RXS", "Fetch Descriptors");
2342            rxDescCache.fetchDescriptors();
2343            anWe("RXS", rxDescCache.annUnusedCacheQ);
2344            DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
2345                    "fetching descriptors and stopping ticking\n");
2346            rxTick = false;
2347        }
2348        return;
2349    }
2350
2351    if (rxDmaPacket) {
2352        DPRINTF(EthernetSM,
2353                "RXS: stopping ticking until packet DMA completes\n");
2354        rxTick = false;
2355        return;
2356    }
2357
2358    if (!rxDescCache.descUnused()) {
2359        anBegin("RXS", "Fetch Descriptors");
2360        rxDescCache.fetchDescriptors();
2361        anWe("RXS", rxDescCache.annUnusedCacheQ);
2362        DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
2363                "stopping ticking\n");
2364        rxTick = false;
2365        DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n");
2366        return;
2367    }
2368    anPq("RXS", rxDescCache.annUnusedCacheQ);
2369
2370    if (rxFifo.empty()) {
2371        anWe("RXS", "RX FIFO Q");
2372        DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
2373        rxTick = false;
2374        return;
2375    }
2376    anPq("RXS", "RX FIFO Q");
2377    anBegin("RXS", "Get Desc");
2378
2379    EthPacketPtr pkt;
2380    pkt = rxFifo.front();
2381
2382
2383    pktOffset = rxDescCache.writePacket(pkt, pktOffset);
2384    DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
2385    if (pktOffset == pkt->length) {
2386        anBegin( "RXS", "FIFO Dequeue");
2387        DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
2388        pktOffset = 0;
2389        anDq("RXS", "RX FIFO Q");
2390        rxFifo.pop();
2391    }
2392
2393    DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
2394    rxTick = false;
2395    rxDmaPacket = true;
2396    anBegin("RXS", "DMA Packet");
2397}
2398
2399void
2400IGbE::txWire()
2401{
2402    txFifoTick = false;
2403
2404    if (txFifo.empty()) {
2405        anWe("TXQ", "TX FIFO Q");
2406        return;
2407    }
2408
2409
2410    anPq("TXQ", "TX FIFO Q");
2411    if (etherInt->sendPacket(txFifo.front())) {
2412        anQ("TXQ", "WireQ");
2413        if (DTRACE(EthernetSM)) {
2414            IpPtr ip(txFifo.front());
2415            if (ip)
2416                DPRINTF(EthernetSM, "Transmitting Ip packet with Id=%d\n",
2417                        ip->id());
2418            else
2419                DPRINTF(EthernetSM, "Transmitting Non-Ip packet\n");
2420        }
2421        anDq("TXQ", "TX FIFO Q");
2422        anBegin("TXQ", "Wire Send");
2423        DPRINTF(EthernetSM,
2424                "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
2425                txFifo.avail());
2426
2427        txBytes += txFifo.front()->length;
2428        txPackets++;
2429
2430        txFifo.pop();
2431    }
2432}
2433
2434void
2435IGbE::tick()
2436{
2437    DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");
2438
2439    inTick = true;
2440
2441    if (rxTick)
2442        rxStateMachine();
2443
2444    if (txTick)
2445        txStateMachine();
2446
2447    // If txWire returns and txFifoTick is still set, that means the data we
2448    // sent to the other end was already accepted and we can send another
2449    // frame right away. This is consistent with the previous behavior which
2450    // would send another frame if one was ready in ethTxDone. This version
2451    // avoids growing the stack with each frame sent which can cause stack
2452    // overflow.
2453    while (txFifoTick)
2454        txWire();
2455
2456    if (rxTick || txTick || txFifoTick)
2457        schedule(tickEvent, curTick() + clockPeriod());
2458
2459    inTick = false;
2460}
2461
2462void
2463IGbE::ethTxDone()
2464{
2465    anBegin("TXQ", "Send Done");
2466    // restart the tx state machines if they are stopped
2467    // fifo to send another packet
2468    // tx sm to put more data into the fifo
2469    txFifoTick = true && drainState() != DrainState::Draining;
2470    if (txDescCache.descLeft() != 0 && drainState() != DrainState::Draining)
2471        txTick = true;
2472
2473    if (!inTick)
2474        restartClock();
2475    DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
2476}
2477
2478void
2479IGbE::serialize(CheckpointOut &cp) const
2480{
2481    PciDevice::serialize(cp);
2482
2483    regs.serialize(cp);
2484    SERIALIZE_SCALAR(eeOpBits);
2485    SERIALIZE_SCALAR(eeAddrBits);
2486    SERIALIZE_SCALAR(eeDataBits);
2487    SERIALIZE_SCALAR(eeOpcode);
2488    SERIALIZE_SCALAR(eeAddr);
2489    SERIALIZE_SCALAR(lastInterrupt);
2490    SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);
2491
2492    rxFifo.serialize("rxfifo", cp);
2493    txFifo.serialize("txfifo", cp);
2494
2495    bool txPktExists = txPacket != nullptr;
2496    SERIALIZE_SCALAR(txPktExists);
2497    if (txPktExists)
2498        txPacket->serialize("txpacket", cp);
2499
2500    Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
2501        inter_time = 0;
2502
2503    if (rdtrEvent.scheduled())
2504        rdtr_time = rdtrEvent.when();
2505    SERIALIZE_SCALAR(rdtr_time);
2506
2507    if (radvEvent.scheduled())
2508        radv_time = radvEvent.when();
2509    SERIALIZE_SCALAR(radv_time);
2510
2511    if (tidvEvent.scheduled())
2512        tidv_time = tidvEvent.when();
2513    SERIALIZE_SCALAR(tidv_time);
2514
2515    if (tadvEvent.scheduled())
2516        tadv_time = tadvEvent.when();
2517    SERIALIZE_SCALAR(tadv_time);
2518
2519    if (interEvent.scheduled())
2520        inter_time = interEvent.when();
2521    SERIALIZE_SCALAR(inter_time);
2522
2523    SERIALIZE_SCALAR(pktOffset);
2524
2525    txDescCache.serializeSection(cp, "TxDescCache");
2526    rxDescCache.serializeSection(cp, "RxDescCache");
2527}
2528
2529void
2530IGbE::unserialize(CheckpointIn &cp)
2531{
2532    PciDevice::unserialize(cp);
2533
2534    regs.unserialize(cp);
2535    UNSERIALIZE_SCALAR(eeOpBits);
2536    UNSERIALIZE_SCALAR(eeAddrBits);
2537    UNSERIALIZE_SCALAR(eeDataBits);
2538    UNSERIALIZE_SCALAR(eeOpcode);
2539    UNSERIALIZE_SCALAR(eeAddr);
2540    UNSERIALIZE_SCALAR(lastInterrupt);
2541    UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);
2542
2543    rxFifo.unserialize("rxfifo", cp);
2544    txFifo.unserialize("txfifo", cp);
2545
2546    bool txPktExists;
2547    UNSERIALIZE_SCALAR(txPktExists);
2548    if (txPktExists) {
2549        txPacket = std::make_shared<EthPacketData>(16384);
2550        txPacket->unserialize("txpacket", cp);
2551    }
2552
2553    rxTick = true;
2554    txTick = true;
2555    txFifoTick = true;
2556
2557    Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
2558    UNSERIALIZE_SCALAR(rdtr_time);
2559    UNSERIALIZE_SCALAR(radv_time);
2560    UNSERIALIZE_SCALAR(tidv_time);
2561    UNSERIALIZE_SCALAR(tadv_time);
2562    UNSERIALIZE_SCALAR(inter_time);
2563
2564    if (rdtr_time)
2565        schedule(rdtrEvent, rdtr_time);
2566
2567    if (radv_time)
2568        schedule(radvEvent, radv_time);
2569
2570    if (tidv_time)
2571        schedule(tidvEvent, tidv_time);
2572
2573    if (tadv_time)
2574        schedule(tadvEvent, tadv_time);
2575
2576    if (inter_time)
2577        schedule(interEvent, inter_time);
2578
2579    UNSERIALIZE_SCALAR(pktOffset);
2580
2581    txDescCache.unserializeSection(cp, "TxDescCache");
2582    rxDescCache.unserializeSection(cp, "RxDescCache");
2583}
2584
2585IGbE *
2586IGbEParams::create()
2587{
2588    return new IGbE(this);
2589}
2590