i8254xGBe.cc revision 12963:214e39f63149
1/*
2 * Copyright (c) 2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Ali Saidi
29 */
30
31/* @file
32 * Device model for Intel's 8254x line of gigabit ethernet controllers.
33 * In particular, it models an 82547 revision 2 (82547GI) MAC, because that
34 * part seems to require the fewest workarounds in the driver. It will probably
35 * work with most of the other MACs with slight modifications.
36 */
37
38#include "dev/net/i8254xGBe.hh"
39
40/*
41 * @todo There are really multiple DMA engines; we should implement them.
42 */
43
44#include <algorithm>
45#include <memory>
46
47#include "base/inet.hh"
48#include "base/trace.hh"
49#include "debug/Drain.hh"
50#include "debug/EthernetAll.hh"
51#include "mem/packet.hh"
52#include "mem/packet_access.hh"
53#include "params/IGbE.hh"
54#include "sim/stats.hh"
55#include "sim/system.hh"
56
57using namespace iGbReg;
58using namespace Net;
59
60IGbE::IGbE(const Params *p)
61    : EtherDevice(p), etherInt(NULL), cpa(NULL),
62      rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), inTick(false),
63      rxTick(false), txTick(false), txFifoTick(false), rxDmaPacket(false),
64      pktOffset(0), fetchDelay(p->fetch_delay), wbDelay(p->wb_delay),
65      fetchCompDelay(p->fetch_comp_delay), wbCompDelay(p->wb_comp_delay),
66      rxWriteDelay(p->rx_write_delay), txReadDelay(p->tx_read_delay),
67      rdtrEvent([this]{ rdtrProcess(); }, name()),
68      radvEvent([this]{ radvProcess(); }, name()),
69      tadvEvent([this]{ tadvProcess(); }, name()),
70      tidvEvent([this]{ tidvProcess(); }, name()),
71      tickEvent([this]{ tick(); }, name()),
72      interEvent([this]{ delayIntEvent(); }, name()),
73      rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size),
74      txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size),
75      lastInterrupt(0)
76{
77    etherInt = new IGbEInt(name() + ".int", this);
78
79    // Initialize internal registers per Intel documentation
80    // All registers are initialized to 0 by the per-register constructors
81    regs.ctrl.fd(1);
82    regs.ctrl.lrst(1);
83    regs.ctrl.speed(2);
84    regs.ctrl.frcspd(1);
85    regs.sts.speed(3); // Say we're 1000Mbps
86    regs.sts.fd(1); // full duplex
87    regs.sts.lu(1); // link up
88    regs.eecd.fwe(1);
89    regs.eecd.ee_type(1);
90    regs.imr = 0;
91    regs.iam = 0;
92    regs.rxdctl.gran(1);
93    regs.rxdctl.wthresh(1);
94    regs.fcrth(1);
95    regs.tdwba = 0;
96    regs.rlpml = 0;
97    regs.sw_fw_sync = 0;
98
99    regs.pba.rxa(0x30);
100    regs.pba.txa(0x10);
101
102    eeOpBits            = 0;
103    eeAddrBits          = 0;
104    eeDataBits          = 0;
105    eeOpcode            = 0;
106
107    // Clear all 64 16-bit words of the EEPROM
108    memset(&flash, 0, EEPROM_SIZE*2);
109
110    // Set the MAC address
111    memcpy(flash, p->hardware_address.bytes(), ETH_ADDR_LEN);
112    for (int x = 0; x < ETH_ADDR_LEN/2; x++)
113        flash[x] = htobe(flash[x]);
114
115    uint16_t csum = 0;
116    for (int x = 0; x < EEPROM_SIZE; x++)
117        csum += htobe(flash[x]);
118
119
120    // Fix up the last word so the EEPROM words sum to EEPROM_CSUM
121    flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum));
122
123    // Store the MAC address as queue ID
124    macAddr = p->hardware_address;
125
126    rxFifo.clear();
127    txFifo.clear();
128}
129
130IGbE::~IGbE()
131{
132    delete etherInt;
133}
134
135void
136IGbE::init()
137{
138    cpa = CPA::cpa();
139    PciDevice::init();
140}
141
142EtherInt*
143IGbE::getEthPort(const std::string &if_name, int idx)
144{
145
146    if (if_name == "interface") {
147        if (etherInt->getPeer())
148            panic("Port already connected to\n");
149        return etherInt;
150    }
151    return NULL;
152}
153
154Tick
155IGbE::writeConfig(PacketPtr pkt)
156{
157    int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
158    if (offset < PCI_DEVICE_SPECIFIC)
159        PciDevice::writeConfig(pkt);
160    else
161        panic("Device specific PCI config space not implemented.\n");
162
163    //
164    // Some work may need to be done here based on the PCI COMMAND bits.
165    //
166
167    return configDelay;
168}
169
170// Handy macro for range-testing register access addresses
171#define IN_RANGE(val, base, len) ((val) >= (base) && (val) < ((base) + (len)))
172
173Tick
174IGbE::read(PacketPtr pkt)
175{
176    int bar;
177    Addr daddr;
178
179    if (!getBAR(pkt->getAddr(), bar, daddr))
180        panic("Invalid PCI memory access to unmapped memory.\n");
181
182    // Only Memory register BAR is allowed
183    assert(bar == 0);
184
185    // Only 32-bit accesses allowed
186    assert(pkt->getSize() == 4);
187
188    DPRINTF(Ethernet, "Read device register %#X\n", daddr);
189
190    //
191    // Handle read of register here
192    //
193
194
195    switch (daddr) {
196      case REG_CTRL:
197        pkt->set<uint32_t>(regs.ctrl());
198        break;
199      case REG_STATUS:
200        pkt->set<uint32_t>(regs.sts());
201        break;
202      case REG_EECD:
203        pkt->set<uint32_t>(regs.eecd());
204        break;
205      case REG_EERD:
206        pkt->set<uint32_t>(regs.eerd());
207        break;
208      case REG_CTRL_EXT:
209        pkt->set<uint32_t>(regs.ctrl_ext());
210        break;
211      case REG_MDIC:
212        pkt->set<uint32_t>(regs.mdic());
213        break;
214      case REG_ICR:
215        DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
216                regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
217        pkt->set<uint32_t>(regs.icr());
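        // ICR is read-to-clear: if an interrupt is currently asserted or the
        // interrupt mask (IMR) is empty, the read clears the pending cause
        // bits; with IAME set and an interrupt asserted, the IAM auto-mask is
        // also applied to IMR.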
218        if (regs.icr.int_assert() || regs.imr == 0) {
219            regs.icr = regs.icr() & ~mask(30);
220            DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr());
221        }
222        if (regs.ctrl_ext.iame() && regs.icr.int_assert())
223            regs.imr &= ~regs.iam;
224        chkInterrupt();
225        break;
226      case REG_EICR:
227        // This is only useful for MSI, but the driver reads it every time
228        // Just don't do anything
229        pkt->set<uint32_t>(0);
230        break;
231      case REG_ITR:
232        pkt->set<uint32_t>(regs.itr());
233        break;
234      case REG_RCTL:
235        pkt->set<uint32_t>(regs.rctl());
236        break;
237      case REG_FCTTV:
238        pkt->set<uint32_t>(regs.fcttv());
239        break;
240      case REG_TCTL:
241        pkt->set<uint32_t>(regs.tctl());
242        break;
243      case REG_PBA:
244        pkt->set<uint32_t>(regs.pba());
245        break;
246      case REG_WUC:
247      case REG_WUFC:
248      case REG_WUS:
249      case REG_LEDCTL:
250        pkt->set<uint32_t>(0); // We don't care, so just return 0
251        break;
252      case REG_FCRTL:
253        pkt->set<uint32_t>(regs.fcrtl());
254        break;
255      case REG_FCRTH:
256        pkt->set<uint32_t>(regs.fcrth());
257        break;
258      case REG_RDBAL:
259        pkt->set<uint32_t>(regs.rdba.rdbal());
260        break;
261      case REG_RDBAH:
262        pkt->set<uint32_t>(regs.rdba.rdbah());
263        break;
264      case REG_RDLEN:
265        pkt->set<uint32_t>(regs.rdlen());
266        break;
267      case REG_SRRCTL:
268        pkt->set<uint32_t>(regs.srrctl());
269        break;
270      case REG_RDH:
271        pkt->set<uint32_t>(regs.rdh());
272        break;
273      case REG_RDT:
274        pkt->set<uint32_t>(regs.rdt());
275        break;
276      case REG_RDTR:
277        pkt->set<uint32_t>(regs.rdtr());
278        if (regs.rdtr.fpd()) {
279            rxDescCache.writeback(0);
280            DPRINTF(EthernetIntr,
281                    "Posting interrupt because of RDTR.FPD write\n");
282            postInterrupt(IT_RXT);
283            regs.rdtr.fpd(0);
284        }
285        break;
286      case REG_RXDCTL:
287        pkt->set<uint32_t>(regs.rxdctl());
288        break;
289      case REG_RADV:
290        pkt->set<uint32_t>(regs.radv());
291        break;
292      case REG_TDBAL:
293        pkt->set<uint32_t>(regs.tdba.tdbal());
294        break;
295      case REG_TDBAH:
296        pkt->set<uint32_t>(regs.tdba.tdbah());
297        break;
298      case REG_TDLEN:
299        pkt->set<uint32_t>(regs.tdlen());
300        break;
301      case REG_TDH:
302        pkt->set<uint32_t>(regs.tdh());
303        break;
304      case REG_TXDCA_CTL:
305        pkt->set<uint32_t>(regs.txdca_ctl());
306        break;
307      case REG_TDT:
308        pkt->set<uint32_t>(regs.tdt());
309        break;
310      case REG_TIDV:
311        pkt->set<uint32_t>(regs.tidv());
312        break;
313      case REG_TXDCTL:
314        pkt->set<uint32_t>(regs.txdctl());
315        break;
316      case REG_TADV:
317        pkt->set<uint32_t>(regs.tadv());
318        break;
319      case REG_TDWBAL:
320        pkt->set<uint32_t>(regs.tdwba & mask(32));
321        break;
322      case REG_TDWBAH:
323        pkt->set<uint32_t>(regs.tdwba >> 32);
324        break;
325      case REG_RXCSUM:
326        pkt->set<uint32_t>(regs.rxcsum());
327        break;
328      case REG_RLPML:
329        pkt->set<uint32_t>(regs.rlpml);
330        break;
331      case REG_RFCTL:
332        pkt->set<uint32_t>(regs.rfctl());
333        break;
334      case REG_MANC:
335        pkt->set<uint32_t>(regs.manc());
336        break;
337      case REG_SWSM:
338        pkt->set<uint32_t>(regs.swsm());
339        regs.swsm.smbi(1);
340        break;
341      case REG_FWSM:
342        pkt->set<uint32_t>(regs.fwsm());
343        break;
344      case REG_SWFWSYNC:
345        pkt->set<uint32_t>(regs.sw_fw_sync);
346        break;
347      default:
348        if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) &&
349            !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) &&
350            !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4) &&
351            !IN_RANGE(daddr, REG_CRCERRS, STATS_REGS_SIZE))
352            panic("Read request to unknown register number: %#x\n", daddr);
353        else
354            pkt->set<uint32_t>(0);
355    };
356
357    pkt->makeAtomicResponse();
358    return pioDelay;
359}
360
361Tick
362IGbE::write(PacketPtr pkt)
363{
364    int bar;
365    Addr daddr;
366
367
368    if (!getBAR(pkt->getAddr(), bar, daddr))
369        panic("Invalid PCI memory access to unmapped memory.\n");
370
371    // Only Memory register BAR is allowed
372    assert(bar == 0);
373
374    // Only 32-bit accesses allowed
375    assert(pkt->getSize() == sizeof(uint32_t));
376
377    DPRINTF(Ethernet, "Wrote device register %#X value %#X\n",
378            daddr, pkt->get<uint32_t>());
379
380    //
381    // Handle write of register here
382    //
383    uint32_t val = pkt->get<uint32_t>();
384
385    Regs::RCTL oldrctl;
386    Regs::TCTL oldtctl;
387
388    switch (daddr) {
389      case REG_CTRL:
390        regs.ctrl = val;
391        if (regs.ctrl.tfce())
392            warn("TX Flow control enabled, should implement\n");
393        if (regs.ctrl.rfce())
394            warn("RX Flow control enabled, should implement\n");
395        break;
396      case REG_CTRL_EXT:
397        regs.ctrl_ext = val;
398        break;
399      case REG_STATUS:
400        regs.sts = val;
401        break;
402      case REG_EECD:
403        int oldClk;
404        oldClk = regs.eecd.sk();
405        regs.eecd = val;
406        // See if this is an EEPROM access and emulate it accordingly
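        // The EEPROM is emulated as a bit-banged SPI-style interface: on each
        // rising edge of SK we shift in 8 opcode bits, then (for a read) 8
        // address bits, and finally shift the data bits back out MSB first.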
407        if (!oldClk && regs.eecd.sk()) {
408            if (eeOpBits < 8) {
409                eeOpcode = eeOpcode << 1 | regs.eecd.din();
410                eeOpBits++;
411            } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
412                eeAddr = eeAddr << 1 | regs.eecd.din();
413                eeAddrBits++;
414            } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
415                assert(eeAddr>>1 < EEPROM_SIZE);
416                DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n",
417                        flash[eeAddr>>1] >> eeDataBits & 0x1,
418                        flash[eeAddr>>1]);
419                regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1);
420                eeDataBits++;
421            } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) {
422                regs.eecd.dout(0);
423                eeDataBits++;
424            } else
425                panic("What's going on with eeprom interface? opcode:"
426                      " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode,
427                      (uint32_t)eeOpBits, (uint32_t)eeAddr,
428                      (uint32_t)eeAddrBits, (uint32_t)eeDataBits);
429
430            // Reset everything for the next command
431            if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) ||
432                (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) {
433                eeOpBits = 0;
434                eeAddrBits = 0;
435                eeDataBits = 0;
436                eeOpcode = 0;
437                eeAddr = 0;
438            }
439
440            DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
441                    (uint32_t)eeOpcode, (uint32_t) eeOpBits,
442                    (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits);
443            if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI ||
444                                   eeOpcode == EEPROM_RDSR_OPCODE_SPI ))
445                panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode,
446                      (uint32_t)eeOpBits);
447
448
449        }
450        // If the driver requests EEPROM access, grant it immediately
451        regs.eecd.ee_gnt(regs.eecd.ee_req());
452        break;
453      case REG_EERD:
454        regs.eerd = val;
455        if (regs.eerd.start()) {
456            regs.eerd.done(1);
457            assert(regs.eerd.addr() < EEPROM_SIZE);
458            regs.eerd.data(flash[regs.eerd.addr()]);
459            regs.eerd.start(0);
460            DPRINTF(EthernetEEPROM, "EEPROM: read addr: %#X data %#x\n",
461                    regs.eerd.addr(), regs.eerd.data());
462        }
463        break;
464      case REG_MDIC:
465        regs.mdic = val;
466        if (regs.mdic.i())
467            panic("No support for interrupt on mdic complete\n");
468        if (regs.mdic.phyadd() != 1)
469            panic("No support for reading anything but phy\n");
470        DPRINTF(Ethernet, "%s phy address %x\n",
471                regs.mdic.op() == 1 ? "Writing" : "Reading",
472                regs.mdic.regadd());
473        switch (regs.mdic.regadd()) {
474          case PHY_PSTATUS:
475            regs.mdic.data(0x796D); // link up
476            break;
477          case PHY_PID:
478            regs.mdic.data(params()->phy_pid);
479            break;
480          case PHY_EPID:
481            regs.mdic.data(params()->phy_epid);
482            break;
483          case PHY_GSTATUS:
484            regs.mdic.data(0x7C00);
485            break;
486          case PHY_EPSTATUS:
487            regs.mdic.data(0x3000);
488            break;
489          case PHY_AGC:
490            regs.mdic.data(0x180); // some random length
491            break;
492          default:
493            regs.mdic.data(0);
494        }
495        regs.mdic.r(1);
496        break;
497      case REG_ICR:
498        DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
499                regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
500        if (regs.ctrl_ext.iame())
501            regs.imr &= ~regs.iam;
502        regs.icr = ~bits(val,30,0) & regs.icr();
503        chkInterrupt();
504        break;
505      case REG_ITR:
506        regs.itr = val;
507        break;
508      case REG_ICS:
509        DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n");
510        postInterrupt((IntTypes)val);
511        break;
512      case REG_IMS:
513        regs.imr |= val;
514        chkInterrupt();
515        break;
516      case REG_IMC:
517        regs.imr &= ~val;
518        chkInterrupt();
519        break;
520      case REG_IAM:
521        regs.iam = val;
522        break;
523      case REG_RCTL:
524        oldrctl = regs.rctl;
525        regs.rctl = val;
526        if (regs.rctl.rst()) {
527            rxDescCache.reset();
528            DPRINTF(EthernetSM, "RXS: Got RESET!\n");
529            rxFifo.clear();
530            regs.rctl.rst(0);
531        }
532        if (regs.rctl.en())
533            rxTick = true;
534        restartClock();
535        break;
536      case REG_FCTTV:
537        regs.fcttv = val;
538        break;
539      case REG_TCTL:
540        oldtctl = regs.tctl; // capture the old value before overwriting it
541        regs.tctl = val;
543        if (regs.tctl.en())
544            txTick = true;
545        restartClock();
546        if (regs.tctl.en() && !oldtctl.en()) {
547            txDescCache.reset();
548        }
549        break;
550      case REG_PBA:
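        // Writing PBA sets the RX allocation; the remainder of the 64KB
        // on-chip packet buffer is implicitly assigned to TX.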
551        regs.pba.rxa(val);
552        regs.pba.txa(64 - regs.pba.rxa());
553        break;
554      case REG_WUC:
555      case REG_WUFC:
556      case REG_WUS:
557      case REG_LEDCTL:
558      case REG_FCAL:
559      case REG_FCAH:
560      case REG_FCT:
561      case REG_VET:
562      case REG_AIFS:
563      case REG_TIPG:
564        ; // We don't care, so don't store anything
565        break;
566      case REG_IVAR0:
567        warn("Writing to IVAR0, ignoring...\n");
568        break;
569      case REG_FCRTL:
570        regs.fcrtl = val;
571        break;
572      case REG_FCRTH:
573        regs.fcrth = val;
574        break;
575      case REG_RDBAL:
576        regs.rdba.rdbal( val & ~mask(4));
577        rxDescCache.areaChanged();
578        break;
579      case REG_RDBAH:
580        regs.rdba.rdbah(val);
581        rxDescCache.areaChanged();
582        break;
583      case REG_RDLEN:
584        regs.rdlen = val & ~mask(7);
585        rxDescCache.areaChanged();
586        break;
587      case REG_SRRCTL:
588        regs.srrctl = val;
589        break;
590      case REG_RDH:
591        regs.rdh = val;
592        rxDescCache.areaChanged();
593        break;
594      case REG_RDT:
595        regs.rdt = val;
596        DPRINTF(EthernetSM, "RXS: RDT Updated.\n");
597        if (drainState() == DrainState::Running) {
598            DPRINTF(EthernetSM, "RXS: RDT Fetching Descriptors!\n");
599            rxDescCache.fetchDescriptors();
600        } else {
601            DPRINTF(EthernetSM, "RXS: RDT NOT Fetching Desc b/c draining!\n");
602        }
603        break;
604      case REG_RDTR:
605        regs.rdtr = val;
606        break;
607      case REG_RADV:
608        regs.radv = val;
609        break;
610      case REG_RXDCTL:
611        regs.rxdctl = val;
612        break;
613      case REG_TDBAL:
614        regs.tdba.tdbal( val & ~mask(4));
615        txDescCache.areaChanged();
616        break;
617      case REG_TDBAH:
618        regs.tdba.tdbah(val);
619        txDescCache.areaChanged();
620        break;
621      case REG_TDLEN:
622        regs.tdlen = val & ~mask(7);
623        txDescCache.areaChanged();
624        break;
625      case REG_TDH:
626        regs.tdh = val;
627        txDescCache.areaChanged();
628        break;
629      case REG_TXDCA_CTL:
630        regs.txdca_ctl = val;
631        if (regs.txdca_ctl.enabled())
632            panic("No support for DCA\n");
633        break;
634      case REG_TDT:
635        regs.tdt = val;
636        DPRINTF(EthernetSM, "TXS: TX Tail pointer updated\n");
637        if (drainState() == DrainState::Running) {
638            DPRINTF(EthernetSM, "TXS: TDT Fetching Descriptors!\n");
639            txDescCache.fetchDescriptors();
640        } else {
641            DPRINTF(EthernetSM, "TXS: TDT NOT Fetching Desc b/c draining!\n");
642        }
643        break;
644      case REG_TIDV:
645        regs.tidv = val;
646        break;
647      case REG_TXDCTL:
648        regs.txdctl = val;
649        break;
650      case REG_TADV:
651        regs.tadv = val;
652        break;
653      case REG_TDWBAL:
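        // Bit 0 of TDWBA acts as the enable flag; the remaining bits form the
        // address used for TX descriptor completion writeback.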
654        regs.tdwba &= ~mask(32);
655        regs.tdwba |= val;
656        txDescCache.completionWriteback(regs.tdwba & ~mask(1),
657                                        regs.tdwba & mask(1));
658        break;
659      case REG_TDWBAH:
660        regs.tdwba &= mask(32);
661        regs.tdwba |= (uint64_t)val << 32;
662        txDescCache.completionWriteback(regs.tdwba & ~mask(1),
663                                        regs.tdwba & mask(1));
664        break;
665      case REG_RXCSUM:
666        regs.rxcsum = val;
667        break;
668      case REG_RLPML:
669        regs.rlpml = val;
670        break;
671      case REG_RFCTL:
672        regs.rfctl = val;
673        if (regs.rfctl.exsten())
674            panic("Extended RX descriptors not implemented\n");
675        break;
676      case REG_MANC:
677        regs.manc = val;
678        break;
679      case REG_SWSM:
680        regs.swsm = val;
681        if (regs.fwsm.eep_fw_semaphore())
682            regs.swsm.swesmbi(0);
683        break;
684      case REG_SWFWSYNC:
685        regs.sw_fw_sync = val;
686        break;
687      default:
688        if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) &&
689            !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) &&
690            !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4))
691            panic("Write request to unknown register number: %#x\n", daddr);
692    };
693
694    pkt->makeAtomicResponse();
695    return pioDelay;
696}
697
698void
699IGbE::postInterrupt(IntTypes t, bool now)
700{
701    assert(t);
702
703    // Interrupt is already pending
704    if (t & regs.icr() && !now)
705        return;
706
707    regs.icr = regs.icr() | t;
708
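    // The ITR interval field is specified in 256 ns units, so convert it to
    // ticks before deciding whether to throttle this interrupt.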
709    Tick itr_interval = SimClock::Int::ns * 256 * regs.itr.interval();
710    DPRINTF(EthernetIntr,
711            "EINT: postInterrupt() curTick(): %d itr: %d interval: %d\n",
712            curTick(), regs.itr.interval(), itr_interval);
713
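    // Post immediately if throttling is disabled, the caller forced it, or the
    // throttle interval has already elapsed; otherwise schedule it for later.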
714    if (regs.itr.interval() == 0 || now ||
715        lastInterrupt + itr_interval <= curTick()) {
716        if (interEvent.scheduled()) {
717            deschedule(interEvent);
718        }
719        cpuPostInt();
720    } else {
721        Tick int_time = lastInterrupt + itr_interval;
722        assert(int_time > 0);
723        DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for tick %d\n",
724                int_time);
725        if (!interEvent.scheduled()) {
726            schedule(interEvent, int_time);
727        }
728    }
729}
730
731void
732IGbE::delayIntEvent()
733{
734    cpuPostInt();
735}
736
737
738void
739IGbE::cpuPostInt()
740{
741
742    postedInterrupts++;
743
744    if (!(regs.icr() & regs.imr)) {
745        DPRINTF(Ethernet, "Interrupt Masked. Not Posting\n");
746        return;
747    }
748
749    DPRINTF(Ethernet, "Posting Interrupt\n");
750
751
752    if (interEvent.scheduled()) {
753        deschedule(interEvent);
754    }
755
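    // Fold any pending delayed-interrupt timers into this interrupt: cancel
    // them and set the corresponding cause bits now.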
756    if (rdtrEvent.scheduled()) {
757        regs.icr.rxt0(1);
758        deschedule(rdtrEvent);
759    }
760    if (radvEvent.scheduled()) {
761        regs.icr.rxt0(1);
762        deschedule(radvEvent);
763    }
764    if (tadvEvent.scheduled()) {
765        regs.icr.txdw(1);
766        deschedule(tadvEvent);
767    }
768    if (tidvEvent.scheduled()) {
769        regs.icr.txdw(1);
770        deschedule(tidvEvent);
771    }
772
773    regs.icr.int_assert(1);
774    DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n",
775            regs.icr());
776
777    intrPost();
778
779    lastInterrupt = curTick();
780}
781
782void
783IGbE::cpuClearInt()
784{
785    if (regs.icr.int_assert()) {
786        regs.icr.int_assert(0);
787        DPRINTF(EthernetIntr,
788                "EINT: Clearing interrupt to CPU now. Vector %#x\n",
789                regs.icr());
790        intrClear();
791    }
792}
793
794void
795IGbE::chkInterrupt()
796{
797    DPRINTF(Ethernet, "Checking interrupts icr: %#x imr: %#x\n", regs.icr(),
798            regs.imr);
799    // Check if we need to clear the cpu interrupt
800    if (!(regs.icr() & regs.imr)) {
801        DPRINTF(Ethernet, "Mask cleaned all interrupts\n");
802        if (interEvent.scheduled())
803            deschedule(interEvent);
804        if (regs.icr.int_assert())
805            cpuClearInt();
806    }
807    DPRINTF(Ethernet, "ITR = %#X itr.interval = %#X\n",
808            regs.itr(), regs.itr.interval());
809
810    if (regs.icr() & regs.imr) {
811        if (regs.itr.interval() == 0)  {
812            cpuPostInt();
813        } else {
814            DPRINTF(Ethernet,
815                    "Possibly scheduling interrupt because of imr write\n");
816            if (!interEvent.scheduled()) {
817                Tick t = curTick() + SimClock::Int::ns * 256 * regs.itr.interval();
818                DPRINTF(Ethernet, "Scheduling for %d\n", t);
819                schedule(interEvent, t);
820            }
821        }
822    }
823}
824
825
826///////////////////////////// IGbE::DescCache //////////////////////////////
827
828template<class T>
829IGbE::DescCache<T>::DescCache(IGbE *i, const std::string n, int s)
830    : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0),
831      wbOut(0), moreToWb(false), wbAlignment(0), pktPtr(NULL),
832      wbDelayEvent([this]{ writeback1(); }, n),
833      fetchDelayEvent([this]{ fetchDescriptors1(); }, n),
834      fetchEvent([this]{ fetchComplete(); }, n),
835      wbEvent([this]{ wbComplete(); }, n)
836{
837    fetchBuf = new T[size];
838    wbBuf = new T[size];
839}
840
841template<class T>
842IGbE::DescCache<T>::~DescCache()
843{
844    reset();
845    delete[] fetchBuf;
846    delete[] wbBuf;
847}
848
849template<class T>
850void
851IGbE::DescCache<T>::areaChanged()
852{
853    if (usedCache.size() > 0 || curFetching || wbOut)
854        panic("Descriptor Address, Length or Head changed. Bad\n");
855    reset();
856
857}
858
859template<class T>
860void
861IGbE::DescCache<T>::writeback(Addr aMask)
862{
863    int curHead = descHead();
864    int max_to_wb = usedCache.size();
865
866    // Check if this writeback is less restrictive than the previous one
867    // and, if so, set up another one immediately following it
868    if (wbOut) {
869        if (aMask < wbAlignment) {
870            moreToWb = true;
871            wbAlignment = aMask;
872        }
873        DPRINTF(EthernetDesc,
874                "Writing back already in process, returning\n");
875        return;
876    }
877
878    moreToWb = false;
879    wbAlignment = aMask;
880
881
882    DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
883            "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
884            curHead, descTail(), descLen(), cachePnt, max_to_wb,
885            descLeft());
886
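    // Don't let a writeback wrap past the end of the descriptor ring; if it
    // would, stop at the end and note that more remains. Otherwise trim the
    // count down to the requested alignment.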
887    if (max_to_wb + curHead >= descLen()) {
888        max_to_wb = descLen() - curHead;
889        moreToWb = true;
890        // this is by definition aligned correctly
891    } else if (wbAlignment != 0) {
892        // align the wb point to the mask
893        max_to_wb = max_to_wb & ~wbAlignment;
894    }
895
896    DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);
897
898    if (max_to_wb <= 0) {
899        if (usedCache.size())
900            igbe->anBegin(annSmWb, "Wait Alignment", CPA::FL_WAIT);
901        else
902            igbe->anWe(annSmWb, annUsedCacheQ);
903        return;
904    }
905
906    wbOut = max_to_wb;
907
908    assert(!wbDelayEvent.scheduled());
909    igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
910    igbe->anBegin(annSmWb, "Prepare Writeback Desc");
911}
912
913template<class T>
914void
915IGbE::DescCache<T>::writeback1()
916{
917    // If we're draining, delay issuing this DMA
918    if (igbe->drainState() != DrainState::Running) {
919        igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
920        return;
921    }
922
923    DPRINTF(EthernetDesc, "Begining DMA of %d descriptors\n", wbOut);
924
925    for (int x = 0; x < wbOut; x++) {
926        assert(usedCache.size());
927        memcpy(&wbBuf[x], usedCache[x], sizeof(T));
928        igbe->anPq(annSmWb, annUsedCacheQ);
929        igbe->anPq(annSmWb, annDescQ);
930        igbe->anQ(annSmWb, annUsedDescQ);
931    }
932
933
934    igbe->anBegin(annSmWb, "Writeback Desc DMA");
935
936    assert(wbOut);
937    igbe->dmaWrite(pciToDma(descBase() + descHead() * sizeof(T)),
938                   wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf,
939                   igbe->wbCompDelay);
940}
941
942template<class T>
943void
944IGbE::DescCache<T>::fetchDescriptors()
945{
946    size_t max_to_fetch;
947
948    if (curFetching) {
949        DPRINTF(EthernetDesc,
950                "Currently fetching %d descriptors, returning\n",
951                curFetching);
952        return;
953    }
954
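    // Fetch up to the tail pointer, stopping at the end of the ring (a later
    // call will wrap around), and never more than fits in the free space of
    // the descriptor cache.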
955    if (descTail() >= cachePnt)
956        max_to_fetch = descTail() - cachePnt;
957    else
958        max_to_fetch = descLen() - cachePnt;
959
960    size_t free_cache = size - usedCache.size() - unusedCache.size();
961
962    if (!max_to_fetch)
963        igbe->anWe(annSmFetch, annUnusedDescQ);
964    else
965        igbe->anPq(annSmFetch, annUnusedDescQ, max_to_fetch);
966
967    if (max_to_fetch) {
968        if (!free_cache)
969            igbe->anWf(annSmFetch, annDescQ);
970        else
971            igbe->anRq(annSmFetch, annDescQ, free_cache);
972    }
973
974    max_to_fetch = std::min(max_to_fetch, free_cache);
975
976
977    DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
978            "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
979            descHead(), descTail(), descLen(), cachePnt,
980            max_to_fetch, descLeft());
981
982    // Nothing to do
983    if (max_to_fetch == 0)
984        return;
985
986    // So we don't have two descriptor fetches going on at once
987    curFetching = max_to_fetch;
988
989    assert(!fetchDelayEvent.scheduled());
990    igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
991    igbe->anBegin(annSmFetch, "Prepare Fetch Desc");
992}
993
994template<class T>
995void
996IGbE::DescCache<T>::fetchDescriptors1()
997{
998    // If we're draining, delay issuing this DMA
999    if (igbe->drainState() != DrainState::Running) {
1000        igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
1001        return;
1002    }
1003
1004    igbe->anBegin(annSmFetch, "Fetch Desc");
1005
1006    DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
1007            descBase() + cachePnt * sizeof(T),
1008            pciToDma(descBase() + cachePnt * sizeof(T)),
1009            curFetching * sizeof(T));
1010    assert(curFetching);
1011    igbe->dmaRead(pciToDma(descBase() + cachePnt * sizeof(T)),
1012                  curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf,
1013                  igbe->fetchCompDelay);
1014}
1015
1016template<class T>
1017void
1018IGbE::DescCache<T>::fetchComplete()
1019{
1020    T *newDesc;
1021    igbe->anBegin(annSmFetch, "Fetch Complete");
1022    for (int x = 0; x < curFetching; x++) {
1023        newDesc = new T;
1024        memcpy(newDesc, &fetchBuf[x], sizeof(T));
1025        unusedCache.push_back(newDesc);
1026        igbe->anDq(annSmFetch, annUnusedDescQ);
1027        igbe->anQ(annSmFetch, annUnusedCacheQ);
1028        igbe->anQ(annSmFetch, annDescQ);
1029    }
1030
1031
1032#ifndef NDEBUG
1033    int oldCp = cachePnt;
1034#endif
1035
1036    cachePnt += curFetching;
1037    assert(cachePnt <= descLen());
1038    if (cachePnt == descLen())
1039        cachePnt = 0;
1040
1041    curFetching = 0;
1042
1043    DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
1044            oldCp, cachePnt);
1045
1046    if ((descTail() >= cachePnt ? (descTail() - cachePnt) : (descLen() -
1047                                                             cachePnt)) == 0)
1048    {
1049        igbe->anWe(annSmFetch, annUnusedDescQ);
1050    } else if (!(size - usedCache.size() - unusedCache.size())) {
1051        igbe->anWf(annSmFetch, annDescQ);
1052    } else {
1053        igbe->anBegin(annSmFetch, "Wait", CPA::FL_WAIT);
1054    }
1055
1056    enableSm();
1057    igbe->checkDrain();
1058}
1059
1060template<class T>
1061void
1062IGbE::DescCache<T>::wbComplete()
1063{
1064
1065    igbe->anBegin(annSmWb, "Finish Writeback");
1066
1067    long  curHead = descHead();
1068#ifndef NDEBUG
1069    long oldHead = curHead;
1070#endif
1071
1072    for (int x = 0; x < wbOut; x++) {
1073        assert(usedCache.size());
1074        delete usedCache[0];
1075        usedCache.pop_front();
1076
1077        igbe->anDq(annSmWb, annUsedCacheQ);
1078        igbe->anDq(annSmWb, annDescQ);
1079    }
1080
1081    curHead += wbOut;
1082    wbOut = 0;
1083
1084    if (curHead >= descLen())
1085        curHead -= descLen();
1086
1087    // Update the head
1088    updateHead(curHead);
1089
1090    DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
1091            oldHead, curHead);
1092
1093    // If we still have more to wb, call wb now
1094    actionAfterWb();
1095    if (moreToWb) {
1096        moreToWb = false;
1097        DPRINTF(EthernetDesc, "Writeback has more todo\n");
1098        writeback(wbAlignment);
1099    }
1100
1101    if (!wbOut) {
1102        igbe->checkDrain();
1103        if (usedCache.size())
1104            igbe->anBegin(annSmWb, "Wait", CPA::FL_WAIT);
1105        else
1106            igbe->anWe(annSmWb, annUsedCacheQ);
1107    }
1108    fetchAfterWb();
1109}
1110
1111template<class T>
1112void
1113IGbE::DescCache<T>::reset()
1114{
1115    DPRINTF(EthernetDesc, "Reseting descriptor cache\n");
1116    for (typename CacheType::size_type x = 0; x < usedCache.size(); x++)
1117        delete usedCache[x];
1118    for (typename CacheType::size_type x = 0; x < unusedCache.size(); x++)
1119        delete unusedCache[x];
1120
1121    usedCache.clear();
1122    unusedCache.clear();
1123
1124    cachePnt = 0;
1125
1126}
1127
1128template<class T>
1129void
1130IGbE::DescCache<T>::serialize(CheckpointOut &cp) const
1131{
1132    SERIALIZE_SCALAR(cachePnt);
1133    SERIALIZE_SCALAR(curFetching);
1134    SERIALIZE_SCALAR(wbOut);
1135    SERIALIZE_SCALAR(moreToWb);
1136    SERIALIZE_SCALAR(wbAlignment);
1137
1138    typename CacheType::size_type usedCacheSize = usedCache.size();
1139    SERIALIZE_SCALAR(usedCacheSize);
1140    for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
1141        arrayParamOut(cp, csprintf("usedCache_%d", x),
1142                      (uint8_t*)usedCache[x],sizeof(T));
1143    }
1144
1145    typename CacheType::size_type unusedCacheSize = unusedCache.size();
1146    SERIALIZE_SCALAR(unusedCacheSize);
1147    for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
1148        arrayParamOut(cp, csprintf("unusedCache_%d", x),
1149                      (uint8_t*)unusedCache[x],sizeof(T));
1150    }
1151
1152    Tick fetch_delay = 0, wb_delay = 0;
1153    if (fetchDelayEvent.scheduled())
1154        fetch_delay = fetchDelayEvent.when();
1155    SERIALIZE_SCALAR(fetch_delay);
1156    if (wbDelayEvent.scheduled())
1157        wb_delay = wbDelayEvent.when();
1158    SERIALIZE_SCALAR(wb_delay);
1159
1160
1161}
1162
1163template<class T>
1164void
1165IGbE::DescCache<T>::unserialize(CheckpointIn &cp)
1166{
1167    UNSERIALIZE_SCALAR(cachePnt);
1168    UNSERIALIZE_SCALAR(curFetching);
1169    UNSERIALIZE_SCALAR(wbOut);
1170    UNSERIALIZE_SCALAR(moreToWb);
1171    UNSERIALIZE_SCALAR(wbAlignment);
1172
1173    typename CacheType::size_type usedCacheSize;
1174    UNSERIALIZE_SCALAR(usedCacheSize);
1175    T *temp;
1176    for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
1177        temp = new T;
1178        arrayParamIn(cp, csprintf("usedCache_%d", x),
1179                     (uint8_t*)temp,sizeof(T));
1180        usedCache.push_back(temp);
1181    }
1182
1183    typename CacheType::size_type unusedCacheSize;
1184    UNSERIALIZE_SCALAR(unusedCacheSize);
1185    for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
1186        temp = new T;
1187        arrayParamIn(cp, csprintf("unusedCache_%d", x),
1188                     (uint8_t*)temp,sizeof(T));
1189        unusedCache.push_back(temp);
1190    }
1191    Tick fetch_delay = 0, wb_delay = 0;
1192    UNSERIALIZE_SCALAR(fetch_delay);
1193    UNSERIALIZE_SCALAR(wb_delay);
1194    if (fetch_delay)
1195        igbe->schedule(fetchDelayEvent, fetch_delay);
1196    if (wb_delay)
1197        igbe->schedule(wbDelayEvent, wb_delay);
1198
1199
1200}
1201
1202///////////////////////////// IGbE::RxDescCache //////////////////////////////
1203
1204IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
1205    : DescCache<RxDesc>(i, n, s), pktDone(false), splitCount(0),
1206    pktEvent([this]{ pktComplete(); }, n),
1207    pktHdrEvent([this]{ pktSplitDone(); }, n),
1208    pktDataEvent([this]{ pktSplitDone(); }, n)
1209
1210{
1211    annSmFetch = "RX Desc Fetch";
1212    annSmWb = "RX Desc Writeback";
1213    annUnusedDescQ = "RX Unused Descriptors";
1214    annUnusedCacheQ = "RX Unused Descriptor Cache";
1215    annUsedCacheQ = "RX Used Descriptor Cache";
1216    annUsedDescQ = "RX Used Descriptors";
1217    annDescQ = "RX Descriptors";
1218}
1219
1220void
1221IGbE::RxDescCache::pktSplitDone()
1222{
1223    splitCount++;
1224    DPRINTF(EthernetDesc,
1225            "Part of split packet done: splitcount now %d\n", splitCount);
1226    assert(splitCount <= 2);
1227    if (splitCount != 2)
1228        return;
1229    splitCount = 0;
1230    DPRINTF(EthernetDesc,
1231            "Part of split packet done: calling pktComplete()\n");
1232    pktComplete();
1233}
1234
1235int
1236IGbE::RxDescCache::writePacket(EthPacketPtr packet, int pkt_offset)
1237{
1238    assert(unusedCache.size());
1239    //if (!unusedCache.size())
1240    //    return false;
1241
1242    pktPtr = packet;
1243    pktDone = false;
1244    unsigned buf_len, hdr_len;
1245
1246    RxDesc *desc = unusedCache.front();
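    // Copy the packet into guest memory according to the receive descriptor
    // type: legacy, advanced one-buffer, or advanced header-split.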
1247    switch (igbe->regs.srrctl.desctype()) {
1248      case RXDT_LEGACY:
1249        assert(pkt_offset == 0);
1250        bytesCopied = packet->length;
1251        DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n",
1252                packet->length, igbe->regs.rctl.descSize());
1253        assert(packet->length < igbe->regs.rctl.descSize());
1254        igbe->dmaWrite(pciToDma(desc->legacy.buf),
1255                       packet->length, &pktEvent, packet->data,
1256                       igbe->rxWriteDelay);
1257        break;
1258      case RXDT_ADV_ONEBUF:
1259        assert(pkt_offset == 0);
1260        bytesCopied = packet->length;
1261        buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
1262            igbe->regs.rctl.descSize();
1263        DPRINTF(EthernetDesc, "Packet Length: %d srrctl: %#x Desc Size: %d\n",
1264                packet->length, igbe->regs.srrctl(), buf_len);
1265        assert(packet->length < buf_len);
1266        igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
1267                       packet->length, &pktEvent, packet->data,
1268                       igbe->rxWriteDelay);
1269        desc->adv_wb.header_len = htole(0);
1270        desc->adv_wb.sph = htole(0);
1271        desc->adv_wb.pkt_len = htole((uint16_t)(pktPtr->length));
1272        break;
1273      case RXDT_ADV_SPLIT_A:
1274        int split_point;
1275
1276        buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
1277            igbe->regs.rctl.descSize();
1278        hdr_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.hdrLen() : 0;
1279        DPRINTF(EthernetDesc,
1280                "lpe: %d Packet Length: %d offset: %d srrctl: %#x "
1281                "hdr addr: %#x Hdr Size: %d desc addr: %#x Desc Size: %d\n",
1282                igbe->regs.rctl.lpe(), packet->length, pkt_offset,
1283                igbe->regs.srrctl(), desc->adv_read.hdr, hdr_len,
1284                desc->adv_read.pkt, buf_len);
1285
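        // hsplit() computes where the protocol headers end (0 if the packet
        // cannot be split) so the headers can be placed in the header buffer
        // and the remaining payload in the packet buffer.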
1286        split_point = hsplit(pktPtr);
1287
1288        if (packet->length <= hdr_len) {
1289            bytesCopied = packet->length;
1290            assert(pkt_offset == 0);
1291            DPRINTF(EthernetDesc, "Hdr split: Entire packet in header\n");
1292            igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
1293                           packet->length, &pktEvent, packet->data,
1294                           igbe->rxWriteDelay);
1295            desc->adv_wb.header_len = htole((uint16_t)packet->length);
1296            desc->adv_wb.sph = htole(0);
1297            desc->adv_wb.pkt_len = htole(0);
1298        } else if (split_point) {
1299            if (pkt_offset) {
1300                // we are only copying some data, header/data has already been
1301                // copied
1302                int max_to_copy =
1303                    std::min(packet->length - pkt_offset, buf_len);
1304                bytesCopied += max_to_copy;
1305                DPRINTF(EthernetDesc,
1306                        "Hdr split: Continuing data buffer copy\n");
1307                igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
1308                               max_to_copy, &pktEvent,
1309                               packet->data + pkt_offset, igbe->rxWriteDelay);
1310                desc->adv_wb.header_len = htole(0);
1311                desc->adv_wb.pkt_len = htole((uint16_t)max_to_copy);
1312                desc->adv_wb.sph = htole(0);
1313            } else {
1314                int max_to_copy =
1315                    std::min(packet->length - split_point, buf_len);
1316                bytesCopied += max_to_copy + split_point;
1317
1318                DPRINTF(EthernetDesc, "Hdr split: splitting at %d\n",
1319                        split_point);
1320                igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
1321                               split_point, &pktHdrEvent,
1322                               packet->data, igbe->rxWriteDelay);
1323                igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
1324                               max_to_copy, &pktDataEvent,
1325                               packet->data + split_point, igbe->rxWriteDelay);
1326                desc->adv_wb.header_len = htole(split_point);
1327                desc->adv_wb.sph = 1;
1328                desc->adv_wb.pkt_len = htole((uint16_t)(max_to_copy));
1329            }
1330        } else {
1331            panic("Header split not fitting within header buffer or "
1332                  "undecodable packet not fitting in header unsupported\n");
1333        }
1334        break;
1335      default:
1336        panic("Unimplemnted RX receive buffer type: %d\n",
1337              igbe->regs.srrctl.desctype());
1338    }
1339    return bytesCopied;
1340
1341}
1342
1343void
1344IGbE::RxDescCache::pktComplete()
1345{
1346    assert(unusedCache.size());
1347    RxDesc *desc;
1348    desc = unusedCache.front();
1349
1350    igbe->anBegin("RXS", "Update Desc");
1351
1352    uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4;
1353    DPRINTF(EthernetDesc, "pktPtr->length: %d bytesCopied: %d "
1354            "stripcrc offset: %d value written: %d %d\n",
1355            pktPtr->length, bytesCopied, crcfixup,
1356            htole((uint16_t)(pktPtr->length + crcfixup)),
1357            (uint16_t)(pktPtr->length + crcfixup));
1358
1359    // no support for anything but starting at 0
1360    assert(igbe->regs.rxcsum.pcss() == 0);
1361
1362    DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n");
1363
1364    uint16_t status = RXDS_DD;
1365    uint8_t err = 0;
1366    uint16_t ext_err = 0;
1367    uint16_t csum = 0;
1368    uint16_t ptype = 0;
1369    uint16_t ip_id = 0;
1370
1371    assert(bytesCopied <= pktPtr->length);
1372    if (bytesCopied == pktPtr->length)
1373        status |= RXDS_EOP;
1374
1375    IpPtr ip(pktPtr);
1376    Ip6Ptr ip6(pktPtr);
1377
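    // If checksum offload is enabled, verify the IP/TCP/UDP checksums here
    // and record the result in the descriptor status and error bits.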
1378    if (ip || ip6) {
1379        if (ip) {
1380            DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n",
1381                    ip->id());
1382            ptype |= RXDP_IPV4;
1383            ip_id = ip->id();
1384        }
1385        if (ip6)
1386            ptype |= RXDP_IPV6;
1387
1388        if (ip && igbe->regs.rxcsum.ipofld()) {
1389            DPRINTF(EthernetDesc, "Checking IP checksum\n");
1390            status |= RXDS_IPCS;
1391            csum = htole(cksum(ip));
1392            igbe->rxIpChecksums++;
1393            if (cksum(ip) != 0) {
1394                err |= RXDE_IPE;
1395                ext_err |= RXDEE_IPE;
1396                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
1397            }
1398        }
1399        TcpPtr tcp = ip ? TcpPtr(ip) : TcpPtr(ip6);
1400        if (tcp && igbe->regs.rxcsum.tuofld()) {
1401            DPRINTF(EthernetDesc, "Checking TCP checksum\n");
1402            status |= RXDS_TCPCS;
1403            ptype |= RXDP_TCP;
1404            csum = htole(cksum(tcp));
1405            igbe->rxTcpChecksums++;
1406            if (cksum(tcp) != 0) {
1407                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
1408                err |= RXDE_TCPE;
1409                ext_err |= RXDEE_TCPE;
1410            }
1411        }
1412
1413        UdpPtr udp = ip ? UdpPtr(ip) : UdpPtr(ip6);
1414        if (udp && igbe->regs.rxcsum.tuofld()) {
1415            DPRINTF(EthernetDesc, "Checking UDP checksum\n");
1416            status |= RXDS_UDPCS;
1417            ptype |= RXDP_UDP;
1418            csum = htole(cksum(udp));
1419            igbe->rxUdpChecksums++;
1420            if (cksum(udp) != 0) {
1421                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
1422                ext_err |= RXDEE_TCPE;
1423                err |= RXDE_TCPE;
1424            }
1425        }
1426    } else { // if ip
1427        DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
1428    }
1429
1430    switch (igbe->regs.srrctl.desctype()) {
1431      case RXDT_LEGACY:
1432        desc->legacy.len = htole((uint16_t)(pktPtr->length + crcfixup));
1433        desc->legacy.status = htole(status);
1434        desc->legacy.errors = htole(err);
1435        // No vlan support at this point... just set it to 0
1436        desc->legacy.vlan = 0;
1437        break;
1438      case RXDT_ADV_SPLIT_A:
1439      case RXDT_ADV_ONEBUF:
1440        desc->adv_wb.rss_type = htole(0);
1441        desc->adv_wb.pkt_type = htole(ptype);
1442        if (igbe->regs.rxcsum.pcsd()) {
1443            // no rss support right now
1444            desc->adv_wb.rss_hash = htole(0);
1445        } else {
1446            desc->adv_wb.id = htole(ip_id);
1447            desc->adv_wb.csum = htole(csum);
1448        }
1449        desc->adv_wb.status = htole(status);
1450        desc->adv_wb.errors = htole(ext_err);
1451        // no vlan support
1452        desc->adv_wb.vlan_tag = htole(0);
1453        break;
1454      default:
1455        panic("Unimplemnted RX receive buffer type %d\n",
1456              igbe->regs.srrctl.desctype());
1457    }
1458
1459    DPRINTF(EthernetDesc, "Descriptor complete w0: %#x w1: %#x\n",
1460            desc->adv_read.pkt, desc->adv_read.hdr);
1461
1462    if (bytesCopied == pktPtr->length) {
1463        DPRINTF(EthernetDesc,
1464                "Packet completely written to descriptor buffers\n");
1465        // Deal with the rx timer interrupts
1466        if (igbe->regs.rdtr.delay()) {
1467            Tick delay = igbe->regs.rdtr.delay() * igbe->intClock();
1468            DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n", delay);
1469            igbe->reschedule(igbe->rdtrEvent, curTick() + delay);
1470        }
1471
1472        if (igbe->regs.radv.idv()) {
1473            Tick delay = igbe->regs.radv.idv() * igbe->intClock();
1474            DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n", delay);
1475            if (!igbe->radvEvent.scheduled()) {
1476                igbe->schedule(igbe->radvEvent, curTick() + delay);
1477            }
1478        }
1479
1480        // if neither radv or rdtr, maybe itr is set...
1481        if (!igbe->regs.rdtr.delay() && !igbe->regs.radv.idv()) {
1482            DPRINTF(EthernetSM,
1483                    "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
1484            igbe->postInterrupt(IT_RXT);
1485        }
1486
1487        // If the packet is small enough, interrupt appropriately
1488        // I wonder if this is delayed or not?!
1489        if (pktPtr->length <= igbe->regs.rsrpd.idv()) {
1490            DPRINTF(EthernetSM,
1491                    "RXS: Posting IT_SRPD beacuse small packet received\n");
1492            igbe->postInterrupt(IT_SRPD);
1493        }
1494        bytesCopied = 0;
1495    }
1496
1497    pktPtr = NULL;
1498    igbe->checkDrain();
1499    enableSm();
1500    pktDone = true;
1501
1502    igbe->anBegin("RXS", "Done Updating Desc");
1503    DPRINTF(EthernetDesc, "Processing of this descriptor complete\n");
1504    igbe->anDq("RXS", annUnusedCacheQ);
1505    unusedCache.pop_front();
1506    igbe->anQ("RXS", annUsedCacheQ);
1507    usedCache.push_back(desc);
1508}
1509
1510void
1511IGbE::RxDescCache::enableSm()
1512{
1513    if (igbe->drainState() != DrainState::Draining) {
1514        igbe->rxTick = true;
1515        igbe->restartClock();
1516    }
1517}
1518
1519bool
1520IGbE::RxDescCache::packetDone()
1521{
1522    if (pktDone) {
1523        pktDone = false;
1524        return true;
1525    }
1526    return false;
1527}
1528
1529bool
1530IGbE::RxDescCache::hasOutstandingEvents()
1531{
1532    return pktEvent.scheduled() || wbEvent.scheduled() ||
1533        fetchEvent.scheduled() || pktHdrEvent.scheduled() ||
1534        pktDataEvent.scheduled();
1535
1536}
1537
1538void
1539IGbE::RxDescCache::serialize(CheckpointOut &cp) const
1540{
1541    DescCache<RxDesc>::serialize(cp);
1542    SERIALIZE_SCALAR(pktDone);
1543    SERIALIZE_SCALAR(splitCount);
1544    SERIALIZE_SCALAR(bytesCopied);
1545}
1546
1547void
1548IGbE::RxDescCache::unserialize(CheckpointIn &cp)
1549{
1550    DescCache<RxDesc>::unserialize(cp);
1551    UNSERIALIZE_SCALAR(pktDone);
1552    UNSERIALIZE_SCALAR(splitCount);
1553    UNSERIALIZE_SCALAR(bytesCopied);
1554}
1555
1556
1557///////////////////////////// IGbE::TxDescCache //////////////////////////////
1558
1559IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s)
1560    : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false),
1561      pktWaiting(false), pktMultiDesc(false),
1562      completionAddress(0), completionEnabled(false),
1563      useTso(false), tsoHeaderLen(0), tsoMss(0), tsoTotalLen(0), tsoUsedLen(0),
1564      tsoPrevSeq(0), tsoPktPayloadBytes(0), tsoLoadedHeader(false),
1565      tsoPktHasHeader(false), tsoDescBytesUsed(0), tsoCopyBytes(0), tsoPkts(0),
1566    pktEvent([this]{ pktComplete(); }, n),
1567    headerEvent([this]{ headerComplete(); }, n),
1568    nullEvent([this]{ nullCallback(); }, n)
1569{
1570    annSmFetch = "TX Desc Fetch";
1571    annSmWb = "TX Desc Writeback";
1572    annUnusedDescQ = "TX Unused Descriptors";
1573    annUnusedCacheQ = "TX Unused Descriptor Cache";
1574    annUsedCacheQ = "TX Used Descriptor Cache";
1575    annUsedDescQ = "TX Used Descriptors";
1576    annDescQ = "TX Descriptors";
1577}
1578
1579void
1580IGbE::TxDescCache::processContextDesc()
1581{
1582    assert(unusedCache.size());
1583    TxDesc *desc;
1584
1585    DPRINTF(EthernetDesc, "Checking and  processing context descriptors\n");
1586
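    // Context descriptors carry offload parameters (TCP/UDP selection, TSO
    // header length and MSS) rather than packet data; consume them and mark
    // them done before looking at the data descriptors.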
1587    while (!useTso && unusedCache.size() &&
1588           TxdOp::isContext(unusedCache.front())) {
1589        DPRINTF(EthernetDesc, "Got context descriptor type...\n");
1590
1591        desc = unusedCache.front();
1592        DPRINTF(EthernetDesc, "Descriptor upper: %#x lower: %#X\n",
1593                desc->d1, desc->d2);
1594
1595
1596        // is this going to be a tcp or udp packet?
1597        isTcp = TxdOp::tcp(desc);
1598
1599        // Set up all the TSO variables; they'll be ignored if we don't use
1600        // TSO for this connection
1601        tsoHeaderLen = TxdOp::hdrlen(desc);
1602        tsoMss  = TxdOp::mss(desc);
1603
1604        if (TxdOp::isType(desc, TxdOp::TXD_CNXT) && TxdOp::tse(desc)) {
1605            DPRINTF(EthernetDesc, "TCP offload enabled for packet hdrlen: "
1606                    "%d mss: %d paylen %d\n", TxdOp::hdrlen(desc),
1607                    TxdOp::mss(desc), TxdOp::getLen(desc));
1608            useTso = true;
1609            tsoTotalLen = TxdOp::getLen(desc);
1610            tsoLoadedHeader = false;
1611            tsoDescBytesUsed = 0;
1612            tsoUsedLen = 0;
1613            tsoPrevSeq = 0;
1614            tsoPktHasHeader = false;
1615            tsoPkts = 0;
1616            tsoCopyBytes = 0;
1617        }
1618
1619        TxdOp::setDd(desc);
1620        unusedCache.pop_front();
1621        igbe->anDq("TXS", annUnusedCacheQ);
1622        usedCache.push_back(desc);
1623        igbe->anQ("TXS", annUsedCacheQ);
1624    }
1625
1626    if (!unusedCache.size())
1627        return;
1628
1629    desc = unusedCache.front();
1630    if (!useTso && TxdOp::isType(desc, TxdOp::TXD_ADVDATA) &&
1631        TxdOp::tse(desc)) {
1632        DPRINTF(EthernetDesc, "TCP offload(adv) enabled for packet "
1633                "hdrlen: %d mss: %d paylen %d\n",
1634                tsoHeaderLen, tsoMss, TxdOp::getTsoLen(desc));
1635        useTso = true;
1636        tsoTotalLen = TxdOp::getTsoLen(desc);
1637        tsoLoadedHeader = false;
1638        tsoDescBytesUsed = 0;
1639        tsoUsedLen = 0;
1640        tsoPrevSeq = 0;
1641        tsoPktHasHeader = false;
1642        tsoPkts = 0;
1643    }
1644
1645    if (useTso && !tsoLoadedHeader) {
1646        // we need to fetch a header
1647        DPRINTF(EthernetDesc, "Starting DMA of TSO header\n");
1648        assert(TxdOp::isData(desc) && TxdOp::getLen(desc) >= tsoHeaderLen);
1649        pktWaiting = true;
1650        assert(tsoHeaderLen <= 256);
1651        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
1652                      tsoHeaderLen, &headerEvent, tsoHeader, 0);
1653    }
1654}
1655
1656void
1657IGbE::TxDescCache::headerComplete()
1658{
1659    DPRINTF(EthernetDesc, "TSO: Fetching TSO header complete\n");
1660    pktWaiting = false;
1661
1662    assert(unusedCache.size());
1663    TxDesc *desc = unusedCache.front();
1664    DPRINTF(EthernetDesc, "TSO: len: %d tsoHeaderLen: %d\n",
1665            TxdOp::getLen(desc), tsoHeaderLen);
1666
1667    if (TxdOp::getLen(desc) == tsoHeaderLen) {
1668        tsoDescBytesUsed = 0;
1669        tsoLoadedHeader = true;
1670        unusedCache.pop_front();
1671        usedCache.push_back(desc);
1672    } else {
1673        DPRINTF(EthernetDesc, "TSO: header part of larger payload\n");
1674        tsoDescBytesUsed = tsoHeaderLen;
1675        tsoLoadedHeader = true;
1676    }
1677    enableSm();
1678    igbe->checkDrain();
1679}
1680
1681unsigned
1682IGbE::TxDescCache::getPacketSize(EthPacketPtr p)
1683{
1684    if (!unusedCache.size())
1685        return 0;
1686
1687    DPRINTF(EthernetDesc, "Starting processing of descriptor\n");
1688
1689    assert(!useTso || tsoLoadedHeader);
1690    TxDesc *desc = unusedCache.front();
1691
1692    if (useTso) {
1693        DPRINTF(EthernetDesc, "getPacket(): TxDescriptor data "
1694                "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
1695        DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
1696                "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
1697                tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
1698
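        // If the header is already in the packet under construction, copy
        // just enough payload to fill it out to header + MSS; otherwise this
        // is a fresh segment, so copy up to MSS bytes. Either way the copy is
        // capped by the bytes remaining in the current data descriptor.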
1699        if (tsoPktHasHeader)
1700            tsoCopyBytes =  std::min((tsoMss + tsoHeaderLen) - p->length,
1701                                     TxdOp::getLen(desc) - tsoDescBytesUsed);
1702        else
1703            tsoCopyBytes =  std::min(tsoMss,
1704                                     TxdOp::getLen(desc) - tsoDescBytesUsed);
1705        unsigned pkt_size =
1706            tsoCopyBytes + (tsoPktHasHeader ? 0 : tsoHeaderLen);
1707
1708        DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d "
1709                "this descLen: %d\n",
1710                tsoDescBytesUsed, tsoCopyBytes, TxdOp::getLen(desc));
1711        DPRINTF(EthernetDesc, "TSO: pktHasHeader: %d\n", tsoPktHasHeader);
1712        DPRINTF(EthernetDesc, "TSO: Next packet is %d bytes\n", pkt_size);
1713        return pkt_size;
1714    }
1715
1716    DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
1717            TxdOp::getLen(unusedCache.front()));
1718    return TxdOp::getLen(desc);
1719}
1720
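// Start the DMA that copies this descriptor's buffer (or, for TSO, the next
// payload chunk) into the packet being assembled. For TSO the prefetched
// header is prepended to the packet first if it is not already present.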
1721void
1722IGbE::TxDescCache::getPacketData(EthPacketPtr p)
1723{
1724    assert(unusedCache.size());
1725
1726    TxDesc *desc;
1727    desc = unusedCache.front();
1728
1729    DPRINTF(EthernetDesc, "getPacketData(): TxDescriptor data "
1730            "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
1731    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
1732           TxdOp::getLen(desc));
1733
1734    pktPtr = p;
1735
1736    pktWaiting = true;
1737
1738    DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d\n", p->length);
1739
1740    if (useTso) {
1741        assert(tsoLoadedHeader);
1742        if (!tsoPktHasHeader) {
1743            DPRINTF(EthernetDesc,
1744                    "Loading TSO header (%d bytes) into start of packet\n",
1745                    tsoHeaderLen);
1746            memcpy(p->data, &tsoHeader, tsoHeaderLen);
1747            p->length += tsoHeaderLen;
1748            tsoPktHasHeader = true;
1749        }
1750    }
1751
1752    if (useTso) {
1753        DPRINTF(EthernetDesc,
1754                "Starting DMA of packet at offset %d length: %d\n",
1755                p->length, tsoCopyBytes);
1756        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc))
1757                      + tsoDescBytesUsed,
1758                      tsoCopyBytes, &pktEvent, p->data + p->length,
1759                      igbe->txReadDelay);
1760        tsoDescBytesUsed += tsoCopyBytes;
1761        assert(tsoDescBytesUsed <= TxdOp::getLen(desc));
1762    } else {
1763        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
1764                      TxdOp::getLen(desc), &pktEvent, p->data + p->length,
1765                      igbe->txReadDelay);
1766    }
1767}
1768
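// Callback for a completed packet-data DMA: update the packet length, retire
// the descriptor once fully consumed, patch IP/TCP/UDP headers for TSO
// segments, perform any requested checksum offload, arm the TIDV/TADV
// interrupt timers, and write descriptors back per TXDCTL.WTHRESH.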
1769void
1770IGbE::TxDescCache::pktComplete()
1771{
1772
1773    TxDesc *desc;
1774    assert(unusedCache.size());
1775    assert(pktPtr);
1776
1777    igbe->anBegin("TXS", "Update Desc");
1778
1779    DPRINTF(EthernetDesc, "DMA of packet complete\n");
1780
1781
1782    desc = unusedCache.front();
1783    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
1784           TxdOp::getLen(desc));
1785
1786    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
1787            desc->d1, desc->d2);
1788
1789    // Set the length of the data in the EtherPacket
1790    if (useTso) {
1791        DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
1792            "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
1793            tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
1794        pktPtr->simLength += tsoCopyBytes;
1795        pktPtr->length += tsoCopyBytes;
1796        tsoUsedLen += tsoCopyBytes;
1797        DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d\n",
1798            tsoDescBytesUsed, tsoCopyBytes);
1799    } else {
1800        pktPtr->simLength += TxdOp::getLen(desc);
1801        pktPtr->length += TxdOp::getLen(desc);
1802    }
1803
1804
1805
1806    if ((!TxdOp::eop(desc) && !useTso) ||
1807        (pktPtr->length < ( tsoMss + tsoHeaderLen) &&
1808         tsoTotalLen != tsoUsedLen && useTso)) {
1809        assert(!useTso || (tsoDescBytesUsed == TxdOp::getLen(desc)));
1810        igbe->anDq("TXS", annUnusedCacheQ);
1811        unusedCache.pop_front();
1812        igbe->anQ("TXS", annUsedCacheQ);
1813        usedCache.push_back(desc);
1814
1815        tsoDescBytesUsed = 0;
1816        pktDone = true;
1817        pktWaiting = false;
1818        pktMultiDesc = true;
1819
1820        DPRINTF(EthernetDesc, "Partial Packet Descriptor of %d bytes Done\n",
1821                pktPtr->length);
1822        pktPtr = NULL;
1823
1824        enableSm();
1825        igbe->checkDrain();
1826        return;
1827    }
1828
1829
1830    pktMultiDesc = false;
1831    // no support for vlans
1832    assert(!TxdOp::vle(desc));
1833
1834    // we only support single packet descriptors at this point
1835    if (!useTso)
1836        assert(TxdOp::eop(desc));
1837
1838    // set that this packet is done
1839    if (TxdOp::rs(desc))
1840        TxdOp::setDd(desc);
1841
1842    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
1843            desc->d1, desc->d2);
1844
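    // Per-segment TSO header fixups: bump the IP identification, recompute
    // the IP/IPv6 (or UDP) length fields, advance the TCP sequence number by
    // the payload already emitted, and clear FIN/PSH on all but the final
    // segment.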
1845    if (useTso) {
1846        IpPtr ip(pktPtr);
1847        Ip6Ptr ip6(pktPtr);
1848        if (ip) {
1849            DPRINTF(EthernetDesc, "TSO: Modifying IP header. Id + %d\n",
1850                    tsoPkts);
1851            ip->id(ip->id() + tsoPkts++);
1852            ip->len(pktPtr->length - EthPtr(pktPtr)->size());
1853        }
1854        if (ip6)
1855            ip6->plen(pktPtr->length - EthPtr(pktPtr)->size());
1856        TcpPtr tcp = ip ? TcpPtr(ip) : TcpPtr(ip6);
1857        if (tcp) {
1858            DPRINTF(EthernetDesc,
1859                    "TSO: Modifying TCP header. old seq %d + %d\n",
1860                    tcp->seq(), tsoPrevSeq);
1861            tcp->seq(tcp->seq() + tsoPrevSeq);
1862            if (tsoUsedLen != tsoTotalLen)
1863                tcp->flags(tcp->flags() & ~9); // clear fin & psh
1864        }
1865        UdpPtr udp = ip ? UdpPtr(ip) : UdpPtr(ip6);
1866        if (udp) {
1867            DPRINTF(EthernetDesc, "TSO: Modifying UDP header.\n");
1868            udp->len(pktPtr->length - EthPtr(pktPtr)->size());
1869        }
1870        tsoPrevSeq = tsoUsedLen;
1871    }
1872
1873    if (DTRACE(EthernetDesc)) {
1874        IpPtr ip(pktPtr);
1875        if (ip)
1876            DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n",
1877                    ip->id());
1878        else
1879            DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
1880    }
1881
1882    // Checksums are only offloaded for new descriptor types
1883    if (TxdOp::isData(desc) && (TxdOp::ixsm(desc) || TxdOp::txsm(desc))) {
1884        DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
1885        IpPtr ip(pktPtr);
1886        Ip6Ptr ip6(pktPtr);
1887        assert(ip || ip6);
1888        if (ip && TxdOp::ixsm(desc)) {
1889            ip->sum(0);
1890            ip->sum(cksum(ip));
1891            igbe->txIpChecksums++;
1892            DPRINTF(EthernetDesc, "Calculated IP checksum\n");
1893        }
1894        if (TxdOp::txsm(desc)) {
1895            TcpPtr tcp = ip ? TcpPtr(ip) : TcpPtr(ip6);
1896            UdpPtr udp = ip ? UdpPtr(ip) : UdpPtr(ip6);
1897            if (tcp) {
1898                tcp->sum(0);
1899                tcp->sum(cksum(tcp));
1900                igbe->txTcpChecksums++;
1901                DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
1902            } else if (udp) {
1904                udp->sum(0);
1905                udp->sum(cksum(udp));
1906                igbe->txUdpChecksums++;
1907                DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
1908            } else {
1909                panic("Told to checksum, but don't know how\n");
1910            }
1911        }
1912    }
1913
1914    if (TxdOp::ide(desc)) {
1915        // Deal with the tx timer interrupts
1916        DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
1917        if (igbe->regs.tidv.idv()) {
1918            Tick delay = igbe->regs.tidv.idv() * igbe->intClock();
1919            DPRINTF(EthernetDesc, "setting tidv\n");
1920            igbe->reschedule(igbe->tidvEvent, curTick() + delay, true);
1921        }
1922
1923        if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
1924            Tick delay = igbe->regs.tadv.idv() * igbe->intClock();
1925            DPRINTF(EthernetDesc, "setting tadv\n");
1926            if (!igbe->tadvEvent.scheduled()) {
1927                igbe->schedule(igbe->tadvEvent, curTick() + delay);
1928            }
1929        }
1930    }
1931
1932
1933    if (!useTso ||  TxdOp::getLen(desc) == tsoDescBytesUsed) {
1934        DPRINTF(EthernetDesc, "Descriptor Done\n");
1935        igbe->anDq("TXS", annUnusedCacheQ);
1936        unusedCache.pop_front();
1937        igbe->anQ("TXS", annUsedCacheQ);
1938        usedCache.push_back(desc);
1939        tsoDescBytesUsed = 0;
1940    }
1941
1942    if (useTso && tsoUsedLen == tsoTotalLen)
1943        useTso = false;
1944
1945
1946    DPRINTF(EthernetDesc,
1947            "------Packet of %d bytes ready for transmission-------\n",
1948            pktPtr->length);
1949    pktDone = true;
1950    pktWaiting = false;
1951    pktPtr = NULL;
1952    tsoPktHasHeader = false;
1953
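    // Writeback policy: WTHRESH == 0 writes each completed descriptor back
    // immediately; otherwise writeback is deferred until at least WTHRESH
    // used descriptors (in the granularity selected by TXDCTL.GRAN) have
    // accumulated.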
1954    if (igbe->regs.txdctl.wthresh() == 0) {
1955        igbe->anBegin("TXS", "Desc Writeback");
1956        DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
1957        writeback(0);
1958    } else if (!igbe->regs.txdctl.gran() && igbe->regs.txdctl.wthresh() <=
1959               descInBlock(usedCache.size())) {
1960        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
1961        igbe->anBegin("TXS", "Desc Writeback");
1962        writeback((igbe->cacheBlockSize()-1)>>4);
1963    } else if (igbe->regs.txdctl.wthresh() <= usedCache.size()) {
1964        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
1965        igbe->anBegin("TXS", "Desc Writeback");
1966        writeback((igbe->cacheBlockSize()-1)>>4);
1967    }
1968
1969    enableSm();
1970    igbe->checkDrain();
1971}
1972
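// Runs after a descriptor writeback completes: raise the TXDW interrupt and,
// if completion writeback is enabled, DMA the current head pointer (TDH) to
// the configured completion address.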
1973void
1974IGbE::TxDescCache::actionAfterWb()
1975{
1976    DPRINTF(EthernetDesc, "actionAfterWb() completionEnabled: %d\n",
1977            completionEnabled);
1978    igbe->postInterrupt(iGbReg::IT_TXDW);
1979    if (completionEnabled) {
1980        descEnd = igbe->regs.tdh();
1981        DPRINTF(EthernetDesc,
1982                "Completion writing back value: %d to addr: %#x\n", descEnd,
1983                completionAddress);
1984        igbe->dmaWrite(pciToDma(mbits(completionAddress, 63, 2)),
1985                       sizeof(descEnd), &nullEvent, (uint8_t*)&descEnd, 0);
1986    }
1987}
1988
1989void
1990IGbE::TxDescCache::serialize(CheckpointOut &cp) const
1991{
1992    DescCache<TxDesc>::serialize(cp);
1993
1994    SERIALIZE_SCALAR(pktDone);
1995    SERIALIZE_SCALAR(isTcp);
1996    SERIALIZE_SCALAR(pktWaiting);
1997    SERIALIZE_SCALAR(pktMultiDesc);
1998
1999    SERIALIZE_SCALAR(useTso);
2000    SERIALIZE_SCALAR(tsoHeaderLen);
2001    SERIALIZE_SCALAR(tsoMss);
2002    SERIALIZE_SCALAR(tsoTotalLen);
2003    SERIALIZE_SCALAR(tsoUsedLen);
2004    SERIALIZE_SCALAR(tsoPrevSeq);
2005    SERIALIZE_SCALAR(tsoPktPayloadBytes);
2006    SERIALIZE_SCALAR(tsoLoadedHeader);
2007    SERIALIZE_SCALAR(tsoPktHasHeader);
2008    SERIALIZE_ARRAY(tsoHeader, 256);
2009    SERIALIZE_SCALAR(tsoDescBytesUsed);
2010    SERIALIZE_SCALAR(tsoCopyBytes);
2011    SERIALIZE_SCALAR(tsoPkts);
2012
2013    SERIALIZE_SCALAR(completionAddress);
2014    SERIALIZE_SCALAR(completionEnabled);
2015    SERIALIZE_SCALAR(descEnd);
2016}
2017
2018void
2019IGbE::TxDescCache::unserialize(CheckpointIn &cp)
2020{
2021    DescCache<TxDesc>::unserialize(cp);
2022
2023    UNSERIALIZE_SCALAR(pktDone);
2024    UNSERIALIZE_SCALAR(isTcp);
2025    UNSERIALIZE_SCALAR(pktWaiting);
2026    UNSERIALIZE_SCALAR(pktMultiDesc);
2027
2028    UNSERIALIZE_SCALAR(useTso);
2029    UNSERIALIZE_SCALAR(tsoHeaderLen);
2030    UNSERIALIZE_SCALAR(tsoMss);
2031    UNSERIALIZE_SCALAR(tsoTotalLen);
2032    UNSERIALIZE_SCALAR(tsoUsedLen);
2033    UNSERIALIZE_SCALAR(tsoPrevSeq);
2034    UNSERIALIZE_SCALAR(tsoPktPayloadBytes);
2035    UNSERIALIZE_SCALAR(tsoLoadedHeader);
2036    UNSERIALIZE_SCALAR(tsoPktHasHeader);
2037    UNSERIALIZE_ARRAY(tsoHeader, 256);
2038    UNSERIALIZE_SCALAR(tsoDescBytesUsed);
2039    UNSERIALIZE_SCALAR(tsoCopyBytes);
2040    UNSERIALIZE_SCALAR(tsoPkts);
2041
2042    UNSERIALIZE_SCALAR(completionAddress);
2043    UNSERIALIZE_SCALAR(completionEnabled);
2044    UNSERIALIZE_SCALAR(descEnd);
2045}
2046
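// Report whether a fully DMA'd packet is ready for the TX FIFO. The flag is
// consumed here, so each completed packet is reported exactly once.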
2047bool
2048IGbE::TxDescCache::packetAvailable()
2049{
2050    if (pktDone) {
2051        pktDone = false;
2052        return true;
2053    }
2054    return false;
2055}
2056
2057void
2058IGbE::TxDescCache::enableSm()
2059{
2060    if (igbe->drainState() != DrainState::Draining) {
2061        igbe->txTick = true;
2062        igbe->restartClock();
2063    }
2064}
2065
2066bool
2067IGbE::TxDescCache::hasOutstandingEvents()
2068{
2069    return pktEvent.scheduled() || wbEvent.scheduled() ||
2070        fetchEvent.scheduled();
2071}
2072
2073
2074///////////////////////////////////// IGbE /////////////////////////////////
2075
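// Restart the main tick event if any of the state machines wants to run, the
// event is not already scheduled, and the device is not draining.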
2076void
2077IGbE::restartClock()
2078{
2079    if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) &&
2080        drainState() == DrainState::Running)
2081        schedule(tickEvent, clockEdge(Cycles(1)));
2082}
2083
2084DrainState
2085IGbE::drain()
2086{
2087    unsigned int count(0);
2088    if (rxDescCache.hasOutstandingEvents() ||
2089        txDescCache.hasOutstandingEvents()) {
2090        count++;
2091    }
2092
2093    txFifoTick = false;
2094    txTick = false;
2095    rxTick = false;
2096
2097    if (tickEvent.scheduled())
2098        deschedule(tickEvent);
2099
2100    if (count) {
2101        DPRINTF(Drain, "IGbE not drained\n");
2102        return DrainState::Draining;
2103    } else
2104        return DrainState::Drained;
2105}
2106
2107void
2108IGbE::drainResume()
2109{
2110    Drainable::drainResume();
2111
2112    txFifoTick = true;
2113    txTick = true;
2114    rxTick = true;
2115
2116    restartClock();
2117    DPRINTF(EthernetSM, "resuming from drain");
2118}
2119
2120void
2121IGbE::checkDrain()
2122{
2123    if (drainState() != DrainState::Draining)
2124        return;
2125
2126    txFifoTick = false;
2127    txTick = false;
2128    rxTick = false;
2129    if (!rxDescCache.hasOutstandingEvents() &&
2130        !txDescCache.hasOutstandingEvents()) {
2131        DPRINTF(Drain, "IGbE done draining, processing drain event\n");
2132        signalDrainDone();
2133    }
2134}
2135
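// Transmit state machine: move a completed packet into the TX FIFO, post
// TXDLOW/TXQE when the ring runs low or empty, fetch descriptors when the
// cache runs dry, process context descriptors, and start the DMA of the next
// packet once the FIFO has room for it.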
2136void
2137IGbE::txStateMachine()
2138{
2139    if (!regs.tctl.en()) {
2140        txTick = false;
2141        DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
2142        return;
2143    }
2144
2145    // If we have a packet available and its length is not 0 (meaning it's
2146    // not a multi-descriptor packet), put it in the FIFO; otherwise, on the
2147    // next iteration we'll get the rest of the data.
2148    if (txPacket && txDescCache.packetAvailable()
2149        && !txDescCache.packetMultiDesc() && txPacket->length) {
2150        anQ("TXS", "TX FIFO Q");
2151        DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
2152#ifndef NDEBUG
2153        bool success =
2154#endif
2155            txFifo.push(txPacket);
2156        txFifoTick = (drainState() != DrainState::Draining);
2157        assert(success);
2158        txPacket = NULL;
2159        anBegin("TXS", "Desc Writeback");
2160        txDescCache.writeback((cacheBlockSize()-1)>>4);
2161        return;
2162    }
2163
2164    // Only support descriptor granularity
2165    if (regs.txdctl.lwthresh() &&
2166        txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
2167        DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
2168        postInterrupt(IT_TXDLOW);
2169    }
2170
2171    if (!txPacket) {
2172        txPacket = std::make_shared<EthPacketData>(16384);
2173    }
2174
2175    if (!txDescCache.packetWaiting()) {
2176        if (txDescCache.descLeft() == 0) {
2177            postInterrupt(IT_TXQE);
2178            anBegin("TXS", "Desc Writeback");
2179            txDescCache.writeback(0);
2180            anBegin("TXS", "Desc Fetch");
2181            anWe("TXS", txDescCache.annUnusedCacheQ);
2182            txDescCache.fetchDescriptors();
2183            DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
2184                    "writeback stopping ticking and posting TXQE\n");
2185            txTick = false;
2186            return;
2187        }
2188
2189
2190        if (!(txDescCache.descUnused())) {
2191            anBegin("TXS", "Desc Fetch");
2192            txDescCache.fetchDescriptors();
2193            anWe("TXS", txDescCache.annUnusedCacheQ);
2194            DPRINTF(EthernetSM, "TXS: No descriptors available in cache, "
2195                    "fetching and stopping ticking\n");
2196            txTick = false;
2197            return;
2198        }
2199        anPq("TXS", txDescCache.annUnusedCacheQ);
2200
2201
2202        txDescCache.processContextDesc();
2203        if (txDescCache.packetWaiting()) {
2204            DPRINTF(EthernetSM,
2205                    "TXS: Fetching TSO header, stopping ticking\n");
2206            txTick = false;
2207            return;
2208        }
2209
2210        unsigned size = txDescCache.getPacketSize(txPacket);
2211        if (size > 0 && txFifo.avail() > size) {
2212            anRq("TXS", "TX FIFO Q");
2213            anBegin("TXS", "DMA Packet");
2214            DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and "
2215                    "beginning DMA of next packet\n", size);
2216            txFifo.reserve(size);
2217            txDescCache.getPacketData(txPacket);
2218        } else if (size == 0) {
2219            DPRINTF(EthernetSM, "TXS: getPacketSize returned: %d\n", size);
2220            DPRINTF(EthernetSM,
2221                    "TXS: No packets to get, writing back used descriptors\n");
2222            anBegin("TXS", "Desc Writeback");
2223            txDescCache.writeback(0);
2224        } else {
2225            anWf("TXS", "TX FIFO Q");
2226            DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
2227                    "available in FIFO\n");
2228            txTick = false;
2229        }
2230
2231
2232        return;
2233    }
2234    DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
2235    txTick = false;
2236}
2237
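// Called by the interface when a frame arrives from the wire: account the
// statistics, drop the frame if receive is disabled, wake the RX state
// machine, and push the frame into the RX FIFO, posting RXO (overrun) if it
// does not fit.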
2238bool
2239IGbE::ethRxPkt(EthPacketPtr pkt)
2240{
2241    rxBytes += pkt->length;
2242    rxPackets++;
2243
2244    DPRINTF(Ethernet, "RxFIFO: Receiving pcakte from wire\n");
2245    anBegin("RXQ", "Wire Recv");
2246
2247
2248    if (!regs.rctl.en()) {
2249        DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
2250        anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
2251        return true;
2252    }
2253
2254    // restart the state machines if they are stopped
2255    rxTick = (drainState() != DrainState::Draining);
2256    if ((rxTick || txTick) && !tickEvent.scheduled()) {
2257        DPRINTF(EthernetSM,
2258                "RXS: received packet into fifo, starting ticking\n");
2259        restartClock();
2260    }
2261
2262    if (!rxFifo.push(pkt)) {
2263        DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
2264        postInterrupt(IT_RXO, true);
2265        anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
2266        return false;
2267    }
2268
2269    if (CPA::available() && cpa->enabled()) {
2270        assert(sys->numSystemsRunning <= 2);
2271        System *other_sys;
2272        if (sys->systemList[0] == sys)
2273            other_sys = sys->systemList[1];
2274        else
2275            other_sys = sys->systemList[0];
2276
2277        cpa->hwDq(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
2278        anQ("RXQ", "RX FIFO Q");
2279        cpa->hwWe(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
2280    }
2281
2282    return true;
2283}
2284
2285
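// Receive state machine: once a packet's DMA completes, check the RDMTS,
// WTHRESH and PTHRESH thresholds to post RXDMT, write back, or prefetch
// descriptors as needed; otherwise take the frame at the head of the RX FIFO
// and DMA its next chunk into memory (pktOffset tracks how much has been
// written so far).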
2286void
2287IGbE::rxStateMachine()
2288{
2289    if (!regs.rctl.en()) {
2290        rxTick = false;
2291        DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
2292        return;
2293    }
2294
2295    // If the packet is done check for interrupts/descriptors/etc
2296    if (rxDescCache.packetDone()) {
2297        rxDmaPacket = false;
2298        DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
2299        int descLeft = rxDescCache.descLeft();
2300        DPRINTF(EthernetSM, "RXS: descLeft: %d rdmts: %d rdlen: %d\n",
2301                descLeft, regs.rctl.rdmts(), regs.rdlen());
2302
2303        // rdmts 2->1/8, 1->1/4, 0->1/2
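        // i.e., post RXDMT once the free descriptor count has fallen to the
        // RDMTS fraction of RDLEN (descLeft <= rdlen / ratio).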
2304        int ratio = (1ULL << (regs.rctl.rdmts() + 1));
2305        if (descLeft * ratio <= regs.rdlen()) {
2306            DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) "
2307                    "because of descriptors left\n");
2308            postInterrupt(IT_RXDMT);
2309        }
2310
2311        if (rxFifo.empty())
2312            rxDescCache.writeback(0);
2313
2314        if (descLeft == 0) {
2315            anBegin("RXS", "Writeback Descriptors");
2316            rxDescCache.writeback(0);
2317            DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
2318                    " writeback and stopping ticking\n");
2319            rxTick = false;
2320        }
2321
2322        // only support descriptor granularities
2323        assert(regs.rxdctl.gran());
2324
2325        if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
2326            DPRINTF(EthernetSM,
2327                    "RXS: Writing back because WTHRESH >= descUsed\n");
2328            anBegin("RXS", "Writeback Descriptors");
2329            if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
2330                rxDescCache.writeback(regs.rxdctl.wthresh()-1);
2331            else
2332                rxDescCache.writeback((cacheBlockSize()-1)>>4);
2333        }
2334
2335        if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
2336            ((rxDescCache.descLeft() - rxDescCache.descUnused()) >
2337             regs.rxdctl.hthresh())) {
2338            DPRINTF(EthernetSM, "RXS: Fetching descriptors because "
2339                    "descUnused < PTHRESH\n");
2340            anBegin("RXS", "Fetch Descriptors");
2341            rxDescCache.fetchDescriptors();
2342        }
2343
2344        if (rxDescCache.descUnused() == 0) {
2345            anBegin("RXS", "Fetch Descriptors");
2346            rxDescCache.fetchDescriptors();
2347            anWe("RXS", rxDescCache.annUnusedCacheQ);
2348            DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
2349                    "fetching descriptors and stopping ticking\n");
2350            rxTick = false;
2351        }
2352        return;
2353    }
2354
2355    if (rxDmaPacket) {
2356        DPRINTF(EthernetSM,
2357                "RXS: stopping ticking until packet DMA completes\n");
2358        rxTick = false;
2359        return;
2360    }
2361
2362    if (!rxDescCache.descUnused()) {
2363        anBegin("RXS", "Fetch Descriptors");
2364        rxDescCache.fetchDescriptors();
2365        anWe("RXS", rxDescCache.annUnusedCacheQ);
2366        DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
2367                "stopping ticking\n");
2368        rxTick = false;
2369        DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n");
2370        return;
2371    }
2372    anPq("RXS", rxDescCache.annUnusedCacheQ);
2373
2374    if (rxFifo.empty()) {
2375        anWe("RXS", "RX FIFO Q");
2376        DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
2377        rxTick = false;
2378        return;
2379    }
2380    anPq("RXS", "RX FIFO Q");
2381    anBegin("RXS", "Get Desc");
2382
2383    EthPacketPtr pkt;
2384    pkt = rxFifo.front();
2385
2386
2387    pktOffset = rxDescCache.writePacket(pkt, pktOffset);
2388    DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
2389    if (pktOffset == pkt->length) {
2390        anBegin( "RXS", "FIFO Dequeue");
2391        DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
2392        pktOffset = 0;
2393        anDq("RXS", "RX FIFO Q");
2394        rxFifo.pop();
2395    }
2396
2397    DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
2398    rxTick = false;
2399    rxDmaPacket = true;
2400    anBegin("RXS", "DMA Packet");
2401}
2402
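// Try to hand the frame at the head of the TX FIFO to the interface; on
// success account the transmit statistics and pop it. If the interface is
// busy, the frame stays queued until ethTxDone() re-arms txFifoTick.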
2403void
2404IGbE::txWire()
2405{
2406    txFifoTick = false;
2407
2408    if (txFifo.empty()) {
2409        anWe("TXQ", "TX FIFO Q");
2410        return;
2411    }
2412
2413
2414    anPq("TXQ", "TX FIFO Q");
2415    if (etherInt->sendPacket(txFifo.front())) {
2416        anQ("TXQ", "WireQ");
2417        if (DTRACE(EthernetSM)) {
2418            IpPtr ip(txFifo.front());
2419            if (ip)
2420                DPRINTF(EthernetSM, "Transmitting Ip packet with Id=%d\n",
2421                        ip->id());
2422            else
2423                DPRINTF(EthernetSM, "Transmitting Non-Ip packet\n");
2424        }
2425        anDq("TXQ", "TX FIFO Q");
2426        anBegin("TXQ", "Wire Send");
2427        DPRINTF(EthernetSM,
2428                "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
2429                txFifo.avail());
2430
2431        txBytes += txFifo.front()->length;
2432        txPackets++;
2433
2434        txFifo.pop();
2435    }
2436}
2437
2438void
2439IGbE::tick()
2440{
2441    DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");
2442
2443    inTick = true;
2444
2445    if (rxTick)
2446        rxStateMachine();
2447
2448    if (txTick)
2449        txStateMachine();
2450
2451    // If txWire returns and txFifoTick is still set, that means the data we
2452    // sent to the other end was already accepted and we can send another
2453    // frame right away. This is consistent with the previous behavior which
2454    // would send another frame if one was ready in ethTxDone. This version
2455    // avoids growing the stack with each frame sent which can cause stack
2456    // overflow.
2457    while (txFifoTick)
2458        txWire();
2459
2460    if (rxTick || txTick || txFifoTick)
2461        schedule(tickEvent, curTick() + clockPeriod());
2462
2463    inTick = false;
2464}
2465
2466void
2467IGbE::ethTxDone()
2468{
2469    anBegin("TXQ", "Send Done");
2470    // restart the tx state machines if they are stopped
2471    // fifo to send another packet
2472    // tx sm to put more data into the fifo
2473    txFifoTick = (drainState() != DrainState::Draining);
2474    if (txDescCache.descLeft() != 0 && drainState() != DrainState::Draining)
2475        txTick = true;
2476
2477    if (!inTick)
2478        restartClock();
2479    DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
2480}
2481
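// Checkpointing: delay-timer and interrupt event times are saved as absolute
// ticks (0 meaning not scheduled) and rescheduled in unserialize(); the
// FIFOs and descriptor caches serialize themselves into sub-sections.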
2482void
2483IGbE::serialize(CheckpointOut &cp) const
2484{
2485    PciDevice::serialize(cp);
2486
2487    regs.serialize(cp);
2488    SERIALIZE_SCALAR(eeOpBits);
2489    SERIALIZE_SCALAR(eeAddrBits);
2490    SERIALIZE_SCALAR(eeDataBits);
2491    SERIALIZE_SCALAR(eeOpcode);
2492    SERIALIZE_SCALAR(eeAddr);
2493    SERIALIZE_SCALAR(lastInterrupt);
2494    SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);
2495
2496    rxFifo.serialize("rxfifo", cp);
2497    txFifo.serialize("txfifo", cp);
2498
2499    bool txPktExists = txPacket != nullptr;
2500    SERIALIZE_SCALAR(txPktExists);
2501    if (txPktExists)
2502        txPacket->serialize("txpacket", cp);
2503
2504    Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
2505        inter_time = 0;
2506
2507    if (rdtrEvent.scheduled())
2508        rdtr_time = rdtrEvent.when();
2509    SERIALIZE_SCALAR(rdtr_time);
2510
2511    if (radvEvent.scheduled())
2512        radv_time = radvEvent.when();
2513    SERIALIZE_SCALAR(radv_time);
2514
2515    if (tidvEvent.scheduled())
2516        tidv_time = tidvEvent.when();
2517    SERIALIZE_SCALAR(tidv_time);
2518
2519    if (tadvEvent.scheduled())
2520        tadv_time = tadvEvent.when();
2521    SERIALIZE_SCALAR(tadv_time);
2522
2523    if (interEvent.scheduled())
2524        inter_time = interEvent.when();
2525    SERIALIZE_SCALAR(inter_time);
2526
2527    SERIALIZE_SCALAR(pktOffset);
2528
2529    txDescCache.serializeSection(cp, "TxDescCache");
2530    rxDescCache.serializeSection(cp, "RxDescCache");
2531}
2532
2533void
2534IGbE::unserialize(CheckpointIn &cp)
2535{
2536    PciDevice::unserialize(cp);
2537
2538    regs.unserialize(cp);
2539    UNSERIALIZE_SCALAR(eeOpBits);
2540    UNSERIALIZE_SCALAR(eeAddrBits);
2541    UNSERIALIZE_SCALAR(eeDataBits);
2542    UNSERIALIZE_SCALAR(eeOpcode);
2543    UNSERIALIZE_SCALAR(eeAddr);
2544    UNSERIALIZE_SCALAR(lastInterrupt);
2545    UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);
2546
2547    rxFifo.unserialize("rxfifo", cp);
2548    txFifo.unserialize("txfifo", cp);
2549
2550    bool txPktExists;
2551    UNSERIALIZE_SCALAR(txPktExists);
2552    if (txPktExists) {
2553        txPacket = std::make_shared<EthPacketData>(16384);
2554        txPacket->unserialize("txpacket", cp);
2555    }
2556
2557    rxTick = true;
2558    txTick = true;
2559    txFifoTick = true;
2560
2561    Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
2562    UNSERIALIZE_SCALAR(rdtr_time);
2563    UNSERIALIZE_SCALAR(radv_time);
2564    UNSERIALIZE_SCALAR(tidv_time);
2565    UNSERIALIZE_SCALAR(tadv_time);
2566    UNSERIALIZE_SCALAR(inter_time);
2567
2568    if (rdtr_time)
2569        schedule(rdtrEvent, rdtr_time);
2570
2571    if (radv_time)
2572        schedule(radvEvent, radv_time);
2573
2574    if (tidv_time)
2575        schedule(tidvEvent, tidv_time);
2576
2577    if (tadv_time)
2578        schedule(tadvEvent, tadv_time);
2579
2580    if (inter_time)
2581        schedule(interEvent, inter_time);
2582
2583    UNSERIALIZE_SCALAR(pktOffset);
2584
2585    txDescCache.unserializeSection(cp, "TxDescCache");
2586    rxDescCache.unserializeSection(cp, "RxDescCache");
2587}
2588
2589IGbE *
2590IGbEParams::create()
2591{
2592    return new IGbE(this);
2593}
2594