/*
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 */

/* @file
 * Device model for Intel's 8254x line of gigabit Ethernet controllers.
 * In particular an 82547 revision 2 (82547GI) MAC, because it seems to have
 * the fewest workarounds in the driver. It will probably work with most of
 * the other MACs with slight modifications.
 */


/*
 * @todo There are really multiple DMA engines; we should implement them.
 */

#include <algorithm>

#include "base/inet.hh"
#include "base/trace.hh"
#include "debug/EthernetAll.hh"
#include "dev/i8254xGBe.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/IGbE.hh"
#include "sim/stats.hh"
#include "sim/system.hh"

using namespace iGbReg;
using namespace Net;

IGbE::IGbE(const Params *p)
    : EtherDevice(p), etherInt(NULL),  drainEvent(NULL),
      useFlowControl(p->use_flow_control),
      rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), rxTick(false),
      txTick(false), txFifoTick(false), rxDmaPacket(false), pktOffset(0),
      fetchDelay(p->fetch_delay), wbDelay(p->wb_delay),
      fetchCompDelay(p->fetch_comp_delay), wbCompDelay(p->wb_comp_delay),
      rxWriteDelay(p->rx_write_delay), txReadDelay(p->tx_read_delay),
      rdtrEvent(this), radvEvent(this),
      tadvEvent(this), tidvEvent(this), tickEvent(this), interEvent(this),
      rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size),
      txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size),
      clock(p->clock), lastInterrupt(0)
{
    etherInt = new IGbEInt(name() + ".int", this);

    // Initialize internal registers per Intel documentation
    // All registers initialized to 0 by per-register constructor
    regs.ctrl.fd(1);
    regs.ctrl.lrst(1);
    regs.ctrl.speed(2);
    regs.ctrl.frcspd(1);
    regs.sts.speed(3); // Say we're 1000Mbps
    regs.sts.fd(1); // full duplex
    regs.sts.lu(1); // link up
    regs.eecd.fwe(1);
    regs.eecd.ee_type(1);
    regs.imr = 0;
    regs.iam = 0;
    regs.rxdctl.gran(1);
    regs.rxdctl.wthresh(1);
    regs.fcrth(1);
    regs.tdwba = 0;
    regs.rlpml = 0;
    regs.sw_fw_sync = 0;

    regs.pba.rxa(0x30);
    regs.pba.txa(0x10);

    eeOpBits            = 0;
    eeAddrBits          = 0;
    eeDataBits          = 0;
    eeOpcode            = 0;

    // clear all 64 16-bit words of the EEPROM
    memset(&flash, 0, EEPROM_SIZE*2);

    // Set the MAC address
    memcpy(flash, p->hardware_address.bytes(), ETH_ADDR_LEN);
    for (int x = 0; x < ETH_ADDR_LEN/2; x++)
        flash[x] = htobe(flash[x]);

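    // Compute the EEPROM checksum: the 16-bit words are expected to sum
    // to EEPROM_CSUM, so the last word is set to whatever value makes the
    // total come out to that signature.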
    uint16_t csum = 0;
    for (int x = 0; x < EEPROM_SIZE; x++)
        csum += htobe(flash[x]);


    // Magic happy checksum value
    flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum));

    // Store the MAC address as queue ID
    macAddr = p->hardware_address;

    rxFifo.clear();
    txFifo.clear();
}

void
IGbE::init()
{
    cpa = CPA::cpa();
    PciDev::init();
}

EtherInt*
IGbE::getEthPort(const std::string &if_name, int idx)
{

    if (if_name == "interface") {
        if (etherInt->getPeer())
            panic("Port already connected to\n");
        return etherInt;
    }
    return NULL;
}

Tick
IGbE::writeConfig(PacketPtr pkt)
{
    int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::writeConfig(pkt);
    else
        panic("Device specific PCI config space not implemented.\n");

    //
    // Some work may need to be done here based on the PCI COMMAND bits.
    //

    return pioDelay;
}

// Handy macro for range-testing register access addresses
#define IN_RANGE(val, base, len) (val >= base && val < (base + len))

Tick
IGbE::read(PacketPtr pkt)
{
    int bar;
    Addr daddr;

    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    // Only 32bit accesses allowed
    assert(pkt->getSize() == 4);

    DPRINTF(Ethernet, "Read device register %#X\n", daddr);

    pkt->allocate();

    //
    // Handle read of register here
    //


    switch (daddr) {
      case REG_CTRL:
        pkt->set<uint32_t>(regs.ctrl());
        break;
      case REG_STATUS:
        pkt->set<uint32_t>(regs.sts());
        break;
      case REG_EECD:
        pkt->set<uint32_t>(regs.eecd());
        break;
      case REG_EERD:
        pkt->set<uint32_t>(regs.eerd());
        break;
      case REG_CTRL_EXT:
        pkt->set<uint32_t>(regs.ctrl_ext());
        break;
      case REG_MDIC:
        pkt->set<uint32_t>(regs.mdic());
        break;
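      // Reading ICR has side effects: if the interrupt line is asserted
      // (or nothing is enabled in IMR) the read clears the pending cause
      // bits, and with CTRL_EXT.IAME set it also applies the IAM auto-mask.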
      case REG_ICR:
        DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
                regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
        pkt->set<uint32_t>(regs.icr());
        if (regs.icr.int_assert() || regs.imr == 0) {
            regs.icr = regs.icr() & ~mask(30);
            DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr());
        }
        if (regs.ctrl_ext.iame() && regs.icr.int_assert())
            regs.imr &= ~regs.iam;
        chkInterrupt();
        break;
      case REG_EICR:
        // This is only useful for MSI, but the driver reads it every time
        // Just don't do anything
        pkt->set<uint32_t>(0);
        break;
      case REG_ITR:
        pkt->set<uint32_t>(regs.itr());
        break;
      case REG_RCTL:
        pkt->set<uint32_t>(regs.rctl());
        break;
      case REG_FCTTV:
        pkt->set<uint32_t>(regs.fcttv());
        break;
      case REG_TCTL:
        pkt->set<uint32_t>(regs.tctl());
        break;
      case REG_PBA:
        pkt->set<uint32_t>(regs.pba());
        break;
      case REG_WUC:
      case REG_LEDCTL:
        pkt->set<uint32_t>(0); // We don't care, so just return 0
        break;
      case REG_FCRTL:
        pkt->set<uint32_t>(regs.fcrtl());
        break;
      case REG_FCRTH:
        pkt->set<uint32_t>(regs.fcrth());
        break;
      case REG_RDBAL:
        pkt->set<uint32_t>(regs.rdba.rdbal());
        break;
      case REG_RDBAH:
        pkt->set<uint32_t>(regs.rdba.rdbah());
        break;
      case REG_RDLEN:
        pkt->set<uint32_t>(regs.rdlen());
        break;
      case REG_SRRCTL:
        pkt->set<uint32_t>(regs.srrctl());
        break;
      case REG_RDH:
        pkt->set<uint32_t>(regs.rdh());
        break;
      case REG_RDT:
        pkt->set<uint32_t>(regs.rdt());
        break;
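      // Reading RDTR with the FPD (flush partial descriptor) bit set
      // forces any partially filled descriptor to be written back and
      // posts a receive-timer interrupt.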
      case REG_RDTR:
        pkt->set<uint32_t>(regs.rdtr());
        if (regs.rdtr.fpd()) {
            rxDescCache.writeback(0);
            DPRINTF(EthernetIntr,
                    "Posting interrupt because of RDTR.FPD write\n");
            postInterrupt(IT_RXT);
            regs.rdtr.fpd(0);
        }
        break;
      case REG_RXDCTL:
        pkt->set<uint32_t>(regs.rxdctl());
        break;
      case REG_RADV:
        pkt->set<uint32_t>(regs.radv());
        break;
      case REG_TDBAL:
        pkt->set<uint32_t>(regs.tdba.tdbal());
        break;
      case REG_TDBAH:
        pkt->set<uint32_t>(regs.tdba.tdbah());
        break;
      case REG_TDLEN:
        pkt->set<uint32_t>(regs.tdlen());
        break;
      case REG_TDH:
        pkt->set<uint32_t>(regs.tdh());
        break;
      case REG_TXDCA_CTL:
        pkt->set<uint32_t>(regs.txdca_ctl());
        break;
      case REG_TDT:
        pkt->set<uint32_t>(regs.tdt());
        break;
      case REG_TIDV:
        pkt->set<uint32_t>(regs.tidv());
        break;
      case REG_TXDCTL:
        pkt->set<uint32_t>(regs.txdctl());
        break;
      case REG_TADV:
        pkt->set<uint32_t>(regs.tadv());
        break;
      case REG_TDWBAL:
        pkt->set<uint32_t>(regs.tdwba & mask(32));
        break;
      case REG_TDWBAH:
        pkt->set<uint32_t>(regs.tdwba >> 32);
        break;
      case REG_RXCSUM:
        pkt->set<uint32_t>(regs.rxcsum());
        break;
      case REG_RLPML:
        pkt->set<uint32_t>(regs.rlpml);
        break;
      case REG_RFCTL:
        pkt->set<uint32_t>(regs.rfctl());
        break;
      case REG_MANC:
        pkt->set<uint32_t>(regs.manc());
        break;
      case REG_SWSM:
        pkt->set<uint32_t>(regs.swsm());
        regs.swsm.smbi(1);
        break;
      case REG_FWSM:
        pkt->set<uint32_t>(regs.fwsm());
        break;
      case REG_SWFWSYNC:
        pkt->set<uint32_t>(regs.sw_fw_sync);
        break;
      default:
        if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) &&
            !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) &&
            !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4) &&
            !IN_RANGE(daddr, REG_CRCERRS, STATS_REGS_SIZE))
            panic("Read request to unknown register number: %#x\n", daddr);
        else
            pkt->set<uint32_t>(0);
    };

    pkt->makeAtomicResponse();
    return pioDelay;
}

Tick
IGbE::write(PacketPtr pkt)
{
    int bar;
    Addr daddr;


    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    // Only 32bit accesses allowed
    assert(pkt->getSize() == sizeof(uint32_t));

    DPRINTF(Ethernet, "Wrote device register %#X value %#X\n",
            daddr, pkt->get<uint32_t>());

    //
    // Handle write of register here
    //
    uint32_t val = pkt->get<uint32_t>();

    Regs::RCTL oldrctl;
    Regs::TCTL oldtctl;

    switch (daddr) {
      case REG_CTRL:
        regs.ctrl = val;
        if (regs.ctrl.tfce())
            warn("TX Flow control enabled, should implement\n");
        if (regs.ctrl.rfce())
            warn("RX Flow control enabled, should implement\n");
        break;
      case REG_CTRL_EXT:
        regs.ctrl_ext = val;
        break;
      case REG_STATUS:
        regs.sts = val;
        break;
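      // EECD gives the driver bit-banged access to the serial EEPROM: on
      // each rising edge of the SK clock one bit is shifted in (8 opcode
      // bits, then 8 address bits for a read), after which the 16 data
      // bits of the selected word are shifted back out on DO.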
      case REG_EECD:
        int oldClk;
        oldClk = regs.eecd.sk();
        regs.eecd = val;
        // See if this is an EEPROM access and emulate accordingly
        if (!oldClk && regs.eecd.sk()) {
            if (eeOpBits < 8) {
                eeOpcode = eeOpcode << 1 | regs.eecd.din();
                eeOpBits++;
            } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
                eeAddr = eeAddr << 1 | regs.eecd.din();
                eeAddrBits++;
            } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
                assert(eeAddr>>1 < EEPROM_SIZE);
                DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n",
                        flash[eeAddr>>1] >> eeDataBits & 0x1,
                        flash[eeAddr>>1]);
                regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1);
                eeDataBits++;
            } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) {
                regs.eecd.dout(0);
                eeDataBits++;
            } else
                panic("What's going on with eeprom interface? opcode:"
                      " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode,
                      (uint32_t)eeOpBits, (uint32_t)eeAddr,
                      (uint32_t)eeAddrBits, (uint32_t)eeDataBits);

            // Reset everything for the next command
            if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) ||
                (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) {
                eeOpBits = 0;
                eeAddrBits = 0;
                eeDataBits = 0;
                eeOpcode = 0;
                eeAddr = 0;
            }

            DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
                    (uint32_t)eeOpcode, (uint32_t) eeOpBits,
                    (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits);
            if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI ||
                                   eeOpcode == EEPROM_RDSR_OPCODE_SPI ))
                panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode,
                      (uint32_t)eeOpBits);


        }
        // If the driver requests EEPROM access, grant it immediately
        regs.eecd.ee_gnt(regs.eecd.ee_req());
        break;
      case REG_EERD:
        regs.eerd = val;
        if (regs.eerd.start()) {
            regs.eerd.done(1);
            assert(regs.eerd.addr() < EEPROM_SIZE);
            regs.eerd.data(flash[regs.eerd.addr()]);
            regs.eerd.start(0);
            DPRINTF(EthernetEEPROM, "EEPROM: read addr: %#X data %#x\n",
                    regs.eerd.addr(), regs.eerd.data());
        }
        break;
      case REG_MDIC:
        regs.mdic = val;
        if (regs.mdic.i())
            panic("No support for interrupt on mdic complete\n");
        if (regs.mdic.phyadd() != 1)
            panic("No support for reading anything but phy\n");
        DPRINTF(Ethernet, "%s phy address %x\n",
                regs.mdic.op() == 1 ? "Writing" : "Reading",
                regs.mdic.regadd());
        switch (regs.mdic.regadd()) {
          case PHY_PSTATUS:
            regs.mdic.data(0x796D); // link up
            break;
          case PHY_PID:
            regs.mdic.data(params()->phy_pid);
            break;
          case PHY_EPID:
            regs.mdic.data(params()->phy_epid);
            break;
          case PHY_GSTATUS:
            regs.mdic.data(0x7C00);
            break;
          case PHY_EPSTATUS:
            regs.mdic.data(0x3000);
            break;
          case PHY_AGC:
            regs.mdic.data(0x180); // some random length
            break;
          default:
            regs.mdic.data(0);
        }
        regs.mdic.r(1);
        break;
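      // ICR is write-one-to-clear: cause bits set in the written value
      // are cleared, and the IAM auto-mask is applied to IMR if enabled.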
      case REG_ICR:
        DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
                regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
        if (regs.ctrl_ext.iame())
            regs.imr &= ~regs.iam;
        regs.icr = ~bits(val,30,0) & regs.icr();
        chkInterrupt();
        break;
      case REG_ITR:
        regs.itr = val;
        break;
      case REG_ICS:
        DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n");
        postInterrupt((IntTypes)val);
        break;
      case REG_IMS:
        regs.imr |= val;
        chkInterrupt();
        break;
      case REG_IMC:
        regs.imr &= ~val;
        chkInterrupt();
        break;
      case REG_IAM:
        regs.iam = val;
        break;
      case REG_RCTL:
        oldrctl = regs.rctl;
        regs.rctl = val;
        if (regs.rctl.rst()) {
            rxDescCache.reset();
            DPRINTF(EthernetSM, "RXS: Got RESET!\n");
            rxFifo.clear();
            regs.rctl.rst(0);
        }
        if (regs.rctl.en())
            rxTick = true;
        restartClock();
        break;
      case REG_FCTTV:
        regs.fcttv = val;
        break;
      case REG_TCTL:
        oldtctl = regs.tctl;
        regs.tctl = val;
        if (regs.tctl.en())
            txTick = true;
        restartClock();
        if (regs.tctl.en() && !oldtctl.en()) {
            txDescCache.reset();
        }
        break;
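      // PBA partitions the on-chip packet buffer: the driver writes the
      // RX allocation and whatever remains of the total (64 blocks) is
      // handed to TX.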
      case REG_PBA:
        regs.pba.rxa(val);
        regs.pba.txa(64 - regs.pba.rxa());
        break;
      case REG_WUC:
      case REG_LEDCTL:
      case REG_FCAL:
      case REG_FCAH:
      case REG_FCT:
      case REG_VET:
      case REG_AIFS:
      case REG_TIPG:
        ; // We don't care, so don't store anything
        break;
      case REG_IVAR0:
        warn("Writing to IVAR0, ignoring...\n");
        break;
      case REG_FCRTL:
        regs.fcrtl = val;
        break;
      case REG_FCRTH:
        regs.fcrth = val;
        break;
      case REG_RDBAL:
        regs.rdba.rdbal( val & ~mask(4));
        rxDescCache.areaChanged();
        break;
      case REG_RDBAH:
        regs.rdba.rdbah(val);
        rxDescCache.areaChanged();
        break;
      case REG_RDLEN:
        regs.rdlen = val & ~mask(7);
        rxDescCache.areaChanged();
        break;
      case REG_SRRCTL:
        regs.srrctl = val;
        break;
      case REG_RDH:
        regs.rdh = val;
        rxDescCache.areaChanged();
        break;
      case REG_RDT:
        regs.rdt = val;
        DPRINTF(EthernetSM, "RXS: RDT Updated.\n");
        if (getState() == SimObject::Running) {
            DPRINTF(EthernetSM, "RXS: RDT Fetching Descriptors!\n");
            rxDescCache.fetchDescriptors();
        } else {
            DPRINTF(EthernetSM, "RXS: RDT NOT Fetching Desc b/c draining!\n");
        }
        break;
      case REG_RDTR:
        regs.rdtr = val;
        break;
      case REG_RADV:
        regs.radv = val;
        break;
      case REG_RXDCTL:
        regs.rxdctl = val;
        break;
      case REG_TDBAL:
        regs.tdba.tdbal( val & ~mask(4));
        txDescCache.areaChanged();
        break;
      case REG_TDBAH:
        regs.tdba.tdbah(val);
        txDescCache.areaChanged();
        break;
      case REG_TDLEN:
        regs.tdlen = val & ~mask(7);
        txDescCache.areaChanged();
        break;
      case REG_TDH:
        regs.tdh = val;
        txDescCache.areaChanged();
        break;
      case REG_TXDCA_CTL:
        regs.txdca_ctl = val;
        if (regs.txdca_ctl.enabled())
            panic("No support for DCA\n");
        break;
      case REG_TDT:
        regs.tdt = val;
        DPRINTF(EthernetSM, "TXS: TX Tail pointer updated\n");
        if (getState() == SimObject::Running) {
            DPRINTF(EthernetSM, "TXS: TDT Fetching Descriptors!\n");
            txDescCache.fetchDescriptors();
        } else {
            DPRINTF(EthernetSM, "TXS: TDT NOT Fetching Desc b/c draining!\n");
        }
        break;
      case REG_TIDV:
        regs.tidv = val;
        break;
      case REG_TXDCTL:
        regs.txdctl = val;
        break;
      case REG_TADV:
        regs.tadv = val;
        break;
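      // TDWBAL/TDWBAH together form the TX descriptor completion
      // writeback address; bit 0 of the low word is the enable bit, so
      // the address and the enable are passed to the descriptor cache
      // separately.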
      case REG_TDWBAL:
        regs.tdwba &= ~mask(32);
        regs.tdwba |= val;
        txDescCache.completionWriteback(regs.tdwba & ~mask(1),
                                        regs.tdwba & mask(1));
        break;
      case REG_TDWBAH:
        regs.tdwba &= mask(32);
        regs.tdwba |= (uint64_t)val << 32;
        txDescCache.completionWriteback(regs.tdwba & ~mask(1),
                                        regs.tdwba & mask(1));
        break;
      case REG_RXCSUM:
        regs.rxcsum = val;
        break;
      case REG_RLPML:
        regs.rlpml = val;
        break;
      case REG_RFCTL:
        regs.rfctl = val;
        if (regs.rfctl.exsten())
            panic("Extended RX descriptors not implemented\n");
        break;
      case REG_MANC:
        regs.manc = val;
        break;
      case REG_SWSM:
        regs.swsm = val;
        if (regs.fwsm.eep_fw_semaphore())
            regs.swsm.swesmbi(0);
        break;
      case REG_SWFWSYNC:
        regs.sw_fw_sync = val;
        break;
      default:
        if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) &&
            !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) &&
            !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4))
            panic("Write request to unknown register number: %#x\n", daddr);
    };

    pkt->makeAtomicResponse();
    return pioDelay;
}

void
IGbE::postInterrupt(IntTypes t, bool now)
{
    assert(t);

    // Interrupt is already pending
    if (t & regs.icr() && !now)
        return;

    regs.icr = regs.icr() | t;

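    // ITR throttles interrupt delivery. Its interval field is in units of
    // 256 ns; post immediately only if throttling is off, the caller
    // forced it, or at least one interval has elapsed since the last
    // interrupt, otherwise defer to the interrupt timer event.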
    Tick itr_interval = SimClock::Int::ns * 256 * regs.itr.interval();
    DPRINTF(EthernetIntr,
            "EINT: postInterrupt() curTick(): %d itr: %d interval: %d\n",
            curTick(), regs.itr.interval(), itr_interval);

    if (regs.itr.interval() == 0 || now ||
        lastInterrupt + itr_interval <= curTick()) {
        if (interEvent.scheduled()) {
            deschedule(interEvent);
        }
        cpuPostInt();
    } else {
        Tick int_time = lastInterrupt + itr_interval;
        assert(int_time > 0);
        DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for tick %d\n",
                int_time);
        if (!interEvent.scheduled()) {
            schedule(interEvent, int_time);
        }
    }
}

void
IGbE::delayIntEvent()
{
    cpuPostInt();
}


void
IGbE::cpuPostInt()
{

    postedInterrupts++;

    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Interrupt Masked. Not Posting\n");
        return;
    }

    DPRINTF(Ethernet, "Posting Interrupt\n");


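    // An interrupt is being delivered now, so cancel any pending delay
    // timers and fold their causes (RX timer, TX descriptor writeback)
    // into this one.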
    if (interEvent.scheduled()) {
        deschedule(interEvent);
    }

    if (rdtrEvent.scheduled()) {
        regs.icr.rxt0(1);
        deschedule(rdtrEvent);
    }
    if (radvEvent.scheduled()) {
        regs.icr.rxt0(1);
        deschedule(radvEvent);
    }
    if (tadvEvent.scheduled()) {
        regs.icr.txdw(1);
        deschedule(tadvEvent);
    }
    if (tidvEvent.scheduled()) {
        regs.icr.txdw(1);
        deschedule(tidvEvent);
    }

    regs.icr.int_assert(1);
    DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n",
            regs.icr());

    intrPost();

    lastInterrupt = curTick();
}

void
IGbE::cpuClearInt()
{
    if (regs.icr.int_assert()) {
        regs.icr.int_assert(0);
        DPRINTF(EthernetIntr,
                "EINT: Clearing interrupt to CPU now. Vector %#x\n",
                regs.icr());
        intrClear();
    }
}

void
IGbE::chkInterrupt()
{
    DPRINTF(Ethernet, "Checking interrupts icr: %#x imr: %#x\n", regs.icr(),
            regs.imr);
    // Check if we need to clear the CPU interrupt
    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Mask cleared all interrupts\n");
        if (interEvent.scheduled())
            deschedule(interEvent);
        if (regs.icr.int_assert())
            cpuClearInt();
    }
    DPRINTF(Ethernet, "ITR = %#X itr.interval = %#X\n",
            regs.itr(), regs.itr.interval());

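    // If unmasked causes remain, either post immediately (throttling
    // disabled) or make sure an interrupt is scheduled one ITR interval
    // from now.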
    if (regs.icr() & regs.imr) {
        if (regs.itr.interval() == 0)  {
            cpuPostInt();
        } else {
            DPRINTF(Ethernet,
                    "Possibly scheduling interrupt because of imr write\n");
            if (!interEvent.scheduled()) {
                Tick t = curTick() + SimClock::Int::ns * 256 * regs.itr.interval();
                DPRINTF(Ethernet, "Scheduling for %d\n", t);
                schedule(interEvent, t);
            }
        }
    }
}


///////////////////////////// IGbE::DescCache //////////////////////////////

template<class T>
IGbE::DescCache<T>::DescCache(IGbE *i, const std::string n, int s)
    : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0),
      wbOut(0), pktPtr(NULL), wbDelayEvent(this),
      fetchDelayEvent(this), fetchEvent(this), wbEvent(this)
{
    fetchBuf = new T[size];
    wbBuf = new T[size];
}

template<class T>
IGbE::DescCache<T>::~DescCache()
{
    reset();
}

template<class T>
void
IGbE::DescCache<T>::areaChanged()
{
    if (usedCache.size() > 0 || curFetching || wbOut)
        panic("Descriptor Address, Length or Head changed. Bad\n");
    reset();

}

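// Write back the used (completed) descriptors to the guest's descriptor
// ring. aMask constrains where the writeback may stop: the count is
// rounded down so the new head lands on the requested alignment, and a
// less restrictive mask that arrives while a writeback is in flight is
// remembered and serviced once the current one completes.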
template<class T>
void
IGbE::DescCache<T>::writeback(Addr aMask)
{
    int curHead = descHead();
    int max_to_wb = usedCache.size();

    // Check if this writeback is less restrictive than the previous
    // and if so set up another one immediately following it
    if (wbOut) {
        if (aMask < wbAlignment) {
            moreToWb = true;
            wbAlignment = aMask;
        }
        DPRINTF(EthernetDesc,
                "Writing back already in process, returning\n");
        return;
    }

    moreToWb = false;
    wbAlignment = aMask;


    DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
            "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
            curHead, descTail(), descLen(), cachePnt, max_to_wb,
            descLeft());

    if (max_to_wb + curHead >= descLen()) {
        max_to_wb = descLen() - curHead;
        moreToWb = true;
        // this is by definition aligned correctly
    } else if (wbAlignment != 0) {
        // align the wb point to the mask
        max_to_wb = max_to_wb & ~wbAlignment;
    }

    DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);

    if (max_to_wb <= 0) {
        if (usedCache.size())
            igbe->anBegin(annSmWb, "Wait Alignment", CPA::FL_WAIT);
        else
            igbe->anWe(annSmWb, annUsedCacheQ);
        return;
    }

    wbOut = max_to_wb;

    assert(!wbDelayEvent.scheduled());
    igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
    igbe->anBegin(annSmWb, "Prepare Writeback Desc");
}

template<class T>
void
IGbE::DescCache<T>::writeback1()
{
    // If we're draining, delay issuing this DMA
    if (igbe->getState() != SimObject::Running) {
        igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
        return;
    }

    DPRINTF(EthernetDesc, "Beginning DMA of %d descriptors\n", wbOut);

    for (int x = 0; x < wbOut; x++) {
        assert(usedCache.size());
        memcpy(&wbBuf[x], usedCache[x], sizeof(T));
        igbe->anPq(annSmWb, annUsedCacheQ);
        igbe->anPq(annSmWb, annDescQ);
        igbe->anQ(annSmWb, annUsedDescQ);
    }


    igbe->anBegin(annSmWb, "Writeback Desc DMA");

    assert(wbOut);
    igbe->dmaWrite(pciToDma(descBase() + descHead() * sizeof(T)),
                   wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf,
                   igbe->wbCompDelay);
}

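// Fetch descriptors from the guest's ring into the local cache. Only
// descriptors between the cache pointer and the ring tail are candidates,
// limited by the free space in the cache; a fetch never wraps the end of
// the ring, the wrap is handled by a subsequent call.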
template<class T>
void
IGbE::DescCache<T>::fetchDescriptors()
{
    size_t max_to_fetch;

    if (curFetching) {
        DPRINTF(EthernetDesc,
                "Currently fetching %d descriptors, returning\n",
                curFetching);
        return;
    }

    if (descTail() >= cachePnt)
        max_to_fetch = descTail() - cachePnt;
    else
        max_to_fetch = descLen() - cachePnt;

    size_t free_cache = size - usedCache.size() - unusedCache.size();

    if (!max_to_fetch)
        igbe->anWe(annSmFetch, annUnusedDescQ);
    else
        igbe->anPq(annSmFetch, annUnusedDescQ, max_to_fetch);

    if (max_to_fetch) {
        if (!free_cache)
            igbe->anWf(annSmFetch, annDescQ);
        else
            igbe->anRq(annSmFetch, annDescQ, free_cache);
    }

    max_to_fetch = std::min(max_to_fetch, free_cache);


    DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
            "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
            descHead(), descTail(), descLen(), cachePnt,
            max_to_fetch, descLeft());

    // Nothing to do
    if (max_to_fetch == 0)
        return;

    // So we don't have two descriptor fetches going on at once
    curFetching = max_to_fetch;

    assert(!fetchDelayEvent.scheduled());
    igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
    igbe->anBegin(annSmFetch, "Prepare Fetch Desc");
}

template<class T>
void
IGbE::DescCache<T>::fetchDescriptors1()
{
    // If we're draining, delay issuing this DMA
    if (igbe->getState() != SimObject::Running) {
        igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
        return;
    }

    igbe->anBegin(annSmFetch, "Fetch Desc");

    DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
            descBase() + cachePnt * sizeof(T),
            pciToDma(descBase() + cachePnt * sizeof(T)),
            curFetching * sizeof(T));
    assert(curFetching);
    igbe->dmaRead(pciToDma(descBase() + cachePnt * sizeof(T)),
                  curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf,
                  igbe->fetchCompDelay);
}

template<class T>
void
IGbE::DescCache<T>::fetchComplete()
{
    T *newDesc;
    igbe->anBegin(annSmFetch, "Fetch Complete");
    for (int x = 0; x < curFetching; x++) {
        newDesc = new T;
        memcpy(newDesc, &fetchBuf[x], sizeof(T));
        unusedCache.push_back(newDesc);
        igbe->anDq(annSmFetch, annUnusedDescQ);
        igbe->anQ(annSmFetch, annUnusedCacheQ);
        igbe->anQ(annSmFetch, annDescQ);
    }


#ifndef NDEBUG
    int oldCp = cachePnt;
#endif

    cachePnt += curFetching;
    assert(cachePnt <= descLen());
    if (cachePnt == descLen())
        cachePnt = 0;

    curFetching = 0;

    DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
            oldCp, cachePnt);

    if ((descTail() >= cachePnt ? (descTail() - cachePnt) : (descLen() -
                                                             cachePnt)) == 0)
    {
        igbe->anWe(annSmFetch, annUnusedDescQ);
    } else if (!(size - usedCache.size() - unusedCache.size())) {
        igbe->anWf(annSmFetch, annDescQ);
    } else {
        igbe->anBegin(annSmFetch, "Wait", CPA::FL_WAIT);
    }

    enableSm();
    igbe->checkDrain();
}

template<class T>
void
IGbE::DescCache<T>::wbComplete()
{

    igbe->anBegin(annSmWb, "Finish Writeback");

    long  curHead = descHead();
#ifndef NDEBUG
    long oldHead = curHead;
#endif

    for (int x = 0; x < wbOut; x++) {
        assert(usedCache.size());
        delete usedCache[0];
        usedCache.pop_front();

        igbe->anDq(annSmWb, annUsedCacheQ);
        igbe->anDq(annSmWb, annDescQ);
    }

    curHead += wbOut;
    wbOut = 0;

    if (curHead >= descLen())
        curHead -= descLen();

    // Update the head
    updateHead(curHead);

    DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
            oldHead, curHead);

    // If we still have more to wb, call wb now
    actionAfterWb();
    if (moreToWb) {
        moreToWb = false;
        DPRINTF(EthernetDesc, "Writeback has more to do\n");
        writeback(wbAlignment);
    }

    if (!wbOut) {
        igbe->checkDrain();
        if (usedCache.size())
            igbe->anBegin(annSmWb, "Wait", CPA::FL_WAIT);
        else
            igbe->anWe(annSmWb, annUsedCacheQ);
    }
    fetchAfterWb();
}

template<class T>
void
IGbE::DescCache<T>::reset()
{
    DPRINTF(EthernetDesc, "Resetting descriptor cache\n");
    for (typename CacheType::size_type x = 0; x < usedCache.size(); x++)
        delete usedCache[x];
    for (typename CacheType::size_type x = 0; x < unusedCache.size(); x++)
        delete unusedCache[x];

    usedCache.clear();
    unusedCache.clear();

    cachePnt = 0;

}

template<class T>
void
IGbE::DescCache<T>::serialize(std::ostream &os)
{
    SERIALIZE_SCALAR(cachePnt);
    SERIALIZE_SCALAR(curFetching);
    SERIALIZE_SCALAR(wbOut);
    SERIALIZE_SCALAR(moreToWb);
    SERIALIZE_SCALAR(wbAlignment);

    typename CacheType::size_type usedCacheSize = usedCache.size();
    SERIALIZE_SCALAR(usedCacheSize);
    for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
        arrayParamOut(os, csprintf("usedCache_%d", x),
                      (uint8_t*)usedCache[x],sizeof(T));
    }

    typename CacheType::size_type unusedCacheSize = unusedCache.size();
    SERIALIZE_SCALAR(unusedCacheSize);
    for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
        arrayParamOut(os, csprintf("unusedCache_%d", x),
                      (uint8_t*)unusedCache[x],sizeof(T));
    }

    Tick fetch_delay = 0, wb_delay = 0;
    if (fetchDelayEvent.scheduled())
        fetch_delay = fetchDelayEvent.when();
    SERIALIZE_SCALAR(fetch_delay);
    if (wbDelayEvent.scheduled())
        wb_delay = wbDelayEvent.when();
    SERIALIZE_SCALAR(wb_delay);


}

template<class T>
void
IGbE::DescCache<T>::unserialize(Checkpoint *cp, const std::string &section)
{
    UNSERIALIZE_SCALAR(cachePnt);
    UNSERIALIZE_SCALAR(curFetching);
    UNSERIALIZE_SCALAR(wbOut);
    UNSERIALIZE_SCALAR(moreToWb);
    UNSERIALIZE_SCALAR(wbAlignment);

    typename CacheType::size_type usedCacheSize;
    UNSERIALIZE_SCALAR(usedCacheSize);
    T *temp;
    for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
        temp = new T;
        arrayParamIn(cp, section, csprintf("usedCache_%d", x),
                     (uint8_t*)temp,sizeof(T));
        usedCache.push_back(temp);
    }

    typename CacheType::size_type unusedCacheSize;
    UNSERIALIZE_SCALAR(unusedCacheSize);
    for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
        temp = new T;
        arrayParamIn(cp, section, csprintf("unusedCache_%d", x),
                     (uint8_t*)temp,sizeof(T));
        unusedCache.push_back(temp);
    }
    Tick fetch_delay = 0, wb_delay = 0;
    UNSERIALIZE_SCALAR(fetch_delay);
    UNSERIALIZE_SCALAR(wb_delay);
    if (fetch_delay)
        igbe->schedule(fetchDelayEvent, fetch_delay);
    if (wb_delay)
        igbe->schedule(wbDelayEvent, wb_delay);


}

///////////////////////////// IGbE::RxDescCache //////////////////////////////

IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<RxDesc>(i, n, s), pktDone(false), splitCount(0),
      pktEvent(this), pktHdrEvent(this), pktDataEvent(this)

{
    annSmFetch = "RX Desc Fetch";
    annSmWb = "RX Desc Writeback";
    annUnusedDescQ = "RX Unused Descriptors";
    annUnusedCacheQ = "RX Unused Descriptor Cache";
    annUsedCacheQ = "RX Used Descriptor Cache";
    annUsedDescQ = "RX Used Descriptors";
    annDescQ = "RX Descriptors";
}

void
IGbE::RxDescCache::pktSplitDone()
{
    splitCount++;
    DPRINTF(EthernetDesc,
            "Part of split packet done: splitcount now %d\n", splitCount);
    assert(splitCount <= 2);
    if (splitCount != 2)
        return;
    splitCount = 0;
    DPRINTF(EthernetDesc,
            "Part of split packet done: calling pktComplete()\n");
    pktComplete();
}

int
IGbE::RxDescCache::writePacket(EthPacketPtr packet, int pkt_offset)
{
    assert(unusedCache.size());
    //if (!unusedCache.size())
    //    return false;

    pktPtr = packet;
    pktDone = false;
    unsigned buf_len, hdr_len;

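    // Copy the packet into the buffer(s) named by the next unused
    // descriptor. The descriptor format selected in SRRCTL determines
    // where the data goes: a single legacy buffer, a single advanced
    // buffer, or an advanced header-split pair with the header and
    // payload DMAed separately.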
    RxDesc *desc = unusedCache.front();
    switch (igbe->regs.srrctl.desctype()) {
      case RXDT_LEGACY:
        assert(pkt_offset == 0);
        bytesCopied = packet->length;
        DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n",
                packet->length, igbe->regs.rctl.descSize());
        assert(packet->length < igbe->regs.rctl.descSize());
        igbe->dmaWrite(pciToDma(desc->legacy.buf),
                       packet->length, &pktEvent, packet->data,
                       igbe->rxWriteDelay);
        break;
      case RXDT_ADV_ONEBUF:
        assert(pkt_offset == 0);
        bytesCopied = packet->length;
        buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
            igbe->regs.rctl.descSize();
        DPRINTF(EthernetDesc, "Packet Length: %d srrctl: %#x Desc Size: %d\n",
                packet->length, igbe->regs.srrctl(), buf_len);
        assert(packet->length < buf_len);
        igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
                       packet->length, &pktEvent, packet->data,
                       igbe->rxWriteDelay);
        desc->adv_wb.header_len = htole(0);
        desc->adv_wb.sph = htole(0);
        desc->adv_wb.pkt_len = htole((uint16_t)(pktPtr->length));
        break;
      case RXDT_ADV_SPLIT_A:
        int split_point;

        buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
            igbe->regs.rctl.descSize();
        hdr_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.hdrLen() : 0;
        DPRINTF(EthernetDesc,
                "lpe: %d Packet Length: %d offset: %d srrctl: %#x "
                "hdr addr: %#x Hdr Size: %d desc addr: %#x Desc Size: %d\n",
                igbe->regs.rctl.lpe(), packet->length, pkt_offset,
                igbe->regs.srrctl(), desc->adv_read.hdr, hdr_len,
                desc->adv_read.pkt, buf_len);

        split_point = hsplit(pktPtr);

        if (packet->length <= hdr_len) {
            bytesCopied = packet->length;
            assert(pkt_offset == 0);
            DPRINTF(EthernetDesc, "Hdr split: Entire packet in header\n");
            igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
                           packet->length, &pktEvent, packet->data,
                           igbe->rxWriteDelay);
            desc->adv_wb.header_len = htole((uint16_t)packet->length);
            desc->adv_wb.sph = htole(0);
            desc->adv_wb.pkt_len = htole(0);
        } else if (split_point) {
            if (pkt_offset) {
                // we are only copying some data, header/data has already been
                // copied
                int max_to_copy =
                    std::min(packet->length - pkt_offset, buf_len);
                bytesCopied += max_to_copy;
                DPRINTF(EthernetDesc,
                        "Hdr split: Continuing data buffer copy\n");
                igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
                               max_to_copy, &pktEvent,
                               packet->data + pkt_offset, igbe->rxWriteDelay);
                desc->adv_wb.header_len = htole(0);
                desc->adv_wb.pkt_len = htole((uint16_t)max_to_copy);
                desc->adv_wb.sph = htole(0);
            } else {
                int max_to_copy =
                    std::min(packet->length - split_point, buf_len);
                bytesCopied += max_to_copy + split_point;

                DPRINTF(EthernetDesc, "Hdr split: splitting at %d\n",
                        split_point);
                igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
                               split_point, &pktHdrEvent,
                               packet->data, igbe->rxWriteDelay);
                igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
                               max_to_copy, &pktDataEvent,
                               packet->data + split_point, igbe->rxWriteDelay);
                desc->adv_wb.header_len = htole(split_point);
                desc->adv_wb.sph = 1;
                desc->adv_wb.pkt_len = htole((uint16_t)(max_to_copy));
            }
        } else {
            panic("Header split not fitting within header buffer or "
                  "undecodable packet not fitting in header unsupported\n");
        }
        break;
      default:
        panic("Unimplemented RX receive buffer type: %d\n",
              igbe->regs.srrctl.desctype());
    }
    return bytesCopied;

}

void
IGbE::RxDescCache::pktComplete()
{
    assert(unusedCache.size());
    RxDesc *desc;
    desc = unusedCache.front();

    igbe->anBegin("RXS", "Update Desc");

    uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4;
    DPRINTF(EthernetDesc, "pktPtr->length: %d bytesCopied: %d "
            "stripcrc offset: %d value written: %d %d\n",
            pktPtr->length, bytesCopied, crcfixup,
            htole((uint16_t)(pktPtr->length + crcfixup)),
            (uint16_t)(pktPtr->length + crcfixup));

    // no support for anything but starting at 0
    assert(igbe->regs.rxcsum.pcss() == 0);

    DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n");

    uint16_t status = RXDS_DD;
    uint8_t err = 0;
    uint16_t ext_err = 0;
    uint16_t csum = 0;
    uint16_t ptype = 0;
    uint16_t ip_id = 0;

    assert(bytesCopied <= pktPtr->length);
    if (bytesCopied == pktPtr->length)
        status |= RXDS_EOP;

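    // If checksum offload is enabled, verify the IP/TCP/UDP checksums now
    // and record the outcome in the status/error fields that get written
    // back in the descriptor.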
    IpPtr ip(pktPtr);

    if (ip) {
        DPRINTF(EthernetDesc, "Processing IP packet with Id=%d\n", ip->id());
        ptype |= RXDP_IPV4;
        ip_id = ip->id();

        if (igbe->regs.rxcsum.ipofld()) {
            DPRINTF(EthernetDesc, "Checking IP checksum\n");
            status |= RXDS_IPCS;
            csum = htole(cksum(ip));
            igbe->rxIpChecksums++;
            if (cksum(ip) != 0) {
                err |= RXDE_IPE;
                ext_err |= RXDEE_IPE;
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
            }
        }
        TcpPtr tcp(ip);
        if (tcp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking TCP checksum\n");
            status |= RXDS_TCPCS;
            ptype |= RXDP_TCP;
            csum = htole(cksum(tcp));
            igbe->rxTcpChecksums++;
            if (cksum(tcp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                err |= RXDE_TCPE;
                ext_err |= RXDEE_TCPE;
            }
        }

        UdpPtr udp(ip);
        if (udp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking UDP checksum\n");
            status |= RXDS_UDPCS;
            ptype |= RXDP_UDP;
            csum = htole(cksum(udp));
            igbe->rxUdpChecksums++;
            if (cksum(udp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                ext_err |= RXDEE_TCPE;
                err |= RXDE_TCPE;
            }
        }
    } else { // if ip
        DPRINTF(EthernetSM, "Processing non-IP packet\n");
    }

    switch (igbe->regs.srrctl.desctype()) {
      case RXDT_LEGACY:
        desc->legacy.len = htole((uint16_t)(pktPtr->length + crcfixup));
        desc->legacy.status = htole(status);
        desc->legacy.errors = htole(err);
        // No vlan support at this point... just set it to 0
        desc->legacy.vlan = 0;
        break;
      case RXDT_ADV_SPLIT_A:
      case RXDT_ADV_ONEBUF:
        desc->adv_wb.rss_type = htole(0);
        desc->adv_wb.pkt_type = htole(ptype);
        if (igbe->regs.rxcsum.pcsd()) {
            // no rss support right now
            desc->adv_wb.rss_hash = htole(0);
        } else {
            desc->adv_wb.id = htole(ip_id);
            desc->adv_wb.csum = htole(csum);
        }
        desc->adv_wb.status = htole(status);
        desc->adv_wb.errors = htole(ext_err);
        // no vlan support
        desc->adv_wb.vlan_tag = htole(0);
        break;
      default:
        panic("Unimplemented RX receive buffer type %d\n",
              igbe->regs.srrctl.desctype());
    }

    DPRINTF(EthernetDesc, "Descriptor complete w0: %#x w1: %#x\n",
            desc->adv_read.pkt, desc->adv_read.hdr);

    if (bytesCopied == pktPtr->length) {
        DPRINTF(EthernetDesc,
                "Packet completely written to descriptor buffers\n");
        // Deal with the rx timer interrupts
        if (igbe->regs.rdtr.delay()) {
            Tick delay = igbe->regs.rdtr.delay() * igbe->intClock();
            DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n", delay);
            igbe->reschedule(igbe->rdtrEvent, curTick() + delay);
        }

        if (igbe->regs.radv.idv()) {
            Tick delay = igbe->regs.radv.idv() * igbe->intClock();
            DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n", delay);
            if (!igbe->radvEvent.scheduled()) {
                igbe->schedule(igbe->radvEvent, curTick() + delay);
            }
        }

        // if neither radv nor rdtr, maybe itr is set...
        if (!igbe->regs.rdtr.delay() && !igbe->regs.radv.idv()) {
            DPRINTF(EthernetSM,
                    "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
            igbe->postInterrupt(IT_RXT);
        }

        // If the packet is small enough, interrupt appropriately
        // I wonder if this is delayed or not?!
        if (pktPtr->length <= igbe->regs.rsrpd.idv()) {
            DPRINTF(EthernetSM,
                    "RXS: Posting IT_SRPD because small packet received\n");
            igbe->postInterrupt(IT_SRPD);
        }
        bytesCopied = 0;
    }

    pktPtr = NULL;
    igbe->checkDrain();
    enableSm();
    pktDone = true;

    igbe->anBegin("RXS", "Done Updating Desc");
    DPRINTF(EthernetDesc, "Processing of this descriptor complete\n");
    igbe->anDq("RXS", annUnusedCacheQ);
    unusedCache.pop_front();
    igbe->anQ("RXS", annUsedCacheQ);
    usedCache.push_back(desc);
}

void
IGbE::RxDescCache::enableSm()
{
    if (!igbe->drainEvent) {
        igbe->rxTick = true;
        igbe->restartClock();
    }
}

bool
IGbE::RxDescCache::packetDone()
{
    if (pktDone) {
        pktDone = false;
        return true;
    }
    return false;
}

bool
IGbE::RxDescCache::hasOutstandingEvents()
{
    return pktEvent.scheduled() || wbEvent.scheduled() ||
        fetchEvent.scheduled() || pktHdrEvent.scheduled() ||
        pktDataEvent.scheduled();

}

void
IGbE::RxDescCache::serialize(std::ostream &os)
{
    DescCache<RxDesc>::serialize(os);
    SERIALIZE_SCALAR(pktDone);
    SERIALIZE_SCALAR(splitCount);
    SERIALIZE_SCALAR(bytesCopied);
}

void
IGbE::RxDescCache::unserialize(Checkpoint *cp, const std::string &section)
{
    DescCache<RxDesc>::unserialize(cp, section);
    UNSERIALIZE_SCALAR(pktDone);
    UNSERIALIZE_SCALAR(splitCount);
    UNSERIALIZE_SCALAR(bytesCopied);
}


///////////////////////////// IGbE::TxDescCache //////////////////////////////

IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<TxDesc>(i, n, s), pktDone(false), isTcp(false),
      pktWaiting(false), completionAddress(0), completionEnabled(false),
      useTso(false), pktEvent(this), headerEvent(this), nullEvent(this)
{
    annSmFetch = "TX Desc Fetch";
    annSmWb = "TX Desc Writeback";
    annUnusedDescQ = "TX Unused Descriptors";
    annUnusedCacheQ = "TX Unused Descriptor Cache";
    annUsedCacheQ = "TX Used Descriptor Cache";
    annUsedDescQ = "TX Used Descriptors";
    annDescQ = "TX Descriptors";
}

void
IGbE::TxDescCache::processContextDesc()
{
    assert(unusedCache.size());
    TxDesc *desc;

    DPRINTF(EthernetDesc, "Checking and processing context descriptors\n");

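    // Context descriptors carry no packet data; they set up per-queue
    // state such as whether the payload is TCP and, when TSE is set, the
    // header length, MSS, and total payload length used for TCP
    // segmentation offload.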
    while (!useTso && unusedCache.size() &&
           TxdOp::isContext(unusedCache.front())) {
        DPRINTF(EthernetDesc, "Got context descriptor type...\n");

        desc = unusedCache.front();
        DPRINTF(EthernetDesc, "Descriptor upper: %#x lower: %#X\n",
                desc->d1, desc->d2);


        // is this going to be a tcp or udp packet?
        isTcp = TxdOp::tcp(desc) ? true : false;

        // set up all the TSO variables; they'll be ignored if we don't use
        // TSO for this connection
        tsoHeaderLen = TxdOp::hdrlen(desc);
        tsoMss  = TxdOp::mss(desc);

        if (TxdOp::isType(desc, TxdOp::TXD_CNXT) && TxdOp::tse(desc)) {
            DPRINTF(EthernetDesc, "TCP offload enabled for packet hdrlen: "
                    "%d mss: %d paylen %d\n", TxdOp::hdrlen(desc),
                    TxdOp::mss(desc), TxdOp::getLen(desc));
            useTso = true;
            tsoTotalLen = TxdOp::getLen(desc);
            tsoLoadedHeader = false;
            tsoDescBytesUsed = 0;
            tsoUsedLen = 0;
            tsoPrevSeq = 0;
            tsoPktHasHeader = false;
            tsoPkts = 0;

        }

        TxdOp::setDd(desc);
        unusedCache.pop_front();
        igbe->anDq("TXS", annUnusedCacheQ);
        usedCache.push_back(desc);
        igbe->anQ("TXS", annUsedCacheQ);
    }

    if (!unusedCache.size())
        return;

    desc = unusedCache.front();
    if (!useTso && TxdOp::isType(desc, TxdOp::TXD_ADVDATA) &&
        TxdOp::tse(desc)) {
        DPRINTF(EthernetDesc, "TCP offload(adv) enabled for packet "
                "hdrlen: %d mss: %d paylen %d\n",
                tsoHeaderLen, tsoMss, TxdOp::getTsoLen(desc));
        useTso = true;
        tsoTotalLen = TxdOp::getTsoLen(desc);
        tsoLoadedHeader = false;
        tsoDescBytesUsed = 0;
        tsoUsedLen = 0;
        tsoPrevSeq = 0;
        tsoPktHasHeader = false;
        tsoPkts = 0;
    }

    if (useTso && !tsoLoadedHeader) {
        // we need to fetch a header
        DPRINTF(EthernetDesc, "Starting DMA of TSO header\n");
        assert(TxdOp::isData(desc) && TxdOp::getLen(desc) >= tsoHeaderLen);
        pktWaiting = true;
        assert(tsoHeaderLen <= 256);
        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
                      tsoHeaderLen, &headerEvent, tsoHeader, 0);
    }
}
1624
1625void
1626IGbE::TxDescCache::headerComplete()
1627{
1628    DPRINTF(EthernetDesc, "TSO: Fetching TSO header complete\n");
1629    pktWaiting = false;
1630
1631    assert(unusedCache.size());
1632    TxDesc *desc = unusedCache.front();
1633    DPRINTF(EthernetDesc, "TSO: len: %d tsoHeaderLen: %d\n",
1634            TxdOp::getLen(desc), tsoHeaderLen);
1635
1636    if (TxdOp::getLen(desc) == tsoHeaderLen) {
1637        tsoDescBytesUsed = 0;
1638        tsoLoadedHeader = true;
1639        unusedCache.pop_front();
1640        usedCache.push_back(desc);
1641    } else {
1642        // I don't think this case happens; I think the header is always
1643        // its own packet. If it weren't, it might be as simple as just
1644        // incrementing descBytesUsed by the header length, but I'm not
1645        // completely sure.
1646        panic("TSO header part of bigger packet, not implemented\n");
1647    }
1648    enableSm();
1649    igbe->checkDrain();
1650}
1651
1652unsigned
1653IGbE::TxDescCache::getPacketSize(EthPacketPtr p)
1654{
1655    if (!unusedCache.size())
1656        return 0;
1657
1658    DPRINTF(EthernetDesc, "Starting processing of descriptor\n");
1659
1660    assert(!useTso || tsoLoadedHeader);
1661    TxDesc *desc = unusedCache.front();
1662
1663    if (useTso) {
1664        DPRINTF(EthernetDesc, "getPacket(): TxDescriptor data "
1665                "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
1666        DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
1667                "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
1668                tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
1669        DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d "
1670                "this descLen: %d\n",
1671                tsoDescBytesUsed, tsoCopyBytes, TxdOp::getLen(desc));
1672        DPRINTF(EthernetDesc, "TSO: pktHasHeader: %d\n", tsoPktHasHeader);
1673
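        // The next segment carries at most tsoMss payload bytes plus one copy
        // of the header, but never more than what remains in the current
        // descriptor. As an illustration (numbers are hypothetical): with
        // tsoMss = 1448, tsoHeaderLen = 54, and a fresh 4000-byte descriptor,
        // the first call returns min(1448, 4000) + 54 = 1502 bytes.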
1674        if (tsoPktHasHeader)
1675            tsoCopyBytes =  std::min((tsoMss + tsoHeaderLen) - p->length,
1676                                     TxdOp::getLen(desc) - tsoDescBytesUsed);
1677        else
1678            tsoCopyBytes =  std::min(tsoMss,
1679                                     TxdOp::getLen(desc) - tsoDescBytesUsed);
1680        unsigned pkt_size =
1681            tsoCopyBytes + (tsoPktHasHeader ? 0 : tsoHeaderLen);
1682        DPRINTF(EthernetDesc, "TSO: Next packet is %d bytes\n", pkt_size);
1683        return pkt_size;
1684    }
1685
1686    DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
1687            TxdOp::getLen(unusedCache.front()));
1688    return TxdOp::getLen(desc);
1689}
1690
1691void
1692IGbE::TxDescCache::getPacketData(EthPacketPtr p)
1693{
1694    assert(unusedCache.size());
1695
1696    TxDesc *desc;
1697    desc = unusedCache.front();
1698
1699    DPRINTF(EthernetDesc, "getPacketData(): TxDescriptor data "
1700            "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
1701    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
1702           TxdOp::getLen(desc));
1703
1704    pktPtr = p;
1705
1706    pktWaiting = true;
1707
1708    DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d\n", p->length);
1709
1710    if (useTso) {
1711        assert(tsoLoadedHeader);
1712        if (!tsoPktHasHeader) {
1713            DPRINTF(EthernetDesc,
1714                    "Loading TSO header (%d bytes) into start of packet\n",
1715                    tsoHeaderLen);
1716            memcpy(p->data, tsoHeader, tsoHeaderLen);
1717            p->length += tsoHeaderLen;
1718            tsoPktHasHeader = true;
1719        }
1720    }
1721
1722    if (useTso) {
1723        tsoDescBytesUsed += tsoCopyBytes;
1724        assert(tsoDescBytesUsed <= TxdOp::getLen(desc));
1725        DPRINTF(EthernetDesc,
1726                "Starting DMA of packet at offset %d length: %d\n",
1727                p->length, tsoCopyBytes);
1728        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc))
1729                      + tsoDescBytesUsed,
1730                      tsoCopyBytes, &pktEvent, p->data + p->length,
1731                      igbe->txReadDelay);
1732    } else {
1733        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
1734                      TxdOp::getLen(desc), &pktEvent, p->data + p->length,
1735                      igbe->txReadDelay);
1736    }
1737}
1738
1739void
1740IGbE::TxDescCache::pktComplete()
1741{
1742
1743    TxDesc *desc;
1744    assert(unusedCache.size());
1745    assert(pktPtr);
1746
1747    igbe->anBegin("TXS", "Update Desc");
1748
1749    DPRINTF(EthernetDesc, "DMA of packet complete\n");
1750
1751
1752    desc = unusedCache.front();
1753    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
1754           TxdOp::getLen(desc));
1755
1756    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
1757            desc->d1, desc->d2);
1758    DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
1759            "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
1760            tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
1761
1762    // Set the length of the data in the EtherPacket
1763    if (useTso) {
1764        pktPtr->length += tsoCopyBytes;
1765        tsoUsedLen += tsoCopyBytes;
1766    } else
1767        pktPtr->length += TxdOp::getLen(desc);
1768
1769    DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d\n",
1770            tsoDescBytesUsed, tsoCopyBytes);
1771
1772
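    // A packet can span several descriptors: for non-TSO sends this is the
    // case whenever EOP is clear, and for TSO sends whenever the segment being
    // built is still short of tsoMss + tsoHeaderLen bytes and payload remains.
    // In that case retire this descriptor, keep the partially built packet,
    // and wait for the next descriptor before handing anything to the FIFO.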
1773    if ((!TxdOp::eop(desc) && !useTso) ||
1774        (pktPtr->length < (tsoMss + tsoHeaderLen) &&
1775         tsoTotalLen != tsoUsedLen && useTso)) {
1776        assert(!useTso || (tsoDescBytesUsed == TxdOp::getLen(desc)));
1777        igbe->anDq("TXS", annUnusedCacheQ);
1778        unusedCache.pop_front();
1779        igbe->anQ("TXS", annUsedCacheQ);
1780        usedCache.push_back(desc);
1781
1782        tsoDescBytesUsed = 0;
1783        pktDone = true;
1784        pktWaiting = false;
1785        pktMultiDesc = true;
1786
1787        DPRINTF(EthernetDesc, "Partial Packet Descriptor of %d bytes Done\n",
1788                pktPtr->length);
1789        pktPtr = NULL;
1790
1791        enableSm();
1792        igbe->checkDrain();
1793        return;
1794    }
1795
1796
1797    pktMultiDesc = false;
1798    // no support for vlans
1799    assert(!TxdOp::vle(desc));
1800
1801    // we only support single packet descriptors at this point
1802    if (!useTso)
1803        assert(TxdOp::eop(desc));
1804
1805    // set that this packet is done
1806    if (TxdOp::rs(desc))
1807        TxdOp::setDd(desc);
1808
1809    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
1810            desc->d1, desc->d2);
1811
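    // For TSO, each generated segment gets its headers patched up to look like
    // an independent packet: the IP identification is bumped per segment, the
    // IP/UDP length fields are recomputed from the segment size, and the TCP
    // sequence number is advanced by the payload already sent. FIN and PSH
    // (mask 0x9 below) are cleared on every segment except the last one.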
1812    if (useTso) {
1813        IpPtr ip(pktPtr);
1814        if (ip) {
1815            DPRINTF(EthernetDesc, "TSO: Modifying IP header. Id + %d\n",
1816                    tsoPkts);
1817            ip->id(ip->id() + tsoPkts++);
1818            ip->len(pktPtr->length - EthPtr(pktPtr)->size());
1819
1820            TcpPtr tcp(ip);
1821            if (tcp) {
1822                DPRINTF(EthernetDesc,
1823                        "TSO: Modifying TCP header. old seq %d + %d\n",
1824                        tcp->seq(), tsoPrevSeq);
1825                tcp->seq(tcp->seq() + tsoPrevSeq);
1826                if (tsoUsedLen != tsoTotalLen)
1827                    tcp->flags(tcp->flags() & ~9); // clear fin & psh
1828            }
1829            UdpPtr udp(ip);
1830            if (udp) {
1831                DPRINTF(EthernetDesc, "TSO: Modifying UDP header.\n");
1832                udp->len(pktPtr->length - EthPtr(pktPtr)->size());
1833            }
1834        }
1835        tsoPrevSeq = tsoUsedLen;
1836    }
1837
1838    if (DTRACE(EthernetDesc)) {
1839        IpPtr ip(pktPtr);
1840        if (ip)
1841            DPRINTF(EthernetDesc, "Processing Ip packet with Id=%d\n",
1842                    ip->id());
1843        else
1844            DPRINTF(EthernetDesc, "Processing Non-Ip packet\n");
1845    }
1846
1847    // Checksums are only offloaded for new descriptor types
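    // TxdOp::ixsm() requests IP header checksum insertion and TxdOp::txsm()
    // requests TCP/UDP checksum insertion; the model computes the checksums
    // here in software on the assembled packet rather than modelling dedicated
    // checksum hardware.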
1848    if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) {
1849        DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
1850        IpPtr ip(pktPtr);
1851        assert(ip);
1852        if (TxdOp::ixsm(desc)) {
1853            ip->sum(0);
1854            ip->sum(cksum(ip));
1855            igbe->txIpChecksums++;
1856            DPRINTF(EthernetDesc, "Calculated IP checksum\n");
1857        }
1858        if (TxdOp::txsm(desc)) {
1859            TcpPtr tcp(ip);
1860            UdpPtr udp(ip);
1861            if (tcp) {
1862                tcp->sum(0);
1863                tcp->sum(cksum(tcp));
1864                igbe->txTcpChecksums++;
1865                DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
1866            } else if (udp) {
1867                assert(udp);
1868                udp->sum(0);
1869                udp->sum(cksum(udp));
1870                igbe->txUdpChecksums++;
1871                DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
1872            } else {
1873                panic("Told to checksum, but don't know how\n");
1874            }
1875        }
1876    }
1877
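    // IDE in the descriptor asks for a delayed (rather than immediate)
    // transmit interrupt: TIDV re-arms a per-packet delay timer and TADV, if
    // also enabled, bounds the total delay with an absolute timer that is only
    // scheduled once. Both delays are expressed in units of the interrupt
    // clock (idv() * intClock()).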
1878    if (TxdOp::ide(desc)) {
1879        // Deal with the tx interrupt delay timers
1880        DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
1881        if (igbe->regs.tidv.idv()) {
1882            Tick delay = igbe->regs.tidv.idv() * igbe->intClock();
1883            DPRINTF(EthernetDesc, "setting tidv\n");
1884            igbe->reschedule(igbe->tidvEvent, curTick() + delay, true);
1885        }
1886
1887        if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
1888            Tick delay = igbe->regs.tadv.idv() * igbe->intClock();
1889            DPRINTF(EthernetDesc, "setting tadv\n");
1890            if (!igbe->tadvEvent.scheduled()) {
1891                igbe->schedule(igbe->tadvEvent, curTick() + delay);
1892            }
1893        }
1894    }
1895
1896
1897    if (!useTso ||  TxdOp::getLen(desc) == tsoDescBytesUsed) {
1898        DPRINTF(EthernetDesc, "Descriptor Done\n");
1899        igbe->anDq("TXS", annUnusedCacheQ);
1900        unusedCache.pop_front();
1901        igbe->anQ("TXS", annUsedCacheQ);
1902        usedCache.push_back(desc);
1903        tsoDescBytesUsed = 0;
1904    }
1905
1906    if (useTso && tsoUsedLen == tsoTotalLen)
1907        useTso = false;
1908
1909
1910    DPRINTF(EthernetDesc,
1911            "------Packet of %d bytes ready for transmission-------\n",
1912            pktPtr->length);
1913    pktDone = true;
1914    pktWaiting = false;
1915    pktPtr = NULL;
1916    tsoPktHasHeader = false;
1917
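    // TXDCTL.WTHRESH controls how aggressively completed descriptors are
    // written back: 0 means write back immediately, otherwise descriptors are
    // accumulated and written back in bursts. The value passed to writeback(),
    // (cacheBlockSize() - 1) >> 4, works out to the number of 16-byte
    // descriptors per cache block minus one (e.g., 3 for a hypothetical
    // 64-byte block), so those writebacks happen in whole cache blocks.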
1918    if (igbe->regs.txdctl.wthresh() == 0) {
1919        igbe->anBegin("TXS", "Desc Writeback");
1920        DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
1921        writeback(0);
1922    } else if (igbe->regs.txdctl.gran() && igbe->regs.txdctl.wthresh() >=
1923               descInBlock(usedCache.size())) {
1924        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
1925        igbe->anBegin("TXS", "Desc Writeback");
1926        writeback((igbe->cacheBlockSize()-1)>>4);
1927    } else if (igbe->regs.txdctl.wthresh() >= usedCache.size()) {
1928        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
1929        igbe->anBegin("TXS", "Desc Writeback");
1930        writeback((igbe->cacheBlockSize()-1)>>4);
1931    }
1932
1933    enableSm();
1934    igbe->checkDrain();
1935}
1936
1937void
1938IGbE::TxDescCache::actionAfterWb()
1939{
1940    DPRINTF(EthernetDesc, "actionAfterWb() completionEnabled: %d\n",
1941            completionEnabled);
1942    igbe->postInterrupt(iGbReg::IT_TXDW);
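    // If head write-back completion is enabled, mirror the current transmit
    // head pointer (TDH) into the host-supplied completion address so the
    // driver can poll memory instead of reading the TDH register. The low two
    // bits of the address are masked off to keep the write 4-byte aligned.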
1943    if (completionEnabled) {
1944        descEnd = igbe->regs.tdh();
1945        DPRINTF(EthernetDesc,
1946                "Completion writing back value: %d to addr: %#x\n", descEnd,
1947                completionAddress);
1948        igbe->dmaWrite(pciToDma(mbits(completionAddress, 63, 2)),
1949                       sizeof(descEnd), &nullEvent, (uint8_t*)&descEnd, 0);
1950    }
1951}
1952
1953void
1954IGbE::TxDescCache::serialize(std::ostream &os)
1955{
1956    DescCache<TxDesc>::serialize(os);
1957    SERIALIZE_SCALAR(pktDone);
1958    SERIALIZE_SCALAR(isTcp);
1959    SERIALIZE_SCALAR(pktWaiting);
1960    SERIALIZE_SCALAR(pktMultiDesc);
1961
1962    SERIALIZE_SCALAR(useTso);
1963    SERIALIZE_SCALAR(tsoHeaderLen);
1964    SERIALIZE_SCALAR(tsoMss);
1965    SERIALIZE_SCALAR(tsoTotalLen);
1966    SERIALIZE_SCALAR(tsoUsedLen);
1967    SERIALIZE_SCALAR(tsoPrevSeq);
1968    SERIALIZE_SCALAR(tsoPktPayloadBytes);
1969    SERIALIZE_SCALAR(tsoLoadedHeader);
1970    SERIALIZE_SCALAR(tsoPktHasHeader);
1971    SERIALIZE_ARRAY(tsoHeader, 256);
1972    SERIALIZE_SCALAR(tsoDescBytesUsed);
1973    SERIALIZE_SCALAR(tsoCopyBytes);
1974    SERIALIZE_SCALAR(tsoPkts);
1975
1976    SERIALIZE_SCALAR(completionAddress);
1977    SERIALIZE_SCALAR(completionEnabled);
1978    SERIALIZE_SCALAR(descEnd);
1979}
1980
1981void
1982IGbE::TxDescCache::unserialize(Checkpoint *cp, const std::string &section)
1983{
1984    DescCache<TxDesc>::unserialize(cp, section);
1985    UNSERIALIZE_SCALAR(pktDone);
1986    UNSERIALIZE_SCALAR(isTcp);
1987    UNSERIALIZE_SCALAR(pktWaiting);
1988    UNSERIALIZE_SCALAR(pktMultiDesc);
1989
1990    UNSERIALIZE_SCALAR(useTso);
1991    UNSERIALIZE_SCALAR(tsoHeaderLen);
1992    UNSERIALIZE_SCALAR(tsoMss);
1993    UNSERIALIZE_SCALAR(tsoTotalLen);
1994    UNSERIALIZE_SCALAR(tsoUsedLen);
1995    UNSERIALIZE_SCALAR(tsoPrevSeq);
1996    UNSERIALIZE_SCALAR(tsoPktPayloadBytes);
1997    UNSERIALIZE_SCALAR(tsoLoadedHeader);
1998    UNSERIALIZE_SCALAR(tsoPktHasHeader);
1999    UNSERIALIZE_ARRAY(tsoHeader, 256);
2000    UNSERIALIZE_SCALAR(tsoDescBytesUsed);
2001    UNSERIALIZE_SCALAR(tsoCopyBytes);
2002    UNSERIALIZE_SCALAR(tsoPkts);
2003
2004    UNSERIALIZE_SCALAR(completionAddress);
2005    UNSERIALIZE_SCALAR(completionEnabled);
2006    UNSERIALIZE_SCALAR(descEnd);
2007}
2008
2009bool
2010IGbE::TxDescCache::packetAvailable()
2011{
2012    if (pktDone) {
2013        pktDone = false;
2014        return true;
2015    }
2016    return false;
2017}
2018
2019void
2020IGbE::TxDescCache::enableSm()
2021{
2022    if (!igbe->drainEvent) {
2023        igbe->txTick = true;
2024        igbe->restartClock();
2025    }
2026}
2027
2028bool
2029IGbE::TxDescCache::hasOutstandingEvents()
2030{
2031    return pktEvent.scheduled() || wbEvent.scheduled() ||
2032        fetchEvent.scheduled();
2033}
2034
2035
2036///////////////////////////////////// IGbE /////////////////////////////////
2037
2038void
2039IGbE::restartClock()
2040{
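    // Align the next tick to the device's clock: round curTick() down to a
    // clock-period boundary and schedule one full period later, so the state
    // machines always advance on clock edges. For example (illustrative
    // numbers), with ticks(1) == 1000 and curTick() == 2500 the event is
    // scheduled at tick 3000.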
2041    if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) &&
2042        getState() == SimObject::Running)
2043        schedule(tickEvent, (curTick() / ticks(1)) * ticks(1) + ticks(1));
2044}
2045
2046unsigned int
2047IGbE::drain(Event *de)
2048{
2049    unsigned int count;
2050    count = pioPort->drain(de) + dmaPort->drain(de);
2051    if (rxDescCache.hasOutstandingEvents() ||
2052        txDescCache.hasOutstandingEvents()) {
2053        count++;
2054        drainEvent = de;
2055    }
2056
2057    txFifoTick = false;
2058    txTick = false;
2059    rxTick = false;
2060
2061    if (tickEvent.scheduled())
2062        deschedule(tickEvent);
2063
2064    if (count)
2065        changeState(Draining);
2066    else
2067        changeState(Drained);
2068
2069    DPRINTF(EthernetSM, "got drain() returning %d\n", count);
2070    return count;
2071}
2072
2073void
2074IGbE::resume()
2075{
2076    SimObject::resume();
2077
2078    txFifoTick = true;
2079    txTick = true;
2080    rxTick = true;
2081
2082    restartClock();
2083    DPRINTF(EthernetSM, "resuming from drain\n");
2084}
2085
2086void
2087IGbE::checkDrain()
2088{
2089    if (!drainEvent)
2090        return;
2091
2092    DPRINTF(EthernetSM, "checkDrain() in drain\n");
2093    txFifoTick = false;
2094    txTick = false;
2095    rxTick = false;
2096    if (!rxDescCache.hasOutstandingEvents() &&
2097        !txDescCache.hasOutstandingEvents()) {
2098        drainEvent->process();
2099        drainEvent = NULL;
2100    }
2101}
2102
2103void
2104IGbE::txStateMachine()
2105{
2106    if (!regs.tctl.en()) {
2107        txTick = false;
2108        DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
2109        return;
2110    }
2111
2112    // If we have a packet available and its length is not 0 (meaning it's not
2113    // a multidescriptor packet), put it in the fifo; otherwise, on the next
2114    // iteration we'll get the rest of the data.
2115    if (txPacket && txDescCache.packetAvailable()
2116        && !txDescCache.packetMultiDesc() && txPacket->length) {
2117        bool success;
2118
2119        anQ("TXS", "TX FIFO Q");
2120        DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
2121        success = txFifo.push(txPacket);
2122        txFifoTick = true && !drainEvent;
2123        assert(success);
2124        txPacket = NULL;
2125        anBegin("TXS", "Desc Writeback");
2126        txDescCache.writeback((cacheBlockSize()-1)>>4);
2127        return;
2128    }
2129
2130    // Only support descriptor granularity
2131    if (regs.txdctl.lwthresh() &&
2132        txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
2133        DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
2134        postInterrupt(IT_TXDLOW);
2135    }
2136
2137    if (!txPacket) {
2138        txPacket = new EthPacketData(16384);
2139    }
2140
2141    if (!txDescCache.packetWaiting()) {
2142        if (txDescCache.descLeft() == 0) {
2143            postInterrupt(IT_TXQE);
2144            anBegin("TXS", "Desc Writeback");
2145            txDescCache.writeback(0);
2146            anBegin("TXS", "Desc Fetch");
2147            anWe("TXS", txDescCache.annUnusedCacheQ);
2148            txDescCache.fetchDescriptors();
2149            DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
2150                    "writeback, stopping ticking, and posting TXQE\n");
2151            txTick = false;
2152            return;
2153        }
2154
2155
2156        if (!(txDescCache.descUnused())) {
2157            anBegin("TXS", "Desc Fetch");
2158            txDescCache.fetchDescriptors();
2159            anWe("TXS", txDescCache.annUnusedCacheQ);
2160            DPRINTF(EthernetSM, "TXS: No descriptors available in cache, "
2161                    "fetching and stopping ticking\n");
2162            txTick = false;
2163            return;
2164        }
2165        anPq("TXS", txDescCache.annUnusedCacheQ);
2166
2167
2168        txDescCache.processContextDesc();
2169        if (txDescCache.packetWaiting()) {
2170            DPRINTF(EthernetSM,
2171                    "TXS: Fetching TSO header, stopping ticking\n");
2172            txTick = false;
2173            return;
2174        }
2175
2176        unsigned size = txDescCache.getPacketSize(txPacket);
2177        if (size > 0 && txFifo.avail() > size) {
2178            anRq("TXS", "TX FIFO Q");
2179            anBegin("TXS", "DMA Packet");
2180            DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and "
2181                    "beginning DMA of next packet\n", size);
2182            txFifo.reserve(size);
2183            txDescCache.getPacketData(txPacket);
2184        } else if (size == 0) {
2185            DPRINTF(EthernetSM, "TXS: getPacketSize returned: %d\n", size);
2186            DPRINTF(EthernetSM,
2187                    "TXS: No packets to get, writing back used descriptors\n");
2188            anBegin("TXS", "Desc Writeback");
2189            txDescCache.writeback(0);
2190        } else {
2191            anWf("TXS", "TX FIFO Q");
2192            DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
2193                    "available in FIFO\n");
2194            txTick = false;
2195        }
2196
2197
2198        return;
2199    }
2200    DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
2201    txTick = false;
2202}
2203
2204bool
2205IGbE::ethRxPkt(EthPacketPtr pkt)
2206{
2207    rxBytes += pkt->length;
2208    rxPackets++;
2209
2210    DPRINTF(Ethernet, "RxFIFO: Receiving packet from wire\n");
2211    anBegin("RXQ", "Wire Recv");
2212
2213
2214    if (!regs.rctl.en()) {
2215        DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
2216        anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
2217        return true;
2218    }
2219
2220    // restart the state machines if they are stopped
2221    rxTick = true && !drainEvent;
2222    if ((rxTick || txTick) && !tickEvent.scheduled()) {
2223        DPRINTF(EthernetSM,
2224                "RXS: received packet into fifo, starting ticking\n");
2225        restartClock();
2226    }
2227
2228    if (!rxFifo.push(pkt)) {
2229        DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
2230        postInterrupt(IT_RXO, true);
2231        anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
2232        return false;
2233    }
2234
2235    if (CPA::available() && cpa->enabled()) {
2236        assert(sys->numSystemsRunning <= 2);
2237        System *other_sys;
2238        if (sys->systemList[0] == sys)
2239            other_sys = sys->systemList[1];
2240        else
2241            other_sys = sys->systemList[0];
2242
2243        cpa->hwDq(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
2244        anQ("RXQ", "RX FIFO Q");
2245        cpa->hwWe(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
2246    }
2247
2248    return true;
2249}
2250
2251
2252void
2253IGbE::rxStateMachine()
2254{
2255    if (!regs.rctl.en()) {
2256        rxTick = false;
2257        DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
2258        return;
2259    }
2260
2261    // If the packet is done check for interrupts/descriptors/etc
2262    if (rxDescCache.packetDone()) {
2263        rxDmaPacket = false;
2264        DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
2265        int descLeft = rxDescCache.descLeft();
2266        DPRINTF(EthernetSM, "RXS: descLeft: %d rdmts: %d rdlen: %d\n",
2267                descLeft, regs.rctl.rdmts(), regs.rdlen());
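        // RCTL.RDMTS selects the minimum-threshold interrupt point as a
        // fraction of the descriptor ring size (RDLEN): 0 -> 1/2, 1 -> 1/4,
        // 2 -> 1/8. The case fall-through below is intentional: once descLeft
        // is at or below the selected fraction, the later cases (which use
        // larger fractions) cannot break out either, so execution reaches
        // postInterrupt(). For example (illustrative numbers), with
        // RDLEN = 256 and RDMTS = 1 the RXDMT interrupt is posted once 64 or
        // fewer descriptors remain.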
2268        switch (regs.rctl.rdmts()) {
2269          case 2: if (descLeft > .125 * regs.rdlen()) break;
2270          case 1: if (descLeft > .250 * regs.rdlen()) break;
2271          case 0: if (descLeft > .500 * regs.rdlen())  break;
2272            DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) "
2273                    "because of descriptors left\n");
2274            postInterrupt(IT_RXDMT);
2275            break;
2276        }
2277
2278        if (rxFifo.empty())
2279            rxDescCache.writeback(0);
2280
2281        if (descLeft == 0) {
2282            anBegin("RXS", "Writeback Descriptors");
2283            rxDescCache.writeback(0);
2284            DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
2285                    " writeback and stopping ticking\n");
2286            rxTick = false;
2287        }
2288
2289        // only support descriptor granularity
2290        assert(regs.rxdctl.gran());
2291
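        // Write-back granularity: either WTHRESH - 1 descriptors at a time
        // when WTHRESH is smaller than one cache block's worth of 16-byte
        // descriptors, or a whole cache block at a time otherwise (the same
        // (cacheBlockSize() - 1) >> 4 expression used on the transmit side).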
2292        if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
2293            DPRINTF(EthernetSM,
2294                    "RXS: Writing back because WTHRESH >= descUsed\n");
2295            anBegin("RXS", "Writeback Descriptors");
2296            if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
2297                rxDescCache.writeback(regs.rxdctl.wthresh()-1);
2298            else
2299                rxDescCache.writeback((cacheBlockSize()-1)>>4);
2300        }
2301
2302        if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
2303            ((rxDescCache.descLeft() - rxDescCache.descUnused()) >
2304             regs.rxdctl.hthresh())) {
2305            DPRINTF(EthernetSM, "RXS: Fetching descriptors because "
2306                    "descUnused < PTHRESH\n");
2307            anBegin("RXS", "Fetch Descriptors");
2308            rxDescCache.fetchDescriptors();
2309        }
2310
2311        if (rxDescCache.descUnused() == 0) {
2312            anBegin("RXS", "Fetch Descriptors");
2313            rxDescCache.fetchDescriptors();
2314            anWe("RXS", rxDescCache.annUnusedCacheQ);
2315            DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
2316                    "fetching descriptors and stopping ticking\n");
2317            rxTick = false;
2318        }
2319        return;
2320    }
2321
2322    if (rxDmaPacket) {
2323        DPRINTF(EthernetSM,
2324                "RXS: stopping ticking until packet DMA completes\n");
2325        rxTick = false;
2326        return;
2327    }
2328
2329    if (!rxDescCache.descUnused()) {
2330        anBegin("RXS", "Fetch Descriptors");
2331        rxDescCache.fetchDescriptors();
2332        anWe("RXS", rxDescCache.annUnusedCacheQ);
2333        DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
2334                "fetching and stopping ticking\n");
2335        rxTick = false;
2337        return;
2338    }
2339    anPq("RXS", rxDescCache.annUnusedCacheQ);
2340
2341    if (rxFifo.empty()) {
2342        anWe("RXS", "RX FIFO Q");
2343        DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
2344        rxTick = false;
2345        return;
2346    }
2347    anPq("RXS", "RX FIFO Q");
2348    anBegin("RXS", "Get Desc");
2349
2350    EthPacketPtr pkt;
2351    pkt = rxFifo.front();
2352
2353
2354    pktOffset = rxDescCache.writePacket(pkt, pktOffset);
2355    DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
2356    if (pktOffset == pkt->length) {
2357        anBegin( "RXS", "FIFO Dequeue");
2358        DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
2359        pktOffset = 0;
2360        anDq("RXS", "RX FIFO Q");
2361        rxFifo.pop();
2362    }
2363
2364    DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
2365    rxTick = false;
2366    rxDmaPacket = true;
2367    anBegin("RXS", "DMA Packet");
2368}
2369
2370void
2371IGbE::txWire()
2372{
2373    if (txFifo.empty()) {
2374        anWe("TXQ", "TX FIFO Q");
2375        txFifoTick = false;
2376        return;
2377    }
2378
2379
2380    anPq("TXQ", "TX FIFO Q");
2381    if (etherInt->sendPacket(txFifo.front())) {
2382        cpa->hwQ(CPA::FL_NONE, sys, macAddr, "TXQ", "WireQ", 0);
2383        if (DTRACE(EthernetSM)) {
2384            IpPtr ip(txFifo.front());
2385            if (ip)
2386                DPRINTF(EthernetSM, "Transmitting Ip packet with Id=%d\n",
2387                        ip->id());
2388            else
2389                DPRINTF(EthernetSM, "Transmitting Non-Ip packet\n");
2390        }
2391        anDq("TXQ", "TX FIFO Q");
2392        anBegin("TXQ", "Wire Send");
2393        DPRINTF(EthernetSM,
2394                "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
2395                txFifo.avail());
2396
2397        txBytes += txFifo.front()->length;
2398        txPackets++;
2399        txFifoTick = false;
2400
2401        txFifo.pop();
2402    } else {
2403        // We'll get woken up when ethTxDone() gets called for this packet
2404        txFifoTick = false;
2405    }
2406}
2407
2408void
2409IGbE::tick()
2410{
2411    DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");
2412
2413    if (rxTick)
2414        rxStateMachine();
2415
2416    if (txTick)
2417        txStateMachine();
2418
2419    if (txFifoTick)
2420        txWire();
2421
2422
2423    if (rxTick || txTick || txFifoTick)
2424        schedule(tickEvent, curTick() + ticks(1));
2425}
2426
2427void
2428IGbE::ethTxDone()
2429{
2430    anBegin("TXQ", "Send Done");
2431    // restart the tx state machines if they are stopped
2432    // fifo to send another packet
2433    // tx sm to put more data into the fifo
2434    txFifoTick = true && !drainEvent;
2435    if (txDescCache.descLeft() != 0 && !drainEvent)
2436        txTick = true;
2437
2438    restartClock();
2439    txWire();
2440    DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
2441}
2442
2443void
2444IGbE::serialize(std::ostream &os)
2445{
2446    PciDev::serialize(os);
2447
2448    regs.serialize(os);
2449    SERIALIZE_SCALAR(eeOpBits);
2450    SERIALIZE_SCALAR(eeAddrBits);
2451    SERIALIZE_SCALAR(eeDataBits);
2452    SERIALIZE_SCALAR(eeOpcode);
2453    SERIALIZE_SCALAR(eeAddr);
2454    SERIALIZE_SCALAR(lastInterrupt);
2455    SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);
2456
2457    rxFifo.serialize("rxfifo", os);
2458    txFifo.serialize("txfifo", os);
2459
2460    bool txPktExists = txPacket;
2461    SERIALIZE_SCALAR(txPktExists);
2462    if (txPktExists)
2463        txPacket->serialize("txpacket", os);
2464
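    // Interrupt-delay event times are checkpointed as absolute ticks; a value
    // of 0 means the corresponding event was not scheduled, and unserialize()
    // only reschedules events whose saved time is non-zero.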
2465    Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
2466        inter_time = 0;
2467
2468    if (rdtrEvent.scheduled())
2469        rdtr_time = rdtrEvent.when();
2470    SERIALIZE_SCALAR(rdtr_time);
2471
2472    if (radvEvent.scheduled())
2473        radv_time = radvEvent.when();
2474    SERIALIZE_SCALAR(radv_time);
2475
2476    if (tidvEvent.scheduled())
2477        tidv_time = tidvEvent.when();
2478    SERIALIZE_SCALAR(tidv_time);
2479
2480    if (tadvEvent.scheduled())
2481        tadv_time = tadvEvent.when();
2482    SERIALIZE_SCALAR(tadv_time);
2483
2484    if (interEvent.scheduled())
2485        inter_time = interEvent.when();
2486    SERIALIZE_SCALAR(inter_time);
2487
2488    SERIALIZE_SCALAR(pktOffset);
2489
2490    nameOut(os, csprintf("%s.TxDescCache", name()));
2491    txDescCache.serialize(os);
2492
2493    nameOut(os, csprintf("%s.RxDescCache", name()));
2494    rxDescCache.serialize(os);
2495}
2496
2497void
2498IGbE::unserialize(Checkpoint *cp, const std::string &section)
2499{
2500    PciDev::unserialize(cp, section);
2501
2502    regs.unserialize(cp, section);
2503    UNSERIALIZE_SCALAR(eeOpBits);
2504    UNSERIALIZE_SCALAR(eeAddrBits);
2505    UNSERIALIZE_SCALAR(eeDataBits);
2506    UNSERIALIZE_SCALAR(eeOpcode);
2507    UNSERIALIZE_SCALAR(eeAddr);
2508    UNSERIALIZE_SCALAR(lastInterrupt);
2509    UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);
2510
2511    rxFifo.unserialize("rxfifo", cp, section);
2512    txFifo.unserialize("txfifo", cp, section);
2513
2514    bool txPktExists;
2515    UNSERIALIZE_SCALAR(txPktExists);
2516    if (txPktExists) {
2517        txPacket = new EthPacketData(16384);
2518        txPacket->unserialize("txpacket", cp, section);
2519    }
2520
2521    rxTick = true;
2522    txTick = true;
2523    txFifoTick = true;
2524
2525    Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
2526    UNSERIALIZE_SCALAR(rdtr_time);
2527    UNSERIALIZE_SCALAR(radv_time);
2528    UNSERIALIZE_SCALAR(tidv_time);
2529    UNSERIALIZE_SCALAR(tadv_time);
2530    UNSERIALIZE_SCALAR(inter_time);
2531
2532    if (rdtr_time)
2533        schedule(rdtrEvent, rdtr_time);
2534
2535    if (radv_time)
2536        schedule(radvEvent, radv_time);
2537
2538    if (tidv_time)
2539        schedule(tidvEvent, tidv_time);
2540
2541    if (tadv_time)
2542        schedule(tadvEvent, tadv_time);
2543
2544    if (inter_time)
2545        schedule(interEvent, inter_time);
2546
2547    UNSERIALIZE_SCALAR(pktOffset);
2548
2549    txDescCache.unserialize(cp, csprintf("%s.TxDescCache", section));
2550
2551    rxDescCache.unserialize(cp, csprintf("%s.RxDescCache", section));
2552}
2553
2554IGbE *
2555IGbEParams::create()
2556{
2557    return new IGbE(this);
2558}
2559