// i8254xGBe.cc (revision annotation from code browser: 11701:5e7599457b97 -> 11719:e832056deaed)
1/*
2 * Copyright (c) 2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Ali Saidi
29 */
30
31/* @file
32 * Device model for Intel's 8254x line of gigabit ethernet controllers.
33 * In particular an 82547 revision 2 (82547GI) MAC because it seems to have the
34 * fewest workarounds in the driver. It will probably work with most of the
35 * other MACs with slight modifications.
36 */
37
38#include "dev/net/i8254xGBe.hh"
39
40/*
41 * @todo really there are multiple dma engines.. we should implement them.
42 */
43
44#include <algorithm>
45#include <memory>
46
47#include "base/inet.hh"
48#include "base/trace.hh"
49#include "debug/Drain.hh"
50#include "debug/EthernetAll.hh"
51#include "mem/packet.hh"
52#include "mem/packet_access.hh"
53#include "params/IGbE.hh"
54#include "sim/stats.hh"
55#include "sim/system.hh"
56
57using namespace iGbReg;
58using namespace Net;
59
// Construct the 8254x device model: give the device registers their
// documented power-on defaults, synthesize a 64-word EEPROM image that
// holds the MAC address plus a valid checksum word, and start with
// empty RX/TX FIFOs.
IGbE::IGbE(const Params *p)
    : EtherDevice(p), etherInt(NULL), cpa(NULL),
      rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), rxTick(false),
      txTick(false), txFifoTick(false), rxDmaPacket(false), pktOffset(0),
      fetchDelay(p->fetch_delay), wbDelay(p->wb_delay),
      fetchCompDelay(p->fetch_comp_delay), wbCompDelay(p->wb_comp_delay),
      rxWriteDelay(p->rx_write_delay), txReadDelay(p->tx_read_delay),
      rdtrEvent(this), radvEvent(this),
      tadvEvent(this), tidvEvent(this), tickEvent(this), interEvent(this),
      rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size),
      txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size),
      lastInterrupt(0)
{
    etherInt = new IGbEInt(name() + ".int", this);

    // Initialize internal registers per Intel documentation
    // All registers initialized to 0 by per register constructor
    regs.ctrl.fd(1);
    regs.ctrl.lrst(1);
    regs.ctrl.speed(2);
    regs.ctrl.frcspd(1);
    regs.sts.speed(3); // Say we're 1000Mbps
    regs.sts.fd(1); // full duplex
    regs.sts.lu(1); // link up
    regs.eecd.fwe(1);
    regs.eecd.ee_type(1);
    regs.imr = 0;
    regs.iam = 0;
    regs.rxdctl.gran(1);
    regs.rxdctl.wthresh(1);
    regs.fcrth(1);
    regs.tdwba = 0;
    regs.rlpml = 0;
    regs.sw_fw_sync = 0;

    regs.pba.rxa(0x30);
    regs.pba.txa(0x10);

    // Serial-EEPROM bit-bang emulation state (consumed by the REG_EECD
    // write handler): opcode/address shift registers and bit counters.
    eeOpBits = 0;
    eeAddrBits = 0;
    eeDataBits = 0;
    eeOpcode = 0;

    // clear all 64 16 bit words of the eeprom
    memset(&flash, 0, EEPROM_SIZE*2);

    // Set the MAC address
    memcpy(flash, p->hardware_address.bytes(), ETH_ADDR_LEN);
    for (int x = 0; x < ETH_ADDR_LEN/2; x++)
        flash[x] = htobe(flash[x]);

    // Checksum the words after byte-swapping; the final word is chosen so
    // the 16-bit sum of all words equals the magic EEPROM_CSUM value.
    uint16_t csum = 0;
    for (int x = 0; x < EEPROM_SIZE; x++)
        csum += htobe(flash[x]);


    // Magic happy checksum value
    flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum));

    // Store the MAC address as queue ID
    macAddr = p->hardware_address;

    rxFifo.clear();
    txFifo.clear();
}
125
IGbE::~IGbE()
{
    delete etherInt; // allocated in the constructor
}
130
// Late initialization: cache the global CPA annotation object and let
// the generic PCI device finish its own setup.
void
IGbE::init()
{
    cpa = CPA::cpa();
    PciDevice::init();
}
137
138EtherInt*
139IGbE::getEthPort(const std::string &if_name, int idx)
140{
141
142 if (if_name == "interface") {
143 if (etherInt->getPeer())
144 panic("Port already connected to\n");
145 return etherInt;
146 }
147 return NULL;
148}
149
150Tick
151IGbE::writeConfig(PacketPtr pkt)
152{
153 int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
154 if (offset < PCI_DEVICE_SPECIFIC)
155 PciDevice::writeConfig(pkt);
156 else
157 panic("Device specific PCI config space not implemented.\n");
158
159 //
160 // Some work may need to be done here based for the pci COMMAND bits.
161 //
162
163 return configDelay;
164}
165
166// Handy macro for range-testing register access addresses
167#define IN_RANGE(val, base, len) (val >= base && val < (base + len))
168
// Handle a 32-bit MMIO read of a device register. Most cases simply
// return the stored register value, but note that several reads have
// architectural side effects (ICR auto-clear, RDTR.FPD flush, SWSM
// semaphore set), so the statement order below is part of the device
// contract.
Tick
IGbE::read(PacketPtr pkt)
{
    int bar;
    Addr daddr;

    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    // Only 32bit accesses allowed
    assert(pkt->getSize() == 4);

    DPRINTF(Ethernet, "Read device register %#X\n", daddr);

    //
    // Handle read of register here
    //


    switch (daddr) {
      case REG_CTRL:
        pkt->set<uint32_t>(regs.ctrl());
        break;
      case REG_STATUS:
        pkt->set<uint32_t>(regs.sts());
        break;
      case REG_EECD:
        pkt->set<uint32_t>(regs.eecd());
        break;
      case REG_EERD:
        pkt->set<uint32_t>(regs.eerd());
        break;
      case REG_CTRL_EXT:
        pkt->set<uint32_t>(regs.ctrl_ext());
        break;
      case REG_MDIC:
        pkt->set<uint32_t>(regs.mdic());
        break;
      case REG_ICR:
        DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
                regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
        pkt->set<uint32_t>(regs.icr());
        // Reading ICR clears the cause bits when the interrupt is
        // asserted or when all interrupts are masked.
        if (regs.icr.int_assert() || regs.imr == 0) {
            regs.icr = regs.icr() & ~mask(30);
            DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr());
        }
        // Interrupt Acknowledge Auto Mask: reading ICR also masks the
        // interrupts selected in IAM.
        if (regs.ctrl_ext.iame() && regs.icr.int_assert())
            regs.imr &= ~regs.iam;
        chkInterrupt();
        break;
      case REG_EICR:
        // This is only useful for MSI, but the driver reads it every time
        // Just don't do anything
        pkt->set<uint32_t>(0);
        break;
      case REG_ITR:
        pkt->set<uint32_t>(regs.itr());
        break;
      case REG_RCTL:
        pkt->set<uint32_t>(regs.rctl());
        break;
      case REG_FCTTV:
        pkt->set<uint32_t>(regs.fcttv());
        break;
      case REG_TCTL:
        pkt->set<uint32_t>(regs.tctl());
        break;
      case REG_PBA:
        pkt->set<uint32_t>(regs.pba());
        break;
      case REG_WUC:
      case REG_LEDCTL:
        pkt->set<uint32_t>(0); // We don't care, so just return 0
        break;
      case REG_FCRTL:
        pkt->set<uint32_t>(regs.fcrtl());
        break;
      case REG_FCRTH:
        pkt->set<uint32_t>(regs.fcrth());
        break;
      case REG_RDBAL:
        pkt->set<uint32_t>(regs.rdba.rdbal());
        break;
      case REG_RDBAH:
        pkt->set<uint32_t>(regs.rdba.rdbah());
        break;
      case REG_RDLEN:
        pkt->set<uint32_t>(regs.rdlen());
        break;
      case REG_SRRCTL:
        pkt->set<uint32_t>(regs.srrctl());
        break;
      case REG_RDH:
        pkt->set<uint32_t>(regs.rdh());
        break;
      case REG_RDT:
        pkt->set<uint32_t>(regs.rdt());
        break;
      case REG_RDTR:
        pkt->set<uint32_t>(regs.rdtr());
        // Flush Partial Descriptor: write back cached RX descriptors
        // immediately and raise an RX timer interrupt.
        if (regs.rdtr.fpd()) {
            rxDescCache.writeback(0);
            DPRINTF(EthernetIntr,
                    "Posting interrupt because of RDTR.FPD write\n");
            postInterrupt(IT_RXT);
            regs.rdtr.fpd(0);
        }
        break;
      case REG_RXDCTL:
        pkt->set<uint32_t>(regs.rxdctl());
        break;
      case REG_RADV:
        pkt->set<uint32_t>(regs.radv());
        break;
      case REG_TDBAL:
        pkt->set<uint32_t>(regs.tdba.tdbal());
        break;
      case REG_TDBAH:
        pkt->set<uint32_t>(regs.tdba.tdbah());
        break;
      case REG_TDLEN:
        pkt->set<uint32_t>(regs.tdlen());
        break;
      case REG_TDH:
        pkt->set<uint32_t>(regs.tdh());
        break;
      case REG_TXDCA_CTL:
        pkt->set<uint32_t>(regs.txdca_ctl());
        break;
      case REG_TDT:
        pkt->set<uint32_t>(regs.tdt());
        break;
      case REG_TIDV:
        pkt->set<uint32_t>(regs.tidv());
        break;
      case REG_TXDCTL:
        pkt->set<uint32_t>(regs.txdctl());
        break;
      case REG_TADV:
        pkt->set<uint32_t>(regs.tadv());
        break;
      case REG_TDWBAL:
        // TDWBA is kept as one 64-bit value; split it across the two
        // 32-bit architectural registers.
        pkt->set<uint32_t>(regs.tdwba & mask(32));
        break;
      case REG_TDWBAH:
        pkt->set<uint32_t>(regs.tdwba >> 32);
        break;
      case REG_RXCSUM:
        pkt->set<uint32_t>(regs.rxcsum());
        break;
      case REG_RLPML:
        pkt->set<uint32_t>(regs.rlpml);
        break;
      case REG_RFCTL:
        pkt->set<uint32_t>(regs.rfctl());
        break;
      case REG_MANC:
        pkt->set<uint32_t>(regs.manc());
        break;
      case REG_SWSM:
        // Reading SWSM returns the old value and sets the software
        // semaphore bit (hardware semaphore acquire semantics).
        pkt->set<uint32_t>(regs.swsm());
        regs.swsm.smbi(1);
        break;
      case REG_FWSM:
        pkt->set<uint32_t>(regs.fwsm());
        break;
      case REG_SWFWSYNC:
        pkt->set<uint32_t>(regs.sw_fw_sync);
        break;
      default:
        // The filter/address/statistics table regions read as zero;
        // anything else is an unmodeled register.
        if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) &&
            !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) &&
            !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4) &&
            !IN_RANGE(daddr, REG_CRCERRS, STATS_REGS_SIZE))
            panic("Read request to unknown register number: %#x\n", daddr);
        else
            pkt->set<uint32_t>(0);
    };

    pkt->makeAtomicResponse();
    return pioDelay;
}
354
355Tick
356IGbE::write(PacketPtr pkt)
357{
358 int bar;
359 Addr daddr;
360
361
362 if (!getBAR(pkt->getAddr(), bar, daddr))
363 panic("Invalid PCI memory access to unmapped memory.\n");
364
365 // Only Memory register BAR is allowed
366 assert(bar == 0);
367
368 // Only 32bit accesses allowed
369 assert(pkt->getSize() == sizeof(uint32_t));
370
371 DPRINTF(Ethernet, "Wrote device register %#X value %#X\n",
372 daddr, pkt->get<uint32_t>());
373
374 //
375 // Handle write of register here
376 //
377 uint32_t val = pkt->get<uint32_t>();
378
379 Regs::RCTL oldrctl;
380 Regs::TCTL oldtctl;
381
382 switch (daddr) {
383 case REG_CTRL:
384 regs.ctrl = val;
385 if (regs.ctrl.tfce())
386 warn("TX Flow control enabled, should implement\n");
387 if (regs.ctrl.rfce())
388 warn("RX Flow control enabled, should implement\n");
389 break;
390 case REG_CTRL_EXT:
391 regs.ctrl_ext = val;
392 break;
393 case REG_STATUS:
394 regs.sts = val;
395 break;
396 case REG_EECD:
397 int oldClk;
398 oldClk = regs.eecd.sk();
399 regs.eecd = val;
400 // See if this is a eeprom access and emulate accordingly
401 if (!oldClk && regs.eecd.sk()) {
402 if (eeOpBits < 8) {
403 eeOpcode = eeOpcode << 1 | regs.eecd.din();
404 eeOpBits++;
405 } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
406 eeAddr = eeAddr << 1 | regs.eecd.din();
407 eeAddrBits++;
408 } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
409 assert(eeAddr>>1 < EEPROM_SIZE);
410 DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n",
411 flash[eeAddr>>1] >> eeDataBits & 0x1,
412 flash[eeAddr>>1]);
413 regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1);
414 eeDataBits++;
415 } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) {
416 regs.eecd.dout(0);
417 eeDataBits++;
418 } else
419 panic("What's going on with eeprom interface? opcode:"
420 " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode,
421 (uint32_t)eeOpBits, (uint32_t)eeAddr,
422 (uint32_t)eeAddrBits, (uint32_t)eeDataBits);
423
424 // Reset everything for the next command
425 if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) ||
426 (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) {
427 eeOpBits = 0;
428 eeAddrBits = 0;
429 eeDataBits = 0;
430 eeOpcode = 0;
431 eeAddr = 0;
432 }
433
434 DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
435 (uint32_t)eeOpcode, (uint32_t) eeOpBits,
436 (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits);
437 if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI ||
438 eeOpcode == EEPROM_RDSR_OPCODE_SPI ))
439 panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode,
440 (uint32_t)eeOpBits);
441
442
443 }
444 // If driver requests eeprom access, immediately give it to it
445 regs.eecd.ee_gnt(regs.eecd.ee_req());
446 break;
447 case REG_EERD:
448 regs.eerd = val;
449 if (regs.eerd.start()) {
450 regs.eerd.done(1);
451 assert(regs.eerd.addr() < EEPROM_SIZE);
452 regs.eerd.data(flash[regs.eerd.addr()]);
453 regs.eerd.start(0);
454 DPRINTF(EthernetEEPROM, "EEPROM: read addr: %#X data %#x\n",
455 regs.eerd.addr(), regs.eerd.data());
456 }
457 break;
458 case REG_MDIC:
459 regs.mdic = val;
460 if (regs.mdic.i())
461 panic("No support for interrupt on mdic complete\n");
462 if (regs.mdic.phyadd() != 1)
463 panic("No support for reading anything but phy\n");
464 DPRINTF(Ethernet, "%s phy address %x\n",
465 regs.mdic.op() == 1 ? "Writing" : "Reading",
466 regs.mdic.regadd());
467 switch (regs.mdic.regadd()) {
468 case PHY_PSTATUS:
469 regs.mdic.data(0x796D); // link up
470 break;
471 case PHY_PID:
472 regs.mdic.data(params()->phy_pid);
473 break;
474 case PHY_EPID:
475 regs.mdic.data(params()->phy_epid);
476 break;
477 case PHY_GSTATUS:
478 regs.mdic.data(0x7C00);
479 break;
480 case PHY_EPSTATUS:
481 regs.mdic.data(0x3000);
482 break;
483 case PHY_AGC:
484 regs.mdic.data(0x180); // some random length
485 break;
486 default:
487 regs.mdic.data(0);
488 }
489 regs.mdic.r(1);
490 break;
491 case REG_ICR:
492 DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
493 regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
494 if (regs.ctrl_ext.iame())
495 regs.imr &= ~regs.iam;
496 regs.icr = ~bits(val,30,0) & regs.icr();
497 chkInterrupt();
498 break;
499 case REG_ITR:
500 regs.itr = val;
501 break;
502 case REG_ICS:
503 DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n");
504 postInterrupt((IntTypes)val);
505 break;
506 case REG_IMS:
507 regs.imr |= val;
508 chkInterrupt();
509 break;
510 case REG_IMC:
511 regs.imr &= ~val;
512 chkInterrupt();
513 break;
514 case REG_IAM:
515 regs.iam = val;
516 break;
517 case REG_RCTL:
518 oldrctl = regs.rctl;
519 regs.rctl = val;
520 if (regs.rctl.rst()) {
521 rxDescCache.reset();
522 DPRINTF(EthernetSM, "RXS: Got RESET!\n");
523 rxFifo.clear();
524 regs.rctl.rst(0);
525 }
526 if (regs.rctl.en())
527 rxTick = true;
528 restartClock();
529 break;
530 case REG_FCTTV:
531 regs.fcttv = val;
532 break;
533 case REG_TCTL:
534 regs.tctl = val;
535 oldtctl = regs.tctl;
536 regs.tctl = val;
537 if (regs.tctl.en())
538 txTick = true;
539 restartClock();
540 if (regs.tctl.en() && !oldtctl.en()) {
541 txDescCache.reset();
542 }
543 break;
544 case REG_PBA:
545 regs.pba.rxa(val);
546 regs.pba.txa(64 - regs.pba.rxa());
547 break;
548 case REG_WUC:
549 case REG_LEDCTL:
550 case REG_FCAL:
551 case REG_FCAH:
552 case REG_FCT:
553 case REG_VET:
554 case REG_AIFS:
555 case REG_TIPG:
556 ; // We don't care, so don't store anything
557 break;
558 case REG_IVAR0:
559 warn("Writing to IVAR0, ignoring...\n");
560 break;
561 case REG_FCRTL:
562 regs.fcrtl = val;
563 break;
564 case REG_FCRTH:
565 regs.fcrth = val;
566 break;
567 case REG_RDBAL:
568 regs.rdba.rdbal( val & ~mask(4));
569 rxDescCache.areaChanged();
570 break;
571 case REG_RDBAH:
572 regs.rdba.rdbah(val);
573 rxDescCache.areaChanged();
574 break;
575 case REG_RDLEN:
576 regs.rdlen = val & ~mask(7);
577 rxDescCache.areaChanged();
578 break;
579 case REG_SRRCTL:
580 regs.srrctl = val;
581 break;
582 case REG_RDH:
583 regs.rdh = val;
584 rxDescCache.areaChanged();
585 break;
586 case REG_RDT:
587 regs.rdt = val;
588 DPRINTF(EthernetSM, "RXS: RDT Updated.\n");
589 if (drainState() == DrainState::Running) {
590 DPRINTF(EthernetSM, "RXS: RDT Fetching Descriptors!\n");
591 rxDescCache.fetchDescriptors();
592 } else {
593 DPRINTF(EthernetSM, "RXS: RDT NOT Fetching Desc b/c draining!\n");
594 }
595 break;
596 case REG_RDTR:
597 regs.rdtr = val;
598 break;
599 case REG_RADV:
600 regs.radv = val;
601 break;
602 case REG_RXDCTL:
603 regs.rxdctl = val;
604 break;
605 case REG_TDBAL:
606 regs.tdba.tdbal( val & ~mask(4));
607 txDescCache.areaChanged();
608 break;
609 case REG_TDBAH:
610 regs.tdba.tdbah(val);
611 txDescCache.areaChanged();
612 break;
613 case REG_TDLEN:
614 regs.tdlen = val & ~mask(7);
615 txDescCache.areaChanged();
616 break;
617 case REG_TDH:
618 regs.tdh = val;
619 txDescCache.areaChanged();
620 break;
621 case REG_TXDCA_CTL:
622 regs.txdca_ctl = val;
623 if (regs.txdca_ctl.enabled())
624 panic("No support for DCA\n");
625 break;
626 case REG_TDT:
627 regs.tdt = val;
628 DPRINTF(EthernetSM, "TXS: TX Tail pointer updated\n");
629 if (drainState() == DrainState::Running) {
630 DPRINTF(EthernetSM, "TXS: TDT Fetching Descriptors!\n");
631 txDescCache.fetchDescriptors();
632 } else {
633 DPRINTF(EthernetSM, "TXS: TDT NOT Fetching Desc b/c draining!\n");
634 }
635 break;
636 case REG_TIDV:
637 regs.tidv = val;
638 break;
639 case REG_TXDCTL:
640 regs.txdctl = val;
641 break;
642 case REG_TADV:
643 regs.tadv = val;
644 break;
645 case REG_TDWBAL:
646 regs.tdwba &= ~mask(32);
647 regs.tdwba |= val;
648 txDescCache.completionWriteback(regs.tdwba & ~mask(1),
649 regs.tdwba & mask(1));
650 break;
651 case REG_TDWBAH:
652 regs.tdwba &= mask(32);
653 regs.tdwba |= (uint64_t)val << 32;
654 txDescCache.completionWriteback(regs.tdwba & ~mask(1),
655 regs.tdwba & mask(1));
656 break;
657 case REG_RXCSUM:
658 regs.rxcsum = val;
659 break;
660 case REG_RLPML:
661 regs.rlpml = val;
662 break;
663 case REG_RFCTL:
664 regs.rfctl = val;
665 if (regs.rfctl.exsten())
666 panic("Extended RX descriptors not implemented\n");
667 break;
668 case REG_MANC:
669 regs.manc = val;
670 break;
671 case REG_SWSM:
672 regs.swsm = val;
673 if (regs.fwsm.eep_fw_semaphore())
674 regs.swsm.swesmbi(0);
675 break;
676 case REG_SWFWSYNC:
677 regs.sw_fw_sync = val;
678 break;
679 default:
680 if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) &&
681 !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) &&
682 !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4))
683 panic("Write request to unknown register number: %#x\n", daddr);
684 };
685
686 pkt->makeAtomicResponse();
687 return pioDelay;
688}
689
// Record the interrupt cause t in ICR and either post the interrupt to
// the CPU immediately or, when ITR-based interrupt throttling is active
// and `now` is false, schedule it for lastInterrupt + ITR interval.
void
IGbE::postInterrupt(IntTypes t, bool now)
{
    assert(t);

    // Interrupt is already pending
    if (t & regs.icr() && !now)
        return;

    regs.icr = regs.icr() | t;

    // ITR interval is specified in units of 256 ns.
    Tick itr_interval = SimClock::Int::ns * 256 * regs.itr.interval();
    DPRINTF(EthernetIntr,
            "EINT: postInterrupt() curTick(): %d itr: %d interval: %d\n",
            curTick(), regs.itr.interval(), itr_interval);

    if (regs.itr.interval() == 0 || now ||
        lastInterrupt + itr_interval <= curTick()) {
        // Throttling disabled, forced, or enough time has passed since
        // the last interrupt: post right away.
        if (interEvent.scheduled()) {
            deschedule(interEvent);
        }
        cpuPostInt();
    } else {
        // Throttle: defer the post until the ITR window expires.
        Tick int_time = lastInterrupt + itr_interval;
        assert(int_time > 0);
        DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for tick %d\n",
                int_time);
        if (!interEvent.scheduled()) {
            schedule(interEvent, int_time);
        }
    }
}
722
// Callback for interEvent: the ITR throttling delay has expired, so
// actually post the deferred interrupt to the CPU.
void
IGbE::delayIntEvent()
{
    cpuPostInt();
}
728
729
// Assert the interrupt line to the CPU. Any pending delay-timer events
// (RX/TX absolute and packet timers) are folded into this interrupt:
// their cause bits are set and the events descheduled before the line
// is raised. Does nothing if all pending causes are masked by IMR.
void
IGbE::cpuPostInt()
{

    postedInterrupts++;

    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Interrupt Masked. Not Posting\n");
        return;
    }

    DPRINTF(Ethernet, "Posting Interrupt\n");


    if (interEvent.scheduled()) {
        deschedule(interEvent);
    }

    // Collapse any armed RX/TX interrupt delay timers into this post.
    if (rdtrEvent.scheduled()) {
        regs.icr.rxt0(1);
        deschedule(rdtrEvent);
    }
    if (radvEvent.scheduled()) {
        regs.icr.rxt0(1);
        deschedule(radvEvent);
    }
    if (tadvEvent.scheduled()) {
        regs.icr.txdw(1);
        deschedule(tadvEvent);
    }
    if (tidvEvent.scheduled()) {
        regs.icr.txdw(1);
        deschedule(tidvEvent);
    }

    regs.icr.int_assert(1);
    DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n",
            regs.icr());

    intrPost();

    // Remember when we fired for ITR throttling in postInterrupt().
    lastInterrupt = curTick();
}
773
774void
775IGbE::cpuClearInt()
776{
777 if (regs.icr.int_assert()) {
778 regs.icr.int_assert(0);
779 DPRINTF(EthernetIntr,
780 "EINT: Clearing interrupt to CPU now. Vector %#x\n",
781 regs.icr());
782 intrClear();
783 }
784}
785
// Re-evaluate the interrupt state after ICR/IMR changed: clear the CPU
// interrupt if every pending cause is now masked, or (re)post/schedule
// one if unmasked causes remain, honoring the ITR throttling interval.
void
IGbE::chkInterrupt()
{
    DPRINTF(Ethernet, "Checking interrupts icr: %#x imr: %#x\n", regs.icr(),
            regs.imr);
    // Check if we need to clear the cpu interrupt
    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Mask cleaned all interrupts\n");
        if (interEvent.scheduled())
            deschedule(interEvent);
        if (regs.icr.int_assert())
            cpuClearInt();
    }
    DPRINTF(Ethernet, "ITR = %#X itr.interval = %#X\n",
            regs.itr(), regs.itr.interval());

    if (regs.icr() & regs.imr) {
        if (regs.itr.interval() == 0) {
            // No throttling: post immediately.
            cpuPostInt();
        } else {
            DPRINTF(Ethernet,
                    "Possibly scheduling interrupt because of imr write\n");
            if (!interEvent.scheduled()) {
                // ITR interval is in units of 256 ns.
                Tick t = curTick() + SimClock::Int::ns * 256 * regs.itr.interval();
                DPRINTF(Ethernet, "Scheduling for %d\n", t);
                schedule(interEvent, t);
            }
        }
    }
}
816
817
818///////////////////////////// IGbE::DescCache //////////////////////////////
819
// Descriptor cache constructor: allocates the DMA staging buffers used
// for fetching descriptors from and writing them back to host memory.
// `s` is the maximum number of descriptors cached at once.
template<class T>
IGbE::DescCache<T>::DescCache(IGbE *i, const std::string n, int s)
    : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0),
      wbOut(0), moreToWb(false), wbAlignment(0), pktPtr(NULL),
      wbDelayEvent(this), fetchDelayEvent(this), fetchEvent(this),
      wbEvent(this)
{
    fetchBuf = new T[size];
    wbBuf = new T[size];
}
830
// Destructor: free all cached descriptors (via reset()) and the DMA
// staging buffers.
template<class T>
IGbE::DescCache<T>::~DescCache()
{
    reset();
    delete[] fetchBuf;
    delete[] wbBuf;
}
838
839template<class T>
840void
841IGbE::DescCache<T>::areaChanged()
842{
843 if (usedCache.size() > 0 || curFetching || wbOut)
844 panic("Descriptor Address, Length or Head changed. Bad\n");
845 reset();
846
847}
848
// Write used descriptors back to host memory. aMask is an alignment
// mask: the number written back is rounded down so the new head stays
// aligned to it (0 = write back everything). Writebacks that wrap the
// ring end are split, with the remainder flagged via moreToWb.
template<class T>
void
IGbE::DescCache<T>::writeback(Addr aMask)
{
    int curHead = descHead();
    int max_to_wb = usedCache.size();

    // Check if this writeback is less restrictive that the previous
    // and if so setup another one immediately following it
    if (wbOut) {
        if (aMask < wbAlignment) {
            moreToWb = true;
            wbAlignment = aMask;
        }
        DPRINTF(EthernetDesc,
                "Writing back already in process, returning\n");
        return;
    }

    moreToWb = false;
    wbAlignment = aMask;


    DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
            "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
            curHead, descTail(), descLen(), cachePnt, max_to_wb,
            descLeft());

    if (max_to_wb + curHead >= descLen()) {
        // Would wrap past the end of the ring: write up to the end now
        // and schedule a follow-up writeback for the rest.
        max_to_wb = descLen() - curHead;
        moreToWb = true;
        // this is by definition aligned correctly
    } else if (wbAlignment != 0) {
        // align the wb point to the mask
        max_to_wb = max_to_wb & ~wbAlignment;
    }

    DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);

    if (max_to_wb <= 0) {
        // Nothing writable yet (alignment rounding produced 0).
        if (usedCache.size())
            igbe->anBegin(annSmWb, "Wait Alignment", CPA::FL_WAIT);
        else
            igbe->anWe(annSmWb, annUsedCacheQ);
        return;
    }

    wbOut = max_to_wb;

    // Model the delay before the writeback DMA is issued.
    assert(!wbDelayEvent.scheduled());
    igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
    igbe->anBegin(annSmWb, "Prepare Writeback Desc");
}
902
// Second writeback stage (wbDelayEvent callback): copy the descriptors
// into the staging buffer and issue the DMA write. Re-arms itself while
// the device is draining so no new DMA starts during a drain.
template<class T>
void
IGbE::DescCache<T>::writeback1()
{
    // If we're draining delay issuing this DMA
    if (igbe->drainState() != DrainState::Running) {
        igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
        return;
    }

    DPRINTF(EthernetDesc, "Begining DMA of %d descriptors\n", wbOut);

    // Stage the first wbOut used descriptors into the DMA buffer.
    for (int x = 0; x < wbOut; x++) {
        assert(usedCache.size());
        memcpy(&wbBuf[x], usedCache[x], sizeof(T));
        igbe->anPq(annSmWb, annUsedCacheQ);
        igbe->anPq(annSmWb, annDescQ);
        igbe->anQ(annSmWb, annUsedDescQ);
    }


    igbe->anBegin(annSmWb, "Writeback Desc DMA");

    assert(wbOut);
    igbe->dmaWrite(pciToDma(descBase() + descHead() * sizeof(T)),
                   wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf,
                   igbe->wbCompDelay);
}
931
// Start fetching descriptors from host memory into the cache. Fetches
// at most up to the ring tail (or the ring end if the tail has
// wrapped), limited by the free space in the cache. No-op if a fetch
// is already outstanding or there is nothing to fetch.
template<class T>
void
IGbE::DescCache<T>::fetchDescriptors()
{
    size_t max_to_fetch;

    if (curFetching) {
        DPRINTF(EthernetDesc,
                "Currently fetching %d descriptors, returning\n",
                curFetching);
        return;
    }

    // Fetch only up to the tail; a wrapped tail means fetch to ring end
    // now and pick up the rest on a later call.
    if (descTail() >= cachePnt)
        max_to_fetch = descTail() - cachePnt;
    else
        max_to_fetch = descLen() - cachePnt;

    size_t free_cache = size - usedCache.size() - unusedCache.size();

    if (!max_to_fetch)
        igbe->anWe(annSmFetch, annUnusedDescQ);
    else
        igbe->anPq(annSmFetch, annUnusedDescQ, max_to_fetch);

    if (max_to_fetch) {
        if (!free_cache)
            igbe->anWf(annSmFetch, annDescQ);
        else
            igbe->anRq(annSmFetch, annDescQ, free_cache);
    }

    max_to_fetch = std::min(max_to_fetch, free_cache);


    DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
            "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
            descHead(), descTail(), descLen(), cachePnt,
            max_to_fetch, descLeft());

    // Nothing to do
    if (max_to_fetch == 0)
        return;

    // So we don't have two descriptor fetches going on at once
    curFetching = max_to_fetch;

    // Model the delay before the fetch DMA is issued.
    assert(!fetchDelayEvent.scheduled());
    igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
    igbe->anBegin(annSmFetch, "Prepare Fetch Desc");
}
983
// Second fetch stage (fetchDelayEvent callback): issue the DMA read for
// the descriptors selected by fetchDescriptors(). Re-arms itself while
// the device is draining so no new DMA starts during a drain.
template<class T>
void
IGbE::DescCache<T>::fetchDescriptors1()
{
    // If we're draining delay issuing this DMA
    if (igbe->drainState() != DrainState::Running) {
        igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
        return;
    }

    igbe->anBegin(annSmFetch, "Fetch Desc");

    DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
            descBase() + cachePnt * sizeof(T),
            pciToDma(descBase() + cachePnt * sizeof(T)),
            curFetching * sizeof(T));
    assert(curFetching);
    igbe->dmaRead(pciToDma(descBase() + cachePnt * sizeof(T)),
                  curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf,
                  igbe->fetchCompDelay);
}
1005
// DMA-read completion handler: move the freshly fetched descriptors
// from the staging buffer into the unused cache, advance (and possibly
// wrap) the cache pointer, and wake the owning state machine.
template<class T>
void
IGbE::DescCache<T>::fetchComplete()
{
    T *newDesc;
    igbe->anBegin(annSmFetch, "Fetch Complete");
    // Copy each staged descriptor into its own heap object owned by
    // unusedCache (freed in reset()/wbComplete()).
    for (int x = 0; x < curFetching; x++) {
        newDesc = new T;
        memcpy(newDesc, &fetchBuf[x], sizeof(T));
        unusedCache.push_back(newDesc);
        igbe->anDq(annSmFetch, annUnusedDescQ);
        igbe->anQ(annSmFetch, annUnusedCacheQ);
        igbe->anQ(annSmFetch, annDescQ);
    }


#ifndef NDEBUG
    int oldCp = cachePnt;
#endif

    cachePnt += curFetching;
    assert(cachePnt <= descLen());
    if (cachePnt == descLen())
        cachePnt = 0;

    curFetching = 0;

    DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
            oldCp, cachePnt);

    // Annotate what we're now waiting on: more descriptors from the
    // driver, free cache space, or the state machine.
    if ((descTail() >= cachePnt ? (descTail() - cachePnt) : (descLen() -
                                                             cachePnt)) == 0)
    {
        igbe->anWe(annSmFetch, annUnusedDescQ);
    } else if (!(size - usedCache.size() - unusedCache.size())) {
        igbe->anWf(annSmFetch, annDescQ);
    } else {
        igbe->anBegin(annSmFetch, "Wait", CPA::FL_WAIT);
    }

    enableSm();
    igbe->checkDrain();
}
1049
// DMA-write completion handler: retire the written-back descriptors
// from the used cache, advance the ring head (with wrap), and kick off
// any follow-up writeback that was requested while this one was in
// flight.
template<class T>
void
IGbE::DescCache<T>::wbComplete()
{

    igbe->anBegin(annSmWb, "Finish Writeback");

    long curHead = descHead();
#ifndef NDEBUG
    long oldHead = curHead;
#endif

    // Free the descriptors that have now been written to host memory.
    for (int x = 0; x < wbOut; x++) {
        assert(usedCache.size());
        delete usedCache[0];
        usedCache.pop_front();

        igbe->anDq(annSmWb, annUsedCacheQ);
        igbe->anDq(annSmWb, annDescQ);
    }

    curHead += wbOut;
    wbOut = 0;

    if (curHead >= descLen())
        curHead -= descLen();

    // Update the head
    updateHead(curHead);

    DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
            oldHead, curHead);

    // If we still have more to wb, call wb now
    actionAfterWb();
    if (moreToWb) {
        moreToWb = false;
        DPRINTF(EthernetDesc, "Writeback has more todo\n");
        writeback(wbAlignment);
    }

    if (!wbOut) {
        igbe->checkDrain();
        if (usedCache.size())
            igbe->anBegin(annSmWb, "Wait", CPA::FL_WAIT);
        else
            igbe->anWe(annSmWb, annUsedCacheQ);
    }
    fetchAfterWb();
}
1100
1101template<class T>
1102void
1103IGbE::DescCache<T>::reset()
1104{
1105 DPRINTF(EthernetDesc, "Reseting descriptor cache\n");
1106 for (typename CacheType::size_type x = 0; x < usedCache.size(); x++)
1107 delete usedCache[x];
1108 for (typename CacheType::size_type x = 0; x < unusedCache.size(); x++)
1109 delete unusedCache[x];
1110
1111 usedCache.clear();
1112 unusedCache.clear();
1113
1114 cachePnt = 0;
1115
1116}
1117
// Checkpoint the descriptor cache: scalar state, the raw bytes of every
// cached descriptor, and the scheduled times (0 = not scheduled) of the
// fetch/writeback delay events.
template<class T>
void
IGbE::DescCache<T>::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(cachePnt);
    SERIALIZE_SCALAR(curFetching);
    SERIALIZE_SCALAR(wbOut);
    SERIALIZE_SCALAR(moreToWb);
    SERIALIZE_SCALAR(wbAlignment);

    typename CacheType::size_type usedCacheSize = usedCache.size();
    SERIALIZE_SCALAR(usedCacheSize);
    for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
        arrayParamOut(cp, csprintf("usedCache_%d", x),
                      (uint8_t*)usedCache[x],sizeof(T));
    }

    typename CacheType::size_type unusedCacheSize = unusedCache.size();
    SERIALIZE_SCALAR(unusedCacheSize);
    for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
        arrayParamOut(cp, csprintf("unusedCache_%d", x),
                      (uint8_t*)unusedCache[x],sizeof(T));
    }

    // A value of 0 means the event was not scheduled at checkpoint time.
    Tick fetch_delay = 0, wb_delay = 0;
    if (fetchDelayEvent.scheduled())
        fetch_delay = fetchDelayEvent.when();
    SERIALIZE_SCALAR(fetch_delay);
    if (wbDelayEvent.scheduled())
        wb_delay = wbDelayEvent.when();
    SERIALIZE_SCALAR(wb_delay);


}
1152
// Restore the descriptor cache from a checkpoint: mirror image of
// serialize(). Descriptor objects are re-allocated and filled from the
// checkpointed bytes; the delay events are rescheduled only if they
// were pending (non-zero tick) when the checkpoint was taken.
template<class T>
void
IGbE::DescCache<T>::unserialize(CheckpointIn &cp)
{
    UNSERIALIZE_SCALAR(cachePnt);
    UNSERIALIZE_SCALAR(curFetching);
    UNSERIALIZE_SCALAR(wbOut);
    UNSERIALIZE_SCALAR(moreToWb);
    UNSERIALIZE_SCALAR(wbAlignment);

    typename CacheType::size_type usedCacheSize;
    UNSERIALIZE_SCALAR(usedCacheSize);
    T *temp;
    for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
        temp = new T;
        arrayParamIn(cp, csprintf("usedCache_%d", x),
                     (uint8_t*)temp,sizeof(T));
        usedCache.push_back(temp);
    }

    typename CacheType::size_type unusedCacheSize;
    UNSERIALIZE_SCALAR(unusedCacheSize);
    for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
        temp = new T;
        arrayParamIn(cp, csprintf("unusedCache_%d", x),
                     (uint8_t*)temp,sizeof(T));
        unusedCache.push_back(temp);
    }
    Tick fetch_delay = 0, wb_delay = 0;
    UNSERIALIZE_SCALAR(fetch_delay);
    UNSERIALIZE_SCALAR(wb_delay);
    if (fetch_delay)
        igbe->schedule(fetchDelayEvent, fetch_delay);
    if (wb_delay)
        igbe->schedule(wbDelayEvent, wb_delay);


}
1191
1192///////////////////////////// IGbE::RxDescCache //////////////////////////////
1193
// RX descriptor cache constructor: sets the CPA annotation labels used
// to tag this cache's queues/state machines in annotation output.
IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<RxDesc>(i, n, s), pktDone(false), splitCount(0),
      pktEvent(this), pktHdrEvent(this), pktDataEvent(this)

{
    annSmFetch = "RX Desc Fetch";
    annSmWb = "RX Desc Writeback";
    annUnusedDescQ = "RX Unused Descriptors";
    annUnusedCacheQ = "RX Unused Descriptor Cache";
    annUsedCacheQ = "RX Used Descriptor Cache";
    annUsedDescQ = "RX Used Descriptors";
    annDescQ = "RX Descriptors";
}
1207
1208void
1209IGbE::RxDescCache::pktSplitDone()
1210{
1211 splitCount++;
1212 DPRINTF(EthernetDesc,
1213 "Part of split packet done: splitcount now %d\n", splitCount);
1214 assert(splitCount <= 2);
1215 if (splitCount != 2)
1216 return;
1217 splitCount = 0;
1218 DPRINTF(EthernetDesc,
1219 "Part of split packet done: calling pktComplete()\n");
1220 pktComplete();
1221}
1222
int
IGbE::RxDescCache::writePacket(EthPacketPtr packet, int pkt_offset)
{
    // Copy (part of) an incoming packet into the guest buffer(s) named by
    // the descriptor at the front of the unused cache, starting at
    // pkt_offset bytes into the packet. Returns the cumulative number of
    // bytes of this packet copied so far (bytesCopied). The DMA
    // completion later triggers pktComplete() (directly, or via
    // pktSplitDone() when header and data are written separately).
    assert(unusedCache.size());
    //if (!unusedCache.size())
    //    return false;

    pktPtr = packet;
    pktDone = false;
    unsigned buf_len, hdr_len;

    RxDesc *desc = unusedCache.front();
    switch (igbe->regs.srrctl.desctype()) {
      case RXDT_LEGACY:
        // Legacy descriptor: single buffer, whole packet must fit.
        assert(pkt_offset == 0);
        bytesCopied = packet->length;
        DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n",
                packet->length, igbe->regs.rctl.descSize());
        assert(packet->length < igbe->regs.rctl.descSize());
        igbe->dmaWrite(pciToDma(desc->legacy.buf),
                       packet->length, &pktEvent, packet->data,
                       igbe->rxWriteDelay);
        break;
      case RXDT_ADV_ONEBUF:
        // Advanced one-buffer descriptor: buffer size comes from SRRCTL
        // when long-packet-enable (LPE) is set, otherwise from RCTL.
        assert(pkt_offset == 0);
        bytesCopied = packet->length;
        buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
            igbe->regs.rctl.descSize();
        DPRINTF(EthernetDesc, "Packet Length: %d srrctl: %#x Desc Size: %d\n",
                packet->length, igbe->regs.srrctl(), buf_len);
        assert(packet->length < buf_len);
        igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
                       packet->length, &pktEvent, packet->data,
                       igbe->rxWriteDelay);
        // Fill in writeback fields that pktComplete() does not set.
        desc->adv_wb.header_len = htole(0);
        desc->adv_wb.sph = htole(0);
        desc->adv_wb.pkt_len = htole((uint16_t)(pktPtr->length));
        break;
      case RXDT_ADV_SPLIT_A:
        // Advanced header-split descriptor: protocol headers and payload
        // may land in two separate buffers.
        int split_point;

        buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
            igbe->regs.rctl.descSize();
        hdr_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.hdrLen() : 0;
        DPRINTF(EthernetDesc,
                "lpe: %d Packet Length: %d offset: %d srrctl: %#x "
                "hdr addr: %#x Hdr Size: %d desc addr: %#x Desc Size: %d\n",
                igbe->regs.rctl.lpe(), packet->length, pkt_offset,
                igbe->regs.srrctl(), desc->adv_read.hdr, hdr_len,
                desc->adv_read.pkt, buf_len);

        // Offset where protocol headers end (0 if not decodable).
        split_point = hsplit(pktPtr);

        if (packet->length <= hdr_len) {
            // Entire packet fits in the header buffer alone.
            bytesCopied = packet->length;
            assert(pkt_offset == 0);
            DPRINTF(EthernetDesc, "Hdr split: Entire packet in header\n");
            igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
                           packet->length, &pktEvent, packet->data,
                           igbe->rxWriteDelay);
            desc->adv_wb.header_len = htole((uint16_t)packet->length);
            desc->adv_wb.sph = htole(0);
            desc->adv_wb.pkt_len = htole(0);
        } else if (split_point) {
            if (pkt_offset) {
                // we are only copying some data, header/data has already been
                // copied
                int max_to_copy =
                    std::min(packet->length - pkt_offset, buf_len);
                bytesCopied += max_to_copy;
                DPRINTF(EthernetDesc,
                        "Hdr split: Continuing data buffer copy\n");
                igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
                               max_to_copy, &pktEvent,
                               packet->data + pkt_offset, igbe->rxWriteDelay);
                desc->adv_wb.header_len = htole(0);
                desc->adv_wb.pkt_len = htole((uint16_t)max_to_copy);
                desc->adv_wb.sph = htole(0);
            } else {
                // First descriptor of the packet: two DMAs (header and
                // payload), joined later by pktSplitDone().
                int max_to_copy =
                    std::min(packet->length - split_point, buf_len);
                bytesCopied += max_to_copy + split_point;

                DPRINTF(EthernetDesc, "Hdr split: splitting at %d\n",
                        split_point);
                igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
                               split_point, &pktHdrEvent,
                               packet->data, igbe->rxWriteDelay);
                igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
                               max_to_copy, &pktDataEvent,
                               packet->data + split_point, igbe->rxWriteDelay);
                desc->adv_wb.header_len = htole(split_point);
                desc->adv_wb.sph = 1;
                desc->adv_wb.pkt_len = htole((uint16_t)(max_to_copy));
            }
        } else {
            panic("Header split not fitting within header buffer or "
                  "undecodable packet not fitting in header unsupported\n");
        }
        break;
      default:
        panic("Unimplemnted RX receive buffer type: %d\n",
              igbe->regs.srrctl.desctype());
    }
    return bytesCopied;

}
1330
void
IGbE::RxDescCache::pktComplete()
{
    // DMA of the packet (or fragment) into guest memory finished: fill in
    // the descriptor's writeback/status fields (length, checksum results,
    // packet type), post any receive interrupts, and move the descriptor
    // from the unused cache to the used cache for later writeback.
    assert(unusedCache.size());
    RxDesc *desc;
    desc = unusedCache.front();

    igbe->anBegin("RXS", "Update Desc");

    // Unless the device is told to strip the ethernet CRC (SECRC), the
    // reported length includes the 4 CRC bytes.
    uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4 ;
    DPRINTF(EthernetDesc, "pktPtr->length: %d bytesCopied: %d "
            "stripcrc offset: %d value written: %d %d\n",
            pktPtr->length, bytesCopied, crcfixup,
            htole((uint16_t)(pktPtr->length + crcfixup)),
            (uint16_t)(pktPtr->length + crcfixup));

    // no support for anything but starting at 0
    assert(igbe->regs.rxcsum.pcss() == 0);

    DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n");

    uint16_t status = RXDS_DD;
    uint8_t err = 0;
    uint16_t ext_err = 0;
    uint16_t csum = 0;
    uint16_t ptype = 0;
    uint16_t ip_id = 0;

    // EOP is only set on the descriptor holding the packet's last byte.
    assert(bytesCopied <= pktPtr->length);
    if (bytesCopied == pktPtr->length)
        status |= RXDS_EOP;

    IpPtr ip(pktPtr);

    if (ip) {
        DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n", ip->id());
        ptype |= RXDP_IPV4;
        ip_id = ip->id();

        // Verify offloaded checksums: a non-zero checksum over the
        // received header/payload indicates corruption.
        if (igbe->regs.rxcsum.ipofld()) {
            DPRINTF(EthernetDesc, "Checking IP checksum\n");
            status |= RXDS_IPCS;
            csum = htole(cksum(ip));
            igbe->rxIpChecksums++;
            if (cksum(ip) != 0) {
                err |= RXDE_IPE;
                ext_err |= RXDEE_IPE;
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
            }
        }
        TcpPtr tcp(ip);
        if (tcp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking TCP checksum\n");
            status |= RXDS_TCPCS;
            ptype |= RXDP_TCP;
            csum = htole(cksum(tcp));
            igbe->rxTcpChecksums++;
            if (cksum(tcp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                err |= RXDE_TCPE;
                ext_err |= RXDEE_TCPE;
            }
        }

        UdpPtr udp(ip);
        if (udp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking UDP checksum\n");
            status |= RXDS_UDPCS;
            ptype |= RXDP_UDP;
            csum = htole(cksum(udp));
            igbe->rxUdpChecksums++;
            if (cksum(udp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                // NOTE(review): a UDP checksum failure reports the TCP
                // error bits; no UDP-specific error bit is used here --
                // confirm against the register definitions/datasheet.
                ext_err |= RXDEE_TCPE;
                err |= RXDE_TCPE;
            }
        }
    } else { // if ip
        DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
    }

    // Write the computed status into whichever descriptor layout the
    // device is configured for.
    switch (igbe->regs.srrctl.desctype()) {
      case RXDT_LEGACY:
        desc->legacy.len = htole((uint16_t)(pktPtr->length + crcfixup));
        desc->legacy.status = htole(status);
        desc->legacy.errors = htole(err);
        // No vlan support at this point... just set it to 0
        desc->legacy.vlan = 0;
        break;
      case RXDT_ADV_SPLIT_A:
      case RXDT_ADV_ONEBUF:
        desc->adv_wb.rss_type = htole(0);
        desc->adv_wb.pkt_type = htole(ptype);
        if (igbe->regs.rxcsum.pcsd()) {
            // no rss support right now
            desc->adv_wb.rss_hash = htole(0);
        } else {
            desc->adv_wb.id = htole(ip_id);
            desc->adv_wb.csum = htole(csum);
        }
        desc->adv_wb.status = htole(status);
        desc->adv_wb.errors = htole(ext_err);
        // no vlan support
        desc->adv_wb.vlan_tag = htole(0);
        break;
      default:
        panic("Unimplemnted RX receive buffer type %d\n",
              igbe->regs.srrctl.desctype());
    }

    // NOTE(review): debug-only read through the adv_read view of the
    // descriptor union, even in legacy mode.
    DPRINTF(EthernetDesc, "Descriptor complete w0: %#x w1: %#x\n",
            desc->adv_read.pkt, desc->adv_read.hdr);

    if (bytesCopied == pktPtr->length) {
        DPRINTF(EthernetDesc,
                "Packet completely written to descriptor buffers\n");
        // Deal with the rx timer interrupts
        if (igbe->regs.rdtr.delay()) {
            Tick delay = igbe->regs.rdtr.delay() * igbe->intClock();
            DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n", delay);
            igbe->reschedule(igbe->rdtrEvent, curTick() + delay);
        }

        if (igbe->regs.radv.idv()) {
            Tick delay = igbe->regs.radv.idv() * igbe->intClock();
            DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n", delay);
            if (!igbe->radvEvent.scheduled()) {
                igbe->schedule(igbe->radvEvent, curTick() + delay);
            }
        }

        // if neither radv or rdtr, maybe itr is set...
        if (!igbe->regs.rdtr.delay() && !igbe->regs.radv.idv()) {
            DPRINTF(EthernetSM,
                    "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
            igbe->postInterrupt(IT_RXT);
        }

        // If the packet is small enough, interrupt appropriately
        // I wonder if this is delayed or not?!
        if (pktPtr->length <= igbe->regs.rsrpd.idv()) {
            DPRINTF(EthernetSM,
                    "RXS: Posting IT_SRPD beacuse small packet received\n");
            igbe->postInterrupt(IT_SRPD);
        }
        bytesCopied = 0;
    }

    // Release the packet, restart the RX state machine, and migrate the
    // completed descriptor to the used cache to await writeback.
    pktPtr = NULL;
    igbe->checkDrain();
    enableSm();
    pktDone = true;

    igbe->anBegin("RXS", "Done Updating Desc");
    DPRINTF(EthernetDesc, "Processing of this descriptor complete\n");
    igbe->anDq("RXS", annUnusedCacheQ);
    unusedCache.pop_front();
    igbe->anQ("RXS", annUsedCacheQ);
    usedCache.push_back(desc);
}
1491
1492void
1493IGbE::RxDescCache::enableSm()
1494{
1495 if (igbe->drainState() != DrainState::Draining) {
1496 igbe->rxTick = true;
1497 igbe->restartClock();
1498 }
1499}
1500
1501bool
1502IGbE::RxDescCache::packetDone()
1503{
1504 if (pktDone) {
1505 pktDone = false;
1506 return true;
1507 }
1508 return false;
1509}
1510
1511bool
1512IGbE::RxDescCache::hasOutstandingEvents()
1513{
1514 return pktEvent.scheduled() || wbEvent.scheduled() ||
1515 fetchEvent.scheduled() || pktHdrEvent.scheduled() ||
1516 pktDataEvent.scheduled();
1517
1518}
1519
void
IGbE::RxDescCache::serialize(CheckpointOut &cp) const
{
    // Checkpoint base-class descriptor-cache state first, then the
    // RX-specific packet-in-flight bookkeeping.
    DescCache<RxDesc>::serialize(cp);
    SERIALIZE_SCALAR(pktDone);
    SERIALIZE_SCALAR(splitCount);
    SERIALIZE_SCALAR(bytesCopied);
}
1528
void
IGbE::RxDescCache::unserialize(CheckpointIn &cp)
{
    // Restore in the same order serialize() wrote: base-class state,
    // then RX-specific packet bookkeeping.
    DescCache<RxDesc>::unserialize(cp);
    UNSERIALIZE_SCALAR(pktDone);
    UNSERIALIZE_SCALAR(splitCount);
    UNSERIALIZE_SCALAR(bytesCopied);
}
1537
1538
1539///////////////////////////// IGbE::TxDescCache //////////////////////////////
1540
// Construct the TX descriptor cache; s is the cache size passed through
// to the DescCache base. All TSO (TCP segmentation offload) state starts
// cleared and is set up by processContextDesc().
IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false),
      pktWaiting(false), pktMultiDesc(false),
      completionAddress(0), completionEnabled(false),
      useTso(false), tsoHeaderLen(0), tsoMss(0), tsoTotalLen(0), tsoUsedLen(0),
      tsoPrevSeq(0), tsoPktPayloadBytes(0), tsoLoadedHeader(false),
      tsoPktHasHeader(false), tsoDescBytesUsed(0), tsoCopyBytes(0), tsoPkts(0),
      pktEvent(this), headerEvent(this), nullEvent(this)
{
    // Labels used by the CPA annotation calls (anQ/anDq/anBegin/...) to
    // tag TX-side descriptor state-machine activity.
    annSmFetch = "TX Desc Fetch";
    annSmWb = "TX Desc Writeback";
    annUnusedDescQ = "TX Unused Descriptors";
    annUnusedCacheQ = "TX Unused Descriptor Cache";
    annUsedCacheQ = "TX Used Descriptor Cache";
    annUsedDescQ = "TX Used Descriptors";
    annDescQ = "TX Descriptors";
}
1558
void
IGbE::TxDescCache::processContextDesc()
{
    // Consume any context descriptors at the head of the unused cache,
    // capturing the checksum/TSO parameters they carry, then start the
    // TSO header DMA if the following data descriptor needs one.
    assert(unusedCache.size());
    TxDesc *desc;

    DPRINTF(EthernetDesc, "Checking and processing context descriptors\n");

    while (!useTso && unusedCache.size() &&
           TxdOp::isContext(unusedCache.front())) {
        DPRINTF(EthernetDesc, "Got context descriptor type...\n");

        desc = unusedCache.front();
        DPRINTF(EthernetDesc, "Descriptor upper: %#x lower: %#X\n",
                desc->d1, desc->d2);


        // is this going to be a tcp or udp packet?
        isTcp = TxdOp::tcp(desc) ? true : false;

        // setup all the TSO variables, they'll be ignored if we don't use
        // tso for this connection
        tsoHeaderLen = TxdOp::hdrlen(desc);
        tsoMss = TxdOp::mss(desc);

        // Legacy-style context descriptor with TSE set: arm TSO and
        // reset all per-offload counters.
        if (TxdOp::isType(desc, TxdOp::TXD_CNXT) && TxdOp::tse(desc)) {
            DPRINTF(EthernetDesc, "TCP offload enabled for packet hdrlen: "
                    "%d mss: %d paylen %d\n", TxdOp::hdrlen(desc),
                    TxdOp::mss(desc), TxdOp::getLen(desc));
            useTso = true;
            tsoTotalLen = TxdOp::getLen(desc);
            tsoLoadedHeader = false;
            tsoDescBytesUsed = 0;
            tsoUsedLen = 0;
            tsoPrevSeq = 0;
            tsoPktHasHeader = false;
            tsoPkts = 0;
            tsoCopyBytes = 0;
        }

        // Context descriptors carry no data: mark done and retire.
        TxdOp::setDd(desc);
        unusedCache.pop_front();
        igbe->anDq("TXS", annUnusedCacheQ);
        usedCache.push_back(desc);
        igbe->anQ("TXS", annUsedCacheQ);
    }

    if (!unusedCache.size())
        return;

    desc = unusedCache.front();
    // Advanced data descriptors can also request TSO directly (using the
    // parameters recorded from an earlier context descriptor).
    if (!useTso && TxdOp::isType(desc, TxdOp::TXD_ADVDATA) &&
        TxdOp::tse(desc)) {
        DPRINTF(EthernetDesc, "TCP offload(adv) enabled for packet "
                "hdrlen: %d mss: %d paylen %d\n",
                tsoHeaderLen, tsoMss, TxdOp::getTsoLen(desc));
        useTso = true;
        tsoTotalLen = TxdOp::getTsoLen(desc);
        tsoLoadedHeader = false;
        tsoDescBytesUsed = 0;
        tsoUsedLen = 0;
        tsoPrevSeq = 0;
        tsoPktHasHeader = false;
        tsoPkts = 0;
    }

    if (useTso && !tsoLoadedHeader) {
        // we need to fetch a header
        DPRINTF(EthernetDesc, "Starting DMA of TSO header\n");
        assert(TxdOp::isData(desc) && TxdOp::getLen(desc) >= tsoHeaderLen);
        pktWaiting = true;
        // tsoHeader is a fixed 256-byte buffer (see serialize()).
        assert(tsoHeaderLen <= 256);
        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
                      tsoHeaderLen, &headerEvent, tsoHeader, 0);
    }
}
1635
1636void
1637IGbE::TxDescCache::headerComplete()
1638{
1639 DPRINTF(EthernetDesc, "TSO: Fetching TSO header complete\n");
1640 pktWaiting = false;
1641
1642 assert(unusedCache.size());
1643 TxDesc *desc = unusedCache.front();
1644 DPRINTF(EthernetDesc, "TSO: len: %d tsoHeaderLen: %d\n",
1645 TxdOp::getLen(desc), tsoHeaderLen);
1646
1647 if (TxdOp::getLen(desc) == tsoHeaderLen) {
1648 tsoDescBytesUsed = 0;
1649 tsoLoadedHeader = true;
1650 unusedCache.pop_front();
1651 usedCache.push_back(desc);
1652 } else {
1653 DPRINTF(EthernetDesc, "TSO: header part of larger payload\n");
1654 tsoDescBytesUsed = tsoHeaderLen;
1655 tsoLoadedHeader = true;
1656 }
1657 enableSm();
1658 igbe->checkDrain();
1659}
1660
unsigned
IGbE::TxDescCache::getPacketSize(EthPacketPtr p)
{
    // Return the number of bytes the next DMA would add to packet p, or
    // 0 if no descriptor is available. For TSO this also computes
    // tsoCopyBytes, the payload slice getPacketData() will take from the
    // current descriptor.
    if (!unusedCache.size())
        return 0;

    DPRINTF(EthernetDesc, "Starting processing of descriptor\n");

    assert(!useTso || tsoLoadedHeader);
    TxDesc *desc = unusedCache.front();

    if (useTso) {
        DPRINTF(EthernetDesc, "getPacket(): TxDescriptor data "
                "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
        DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
                "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
                tsoTotalLen, tsoUsedLen, tsoLoadedHeader);

        // Bound the copy by both what remains of this segment (mss) and
        // what remains of the current descriptor's buffer.
        if (tsoPktHasHeader)
            tsoCopyBytes = std::min((tsoMss + tsoHeaderLen) - p->length,
                                    TxdOp::getLen(desc) - tsoDescBytesUsed);
        else
            tsoCopyBytes = std::min(tsoMss,
                                    TxdOp::getLen(desc) - tsoDescBytesUsed);
        // If this segment's header hasn't been placed into p yet, it
        // counts toward the reported size too.
        unsigned pkt_size =
            tsoCopyBytes + (tsoPktHasHeader ? 0 : tsoHeaderLen);

        DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d "
                "this descLen: %d\n",
                tsoDescBytesUsed, tsoCopyBytes, TxdOp::getLen(desc));
        DPRINTF(EthernetDesc, "TSO: pktHasHeader: %d\n", tsoPktHasHeader);
        DPRINTF(EthernetDesc, "TSO: Next packet is %d bytes\n", pkt_size);
        return pkt_size;
    }

    // Non-TSO: the whole descriptor buffer is the next chunk.
    DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
            TxdOp::getLen(unusedCache.front()));
    return TxdOp::getLen(desc);
}
1700
void
IGbE::TxDescCache::getPacketData(EthPacketPtr p)
{
    // Start the DMA that appends this descriptor's data to packet p
    // (sized beforehand by getPacketSize()); pktComplete() runs when the
    // DMA finishes.
    assert(unusedCache.size());

    TxDesc *desc;
    desc = unusedCache.front();

    DPRINTF(EthernetDesc, "getPacketData(): TxDescriptor data "
            "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
           TxdOp::getLen(desc));

    pktPtr = p;

    pktWaiting = true;

    DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d\n", p->length);

    if (useTso) {
        assert(tsoLoadedHeader);
        if (!tsoPktHasHeader) {
            // Each TSO segment begins with a copy of the prefetched
            // protocol header.
            DPRINTF(EthernetDesc,
                    "Loading TSO header (%d bytes) into start of packet\n",
                    tsoHeaderLen);
            memcpy(p->data, &tsoHeader,tsoHeaderLen);
            p->length +=tsoHeaderLen;
            tsoPktHasHeader = true;
        }
    }

    if (useTso) {
        // Copy only this segment's slice of the descriptor's buffer,
        // resuming at tsoDescBytesUsed.
        DPRINTF(EthernetDesc,
                "Starting DMA of packet at offset %d length: %d\n",
                p->length, tsoCopyBytes);
        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc))
                      + tsoDescBytesUsed,
                      tsoCopyBytes, &pktEvent, p->data + p->length,
                      igbe->txReadDelay);
        tsoDescBytesUsed += tsoCopyBytes;
        assert(tsoDescBytesUsed <= TxdOp::getLen(desc));
    } else {
        // Non-TSO: DMA the entire descriptor buffer.
        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
                      TxdOp::getLen(desc), &pktEvent, p->data + p->length,
                      igbe->txReadDelay);
    }
}
1748
void
IGbE::TxDescCache::pktComplete()
{
    // DMA of one descriptor's data into the packet finished. Either the
    // packet still needs more descriptors (multi-descriptor packet, or a
    // TSO segment that isn't full yet), or it is finalized here: TSO
    // header fixups, checksum offload, interrupt-delay timers, and
    // possibly a descriptor writeback.

    TxDesc *desc;
    assert(unusedCache.size());
    assert(pktPtr);

    igbe->anBegin("TXS", "Update Desc");

    DPRINTF(EthernetDesc, "DMA of packet complete\n");


    desc = unusedCache.front();
    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
           TxdOp::getLen(desc));

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
            desc->d1, desc->d2);

    // Set the length of the data in the EtherPacket
    if (useTso) {
        DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
                "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
                tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
        pktPtr->simLength += tsoCopyBytes;
        pktPtr->length += tsoCopyBytes;
        tsoUsedLen += tsoCopyBytes;
        DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d\n",
                tsoDescBytesUsed, tsoCopyBytes);
    } else {
        pktPtr->simLength += TxdOp::getLen(desc);
        pktPtr->length += TxdOp::getLen(desc);
    }



    // Packet not yet complete (no EOP, or TSO segment still below
    // mss+header with payload remaining): retire this descriptor, flag
    // pktMultiDesc, and let the state machine fetch the rest.
    if ((!TxdOp::eop(desc) && !useTso) ||
        (pktPtr->length < ( tsoMss + tsoHeaderLen) &&
         tsoTotalLen != tsoUsedLen && useTso)) {
        assert(!useTso || (tsoDescBytesUsed == TxdOp::getLen(desc)));
        igbe->anDq("TXS", annUnusedCacheQ);
        unusedCache.pop_front();
        igbe->anQ("TXS", annUsedCacheQ);
        usedCache.push_back(desc);

        tsoDescBytesUsed = 0;
        pktDone = true;
        pktWaiting = false;
        pktMultiDesc = true;

        DPRINTF(EthernetDesc, "Partial Packet Descriptor of %d bytes Done\n",
                pktPtr->length);
        pktPtr = NULL;

        enableSm();
        igbe->checkDrain();
        return;
    }


    pktMultiDesc = false;
    // no support for vlans
    assert(!TxdOp::vle(desc));

    // we only support single packet descriptors at this point
    if (!useTso)
        assert(TxdOp::eop(desc));

    // set that this packet is done
    if (TxdOp::rs(desc))
        TxdOp::setDd(desc);

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
            desc->d1, desc->d2);

    // Patch the IP/TCP/UDP headers so each generated TSO segment looks
    // like an independent packet: fresh IP id, advanced TCP sequence
    // number, corrected lengths, and FIN/PSH cleared on all but the last
    // segment.
    if (useTso) {
        IpPtr ip(pktPtr);
        if (ip) {
            DPRINTF(EthernetDesc, "TSO: Modifying IP header. Id + %d\n",
                    tsoPkts);
            ip->id(ip->id() + tsoPkts++);
            ip->len(pktPtr->length - EthPtr(pktPtr)->size());

            TcpPtr tcp(ip);
            if (tcp) {
                DPRINTF(EthernetDesc,
                        "TSO: Modifying TCP header. old seq %d + %d\n",
                        tcp->seq(), tsoPrevSeq);
                tcp->seq(tcp->seq() + tsoPrevSeq);
                if (tsoUsedLen != tsoTotalLen)
                    tcp->flags(tcp->flags() & ~9); // clear fin & psh
            }
            UdpPtr udp(ip);
            if (udp) {
                DPRINTF(EthernetDesc, "TSO: Modifying UDP header.\n");
                udp->len(pktPtr->length - EthPtr(pktPtr)->size());
            }
        }
        tsoPrevSeq = tsoUsedLen;
    }

    if (DTRACE(EthernetDesc)) {
        IpPtr ip(pktPtr);
        if (ip)
            DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n",
                    ip->id());
        else
            DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
    }

    // Checksums are only ofloaded for new descriptor types
    if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) {
        DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
        IpPtr ip(pktPtr);
        assert(ip);
        if (TxdOp::ixsm(desc)) {
            ip->sum(0);
            ip->sum(cksum(ip));
            igbe->txIpChecksums++;
            DPRINTF(EthernetDesc, "Calculated IP checksum\n");
        }
        if (TxdOp::txsm(desc)) {
            TcpPtr tcp(ip);
            UdpPtr udp(ip);
            if (tcp) {
                tcp->sum(0);
                tcp->sum(cksum(tcp));
                igbe->txTcpChecksums++;
                DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
            } else if (udp) {
                assert(udp);
                udp->sum(0);
                udp->sum(cksum(udp));
                igbe->txUdpChecksums++;
                DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
            } else {
                panic("Told to checksum, but don't know how\n");
            }
        }
    }

    // Interrupt-delay-enable: arm the TX delay timers rather than
    // interrupting immediately.
    if (TxdOp::ide(desc)) {
        // Deal with the rx timer interrupts
        DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
        if (igbe->regs.tidv.idv()) {
            Tick delay = igbe->regs.tidv.idv() * igbe->intClock();
            DPRINTF(EthernetDesc, "setting tidv\n");
            igbe->reschedule(igbe->tidvEvent, curTick() + delay, true);
        }

        if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
            Tick delay = igbe->regs.tadv.idv() * igbe->intClock();
            DPRINTF(EthernetDesc, "setting tadv\n");
            if (!igbe->tadvEvent.scheduled()) {
                igbe->schedule(igbe->tadvEvent, curTick() + delay);
            }
        }
    }


    // Retire the descriptor unless TSO still has bytes left in it.
    if (!useTso || TxdOp::getLen(desc) == tsoDescBytesUsed) {
        DPRINTF(EthernetDesc, "Descriptor Done\n");
        igbe->anDq("TXS", annUnusedCacheQ);
        unusedCache.pop_front();
        igbe->anQ("TXS", annUsedCacheQ);
        usedCache.push_back(desc);
        tsoDescBytesUsed = 0;
    }

    // Entire TSO payload consumed: offload is finished.
    if (useTso && tsoUsedLen == tsoTotalLen)
        useTso = false;


    DPRINTF(EthernetDesc,
            "------Packet of %d bytes ready for transmission-------\n",
            pktPtr->length);
    pktDone = true;
    pktWaiting = false;
    pktPtr = NULL;
    tsoPktHasHeader = false;

    // Write back completed descriptors according to the WTHRESH policy.
    if (igbe->regs.txdctl.wthresh() == 0) {
        igbe->anBegin("TXS", "Desc Writeback");
        DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
        writeback(0);
    } else if (!igbe->regs.txdctl.gran() && igbe->regs.txdctl.wthresh() <=
               descInBlock(usedCache.size())) {
        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
        igbe->anBegin("TXS", "Desc Writeback");
        writeback((igbe->cacheBlockSize()-1)>>4);
    } else if (igbe->regs.txdctl.wthresh() <= usedCache.size()) {
        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
        igbe->anBegin("TXS", "Desc Writeback");
        writeback((igbe->cacheBlockSize()-1)>>4);
    }

    enableSm();
    igbe->checkDrain();
}
1949
1950void
1951IGbE::TxDescCache::actionAfterWb()
1952{
1953 DPRINTF(EthernetDesc, "actionAfterWb() completionEnabled: %d\n",
1954 completionEnabled);
1955 igbe->postInterrupt(iGbReg::IT_TXDW);
1956 if (completionEnabled) {
1957 descEnd = igbe->regs.tdh();
1958 DPRINTF(EthernetDesc,
1959 "Completion writing back value: %d to addr: %#x\n", descEnd,
1960 completionAddress);
1961 igbe->dmaWrite(pciToDma(mbits(completionAddress, 63, 2)),
1962 sizeof(descEnd), &nullEvent, (uint8_t*)&descEnd, 0);
1963 }
1964}
1965
1966void
1967IGbE::TxDescCache::serialize(CheckpointOut &cp) const
1968{
1969 DescCache<TxDesc>::serialize(cp);
1970
1971 SERIALIZE_SCALAR(pktDone);
1972 SERIALIZE_SCALAR(isTcp);
1973 SERIALIZE_SCALAR(pktWaiting);
1974 SERIALIZE_SCALAR(pktMultiDesc);
1975
1976 SERIALIZE_SCALAR(useTso);
1977 SERIALIZE_SCALAR(tsoHeaderLen);
1978 SERIALIZE_SCALAR(tsoMss);
1979 SERIALIZE_SCALAR(tsoTotalLen);
1980 SERIALIZE_SCALAR(tsoUsedLen);
1981 SERIALIZE_SCALAR(tsoPrevSeq);;
1982 SERIALIZE_SCALAR(tsoPktPayloadBytes);
1983 SERIALIZE_SCALAR(tsoLoadedHeader);
1984 SERIALIZE_SCALAR(tsoPktHasHeader);
1985 SERIALIZE_ARRAY(tsoHeader, 256);
1986 SERIALIZE_SCALAR(tsoDescBytesUsed);
1987 SERIALIZE_SCALAR(tsoCopyBytes);
1988 SERIALIZE_SCALAR(tsoPkts);
1989
1990 SERIALIZE_SCALAR(completionAddress);
1991 SERIALIZE_SCALAR(completionEnabled);
1992 SERIALIZE_SCALAR(descEnd);
1993}
1994
1995void
1996IGbE::TxDescCache::unserialize(CheckpointIn &cp)
1997{
1998 DescCache<TxDesc>::unserialize(cp);
1999
2000 UNSERIALIZE_SCALAR(pktDone);
2001 UNSERIALIZE_SCALAR(isTcp);
2002 UNSERIALIZE_SCALAR(pktWaiting);
2003 UNSERIALIZE_SCALAR(pktMultiDesc);
2004
2005 UNSERIALIZE_SCALAR(useTso);
2006 UNSERIALIZE_SCALAR(tsoHeaderLen);
2007 UNSERIALIZE_SCALAR(tsoMss);
2008 UNSERIALIZE_SCALAR(tsoTotalLen);
2009 UNSERIALIZE_SCALAR(tsoUsedLen);
2010 UNSERIALIZE_SCALAR(tsoPrevSeq);;
2011 UNSERIALIZE_SCALAR(tsoPktPayloadBytes);
2012 UNSERIALIZE_SCALAR(tsoLoadedHeader);
2013 UNSERIALIZE_SCALAR(tsoPktHasHeader);
2014 UNSERIALIZE_ARRAY(tsoHeader, 256);
2015 UNSERIALIZE_SCALAR(tsoDescBytesUsed);
2016 UNSERIALIZE_SCALAR(tsoCopyBytes);
2017 UNSERIALIZE_SCALAR(tsoPkts);
2018
2019 UNSERIALIZE_SCALAR(completionAddress);
2020 UNSERIALIZE_SCALAR(completionEnabled);
2021 UNSERIALIZE_SCALAR(descEnd);
2022}
2023
2024bool
2025IGbE::TxDescCache::packetAvailable()
2026{
2027 if (pktDone) {
2028 pktDone = false;
2029 return true;
2030 }
2031 return false;
2032}
2033
2034void
2035IGbE::TxDescCache::enableSm()
2036{
2037 if (igbe->drainState() != DrainState::Draining) {
2038 igbe->txTick = true;
2039 igbe->restartClock();
2040 }
2041}
2042
2043bool
2044IGbE::TxDescCache::hasOutstandingEvents()
2045{
2046 return pktEvent.scheduled() || wbEvent.scheduled() ||
2047 fetchEvent.scheduled();
2048}
2049
2050
2051///////////////////////////////////// IGbE /////////////////////////////////
2052
2053void
2054IGbE::restartClock()
2055{
2056 if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) &&
2057 drainState() == DrainState::Running)
2058 schedule(tickEvent, clockEdge(Cycles(1)));
2059}
2060
2061DrainState
2062IGbE::drain()
2063{
2064 unsigned int count(0);
2065 if (rxDescCache.hasOutstandingEvents() ||
2066 txDescCache.hasOutstandingEvents()) {
2067 count++;
2068 }
2069
2070 txFifoTick = false;
2071 txTick = false;
2072 rxTick = false;
2073
2074 if (tickEvent.scheduled())
2075 deschedule(tickEvent);
2076
2077 if (count) {
2078 DPRINTF(Drain, "IGbE not drained\n");
2079 return DrainState::Draining;
2080 } else
2081 return DrainState::Drained;
2082}
2083
2084void
2085IGbE::drainResume()
2086{
2087 Drainable::drainResume();
2088
2089 txFifoTick = true;
2090 txTick = true;
2091 rxTick = true;
2092
2093 restartClock();
2094 DPRINTF(EthernetSM, "resuming from drain");
2095}
2096
2097void
2098IGbE::checkDrain()
2099{
2100 if (drainState() != DrainState::Draining)
2101 return;
2102
2103 txFifoTick = false;
2104 txTick = false;
2105 rxTick = false;
2106 if (!rxDescCache.hasOutstandingEvents() &&
2107 !txDescCache.hasOutstandingEvents()) {
2108 DPRINTF(Drain, "IGbE done draining, processing drain event\n");
2109 signalDrainDone();
2110 }
2111}
2112
void
IGbE::txStateMachine()
{
    // One iteration of the TX state machine: push a finished packet into
    // the TX FIFO, or make progress on fetching/processing descriptors
    // and DMAing the next packet. txTick is cleared whenever there is
    // nothing more to do so the device stops ticking.
    if (!regs.tctl.en()) {
        txTick = false;
        DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
        return;
    }

    // If we have a packet available and it's length is not 0 (meaning it's not
    // a multidescriptor packet) put it in the fifo, otherwise an the next
    // iteration we'll get the rest of the data
    if (txPacket && txDescCache.packetAvailable()
        && !txDescCache.packetMultiDesc() && txPacket->length) {
        anQ("TXS", "TX FIFO Q");
        DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
#ifndef NDEBUG
        bool success =
#endif
            txFifo.push(txPacket);
        txFifoTick = true && drainState() != DrainState::Draining;
        assert(success);
        txPacket = NULL;
        anBegin("TXS", "Desc Writeback");
        txDescCache.writeback((cacheBlockSize()-1)>>4);
        return;
    }

    // Only support descriptor granularity
    if (regs.txdctl.lwthresh() &&
        txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
        DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
        postInterrupt(IT_TXDLOW);
    }

    // Allocate a fresh (max-sized) packet buffer if we don't have one.
    if (!txPacket) {
        txPacket = std::make_shared<EthPacketData>(16384);
    }

    if (!txDescCache.packetWaiting()) {
        // Ring empty: force a writeback, start a fetch, post TXQE, and
        // stop ticking until descriptors show up.
        if (txDescCache.descLeft() == 0) {
            postInterrupt(IT_TXQE);
            anBegin("TXS", "Desc Writeback");
            txDescCache.writeback(0);
            anBegin("TXS", "Desc Fetch");
            anWe("TXS", txDescCache.annUnusedCacheQ);
            txDescCache.fetchDescriptors();
            DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
                    "writeback stopping ticking and posting TXQE\n");
            txTick = false;
            return;
        }


        // Cache empty: fetch more descriptors and pause until they
        // arrive.
        if (!(txDescCache.descUnused())) {
            anBegin("TXS", "Desc Fetch");
            txDescCache.fetchDescriptors();
            anWe("TXS", txDescCache.annUnusedCacheQ);
            DPRINTF(EthernetSM, "TXS: No descriptors available in cache, "
                    "fetching and stopping ticking\n");
            txTick = false;
            return;
        }
        anPq("TXS", txDescCache.annUnusedCacheQ);


        // Consume any context descriptors; a TSO header fetch pauses the
        // state machine until its DMA completes.
        txDescCache.processContextDesc();
        if (txDescCache.packetWaiting()) {
            DPRINTF(EthernetSM,
                    "TXS: Fetching TSO header, stopping ticking\n");
            txTick = false;
            return;
        }

        // Start the next data DMA if the FIFO has room for it.
        unsigned size = txDescCache.getPacketSize(txPacket);
        if (size > 0 && txFifo.avail() > size) {
            anRq("TXS", "TX FIFO Q");
            anBegin("TXS", "DMA Packet");
            DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and "
                    "beginning DMA of next packet\n", size);
            txFifo.reserve(size);
            txDescCache.getPacketData(txPacket);
        } else if (size == 0) {
            DPRINTF(EthernetSM, "TXS: getPacketSize returned: %d\n", size);
            DPRINTF(EthernetSM,
                    "TXS: No packets to get, writing back used descriptors\n");
            anBegin("TXS", "Desc Writeback");
            txDescCache.writeback(0);
        } else {
            anWf("TXS", "TX FIFO Q");
            DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
                    "available in FIFO\n");
            txTick = false;
        }


        return;
    }
    DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
    txTick = false;
}
2214
2215bool
2216IGbE::ethRxPkt(EthPacketPtr pkt)
2217{
2218 rxBytes += pkt->length;
2219 rxPackets++;
2220
2221 DPRINTF(Ethernet, "RxFIFO: Receiving pcakte from wire\n");
2222 anBegin("RXQ", "Wire Recv");
2223
2224
2225 if (!regs.rctl.en()) {
2226 DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
2227 anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
2228 return true;
2229 }
2230
2231 // restart the state machines if they are stopped
2232 rxTick = true && drainState() != DrainState::Draining;
2233 if ((rxTick || txTick) && !tickEvent.scheduled()) {
2234 DPRINTF(EthernetSM,
2235 "RXS: received packet into fifo, starting ticking\n");
2236 restartClock();
2237 }
2238
2239 if (!rxFifo.push(pkt)) {
2240 DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
2241 postInterrupt(IT_RXO, true);
2242 anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
2243 return false;
2244 }
2245
2246 if (CPA::available() && cpa->enabled()) {
2247 assert(sys->numSystemsRunning <= 2);
2248 System *other_sys;
2249 if (sys->systemList[0] == sys)
2250 other_sys = sys->systemList[1];
2251 else
2252 other_sys = sys->systemList[0];
2253
2254 cpa->hwDq(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
2255 anQ("RXQ", "RX FIFO Q");
2256 cpa->hwWe(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
2257 }
2258
2259 return true;
2260}
2261
2262
void
IGbE::rxStateMachine()
{
    // One step of the receive state machine: retire a completed packet
    // DMA (write back descriptors, post threshold interrupts), keep the
    // descriptor cache primed, and start DMA of the next packet from the
    // RX FIFO.  rxTick is cleared whenever progress requires waiting on
    // an external event (DMA completion, descriptor fetch, empty FIFO).
    if (!regs.rctl.en()) {
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
        return;
    }

    // If the packet is done check for interrupts/descriptors/etc
    if (rxDescCache.packetDone()) {
        rxDmaPacket = false;
        DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
        int descLeft = rxDescCache.descLeft();
        DPRINTF(EthernetSM, "RXS: descLeft: %d rdmts: %d rdlen: %d\n",
                descLeft, regs.rctl.rdmts(), regs.rdlen());
        // RDMTS 2/1/0 selects a free-descriptor threshold of 1/8, 1/4 or
        // 1/2 of the ring.  The cases intentionally fall through to the
        // interrupt post once the selected threshold is crossed.
        switch (regs.rctl.rdmts()) {
          case 2: if (descLeft > .125 * regs.rdlen()) break;
          case 1: if (descLeft > .250 * regs.rdlen()) break;
          case 0: if (descLeft > .500 * regs.rdlen()) break;
            DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) "
                    "because of descriptors left\n");
            postInterrupt(IT_RXDMT);
            break;
        }

        if (rxFifo.empty())
            rxDescCache.writeback(0);

        if (descLeft == 0) {
            anBegin("RXS", "Writeback Descriptors");
            rxDescCache.writeback(0);
            DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
                    " writeback and stopping ticking\n");
            rxTick = false;
        }

        // only support descriptor granularities
        assert(regs.rxdctl.gran());

        if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
            DPRINTF(EthernetSM,
                    "RXS: Writing back because WTHRESH >= descUsed\n");
            anBegin("RXS", "Writeback Descriptors");
            // Clamp the writeback burst to one cache block of descriptors
            // (each descriptor is 16 bytes, hence the >>4).
            if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
                rxDescCache.writeback(regs.rxdctl.wthresh()-1);
            else
                rxDescCache.writeback((cacheBlockSize()-1)>>4);
        }

        // Prefetch more descriptors when the cache runs below PTHRESH and
        // the ring still has more than HTHRESH descriptors available.
        if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
            ((rxDescCache.descLeft() - rxDescCache.descUnused()) >
             regs.rxdctl.hthresh())) {
            DPRINTF(EthernetSM, "RXS: Fetching descriptors because "
                    "descUnused < PTHRESH\n");
            anBegin("RXS", "Fetch Descriptors");
            rxDescCache.fetchDescriptors();
        }

        if (rxDescCache.descUnused() == 0) {
            anBegin("RXS", "Fetch Descriptors");
            rxDescCache.fetchDescriptors();
            anWe("RXS", rxDescCache.annUnusedCacheQ);
            DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
                    "fetching descriptors and stopping ticking\n");
            rxTick = false;
        }
        return;
    }

    // A packet DMA is still in flight; wait for its completion callback.
    if (rxDmaPacket) {
        DPRINTF(EthernetSM,
                "RXS: stopping ticking until packet DMA completes\n");
        rxTick = false;
        return;
    }

    if (!rxDescCache.descUnused()) {
        anBegin("RXS", "Fetch Descriptors");
        rxDescCache.fetchDescriptors();
        anWe("RXS", rxDescCache.annUnusedCacheQ);
        DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
                "stopping ticking\n");
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n");
        return;
    }
    anPq("RXS", rxDescCache.annUnusedCacheQ);

    if (rxFifo.empty()) {
        anWe("RXS", "RX FIFO Q");
        DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
        rxTick = false;
        return;
    }
    anPq("RXS", "RX FIFO Q");
    anBegin("RXS", "Get Desc");

    EthPacketPtr pkt;
    pkt = rxFifo.front();


    // Start DMAing (part of) the packet; a partial write leaves pktOffset
    // non-zero so the packet is continued on a later tick.
    pktOffset = rxDescCache.writePacket(pkt, pktOffset);
    DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
    if (pktOffset == pkt->length) {
        anBegin( "RXS", "FIFO Dequeue");
        DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
        pktOffset = 0;
        anDq("RXS", "RX FIFO Q");
        rxFifo.pop();
    }

    DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
    rxTick = false;
    rxDmaPacket = true;
    anBegin("RXS", "DMA Packet");
}
2380
2381void
2382IGbE::txWire()
2383{
2384 if (txFifo.empty()) {
2385 anWe("TXQ", "TX FIFO Q");
2386 txFifoTick = false;
2387 return;
2388 }
2389
2390
2391 anPq("TXQ", "TX FIFO Q");
2392 if (etherInt->sendPacket(txFifo.front())) {
2393 anQ("TXQ", "WireQ");
2394 if (DTRACE(EthernetSM)) {
2395 IpPtr ip(txFifo.front());
2396 if (ip)
2397 DPRINTF(EthernetSM, "Transmitting Ip packet with Id=%d\n",
2398 ip->id());
2399 else
2400 DPRINTF(EthernetSM, "Transmitting Non-Ip packet\n");
2401 }
2402 anDq("TXQ", "TX FIFO Q");
2403 anBegin("TXQ", "Wire Send");
2404 DPRINTF(EthernetSM,
2405 "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
2406 txFifo.avail());
2407
2408 txBytes += txFifo.front()->length;
2409 txPackets++;
2410 txFifoTick = false;
2411
2412 txFifo.pop();
2413 } else {
2414 // We'll get woken up when the packet ethTxDone() gets called
2415 txFifoTick = false;
2416 }
2417}
2418
2419void
2420IGbE::tick()
2421{
2422 DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");
2423
2424 if (rxTick)
2425 rxStateMachine();
2426
2427 if (txTick)
2428 txStateMachine();
2429
2430 if (txFifoTick)
2431 txWire();
2432
2433
2434 if (rxTick || txTick || txFifoTick)
2435 schedule(tickEvent, curTick() + clockPeriod());
2436}
2437
2438void
2439IGbE::ethTxDone()
2440{
2441 anBegin("TXQ", "Send Done");
2442 // restart the tx state machines if they are stopped
2443 // fifo to send another packet
2444 // tx sm to put more data into the fifo
2445 txFifoTick = true && drainState() != DrainState::Draining;
2446 if (txDescCache.descLeft() != 0 && drainState() != DrainState::Draining)
2447 txTick = true;
2448
2449 restartClock();
2450 txWire();
2451 DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
2452}
2453
void
IGbE::serialize(CheckpointOut &cp) const
{
    // Checkpoint the device state.  Event firing times are stored as
    // absolute ticks (0 when not scheduled) so unserialize() can decide
    // whether to reschedule them.  Field order must mirror unserialize().
    PciDevice::serialize(cp);

    regs.serialize(cp);
    SERIALIZE_SCALAR(eeOpBits);
    SERIALIZE_SCALAR(eeAddrBits);
    SERIALIZE_SCALAR(eeDataBits);
    SERIALIZE_SCALAR(eeOpcode);
    SERIALIZE_SCALAR(eeAddr);
    SERIALIZE_SCALAR(lastInterrupt);
    SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.serialize("rxfifo", cp);
    txFifo.serialize("txfifo", cp);

    // The in-flight tx packet (if any) lives outside both FIFOs and is
    // checkpointed separately, guarded by an existence flag.
    bool txPktExists = txPacket != nullptr;
    SERIALIZE_SCALAR(txPktExists);
    if (txPktExists)
        txPacket->serialize("txpacket", cp);

    Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
        inter_time = 0;

    if (rdtrEvent.scheduled())
        rdtr_time = rdtrEvent.when();
    SERIALIZE_SCALAR(rdtr_time);

    if (radvEvent.scheduled())
        radv_time = radvEvent.when();
    SERIALIZE_SCALAR(radv_time);

    if (tidvEvent.scheduled())
        tidv_time = tidvEvent.when();
    SERIALIZE_SCALAR(tidv_time);

    if (tadvEvent.scheduled())
        tadv_time = tadvEvent.when();
    SERIALIZE_SCALAR(tadv_time);

    if (interEvent.scheduled())
        inter_time = interEvent.when();
    SERIALIZE_SCALAR(inter_time);

    SERIALIZE_SCALAR(pktOffset);

    txDescCache.serializeSection(cp, "TxDescCache");
    rxDescCache.serializeSection(cp, "RxDescCache");
}
2504
2505void
2506IGbE::unserialize(CheckpointIn &cp)
2507{
2508 PciDevice::unserialize(cp);
2509
2510 regs.unserialize(cp);
2511 UNSERIALIZE_SCALAR(eeOpBits);
2512 UNSERIALIZE_SCALAR(eeAddrBits);
2513 UNSERIALIZE_SCALAR(eeDataBits);
2514 UNSERIALIZE_SCALAR(eeOpcode);
2515 UNSERIALIZE_SCALAR(eeAddr);
2516 UNSERIALIZE_SCALAR(lastInterrupt);
2517 UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);
2518
2519 rxFifo.unserialize("rxfifo", cp);
2520 txFifo.unserialize("txfifo", cp);
2521
2522 bool txPktExists;
2523 UNSERIALIZE_SCALAR(txPktExists);
2524 if (txPktExists) {
1/*
2 * Copyright (c) 2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Ali Saidi
29 */
30
31/* @file
32 * Device model for Intel's 8254x line of gigabit ethernet controllers.
33 * In particular an 82547 revision 2 (82547GI) MAC because it seems to have the
34 * fewest workarounds in the driver. It will probably work with most of the
35 * other MACs with slight modifications.
36 */
37
38#include "dev/net/i8254xGBe.hh"
39
40/*
41 * @todo really there are multiple dma engines.. we should implement them.
42 */
43
44#include <algorithm>
45#include <memory>
46
47#include "base/inet.hh"
48#include "base/trace.hh"
49#include "debug/Drain.hh"
50#include "debug/EthernetAll.hh"
51#include "mem/packet.hh"
52#include "mem/packet_access.hh"
53#include "params/IGbE.hh"
54#include "sim/stats.hh"
55#include "sim/system.hh"
56
57using namespace iGbReg;
58using namespace Net;
59
// Construct the NIC: wire up timing parameters and events, then set the
// power-on register and EEPROM state the driver expects to find.
IGbE::IGbE(const Params *p)
    : EtherDevice(p), etherInt(NULL), cpa(NULL),
      rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), rxTick(false),
      txTick(false), txFifoTick(false), rxDmaPacket(false), pktOffset(0),
      fetchDelay(p->fetch_delay), wbDelay(p->wb_delay),
      fetchCompDelay(p->fetch_comp_delay), wbCompDelay(p->wb_comp_delay),
      rxWriteDelay(p->rx_write_delay), txReadDelay(p->tx_read_delay),
      rdtrEvent(this), radvEvent(this),
      tadvEvent(this), tidvEvent(this), tickEvent(this), interEvent(this),
      rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size),
      txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size),
      lastInterrupt(0)
{
    etherInt = new IGbEInt(name() + ".int", this);

    // Initialized internal registers per Intel documentation
    // All registers initialized to 0 by per register constructor
    regs.ctrl.fd(1);
    regs.ctrl.lrst(1);
    regs.ctrl.speed(2);
    regs.ctrl.frcspd(1);
    regs.sts.speed(3); // Say we're 1000Mbps
    regs.sts.fd(1); // full duplex
    regs.sts.lu(1); // link up
    regs.eecd.fwe(1);
    regs.eecd.ee_type(1);
    regs.imr = 0;
    regs.iam = 0;
    regs.rxdctl.gran(1);
    regs.rxdctl.wthresh(1);
    regs.fcrth(1);
    regs.tdwba = 0;
    regs.rlpml = 0;
    regs.sw_fw_sync = 0;

    regs.pba.rxa(0x30);
    regs.pba.txa(0x10);

    // Reset the bit-banged EEPROM interface state machine.
    eeOpBits = 0;
    eeAddrBits = 0;
    eeDataBits = 0;
    eeOpcode = 0;

    // clear all 64 16 bit words of the eeprom
    memset(&flash, 0, EEPROM_SIZE*2);

    // Set the MAC address
    memcpy(flash, p->hardware_address.bytes(), ETH_ADDR_LEN);
    for (int x = 0; x < ETH_ADDR_LEN/2; x++)
        flash[x] = htobe(flash[x]);

    // Compute the EEPROM checksum word so the driver's validation passes.
    uint16_t csum = 0;
    for (int x = 0; x < EEPROM_SIZE; x++)
        csum += htobe(flash[x]);


    // Magic happy checksum value
    flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum));

    // Store the MAC address as queue ID
    macAddr = p->hardware_address;

    rxFifo.clear();
    txFifo.clear();
}
125
IGbE::~IGbE()
{
    // The interface object was allocated in the constructor; everything
    // else is owned by value members.
    delete etherInt;
}
130
void
IGbE::init()
{
    // Grab the global CPA annotation object (may be inactive) before the
    // normal PCI device initialization runs.
    cpa = CPA::cpa();
    PciDevice::init();
}
137
138EtherInt*
139IGbE::getEthPort(const std::string &if_name, int idx)
140{
141
142 if (if_name == "interface") {
143 if (etherInt->getPeer())
144 panic("Port already connected to\n");
145 return etherInt;
146 }
147 return NULL;
148}
149
150Tick
151IGbE::writeConfig(PacketPtr pkt)
152{
153 int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
154 if (offset < PCI_DEVICE_SPECIFIC)
155 PciDevice::writeConfig(pkt);
156 else
157 panic("Device specific PCI config space not implemented.\n");
158
159 //
160 // Some work may need to be done here based for the pci COMMAND bits.
161 //
162
163 return configDelay;
164}
165
// Handy macro for range-testing register access addresses.
// Arguments are fully parenthesized so expressions with lower-precedence
// operators (e.g. `a + b`, `c ? x : y`) expand correctly at the use site.
#define IN_RANGE(val, base, len) \
    (((val) >= (base)) && ((val) < ((base) + (len))))
168
Tick
IGbE::read(PacketPtr pkt)
{
    // Handle a 32-bit MMIO read of a device register (memory BAR0 only).
    // Most registers simply report internal state; ICR and SWSM have
    // read side effects.  Returns the configured PIO latency.
    int bar;
    Addr daddr;

    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    // Only 32bit accesses allowed
    assert(pkt->getSize() == 4);

    DPRINTF(Ethernet, "Read device register %#X\n", daddr);

    //
    // Handle read of register here
    //


    switch (daddr) {
      case REG_CTRL:
        pkt->set<uint32_t>(regs.ctrl());
        break;
      case REG_STATUS:
        pkt->set<uint32_t>(regs.sts());
        break;
      case REG_EECD:
        pkt->set<uint32_t>(regs.eecd());
        break;
      case REG_EERD:
        pkt->set<uint32_t>(regs.eerd());
        break;
      case REG_CTRL_EXT:
        pkt->set<uint32_t>(regs.ctrl_ext());
        break;
      case REG_MDIC:
        pkt->set<uint32_t>(regs.mdic());
        break;
      case REG_ICR:
        // ICR is read-to-clear when the interrupt line is asserted or
        // everything is masked; IAME additionally auto-masks via IAM.
        DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
                regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
        pkt->set<uint32_t>(regs.icr());
        if (regs.icr.int_assert() || regs.imr == 0) {
            regs.icr = regs.icr() & ~mask(30);
            DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr());
        }
        if (regs.ctrl_ext.iame() && regs.icr.int_assert())
            regs.imr &= ~regs.iam;
        chkInterrupt();
        break;
      case REG_EICR:
        // This is only useful for MSI, but the driver reads it every time
        // Just don't do anything
        pkt->set<uint32_t>(0);
        break;
      case REG_ITR:
        pkt->set<uint32_t>(regs.itr());
        break;
      case REG_RCTL:
        pkt->set<uint32_t>(regs.rctl());
        break;
      case REG_FCTTV:
        pkt->set<uint32_t>(regs.fcttv());
        break;
      case REG_TCTL:
        pkt->set<uint32_t>(regs.tctl());
        break;
      case REG_PBA:
        pkt->set<uint32_t>(regs.pba());
        break;
      case REG_WUC:
      case REG_LEDCTL:
        pkt->set<uint32_t>(0); // We don't care, so just return 0
        break;
      case REG_FCRTL:
        pkt->set<uint32_t>(regs.fcrtl());
        break;
      case REG_FCRTH:
        pkt->set<uint32_t>(regs.fcrth());
        break;
      case REG_RDBAL:
        pkt->set<uint32_t>(regs.rdba.rdbal());
        break;
      case REG_RDBAH:
        pkt->set<uint32_t>(regs.rdba.rdbah());
        break;
      case REG_RDLEN:
        pkt->set<uint32_t>(regs.rdlen());
        break;
      case REG_SRRCTL:
        pkt->set<uint32_t>(regs.srrctl());
        break;
      case REG_RDH:
        pkt->set<uint32_t>(regs.rdh());
        break;
      case REG_RDT:
        pkt->set<uint32_t>(regs.rdt());
        break;
      case REG_RDTR:
        pkt->set<uint32_t>(regs.rdtr());
        // FPD (flush partial descriptor) acts as a self-clearing trigger:
        // force a writeback and post a receive-timer interrupt.
        if (regs.rdtr.fpd()) {
            rxDescCache.writeback(0);
            DPRINTF(EthernetIntr,
                    "Posting interrupt because of RDTR.FPD write\n");
            postInterrupt(IT_RXT);
            regs.rdtr.fpd(0);
        }
        break;
      case REG_RXDCTL:
        pkt->set<uint32_t>(regs.rxdctl());
        break;
      case REG_RADV:
        pkt->set<uint32_t>(regs.radv());
        break;
      case REG_TDBAL:
        pkt->set<uint32_t>(regs.tdba.tdbal());
        break;
      case REG_TDBAH:
        pkt->set<uint32_t>(regs.tdba.tdbah());
        break;
      case REG_TDLEN:
        pkt->set<uint32_t>(regs.tdlen());
        break;
      case REG_TDH:
        pkt->set<uint32_t>(regs.tdh());
        break;
      case REG_TXDCA_CTL:
        pkt->set<uint32_t>(regs.txdca_ctl());
        break;
      case REG_TDT:
        pkt->set<uint32_t>(regs.tdt());
        break;
      case REG_TIDV:
        pkt->set<uint32_t>(regs.tidv());
        break;
      case REG_TXDCTL:
        pkt->set<uint32_t>(regs.txdctl());
        break;
      case REG_TADV:
        pkt->set<uint32_t>(regs.tadv());
        break;
      case REG_TDWBAL:
        // tdwba is a single 64-bit value split across two registers.
        pkt->set<uint32_t>(regs.tdwba & mask(32));
        break;
      case REG_TDWBAH:
        pkt->set<uint32_t>(regs.tdwba >> 32);
        break;
      case REG_RXCSUM:
        pkt->set<uint32_t>(regs.rxcsum());
        break;
      case REG_RLPML:
        pkt->set<uint32_t>(regs.rlpml);
        break;
      case REG_RFCTL:
        pkt->set<uint32_t>(regs.rfctl());
        break;
      case REG_MANC:
        pkt->set<uint32_t>(regs.manc());
        break;
      case REG_SWSM:
        // Reading SWSM sets the semaphore-taken bit as a side effect.
        pkt->set<uint32_t>(regs.swsm());
        regs.swsm.smbi(1);
        break;
      case REG_FWSM:
        pkt->set<uint32_t>(regs.fwsm());
        break;
      case REG_SWFWSYNC:
        pkt->set<uint32_t>(regs.sw_fw_sync);
        break;
      default:
        // Table regions (VLAN filter, receive addresses, multicast,
        // statistics) are not modelled and read as zero.
        if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) &&
            !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) &&
            !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4) &&
            !IN_RANGE(daddr, REG_CRCERRS, STATS_REGS_SIZE))
            panic("Read request to unknown register number: %#x\n", daddr);
        else
            pkt->set<uint32_t>(0);
    };

    pkt->makeAtomicResponse();
    return pioDelay;
}
354
355Tick
356IGbE::write(PacketPtr pkt)
357{
358 int bar;
359 Addr daddr;
360
361
362 if (!getBAR(pkt->getAddr(), bar, daddr))
363 panic("Invalid PCI memory access to unmapped memory.\n");
364
365 // Only Memory register BAR is allowed
366 assert(bar == 0);
367
368 // Only 32bit accesses allowed
369 assert(pkt->getSize() == sizeof(uint32_t));
370
371 DPRINTF(Ethernet, "Wrote device register %#X value %#X\n",
372 daddr, pkt->get<uint32_t>());
373
374 //
375 // Handle write of register here
376 //
377 uint32_t val = pkt->get<uint32_t>();
378
379 Regs::RCTL oldrctl;
380 Regs::TCTL oldtctl;
381
382 switch (daddr) {
383 case REG_CTRL:
384 regs.ctrl = val;
385 if (regs.ctrl.tfce())
386 warn("TX Flow control enabled, should implement\n");
387 if (regs.ctrl.rfce())
388 warn("RX Flow control enabled, should implement\n");
389 break;
390 case REG_CTRL_EXT:
391 regs.ctrl_ext = val;
392 break;
393 case REG_STATUS:
394 regs.sts = val;
395 break;
396 case REG_EECD:
397 int oldClk;
398 oldClk = regs.eecd.sk();
399 regs.eecd = val;
400 // See if this is a eeprom access and emulate accordingly
401 if (!oldClk && regs.eecd.sk()) {
402 if (eeOpBits < 8) {
403 eeOpcode = eeOpcode << 1 | regs.eecd.din();
404 eeOpBits++;
405 } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
406 eeAddr = eeAddr << 1 | regs.eecd.din();
407 eeAddrBits++;
408 } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
409 assert(eeAddr>>1 < EEPROM_SIZE);
410 DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n",
411 flash[eeAddr>>1] >> eeDataBits & 0x1,
412 flash[eeAddr>>1]);
413 regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1);
414 eeDataBits++;
415 } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) {
416 regs.eecd.dout(0);
417 eeDataBits++;
418 } else
419 panic("What's going on with eeprom interface? opcode:"
420 " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode,
421 (uint32_t)eeOpBits, (uint32_t)eeAddr,
422 (uint32_t)eeAddrBits, (uint32_t)eeDataBits);
423
424 // Reset everything for the next command
425 if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) ||
426 (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) {
427 eeOpBits = 0;
428 eeAddrBits = 0;
429 eeDataBits = 0;
430 eeOpcode = 0;
431 eeAddr = 0;
432 }
433
434 DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
435 (uint32_t)eeOpcode, (uint32_t) eeOpBits,
436 (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits);
437 if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI ||
438 eeOpcode == EEPROM_RDSR_OPCODE_SPI ))
439 panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode,
440 (uint32_t)eeOpBits);
441
442
443 }
444 // If driver requests eeprom access, immediately give it to it
445 regs.eecd.ee_gnt(regs.eecd.ee_req());
446 break;
447 case REG_EERD:
448 regs.eerd = val;
449 if (regs.eerd.start()) {
450 regs.eerd.done(1);
451 assert(regs.eerd.addr() < EEPROM_SIZE);
452 regs.eerd.data(flash[regs.eerd.addr()]);
453 regs.eerd.start(0);
454 DPRINTF(EthernetEEPROM, "EEPROM: read addr: %#X data %#x\n",
455 regs.eerd.addr(), regs.eerd.data());
456 }
457 break;
458 case REG_MDIC:
459 regs.mdic = val;
460 if (regs.mdic.i())
461 panic("No support for interrupt on mdic complete\n");
462 if (regs.mdic.phyadd() != 1)
463 panic("No support for reading anything but phy\n");
464 DPRINTF(Ethernet, "%s phy address %x\n",
465 regs.mdic.op() == 1 ? "Writing" : "Reading",
466 regs.mdic.regadd());
467 switch (regs.mdic.regadd()) {
468 case PHY_PSTATUS:
469 regs.mdic.data(0x796D); // link up
470 break;
471 case PHY_PID:
472 regs.mdic.data(params()->phy_pid);
473 break;
474 case PHY_EPID:
475 regs.mdic.data(params()->phy_epid);
476 break;
477 case PHY_GSTATUS:
478 regs.mdic.data(0x7C00);
479 break;
480 case PHY_EPSTATUS:
481 regs.mdic.data(0x3000);
482 break;
483 case PHY_AGC:
484 regs.mdic.data(0x180); // some random length
485 break;
486 default:
487 regs.mdic.data(0);
488 }
489 regs.mdic.r(1);
490 break;
491 case REG_ICR:
492 DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
493 regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
494 if (regs.ctrl_ext.iame())
495 regs.imr &= ~regs.iam;
496 regs.icr = ~bits(val,30,0) & regs.icr();
497 chkInterrupt();
498 break;
499 case REG_ITR:
500 regs.itr = val;
501 break;
502 case REG_ICS:
503 DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n");
504 postInterrupt((IntTypes)val);
505 break;
506 case REG_IMS:
507 regs.imr |= val;
508 chkInterrupt();
509 break;
510 case REG_IMC:
511 regs.imr &= ~val;
512 chkInterrupt();
513 break;
514 case REG_IAM:
515 regs.iam = val;
516 break;
517 case REG_RCTL:
518 oldrctl = regs.rctl;
519 regs.rctl = val;
520 if (regs.rctl.rst()) {
521 rxDescCache.reset();
522 DPRINTF(EthernetSM, "RXS: Got RESET!\n");
523 rxFifo.clear();
524 regs.rctl.rst(0);
525 }
526 if (regs.rctl.en())
527 rxTick = true;
528 restartClock();
529 break;
530 case REG_FCTTV:
531 regs.fcttv = val;
532 break;
533 case REG_TCTL:
534 regs.tctl = val;
535 oldtctl = regs.tctl;
536 regs.tctl = val;
537 if (regs.tctl.en())
538 txTick = true;
539 restartClock();
540 if (regs.tctl.en() && !oldtctl.en()) {
541 txDescCache.reset();
542 }
543 break;
544 case REG_PBA:
545 regs.pba.rxa(val);
546 regs.pba.txa(64 - regs.pba.rxa());
547 break;
548 case REG_WUC:
549 case REG_LEDCTL:
550 case REG_FCAL:
551 case REG_FCAH:
552 case REG_FCT:
553 case REG_VET:
554 case REG_AIFS:
555 case REG_TIPG:
556 ; // We don't care, so don't store anything
557 break;
558 case REG_IVAR0:
559 warn("Writing to IVAR0, ignoring...\n");
560 break;
561 case REG_FCRTL:
562 regs.fcrtl = val;
563 break;
564 case REG_FCRTH:
565 regs.fcrth = val;
566 break;
567 case REG_RDBAL:
568 regs.rdba.rdbal( val & ~mask(4));
569 rxDescCache.areaChanged();
570 break;
571 case REG_RDBAH:
572 regs.rdba.rdbah(val);
573 rxDescCache.areaChanged();
574 break;
575 case REG_RDLEN:
576 regs.rdlen = val & ~mask(7);
577 rxDescCache.areaChanged();
578 break;
579 case REG_SRRCTL:
580 regs.srrctl = val;
581 break;
582 case REG_RDH:
583 regs.rdh = val;
584 rxDescCache.areaChanged();
585 break;
586 case REG_RDT:
587 regs.rdt = val;
588 DPRINTF(EthernetSM, "RXS: RDT Updated.\n");
589 if (drainState() == DrainState::Running) {
590 DPRINTF(EthernetSM, "RXS: RDT Fetching Descriptors!\n");
591 rxDescCache.fetchDescriptors();
592 } else {
593 DPRINTF(EthernetSM, "RXS: RDT NOT Fetching Desc b/c draining!\n");
594 }
595 break;
596 case REG_RDTR:
597 regs.rdtr = val;
598 break;
599 case REG_RADV:
600 regs.radv = val;
601 break;
602 case REG_RXDCTL:
603 regs.rxdctl = val;
604 break;
605 case REG_TDBAL:
606 regs.tdba.tdbal( val & ~mask(4));
607 txDescCache.areaChanged();
608 break;
609 case REG_TDBAH:
610 regs.tdba.tdbah(val);
611 txDescCache.areaChanged();
612 break;
613 case REG_TDLEN:
614 regs.tdlen = val & ~mask(7);
615 txDescCache.areaChanged();
616 break;
617 case REG_TDH:
618 regs.tdh = val;
619 txDescCache.areaChanged();
620 break;
621 case REG_TXDCA_CTL:
622 regs.txdca_ctl = val;
623 if (regs.txdca_ctl.enabled())
624 panic("No support for DCA\n");
625 break;
626 case REG_TDT:
627 regs.tdt = val;
628 DPRINTF(EthernetSM, "TXS: TX Tail pointer updated\n");
629 if (drainState() == DrainState::Running) {
630 DPRINTF(EthernetSM, "TXS: TDT Fetching Descriptors!\n");
631 txDescCache.fetchDescriptors();
632 } else {
633 DPRINTF(EthernetSM, "TXS: TDT NOT Fetching Desc b/c draining!\n");
634 }
635 break;
636 case REG_TIDV:
637 regs.tidv = val;
638 break;
639 case REG_TXDCTL:
640 regs.txdctl = val;
641 break;
642 case REG_TADV:
643 regs.tadv = val;
644 break;
645 case REG_TDWBAL:
646 regs.tdwba &= ~mask(32);
647 regs.tdwba |= val;
648 txDescCache.completionWriteback(regs.tdwba & ~mask(1),
649 regs.tdwba & mask(1));
650 break;
651 case REG_TDWBAH:
652 regs.tdwba &= mask(32);
653 regs.tdwba |= (uint64_t)val << 32;
654 txDescCache.completionWriteback(regs.tdwba & ~mask(1),
655 regs.tdwba & mask(1));
656 break;
657 case REG_RXCSUM:
658 regs.rxcsum = val;
659 break;
660 case REG_RLPML:
661 regs.rlpml = val;
662 break;
663 case REG_RFCTL:
664 regs.rfctl = val;
665 if (regs.rfctl.exsten())
666 panic("Extended RX descriptors not implemented\n");
667 break;
668 case REG_MANC:
669 regs.manc = val;
670 break;
671 case REG_SWSM:
672 regs.swsm = val;
673 if (regs.fwsm.eep_fw_semaphore())
674 regs.swsm.swesmbi(0);
675 break;
676 case REG_SWFWSYNC:
677 regs.sw_fw_sync = val;
678 break;
679 default:
680 if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) &&
681 !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) &&
682 !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4))
683 panic("Write request to unknown register number: %#x\n", daddr);
684 };
685
686 pkt->makeAtomicResponse();
687 return pioDelay;
688}
689
void
IGbE::postInterrupt(IntTypes t, bool now)
{
    // Record interrupt cause t in ICR and either post the CPU interrupt
    // immediately or defer it per the ITR moderation interval.  When
    // 'now' is set the ITR throttle is bypassed.
    assert(t);

    // Interrupt is already pending
    if (t & regs.icr() && !now)
        return;

    regs.icr = regs.icr() | t;

    // ITR interval is expressed in 256 ns units.
    Tick itr_interval = SimClock::Int::ns * 256 * regs.itr.interval();
    DPRINTF(EthernetIntr,
            "EINT: postInterrupt() curTick(): %d itr: %d interval: %d\n",
            curTick(), regs.itr.interval(), itr_interval);

    if (regs.itr.interval() == 0 || now ||
        lastInterrupt + itr_interval <= curTick()) {
        // Moderation disabled, forced, or the window already elapsed:
        // post immediately and cancel any pending delayed post.
        if (interEvent.scheduled()) {
            deschedule(interEvent);
        }
        cpuPostInt();
    } else {
        // Throttled: schedule the post for the end of the ITR window.
        Tick int_time = lastInterrupt + itr_interval;
        assert(int_time > 0);
        DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for tick %d\n",
                int_time);
        if (!interEvent.scheduled()) {
            schedule(interEvent, int_time);
        }
    }
}
722
void
IGbE::delayIntEvent()
{
    // Callback for the ITR-moderated (delayed) interrupt event.
    cpuPostInt();
}
728
729
void
IGbE::cpuPostInt()
{
    // Assert the interrupt line to the CPU (unless masked).  Any pending
    // delay timers (rdtr/radv/tidv/tadv) are folded into this interrupt:
    // their cause bits are set in ICR and the events descheduled.

    postedInterrupts++;

    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Interrupt Masked. Not Posting\n");
        return;
    }

    DPRINTF(Ethernet, "Posting Interrupt\n");


    if (interEvent.scheduled()) {
        deschedule(interEvent);
    }

    if (rdtrEvent.scheduled()) {
        regs.icr.rxt0(1);
        deschedule(rdtrEvent);
    }
    if (radvEvent.scheduled()) {
        regs.icr.rxt0(1);
        deschedule(radvEvent);
    }
    if (tadvEvent.scheduled()) {
        regs.icr.txdw(1);
        deschedule(tadvEvent);
    }
    if (tidvEvent.scheduled()) {
        regs.icr.txdw(1);
        deschedule(tidvEvent);
    }

    regs.icr.int_assert(1);
    DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n",
            regs.icr());

    intrPost();

    // Remember when we last interrupted for ITR moderation.
    lastInterrupt = curTick();
}
773
774void
775IGbE::cpuClearInt()
776{
777 if (regs.icr.int_assert()) {
778 regs.icr.int_assert(0);
779 DPRINTF(EthernetIntr,
780 "EINT: Clearing interrupt to CPU now. Vector %#x\n",
781 regs.icr());
782 intrClear();
783 }
784}
785
void
IGbE::chkInterrupt()
{
    // Re-evaluate the interrupt line after an ICR/IMR change: clear the
    // CPU interrupt if everything is now masked, otherwise (re)post it
    // immediately or after the ITR moderation interval.
    DPRINTF(Ethernet, "Checking interrupts icr: %#x imr: %#x\n", regs.icr(),
            regs.imr);
    // Check if we need to clear the cpu interrupt
    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Mask cleaned all interrupts\n");
        if (interEvent.scheduled())
            deschedule(interEvent);
        if (regs.icr.int_assert())
            cpuClearInt();
    }
    DPRINTF(Ethernet, "ITR = %#X itr.interval = %#X\n",
            regs.itr(), regs.itr.interval());

    if (regs.icr() & regs.imr) {
        if (regs.itr.interval() == 0) {
            cpuPostInt();
        } else {
            DPRINTF(Ethernet,
                    "Possibly scheduling interrupt because of imr write\n");
            if (!interEvent.scheduled()) {
                // ITR interval is expressed in 256 ns units.
                Tick t = curTick() + SimClock::Int::ns * 256 * regs.itr.interval();
                DPRINTF(Ethernet, "Scheduling for %d\n", t);
                schedule(interEvent, t);
            }
        }
    }
}
816
817
818///////////////////////////// IGbE::DescCache //////////////////////////////
819
template<class T>
IGbE::DescCache<T>::DescCache(IGbE *i, const std::string n, int s)
    : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0),
      wbOut(0), moreToWb(false), wbAlignment(0), pktPtr(NULL),
      wbDelayEvent(this), fetchDelayEvent(this), fetchEvent(this),
      wbEvent(this)
{
    // Staging buffers for descriptor fetch/writeback DMA, sized for the
    // whole cache; released in the destructor.
    fetchBuf = new T[size];
    wbBuf = new T[size];
}
830
template<class T>
IGbE::DescCache<T>::~DescCache()
{
    // reset() releases any descriptors still held in the cache queues
    // before the DMA staging buffers are freed.
    reset();
    delete[] fetchBuf;
    delete[] wbBuf;
}
838
839template<class T>
840void
841IGbE::DescCache<T>::areaChanged()
842{
843 if (usedCache.size() > 0 || curFetching || wbOut)
844 panic("Descriptor Address, Length or Head changed. Bad\n");
845 reset();
846
847}
848
template<class T>
void
IGbE::DescCache<T>::writeback(Addr aMask)
{
    // Write processed descriptors back to the ring in memory.  aMask
    // constrains the count to an alignment boundary (0 = unrestricted).
    int curHead = descHead();
    int max_to_wb = usedCache.size();

    // Check if this writeback is less restrictive than the previous
    // and if so setup another one immediately following it
    if (wbOut) {
        if (aMask < wbAlignment) {
            moreToWb = true;
            wbAlignment = aMask;
        }
        DPRINTF(EthernetDesc,
                "Writing back already in process, returning\n");
        return;
    }

    moreToWb = false;
    wbAlignment = aMask;


    DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
            "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
            curHead, descTail(), descLen(), cachePnt, max_to_wb,
            descLeft());

    // Never wrap past the end of the ring in a single DMA; the remainder
    // is written back by a follow-up pass (moreToWb).
    if (max_to_wb + curHead >= descLen()) {
        max_to_wb = descLen() - curHead;
        moreToWb = true;
        // this is by definition aligned correctly
    } else if (wbAlignment != 0) {
        // align the wb point to the mask
        max_to_wb = max_to_wb & ~wbAlignment;
    }

    DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);

    if (max_to_wb <= 0) {
        if (usedCache.size())
            igbe->anBegin(annSmWb, "Wait Alignment", CPA::FL_WAIT);
        else
            igbe->anWe(annSmWb, annUsedCacheQ);
        return;
    }

    wbOut = max_to_wb;

    // Model the writeback preparation latency; writeback1() issues the
    // actual DMA when wbDelayEvent fires.
    assert(!wbDelayEvent.scheduled());
    igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
    igbe->anBegin(annSmWb, "Prepare Writeback Desc");
}
902
template<class T>
void
IGbE::DescCache<T>::writeback1()
{
    // Second phase of writeback: copy wbOut descriptors into the staging
    // buffer and issue the DMA write.  wbEvent fires on completion.

    // If we're draining delay issuing this DMA
    if (igbe->drainState() != DrainState::Running) {
        igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
        return;
    }

    DPRINTF(EthernetDesc, "Begining DMA of %d descriptors\n", wbOut);

    for (int x = 0; x < wbOut; x++) {
        assert(usedCache.size());
        memcpy(&wbBuf[x], usedCache[x], sizeof(T));
        igbe->anPq(annSmWb, annUsedCacheQ);
        igbe->anPq(annSmWb, annDescQ);
        igbe->anQ(annSmWb, annUsedDescQ);
    }


    igbe->anBegin(annSmWb, "Writeback Desc DMA");

    assert(wbOut);
    igbe->dmaWrite(pciToDma(descBase() + descHead() * sizeof(T)),
                   wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf,
                   igbe->wbCompDelay);
}
931
template<class T>
void
IGbE::DescCache<T>::fetchDescriptors()
{
    // Fetch more descriptors from the ring into the unused cache, limited
    // by how many are available up to the tail (without wrapping past the
    // end of the ring) and by the free space left in the cache.
    size_t max_to_fetch;

    if (curFetching) {
        DPRINTF(EthernetDesc,
                "Currently fetching %d descriptors, returning\n",
                curFetching);
        return;
    }

    // Don't wrap within a single fetch; the wrap-around portion is picked
    // up by a later fetch starting at the ring base.
    if (descTail() >= cachePnt)
        max_to_fetch = descTail() - cachePnt;
    else
        max_to_fetch = descLen() - cachePnt;

    size_t free_cache = size - usedCache.size() - unusedCache.size();

    if (!max_to_fetch)
        igbe->anWe(annSmFetch, annUnusedDescQ);
    else
        igbe->anPq(annSmFetch, annUnusedDescQ, max_to_fetch);

    if (max_to_fetch) {
        if (!free_cache)
            igbe->anWf(annSmFetch, annDescQ);
        else
            igbe->anRq(annSmFetch, annDescQ, free_cache);
    }

    max_to_fetch = std::min(max_to_fetch, free_cache);


    DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
            "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
            descHead(), descTail(), descLen(), cachePnt,
            max_to_fetch, descLeft());

    // Nothing to do
    if (max_to_fetch == 0)
        return;

    // So we don't have two descriptor fetches going on at once
    curFetching = max_to_fetch;

    // Model the fetch preparation latency; fetchDescriptors1() issues the
    // actual DMA when fetchDelayEvent fires.
    assert(!fetchDelayEvent.scheduled());
    igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
    igbe->anBegin(annSmFetch, "Prepare Fetch Desc");
}
983
984template<class T>
985void
986IGbE::DescCache<T>::fetchDescriptors1()
987{
988 // If we're draining delay issuing this DMA
989 if (igbe->drainState() != DrainState::Running) {
990 igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
991 return;
992 }
993
994 igbe->anBegin(annSmFetch, "Fetch Desc");
995
996 DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
997 descBase() + cachePnt * sizeof(T),
998 pciToDma(descBase() + cachePnt * sizeof(T)),
999 curFetching * sizeof(T));
1000 assert(curFetching);
1001 igbe->dmaRead(pciToDma(descBase() + cachePnt * sizeof(T)),
1002 curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf,
1003 igbe->fetchCompDelay);
1004}
1005
template<class T>
void
IGbE::DescCache<T>::fetchComplete()
{
    // Descriptor DMA finished: copy each fetched descriptor out of
    // the staging buffer into a freshly allocated object on the
    // unused (not-yet-processed) list.
    T *newDesc;
    igbe->anBegin(annSmFetch, "Fetch Complete");
    for (int x = 0; x < curFetching; x++) {
        newDesc = new T;
        memcpy(newDesc, &fetchBuf[x], sizeof(T));
        unusedCache.push_back(newDesc);
        igbe->anDq(annSmFetch, annUnusedDescQ);
        igbe->anQ(annSmFetch, annUnusedCacheQ);
        igbe->anQ(annSmFetch, annDescQ);
    }


#ifndef NDEBUG
    int oldCp = cachePnt;
#endif

    // Advance the fetch pointer, wrapping at the end of the ring.
    cachePnt += curFetching;
    assert(cachePnt <= descLen());
    if (cachePnt == descLen())
        cachePnt = 0;

    curFetching = 0;

    DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
            oldCp, cachePnt);

    // Annotate what the fetch state machine is now blocked on:
    // no more ring descriptors, a full cache, or just waiting.
    if ((descTail() >= cachePnt ? (descTail() - cachePnt) : (descLen() -
                                                            cachePnt)) == 0)
    {
        igbe->anWe(annSmFetch, annUnusedDescQ);
    } else if (!(size - usedCache.size() - unusedCache.size())) {
        igbe->anWf(annSmFetch, annDescQ);
    } else {
        igbe->anBegin(annSmFetch, "Wait", CPA::FL_WAIT);
    }

    // New descriptors are available: wake the state machine and let
    // any pending drain re-evaluate.
    enableSm();
    igbe->checkDrain();
}
1049
template<class T>
void
IGbE::DescCache<T>::wbComplete()
{
    // Writeback DMA finished: retire the written-back descriptors,
    // advance the hardware head pointer, then restart any writeback
    // or fetch work that was waiting on this one.

    igbe->anBegin(annSmWb, "Finish Writeback");

    long curHead = descHead();
#ifndef NDEBUG
    long oldHead = curHead;
#endif

    // Free the descriptors that have now reached memory.
    for (int x = 0; x < wbOut; x++) {
        assert(usedCache.size());
        delete usedCache[0];
        usedCache.pop_front();

        igbe->anDq(annSmWb, annUsedCacheQ);
        igbe->anDq(annSmWb, annDescQ);
    }

    curHead += wbOut;
    wbOut = 0;

    // Wrap the head around the end of the descriptor ring.
    if (curHead >= descLen())
        curHead -= descLen();

    // Update the head
    updateHead(curHead);

    DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
            oldHead, curHead);

    // If we still have more to wb, call wb now
    // (actionAfterWb() must run first — e.g. the TX cache posts the
    // TXDW interrupt and head writeback there)
    actionAfterWb();
    if (moreToWb) {
        moreToWb = false;
        DPRINTF(EthernetDesc, "Writeback has more todo\n");
        writeback(wbAlignment);
    }

    // Only when no follow-up writeback was started is this state
    // machine idle; tell the drain logic and annotate accordingly.
    if (!wbOut) {
        igbe->checkDrain();
        if (usedCache.size())
            igbe->anBegin(annSmWb, "Wait", CPA::FL_WAIT);
        else
            igbe->anWe(annSmWb, annUsedCacheQ);
    }
    fetchAfterWb();
}
1100
1101template<class T>
1102void
1103IGbE::DescCache<T>::reset()
1104{
1105 DPRINTF(EthernetDesc, "Reseting descriptor cache\n");
1106 for (typename CacheType::size_type x = 0; x < usedCache.size(); x++)
1107 delete usedCache[x];
1108 for (typename CacheType::size_type x = 0; x < unusedCache.size(); x++)
1109 delete unusedCache[x];
1110
1111 usedCache.clear();
1112 unusedCache.clear();
1113
1114 cachePnt = 0;
1115
1116}
1117
template<class T>
void
IGbE::DescCache<T>::serialize(CheckpointOut &cp) const
{
    // Checkpoint the cache bookkeeping, then the raw bytes of every
    // cached descriptor, then the pending delay-event times. The
    // order here must match unserialize() exactly.
    SERIALIZE_SCALAR(cachePnt);
    SERIALIZE_SCALAR(curFetching);
    SERIALIZE_SCALAR(wbOut);
    SERIALIZE_SCALAR(moreToWb);
    SERIALIZE_SCALAR(wbAlignment);

    typename CacheType::size_type usedCacheSize = usedCache.size();
    SERIALIZE_SCALAR(usedCacheSize);
    for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
        arrayParamOut(cp, csprintf("usedCache_%d", x),
                      (uint8_t*)usedCache[x],sizeof(T));
    }

    typename CacheType::size_type unusedCacheSize = unusedCache.size();
    SERIALIZE_SCALAR(unusedCacheSize);
    for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
        arrayParamOut(cp, csprintf("unusedCache_%d", x),
                      (uint8_t*)unusedCache[x],sizeof(T));
    }

    // Record when the delay events would have fired (0 means "not
    // scheduled") so unserialize() can re-arm them.
    Tick fetch_delay = 0, wb_delay = 0;
    if (fetchDelayEvent.scheduled())
        fetch_delay = fetchDelayEvent.when();
    SERIALIZE_SCALAR(fetch_delay);
    if (wbDelayEvent.scheduled())
        wb_delay = wbDelayEvent.when();
    SERIALIZE_SCALAR(wb_delay);


}
1152
template<class T>
void
IGbE::DescCache<T>::unserialize(CheckpointIn &cp)
{
    // Mirror image of serialize(): restore the bookkeeping, rebuild
    // the cached descriptors, and re-arm any pending delay events.
    UNSERIALIZE_SCALAR(cachePnt);
    UNSERIALIZE_SCALAR(curFetching);
    UNSERIALIZE_SCALAR(wbOut);
    UNSERIALIZE_SCALAR(moreToWb);
    UNSERIALIZE_SCALAR(wbAlignment);

    typename CacheType::size_type usedCacheSize;
    UNSERIALIZE_SCALAR(usedCacheSize);
    T *temp;
    for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
        temp = new T;
        arrayParamIn(cp, csprintf("usedCache_%d", x),
                     (uint8_t*)temp,sizeof(T));
        usedCache.push_back(temp);
    }

    typename CacheType::size_type unusedCacheSize;
    UNSERIALIZE_SCALAR(unusedCacheSize);
    for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
        temp = new T;
        arrayParamIn(cp, csprintf("unusedCache_%d", x),
                     (uint8_t*)temp,sizeof(T));
        unusedCache.push_back(temp);
    }
    // A zero tick means the event was not scheduled at checkpoint time.
    Tick fetch_delay = 0, wb_delay = 0;
    UNSERIALIZE_SCALAR(fetch_delay);
    UNSERIALIZE_SCALAR(wb_delay);
    if (fetch_delay)
        igbe->schedule(fetchDelayEvent, fetch_delay);
    if (wb_delay)
        igbe->schedule(wbDelayEvent, wb_delay);


}
1191
1192///////////////////////////// IGbE::RxDescCache //////////////////////////////
1193
// Receive-side descriptor cache: holds RxDesc entries and names the
// CPA annotation state machines and queues for the RX path.
IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<RxDesc>(i, n, s), pktDone(false), splitCount(0),
      pktEvent(this), pktHdrEvent(this), pktDataEvent(this)

{
    annSmFetch = "RX Desc Fetch";
    annSmWb = "RX Desc Writeback";
    annUnusedDescQ = "RX Unused Descriptors";
    annUnusedCacheQ = "RX Unused Descriptor Cache";
    annUsedCacheQ = "RX Used Descriptor Cache";
    annUsedDescQ = "RX Used Descriptors";
    annDescQ = "RX Descriptors";
}
1207
1208void
1209IGbE::RxDescCache::pktSplitDone()
1210{
1211 splitCount++;
1212 DPRINTF(EthernetDesc,
1213 "Part of split packet done: splitcount now %d\n", splitCount);
1214 assert(splitCount <= 2);
1215 if (splitCount != 2)
1216 return;
1217 splitCount = 0;
1218 DPRINTF(EthernetDesc,
1219 "Part of split packet done: calling pktComplete()\n");
1220 pktComplete();
1221}
1222
// DMA (part of) an incoming packet into the buffer(s) of the next
// unused receive descriptor. pkt_offset is non-zero only when
// continuing a header-split packet whose header/first chunk was
// already copied. Returns the total bytes copied so far for this
// packet (bytesCopied).
int
IGbE::RxDescCache::writePacket(EthPacketPtr packet, int pkt_offset)
{
    assert(unusedCache.size());
    //if (!unusedCache.size())
    //    return false;

    pktPtr = packet;
    pktDone = false;
    unsigned buf_len, hdr_len;

    RxDesc *desc = unusedCache.front();
    switch (igbe->regs.srrctl.desctype()) {
      case RXDT_LEGACY:
        // Legacy descriptor: single buffer, whole packet must fit.
        assert(pkt_offset == 0);
        bytesCopied = packet->length;
        DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n",
                packet->length, igbe->regs.rctl.descSize());
        assert(packet->length < igbe->regs.rctl.descSize());
        igbe->dmaWrite(pciToDma(desc->legacy.buf),
                       packet->length, &pktEvent, packet->data,
                       igbe->rxWriteDelay);
        break;
      case RXDT_ADV_ONEBUF:
        // Advanced one-buffer descriptor: like legacy, but the
        // writeback format carries explicit header/packet lengths.
        assert(pkt_offset == 0);
        bytesCopied = packet->length;
        // Buffer size comes from SRRCTL when long-packet-enable is
        // set, otherwise from the RCTL descriptor size.
        buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
            igbe->regs.rctl.descSize();
        DPRINTF(EthernetDesc, "Packet Length: %d srrctl: %#x Desc Size: %d\n",
                packet->length, igbe->regs.srrctl(), buf_len);
        assert(packet->length < buf_len);
        igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
                       packet->length, &pktEvent, packet->data,
                       igbe->rxWriteDelay);
        desc->adv_wb.header_len = htole(0);
        desc->adv_wb.sph = htole(0);
        desc->adv_wb.pkt_len = htole((uint16_t)(pktPtr->length));
        break;
      case RXDT_ADV_SPLIT_A:
        // Advanced header-split descriptor: protocol headers go to
        // the header buffer, payload to the packet buffer.
        int split_point;

        buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
            igbe->regs.rctl.descSize();
        hdr_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.hdrLen() : 0;
        DPRINTF(EthernetDesc,
                "lpe: %d Packet Length: %d offset: %d srrctl: %#x "
                "hdr addr: %#x Hdr Size: %d desc addr: %#x Desc Size: %d\n",
                igbe->regs.rctl.lpe(), packet->length, pkt_offset,
                igbe->regs.srrctl(), desc->adv_read.hdr, hdr_len,
                desc->adv_read.pkt, buf_len);

        // Where the protocol headers end in this packet (0 if the
        // packet couldn't be decoded).
        split_point = hsplit(pktPtr);

        if (packet->length <= hdr_len) {
            // Whole packet fits in the header buffer.
            bytesCopied = packet->length;
            assert(pkt_offset == 0);
            DPRINTF(EthernetDesc, "Hdr split: Entire packet in header\n");
            igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
                           packet->length, &pktEvent, packet->data,
                           igbe->rxWriteDelay);
            desc->adv_wb.header_len = htole((uint16_t)packet->length);
            desc->adv_wb.sph = htole(0);
            desc->adv_wb.pkt_len = htole(0);
        } else if (split_point) {
            if (pkt_offset) {
                // we are only copying some data, header/data has already been
                // copied
                int max_to_copy =
                    std::min(packet->length - pkt_offset, buf_len);
                bytesCopied += max_to_copy;
                DPRINTF(EthernetDesc,
                        "Hdr split: Continuing data buffer copy\n");
                igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
                               max_to_copy, &pktEvent,
                               packet->data + pkt_offset, igbe->rxWriteDelay);
                desc->adv_wb.header_len = htole(0);
                desc->adv_wb.pkt_len = htole((uint16_t)max_to_copy);
                desc->adv_wb.sph = htole(0);
            } else {
                // First descriptor for this packet: two DMAs, one for
                // the header buffer and one for the payload buffer;
                // pktSplitDone() fires pktComplete() after both land.
                int max_to_copy =
                    std::min(packet->length - split_point, buf_len);
                bytesCopied += max_to_copy + split_point;

                DPRINTF(EthernetDesc, "Hdr split: splitting at %d\n",
                        split_point);
                igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
                               split_point, &pktHdrEvent,
                               packet->data, igbe->rxWriteDelay);
                igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
                               max_to_copy, &pktDataEvent,
                               packet->data + split_point, igbe->rxWriteDelay);
                desc->adv_wb.header_len = htole(split_point);
                desc->adv_wb.sph = 1;
                desc->adv_wb.pkt_len = htole((uint16_t)(max_to_copy));
            }
        } else {
            panic("Header split not fitting within header buffer or "
                  "undecodable packet not fitting in header unsupported\n");
        }
        break;
      default:
        panic("Unimplemnted RX receive buffer type: %d\n",
              igbe->regs.srrctl.desctype());
    }
    return bytesCopied;

}
1330
// Packet DMA to the receive buffer(s) finished: fill in the
// writeback fields of the descriptor (length, status, errors,
// checksum offload results), fire receive interrupts/timers as
// configured, and move the descriptor to the used queue so it can
// be written back to memory.
void
IGbE::RxDescCache::pktComplete()
{
    assert(unusedCache.size());
    RxDesc *desc;
    desc = unusedCache.front();

    igbe->anBegin("RXS", "Update Desc");

    // Unless CRC stripping is enabled, hardware reports the length
    // including the 4-byte Ethernet FCS.
    uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4 ;
    DPRINTF(EthernetDesc, "pktPtr->length: %d bytesCopied: %d "
            "stripcrc offset: %d value written: %d %d\n",
            pktPtr->length, bytesCopied, crcfixup,
            htole((uint16_t)(pktPtr->length + crcfixup)),
            (uint16_t)(pktPtr->length + crcfixup));

    // no support for anything but starting at 0
    assert(igbe->regs.rxcsum.pcss() == 0);

    DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n");

    // Descriptor-done is always set; EOP only when the whole packet
    // made it into this descriptor's buffers.
    uint16_t status = RXDS_DD;
    uint8_t err = 0;
    uint16_t ext_err = 0;
    uint16_t csum = 0;
    uint16_t ptype = 0;
    uint16_t ip_id = 0;

    assert(bytesCopied <= pktPtr->length);
    if (bytesCopied == pktPtr->length)
        status |= RXDS_EOP;

    IpPtr ip(pktPtr);

    if (ip) {
        DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n", ip->id());
        ptype |= RXDP_IPV4;
        ip_id = ip->id();

        // IP checksum offload: a non-zero checksum over the header
        // (which includes the stored checksum) means it is bad.
        if (igbe->regs.rxcsum.ipofld()) {
            DPRINTF(EthernetDesc, "Checking IP checksum\n");
            status |= RXDS_IPCS;
            csum = htole(cksum(ip));
            igbe->rxIpChecksums++;
            if (cksum(ip) != 0) {
                err |= RXDE_IPE;
                ext_err |= RXDEE_IPE;
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
            }
        }
        TcpPtr tcp(ip);
        if (tcp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking TCP checksum\n");
            status |= RXDS_TCPCS;
            ptype |= RXDP_TCP;
            csum = htole(cksum(tcp));
            igbe->rxTcpChecksums++;
            if (cksum(tcp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                err |= RXDE_TCPE;
                ext_err |= RXDEE_TCPE;
            }
        }

        UdpPtr udp(ip);
        if (udp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking UDP checksum\n");
            status |= RXDS_UDPCS;
            ptype |= RXDP_UDP;
            csum = htole(cksum(udp));
            igbe->rxUdpChecksums++;
            if (cksum(udp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                // NOTE: UDP errors reuse the TCPE bits; there is no
                // separate UDP error bit in this descriptor format.
                ext_err |= RXDEE_TCPE;
                err |= RXDE_TCPE;
            }
        }
    } else { // if ip
        DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
    }

    // Write the computed results into the layout matching the
    // configured descriptor type.
    switch (igbe->regs.srrctl.desctype()) {
      case RXDT_LEGACY:
        desc->legacy.len = htole((uint16_t)(pktPtr->length + crcfixup));
        desc->legacy.status = htole(status);
        desc->legacy.errors = htole(err);
        // No vlan support at this point... just set it to 0
        desc->legacy.vlan = 0;
        break;
      case RXDT_ADV_SPLIT_A:
      case RXDT_ADV_ONEBUF:
        desc->adv_wb.rss_type = htole(0);
        desc->adv_wb.pkt_type = htole(ptype);
        if (igbe->regs.rxcsum.pcsd()) {
            // no rss support right now
            desc->adv_wb.rss_hash = htole(0);
        } else {
            desc->adv_wb.id = htole(ip_id);
            desc->adv_wb.csum = htole(csum);
        }
        desc->adv_wb.status = htole(status);
        desc->adv_wb.errors = htole(ext_err);
        // no vlan support
        desc->adv_wb.vlan_tag = htole(0);
        break;
      default:
        panic("Unimplemnted RX receive buffer type %d\n",
              igbe->regs.srrctl.desctype());
    }

    DPRINTF(EthernetDesc, "Descriptor complete w0: %#x w1: %#x\n",
            desc->adv_read.pkt, desc->adv_read.hdr);

    if (bytesCopied == pktPtr->length) {
        DPRINTF(EthernetDesc,
                "Packet completely written to descriptor buffers\n");
        // Deal with the rx timer interrupts
        if (igbe->regs.rdtr.delay()) {
            Tick delay = igbe->regs.rdtr.delay() * igbe->intClock();
            DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n", delay);
            igbe->reschedule(igbe->rdtrEvent, curTick() + delay);
        }

        if (igbe->regs.radv.idv()) {
            Tick delay = igbe->regs.radv.idv() * igbe->intClock();
            DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n", delay);
            if (!igbe->radvEvent.scheduled()) {
                igbe->schedule(igbe->radvEvent, curTick() + delay);
            }
        }

        // if neither radv or rdtr, maybe itr is set...
        if (!igbe->regs.rdtr.delay() && !igbe->regs.radv.idv()) {
            DPRINTF(EthernetSM,
                    "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
            igbe->postInterrupt(IT_RXT);
        }

        // If the packet is small enough, interrupt appropriately
        // I wonder if this is delayed or not?!
        if (pktPtr->length <= igbe->regs.rsrpd.idv()) {
            DPRINTF(EthernetSM,
                    "RXS: Posting IT_SRPD beacuse small packet received\n");
            igbe->postInterrupt(IT_SRPD);
        }
        bytesCopied = 0;
    }

    pktPtr = NULL;
    igbe->checkDrain();
    enableSm();
    pktDone = true;

    // Retire this descriptor: it is now "used" and awaiting writeback.
    igbe->anBegin("RXS", "Done Updating Desc");
    DPRINTF(EthernetDesc, "Processing of this descriptor complete\n");
    igbe->anDq("RXS", annUnusedCacheQ);
    unusedCache.pop_front();
    igbe->anQ("RXS", annUsedCacheQ);
    usedCache.push_back(desc);
}
1491
1492void
1493IGbE::RxDescCache::enableSm()
1494{
1495 if (igbe->drainState() != DrainState::Draining) {
1496 igbe->rxTick = true;
1497 igbe->restartClock();
1498 }
1499}
1500
1501bool
1502IGbE::RxDescCache::packetDone()
1503{
1504 if (pktDone) {
1505 pktDone = false;
1506 return true;
1507 }
1508 return false;
1509}
1510
1511bool
1512IGbE::RxDescCache::hasOutstandingEvents()
1513{
1514 return pktEvent.scheduled() || wbEvent.scheduled() ||
1515 fetchEvent.scheduled() || pktHdrEvent.scheduled() ||
1516 pktDataEvent.scheduled();
1517
1518}
1519
void
IGbE::RxDescCache::serialize(CheckpointOut &cp) const
{
    // Base class first, then RX-specific state; order must match
    // unserialize().
    DescCache<RxDesc>::serialize(cp);
    SERIALIZE_SCALAR(pktDone);
    SERIALIZE_SCALAR(splitCount);
    SERIALIZE_SCALAR(bytesCopied);
}
1528
void
IGbE::RxDescCache::unserialize(CheckpointIn &cp)
{
    // Mirror of RxDescCache::serialize().
    DescCache<RxDesc>::unserialize(cp);
    UNSERIALIZE_SCALAR(pktDone);
    UNSERIALIZE_SCALAR(splitCount);
    UNSERIALIZE_SCALAR(bytesCopied);
}
1537
1538
1539///////////////////////////// IGbE::TxDescCache //////////////////////////////
1540
// Transmit-side descriptor cache: holds TxDesc entries, all of the
// TSO (TCP segmentation offload) bookkeeping, and the CPA annotation
// names for the TX path.
IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false),
      pktWaiting(false), pktMultiDesc(false),
      completionAddress(0), completionEnabled(false),
      useTso(false), tsoHeaderLen(0), tsoMss(0), tsoTotalLen(0), tsoUsedLen(0),
      tsoPrevSeq(0), tsoPktPayloadBytes(0), tsoLoadedHeader(false),
      tsoPktHasHeader(false), tsoDescBytesUsed(0), tsoCopyBytes(0), tsoPkts(0),
      pktEvent(this), headerEvent(this), nullEvent(this)
{
    annSmFetch = "TX Desc Fetch";
    annSmWb = "TX Desc Writeback";
    annUnusedDescQ = "TX Unused Descriptors";
    annUnusedCacheQ = "TX Unused Descriptor Cache";
    annUsedCacheQ = "TX Used Descriptor Cache";
    annUsedDescQ = "TX Used Descriptors";
    annDescQ = "TX Descriptors";
}
1558
// Consume any context descriptors at the front of the unused queue,
// latching their TSO parameters (header length, MSS, total payload),
// then — if TSO is active but the header hasn't been read yet —
// start the DMA that loads the TSO header from the first data
// descriptor's buffer.
void
IGbE::TxDescCache::processContextDesc()
{
    assert(unusedCache.size());
    TxDesc *desc;

    DPRINTF(EthernetDesc, "Checking and processing context descriptors\n");

    // Retire consecutive context descriptors. Stops once TSO has been
    // enabled so later context descriptors aren't consumed mid-flow.
    while (!useTso && unusedCache.size() &&
           TxdOp::isContext(unusedCache.front())) {
        DPRINTF(EthernetDesc, "Got context descriptor type...\n");

        desc = unusedCache.front();
        DPRINTF(EthernetDesc, "Descriptor upper: %#x lower: %#X\n",
                desc->d1, desc->d2);


        // is this going to be a tcp or udp packet?
        isTcp = TxdOp::tcp(desc) ? true : false;

        // setup all the TSO variables, they'll be ignored if we don't use
        // tso for this connection
        tsoHeaderLen = TxdOp::hdrlen(desc);
        tsoMss  = TxdOp::mss(desc);

        if (TxdOp::isType(desc, TxdOp::TXD_CNXT) && TxdOp::tse(desc)) {
            DPRINTF(EthernetDesc, "TCP offload enabled for packet hdrlen: "
                    "%d mss: %d paylen %d\n", TxdOp::hdrlen(desc),
                    TxdOp::mss(desc), TxdOp::getLen(desc));
            useTso = true;
            tsoTotalLen = TxdOp::getLen(desc);
            tsoLoadedHeader = false;
            tsoDescBytesUsed = 0;
            tsoUsedLen = 0;
            tsoPrevSeq = 0;
            tsoPktHasHeader = false;
            tsoPkts = 0;
            tsoCopyBytes = 0;
        }

        // Context descriptors are done immediately; move to used queue.
        TxdOp::setDd(desc);
        unusedCache.pop_front();
        igbe->anDq("TXS", annUnusedCacheQ);
        usedCache.push_back(desc);
        igbe->anQ("TXS", annUsedCacheQ);
    }

    if (!unusedCache.size())
        return;

    // Advanced data descriptors can also enable TSO (using the
    // parameters latched from an earlier context descriptor).
    desc = unusedCache.front();
    if (!useTso && TxdOp::isType(desc, TxdOp::TXD_ADVDATA) &&
        TxdOp::tse(desc)) {
        DPRINTF(EthernetDesc, "TCP offload(adv) enabled for packet "
                "hdrlen: %d mss: %d paylen %d\n",
                tsoHeaderLen, tsoMss, TxdOp::getTsoLen(desc));
        useTso = true;
        tsoTotalLen = TxdOp::getTsoLen(desc);
        tsoLoadedHeader = false;
        tsoDescBytesUsed = 0;
        tsoUsedLen = 0;
        tsoPrevSeq = 0;
        tsoPktHasHeader = false;
        tsoPkts = 0;
    }

    if (useTso && !tsoLoadedHeader) {
        // we need to fetch a header
        DPRINTF(EthernetDesc, "Starting DMA of TSO header\n");
        assert(TxdOp::isData(desc) && TxdOp::getLen(desc) >= tsoHeaderLen);
        pktWaiting = true;
        // tsoHeader buffer is only 256 bytes
        assert(tsoHeaderLen <= 256);
        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
                      tsoHeaderLen, &headerEvent, tsoHeader, 0);
    }
}
1635
// DMA of the TSO header finished. If the descriptor contained only
// the header, retire it; otherwise leave it in place and remember
// how many of its bytes the header consumed.
void
IGbE::TxDescCache::headerComplete()
{
    DPRINTF(EthernetDesc, "TSO: Fetching TSO header complete\n");
    pktWaiting = false;

    assert(unusedCache.size());
    TxDesc *desc = unusedCache.front();
    DPRINTF(EthernetDesc, "TSO: len: %d tsoHeaderLen: %d\n",
            TxdOp::getLen(desc), tsoHeaderLen);

    if (TxdOp::getLen(desc) == tsoHeaderLen) {
        // Descriptor held exactly the header; it's fully consumed.
        tsoDescBytesUsed = 0;
        tsoLoadedHeader = true;
        unusedCache.pop_front();
        usedCache.push_back(desc);
    } else {
        DPRINTF(EthernetDesc, "TSO: header part of larger payload\n");
        tsoDescBytesUsed = tsoHeaderLen;
        tsoLoadedHeader = true;
    }
    enableSm();
    igbe->checkDrain();
}
1660
// Return the size of the next chunk to transmit: for TSO, how many
// bytes of this segment (header plus payload up to the MSS, bounded
// by what's left in the current descriptor) — as a side effect this
// also sets tsoCopyBytes for getPacketData(). For non-TSO, simply
// the current descriptor's length. Returns 0 if no descriptor is
// cached.
unsigned
IGbE::TxDescCache::getPacketSize(EthPacketPtr p)
{
    if (!unusedCache.size())
        return 0;

    DPRINTF(EthernetDesc, "Starting processing of descriptor\n");

    assert(!useTso || tsoLoadedHeader);
    TxDesc *desc = unusedCache.front();

    if (useTso) {
        DPRINTF(EthernetDesc, "getPacket(): TxDescriptor data "
                "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
        DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
                "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
                tsoTotalLen, tsoUsedLen, tsoLoadedHeader);

        // Payload bytes this descriptor contributes to the current
        // segment: limited by the remaining MSS budget (accounting
        // for the header already in the packet) and by the unread
        // remainder of this descriptor.
        if (tsoPktHasHeader)
            tsoCopyBytes =  std::min((tsoMss + tsoHeaderLen) - p->length,
                                     TxdOp::getLen(desc) - tsoDescBytesUsed);
        else
            tsoCopyBytes =  std::min(tsoMss,
                                     TxdOp::getLen(desc) - tsoDescBytesUsed);
        unsigned pkt_size =
            tsoCopyBytes + (tsoPktHasHeader ? 0 : tsoHeaderLen);

        DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d "
                "this descLen: %d\n",
                tsoDescBytesUsed, tsoCopyBytes, TxdOp::getLen(desc));
        DPRINTF(EthernetDesc, "TSO: pktHasHeader: %d\n", tsoPktHasHeader);
        DPRINTF(EthernetDesc, "TSO: Next packet is %d bytes\n", pkt_size);
        return pkt_size;
    }

    DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
            TxdOp::getLen(unusedCache.front()));
    return TxdOp::getLen(desc);
}
1700
// Start the DMA that reads the next chunk of packet payload from the
// current descriptor's buffer into the EthPacket at its current
// write offset (p->length). For TSO, the cached TSO header is first
// copied into the front of a fresh segment. pktEvent fires when the
// DMA completes (-> pktComplete()).
void
IGbE::TxDescCache::getPacketData(EthPacketPtr p)
{
    assert(unusedCache.size());

    TxDesc *desc;
    desc = unusedCache.front();

    DPRINTF(EthernetDesc, "getPacketData(): TxDescriptor data "
            "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
           TxdOp::getLen(desc));

    pktPtr = p;

    pktWaiting = true;

    DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d\n", p->length);

    if (useTso) {
        assert(tsoLoadedHeader);
        if (!tsoPktHasHeader) {
            // New segment: prepend the previously fetched TSO header.
            DPRINTF(EthernetDesc,
                    "Loading TSO header (%d bytes) into start of packet\n",
                    tsoHeaderLen);
            memcpy(p->data, &tsoHeader,tsoHeaderLen);
            p->length +=tsoHeaderLen;
            tsoPktHasHeader = true;
        }
    }

    if (useTso) {
        // Read tsoCopyBytes (computed by getPacketSize()) starting at
        // this descriptor's current consumption offset.
        DPRINTF(EthernetDesc,
                "Starting DMA of packet at offset %d length: %d\n",
                p->length, tsoCopyBytes);
        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc))
                      + tsoDescBytesUsed,
                      tsoCopyBytes, &pktEvent, p->data + p->length,
                      igbe->txReadDelay);
        tsoDescBytesUsed += tsoCopyBytes;
        assert(tsoDescBytesUsed <= TxdOp::getLen(desc));
    } else {
        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
                      TxdOp::getLen(desc), &pktEvent, p->data + p->length,
                      igbe->txReadDelay);
    }
}
1748
// Payload DMA for the current descriptor finished. Either this was a
// partial (multi-descriptor / mid-TSO-segment) read — in which case
// the descriptor is retired and we wait for the next chunk — or the
// packet is complete: patch IP/TCP/UDP headers for TSO, compute any
// offloaded checksums, handle delayed-interrupt timers, retire the
// descriptor, and trigger descriptor writeback per WTHRESH.
void
IGbE::TxDescCache::pktComplete()
{

    TxDesc *desc;
    assert(unusedCache.size());
    assert(pktPtr);

    igbe->anBegin("TXS", "Update Desc");

    DPRINTF(EthernetDesc, "DMA of packet complete\n");


    desc = unusedCache.front();
    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
           TxdOp::getLen(desc));

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
            desc->d1, desc->d2);

    // Set the length of the data in the EtherPacket
    if (useTso) {
        DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
                "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
                tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
        pktPtr->simLength += tsoCopyBytes;
        pktPtr->length += tsoCopyBytes;
        tsoUsedLen += tsoCopyBytes;
        DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d\n",
                tsoDescBytesUsed, tsoCopyBytes);
    } else {
        pktPtr->simLength += TxdOp::getLen(desc);
        pktPtr->length += TxdOp::getLen(desc);
    }



    // Partial packet: either a non-EOP descriptor (non-TSO) or a TSO
    // segment that hasn't yet reached MSS+header and isn't the final
    // one. Retire the descriptor and wait for more data.
    if ((!TxdOp::eop(desc) && !useTso) ||
        (pktPtr->length < ( tsoMss  + tsoHeaderLen) &&
         tsoTotalLen != tsoUsedLen && useTso)) {
        assert(!useTso || (tsoDescBytesUsed == TxdOp::getLen(desc)));
        igbe->anDq("TXS", annUnusedCacheQ);
        unusedCache.pop_front();
        igbe->anQ("TXS", annUsedCacheQ);
        usedCache.push_back(desc);

        tsoDescBytesUsed = 0;
        pktDone = true;
        pktWaiting = false;
        pktMultiDesc = true;

        DPRINTF(EthernetDesc, "Partial Packet Descriptor of %d bytes Done\n",
                pktPtr->length);
        pktPtr = NULL;

        enableSm();
        igbe->checkDrain();
        return;
    }


    pktMultiDesc = false;
    // no support for vlans
    assert(!TxdOp::vle(desc));

    // we only support single packet descriptors at this point
    if (!useTso)
        assert(TxdOp::eop(desc));

    // set that this packet is done
    if (TxdOp::rs(desc))
        TxdOp::setDd(desc);

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
            desc->d1, desc->d2);

    // TSO: fix up the replicated headers for this segment — bump the
    // IP id, rewrite lengths, advance the TCP sequence number, and
    // clear FIN/PSH on all but the final segment.
    if (useTso) {
        IpPtr ip(pktPtr);
        if (ip) {
            DPRINTF(EthernetDesc, "TSO: Modifying IP header. Id + %d\n",
                    tsoPkts);
            ip->id(ip->id() + tsoPkts++);
            ip->len(pktPtr->length - EthPtr(pktPtr)->size());

            TcpPtr tcp(ip);
            if (tcp) {
                DPRINTF(EthernetDesc,
                        "TSO: Modifying TCP header. old seq %d + %d\n",
                        tcp->seq(), tsoPrevSeq);
                tcp->seq(tcp->seq() + tsoPrevSeq);
                if (tsoUsedLen != tsoTotalLen)
                    tcp->flags(tcp->flags() & ~9); // clear fin & psh
            }
            UdpPtr udp(ip);
            if (udp) {
                DPRINTF(EthernetDesc, "TSO: Modifying UDP header.\n");
                udp->len(pktPtr->length - EthPtr(pktPtr)->size());
            }
        }
        tsoPrevSeq = tsoUsedLen;
    }

    if (DTRACE(EthernetDesc)) {
        IpPtr ip(pktPtr);
        if (ip)
            DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n",
                    ip->id());
        else
            DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
    }

    // Checksums are only ofloaded for new descriptor types
    if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) {
        DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
        IpPtr ip(pktPtr);
        assert(ip);
        if (TxdOp::ixsm(desc)) {
            ip->sum(0);
            ip->sum(cksum(ip));
            igbe->txIpChecksums++;
            DPRINTF(EthernetDesc, "Calculated IP checksum\n");
        }
        if (TxdOp::txsm(desc)) {
            TcpPtr tcp(ip);
            UdpPtr udp(ip);
            if (tcp) {
                tcp->sum(0);
                tcp->sum(cksum(tcp));
                igbe->txTcpChecksums++;
                DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
            } else if (udp) {
                assert(udp);
                udp->sum(0);
                udp->sum(cksum(udp));
                igbe->txUdpChecksums++;
                DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
            } else {
                panic("Told to checksum, but don't know how\n");
            }
        }
    }

    // Delayed transmit interrupts (TIDV absolute + TADV upper bound).
    if (TxdOp::ide(desc)) {
        // Deal with the rx timer interrupts
        DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
        if (igbe->regs.tidv.idv()) {
            Tick delay = igbe->regs.tidv.idv() * igbe->intClock();
            DPRINTF(EthernetDesc, "setting tidv\n");
            igbe->reschedule(igbe->tidvEvent, curTick() + delay, true);
        }

        if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
            Tick delay = igbe->regs.tadv.idv() * igbe->intClock();
            DPRINTF(EthernetDesc, "setting tadv\n");
            if (!igbe->tadvEvent.scheduled()) {
                igbe->schedule(igbe->tadvEvent, curTick() + delay);
            }
        }
    }


    // Retire the descriptor unless a TSO flow still has unread bytes
    // left in it (in which case the next segment continues from it).
    if (!useTso || TxdOp::getLen(desc) == tsoDescBytesUsed) {
        DPRINTF(EthernetDesc, "Descriptor Done\n");
        igbe->anDq("TXS", annUnusedCacheQ);
        unusedCache.pop_front();
        igbe->anQ("TXS", annUsedCacheQ);
        usedCache.push_back(desc);
        tsoDescBytesUsed = 0;
    }

    if (useTso && tsoUsedLen == tsoTotalLen)
        useTso = false;


    DPRINTF(EthernetDesc,
            "------Packet of %d bytes ready for transmission-------\n",
            pktPtr->length);
    pktDone = true;
    pktWaiting = false;
    pktPtr = NULL;
    tsoPktHasHeader = false;

    // Descriptor writeback policy driven by TXDCTL.WTHRESH.
    if (igbe->regs.txdctl.wthresh() == 0) {
        igbe->anBegin("TXS", "Desc Writeback");
        DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
        writeback(0);
    } else if (!igbe->regs.txdctl.gran() && igbe->regs.txdctl.wthresh() <=
               descInBlock(usedCache.size())) {
        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
        igbe->anBegin("TXS", "Desc Writeback");
        writeback((igbe->cacheBlockSize()-1)>>4);
    } else if (igbe->regs.txdctl.wthresh() <= usedCache.size()) {
        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
        igbe->anBegin("TXS", "Desc Writeback");
        writeback((igbe->cacheBlockSize()-1)>>4);
    }

    enableSm();
    igbe->checkDrain();
}
1949
1950void
1951IGbE::TxDescCache::actionAfterWb()
1952{
1953 DPRINTF(EthernetDesc, "actionAfterWb() completionEnabled: %d\n",
1954 completionEnabled);
1955 igbe->postInterrupt(iGbReg::IT_TXDW);
1956 if (completionEnabled) {
1957 descEnd = igbe->regs.tdh();
1958 DPRINTF(EthernetDesc,
1959 "Completion writing back value: %d to addr: %#x\n", descEnd,
1960 completionAddress);
1961 igbe->dmaWrite(pciToDma(mbits(completionAddress, 63, 2)),
1962 sizeof(descEnd), &nullEvent, (uint8_t*)&descEnd, 0);
1963 }
1964}
1965
1966void
1967IGbE::TxDescCache::serialize(CheckpointOut &cp) const
1968{
1969 DescCache<TxDesc>::serialize(cp);
1970
1971 SERIALIZE_SCALAR(pktDone);
1972 SERIALIZE_SCALAR(isTcp);
1973 SERIALIZE_SCALAR(pktWaiting);
1974 SERIALIZE_SCALAR(pktMultiDesc);
1975
1976 SERIALIZE_SCALAR(useTso);
1977 SERIALIZE_SCALAR(tsoHeaderLen);
1978 SERIALIZE_SCALAR(tsoMss);
1979 SERIALIZE_SCALAR(tsoTotalLen);
1980 SERIALIZE_SCALAR(tsoUsedLen);
1981 SERIALIZE_SCALAR(tsoPrevSeq);;
1982 SERIALIZE_SCALAR(tsoPktPayloadBytes);
1983 SERIALIZE_SCALAR(tsoLoadedHeader);
1984 SERIALIZE_SCALAR(tsoPktHasHeader);
1985 SERIALIZE_ARRAY(tsoHeader, 256);
1986 SERIALIZE_SCALAR(tsoDescBytesUsed);
1987 SERIALIZE_SCALAR(tsoCopyBytes);
1988 SERIALIZE_SCALAR(tsoPkts);
1989
1990 SERIALIZE_SCALAR(completionAddress);
1991 SERIALIZE_SCALAR(completionEnabled);
1992 SERIALIZE_SCALAR(descEnd);
1993}
1994
1995void
1996IGbE::TxDescCache::unserialize(CheckpointIn &cp)
1997{
1998 DescCache<TxDesc>::unserialize(cp);
1999
2000 UNSERIALIZE_SCALAR(pktDone);
2001 UNSERIALIZE_SCALAR(isTcp);
2002 UNSERIALIZE_SCALAR(pktWaiting);
2003 UNSERIALIZE_SCALAR(pktMultiDesc);
2004
2005 UNSERIALIZE_SCALAR(useTso);
2006 UNSERIALIZE_SCALAR(tsoHeaderLen);
2007 UNSERIALIZE_SCALAR(tsoMss);
2008 UNSERIALIZE_SCALAR(tsoTotalLen);
2009 UNSERIALIZE_SCALAR(tsoUsedLen);
2010 UNSERIALIZE_SCALAR(tsoPrevSeq);;
2011 UNSERIALIZE_SCALAR(tsoPktPayloadBytes);
2012 UNSERIALIZE_SCALAR(tsoLoadedHeader);
2013 UNSERIALIZE_SCALAR(tsoPktHasHeader);
2014 UNSERIALIZE_ARRAY(tsoHeader, 256);
2015 UNSERIALIZE_SCALAR(tsoDescBytesUsed);
2016 UNSERIALIZE_SCALAR(tsoCopyBytes);
2017 UNSERIALIZE_SCALAR(tsoPkts);
2018
2019 UNSERIALIZE_SCALAR(completionAddress);
2020 UNSERIALIZE_SCALAR(completionEnabled);
2021 UNSERIALIZE_SCALAR(descEnd);
2022}
2023
2024bool
2025IGbE::TxDescCache::packetAvailable()
2026{
2027 if (pktDone) {
2028 pktDone = false;
2029 return true;
2030 }
2031 return false;
2032}
2033
2034void
2035IGbE::TxDescCache::enableSm()
2036{
2037 if (igbe->drainState() != DrainState::Draining) {
2038 igbe->txTick = true;
2039 igbe->restartClock();
2040 }
2041}
2042
2043bool
2044IGbE::TxDescCache::hasOutstandingEvents()
2045{
2046 return pktEvent.scheduled() || wbEvent.scheduled() ||
2047 fetchEvent.scheduled();
2048}
2049
2050
2051///////////////////////////////////// IGbE /////////////////////////////////
2052
2053void
2054IGbE::restartClock()
2055{
2056 if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) &&
2057 drainState() == DrainState::Running)
2058 schedule(tickEvent, clockEdge(Cycles(1)));
2059}
2060
2061DrainState
2062IGbE::drain()
2063{
2064 unsigned int count(0);
2065 if (rxDescCache.hasOutstandingEvents() ||
2066 txDescCache.hasOutstandingEvents()) {
2067 count++;
2068 }
2069
2070 txFifoTick = false;
2071 txTick = false;
2072 rxTick = false;
2073
2074 if (tickEvent.scheduled())
2075 deschedule(tickEvent);
2076
2077 if (count) {
2078 DPRINTF(Drain, "IGbE not drained\n");
2079 return DrainState::Draining;
2080 } else
2081 return DrainState::Drained;
2082}
2083
2084void
2085IGbE::drainResume()
2086{
2087 Drainable::drainResume();
2088
2089 txFifoTick = true;
2090 txTick = true;
2091 rxTick = true;
2092
2093 restartClock();
2094 DPRINTF(EthernetSM, "resuming from drain");
2095}
2096
2097void
2098IGbE::checkDrain()
2099{
2100 if (drainState() != DrainState::Draining)
2101 return;
2102
2103 txFifoTick = false;
2104 txTick = false;
2105 rxTick = false;
2106 if (!rxDescCache.hasOutstandingEvents() &&
2107 !txDescCache.hasOutstandingEvents()) {
2108 DPRINTF(Drain, "IGbE done draining, processing drain event\n");
2109 signalDrainDone();
2110 }
2111}
2112
void
IGbE::txStateMachine()
{
    // TX globally disabled: park the state machine until re-enabled.
    if (!regs.tctl.en()) {
        txTick = false;
        DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
        return;
    }

    // If we have a packet available and its length is not 0 (meaning it's not
    // a multidescriptor packet) put it in the fifo, otherwise on the next
    // iteration we'll get the rest of the data
    if (txPacket && txDescCache.packetAvailable()
        && !txDescCache.packetMultiDesc() && txPacket->length) {
        anQ("TXS", "TX FIFO Q");
        DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
        // Capture push()'s result only in debug builds so the release
        // build doesn't warn about an unused variable.
#ifndef NDEBUG
        bool success =
#endif
            txFifo.push(txPacket);
        txFifoTick = true && drainState() != DrainState::Draining;
        assert(success);
        txPacket = NULL;
        anBegin("TXS", "Desc Writeback");
        // Write back up to one cache block's worth of descriptors
        // (each TX descriptor is 16 bytes, hence the >>4).
        txDescCache.writeback((cacheBlockSize()-1)>>4);
        return;
    }

    // Only support descriptor granularity
    // LWTHRESH is in units of 8 descriptors; post the low-descriptor
    // interrupt when the ring drops below it.
    if (regs.txdctl.lwthresh() &&
        txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
        DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
        postInterrupt(IT_TXDLOW);
    }

    // Allocate a fresh buffer for the next packet to be assembled into.
    if (!txPacket) {
        txPacket = std::make_shared<EthPacketData>(16384);
    }

    if (!txDescCache.packetWaiting()) {
        // Ring completely consumed: force a writeback, start a refetch,
        // tell software (TXQE), and stop ticking until more descriptors
        // show up.
        if (txDescCache.descLeft() == 0) {
            postInterrupt(IT_TXQE);
            anBegin("TXS", "Desc Writeback");
            txDescCache.writeback(0);
            anBegin("TXS", "Desc Fetch");
            anWe("TXS", txDescCache.annUnusedCacheQ);
            txDescCache.fetchDescriptors();
            DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
                    "writeback stopping ticking and posting TXQE\n");
            txTick = false;
            return;
        }


        // Descriptors exist in the ring but none are cached yet: fetch
        // and wait for the DMA to complete.
        if (!(txDescCache.descUnused())) {
            anBegin("TXS", "Desc Fetch");
            txDescCache.fetchDescriptors();
            anWe("TXS", txDescCache.annUnusedCacheQ);
            DPRINTF(EthernetSM, "TXS: No descriptors available in cache, "
                    "fetching and stopping ticking\n");
            txTick = false;
            return;
        }
        anPq("TXS", txDescCache.annUnusedCacheQ);


        // Consume any context descriptor; this may kick off a TSO header
        // fetch, in which case we wait for it.
        txDescCache.processContextDesc();
        if (txDescCache.packetWaiting()) {
            DPRINTF(EthernetSM,
                    "TXS: Fetching TSO header, stopping ticking\n");
            txTick = false;
            return;
        }

        unsigned size = txDescCache.getPacketSize(txPacket);
        if (size > 0 && txFifo.avail() > size) {
            // Room in the FIFO: reserve it and start DMAing the packet
            // data from memory.
            anRq("TXS", "TX FIFO Q");
            anBegin("TXS", "DMA Packet");
            DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and "
                    "beginning DMA of next packet\n", size);
            txFifo.reserve(size);
            txDescCache.getPacketData(txPacket);
        } else if (size == 0) {
            // Nothing to transmit right now; flush used descriptors.
            DPRINTF(EthernetSM, "TXS: getPacketSize returned: %d\n", size);
            DPRINTF(EthernetSM,
                    "TXS: No packets to get, writing back used descriptors\n");
            anBegin("TXS", "Desc Writeback");
            txDescCache.writeback(0);
        } else {
            // FIFO full: stall until txWire() frees space.
            anWf("TXS", "TX FIFO Q");
            DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
                    "available in FIFO\n");
            txTick = false;
        }


        return;
    }
    DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
    txTick = false;
}
2214
2215bool
2216IGbE::ethRxPkt(EthPacketPtr pkt)
2217{
2218 rxBytes += pkt->length;
2219 rxPackets++;
2220
2221 DPRINTF(Ethernet, "RxFIFO: Receiving pcakte from wire\n");
2222 anBegin("RXQ", "Wire Recv");
2223
2224
2225 if (!regs.rctl.en()) {
2226 DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
2227 anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
2228 return true;
2229 }
2230
2231 // restart the state machines if they are stopped
2232 rxTick = true && drainState() != DrainState::Draining;
2233 if ((rxTick || txTick) && !tickEvent.scheduled()) {
2234 DPRINTF(EthernetSM,
2235 "RXS: received packet into fifo, starting ticking\n");
2236 restartClock();
2237 }
2238
2239 if (!rxFifo.push(pkt)) {
2240 DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
2241 postInterrupt(IT_RXO, true);
2242 anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
2243 return false;
2244 }
2245
2246 if (CPA::available() && cpa->enabled()) {
2247 assert(sys->numSystemsRunning <= 2);
2248 System *other_sys;
2249 if (sys->systemList[0] == sys)
2250 other_sys = sys->systemList[1];
2251 else
2252 other_sys = sys->systemList[0];
2253
2254 cpa->hwDq(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
2255 anQ("RXQ", "RX FIFO Q");
2256 cpa->hwWe(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
2257 }
2258
2259 return true;
2260}
2261
2262
void
IGbE::rxStateMachine()
{
    // RX globally disabled: park the state machine until re-enabled.
    if (!regs.rctl.en()) {
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
        return;
    }

    // If the packet is done check for interrupts/descriptors/etc
    if (rxDescCache.packetDone()) {
        rxDmaPacket = false;
        DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
        int descLeft = rxDescCache.descLeft();
        DPRINTF(EthernetSM, "RXS: descLeft: %d rdmts: %d rdlen: %d\n",
                descLeft, regs.rctl.rdmts(), regs.rdlen());
        // RDMTS selects the minimum-threshold fraction of the ring
        // (2 -> 1/8, 1 -> 1/4, 0 -> 1/2). The fallthrough is deliberate:
        // if descLeft is at or below the selected fraction it is also at
        // or below the looser ones, so control falls through to post the
        // RXDMT interrupt; otherwise the matching `break` skips it.
        switch (regs.rctl.rdmts()) {
          case 2: if (descLeft > .125 * regs.rdlen()) break;
          case 1: if (descLeft > .250 * regs.rdlen()) break;
          case 0: if (descLeft > .500 * regs.rdlen()) break;
            DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) "
                    "because of descriptors left\n");
            postInterrupt(IT_RXDMT);
            break;
        }

        // No more packets queued: flush used descriptors back to memory.
        if (rxFifo.empty())
            rxDescCache.writeback(0);

        // Ring exhausted: force a writeback and stop until software
        // replenishes descriptors.
        if (descLeft == 0) {
            anBegin("RXS", "Writeback Descriptors");
            rxDescCache.writeback(0);
            DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
                    " writeback and stopping ticking\n");
            rxTick = false;
        }

        // only support descriptor granularities
        assert(regs.rxdctl.gran());

        // WTHRESH reached: write back either WTHRESH descriptors or a
        // full cache block's worth (16-byte descriptors, hence >>4),
        // whichever is smaller.
        if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
            DPRINTF(EthernetSM,
                    "RXS: Writing back because WTHRESH >= descUsed\n");
            anBegin("RXS", "Writeback Descriptors");
            if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
                rxDescCache.writeback(regs.rxdctl.wthresh()-1);
            else
                rxDescCache.writeback((cacheBlockSize()-1)>>4);
        }

        // Prefetch more descriptors when the cache runs below PTHRESH
        // and the host still has more than HTHRESH available.
        if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
            ((rxDescCache.descLeft() - rxDescCache.descUnused()) >
             regs.rxdctl.hthresh())) {
            DPRINTF(EthernetSM, "RXS: Fetching descriptors because "
                    "descUnused < PTHRESH\n");
            anBegin("RXS", "Fetch Descriptors");
            rxDescCache.fetchDescriptors();
        }

        // Cache completely empty: fetch and stall until the fetch DMA
        // completes.
        if (rxDescCache.descUnused() == 0) {
            anBegin("RXS", "Fetch Descriptors");
            rxDescCache.fetchDescriptors();
            anWe("RXS", rxDescCache.annUnusedCacheQ);
            DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
                    "fetching descriptors and stopping ticking\n");
            rxTick = false;
        }
        return;
    }

    // A packet DMA is already in flight; wait for it to finish.
    if (rxDmaPacket) {
        DPRINTF(EthernetSM,
                "RXS: stopping ticking until packet DMA completes\n");
        rxTick = false;
        return;
    }

    // Can't place a packet without a cached descriptor: fetch and stall.
    if (!rxDescCache.descUnused()) {
        anBegin("RXS", "Fetch Descriptors");
        rxDescCache.fetchDescriptors();
        anWe("RXS", rxDescCache.annUnusedCacheQ);
        DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
                "stopping ticking\n");
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n");
        return;
    }
    anPq("RXS", rxDescCache.annUnusedCacheQ);

    // Nothing received: sleep until ethRxPkt() wakes us.
    if (rxFifo.empty()) {
        anWe("RXS", "RX FIFO Q");
        DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
        rxTick = false;
        return;
    }
    anPq("RXS", "RX FIFO Q");
    anBegin("RXS", "Get Desc");

    EthPacketPtr pkt;
    pkt = rxFifo.front();


    // Start DMAing the packet into host memory. pktOffset tracks partial
    // progress for packets that span multiple descriptors; the packet is
    // only popped once it has been written in its entirety.
    pktOffset = rxDescCache.writePacket(pkt, pktOffset);
    DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
    if (pktOffset == pkt->length) {
        anBegin( "RXS", "FIFO Dequeue");
        DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
        pktOffset = 0;
        anDq("RXS", "RX FIFO Q");
        rxFifo.pop();
    }

    DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
    rxTick = false;
    rxDmaPacket = true;
    anBegin("RXS", "DMA Packet");
}
2380
void
IGbE::txWire()
{
    // Nothing queued: stop the FIFO tick until a packet is pushed.
    if (txFifo.empty()) {
        anWe("TXQ", "TX FIFO Q");
        txFifoTick = false;
        return;
    }


    anPq("TXQ", "TX FIFO Q");
    // Attempt to hand the head-of-line packet to the wire. On failure the
    // interface is busy and ethTxDone() will wake us later.
    if (etherInt->sendPacket(txFifo.front())) {
        anQ("TXQ", "WireQ");
        // Tracing only: classify the outgoing packet as IP or not.
        if (DTRACE(EthernetSM)) {
            IpPtr ip(txFifo.front());
            if (ip)
                DPRINTF(EthernetSM, "Transmitting Ip packet with Id=%d\n",
                        ip->id());
            else
                DPRINTF(EthernetSM, "Transmitting Non-Ip packet\n");
        }
        anDq("TXQ", "TX FIFO Q");
        anBegin("TXQ", "Wire Send");
        DPRINTF(EthernetSM,
                "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
                txFifo.avail());

        // Update TX statistics before popping, while front() is valid.
        txBytes += txFifo.front()->length;
        txPackets++;
        txFifoTick = false;

        txFifo.pop();
    } else {
        // We'll get woken up when the packet ethTxDone() gets called
        txFifoTick = false;
    }
}
2418
2419void
2420IGbE::tick()
2421{
2422 DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");
2423
2424 if (rxTick)
2425 rxStateMachine();
2426
2427 if (txTick)
2428 txStateMachine();
2429
2430 if (txFifoTick)
2431 txWire();
2432
2433
2434 if (rxTick || txTick || txFifoTick)
2435 schedule(tickEvent, curTick() + clockPeriod());
2436}
2437
2438void
2439IGbE::ethTxDone()
2440{
2441 anBegin("TXQ", "Send Done");
2442 // restart the tx state machines if they are stopped
2443 // fifo to send another packet
2444 // tx sm to put more data into the fifo
2445 txFifoTick = true && drainState() != DrainState::Draining;
2446 if (txDescCache.descLeft() != 0 && drainState() != DrainState::Draining)
2447 txTick = true;
2448
2449 restartClock();
2450 txWire();
2451 DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
2452}
2453
void
IGbE::serialize(CheckpointOut &cp) const
{
    PciDevice::serialize(cp);

    regs.serialize(cp);
    // EEPROM serial-interface state machine.
    SERIALIZE_SCALAR(eeOpBits);
    SERIALIZE_SCALAR(eeAddrBits);
    SERIALIZE_SCALAR(eeDataBits);
    SERIALIZE_SCALAR(eeOpcode);
    SERIALIZE_SCALAR(eeAddr);
    SERIALIZE_SCALAR(lastInterrupt);
    SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.serialize("rxfifo", cp);
    txFifo.serialize("txfifo", cp);

    // The in-flight TX packet is optional; record its presence so
    // unserialize() knows whether to reconstruct it.
    bool txPktExists = txPacket != nullptr;
    SERIALIZE_SCALAR(txPktExists);
    if (txPktExists)
        txPacket->serialize("txpacket", cp);

    // Events can't be checkpointed directly: record each pending event's
    // scheduled time instead, with 0 meaning "not scheduled".
    Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
        inter_time = 0;

    if (rdtrEvent.scheduled())
        rdtr_time = rdtrEvent.when();
    SERIALIZE_SCALAR(rdtr_time);

    if (radvEvent.scheduled())
        radv_time = radvEvent.when();
    SERIALIZE_SCALAR(radv_time);

    if (tidvEvent.scheduled())
        tidv_time = tidvEvent.when();
    SERIALIZE_SCALAR(tidv_time);

    if (tadvEvent.scheduled())
        tadv_time = tadvEvent.when();
    SERIALIZE_SCALAR(tadv_time);

    if (interEvent.scheduled())
        inter_time = interEvent.when();
    SERIALIZE_SCALAR(inter_time);

    // Partial-write offset of the packet currently being DMAed to memory.
    SERIALIZE_SCALAR(pktOffset);

    txDescCache.serializeSection(cp, "TxDescCache");
    rxDescCache.serializeSection(cp, "RxDescCache");
}
2504
2505void
2506IGbE::unserialize(CheckpointIn &cp)
2507{
2508 PciDevice::unserialize(cp);
2509
2510 regs.unserialize(cp);
2511 UNSERIALIZE_SCALAR(eeOpBits);
2512 UNSERIALIZE_SCALAR(eeAddrBits);
2513 UNSERIALIZE_SCALAR(eeDataBits);
2514 UNSERIALIZE_SCALAR(eeOpcode);
2515 UNSERIALIZE_SCALAR(eeAddr);
2516 UNSERIALIZE_SCALAR(lastInterrupt);
2517 UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);
2518
2519 rxFifo.unserialize("rxfifo", cp);
2520 txFifo.unserialize("txfifo", cp);
2521
2522 bool txPktExists;
2523 UNSERIALIZE_SCALAR(txPktExists);
2524 if (txPktExists) {
2525 txPacket = std::make_shared();
2525 txPacket = std::make_shared<EthPacketData>(16384);
2526 txPacket->unserialize("txpacket", cp);
2527 }
2528
2529 rxTick = true;
2530 txTick = true;
2531 txFifoTick = true;
2532
2533 Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
2534 UNSERIALIZE_SCALAR(rdtr_time);
2535 UNSERIALIZE_SCALAR(radv_time);
2536 UNSERIALIZE_SCALAR(tidv_time);
2537 UNSERIALIZE_SCALAR(tadv_time);
2538 UNSERIALIZE_SCALAR(inter_time);
2539
2540 if (rdtr_time)
2541 schedule(rdtrEvent, rdtr_time);
2542
2543 if (radv_time)
2544 schedule(radvEvent, radv_time);
2545
2546 if (tidv_time)
2547 schedule(tidvEvent, tidv_time);
2548
2549 if (tadv_time)
2550 schedule(tadvEvent, tadv_time);
2551
2552 if (inter_time)
2553 schedule(interEvent, inter_time);
2554
2555 UNSERIALIZE_SCALAR(pktOffset);
2556
2557 txDescCache.unserializeSection(cp, "TxDescCache");
2558 rxDescCache.unserializeSection(cp, "RxDescCache");
2559}
2560
2561IGbE *
2562IGbEParams::create()
2563{
2564 return new IGbE(this);
2565}
2526 txPacket->unserialize("txpacket", cp);
2527 }
2528
2529 rxTick = true;
2530 txTick = true;
2531 txFifoTick = true;
2532
2533 Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
2534 UNSERIALIZE_SCALAR(rdtr_time);
2535 UNSERIALIZE_SCALAR(radv_time);
2536 UNSERIALIZE_SCALAR(tidv_time);
2537 UNSERIALIZE_SCALAR(tadv_time);
2538 UNSERIALIZE_SCALAR(inter_time);
2539
2540 if (rdtr_time)
2541 schedule(rdtrEvent, rdtr_time);
2542
2543 if (radv_time)
2544 schedule(radvEvent, radv_time);
2545
2546 if (tidv_time)
2547 schedule(tidvEvent, tidv_time);
2548
2549 if (tadv_time)
2550 schedule(tadvEvent, tadv_time);
2551
2552 if (inter_time)
2553 schedule(interEvent, inter_time);
2554
2555 UNSERIALIZE_SCALAR(pktOffset);
2556
2557 txDescCache.unserializeSection(cp, "TxDescCache");
2558 rxDescCache.unserializeSection(cp, "RxDescCache");
2559}
2560
2561IGbE *
2562IGbEParams::create()
2563{
2564 return new IGbE(this);
2565}