i8254xGBe.cc (12064:39f4d937dd22 → 12087:0e082672ac6b)
/*
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 */

/* @file
 * Device model for Intel's 8254x line of gigabit ethernet controllers.
 * In particular, an 82547 revision 2 (82547GI) MAC, because it seems to
 * have the fewest workarounds in the driver. It will probably work with
 * most of the other MACs with slight modifications.
 */

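/*
 * The FIFO sizes and DMA delay parameters consumed in the constructor below
 * come from params/IGbE.hh, i.e. from this device's Python SimObject
 * description. An illustrative config-script sketch (the IGbE_e1000 name and
 * MAC value are assumptions taken from typical gem5 configs, not from this
 * file):
 *
 *     nic = IGbE_e1000()
 *     nic.hardware_address = '00:90:00:00:00:01'
 */
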
#include "dev/net/i8254xGBe.hh"

/*
 * @todo There are really multiple DMA engines; we should implement them.
 */

#include <algorithm>
#include <memory>

#include "base/inet.hh"
#include "base/trace.hh"
#include "debug/Drain.hh"
#include "debug/EthernetAll.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/IGbE.hh"
#include "sim/stats.hh"
#include "sim/system.hh"

using namespace iGbReg;
using namespace Net;

IGbE::IGbE(const Params *p)
    : EtherDevice(p), etherInt(NULL), cpa(NULL),
      rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), inTick(false),
      rxTick(false), txTick(false), txFifoTick(false), rxDmaPacket(false),
      pktOffset(0), fetchDelay(p->fetch_delay), wbDelay(p->wb_delay),
      fetchCompDelay(p->fetch_comp_delay), wbCompDelay(p->wb_comp_delay),
      rxWriteDelay(p->rx_write_delay), txReadDelay(p->tx_read_delay),
      rdtrEvent([this]{ rdtrProcess(); }, name()),
      radvEvent([this]{ radvProcess(); }, name()),
      tadvEvent([this]{ tadvProcess(); }, name()),
      tidvEvent([this]{ tidvProcess(); }, name()),
      tickEvent([this]{ tick(); }, name()),
      interEvent([this]{ delayIntEvent(); }, name()),
      rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size),
      txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size),
      lastInterrupt(0)
{
    etherInt = new IGbEInt(name() + ".int", this);

    // Initialize internal registers per Intel documentation
    // All registers initialized to 0 by per-register constructor
    regs.ctrl.fd(1);
    regs.ctrl.lrst(1);
    regs.ctrl.speed(2);
    regs.ctrl.frcspd(1);
    regs.sts.speed(3); // Say we're 1000Mbps
    regs.sts.fd(1); // full duplex
    regs.sts.lu(1); // link up
    regs.eecd.fwe(1);
    regs.eecd.ee_type(1);
    regs.imr = 0;
    regs.iam = 0;
    regs.rxdctl.gran(1);
    regs.rxdctl.wthresh(1);
    regs.fcrth(1);
    regs.tdwba = 0;
    regs.rlpml = 0;
    regs.sw_fw_sync = 0;

    regs.pba.rxa(0x30);
    regs.pba.txa(0x10);

    eeOpBits = 0;
    eeAddrBits = 0;
    eeDataBits = 0;
    eeOpcode = 0;

    // clear all 64 16-bit words of the EEPROM
    memset(&flash, 0, EEPROM_SIZE*2);

    // Set the MAC address
    memcpy(flash, p->hardware_address.bytes(), ETH_ADDR_LEN);
    for (int x = 0; x < ETH_ADDR_LEN/2; x++)
        flash[x] = htobe(flash[x]);

    uint16_t csum = 0;
    for (int x = 0; x < EEPROM_SIZE; x++)
        csum += htobe(flash[x]);

    // Store the value that makes all EEPROM words sum to the magic
    // checksum value the driver expects (EEPROM_CSUM)
    flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum));

    // Store the MAC address as queue ID
    macAddr = p->hardware_address;

    rxFifo.clear();
    txFifo.clear();
}

IGbE::~IGbE()
{
    delete etherInt;
}

void
IGbE::init()
{
    cpa = CPA::cpa();
    PciDevice::init();
}

EtherInt*
IGbE::getEthPort(const std::string &if_name, int idx)
{

    if (if_name == "interface") {
        if (etherInt->getPeer())
            panic("Port already connected\n");
        return etherInt;
    }
    return NULL;
}

Tick
IGbE::writeConfig(PacketPtr pkt)
{
    int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDevice::writeConfig(pkt);
    else
        panic("Device specific PCI config space not implemented.\n");

    //
    // Some work may need to be done here based on the PCI COMMAND bits.
    //

    return configDelay;
}

// Handy macro for range-testing register access addresses
#define IN_RANGE(val, base, len) (val >= base && val < (base + len))

Tick
IGbE::read(PacketPtr pkt)
{
    int bar;
    Addr daddr;

    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    // Only 32-bit accesses allowed
    assert(pkt->getSize() == 4);

    DPRINTF(Ethernet, "Read device register %#X\n", daddr);

    //
    // Handle read of register here
    //


    switch (daddr) {
      case REG_CTRL:
        pkt->set<uint32_t>(regs.ctrl());
        break;
      case REG_STATUS:
        pkt->set<uint32_t>(regs.sts());
        break;
      case REG_EECD:
        pkt->set<uint32_t>(regs.eecd());
        break;
      case REG_EERD:
        pkt->set<uint32_t>(regs.eerd());
        break;
      case REG_CTRL_EXT:
        pkt->set<uint32_t>(regs.ctrl_ext());
        break;
      case REG_MDIC:
        pkt->set<uint32_t>(regs.mdic());
        break;
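      // Note on the semantics modeled below: reading ICR returns the
      // pending interrupt causes, and the read clears ICR when either the
      // interrupt line is asserted or all interrupts are masked (IMR == 0).
      // With CTRL_EXT.IAME set, the read also applies the IAM auto-mask.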
      case REG_ICR:
        DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
                regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
        pkt->set<uint32_t>(regs.icr());
        if (regs.icr.int_assert() || regs.imr == 0) {
            regs.icr = regs.icr() & ~mask(30);
            DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr());
        }
        if (regs.ctrl_ext.iame() && regs.icr.int_assert())
            regs.imr &= ~regs.iam;
        chkInterrupt();
        break;
      case REG_EICR:
        // This is only useful for MSI, but the driver reads it every time
        // Just don't do anything
        pkt->set<uint32_t>(0);
        break;
      case REG_ITR:
        pkt->set<uint32_t>(regs.itr());
        break;
      case REG_RCTL:
        pkt->set<uint32_t>(regs.rctl());
        break;
      case REG_FCTTV:
        pkt->set<uint32_t>(regs.fcttv());
        break;
      case REG_TCTL:
        pkt->set<uint32_t>(regs.tctl());
        break;
      case REG_PBA:
        pkt->set<uint32_t>(regs.pba());
        break;
      case REG_WUC:
      case REG_WUFC:
      case REG_WUS:
      case REG_LEDCTL:
        pkt->set<uint32_t>(0); // We don't care, so just return 0
        break;
      case REG_FCRTL:
        pkt->set<uint32_t>(regs.fcrtl());
        break;
      case REG_FCRTH:
        pkt->set<uint32_t>(regs.fcrth());
        break;
      case REG_RDBAL:
        pkt->set<uint32_t>(regs.rdba.rdbal());
        break;
      case REG_RDBAH:
        pkt->set<uint32_t>(regs.rdba.rdbah());
        break;
      case REG_RDLEN:
        pkt->set<uint32_t>(regs.rdlen());
        break;
      case REG_SRRCTL:
        pkt->set<uint32_t>(regs.srrctl());
        break;
      case REG_RDH:
        pkt->set<uint32_t>(regs.rdh());
        break;
      case REG_RDT:
        pkt->set<uint32_t>(regs.rdt());
        break;
      case REG_RDTR:
        pkt->set<uint32_t>(regs.rdtr());
        if (regs.rdtr.fpd()) {
            rxDescCache.writeback(0);
            DPRINTF(EthernetIntr,
                    "Posting interrupt because of RDTR.FPD write\n");
            postInterrupt(IT_RXT);
            regs.rdtr.fpd(0);
        }
        break;
      case REG_RXDCTL:
        pkt->set<uint32_t>(regs.rxdctl());
        break;
      case REG_RADV:
        pkt->set<uint32_t>(regs.radv());
        break;
      case REG_TDBAL:
        pkt->set<uint32_t>(regs.tdba.tdbal());
        break;
      case REG_TDBAH:
        pkt->set<uint32_t>(regs.tdba.tdbah());
        break;
      case REG_TDLEN:
        pkt->set<uint32_t>(regs.tdlen());
        break;
      case REG_TDH:
        pkt->set<uint32_t>(regs.tdh());
        break;
      case REG_TXDCA_CTL:
        pkt->set<uint32_t>(regs.txdca_ctl());
        break;
      case REG_TDT:
        pkt->set<uint32_t>(regs.tdt());
        break;
      case REG_TIDV:
        pkt->set<uint32_t>(regs.tidv());
        break;
      case REG_TXDCTL:
        pkt->set<uint32_t>(regs.txdctl());
        break;
      case REG_TADV:
        pkt->set<uint32_t>(regs.tadv());
        break;
      case REG_TDWBAL:
        pkt->set<uint32_t>(regs.tdwba & mask(32));
        break;
      case REG_TDWBAH:
        pkt->set<uint32_t>(regs.tdwba >> 32);
        break;
      case REG_RXCSUM:
        pkt->set<uint32_t>(regs.rxcsum());
        break;
      case REG_RLPML:
        pkt->set<uint32_t>(regs.rlpml);
        break;
      case REG_RFCTL:
        pkt->set<uint32_t>(regs.rfctl());
        break;
      case REG_MANC:
        pkt->set<uint32_t>(regs.manc());
        break;
      case REG_SWSM:
        pkt->set<uint32_t>(regs.swsm());
        regs.swsm.smbi(1);
        break;
      case REG_FWSM:
        pkt->set<uint32_t>(regs.fwsm());
        break;
      case REG_SWFWSYNC:
        pkt->set<uint32_t>(regs.sw_fw_sync);
        break;
      default:
        if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) &&
            !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) &&
            !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4) &&
            !IN_RANGE(daddr, REG_CRCERRS, STATS_REGS_SIZE))
            panic("Read request to unknown register number: %#x\n", daddr);
        else
            pkt->set<uint32_t>(0);
    };

    pkt->makeAtomicResponse();
    return pioDelay;
}

Tick
IGbE::write(PacketPtr pkt)
{
    int bar;
    Addr daddr;


    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    // Only 32-bit accesses allowed
    assert(pkt->getSize() == sizeof(uint32_t));

    DPRINTF(Ethernet, "Wrote device register %#X value %#X\n",
            daddr, pkt->get<uint32_t>());

    //
    // Handle write of register here
    //
    uint32_t val = pkt->get<uint32_t>();

    Regs::RCTL oldrctl;
    Regs::TCTL oldtctl;

    switch (daddr) {
      case REG_CTRL:
        regs.ctrl = val;
        if (regs.ctrl.tfce())
            warn("TX Flow control enabled, should implement\n");
        if (regs.ctrl.rfce())
            warn("RX Flow control enabled, should implement\n");
        break;
      case REG_CTRL_EXT:
        regs.ctrl_ext = val;
        break;
      case REG_STATUS:
        regs.sts = val;
        break;
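      // The EECD path below bit-bangs the EEPROM's SPI protocol: on each
      // rising edge of the software-driven clock (sk) one bit is shifted in
      // from din (8 opcode bits, then 8 address bits for a read), and data
      // bits are shifted out MSB-first on dout. Only the READ and RDSR
      // opcodes are emulated.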
      case REG_EECD:
        int oldClk;
        oldClk = regs.eecd.sk();
        regs.eecd = val;
        // See if this is an EEPROM access and emulate accordingly
        if (!oldClk && regs.eecd.sk()) {
            if (eeOpBits < 8) {
                eeOpcode = eeOpcode << 1 | regs.eecd.din();
                eeOpBits++;
            } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
                eeAddr = eeAddr << 1 | regs.eecd.din();
                eeAddrBits++;
            } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
                assert(eeAddr>>1 < EEPROM_SIZE);
                DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n",
                        flash[eeAddr>>1] >> eeDataBits & 0x1,
                        flash[eeAddr>>1]);
                regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1);
                eeDataBits++;
            } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) {
                regs.eecd.dout(0);
                eeDataBits++;
            } else
                panic("What's going on with eeprom interface? opcode:"
                      " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode,
                      (uint32_t)eeOpBits, (uint32_t)eeAddr,
                      (uint32_t)eeAddrBits, (uint32_t)eeDataBits);

            // Reset everything for the next command
            if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) ||
                (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) {
                eeOpBits = 0;
                eeAddrBits = 0;
                eeDataBits = 0;
                eeOpcode = 0;
                eeAddr = 0;
            }

            DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
                    (uint32_t)eeOpcode, (uint32_t)eeOpBits,
                    (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits);
            if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI ||
                                   eeOpcode == EEPROM_RDSR_OPCODE_SPI))
                panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode,
                      (uint32_t)eeOpBits);


        }
        // If the driver requests EEPROM access, grant it immediately
        regs.eecd.ee_gnt(regs.eecd.ee_req());
        break;
      case REG_EERD:
        regs.eerd = val;
        if (regs.eerd.start()) {
            regs.eerd.done(1);
            assert(regs.eerd.addr() < EEPROM_SIZE);
            regs.eerd.data(flash[regs.eerd.addr()]);
            regs.eerd.start(0);
            DPRINTF(EthernetEEPROM, "EEPROM: read addr: %#X data %#x\n",
                    regs.eerd.addr(), regs.eerd.data());
        }
        break;
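      // MDIC models the MII management interface to the PHY. Rather than
      // emulating a real PHY, the handler below returns canned register
      // values that describe a healthy, link-up gigabit PHY.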
      case REG_MDIC:
        regs.mdic = val;
        if (regs.mdic.i())
            panic("No support for interrupt on mdic complete\n");
        if (regs.mdic.phyadd() != 1)
            panic("No support for reading anything but phy\n");
        DPRINTF(Ethernet, "%s phy address %x\n",
                regs.mdic.op() == 1 ? "Writing" : "Reading",
                regs.mdic.regadd());
        switch (regs.mdic.regadd()) {
          case PHY_PSTATUS:
            regs.mdic.data(0x796D); // link up
            break;
          case PHY_PID:
            regs.mdic.data(params()->phy_pid);
            break;
          case PHY_EPID:
            regs.mdic.data(params()->phy_epid);
            break;
          case PHY_GSTATUS:
            regs.mdic.data(0x7C00);
            break;
          case PHY_EPSTATUS:
            regs.mdic.data(0x3000);
            break;
          case PHY_AGC:
            regs.mdic.data(0x180); // some random length
            break;
          default:
            regs.mdic.data(0);
        }
        regs.mdic.r(1);
        break;
      case REG_ICR:
        DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
                regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
        if (regs.ctrl_ext.iame())
            regs.imr &= ~regs.iam;
        regs.icr = ~bits(val,30,0) & regs.icr();
        chkInterrupt();
        break;
      case REG_ITR:
        regs.itr = val;
        break;
      case REG_ICS:
        DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n");
        postInterrupt((IntTypes)val);
        break;
      case REG_IMS:
        regs.imr |= val;
        chkInterrupt();
        break;
      case REG_IMC:
        regs.imr &= ~val;
        chkInterrupt();
        break;
      case REG_IAM:
        regs.iam = val;
        break;
      case REG_RCTL:
        oldrctl = regs.rctl;
        regs.rctl = val;
        if (regs.rctl.rst()) {
            rxDescCache.reset();
            DPRINTF(EthernetSM, "RXS: Got RESET!\n");
            rxFifo.clear();
            regs.rctl.rst(0);
        }
        if (regs.rctl.en())
            rxTick = true;
        restartClock();
        break;
      case REG_FCTTV:
        regs.fcttv = val;
        break;
      case REG_TCTL:
        // Capture the old value before overwriting it so the
        // disabled-to-enabled edge below is actually detectable
        oldtctl = regs.tctl;
        regs.tctl = val;
        if (regs.tctl.en())
            txTick = true;
        restartClock();
        if (regs.tctl.en() && !oldtctl.en()) {
            txDescCache.reset();
        }
        break;
      case REG_PBA:
        regs.pba.rxa(val);
        regs.pba.txa(64 - regs.pba.rxa());
        break;
      case REG_WUC:
      case REG_WUFC:
      case REG_WUS:
      case REG_LEDCTL:
      case REG_FCAL:
      case REG_FCAH:
      case REG_FCT:
      case REG_VET:
      case REG_AIFS:
      case REG_TIPG:
        ; // We don't care, so don't store anything
        break;
      case REG_IVAR0:
        warn("Writing to IVAR0, ignoring...\n");
        break;
      case REG_FCRTL:
        regs.fcrtl = val;
        break;
      case REG_FCRTH:
        regs.fcrth = val;
        break;
      case REG_RDBAL:
        regs.rdba.rdbal(val & ~mask(4));
        rxDescCache.areaChanged();
        break;
      case REG_RDBAH:
        regs.rdba.rdbah(val);
        rxDescCache.areaChanged();
        break;
      case REG_RDLEN:
        regs.rdlen = val & ~mask(7);
        rxDescCache.areaChanged();
        break;
      case REG_SRRCTL:
        regs.srrctl = val;
        break;
      case REG_RDH:
        regs.rdh = val;
        rxDescCache.areaChanged();
        break;
      case REG_RDT:
        regs.rdt = val;
        DPRINTF(EthernetSM, "RXS: RDT Updated.\n");
        if (drainState() == DrainState::Running) {
            DPRINTF(EthernetSM, "RXS: RDT Fetching Descriptors!\n");
            rxDescCache.fetchDescriptors();
        } else {
            DPRINTF(EthernetSM, "RXS: RDT NOT Fetching Desc b/c draining!\n");
        }
        break;
      case REG_RDTR:
        regs.rdtr = val;
        break;
      case REG_RADV:
        regs.radv = val;
        break;
      case REG_RXDCTL:
        regs.rxdctl = val;
        break;
      case REG_TDBAL:
        regs.tdba.tdbal(val & ~mask(4));
        txDescCache.areaChanged();
        break;
      case REG_TDBAH:
        regs.tdba.tdbah(val);
        txDescCache.areaChanged();
        break;
      case REG_TDLEN:
        regs.tdlen = val & ~mask(7);
        txDescCache.areaChanged();
        break;
      case REG_TDH:
        regs.tdh = val;
        txDescCache.areaChanged();
        break;
      case REG_TXDCA_CTL:
        regs.txdca_ctl = val;
        if (regs.txdca_ctl.enabled())
            panic("No support for DCA\n");
        break;
      case REG_TDT:
        regs.tdt = val;
        DPRINTF(EthernetSM, "TXS: TX Tail pointer updated\n");
        if (drainState() == DrainState::Running) {
            DPRINTF(EthernetSM, "TXS: TDT Fetching Descriptors!\n");
            txDescCache.fetchDescriptors();
        } else {
            DPRINTF(EthernetSM, "TXS: TDT NOT Fetching Desc b/c draining!\n");
        }
        break;
      case REG_TIDV:
        regs.tidv = val;
        break;
      case REG_TXDCTL:
        regs.txdctl = val;
        break;
      case REG_TADV:
        regs.tadv = val;
        break;
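      // TDWBAL/TDWBAH together form the 64-bit TX descriptor completion
      // writeback address (regs.tdwba). As modeled here, bit 0 of the low
      // word doubles as the enable flag, so the address handed to the
      // descriptor cache is tdwba with the low bit masked off.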
      case REG_TDWBAL:
        regs.tdwba &= ~mask(32);
        regs.tdwba |= val;
        txDescCache.completionWriteback(regs.tdwba & ~mask(1),
                                        regs.tdwba & mask(1));
        break;
      case REG_TDWBAH:
        regs.tdwba &= mask(32);
        regs.tdwba |= (uint64_t)val << 32;
        txDescCache.completionWriteback(regs.tdwba & ~mask(1),
                                        regs.tdwba & mask(1));
        break;
      case REG_RXCSUM:
        regs.rxcsum = val;
        break;
      case REG_RLPML:
        regs.rlpml = val;
        break;
      case REG_RFCTL:
        regs.rfctl = val;
        if (regs.rfctl.exsten())
            panic("Extended RX descriptors not implemented\n");
        break;
      case REG_MANC:
        regs.manc = val;
        break;
      case REG_SWSM:
        regs.swsm = val;
        if (regs.fwsm.eep_fw_semaphore())
            regs.swsm.swesmbi(0);
        break;
      case REG_SWFWSYNC:
        regs.sw_fw_sync = val;
        break;
      default:
        if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) &&
            !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) &&
            !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4))
            panic("Write request to unknown register number: %#x\n", daddr);
    };

    pkt->makeAtomicResponse();
    return pioDelay;
}

void
IGbE::postInterrupt(IntTypes t, bool now)
{
    assert(t);

    // Interrupt is already pending
    if (t & regs.icr() && !now)
        return;

    regs.icr = regs.icr() | t;

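    // The ITR interval field is in units of 256 ns, and at most one
    // interrupt may be posted per interval; e.g. itr.interval() == 1000
    // throttles interrupts to one every 256 us. If the interval has not yet
    // elapsed since lastInterrupt, the post is deferred via interEvent.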
    Tick itr_interval = SimClock::Int::ns * 256 * regs.itr.interval();
    DPRINTF(EthernetIntr,
            "EINT: postInterrupt() curTick(): %d itr: %d interval: %d\n",
            curTick(), regs.itr.interval(), itr_interval);

    if (regs.itr.interval() == 0 || now ||
        lastInterrupt + itr_interval <= curTick()) {
        if (interEvent.scheduled()) {
            deschedule(interEvent);
        }
        cpuPostInt();
    } else {
        Tick int_time = lastInterrupt + itr_interval;
        assert(int_time > 0);
        DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for tick %d\n",
                int_time);
        if (!interEvent.scheduled()) {
            schedule(interEvent, int_time);
        }
    }
}

void
IGbE::delayIntEvent()
{
    cpuPostInt();
}


void
IGbE::cpuPostInt()
{

    postedInterrupts++;

    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Interrupt Masked. Not Posting\n");
        return;
    }

    DPRINTF(Ethernet, "Posting Interrupt\n");


    if (interEvent.scheduled()) {
        deschedule(interEvent);
    }

    if (rdtrEvent.scheduled()) {
        regs.icr.rxt0(1);
        deschedule(rdtrEvent);
    }
    if (radvEvent.scheduled()) {
        regs.icr.rxt0(1);
        deschedule(radvEvent);
    }
    if (tadvEvent.scheduled()) {
        regs.icr.txdw(1);
        deschedule(tadvEvent);
    }
    if (tidvEvent.scheduled()) {
        regs.icr.txdw(1);
        deschedule(tidvEvent);
    }

    regs.icr.int_assert(1);
    DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n",
            regs.icr());

    intrPost();

    lastInterrupt = curTick();
}

void
IGbE::cpuClearInt()
{
    if (regs.icr.int_assert()) {
        regs.icr.int_assert(0);
        DPRINTF(EthernetIntr,
                "EINT: Clearing interrupt to CPU now. Vector %#x\n",
                regs.icr());
        intrClear();
    }
}

void
IGbE::chkInterrupt()
{
    DPRINTF(Ethernet, "Checking interrupts icr: %#x imr: %#x\n", regs.icr(),
            regs.imr);
    // Check if we need to clear the cpu interrupt
    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Mask cleared all interrupts\n");
        if (interEvent.scheduled())
            deschedule(interEvent);
        if (regs.icr.int_assert())
            cpuClearInt();
    }
    DPRINTF(Ethernet, "ITR = %#X itr.interval = %#X\n",
            regs.itr(), regs.itr.interval());

    if (regs.icr() & regs.imr) {
        if (regs.itr.interval() == 0) {
            cpuPostInt();
        } else {
            DPRINTF(Ethernet,
                    "Possibly scheduling interrupt because of imr write\n");
            if (!interEvent.scheduled()) {
                Tick t =
                    curTick() + SimClock::Int::ns * 256 * regs.itr.interval();
                DPRINTF(Ethernet, "Scheduling for %d\n", t);
                schedule(interEvent, t);
            }
        }
    }
}


///////////////////////////// IGbE::DescCache //////////////////////////////

template<class T>
IGbE::DescCache<T>::DescCache(IGbE *i, const std::string n, int s)
    : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0),
      wbOut(0), moreToWb(false), wbAlignment(0), pktPtr(NULL),
      wbDelayEvent([this]{ writeback1(); }, n),
      fetchDelayEvent([this]{ fetchDescriptors1(); }, n),
      fetchEvent([this]{ fetchComplete(); }, n),
      wbEvent([this]{ wbComplete(); }, n)
{
    fetchBuf = new T[size];
    wbBuf = new T[size];
}

template<class T>
IGbE::DescCache<T>::~DescCache()
{
    reset();
    delete[] fetchBuf;
    delete[] wbBuf;
}

template<class T>
void
IGbE::DescCache<T>::areaChanged()
{
    if (usedCache.size() > 0 || curFetching || wbOut)
        panic("Descriptor Address, Length or Head changed. Bad\n");
    reset();

}

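// A note on the writeback scheme below: aMask is an alignment mask on the
// number of descriptors written back at once (a smaller mask is less
// restrictive; 0 allows any count). A request that arrives while a
// writeback is in flight is coalesced by lowering the pending mask, and is
// replayed from wbComplete() via the moreToWb flag.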
template<class T>
void
IGbE::DescCache<T>::writeback(Addr aMask)
{
    int curHead = descHead();
    int max_to_wb = usedCache.size();

    // Check if this writeback is less restrictive than the previous
    // and if so set up another one immediately following it
    if (wbOut) {
        if (aMask < wbAlignment) {
            moreToWb = true;
            wbAlignment = aMask;
        }
        DPRINTF(EthernetDesc,
                "Writing back already in process, returning\n");
        return;
    }

    moreToWb = false;
    wbAlignment = aMask;


    DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
            "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
            curHead, descTail(), descLen(), cachePnt, max_to_wb,
            descLeft());

    if (max_to_wb + curHead >= descLen()) {
        max_to_wb = descLen() - curHead;
        moreToWb = true;
        // this is by definition aligned correctly
    } else if (wbAlignment != 0) {
        // align the wb point to the mask
        max_to_wb = max_to_wb & ~wbAlignment;
    }

    DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);

    if (max_to_wb <= 0) {
        if (usedCache.size())
            igbe->anBegin(annSmWb, "Wait Alignment", CPA::FL_WAIT);
        else
            igbe->anWe(annSmWb, annUsedCacheQ);
        return;
    }

    wbOut = max_to_wb;

    assert(!wbDelayEvent.scheduled());
    igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
    igbe->anBegin(annSmWb, "Prepare Writeback Desc");
}

template<class T>
void
IGbE::DescCache<T>::writeback1()
{
    // If we're draining, delay issuing this DMA
    if (igbe->drainState() != DrainState::Running) {
        igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
        return;
    }

    DPRINTF(EthernetDesc, "Beginning DMA of %d descriptors\n", wbOut);

    for (int x = 0; x < wbOut; x++) {
        assert(usedCache.size());
        memcpy(&wbBuf[x], usedCache[x], sizeof(T));
        igbe->anPq(annSmWb, annUsedCacheQ);
        igbe->anPq(annSmWb, annDescQ);
        igbe->anQ(annSmWb, annUsedDescQ);
    }


    igbe->anBegin(annSmWb, "Writeback Desc DMA");

    assert(wbOut);
    igbe->dmaWrite(pciToDma(descBase() + descHead() * sizeof(T)),
                   wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf,
                   igbe->wbCompDelay);
}

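// Descriptor fetches below treat the descriptor area as the circular ring
// it is in hardware: at most one DMA is in flight at a time, a single fetch
// never reads past the end of the ring (the wrap is handled by a later
// fetch), and the count is further limited by the free space in this cache.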
template<class T>
void
IGbE::DescCache<T>::fetchDescriptors()
{
    size_t max_to_fetch;

    if (curFetching) {
        DPRINTF(EthernetDesc,
                "Currently fetching %d descriptors, returning\n",
                curFetching);
        return;
    }

    if (descTail() >= cachePnt)
        max_to_fetch = descTail() - cachePnt;
    else
        max_to_fetch = descLen() - cachePnt;

    size_t free_cache = size - usedCache.size() - unusedCache.size();

    if (!max_to_fetch)
        igbe->anWe(annSmFetch, annUnusedDescQ);
    else
        igbe->anPq(annSmFetch, annUnusedDescQ, max_to_fetch);

    if (max_to_fetch) {
        if (!free_cache)
            igbe->anWf(annSmFetch, annDescQ);
        else
            igbe->anRq(annSmFetch, annDescQ, free_cache);
    }

    max_to_fetch = std::min(max_to_fetch, free_cache);


    DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
            "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
            descHead(), descTail(), descLen(), cachePnt,
            max_to_fetch, descLeft());

    // Nothing to do
    if (max_to_fetch == 0)
        return;

    // So we don't have two descriptor fetches going on at once
    curFetching = max_to_fetch;

    assert(!fetchDelayEvent.scheduled());
    igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
    igbe->anBegin(annSmFetch, "Prepare Fetch Desc");
}

template<class T>
void
IGbE::DescCache<T>::fetchDescriptors1()
{
    // If we're draining, delay issuing this DMA
    if (igbe->drainState() != DrainState::Running) {
        igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
        return;
    }

    igbe->anBegin(annSmFetch, "Fetch Desc");

    DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
            descBase() + cachePnt * sizeof(T),
            pciToDma(descBase() + cachePnt * sizeof(T)),
            curFetching * sizeof(T));
    assert(curFetching);
    igbe->dmaRead(pciToDma(descBase() + cachePnt * sizeof(T)),
                  curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf,
                  igbe->fetchCompDelay);
}

template<class T>
void
IGbE::DescCache<T>::fetchComplete()
{
    T *newDesc;
    igbe->anBegin(annSmFetch, "Fetch Complete");
    for (int x = 0; x < curFetching; x++) {
        newDesc = new T;
        memcpy(newDesc, &fetchBuf[x], sizeof(T));
        unusedCache.push_back(newDesc);
        igbe->anDq(annSmFetch, annUnusedDescQ);
        igbe->anQ(annSmFetch, annUnusedCacheQ);
        igbe->anQ(annSmFetch, annDescQ);
    }


#ifndef NDEBUG
    int oldCp = cachePnt;
#endif

    cachePnt += curFetching;
    assert(cachePnt <= descLen());
    if (cachePnt == descLen())
        cachePnt = 0;

    curFetching = 0;

    DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
            oldCp, cachePnt);

    if ((descTail() >= cachePnt ? (descTail() - cachePnt) : (descLen() -
                                                             cachePnt)) == 0)
    {
        igbe->anWe(annSmFetch, annUnusedDescQ);
    } else if (!(size - usedCache.size() - unusedCache.size())) {
        igbe->anWf(annSmFetch, annDescQ);
    } else {
        igbe->anBegin(annSmFetch, "Wait", CPA::FL_WAIT);
    }

    enableSm();
    igbe->checkDrain();
}

template<class T>
void
IGbE::DescCache<T>::wbComplete()
{

    igbe->anBegin(annSmWb, "Finish Writeback");

    long curHead = descHead();
#ifndef NDEBUG
    long oldHead = curHead;
#endif

    for (int x = 0; x < wbOut; x++) {
        assert(usedCache.size());
        delete usedCache[0];
        usedCache.pop_front();

        igbe->anDq(annSmWb, annUsedCacheQ);
        igbe->anDq(annSmWb, annDescQ);
    }

    curHead += wbOut;
    wbOut = 0;

    if (curHead >= descLen())
        curHead -= descLen();

    // Update the head
    updateHead(curHead);

    DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
            oldHead, curHead);

    // If we still have more to wb, call wb now
    actionAfterWb();
    if (moreToWb) {
        moreToWb = false;
        DPRINTF(EthernetDesc, "Writeback has more to do\n");
        writeback(wbAlignment);
    }

    if (!wbOut) {
        igbe->checkDrain();
        if (usedCache.size())
            igbe->anBegin(annSmWb, "Wait", CPA::FL_WAIT);
        else
            igbe->anWe(annSmWb, annUsedCacheQ);
    }
    fetchAfterWb();
}

template<class T>
void
IGbE::DescCache<T>::reset()
{
    DPRINTF(EthernetDesc, "Resetting descriptor cache\n");
    for (typename CacheType::size_type x = 0; x < usedCache.size(); x++)
        delete usedCache[x];
    for (typename CacheType::size_type x = 0; x < unusedCache.size(); x++)
        delete unusedCache[x];

    usedCache.clear();
    unusedCache.clear();

    cachePnt = 0;

}

template<class T>
void
IGbE::DescCache<T>::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(cachePnt);
    SERIALIZE_SCALAR(curFetching);
    SERIALIZE_SCALAR(wbOut);
    SERIALIZE_SCALAR(moreToWb);
    SERIALIZE_SCALAR(wbAlignment);

    typename CacheType::size_type usedCacheSize = usedCache.size();
    SERIALIZE_SCALAR(usedCacheSize);
    for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
        arrayParamOut(cp, csprintf("usedCache_%d", x),
                      (uint8_t*)usedCache[x], sizeof(T));
    }

    typename CacheType::size_type unusedCacheSize = unusedCache.size();
    SERIALIZE_SCALAR(unusedCacheSize);
    for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
        arrayParamOut(cp, csprintf("unusedCache_%d", x),
                      (uint8_t*)unusedCache[x], sizeof(T));
    }

    Tick fetch_delay = 0, wb_delay = 0;
    if (fetchDelayEvent.scheduled())
        fetch_delay = fetchDelayEvent.when();
    SERIALIZE_SCALAR(fetch_delay);
    if (wbDelayEvent.scheduled())
        wb_delay = wbDelayEvent.when();
    SERIALIZE_SCALAR(wb_delay);


}

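// On restore, any descriptor-fetch or writeback DMA that had been delayed at
// checkpoint time is re-scheduled at its original tick; a fetch_delay or
// wb_delay of zero means the corresponding event was not pending.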
template<class T>
void
IGbE::DescCache<T>::unserialize(CheckpointIn &cp)
{
    UNSERIALIZE_SCALAR(cachePnt);
    UNSERIALIZE_SCALAR(curFetching);
    UNSERIALIZE_SCALAR(wbOut);
    UNSERIALIZE_SCALAR(moreToWb);
    UNSERIALIZE_SCALAR(wbAlignment);

    typename CacheType::size_type usedCacheSize;
    UNSERIALIZE_SCALAR(usedCacheSize);
    T *temp;
    for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
        temp = new T;
        arrayParamIn(cp, csprintf("usedCache_%d", x),
                     (uint8_t*)temp, sizeof(T));
        usedCache.push_back(temp);
    }

    typename CacheType::size_type unusedCacheSize;
    UNSERIALIZE_SCALAR(unusedCacheSize);
    for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
        temp = new T;
        arrayParamIn(cp, csprintf("unusedCache_%d", x),
                     (uint8_t*)temp, sizeof(T));
        unusedCache.push_back(temp);
    }
    Tick fetch_delay = 0, wb_delay = 0;
    UNSERIALIZE_SCALAR(fetch_delay);
    UNSERIALIZE_SCALAR(wb_delay);
    if (fetch_delay)
        igbe->schedule(fetchDelayEvent, fetch_delay);
    if (wb_delay)
        igbe->schedule(wbDelayEvent, wb_delay);


}

///////////////////////////// IGbE::RxDescCache //////////////////////////////

IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<RxDesc>(i, n, s), pktDone(false), splitCount(0),
836{
837 fetchBuf = new T[size];
838 wbBuf = new T[size];
839}
840
841template<class T>
842IGbE::DescCache<T>::~DescCache()
843{
844 reset();
845 delete[] fetchBuf;
846 delete[] wbBuf;
847}
848
849template<class T>
850void
851IGbE::DescCache<T>::areaChanged()
852{
853 if (usedCache.size() > 0 || curFetching || wbOut)
854 panic("Descriptor Address, Length or Head changed. Bad\n");
855 reset();
856
857}
858
859template<class T>
860void
861IGbE::DescCache<T>::writeback(Addr aMask)
862{
863 int curHead = descHead();
864 int max_to_wb = usedCache.size();
865
866 // Check if this writeback is less restrictive that the previous
867 // and if so setup another one immediately following it
868 if (wbOut) {
869 if (aMask < wbAlignment) {
870 moreToWb = true;
871 wbAlignment = aMask;
872 }
873 DPRINTF(EthernetDesc,
874 "Writing back already in process, returning\n");
875 return;
876 }
877
878 moreToWb = false;
879 wbAlignment = aMask;
880
881
882 DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
883 "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
884 curHead, descTail(), descLen(), cachePnt, max_to_wb,
885 descLeft());
886
887 if (max_to_wb + curHead >= descLen()) {
888 max_to_wb = descLen() - curHead;
889 moreToWb = true;
890 // this is by definition aligned correctly
891 } else if (wbAlignment != 0) {
892 // align the wb point to the mask
893 max_to_wb = max_to_wb & ~wbAlignment;
894 }
895
896 DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);
897
898 if (max_to_wb <= 0) {
899 if (usedCache.size())
900 igbe->anBegin(annSmWb, "Wait Alignment", CPA::FL_WAIT);
901 else
902 igbe->anWe(annSmWb, annUsedCacheQ);
903 return;
904 }
905
906 wbOut = max_to_wb;
907
908 assert(!wbDelayEvent.scheduled());
909 igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
910 igbe->anBegin(annSmWb, "Prepare Writeback Desc");
911}
912
913template<class T>
914void
915IGbE::DescCache<T>::writeback1()
916{
917 // If we're draining delay issuing this DMA
918 if (igbe->drainState() != DrainState::Running) {
919 igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
920 return;
921 }
922
923 DPRINTF(EthernetDesc, "Begining DMA of %d descriptors\n", wbOut);
924
925 for (int x = 0; x < wbOut; x++) {
926 assert(usedCache.size());
927 memcpy(&wbBuf[x], usedCache[x], sizeof(T));
928 igbe->anPq(annSmWb, annUsedCacheQ);
929 igbe->anPq(annSmWb, annDescQ);
930 igbe->anQ(annSmWb, annUsedDescQ);
931 }
932
933
934 igbe->anBegin(annSmWb, "Writeback Desc DMA");
935
936 assert(wbOut);
937 igbe->dmaWrite(pciToDma(descBase() + descHead() * sizeof(T)),
938 wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf,
939 igbe->wbCompDelay);
940}
941
942template<class T>
943void
944IGbE::DescCache<T>::fetchDescriptors()
945{
946 size_t max_to_fetch;
947
948 if (curFetching) {
949 DPRINTF(EthernetDesc,
950 "Currently fetching %d descriptors, returning\n",
951 curFetching);
952 return;
953 }
954
955 if (descTail() >= cachePnt)
956 max_to_fetch = descTail() - cachePnt;
957 else
958 max_to_fetch = descLen() - cachePnt;
959
960 size_t free_cache = size - usedCache.size() - unusedCache.size();
961
962 if (!max_to_fetch)
963 igbe->anWe(annSmFetch, annUnusedDescQ);
964 else
965 igbe->anPq(annSmFetch, annUnusedDescQ, max_to_fetch);
966
967 if (max_to_fetch) {
968 if (!free_cache)
969 igbe->anWf(annSmFetch, annDescQ);
970 else
971 igbe->anRq(annSmFetch, annDescQ, free_cache);
972 }
973
974 max_to_fetch = std::min(max_to_fetch, free_cache);
975
976
977 DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
978 "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
979 descHead(), descTail(), descLen(), cachePnt,
980 max_to_fetch, descLeft());
981
982 // Nothing to do
983 if (max_to_fetch == 0)
984 return;
985
986 // So we don't have two descriptor fetches going on at once
987 curFetching = max_to_fetch;
988
989 assert(!fetchDelayEvent.scheduled());
990 igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
991 igbe->anBegin(annSmFetch, "Prepare Fetch Desc");
992}
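
// (Illustrative sketch, assuming a ring of descLen entries indexed as in
// fetchDescriptors() above.) One DMA fetches from cachePnt up to the tail,
// but never across the ring's wrap point; the wrapped remainder is picked
// up by a later fetch:

#include <cassert>
#include <cstddef>

std::size_t fetchable(std::size_t cachePnt, std::size_t tail,
                      std::size_t descLen)
{
    return tail >= cachePnt ? tail - cachePnt : descLen - cachePnt;
}

int main()
{
    assert(fetchable(3, 10, 16) == 7);      // straight run up to the tail
    assert(fetchable(14, 2, 16) == 2);      // wraps: stop at end of ring
}
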
993
994template<class T>
995void
996IGbE::DescCache<T>::fetchDescriptors1()
997{
998 // If we're draining delay issuing this DMA
999 if (igbe->drainState() != DrainState::Running) {
1000 igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
1001 return;
1002 }
1003
1004 igbe->anBegin(annSmFetch, "Fetch Desc");
1005
1006 DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
1007 descBase() + cachePnt * sizeof(T),
1008 pciToDma(descBase() + cachePnt * sizeof(T)),
1009 curFetching * sizeof(T));
1010 assert(curFetching);
1011 igbe->dmaRead(pciToDma(descBase() + cachePnt * sizeof(T)),
1012 curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf,
1013 igbe->fetchCompDelay);
1014}
1015
1016template<class T>
1017void
1018IGbE::DescCache<T>::fetchComplete()
1019{
1020 T *newDesc;
1021 igbe->anBegin(annSmFetch, "Fetch Complete");
1022 for (int x = 0; x < curFetching; x++) {
1023 newDesc = new T;
1024 memcpy(newDesc, &fetchBuf[x], sizeof(T));
1025 unusedCache.push_back(newDesc);
1026 igbe->anDq(annSmFetch, annUnusedDescQ);
1027 igbe->anQ(annSmFetch, annUnusedCacheQ);
1028 igbe->anQ(annSmFetch, annDescQ);
1029 }
1030
1031
1032#ifndef NDEBUG
1033 int oldCp = cachePnt;
1034#endif
1035
1036 cachePnt += curFetching;
1037 assert(cachePnt <= descLen());
1038 if (cachePnt == descLen())
1039 cachePnt = 0;
1040
1041 curFetching = 0;
1042
1043 DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
1044 oldCp, cachePnt);
1045
1046 if ((descTail() >= cachePnt ? (descTail() - cachePnt) : (descLen() -
1047 cachePnt)) == 0)
1048 {
1049 igbe->anWe(annSmFetch, annUnusedDescQ);
1050 } else if (!(size - usedCache.size() - unusedCache.size())) {
1051 igbe->anWf(annSmFetch, annDescQ);
1052 } else {
1053 igbe->anBegin(annSmFetch, "Wait", CPA::FL_WAIT);
1054 }
1055
1056 enableSm();
1057 igbe->checkDrain();
1058}
1059
1060template<class T>
1061void
1062IGbE::DescCache<T>::wbComplete()
1063{
1064
1065 igbe->anBegin(annSmWb, "Finish Writeback");
1066
1067 long curHead = descHead();
1068#ifndef NDEBUG
1069 long oldHead = curHead;
1070#endif
1071
1072 for (int x = 0; x < wbOut; x++) {
1073 assert(usedCache.size());
1074 delete usedCache[0];
1075 usedCache.pop_front();
1076
1077 igbe->anDq(annSmWb, annUsedCacheQ);
1078 igbe->anDq(annSmWb, annDescQ);
1079 }
1080
1081 curHead += wbOut;
1082 wbOut = 0;
1083
1084 if (curHead >= descLen())
1085 curHead -= descLen();
1086
1087 // Update the head
1088 updateHead(curHead);
1089
1090 DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
1091 oldHead, curHead);
1092
1093 // If we still have more to wb, call wb now
1094 actionAfterWb();
1095 if (moreToWb) {
1096 moreToWb = false;
1097         DPRINTF(EthernetDesc, "Writeback has more to do\n");
1098 writeback(wbAlignment);
1099 }
1100
1101 if (!wbOut) {
1102 igbe->checkDrain();
1103 if (usedCache.size())
1104 igbe->anBegin(annSmWb, "Wait", CPA::FL_WAIT);
1105 else
1106 igbe->anWe(annSmWb, annUsedCacheQ);
1107 }
1108 fetchAfterWb();
1109}
1110
1111template<class T>
1112void
1113IGbE::DescCache<T>::reset()
1114{
1115     DPRINTF(EthernetDesc, "Resetting descriptor cache\n");
1116 for (typename CacheType::size_type x = 0; x < usedCache.size(); x++)
1117 delete usedCache[x];
1118 for (typename CacheType::size_type x = 0; x < unusedCache.size(); x++)
1119 delete unusedCache[x];
1120
1121 usedCache.clear();
1122 unusedCache.clear();
1123
1124 cachePnt = 0;
1125
1126}
1127
1128template<class T>
1129void
1130IGbE::DescCache<T>::serialize(CheckpointOut &cp) const
1131{
1132 SERIALIZE_SCALAR(cachePnt);
1133 SERIALIZE_SCALAR(curFetching);
1134 SERIALIZE_SCALAR(wbOut);
1135 SERIALIZE_SCALAR(moreToWb);
1136 SERIALIZE_SCALAR(wbAlignment);
1137
1138 typename CacheType::size_type usedCacheSize = usedCache.size();
1139 SERIALIZE_SCALAR(usedCacheSize);
1140 for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
1141 arrayParamOut(cp, csprintf("usedCache_%d", x),
1142 (uint8_t*)usedCache[x],sizeof(T));
1143 }
1144
1145 typename CacheType::size_type unusedCacheSize = unusedCache.size();
1146 SERIALIZE_SCALAR(unusedCacheSize);
1147 for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
1148 arrayParamOut(cp, csprintf("unusedCache_%d", x),
1149 (uint8_t*)unusedCache[x],sizeof(T));
1150 }
1151
1152 Tick fetch_delay = 0, wb_delay = 0;
1153 if (fetchDelayEvent.scheduled())
1154 fetch_delay = fetchDelayEvent.when();
1155 SERIALIZE_SCALAR(fetch_delay);
1156 if (wbDelayEvent.scheduled())
1157 wb_delay = wbDelayEvent.when();
1158 SERIALIZE_SCALAR(wb_delay);
1159
1160
1161}
1162
1163template<class T>
1164void
1165IGbE::DescCache<T>::unserialize(CheckpointIn &cp)
1166{
1167 UNSERIALIZE_SCALAR(cachePnt);
1168 UNSERIALIZE_SCALAR(curFetching);
1169 UNSERIALIZE_SCALAR(wbOut);
1170 UNSERIALIZE_SCALAR(moreToWb);
1171 UNSERIALIZE_SCALAR(wbAlignment);
1172
1173 typename CacheType::size_type usedCacheSize;
1174 UNSERIALIZE_SCALAR(usedCacheSize);
1175 T *temp;
1176 for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
1177 temp = new T;
1178 arrayParamIn(cp, csprintf("usedCache_%d", x),
1179 (uint8_t*)temp,sizeof(T));
1180 usedCache.push_back(temp);
1181 }
1182
1183 typename CacheType::size_type unusedCacheSize;
1184 UNSERIALIZE_SCALAR(unusedCacheSize);
1185 for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
1186 temp = new T;
1187 arrayParamIn(cp, csprintf("unusedCache_%d", x),
1188 (uint8_t*)temp,sizeof(T));
1189 unusedCache.push_back(temp);
1190 }
1191 Tick fetch_delay = 0, wb_delay = 0;
1192 UNSERIALIZE_SCALAR(fetch_delay);
1193 UNSERIALIZE_SCALAR(wb_delay);
1194 if (fetch_delay)
1195 igbe->schedule(fetchDelayEvent, fetch_delay);
1196 if (wb_delay)
1197 igbe->schedule(wbDelayEvent, wb_delay);
1198
1199
1200}
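
// (Stand-in sketch; MiniEvent is hypothetical, not gem5's event API.) The
// save/restore idiom used above, and again in IGbE::serialize() below:
// record an event's absolute time only if it is scheduled, and on restore
// re-schedule only when a nonzero time was saved:

#include <cassert>
#include <cstdint>

struct MiniEvent
{
    uint64_t when = 0;
    bool scheduled = false;
};

uint64_t saveTime(const MiniEvent &e)       // serialize side
{
    return e.scheduled ? e.when : 0;
}

void restoreTime(MiniEvent &e, uint64_t t)  // unserialize side
{
    if (t) {
        e.when = t;
        e.scheduled = true;
    }
}

int main()
{
    MiniEvent pending, idle, a, b;
    pending.when = 500;
    pending.scheduled = true;
    restoreTime(a, saveTime(pending));
    restoreTime(b, saveTime(idle));
    assert(a.scheduled && a.when == 500);
    assert(!b.scheduled);
}
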
1201
1202///////////////////////////// IGbE::RxDescCache //////////////////////////////
1203
1204IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
1205 : DescCache<RxDesc>(i, n, s), pktDone(false), splitCount(0),
1200 pktEvent(this), pktHdrEvent(this), pktDataEvent(this)
1206 pktEvent([this]{ pktComplete(); }, n),
1207 pktHdrEvent([this]{ pktSplitDone(); }, n),
1208 pktDataEvent([this]{ pktSplitDone(); }, n)
1201
1202{
1203 annSmFetch = "RX Desc Fetch";
1204 annSmWb = "RX Desc Writeback";
1205 annUnusedDescQ = "RX Unused Descriptors";
1206 annUnusedCacheQ = "RX Unused Descriptor Cache";
1207 annUsedCacheQ = "RX Used Descriptor Cache";
1208 annUsedDescQ = "RX Used Descriptors";
1209 annDescQ = "RX Descriptors";
1210}
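
// The two initializer lists above are the substance of this diff: the old
// revision passed `this` to per-member EventWrapper objects bound to a
// fixed member function, while the new one constructs each event from a
// lambda plus a name. The same substitution appears again in the
// TxDescCache constructor below. A rough sketch of the wrapper shape the
// new initializers assume (an illustration, not gem5's actual
// EventFunctionWrapper):

#include <functional>
#include <string>
#include <utility>

struct FunctionEvent
{
    std::function<void()> callback;
    std::string name;

    FunctionEvent(std::function<void()> cb, std::string n)
        : callback(std::move(cb)), name(std::move(n)) {}

    void process() { callback(); }          // run when the event fires
};

// e.g. within a member initializer list:
//     pktEvent([this]{ pktComplete(); }, n)
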
1211
1212void
1213IGbE::RxDescCache::pktSplitDone()
1214{
1215 splitCount++;
1216 DPRINTF(EthernetDesc,
1217 "Part of split packet done: splitcount now %d\n", splitCount);
1218 assert(splitCount <= 2);
1219 if (splitCount != 2)
1220 return;
1221 splitCount = 0;
1222 DPRINTF(EthernetDesc,
1223 "Part of split packet done: calling pktComplete()\n");
1224 pktComplete();
1225}
1226
1227int
1228IGbE::RxDescCache::writePacket(EthPacketPtr packet, int pkt_offset)
1229{
1230 assert(unusedCache.size());
1231 //if (!unusedCache.size())
1232 // return false;
1233
1234 pktPtr = packet;
1235 pktDone = false;
1236 unsigned buf_len, hdr_len;
1237
1238 RxDesc *desc = unusedCache.front();
1239 switch (igbe->regs.srrctl.desctype()) {
1240 case RXDT_LEGACY:
1241 assert(pkt_offset == 0);
1242 bytesCopied = packet->length;
1243 DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n",
1244 packet->length, igbe->regs.rctl.descSize());
1245 assert(packet->length < igbe->regs.rctl.descSize());
1246 igbe->dmaWrite(pciToDma(desc->legacy.buf),
1247 packet->length, &pktEvent, packet->data,
1248 igbe->rxWriteDelay);
1249 break;
1250 case RXDT_ADV_ONEBUF:
1251 assert(pkt_offset == 0);
1252 bytesCopied = packet->length;
1253 buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
1254 igbe->regs.rctl.descSize();
1255 DPRINTF(EthernetDesc, "Packet Length: %d srrctl: %#x Desc Size: %d\n",
1256 packet->length, igbe->regs.srrctl(), buf_len);
1257 assert(packet->length < buf_len);
1258 igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
1259 packet->length, &pktEvent, packet->data,
1260 igbe->rxWriteDelay);
1261 desc->adv_wb.header_len = htole(0);
1262 desc->adv_wb.sph = htole(0);
1263 desc->adv_wb.pkt_len = htole((uint16_t)(pktPtr->length));
1264 break;
1265 case RXDT_ADV_SPLIT_A:
1266 int split_point;
1267
1268 buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
1269 igbe->regs.rctl.descSize();
1270 hdr_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.hdrLen() : 0;
1271 DPRINTF(EthernetDesc,
1272 "lpe: %d Packet Length: %d offset: %d srrctl: %#x "
1273 "hdr addr: %#x Hdr Size: %d desc addr: %#x Desc Size: %d\n",
1274 igbe->regs.rctl.lpe(), packet->length, pkt_offset,
1275 igbe->regs.srrctl(), desc->adv_read.hdr, hdr_len,
1276 desc->adv_read.pkt, buf_len);
1277
1278 split_point = hsplit(pktPtr);
1279
1280 if (packet->length <= hdr_len) {
1281 bytesCopied = packet->length;
1282 assert(pkt_offset == 0);
1283 DPRINTF(EthernetDesc, "Hdr split: Entire packet in header\n");
1284 igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
1285 packet->length, &pktEvent, packet->data,
1286 igbe->rxWriteDelay);
1287 desc->adv_wb.header_len = htole((uint16_t)packet->length);
1288 desc->adv_wb.sph = htole(0);
1289 desc->adv_wb.pkt_len = htole(0);
1290 } else if (split_point) {
1291 if (pkt_offset) {
1292 // we are only copying some data, header/data has already been
1293 // copied
1294 int max_to_copy =
1295 std::min(packet->length - pkt_offset, buf_len);
1296 bytesCopied += max_to_copy;
1297 DPRINTF(EthernetDesc,
1298 "Hdr split: Continuing data buffer copy\n");
1299 igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
1300 max_to_copy, &pktEvent,
1301 packet->data + pkt_offset, igbe->rxWriteDelay);
1302 desc->adv_wb.header_len = htole(0);
1303 desc->adv_wb.pkt_len = htole((uint16_t)max_to_copy);
1304 desc->adv_wb.sph = htole(0);
1305 } else {
1306 int max_to_copy =
1307 std::min(packet->length - split_point, buf_len);
1308 bytesCopied += max_to_copy + split_point;
1309
1310 DPRINTF(EthernetDesc, "Hdr split: splitting at %d\n",
1311 split_point);
1312 igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
1313 split_point, &pktHdrEvent,
1314 packet->data, igbe->rxWriteDelay);
1315 igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
1316 max_to_copy, &pktDataEvent,
1317 packet->data + split_point, igbe->rxWriteDelay);
1318 desc->adv_wb.header_len = htole(split_point);
1319 desc->adv_wb.sph = 1;
1320 desc->adv_wb.pkt_len = htole((uint16_t)(max_to_copy));
1321 }
1322 } else {
1323         panic("Unsupported: header split outside the header buffer, or "
1324               "undecodable packet that does not fit in the header\n");
1325 }
1326 break;
1327 default:
1328         panic("Unimplemented RX receive buffer type: %d\n",
1329 igbe->regs.srrctl.desctype());
1330 }
1331 return bytesCopied;
1332
1333}
1334
1335void
1336IGbE::RxDescCache::pktComplete()
1337{
1338 assert(unusedCache.size());
1339 RxDesc *desc;
1340 desc = unusedCache.front();
1341
1342 igbe->anBegin("RXS", "Update Desc");
1343
1344     uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4;
1345 DPRINTF(EthernetDesc, "pktPtr->length: %d bytesCopied: %d "
1346 "stripcrc offset: %d value written: %d %d\n",
1347 pktPtr->length, bytesCopied, crcfixup,
1348 htole((uint16_t)(pktPtr->length + crcfixup)),
1349 (uint16_t)(pktPtr->length + crcfixup));
1350
1351 // no support for anything but starting at 0
1352 assert(igbe->regs.rxcsum.pcss() == 0);
1353
1354 DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n");
1355
1356 uint16_t status = RXDS_DD;
1357 uint8_t err = 0;
1358 uint16_t ext_err = 0;
1359 uint16_t csum = 0;
1360 uint16_t ptype = 0;
1361 uint16_t ip_id = 0;
1362
1363 assert(bytesCopied <= pktPtr->length);
1364 if (bytesCopied == pktPtr->length)
1365 status |= RXDS_EOP;
1366
1367 IpPtr ip(pktPtr);
1368
1369 if (ip) {
1370         DPRINTF(EthernetDesc, "Processing Ip packet with Id=%d\n", ip->id());
1371 ptype |= RXDP_IPV4;
1372 ip_id = ip->id();
1373
1374 if (igbe->regs.rxcsum.ipofld()) {
1375 DPRINTF(EthernetDesc, "Checking IP checksum\n");
1376 status |= RXDS_IPCS;
1377 csum = htole(cksum(ip));
1378 igbe->rxIpChecksums++;
1379 if (cksum(ip) != 0) {
1380 err |= RXDE_IPE;
1381 ext_err |= RXDEE_IPE;
1382 DPRINTF(EthernetDesc, "Checksum is bad!!\n");
1383 }
1384 }
1385 TcpPtr tcp(ip);
1386 if (tcp && igbe->regs.rxcsum.tuofld()) {
1387 DPRINTF(EthernetDesc, "Checking TCP checksum\n");
1388 status |= RXDS_TCPCS;
1389 ptype |= RXDP_TCP;
1390 csum = htole(cksum(tcp));
1391 igbe->rxTcpChecksums++;
1392 if (cksum(tcp) != 0) {
1393 DPRINTF(EthernetDesc, "Checksum is bad!!\n");
1394 err |= RXDE_TCPE;
1395 ext_err |= RXDEE_TCPE;
1396 }
1397 }
1398
1399 UdpPtr udp(ip);
1400 if (udp && igbe->regs.rxcsum.tuofld()) {
1401 DPRINTF(EthernetDesc, "Checking UDP checksum\n");
1402 status |= RXDS_UDPCS;
1403 ptype |= RXDP_UDP;
1404 csum = htole(cksum(udp));
1405 igbe->rxUdpChecksums++;
1406 if (cksum(udp) != 0) {
1407 DPRINTF(EthernetDesc, "Checksum is bad!!\n");
1408 ext_err |= RXDEE_TCPE;
1409 err |= RXDE_TCPE;
1410 }
1411 }
1412 } else { // if ip
1413         DPRINTF(EthernetSM, "Processing Non-Ip packet\n");
1414 }
1415
1416 switch (igbe->regs.srrctl.desctype()) {
1417 case RXDT_LEGACY:
1418 desc->legacy.len = htole((uint16_t)(pktPtr->length + crcfixup));
1419 desc->legacy.status = htole(status);
1420 desc->legacy.errors = htole(err);
1421 // No vlan support at this point... just set it to 0
1422 desc->legacy.vlan = 0;
1423 break;
1424 case RXDT_ADV_SPLIT_A:
1425 case RXDT_ADV_ONEBUF:
1426 desc->adv_wb.rss_type = htole(0);
1427 desc->adv_wb.pkt_type = htole(ptype);
1428 if (igbe->regs.rxcsum.pcsd()) {
1429 // no rss support right now
1430 desc->adv_wb.rss_hash = htole(0);
1431 } else {
1432 desc->adv_wb.id = htole(ip_id);
1433 desc->adv_wb.csum = htole(csum);
1434 }
1435 desc->adv_wb.status = htole(status);
1436 desc->adv_wb.errors = htole(ext_err);
1437 // no vlan support
1438 desc->adv_wb.vlan_tag = htole(0);
1439 break;
1440 default:
1441         panic("Unimplemented RX receive buffer type %d\n",
1442 igbe->regs.srrctl.desctype());
1443 }
1444
1445 DPRINTF(EthernetDesc, "Descriptor complete w0: %#x w1: %#x\n",
1446 desc->adv_read.pkt, desc->adv_read.hdr);
1447
1448 if (bytesCopied == pktPtr->length) {
1449 DPRINTF(EthernetDesc,
1450 "Packet completely written to descriptor buffers\n");
1451 // Deal with the rx timer interrupts
1452 if (igbe->regs.rdtr.delay()) {
1453 Tick delay = igbe->regs.rdtr.delay() * igbe->intClock();
1454 DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n", delay);
1455 igbe->reschedule(igbe->rdtrEvent, curTick() + delay);
1456 }
1457
1458 if (igbe->regs.radv.idv()) {
1459 Tick delay = igbe->regs.radv.idv() * igbe->intClock();
1460 DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n", delay);
1461 if (!igbe->radvEvent.scheduled()) {
1462 igbe->schedule(igbe->radvEvent, curTick() + delay);
1463 }
1464 }
1465
1466 // if neither radv or rdtr, maybe itr is set...
1467 if (!igbe->regs.rdtr.delay() && !igbe->regs.radv.idv()) {
1468 DPRINTF(EthernetSM,
1469 "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
1470 igbe->postInterrupt(IT_RXT);
1471 }
1472
1473 // If the packet is small enough, interrupt appropriately
1474 // I wonder if this is delayed or not?!
1475 if (pktPtr->length <= igbe->regs.rsrpd.idv()) {
1476 DPRINTF(EthernetSM,
1477                 "RXS: Posting IT_SRPD because small packet received\n");
1478 igbe->postInterrupt(IT_SRPD);
1479 }
1480 bytesCopied = 0;
1481 }
1482
1483 pktPtr = NULL;
1484 igbe->checkDrain();
1485 enableSm();
1486 pktDone = true;
1487
1488 igbe->anBegin("RXS", "Done Updating Desc");
1489 DPRINTF(EthernetDesc, "Processing of this descriptor complete\n");
1490 igbe->anDq("RXS", annUnusedCacheQ);
1491 unusedCache.pop_front();
1492 igbe->anQ("RXS", annUsedCacheQ);
1493 usedCache.push_back(desc);
1494}
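
// (Self-contained sketch of the checksum property relied on above;
// native-endian words are enough for the arithmetic.) The Internet
// checksum is the one's complement of the one's-complement sum, so
// re-summing a header that already contains a correct checksum yields 0,
// which is why "cksum(x) != 0" flags a bad checksum:

#include <cassert>
#include <cstdint>

uint16_t cksum16(const uint16_t *w, int n)
{
    uint32_t sum = 0;
    for (int i = 0; i < n; i++)
        sum += w[i];
    while (sum >> 16)                       // fold carries back in
        sum = (sum & 0xffff) + (sum >> 16);
    return static_cast<uint16_t>(~sum);
}

int main()
{
    uint16_t hdr[4] = {0x4500, 0x0054, 0xabcd, 0x0000};
    hdr[3] = cksum16(hdr, 4);               // fill in the checksum field
    assert(cksum16(hdr, 4) == 0);           // verify-by-recompute is clean
}
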
1495
1496void
1497IGbE::RxDescCache::enableSm()
1498{
1499 if (igbe->drainState() != DrainState::Draining) {
1500 igbe->rxTick = true;
1501 igbe->restartClock();
1502 }
1503}
1504
1505bool
1506IGbE::RxDescCache::packetDone()
1507{
1508 if (pktDone) {
1509 pktDone = false;
1510 return true;
1511 }
1512 return false;
1513}
1514
1515bool
1516IGbE::RxDescCache::hasOutstandingEvents()
1517{
1518 return pktEvent.scheduled() || wbEvent.scheduled() ||
1519 fetchEvent.scheduled() || pktHdrEvent.scheduled() ||
1520 pktDataEvent.scheduled();
1521
1522}
1523
1524void
1525IGbE::RxDescCache::serialize(CheckpointOut &cp) const
1526{
1527 DescCache<RxDesc>::serialize(cp);
1528 SERIALIZE_SCALAR(pktDone);
1529 SERIALIZE_SCALAR(splitCount);
1530 SERIALIZE_SCALAR(bytesCopied);
1531}
1532
1533void
1534IGbE::RxDescCache::unserialize(CheckpointIn &cp)
1535{
1536 DescCache<RxDesc>::unserialize(cp);
1537 UNSERIALIZE_SCALAR(pktDone);
1538 UNSERIALIZE_SCALAR(splitCount);
1539 UNSERIALIZE_SCALAR(bytesCopied);
1540}
1541
1542
1543///////////////////////////// IGbE::TxDescCache //////////////////////////////
1544
1545IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s)
1546 : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false),
1547 pktWaiting(false), pktMultiDesc(false),
1548 completionAddress(0), completionEnabled(false),
1549 useTso(false), tsoHeaderLen(0), tsoMss(0), tsoTotalLen(0), tsoUsedLen(0),
1550 tsoPrevSeq(0), tsoPktPayloadBytes(0), tsoLoadedHeader(false),
1551 tsoPktHasHeader(false), tsoDescBytesUsed(0), tsoCopyBytes(0), tsoPkts(0),
1552 pktEvent(this), headerEvent(this), nullEvent(this)
1560 pktEvent([this]{ pktComplete(); }, n),
1561 headerEvent([this]{ headerComplete(); }, n),
1562 nullEvent([this]{ nullCallback(); }, n)
1553{
1554 annSmFetch = "TX Desc Fetch";
1555 annSmWb = "TX Desc Writeback";
1556 annUnusedDescQ = "TX Unused Descriptors";
1557 annUnusedCacheQ = "TX Unused Descriptor Cache";
1558 annUsedCacheQ = "TX Used Descriptor Cache";
1559 annUsedDescQ = "TX Used Descriptors";
1560 annDescQ = "TX Descriptors";
1561}
1562
1563void
1564IGbE::TxDescCache::processContextDesc()
1565{
1566 assert(unusedCache.size());
1567 TxDesc *desc;
1568
1569 DPRINTF(EthernetDesc, "Checking and processing context descriptors\n");
1570
1571 while (!useTso && unusedCache.size() &&
1572 TxdOp::isContext(unusedCache.front())) {
1573 DPRINTF(EthernetDesc, "Got context descriptor type...\n");
1574
1575 desc = unusedCache.front();
1576 DPRINTF(EthernetDesc, "Descriptor upper: %#x lower: %#X\n",
1577 desc->d1, desc->d2);
1578
1579
1580 // is this going to be a tcp or udp packet?
1581 isTcp = TxdOp::tcp(desc) ? true : false;
1582
1583 // setup all the TSO variables, they'll be ignored if we don't use
1584 // tso for this connection
1585 tsoHeaderLen = TxdOp::hdrlen(desc);
1586 tsoMss = TxdOp::mss(desc);
1587
1588 if (TxdOp::isType(desc, TxdOp::TXD_CNXT) && TxdOp::tse(desc)) {
1589 DPRINTF(EthernetDesc, "TCP offload enabled for packet hdrlen: "
1590 "%d mss: %d paylen %d\n", TxdOp::hdrlen(desc),
1591 TxdOp::mss(desc), TxdOp::getLen(desc));
1592 useTso = true;
1593 tsoTotalLen = TxdOp::getLen(desc);
1594 tsoLoadedHeader = false;
1595 tsoDescBytesUsed = 0;
1596 tsoUsedLen = 0;
1597 tsoPrevSeq = 0;
1598 tsoPktHasHeader = false;
1599 tsoPkts = 0;
1600 tsoCopyBytes = 0;
1601 }
1602
1603 TxdOp::setDd(desc);
1604 unusedCache.pop_front();
1605 igbe->anDq("TXS", annUnusedCacheQ);
1606 usedCache.push_back(desc);
1607 igbe->anQ("TXS", annUsedCacheQ);
1608 }
1609
1610 if (!unusedCache.size())
1611 return;
1612
1613 desc = unusedCache.front();
1614 if (!useTso && TxdOp::isType(desc, TxdOp::TXD_ADVDATA) &&
1615 TxdOp::tse(desc)) {
1616 DPRINTF(EthernetDesc, "TCP offload(adv) enabled for packet "
1617 "hdrlen: %d mss: %d paylen %d\n",
1618 tsoHeaderLen, tsoMss, TxdOp::getTsoLen(desc));
1619 useTso = true;
1620 tsoTotalLen = TxdOp::getTsoLen(desc);
1621 tsoLoadedHeader = false;
1622 tsoDescBytesUsed = 0;
1623 tsoUsedLen = 0;
1624 tsoPrevSeq = 0;
1625 tsoPktHasHeader = false;
1626 tsoPkts = 0;
1627 }
1628
1629 if (useTso && !tsoLoadedHeader) {
1630 // we need to fetch a header
1631 DPRINTF(EthernetDesc, "Starting DMA of TSO header\n");
1632 assert(TxdOp::isData(desc) && TxdOp::getLen(desc) >= tsoHeaderLen);
1633 pktWaiting = true;
1634 assert(tsoHeaderLen <= 256);
1635 igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
1636 tsoHeaderLen, &headerEvent, tsoHeader, 0);
1637 }
1638}
1639
1640void
1641IGbE::TxDescCache::headerComplete()
1642{
1643 DPRINTF(EthernetDesc, "TSO: Fetching TSO header complete\n");
1644 pktWaiting = false;
1645
1646 assert(unusedCache.size());
1647 TxDesc *desc = unusedCache.front();
1648 DPRINTF(EthernetDesc, "TSO: len: %d tsoHeaderLen: %d\n",
1649 TxdOp::getLen(desc), tsoHeaderLen);
1650
1651 if (TxdOp::getLen(desc) == tsoHeaderLen) {
1652 tsoDescBytesUsed = 0;
1653 tsoLoadedHeader = true;
1654 unusedCache.pop_front();
1655 usedCache.push_back(desc);
1656 } else {
1657 DPRINTF(EthernetDesc, "TSO: header part of larger payload\n");
1658 tsoDescBytesUsed = tsoHeaderLen;
1659 tsoLoadedHeader = true;
1660 }
1661 enableSm();
1662 igbe->checkDrain();
1663}
1664
1665unsigned
1666IGbE::TxDescCache::getPacketSize(EthPacketPtr p)
1667{
1668 if (!unusedCache.size())
1669 return 0;
1670
1671 DPRINTF(EthernetDesc, "Starting processing of descriptor\n");
1672
1673 assert(!useTso || tsoLoadedHeader);
1674 TxDesc *desc = unusedCache.front();
1675
1676 if (useTso) {
1677 DPRINTF(EthernetDesc, "getPacket(): TxDescriptor data "
1678 "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
1679 DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
1680 "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
1681 tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
1682
1683 if (tsoPktHasHeader)
1684 tsoCopyBytes = std::min((tsoMss + tsoHeaderLen) - p->length,
1685 TxdOp::getLen(desc) - tsoDescBytesUsed);
1686 else
1687 tsoCopyBytes = std::min(tsoMss,
1688 TxdOp::getLen(desc) - tsoDescBytesUsed);
1689 unsigned pkt_size =
1690 tsoCopyBytes + (tsoPktHasHeader ? 0 : tsoHeaderLen);
1691
1692 DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d "
1693 "this descLen: %d\n",
1694 tsoDescBytesUsed, tsoCopyBytes, TxdOp::getLen(desc));
1695 DPRINTF(EthernetDesc, "TSO: pktHasHeader: %d\n", tsoPktHasHeader);
1696 DPRINTF(EthernetDesc, "TSO: Next packet is %d bytes\n", pkt_size);
1697 return pkt_size;
1698 }
1699
1700 DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
1701 TxdOp::getLen(unusedCache.front()));
1702 return TxdOp::getLen(desc);
1703}
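
// (Sketch with hypothetical sizes, mirroring the two std::min() calls
// above.) The payload pulled from the current descriptor for the next TSO
// segment is capped both by what remains of the MSS for this packet and
// by what remains unread in the descriptor's buffer:

#include <algorithm>
#include <cassert>

unsigned tsoCopyBytesFor(unsigned mss, unsigned hdrLen, unsigned pktLen,
                         bool pktHasHeader, unsigned descLen,
                         unsigned descUsed)
{
    if (pktHasHeader)
        return std::min((mss + hdrLen) - pktLen, descLen - descUsed);
    return std::min(mss, descLen - descUsed);
}

int main()
{
    // assuming a 1460-byte MSS under a 54-byte Ethernet+IP+TCP header
    assert(tsoCopyBytesFor(1460, 54, 54, true, 4000, 0) == 1460);
    assert(tsoCopyBytesFor(1460, 54, 1000, true, 4000, 3800) == 200);
    assert(tsoCopyBytesFor(1460, 54, 0, false, 800, 0) == 800);
}
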
1704
1705void
1706IGbE::TxDescCache::getPacketData(EthPacketPtr p)
1707{
1708 assert(unusedCache.size());
1709
1710 TxDesc *desc;
1711 desc = unusedCache.front();
1712
1713 DPRINTF(EthernetDesc, "getPacketData(): TxDescriptor data "
1714 "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
1715 assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
1716 TxdOp::getLen(desc));
1717
1718 pktPtr = p;
1719
1720 pktWaiting = true;
1721
1722 DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d\n", p->length);
1723
1724 if (useTso) {
1725 assert(tsoLoadedHeader);
1726 if (!tsoPktHasHeader) {
1727 DPRINTF(EthernetDesc,
1728 "Loading TSO header (%d bytes) into start of packet\n",
1729 tsoHeaderLen);
1730             memcpy(p->data, tsoHeader, tsoHeaderLen);
1731             p->length += tsoHeaderLen;
1732 tsoPktHasHeader = true;
1733 }
1734 }
1735
1736 if (useTso) {
1737 DPRINTF(EthernetDesc,
1738 "Starting DMA of packet at offset %d length: %d\n",
1739 p->length, tsoCopyBytes);
1740 igbe->dmaRead(pciToDma(TxdOp::getBuf(desc))
1741 + tsoDescBytesUsed,
1742 tsoCopyBytes, &pktEvent, p->data + p->length,
1743 igbe->txReadDelay);
1744 tsoDescBytesUsed += tsoCopyBytes;
1745 assert(tsoDescBytesUsed <= TxdOp::getLen(desc));
1746 } else {
1747 igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
1748 TxdOp::getLen(desc), &pktEvent, p->data + p->length,
1749 igbe->txReadDelay);
1750 }
1751}
1752
1753void
1754IGbE::TxDescCache::pktComplete()
1755{
1756
1757 TxDesc *desc;
1758 assert(unusedCache.size());
1759 assert(pktPtr);
1760
1761 igbe->anBegin("TXS", "Update Desc");
1762
1763 DPRINTF(EthernetDesc, "DMA of packet complete\n");
1764
1765
1766 desc = unusedCache.front();
1767 assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
1768 TxdOp::getLen(desc));
1769
1770 DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
1771 desc->d1, desc->d2);
1772
1773 // Set the length of the data in the EtherPacket
1774 if (useTso) {
1775 DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
1776 "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
1777 tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
1778 pktPtr->simLength += tsoCopyBytes;
1779 pktPtr->length += tsoCopyBytes;
1780 tsoUsedLen += tsoCopyBytes;
1781 DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d\n",
1782 tsoDescBytesUsed, tsoCopyBytes);
1783 } else {
1784 pktPtr->simLength += TxdOp::getLen(desc);
1785 pktPtr->length += TxdOp::getLen(desc);
1786 }
1787
1788
1789
1790 if ((!TxdOp::eop(desc) && !useTso) ||
1791 (pktPtr->length < ( tsoMss + tsoHeaderLen) &&
1792 tsoTotalLen != tsoUsedLen && useTso)) {
1793 assert(!useTso || (tsoDescBytesUsed == TxdOp::getLen(desc)));
1794 igbe->anDq("TXS", annUnusedCacheQ);
1795 unusedCache.pop_front();
1796 igbe->anQ("TXS", annUsedCacheQ);
1797 usedCache.push_back(desc);
1798
1799 tsoDescBytesUsed = 0;
1800 pktDone = true;
1801 pktWaiting = false;
1802 pktMultiDesc = true;
1803
1804 DPRINTF(EthernetDesc, "Partial Packet Descriptor of %d bytes Done\n",
1805 pktPtr->length);
1806 pktPtr = NULL;
1807
1808 enableSm();
1809 igbe->checkDrain();
1810 return;
1811 }
1812
1813
1814 pktMultiDesc = false;
1815 // no support for vlans
1816 assert(!TxdOp::vle(desc));
1817
1818 // we only support single packet descriptors at this point
1819 if (!useTso)
1820 assert(TxdOp::eop(desc));
1821
1822 // set that this packet is done
1823 if (TxdOp::rs(desc))
1824 TxdOp::setDd(desc);
1825
1826 DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
1827 desc->d1, desc->d2);
1828
1829 if (useTso) {
1830 IpPtr ip(pktPtr);
1831 if (ip) {
1832 DPRINTF(EthernetDesc, "TSO: Modifying IP header. Id + %d\n",
1833 tsoPkts);
1834 ip->id(ip->id() + tsoPkts++);
1835 ip->len(pktPtr->length - EthPtr(pktPtr)->size());
1836
1837 TcpPtr tcp(ip);
1838 if (tcp) {
1839 DPRINTF(EthernetDesc,
1840 "TSO: Modifying TCP header. old seq %d + %d\n",
1841 tcp->seq(), tsoPrevSeq);
1842 tcp->seq(tcp->seq() + tsoPrevSeq);
1843 if (tsoUsedLen != tsoTotalLen)
1844 tcp->flags(tcp->flags() & ~9); // clear fin & psh
1845 }
1846 UdpPtr udp(ip);
1847 if (udp) {
1848 DPRINTF(EthernetDesc, "TSO: Modifying UDP header.\n");
1849 udp->len(pktPtr->length - EthPtr(pktPtr)->size());
1850 }
1851 }
1852 tsoPrevSeq = tsoUsedLen;
1853 }
1854
1855 if (DTRACE(EthernetDesc)) {
1856 IpPtr ip(pktPtr);
1857 if (ip)
1858             DPRINTF(EthernetDesc, "Processing Ip packet with Id=%d\n",
1859 ip->id());
1860 else
1861             DPRINTF(EthernetSM, "Processing Non-Ip packet\n");
1862 }
1863
1864     // Checksums are only offloaded for new descriptor types
1865 if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) {
1866 DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
1867 IpPtr ip(pktPtr);
1868 assert(ip);
1869 if (TxdOp::ixsm(desc)) {
1870 ip->sum(0);
1871 ip->sum(cksum(ip));
1872 igbe->txIpChecksums++;
1873 DPRINTF(EthernetDesc, "Calculated IP checksum\n");
1874 }
1875 if (TxdOp::txsm(desc)) {
1876 TcpPtr tcp(ip);
1877 UdpPtr udp(ip);
1878 if (tcp) {
1879 tcp->sum(0);
1880 tcp->sum(cksum(tcp));
1881 igbe->txTcpChecksums++;
1882 DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
1883 } else if (udp) {
1884 assert(udp);
1885 udp->sum(0);
1886 udp->sum(cksum(udp));
1887 igbe->txUdpChecksums++;
1888 DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
1889 } else {
1890 panic("Told to checksum, but don't know how\n");
1891 }
1892 }
1893 }
1894
1895 if (TxdOp::ide(desc)) {
1896         // Deal with the tx timer interrupts
1897 DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
1898 if (igbe->regs.tidv.idv()) {
1899 Tick delay = igbe->regs.tidv.idv() * igbe->intClock();
1900 DPRINTF(EthernetDesc, "setting tidv\n");
1901 igbe->reschedule(igbe->tidvEvent, curTick() + delay, true);
1902 }
1903
1904 if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
1905 Tick delay = igbe->regs.tadv.idv() * igbe->intClock();
1906 DPRINTF(EthernetDesc, "setting tadv\n");
1907 if (!igbe->tadvEvent.scheduled()) {
1908 igbe->schedule(igbe->tadvEvent, curTick() + delay);
1909 }
1910 }
1911 }
1912
1913
1914 if (!useTso || TxdOp::getLen(desc) == tsoDescBytesUsed) {
1915 DPRINTF(EthernetDesc, "Descriptor Done\n");
1916 igbe->anDq("TXS", annUnusedCacheQ);
1917 unusedCache.pop_front();
1918 igbe->anQ("TXS", annUsedCacheQ);
1919 usedCache.push_back(desc);
1920 tsoDescBytesUsed = 0;
1921 }
1922
1923 if (useTso && tsoUsedLen == tsoTotalLen)
1924 useTso = false;
1925
1926
1927 DPRINTF(EthernetDesc,
1928 "------Packet of %d bytes ready for transmission-------\n",
1929 pktPtr->length);
1930 pktDone = true;
1931 pktWaiting = false;
1932 pktPtr = NULL;
1933 tsoPktHasHeader = false;
1934
1935 if (igbe->regs.txdctl.wthresh() == 0) {
1936 igbe->anBegin("TXS", "Desc Writeback");
1937 DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
1938 writeback(0);
1939 } else if (!igbe->regs.txdctl.gran() && igbe->regs.txdctl.wthresh() <=
1940 descInBlock(usedCache.size())) {
1941         DPRINTF(EthernetDesc, "used >= WTHRESH, writing back descriptor\n");
1942 igbe->anBegin("TXS", "Desc Writeback");
1943 writeback((igbe->cacheBlockSize()-1)>>4);
1944 } else if (igbe->regs.txdctl.wthresh() <= usedCache.size()) {
1945         DPRINTF(EthernetDesc, "used >= WTHRESH, writing back descriptor\n");
1946 igbe->anBegin("TXS", "Desc Writeback");
1947 writeback((igbe->cacheBlockSize()-1)>>4);
1948 }
1949
1950 enableSm();
1951 igbe->checkDrain();
1952}
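
// (Tiny sketch of the mask arithmetic used by the writeback calls above.)
// (cacheBlockSize()-1)>>4 converts a byte alignment mask into a
// descriptor-count mask, since each descriptor occupies 16 bytes:

#include <cassert>

unsigned wbMask(unsigned blockSize)
{
    return (blockSize - 1) >> 4;
}

int main()
{
    assert(wbMask(64) == 3);                // align to 4 descriptors
    assert(wbMask(128) == 7);               // align to 8 descriptors
}
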
1953
1954void
1955IGbE::TxDescCache::actionAfterWb()
1956{
1957 DPRINTF(EthernetDesc, "actionAfterWb() completionEnabled: %d\n",
1958 completionEnabled);
1959 igbe->postInterrupt(iGbReg::IT_TXDW);
1960 if (completionEnabled) {
1961 descEnd = igbe->regs.tdh();
1962 DPRINTF(EthernetDesc,
1963 "Completion writing back value: %d to addr: %#x\n", descEnd,
1964 completionAddress);
1965 igbe->dmaWrite(pciToDma(mbits(completionAddress, 63, 2)),
1966 sizeof(descEnd), &nullEvent, (uint8_t*)&descEnd, 0);
1967 }
1968}
1969
1970void
1971IGbE::TxDescCache::serialize(CheckpointOut &cp) const
1972{
1973 DescCache<TxDesc>::serialize(cp);
1974
1975 SERIALIZE_SCALAR(pktDone);
1976 SERIALIZE_SCALAR(isTcp);
1977 SERIALIZE_SCALAR(pktWaiting);
1978 SERIALIZE_SCALAR(pktMultiDesc);
1979
1980 SERIALIZE_SCALAR(useTso);
1981 SERIALIZE_SCALAR(tsoHeaderLen);
1982 SERIALIZE_SCALAR(tsoMss);
1983 SERIALIZE_SCALAR(tsoTotalLen);
1984 SERIALIZE_SCALAR(tsoUsedLen);
1985     SERIALIZE_SCALAR(tsoPrevSeq);
1986 SERIALIZE_SCALAR(tsoPktPayloadBytes);
1987 SERIALIZE_SCALAR(tsoLoadedHeader);
1988 SERIALIZE_SCALAR(tsoPktHasHeader);
1989 SERIALIZE_ARRAY(tsoHeader, 256);
1990 SERIALIZE_SCALAR(tsoDescBytesUsed);
1991 SERIALIZE_SCALAR(tsoCopyBytes);
1992 SERIALIZE_SCALAR(tsoPkts);
1993
1994 SERIALIZE_SCALAR(completionAddress);
1995 SERIALIZE_SCALAR(completionEnabled);
1996 SERIALIZE_SCALAR(descEnd);
1997}
1998
1999void
2000IGbE::TxDescCache::unserialize(CheckpointIn &cp)
2001{
2002 DescCache<TxDesc>::unserialize(cp);
2003
2004 UNSERIALIZE_SCALAR(pktDone);
2005 UNSERIALIZE_SCALAR(isTcp);
2006 UNSERIALIZE_SCALAR(pktWaiting);
2007 UNSERIALIZE_SCALAR(pktMultiDesc);
2008
2009 UNSERIALIZE_SCALAR(useTso);
2010 UNSERIALIZE_SCALAR(tsoHeaderLen);
2011 UNSERIALIZE_SCALAR(tsoMss);
2012 UNSERIALIZE_SCALAR(tsoTotalLen);
2013 UNSERIALIZE_SCALAR(tsoUsedLen);
2014     UNSERIALIZE_SCALAR(tsoPrevSeq);
2015 UNSERIALIZE_SCALAR(tsoPktPayloadBytes);
2016 UNSERIALIZE_SCALAR(tsoLoadedHeader);
2017 UNSERIALIZE_SCALAR(tsoPktHasHeader);
2018 UNSERIALIZE_ARRAY(tsoHeader, 256);
2019 UNSERIALIZE_SCALAR(tsoDescBytesUsed);
2020 UNSERIALIZE_SCALAR(tsoCopyBytes);
2021 UNSERIALIZE_SCALAR(tsoPkts);
2022
2023 UNSERIALIZE_SCALAR(completionAddress);
2024 UNSERIALIZE_SCALAR(completionEnabled);
2025 UNSERIALIZE_SCALAR(descEnd);
2026}
2027
2028bool
2029IGbE::TxDescCache::packetAvailable()
2030{
2031 if (pktDone) {
2032 pktDone = false;
2033 return true;
2034 }
2035 return false;
2036}
2037
2038void
2039IGbE::TxDescCache::enableSm()
2040{
2041 if (igbe->drainState() != DrainState::Draining) {
2042 igbe->txTick = true;
2043 igbe->restartClock();
2044 }
2045}
2046
2047bool
2048IGbE::TxDescCache::hasOutstandingEvents()
2049{
2050 return pktEvent.scheduled() || wbEvent.scheduled() ||
2051 fetchEvent.scheduled();
2052}
2053
2054
2055///////////////////////////////////// IGbE /////////////////////////////////
2056
2057void
2058IGbE::restartClock()
2059{
2060 if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) &&
2061 drainState() == DrainState::Running)
2062 schedule(tickEvent, clockEdge(Cycles(1)));
2063}
2064
2065DrainState
2066IGbE::drain()
2067{
2068 unsigned int count(0);
2069 if (rxDescCache.hasOutstandingEvents() ||
2070 txDescCache.hasOutstandingEvents()) {
2071 count++;
2072 }
2073
2074 txFifoTick = false;
2075 txTick = false;
2076 rxTick = false;
2077
2078 if (tickEvent.scheduled())
2079 deschedule(tickEvent);
2080
2081 if (count) {
2082 DPRINTF(Drain, "IGbE not drained\n");
2083 return DrainState::Draining;
2084 } else
2085 return DrainState::Drained;
2086}
2087
2088void
2089IGbE::drainResume()
2090{
2091 Drainable::drainResume();
2092
2093 txFifoTick = true;
2094 txTick = true;
2095 rxTick = true;
2096
2097 restartClock();
2098     DPRINTF(EthernetSM, "resuming from drain\n");
2099}
2100
2101void
2102IGbE::checkDrain()
2103{
2104 if (drainState() != DrainState::Draining)
2105 return;
2106
2107 txFifoTick = false;
2108 txTick = false;
2109 rxTick = false;
2110 if (!rxDescCache.hasOutstandingEvents() &&
2111 !txDescCache.hasOutstandingEvents()) {
2112 DPRINTF(Drain, "IGbE done draining, processing drain event\n");
2113 signalDrainDone();
2114 }
2115}
2116
2117void
2118IGbE::txStateMachine()
2119{
2120 if (!regs.tctl.en()) {
2121 txTick = false;
2122 DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
2123 return;
2124 }
2125
2126     // If we have a packet available and its length is not 0 (meaning it's
2127     // not a multidescriptor packet) put it in the fifo; otherwise on the
2128     // next iteration we'll get the rest of the data
2129 if (txPacket && txDescCache.packetAvailable()
2130 && !txDescCache.packetMultiDesc() && txPacket->length) {
2131 anQ("TXS", "TX FIFO Q");
2132 DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
2133#ifndef NDEBUG
2134 bool success =
2135#endif
2136 txFifo.push(txPacket);
2137         txFifoTick = drainState() != DrainState::Draining;
2138 assert(success);
2139 txPacket = NULL;
2140 anBegin("TXS", "Desc Writeback");
2141 txDescCache.writeback((cacheBlockSize()-1)>>4);
2142 return;
2143 }
2144
2145 // Only support descriptor granularity
2146 if (regs.txdctl.lwthresh() &&
2147 txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
2148 DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
2149 postInterrupt(IT_TXDLOW);
2150 }
2151
2152 if (!txPacket) {
2153 txPacket = std::make_shared<EthPacketData>(16384);
2154 }
2155
2156 if (!txDescCache.packetWaiting()) {
2157 if (txDescCache.descLeft() == 0) {
2158 postInterrupt(IT_TXQE);
2159 anBegin("TXS", "Desc Writeback");
2160 txDescCache.writeback(0);
2161 anBegin("TXS", "Desc Fetch");
2162 anWe("TXS", txDescCache.annUnusedCacheQ);
2163 txDescCache.fetchDescriptors();
2164 DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
2165 "writeback stopping ticking and posting TXQE\n");
2166 txTick = false;
2167 return;
2168 }
2169
2170
2171 if (!(txDescCache.descUnused())) {
2172 anBegin("TXS", "Desc Fetch");
2173 txDescCache.fetchDescriptors();
2174 anWe("TXS", txDescCache.annUnusedCacheQ);
2175 DPRINTF(EthernetSM, "TXS: No descriptors available in cache, "
2176 "fetching and stopping ticking\n");
2177 txTick = false;
2178 return;
2179 }
2180 anPq("TXS", txDescCache.annUnusedCacheQ);
2181
2182
2183 txDescCache.processContextDesc();
2184 if (txDescCache.packetWaiting()) {
2185 DPRINTF(EthernetSM,
2186 "TXS: Fetching TSO header, stopping ticking\n");
2187 txTick = false;
2188 return;
2189 }
2190
2191 unsigned size = txDescCache.getPacketSize(txPacket);
2192 if (size > 0 && txFifo.avail() > size) {
2193 anRq("TXS", "TX FIFO Q");
2194 anBegin("TXS", "DMA Packet");
2195 DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and "
2196 "beginning DMA of next packet\n", size);
2197 txFifo.reserve(size);
2198 txDescCache.getPacketData(txPacket);
2199 } else if (size == 0) {
2200 DPRINTF(EthernetSM, "TXS: getPacketSize returned: %d\n", size);
2201 DPRINTF(EthernetSM,
2202 "TXS: No packets to get, writing back used descriptors\n");
2203 anBegin("TXS", "Desc Writeback");
2204 txDescCache.writeback(0);
2205 } else {
2206 anWf("TXS", "TX FIFO Q");
2207 DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
2208 "available in FIFO\n");
2209 txTick = false;
2210 }
2211
2212
2213 return;
2214 }
2215 DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
2216 txTick = false;
2217}
2218
2219bool
2220IGbE::ethRxPkt(EthPacketPtr pkt)
2221{
2222 rxBytes += pkt->length;
2223 rxPackets++;
2224
2225     DPRINTF(Ethernet, "RxFIFO: Receiving packet from wire\n");
2226 anBegin("RXQ", "Wire Recv");
2227
2228
2229 if (!regs.rctl.en()) {
2230 DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
2231 anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
2232 return true;
2233 }
2234
2235 // restart the state machines if they are stopped
2236     rxTick = drainState() != DrainState::Draining;
2237 if ((rxTick || txTick) && !tickEvent.scheduled()) {
2238 DPRINTF(EthernetSM,
2239 "RXS: received packet into fifo, starting ticking\n");
2240 restartClock();
2241 }
2242
2243 if (!rxFifo.push(pkt)) {
2244 DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
2245 postInterrupt(IT_RXO, true);
2246 anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
2247 return false;
2248 }
2249
2250 if (CPA::available() && cpa->enabled()) {
2251 assert(sys->numSystemsRunning <= 2);
2252 System *other_sys;
2253 if (sys->systemList[0] == sys)
2254 other_sys = sys->systemList[1];
2255 else
2256 other_sys = sys->systemList[0];
2257
2258 cpa->hwDq(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
2259 anQ("RXQ", "RX FIFO Q");
2260 cpa->hwWe(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
2261 }
2262
2263 return true;
2264}
2265
2266
2267void
2268IGbE::rxStateMachine()
2269{
2270 if (!regs.rctl.en()) {
2271 rxTick = false;
2272 DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
2273 return;
2274 }
2275
2276 // If the packet is done check for interrupts/descriptors/etc
2277 if (rxDescCache.packetDone()) {
2278 rxDmaPacket = false;
2279 DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
2280 int descLeft = rxDescCache.descLeft();
2281 DPRINTF(EthernetSM, "RXS: descLeft: %d rdmts: %d rdlen: %d\n",
2282 descLeft, regs.rctl.rdmts(), regs.rdlen());
2283 switch (regs.rctl.rdmts()) {
2284 case 2: if (descLeft > .125 * regs.rdlen()) break;
2285 case 1: if (descLeft > .250 * regs.rdlen()) break;
2286 case 0: if (descLeft > .500 * regs.rdlen()) break;
2287 DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) "
2288 "because of descriptors left\n");
2289 postInterrupt(IT_RXDMT);
2290 break;
2291 }
2292
2293 if (rxFifo.empty())
2294 rxDescCache.writeback(0);
2295
2296 if (descLeft == 0) {
2297 anBegin("RXS", "Writeback Descriptors");
2298 rxDescCache.writeback(0);
2299 DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
2300 " writeback and stopping ticking\n");
2301 rxTick = false;
2302 }
2303
2304     // only support descriptor granularities
2305 assert(regs.rxdctl.gran());
2306
2307 if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
2308 DPRINTF(EthernetSM,
2309 "RXS: Writing back because WTHRESH >= descUsed\n");
2310 anBegin("RXS", "Writeback Descriptors");
2311 if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
2312 rxDescCache.writeback(regs.rxdctl.wthresh()-1);
2313 else
2314 rxDescCache.writeback((cacheBlockSize()-1)>>4);
2315 }
2316
2317 if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
2318 ((rxDescCache.descLeft() - rxDescCache.descUnused()) >
2319 regs.rxdctl.hthresh())) {
2320 DPRINTF(EthernetSM, "RXS: Fetching descriptors because "
2321 "descUnused < PTHRESH\n");
2322 anBegin("RXS", "Fetch Descriptors");
2323 rxDescCache.fetchDescriptors();
2324 }
2325
2326 if (rxDescCache.descUnused() == 0) {
2327 anBegin("RXS", "Fetch Descriptors");
2328 rxDescCache.fetchDescriptors();
2329 anWe("RXS", rxDescCache.annUnusedCacheQ);
2330 DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
2331 "fetching descriptors and stopping ticking\n");
2332 rxTick = false;
2333 }
2334 return;
2335 }
2336
2337 if (rxDmaPacket) {
2338 DPRINTF(EthernetSM,
2339 "RXS: stopping ticking until packet DMA completes\n");
2340 rxTick = false;
2341 return;
2342 }
2343
2344 if (!rxDescCache.descUnused()) {
2345 anBegin("RXS", "Fetch Descriptors");
2346 rxDescCache.fetchDescriptors();
2347 anWe("RXS", rxDescCache.annUnusedCacheQ);
2348 DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
2349 "stopping ticking\n");
2350 rxTick = false;
2351 DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n");
2352 return;
2353 }
2354 anPq("RXS", rxDescCache.annUnusedCacheQ);
2355
2356 if (rxFifo.empty()) {
2357 anWe("RXS", "RX FIFO Q");
2358 DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
2359 rxTick = false;
2360 return;
2361 }
2362 anPq("RXS", "RX FIFO Q");
2363 anBegin("RXS", "Get Desc");
2364
2365 EthPacketPtr pkt;
2366 pkt = rxFifo.front();
2367
2368
2369 pktOffset = rxDescCache.writePacket(pkt, pktOffset);
2370 DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
2371 if (pktOffset == pkt->length) {
2372         anBegin("RXS", "FIFO Dequeue");
2373 DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
2374 pktOffset = 0;
2375 anDq("RXS", "RX FIFO Q");
2376 rxFifo.pop();
2377 }
2378
2379 DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
2380 rxTick = false;
2381 rxDmaPacket = true;
2382 anBegin("RXS", "DMA Packet");
2383}
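
// (Flattened sketch of the RDMTS switch in rxStateMachine() above, with
// integer division standing in for the floating-point fractions; for
// integer counts the comparisons are equivalent.) Settings 2, 1 and 0 arm
// the RXDMT interrupt when the descriptors left fall to 1/8, 1/4 or 1/2
// of rdlen as the model computes it; the fallthrough works because a
// count under 1/8 is necessarily under 1/4 and 1/2 as well:

#include <cassert>

bool rxdmtFires(int rdmts, int descLeft, int rdlen)
{
    switch (rdmts) {
      case 2: if (descLeft > rdlen / 8) return false; // fallthrough
      case 1: if (descLeft > rdlen / 4) return false; // fallthrough
      case 0: if (descLeft > rdlen / 2) return false;
        return true;
    }
    return false;                           // reserved encoding: never fires
}

int main()
{
    assert(rxdmtFires(0, 100, 256));        // 100 <= 128: interrupt
    assert(!rxdmtFires(0, 200, 256));       // 200 > 128: no interrupt
    assert(!rxdmtFires(2, 100, 256));       // 100 > 32: no interrupt
}
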
2384
2385void
2386IGbE::txWire()
2387{
2388 txFifoTick = false;
2389
2390 if (txFifo.empty()) {
2391 anWe("TXQ", "TX FIFO Q");
2392 return;
2393 }
2394
2395
2396 anPq("TXQ", "TX FIFO Q");
2397 if (etherInt->sendPacket(txFifo.front())) {
2398 anQ("TXQ", "WireQ");
2399 if (DTRACE(EthernetSM)) {
2400 IpPtr ip(txFifo.front());
2401 if (ip)
2402 DPRINTF(EthernetSM, "Transmitting Ip packet with Id=%d\n",
2403 ip->id());
2404 else
2405 DPRINTF(EthernetSM, "Transmitting Non-Ip packet\n");
2406 }
2407 anDq("TXQ", "TX FIFO Q");
2408 anBegin("TXQ", "Wire Send");
2409 DPRINTF(EthernetSM,
2410 "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
2411 txFifo.avail());
2412
2413 txBytes += txFifo.front()->length;
2414 txPackets++;
2415
2416 txFifo.pop();
2417 }
2418}
2419
2420void
2421IGbE::tick()
2422{
2423 DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");
2424
2425 inTick = true;
2426
2427 if (rxTick)
2428 rxStateMachine();
2429
2430 if (txTick)
2431 txStateMachine();
2432
2433 // If txWire returns and txFifoTick is still set, that means the data we
2434 // sent to the other end was already accepted and we can send another
2435 // frame right away. This is consistent with the previous behavior which
2436 // would send another frame if one was ready in ethTxDone. This version
2437 // avoids growing the stack with each frame sent which can cause stack
2438 // overflow.
2439 while (txFifoTick)
2440 txWire();
2441
2442 if (rxTick || txTick || txFifoTick)
2443 schedule(tickEvent, curTick() + clockPeriod());
2444
2445 inTick = false;
2446}
2447
2448void
2449IGbE::ethTxDone()
2450{
2451 anBegin("TXQ", "Send Done");
2452 // restart the tx state machines if they are stopped
2453 // fifo to send another packet
2454 // tx sm to put more data into the fifo
2455     txFifoTick = drainState() != DrainState::Draining;
2456 if (txDescCache.descLeft() != 0 && drainState() != DrainState::Draining)
2457 txTick = true;
2458
2459 if (!inTick)
2460 restartClock();
2461 DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
2462}
2463
2464void
2465IGbE::serialize(CheckpointOut &cp) const
2466{
2467 PciDevice::serialize(cp);
2468
2469 regs.serialize(cp);
2470 SERIALIZE_SCALAR(eeOpBits);
2471 SERIALIZE_SCALAR(eeAddrBits);
2472 SERIALIZE_SCALAR(eeDataBits);
2473 SERIALIZE_SCALAR(eeOpcode);
2474 SERIALIZE_SCALAR(eeAddr);
2475 SERIALIZE_SCALAR(lastInterrupt);
2476 SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);
2477
2478 rxFifo.serialize("rxfifo", cp);
2479 txFifo.serialize("txfifo", cp);
2480
2481 bool txPktExists = txPacket != nullptr;
2482 SERIALIZE_SCALAR(txPktExists);
2483 if (txPktExists)
2484 txPacket->serialize("txpacket", cp);
2485
2486 Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
2487 inter_time = 0;
2488
2489 if (rdtrEvent.scheduled())
2490 rdtr_time = rdtrEvent.when();
2491 SERIALIZE_SCALAR(rdtr_time);
2492
2493 if (radvEvent.scheduled())
2494 radv_time = radvEvent.when();
2495 SERIALIZE_SCALAR(radv_time);
2496
2497 if (tidvEvent.scheduled())
2498 tidv_time = tidvEvent.when();
2499 SERIALIZE_SCALAR(tidv_time);
2500
2501 if (tadvEvent.scheduled())
2502 tadv_time = tadvEvent.when();
2503 SERIALIZE_SCALAR(tadv_time);
2504
2505 if (interEvent.scheduled())
2506 inter_time = interEvent.when();
2507 SERIALIZE_SCALAR(inter_time);
2508
2509 SERIALIZE_SCALAR(pktOffset);
2510
2511 txDescCache.serializeSection(cp, "TxDescCache");
2512 rxDescCache.serializeSection(cp, "RxDescCache");
2513}
2514
2515void
2516IGbE::unserialize(CheckpointIn &cp)
2517{
2518 PciDevice::unserialize(cp);
2519
2520 regs.unserialize(cp);
2521 UNSERIALIZE_SCALAR(eeOpBits);
2522 UNSERIALIZE_SCALAR(eeAddrBits);
2523 UNSERIALIZE_SCALAR(eeDataBits);
2524 UNSERIALIZE_SCALAR(eeOpcode);
2525 UNSERIALIZE_SCALAR(eeAddr);
2526 UNSERIALIZE_SCALAR(lastInterrupt);
2527 UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);
2528
2529 rxFifo.unserialize("rxfifo", cp);
2530 txFifo.unserialize("txfifo", cp);
2531
2532 bool txPktExists;
2533 UNSERIALIZE_SCALAR(txPktExists);
2534 if (txPktExists) {
2535 txPacket = std::make_shared<EthPacketData>(16384);
2536 txPacket->unserialize("txpacket", cp);
2537 }
2538
2539 rxTick = true;
2540 txTick = true;
2541 txFifoTick = true;
2542
2543 Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
2544 UNSERIALIZE_SCALAR(rdtr_time);
2545 UNSERIALIZE_SCALAR(radv_time);
2546 UNSERIALIZE_SCALAR(tidv_time);
2547 UNSERIALIZE_SCALAR(tadv_time);
2548 UNSERIALIZE_SCALAR(inter_time);
2549
2550 if (rdtr_time)
2551 schedule(rdtrEvent, rdtr_time);
2552
2553 if (radv_time)
2554 schedule(radvEvent, radv_time);
2555
2556 if (tidv_time)
2557 schedule(tidvEvent, tidv_time);
2558
2559 if (tadv_time)
2560 schedule(tadvEvent, tadv_time);
2561
2562 if (inter_time)
2563 schedule(interEvent, inter_time);
2564
2565 UNSERIALIZE_SCALAR(pktOffset);
2566
2567 txDescCache.unserializeSection(cp, "TxDescCache");
2568 rxDescCache.unserializeSection(cp, "RxDescCache");
2569}
2570
2571IGbE *
2572IGbEParams::create()
2573{
2574 return new IGbE(this);
2575}
1563{
1564 annSmFetch = "TX Desc Fetch";
1565 annSmWb = "TX Desc Writeback";
1566 annUnusedDescQ = "TX Unused Descriptors";
1567 annUnusedCacheQ = "TX Unused Descriptor Cache";
1568 annUsedCacheQ = "TX Used Descriptor Cache";
1569 annUsedDescQ = "TX Used Descriptors";
1570 annDescQ = "TX Descriptors";
1571}
1572
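// Consume the context descriptors at the head of the unused cache,
// recording the checksum/TSO parameters they carry, and start the DMA of
// the TSO header if segmentation is requested and no header is loaded yet.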
1573void
1574IGbE::TxDescCache::processContextDesc()
1575{
1576 assert(unusedCache.size());
1577 TxDesc *desc;
1578
1579 DPRINTF(EthernetDesc, "Checking and processing context descriptors\n");
1580
1581 while (!useTso && unusedCache.size() &&
1582 TxdOp::isContext(unusedCache.front())) {
1583 DPRINTF(EthernetDesc, "Got context descriptor type...\n");
1584
1585 desc = unusedCache.front();
1586        DPRINTF(EthernetDesc, "Descriptor upper: %#x lower: %#x\n",
1587 desc->d1, desc->d2);
1588
1589
1590 // is this going to be a tcp or udp packet?
1591        isTcp = TxdOp::tcp(desc);
1592
1593        // set up all the TSO variables; they'll be ignored if we don't
1594        // use tso for this connection
1595 tsoHeaderLen = TxdOp::hdrlen(desc);
1596 tsoMss = TxdOp::mss(desc);
1597
1598 if (TxdOp::isType(desc, TxdOp::TXD_CNXT) && TxdOp::tse(desc)) {
1599 DPRINTF(EthernetDesc, "TCP offload enabled for packet hdrlen: "
1600 "%d mss: %d paylen %d\n", TxdOp::hdrlen(desc),
1601 TxdOp::mss(desc), TxdOp::getLen(desc));
1602 useTso = true;
1603 tsoTotalLen = TxdOp::getLen(desc);
1604 tsoLoadedHeader = false;
1605 tsoDescBytesUsed = 0;
1606 tsoUsedLen = 0;
1607 tsoPrevSeq = 0;
1608 tsoPktHasHeader = false;
1609 tsoPkts = 0;
1610 tsoCopyBytes = 0;
1611 }
1612
1613 TxdOp::setDd(desc);
1614 unusedCache.pop_front();
1615 igbe->anDq("TXS", annUnusedCacheQ);
1616 usedCache.push_back(desc);
1617 igbe->anQ("TXS", annUsedCacheQ);
1618 }
1619
1620 if (!unusedCache.size())
1621 return;
1622
1623 desc = unusedCache.front();
1624 if (!useTso && TxdOp::isType(desc, TxdOp::TXD_ADVDATA) &&
1625 TxdOp::tse(desc)) {
1626 DPRINTF(EthernetDesc, "TCP offload(adv) enabled for packet "
1627 "hdrlen: %d mss: %d paylen %d\n",
1628 tsoHeaderLen, tsoMss, TxdOp::getTsoLen(desc));
1629 useTso = true;
1630 tsoTotalLen = TxdOp::getTsoLen(desc);
1631 tsoLoadedHeader = false;
1632 tsoDescBytesUsed = 0;
1633 tsoUsedLen = 0;
1634 tsoPrevSeq = 0;
1635 tsoPktHasHeader = false;
1636 tsoPkts = 0;
1637 }
1638
1639 if (useTso && !tsoLoadedHeader) {
1640 // we need to fetch a header
1641 DPRINTF(EthernetDesc, "Starting DMA of TSO header\n");
1642 assert(TxdOp::isData(desc) && TxdOp::getLen(desc) >= tsoHeaderLen);
1643 pktWaiting = true;
1644 assert(tsoHeaderLen <= 256);
1645 igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
1646 tsoHeaderLen, &headerEvent, tsoHeader, 0);
1647 }
1648}
1649
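// Invoked when the DMA of the TSO header finishes: retire the descriptor
// if it held only the header, otherwise account for the header bytes used,
// then restart the transmit state machine.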
1650void
1651IGbE::TxDescCache::headerComplete()
1652{
1653 DPRINTF(EthernetDesc, "TSO: Fetching TSO header complete\n");
1654 pktWaiting = false;
1655
1656 assert(unusedCache.size());
1657 TxDesc *desc = unusedCache.front();
1658 DPRINTF(EthernetDesc, "TSO: len: %d tsoHeaderLen: %d\n",
1659 TxdOp::getLen(desc), tsoHeaderLen);
1660
1661 if (TxdOp::getLen(desc) == tsoHeaderLen) {
1662 tsoDescBytesUsed = 0;
1663 tsoLoadedHeader = true;
1664 unusedCache.pop_front();
1665 usedCache.push_back(desc);
1666 } else {
1667 DPRINTF(EthernetDesc, "TSO: header part of larger payload\n");
1668 tsoDescBytesUsed = tsoHeaderLen;
1669 tsoLoadedHeader = true;
1670 }
1671 enableSm();
1672 igbe->checkDrain();
1673}
1674
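// Return how many bytes the next packet will occupy in the TX FIFO: for
// TSO the next segment (plus the prepended header if the packet lacks
// one), otherwise the length of the descriptor's buffer.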
1675unsigned
1676IGbE::TxDescCache::getPacketSize(EthPacketPtr p)
1677{
1678 if (!unusedCache.size())
1679 return 0;
1680
1681 DPRINTF(EthernetDesc, "Starting processing of descriptor\n");
1682
1683 assert(!useTso || tsoLoadedHeader);
1684 TxDesc *desc = unusedCache.front();
1685
1686 if (useTso) {
1687 DPRINTF(EthernetDesc, "getPacket(): TxDescriptor data "
1688 "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
1689 DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
1690 "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
1691 tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
1692
1693 if (tsoPktHasHeader)
1694 tsoCopyBytes = std::min((tsoMss + tsoHeaderLen) - p->length,
1695 TxdOp::getLen(desc) - tsoDescBytesUsed);
1696 else
1697 tsoCopyBytes = std::min(tsoMss,
1698 TxdOp::getLen(desc) - tsoDescBytesUsed);
1699 unsigned pkt_size =
1700 tsoCopyBytes + (tsoPktHasHeader ? 0 : tsoHeaderLen);
1701
1702 DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d "
1703 "this descLen: %d\n",
1704 tsoDescBytesUsed, tsoCopyBytes, TxdOp::getLen(desc));
1705 DPRINTF(EthernetDesc, "TSO: pktHasHeader: %d\n", tsoPktHasHeader);
1706 DPRINTF(EthernetDesc, "TSO: Next packet is %d bytes\n", pkt_size);
1707 return pkt_size;
1708 }
1709
1710 DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
1711 TxdOp::getLen(unusedCache.front()));
1712 return TxdOp::getLen(desc);
1713}
1714
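// Start the DMA of the packet data described by the descriptor at the
// head of the cache into the packet buffer; for TSO the previously
// fetched header is copied into the packet first.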
1715void
1716IGbE::TxDescCache::getPacketData(EthPacketPtr p)
1717{
1718 assert(unusedCache.size());
1719
1720 TxDesc *desc;
1721 desc = unusedCache.front();
1722
1723 DPRINTF(EthernetDesc, "getPacketData(): TxDescriptor data "
1724 "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
1725 assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
1726 TxdOp::getLen(desc));
1727
1728 pktPtr = p;
1729
1730 pktWaiting = true;
1731
1732 DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d\n", p->length);
1733
1734 if (useTso) {
1735 assert(tsoLoadedHeader);
1736 if (!tsoPktHasHeader) {
1737 DPRINTF(EthernetDesc,
1738 "Loading TSO header (%d bytes) into start of packet\n",
1739 tsoHeaderLen);
1740            memcpy(p->data, &tsoHeader, tsoHeaderLen);
1741            p->length += tsoHeaderLen;
1742 tsoPktHasHeader = true;
1743 }
1744 }
1745
1746 if (useTso) {
1747 DPRINTF(EthernetDesc,
1748 "Starting DMA of packet at offset %d length: %d\n",
1749 p->length, tsoCopyBytes);
1750 igbe->dmaRead(pciToDma(TxdOp::getBuf(desc))
1751 + tsoDescBytesUsed,
1752 tsoCopyBytes, &pktEvent, p->data + p->length,
1753 igbe->txReadDelay);
1754 tsoDescBytesUsed += tsoCopyBytes;
1755 assert(tsoDescBytesUsed <= TxdOp::getLen(desc));
1756 } else {
1757 igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
1758 TxdOp::getLen(desc), &pktEvent, p->data + p->length,
1759 igbe->txReadDelay);
1760 }
1761}
1762
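// Invoked when the packet-data DMA completes: update the packet length,
// handle multi-descriptor packets, patch IP/TCP/UDP headers for TSO,
// compute any offloaded checksums, arm the TX interrupt delay timers, and
// write back used descriptors according to WTHRESH.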
1763void
1764IGbE::TxDescCache::pktComplete()
1765{
1766
1767 TxDesc *desc;
1768 assert(unusedCache.size());
1769 assert(pktPtr);
1770
1771 igbe->anBegin("TXS", "Update Desc");
1772
1773 DPRINTF(EthernetDesc, "DMA of packet complete\n");
1774
1775
1776 desc = unusedCache.front();
1777 assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
1778 TxdOp::getLen(desc));
1779
1780 DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
1781 desc->d1, desc->d2);
1782
1783 // Set the length of the data in the EtherPacket
1784 if (useTso) {
1785 DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
1786 "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
1787 tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
1788 pktPtr->simLength += tsoCopyBytes;
1789 pktPtr->length += tsoCopyBytes;
1790 tsoUsedLen += tsoCopyBytes;
1791 DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d\n",
1792 tsoDescBytesUsed, tsoCopyBytes);
1793 } else {
1794 pktPtr->simLength += TxdOp::getLen(desc);
1795 pktPtr->length += TxdOp::getLen(desc);
1796 }
1797
1798
1799
1800 if ((!TxdOp::eop(desc) && !useTso) ||
1801 (pktPtr->length < ( tsoMss + tsoHeaderLen) &&
1802 tsoTotalLen != tsoUsedLen && useTso)) {
1803 assert(!useTso || (tsoDescBytesUsed == TxdOp::getLen(desc)));
1804 igbe->anDq("TXS", annUnusedCacheQ);
1805 unusedCache.pop_front();
1806 igbe->anQ("TXS", annUsedCacheQ);
1807 usedCache.push_back(desc);
1808
1809 tsoDescBytesUsed = 0;
1810 pktDone = true;
1811 pktWaiting = false;
1812 pktMultiDesc = true;
1813
1814 DPRINTF(EthernetDesc, "Partial Packet Descriptor of %d bytes Done\n",
1815 pktPtr->length);
1816 pktPtr = NULL;
1817
1818 enableSm();
1819 igbe->checkDrain();
1820 return;
1821 }
1822
1823
1824 pktMultiDesc = false;
1825 // no support for vlans
1826 assert(!TxdOp::vle(desc));
1827
1828 // we only support single packet descriptors at this point
1829 if (!useTso)
1830 assert(TxdOp::eop(desc));
1831
1832 // set that this packet is done
1833 if (TxdOp::rs(desc))
1834 TxdOp::setDd(desc);
1835
1836 DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
1837 desc->d1, desc->d2);
1838
1839 if (useTso) {
1840 IpPtr ip(pktPtr);
1841 if (ip) {
1842 DPRINTF(EthernetDesc, "TSO: Modifying IP header. Id + %d\n",
1843 tsoPkts);
1844 ip->id(ip->id() + tsoPkts++);
1845 ip->len(pktPtr->length - EthPtr(pktPtr)->size());
1846
1847 TcpPtr tcp(ip);
1848 if (tcp) {
1849 DPRINTF(EthernetDesc,
1850 "TSO: Modifying TCP header. old seq %d + %d\n",
1851 tcp->seq(), tsoPrevSeq);
1852 tcp->seq(tcp->seq() + tsoPrevSeq);
1853 if (tsoUsedLen != tsoTotalLen)
1854 tcp->flags(tcp->flags() & ~9); // clear fin & psh
1855 }
1856 UdpPtr udp(ip);
1857 if (udp) {
1858 DPRINTF(EthernetDesc, "TSO: Modifying UDP header.\n");
1859 udp->len(pktPtr->length - EthPtr(pktPtr)->size());
1860 }
1861 }
1862 tsoPrevSeq = tsoUsedLen;
1863 }
1864
1865 if (DTRACE(EthernetDesc)) {
1866 IpPtr ip(pktPtr);
1867 if (ip)
1868            DPRINTF(EthernetDesc, "Processing Ip packet with Id=%d\n",
1869 ip->id());
1870 else
1871            DPRINTF(EthernetDesc, "Processing Non-Ip packet\n");
1872 }
1873
1874    // Checksums are only offloaded for new descriptor types
1875 if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) {
1876 DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
1877 IpPtr ip(pktPtr);
1878 assert(ip);
1879 if (TxdOp::ixsm(desc)) {
1880 ip->sum(0);
1881 ip->sum(cksum(ip));
1882 igbe->txIpChecksums++;
1883 DPRINTF(EthernetDesc, "Calculated IP checksum\n");
1884 }
1885 if (TxdOp::txsm(desc)) {
1886 TcpPtr tcp(ip);
1887 UdpPtr udp(ip);
1888 if (tcp) {
1889 tcp->sum(0);
1890 tcp->sum(cksum(tcp));
1891 igbe->txTcpChecksums++;
1892 DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
1893 } else if (udp) {
1894 assert(udp);
1895 udp->sum(0);
1896 udp->sum(cksum(udp));
1897 igbe->txUdpChecksums++;
1898 DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
1899 } else {
1900 panic("Told to checksum, but don't know how\n");
1901 }
1902 }
1903 }
1904
1905 if (TxdOp::ide(desc)) {
1906        // Deal with the tx timer interrupts
1907 DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
1908 if (igbe->regs.tidv.idv()) {
1909 Tick delay = igbe->regs.tidv.idv() * igbe->intClock();
1910 DPRINTF(EthernetDesc, "setting tidv\n");
1911 igbe->reschedule(igbe->tidvEvent, curTick() + delay, true);
1912 }
1913
1914 if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
1915 Tick delay = igbe->regs.tadv.idv() * igbe->intClock();
1916 DPRINTF(EthernetDesc, "setting tadv\n");
1917 if (!igbe->tadvEvent.scheduled()) {
1918 igbe->schedule(igbe->tadvEvent, curTick() + delay);
1919 }
1920 }
1921 }
1922
1923
1924 if (!useTso || TxdOp::getLen(desc) == tsoDescBytesUsed) {
1925 DPRINTF(EthernetDesc, "Descriptor Done\n");
1926 igbe->anDq("TXS", annUnusedCacheQ);
1927 unusedCache.pop_front();
1928 igbe->anQ("TXS", annUsedCacheQ);
1929 usedCache.push_back(desc);
1930 tsoDescBytesUsed = 0;
1931 }
1932
1933 if (useTso && tsoUsedLen == tsoTotalLen)
1934 useTso = false;
1935
1936
1937 DPRINTF(EthernetDesc,
1938 "------Packet of %d bytes ready for transmission-------\n",
1939 pktPtr->length);
1940 pktDone = true;
1941 pktWaiting = false;
1942 pktPtr = NULL;
1943 tsoPktHasHeader = false;
1944
1945 if (igbe->regs.txdctl.wthresh() == 0) {
1946 igbe->anBegin("TXS", "Desc Writeback");
1947 DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
1948 writeback(0);
1949 } else if (!igbe->regs.txdctl.gran() && igbe->regs.txdctl.wthresh() <=
1950 descInBlock(usedCache.size())) {
1951        DPRINTF(EthernetDesc, "used >= WTHRESH, writing back descriptor\n");
1952 igbe->anBegin("TXS", "Desc Writeback");
1953 writeback((igbe->cacheBlockSize()-1)>>4);
1954 } else if (igbe->regs.txdctl.wthresh() <= usedCache.size()) {
1955        DPRINTF(EthernetDesc, "used >= WTHRESH, writing back descriptor\n");
1956 igbe->anBegin("TXS", "Desc Writeback");
1957 writeback((igbe->cacheBlockSize()-1)>>4);
1958 }
1959
1960 enableSm();
1961 igbe->checkDrain();
1962}
1963
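// After a descriptor writeback completes: post the TX descriptor
// written-back interrupt and, if completion (head) writeback is enabled,
// DMA the current head pointer to the configured completion address.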
1964void
1965IGbE::TxDescCache::actionAfterWb()
1966{
1967 DPRINTF(EthernetDesc, "actionAfterWb() completionEnabled: %d\n",
1968 completionEnabled);
1969 igbe->postInterrupt(iGbReg::IT_TXDW);
1970 if (completionEnabled) {
1971 descEnd = igbe->regs.tdh();
1972 DPRINTF(EthernetDesc,
1973 "Completion writing back value: %d to addr: %#x\n", descEnd,
1974 completionAddress);
1975 igbe->dmaWrite(pciToDma(mbits(completionAddress, 63, 2)),
1976 sizeof(descEnd), &nullEvent, (uint8_t*)&descEnd, 0);
1977 }
1978}
1979
1980void
1981IGbE::TxDescCache::serialize(CheckpointOut &cp) const
1982{
1983 DescCache<TxDesc>::serialize(cp);
1984
1985 SERIALIZE_SCALAR(pktDone);
1986 SERIALIZE_SCALAR(isTcp);
1987 SERIALIZE_SCALAR(pktWaiting);
1988 SERIALIZE_SCALAR(pktMultiDesc);
1989
1990 SERIALIZE_SCALAR(useTso);
1991 SERIALIZE_SCALAR(tsoHeaderLen);
1992 SERIALIZE_SCALAR(tsoMss);
1993 SERIALIZE_SCALAR(tsoTotalLen);
1994 SERIALIZE_SCALAR(tsoUsedLen);
1995    SERIALIZE_SCALAR(tsoPrevSeq);
1996 SERIALIZE_SCALAR(tsoPktPayloadBytes);
1997 SERIALIZE_SCALAR(tsoLoadedHeader);
1998 SERIALIZE_SCALAR(tsoPktHasHeader);
1999 SERIALIZE_ARRAY(tsoHeader, 256);
2000 SERIALIZE_SCALAR(tsoDescBytesUsed);
2001 SERIALIZE_SCALAR(tsoCopyBytes);
2002 SERIALIZE_SCALAR(tsoPkts);
2003
2004 SERIALIZE_SCALAR(completionAddress);
2005 SERIALIZE_SCALAR(completionEnabled);
2006 SERIALIZE_SCALAR(descEnd);
2007}
2008
2009void
2010IGbE::TxDescCache::unserialize(CheckpointIn &cp)
2011{
2012 DescCache<TxDesc>::unserialize(cp);
2013
2014 UNSERIALIZE_SCALAR(pktDone);
2015 UNSERIALIZE_SCALAR(isTcp);
2016 UNSERIALIZE_SCALAR(pktWaiting);
2017 UNSERIALIZE_SCALAR(pktMultiDesc);
2018
2019 UNSERIALIZE_SCALAR(useTso);
2020 UNSERIALIZE_SCALAR(tsoHeaderLen);
2021 UNSERIALIZE_SCALAR(tsoMss);
2022 UNSERIALIZE_SCALAR(tsoTotalLen);
2023 UNSERIALIZE_SCALAR(tsoUsedLen);
2024    UNSERIALIZE_SCALAR(tsoPrevSeq);
2025 UNSERIALIZE_SCALAR(tsoPktPayloadBytes);
2026 UNSERIALIZE_SCALAR(tsoLoadedHeader);
2027 UNSERIALIZE_SCALAR(tsoPktHasHeader);
2028 UNSERIALIZE_ARRAY(tsoHeader, 256);
2029 UNSERIALIZE_SCALAR(tsoDescBytesUsed);
2030 UNSERIALIZE_SCALAR(tsoCopyBytes);
2031 UNSERIALIZE_SCALAR(tsoPkts);
2032
2033 UNSERIALIZE_SCALAR(completionAddress);
2034 UNSERIALIZE_SCALAR(completionEnabled);
2035 UNSERIALIZE_SCALAR(descEnd);
2036}
2037
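// Report whether a completed packet is ready for the TX FIFO; the flag is
// cleared as a side effect so each packet is only reported once.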
2038bool
2039IGbE::TxDescCache::packetAvailable()
2040{
2041 if (pktDone) {
2042 pktDone = false;
2043 return true;
2044 }
2045 return false;
2046}
2047
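// Re-enable the transmit state machine and restart the device clock,
// unless the device is draining for a checkpoint.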
2048void
2049IGbE::TxDescCache::enableSm()
2050{
2051 if (igbe->drainState() != DrainState::Draining) {
2052 igbe->txTick = true;
2053 igbe->restartClock();
2054 }
2055}
2056
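// True while a descriptor fetch, writeback, or packet DMA event is still
// in flight; the drain logic uses this to decide when the cache is idle.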
2057bool
2058IGbE::TxDescCache::hasOutstandingEvents()
2059{
2060 return pktEvent.scheduled() || wbEvent.scheduled() ||
2061 fetchEvent.scheduled();
2062}
2063
2064
2065///////////////////////////////////// IGbE /////////////////////////////////
2066
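// Restart the device tick event if any state machine still has work to do
// and the device is not draining.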
2067void
2068IGbE::restartClock()
2069{
2070 if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) &&
2071 drainState() == DrainState::Running)
2072 schedule(tickEvent, clockEdge(Cycles(1)));
2073}
2074
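// Stop all state machines and report Draining while descriptor or DMA
// events are still outstanding, Drained otherwise.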
2075DrainState
2076IGbE::drain()
2077{
2078 unsigned int count(0);
2079 if (rxDescCache.hasOutstandingEvents() ||
2080 txDescCache.hasOutstandingEvents()) {
2081 count++;
2082 }
2083
2084 txFifoTick = false;
2085 txTick = false;
2086 rxTick = false;
2087
2088 if (tickEvent.scheduled())
2089 deschedule(tickEvent);
2090
2091 if (count) {
2092 DPRINTF(Drain, "IGbE not drained\n");
2093 return DrainState::Draining;
2094 } else
2095 return DrainState::Drained;
2096}
2097
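// Resume after a drain or checkpoint restore: mark all state machines
// runnable again and restart the device clock.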
2098void
2099IGbE::drainResume()
2100{
2101 Drainable::drainResume();
2102
2103 txFifoTick = true;
2104 txTick = true;
2105 rxTick = true;
2106
2107 restartClock();
2108    DPRINTF(EthernetSM, "Resuming from drain\n");
2109}
2110
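// If a drain is in progress, signal completion once neither descriptor
// cache has outstanding events.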
2111void
2112IGbE::checkDrain()
2113{
2114 if (drainState() != DrainState::Draining)
2115 return;
2116
2117 txFifoTick = false;
2118 txTick = false;
2119 rxTick = false;
2120 if (!rxDescCache.hasOutstandingEvents() &&
2121 !txDescCache.hasOutstandingEvents()) {
2122 DPRINTF(Drain, "IGbE done draining, processing drain event\n");
2123 signalDrainDone();
2124 }
2125}
2126
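// Transmit state machine: move completed packets into the TX FIFO, keep
// the descriptor cache stocked, and start the DMA of the next packet's
// data; it stops ticking whenever it must wait on memory or FIFO space.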
2127void
2128IGbE::txStateMachine()
2129{
2130 if (!regs.tctl.en()) {
2131 txTick = false;
2132 DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
2133 return;
2134 }
2135
2136    // If we have a packet available and its length is not 0 (meaning it's
2137    // not a multidescriptor packet) put it in the fifo; otherwise on the
2138    // next iteration we'll get the rest of the data
2139 if (txPacket && txDescCache.packetAvailable()
2140 && !txDescCache.packetMultiDesc() && txPacket->length) {
2141 anQ("TXS", "TX FIFO Q");
2142 DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
2143#ifndef NDEBUG
2144 bool success =
2145#endif
2146 txFifo.push(txPacket);
2147        txFifoTick = drainState() != DrainState::Draining;
2148 assert(success);
2149 txPacket = NULL;
2150 anBegin("TXS", "Desc Writeback");
2151 txDescCache.writeback((cacheBlockSize()-1)>>4);
2152 return;
2153 }
2154
2155 // Only support descriptor granularity
2156 if (regs.txdctl.lwthresh() &&
2157 txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
2158 DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
2159 postInterrupt(IT_TXDLOW);
2160 }
2161
2162 if (!txPacket) {
2163 txPacket = std::make_shared<EthPacketData>(16384);
2164 }
2165
2166 if (!txDescCache.packetWaiting()) {
2167 if (txDescCache.descLeft() == 0) {
2168 postInterrupt(IT_TXQE);
2169 anBegin("TXS", "Desc Writeback");
2170 txDescCache.writeback(0);
2171 anBegin("TXS", "Desc Fetch");
2172 anWe("TXS", txDescCache.annUnusedCacheQ);
2173 txDescCache.fetchDescriptors();
2174 DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
2175                "writeback, stopping ticking, and posting TXQE\n");
2176 txTick = false;
2177 return;
2178 }
2179
2180
2181 if (!(txDescCache.descUnused())) {
2182 anBegin("TXS", "Desc Fetch");
2183 txDescCache.fetchDescriptors();
2184 anWe("TXS", txDescCache.annUnusedCacheQ);
2185 DPRINTF(EthernetSM, "TXS: No descriptors available in cache, "
2186 "fetching and stopping ticking\n");
2187 txTick = false;
2188 return;
2189 }
2190 anPq("TXS", txDescCache.annUnusedCacheQ);
2191
2192
2193 txDescCache.processContextDesc();
2194 if (txDescCache.packetWaiting()) {
2195 DPRINTF(EthernetSM,
2196 "TXS: Fetching TSO header, stopping ticking\n");
2197 txTick = false;
2198 return;
2199 }
2200
2201 unsigned size = txDescCache.getPacketSize(txPacket);
2202 if (size > 0 && txFifo.avail() > size) {
2203 anRq("TXS", "TX FIFO Q");
2204 anBegin("TXS", "DMA Packet");
2205 DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and "
2206 "beginning DMA of next packet\n", size);
2207 txFifo.reserve(size);
2208 txDescCache.getPacketData(txPacket);
2209 } else if (size == 0) {
2210 DPRINTF(EthernetSM, "TXS: getPacketSize returned: %d\n", size);
2211 DPRINTF(EthernetSM,
2212 "TXS: No packets to get, writing back used descriptors\n");
2213 anBegin("TXS", "Desc Writeback");
2214 txDescCache.writeback(0);
2215 } else {
2216 anWf("TXS", "TX FIFO Q");
2217 DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
2218 "available in FIFO\n");
2219 txTick = false;
2220 }
2221
2222
2223 return;
2224 }
2225 DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
2226 txTick = false;
2227}
2228
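// Called by the interface when a packet arrives from the wire: drop it if
// RX is disabled, or post RXO and reject it if the FIFO is full;
// otherwise queue it and restart the receive state machine.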
2229bool
2230IGbE::ethRxPkt(EthPacketPtr pkt)
2231{
2232 rxBytes += pkt->length;
2233 rxPackets++;
2234
2235    DPRINTF(Ethernet, "RxFIFO: Receiving packet from wire\n");
2236 anBegin("RXQ", "Wire Recv");
2237
2238
2239 if (!regs.rctl.en()) {
2240 DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
2241 anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
2242 return true;
2243 }
2244
2245 // restart the state machines if they are stopped
2246    rxTick = drainState() != DrainState::Draining;
2247 if ((rxTick || txTick) && !tickEvent.scheduled()) {
2248 DPRINTF(EthernetSM,
2249 "RXS: received packet into fifo, starting ticking\n");
2250 restartClock();
2251 }
2252
2253 if (!rxFifo.push(pkt)) {
2254 DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
2255 postInterrupt(IT_RXO, true);
2256 anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
2257 return false;
2258 }
2259
2260 if (CPA::available() && cpa->enabled()) {
2261 assert(sys->numSystemsRunning <= 2);
2262 System *other_sys;
2263 if (sys->systemList[0] == sys)
2264 other_sys = sys->systemList[1];
2265 else
2266 other_sys = sys->systemList[0];
2267
2268 cpa->hwDq(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
2269 anQ("RXQ", "RX FIFO Q");
2270 cpa->hwWe(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
2271 }
2272
2273 return true;
2274}
2275
2276
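// Receive state machine: move packets from the RX FIFO into guest memory
// through the descriptor cache, post RXDMT when free descriptors run low,
// and keep the cache fetched and written back; it stops ticking whenever
// it must wait on a packet DMA.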
2277void
2278IGbE::rxStateMachine()
2279{
2280 if (!regs.rctl.en()) {
2281 rxTick = false;
2282 DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
2283 return;
2284 }
2285
2286    // If the packet is done, check for interrupts/descriptors/etc.
2287 if (rxDescCache.packetDone()) {
2288 rxDmaPacket = false;
2289 DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
2290 int descLeft = rxDescCache.descLeft();
2291 DPRINTF(EthernetSM, "RXS: descLeft: %d rdmts: %d rdlen: %d\n",
2292 descLeft, regs.rctl.rdmts(), regs.rdlen());
2293 switch (regs.rctl.rdmts()) {
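        // The cases fall through on purpose: RDMTS selects the fraction of
        // RDLEN (1/8, 1/4 or 1/2) at or below which RXDMT is posted.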
2294 case 2: if (descLeft > .125 * regs.rdlen()) break;
2295 case 1: if (descLeft > .250 * regs.rdlen()) break;
2296 case 0: if (descLeft > .500 * regs.rdlen()) break;
2297 DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) "
2298 "because of descriptors left\n");
2299 postInterrupt(IT_RXDMT);
2300 break;
2301 }
2302
2303 if (rxFifo.empty())
2304 rxDescCache.writeback(0);
2305
2306 if (descLeft == 0) {
2307 anBegin("RXS", "Writeback Descriptors");
2308 rxDescCache.writeback(0);
2309 DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
2310 " writeback and stopping ticking\n");
2311 rxTick = false;
2312 }
2313
2314        // only descriptor granularity is supported
2315 assert(regs.rxdctl.gran());
2316
2317 if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
2318 DPRINTF(EthernetSM,
2319 "RXS: Writing back because WTHRESH >= descUsed\n");
2320 anBegin("RXS", "Writeback Descriptors");
2321 if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
2322 rxDescCache.writeback(regs.rxdctl.wthresh()-1);
2323 else
2324 rxDescCache.writeback((cacheBlockSize()-1)>>4);
2325 }
2326
2327 if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
2328 ((rxDescCache.descLeft() - rxDescCache.descUnused()) >
2329 regs.rxdctl.hthresh())) {
2330 DPRINTF(EthernetSM, "RXS: Fetching descriptors because "
2331 "descUnused < PTHRESH\n");
2332 anBegin("RXS", "Fetch Descriptors");
2333 rxDescCache.fetchDescriptors();
2334 }
2335
2336 if (rxDescCache.descUnused() == 0) {
2337 anBegin("RXS", "Fetch Descriptors");
2338 rxDescCache.fetchDescriptors();
2339 anWe("RXS", rxDescCache.annUnusedCacheQ);
2340 DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
2341 "fetching descriptors and stopping ticking\n");
2342 rxTick = false;
2343 }
2344 return;
2345 }
2346
2347 if (rxDmaPacket) {
2348 DPRINTF(EthernetSM,
2349 "RXS: stopping ticking until packet DMA completes\n");
2350 rxTick = false;
2351 return;
2352 }
2353
2354 if (!rxDescCache.descUnused()) {
2355 anBegin("RXS", "Fetch Descriptors");
2356 rxDescCache.fetchDescriptors();
2357 anWe("RXS", rxDescCache.annUnusedCacheQ);
2358        DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
2359                "fetching and stopping ticking\n");
2360        rxTick = false;
2362 return;
2363 }
2364 anPq("RXS", rxDescCache.annUnusedCacheQ);
2365
2366 if (rxFifo.empty()) {
2367 anWe("RXS", "RX FIFO Q");
2368 DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
2369 rxTick = false;
2370 return;
2371 }
2372 anPq("RXS", "RX FIFO Q");
2373 anBegin("RXS", "Get Desc");
2374
2375 EthPacketPtr pkt;
2376 pkt = rxFifo.front();
2377
2378
2379 pktOffset = rxDescCache.writePacket(pkt, pktOffset);
2380 DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
2381 if (pktOffset == pkt->length) {
2382        anBegin("RXS", "FIFO Dequeue");
2383 DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
2384 pktOffset = 0;
2385 anDq("RXS", "RX FIFO Q");
2386 rxFifo.pop();
2387 }
2388
2389 DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
2390 rxTick = false;
2391 rxDmaPacket = true;
2392 anBegin("RXS", "DMA Packet");
2393}
2394
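// Try to put the packet at the head of the TX FIFO onto the wire; on
// success update the transmit statistics and pop it from the FIFO.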
2395void
2396IGbE::txWire()
2397{
2398 txFifoTick = false;
2399
2400 if (txFifo.empty()) {
2401 anWe("TXQ", "TX FIFO Q");
2402 return;
2403 }
2404
2405
2406 anPq("TXQ", "TX FIFO Q");
2407 if (etherInt->sendPacket(txFifo.front())) {
2408 anQ("TXQ", "WireQ");
2409 if (DTRACE(EthernetSM)) {
2410 IpPtr ip(txFifo.front());
2411 if (ip)
2412 DPRINTF(EthernetSM, "Transmitting Ip packet with Id=%d\n",
2413 ip->id());
2414 else
2415 DPRINTF(EthernetSM, "Transmitting Non-Ip packet\n");
2416 }
2417 anDq("TXQ", "TX FIFO Q");
2418 anBegin("TXQ", "Wire Send");
2419 DPRINTF(EthernetSM,
2420 "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
2421 txFifo.avail());
2422
2423 txBytes += txFifo.front()->length;
2424 txPackets++;
2425
2426 txFifo.pop();
2427 }
2428}
2429
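// Main device tick: run the RX and TX state machines, drain the TX FIFO
// onto the wire, and reschedule while any of them still has work.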
2430void
2431IGbE::tick()
2432{
2433 DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");
2434
2435 inTick = true;
2436
2437 if (rxTick)
2438 rxStateMachine();
2439
2440 if (txTick)
2441 txStateMachine();
2442
2443 // If txWire returns and txFifoTick is still set, that means the data we
2444 // sent to the other end was already accepted and we can send another
2445 // frame right away. This is consistent with the previous behavior which
2446 // would send another frame if one was ready in ethTxDone. This version
2447 // avoids growing the stack with each frame sent which can cause stack
2448 // overflow.
2449 while (txFifoTick)
2450 txWire();
2451
2452 if (rxTick || txTick || txFifoTick)
2453 schedule(tickEvent, curTick() + clockPeriod());
2454
2455 inTick = false;
2456}
2457
2458void
2459IGbE::ethTxDone()
2460{
2461 anBegin("TXQ", "Send Done");
2462    // restart the tx state machines if they are stopped:
2463    // the fifo, to send another packet, and
2464    // the tx sm, to put more data into the fifo
2465    txFifoTick = drainState() != DrainState::Draining;
2466 if (txDescCache.descLeft() != 0 && drainState() != DrainState::Draining)
2467 txTick = true;
2468
2469 if (!inTick)
2470 restartClock();
2471 DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
2472}
2473
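// Checkpoint the device: registers, EEPROM interface state and contents,
// both FIFOs, any in-flight TX packet, the scheduled times of pending
// timer/interrupt events, and both descriptor caches.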
2474void
2475IGbE::serialize(CheckpointOut &cp) const
2476{
2477 PciDevice::serialize(cp);
2478
2479 regs.serialize(cp);
2480 SERIALIZE_SCALAR(eeOpBits);
2481 SERIALIZE_SCALAR(eeAddrBits);
2482 SERIALIZE_SCALAR(eeDataBits);
2483 SERIALIZE_SCALAR(eeOpcode);
2484 SERIALIZE_SCALAR(eeAddr);
2485 SERIALIZE_SCALAR(lastInterrupt);
2486 SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);
2487
2488 rxFifo.serialize("rxfifo", cp);
2489 txFifo.serialize("txfifo", cp);
2490
2491 bool txPktExists = txPacket != nullptr;
2492 SERIALIZE_SCALAR(txPktExists);
2493 if (txPktExists)
2494 txPacket->serialize("txpacket", cp);
2495
2496 Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
2497 inter_time = 0;
2498
2499 if (rdtrEvent.scheduled())
2500 rdtr_time = rdtrEvent.when();
2501 SERIALIZE_SCALAR(rdtr_time);
2502
2503 if (radvEvent.scheduled())
2504 radv_time = radvEvent.when();
2505 SERIALIZE_SCALAR(radv_time);
2506
2507 if (tidvEvent.scheduled())
2508 tidv_time = tidvEvent.when();
2509 SERIALIZE_SCALAR(tidv_time);
2510
2511 if (tadvEvent.scheduled())
2512 tadv_time = tadvEvent.when();
2513 SERIALIZE_SCALAR(tadv_time);
2514
2515 if (interEvent.scheduled())
2516 inter_time = interEvent.when();
2517 SERIALIZE_SCALAR(inter_time);
2518
2519 SERIALIZE_SCALAR(pktOffset);
2520
2521 txDescCache.serializeSection(cp, "TxDescCache");
2522 rxDescCache.serializeSection(cp, "RxDescCache");
2523}
2524
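// Restore the device from a checkpoint: the mirror image of serialize();
// the tick flags are re-enabled and any timer events that were pending
// when the checkpoint was taken are rescheduled.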
2525void
2526IGbE::unserialize(CheckpointIn &cp)
2527{
2528 PciDevice::unserialize(cp);
2529
2530 regs.unserialize(cp);
2531 UNSERIALIZE_SCALAR(eeOpBits);
2532 UNSERIALIZE_SCALAR(eeAddrBits);
2533 UNSERIALIZE_SCALAR(eeDataBits);
2534 UNSERIALIZE_SCALAR(eeOpcode);
2535 UNSERIALIZE_SCALAR(eeAddr);
2536 UNSERIALIZE_SCALAR(lastInterrupt);
2537 UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);
2538
2539 rxFifo.unserialize("rxfifo", cp);
2540 txFifo.unserialize("txfifo", cp);
2541
2542 bool txPktExists;
2543 UNSERIALIZE_SCALAR(txPktExists);
2544 if (txPktExists) {
2545 txPacket = std::make_shared<EthPacketData>(16384);
2546 txPacket->unserialize("txpacket", cp);
2547 }
2548
2549 rxTick = true;
2550 txTick = true;
2551 txFifoTick = true;
2552
2553 Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
2554 UNSERIALIZE_SCALAR(rdtr_time);
2555 UNSERIALIZE_SCALAR(radv_time);
2556 UNSERIALIZE_SCALAR(tidv_time);
2557 UNSERIALIZE_SCALAR(tadv_time);
2558 UNSERIALIZE_SCALAR(inter_time);
2559
2560 if (rdtr_time)
2561 schedule(rdtrEvent, rdtr_time);
2562
2563 if (radv_time)
2564 schedule(radvEvent, radv_time);
2565
2566 if (tidv_time)
2567 schedule(tidvEvent, tidv_time);
2568
2569 if (tadv_time)
2570 schedule(tadvEvent, tadv_time);
2571
2572 if (inter_time)
2573 schedule(interEvent, inter_time);
2574
2575 UNSERIALIZE_SCALAR(pktOffset);
2576
2577 txDescCache.unserializeSection(cp, "TxDescCache");
2578 rxDescCache.unserializeSection(cp, "RxDescCache");
2579}
2580
2581IGbE *
2582IGbEParams::create()
2583{
2584 return new IGbE(this);
2585}