1/*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Nathan Binkert
29 * Lisa Hsu
30 */
31
32/** @file
33 * Device module for modelling the National Semiconductor
34 * DP83820 ethernet controller. Does not support priority queueing
35 */
36
37#include "dev/net/ns_gige.hh"
38
39#include <deque>
40#include <memory>
41#include <string>
42
43#include "base/debug.hh"
44#include "base/inet.hh"
45#include "base/types.hh"
46#include "config/the_isa.hh"
47#include "debug/EthernetAll.hh"
48#include "dev/net/etherlink.hh"
49#include "mem/packet.hh"
50#include "mem/packet_access.hh"
51#include "params/NSGigE.hh"
52#include "sim/system.hh"
53
54// clang complains about std::set being overloaded with Packet::set if
55// we open up the entire namespace std
56using std::make_shared;
57using std::min;
58using std::ostream;
59using std::string;
60
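// Human-readable names for the rx/tx state machines and the DMA engine,
// indexed by the corresponding state enums (presumably declared in
// ns_gige.hh); they are only used for DPRINTF debug output below.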
61const char *NsRxStateStrings[] =
62{
63 "rxIdle",
64 "rxDescRefr",
65 "rxDescRead",
66 "rxFifoBlock",
67 "rxFragWrite",
68 "rxDescWrite",
69 "rxAdvance"
70};
71
72const char *NsTxStateStrings[] =
73{
74 "txIdle",
75 "txDescRefr",
76 "txDescRead",
77 "txFifoBlock",
78 "txFragRead",
79 "txDescWrite",
80 "txAdvance"
81};
82
83const char *NsDmaState[] =
84{
85 "dmaIdle",
86 "dmaReading",
87 "dmaWriting",
88 "dmaReadWaiting",
89 "dmaWriteWaiting"
90};
91
92using namespace Net;
93using namespace TheISA;
94
95///////////////////////////////////////////////////////////////////////
96//
97// NSGigE PCI Device
98//
99NSGigE::NSGigE(Params *p)
100 : EtherDevBase(p), ioEnable(false),
101 txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
102 txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
103 txXferLen(0), rxXferLen(0), rxDmaFree(false), txDmaFree(false),
104 txState(txIdle), txEnable(false), CTDD(false), txHalt(false),
105 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
106 rxEnable(false), CRDD(false), rxPktBytes(0), rxHalt(false),
107 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
108 eepromState(eepromStart), eepromClk(false), eepromBitsToRx(0),
109 eepromOpcode(0), eepromAddress(0), eepromData(0),
110 dmaReadDelay(p->dma_read_delay), dmaWriteDelay(p->dma_write_delay),
111 dmaReadFactor(p->dma_read_factor), dmaWriteFactor(p->dma_write_factor),
112 rxDmaData(NULL), rxDmaAddr(0), rxDmaLen(0),
113 txDmaData(NULL), txDmaAddr(0), txDmaLen(0),
114 rxDmaReadEvent([this]{ rxDmaReadDone(); }, name()),
115 rxDmaWriteEvent([this]{ rxDmaWriteDone(); }, name()),
116 txDmaReadEvent([this]{ txDmaReadDone(); }, name()),
117 txDmaWriteEvent([this]{ txDmaWriteDone(); }, name()),
118 dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
119 txDelay(p->tx_delay), rxDelay(p->rx_delay),
120 rxKickTick(0),
121 rxKickEvent([this]{ rxKick(); }, name()),
122 txKickTick(0),
123 txKickEvent([this]{ txKick(); }, name()),
124 txEvent([this]{ txEventTransmit(); }, name()),
125 rxFilterEnable(p->rx_filter),
126 acceptBroadcast(false), acceptMulticast(false), acceptUnicast(false),
127 acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
128 intrDelay(p->intr_delay), intrTick(0), cpuPendingIntr(false),
129 intrEvent(0), interface(0)
130{
131
132
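    // Construction: create the single Ethernet port, put the device
    // registers into their reset state, seed the perfect-match ROM with the
    // MAC address supplied by the hardware_address parameter, and zero the
    // cached rx/tx descriptors.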
133 interface = new NSGigEInt(name() + ".int0", this);
134
135 regsReset();
136 memcpy(&rom.perfectMatch, p->hardware_address.bytes(), ETH_ADDR_LEN);
137
138 memset(&rxDesc32, 0, sizeof(rxDesc32));
139 memset(&txDesc32, 0, sizeof(txDesc32));
140 memset(&rxDesc64, 0, sizeof(rxDesc64));
141 memset(&txDesc64, 0, sizeof(txDesc64));
142}
143
144NSGigE::~NSGigE()
145{
146 delete interface;
147}
148
149/**
150 * This is to write to the PCI general configuration registers
151 */
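// Standard PCI config writes are delegated to PciDevice::writeConfig(); we
// only peek at the command register afterwards so that ioEnable tracks the
// I/O space enable bit, which the PIO read()/write() handlers assert on.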
152Tick
153NSGigE::writeConfig(PacketPtr pkt)
154{
155 int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
156 if (offset < PCI_DEVICE_SPECIFIC)
157 PciDevice::writeConfig(pkt);
158 else
159 panic("Device specific PCI config space not implemented!\n");
160
161 switch (offset) {
    // seems to work fine without all these PCI settings, but we track the
    // I/O space enable bit so an assertion will fail if we ever need to
    // implement it properly
165 case PCI_COMMAND:
166 if (config.data[offset] & PCI_CMD_IOSE)
167 ioEnable = true;
168 else
169 ioEnable = false;
170 break;
171 }
172
173 return configDelay;
174}
175
Port &
NSGigE::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "interface")
        return *interface;
    return EtherDevBase::getPort(if_name, idx);
}
186
187/**
188 * This reads the device registers, which are detailed in the NS83820
189 * spec sheet
190 */
191Tick
192NSGigE::read(PacketPtr pkt)
193{
194 assert(ioEnable);
195
196 //The mask is to give you only the offset into the device register file
197 Addr daddr = pkt->getAddr() & 0xfff;
198 DPRINTF(EthernetPIO, "read da=%#x pa=%#x size=%d\n",
199 daddr, pkt->getAddr(), pkt->getSize());
200
201
202 // there are some reserved registers, you can see ns_gige_reg.h and
203 // the spec sheet for details
204 if (daddr > LAST && daddr <= RESERVED) {
205 panic("Accessing reserved register");
206 } else if (daddr > RESERVED && daddr <= 0x3FC) {
207 return readConfig(pkt);
208 } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // We don't implement all the MIBs; hopefully the kernel
        // doesn't actually depend on their values.
        // The MIBs are just hardware statistics counters.
212 pkt->setLE<uint32_t>(0);
213 pkt->makeAtomicResponse();
214 return pioDelay;
215 } else if (daddr > 0x3FC)
216 panic("Something is messed up!\n");
217
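    // Everything else is a 32-bit register read; the switch below returns
    // the current register contents and models read side effects where the
    // DP83820 has them (e.g. reading ISR clears the pending interrupt bits
    // via devIntrClear()).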
218 assert(pkt->getSize() == sizeof(uint32_t));
219 uint32_t &reg = *pkt->getPtr<uint32_t>();
220 uint16_t rfaddr;
221
222 switch (daddr) {
223 case CR:
224 reg = regs.command;
225 //these are supposed to be cleared on a read
226 reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
227 break;
228
229 case CFGR:
230 reg = regs.config;
231 break;
232
233 case MEAR:
234 reg = regs.mear;
235 break;
236
237 case PTSCR:
238 reg = regs.ptscr;
239 break;
240
241 case ISR:
242 reg = regs.isr;
243 devIntrClear(ISR_ALL);
244 break;
245
246 case IMR:
247 reg = regs.imr;
248 break;
249
250 case IER:
251 reg = regs.ier;
252 break;
253
254 case IHR:
255 reg = regs.ihr;
256 break;
257
258 case TXDP:
259 reg = regs.txdp;
260 break;
261
262 case TXDP_HI:
263 reg = regs.txdp_hi;
264 break;
265
266 case TX_CFG:
267 reg = regs.txcfg;
268 break;
269
270 case GPIOR:
271 reg = regs.gpior;
272 break;
273
274 case RXDP:
275 reg = regs.rxdp;
276 break;
277
278 case RXDP_HI:
279 reg = regs.rxdp_hi;
280 break;
281
282 case RX_CFG:
283 reg = regs.rxcfg;
284 break;
285
286 case PQCR:
287 reg = regs.pqcr;
288 break;
289
290 case WCSR:
291 reg = regs.wcsr;
292 break;
293
294 case PCR:
295 reg = regs.pcr;
296 break;
297
298 // see the spec sheet for how RFCR and RFDR work
299 // basically, you write to RFCR to tell the machine
300 // what you want to do next, then you act upon RFDR,
301 // and the device will be prepared b/c of what you
302 // wrote to RFCR
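    // Layout note: the 6-byte MAC lives in rom.perfectMatch and is exposed
    // through RFDR as three little-endian 16-bit words at receive filter
    // addresses 0x000, 0x002 and 0x004; addresses from FHASH_ADDR upward
    // map onto the multicast filter hash table. For example, with a
    // (hypothetical) MAC of 00:90:00:00:00:01 the three words read back as
    // 0x9000, 0x0000 and 0x0100.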
303 case RFCR:
304 reg = regs.rfcr;
305 break;
306
307 case RFDR:
308 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
309 switch (rfaddr) {
310 // Read from perfect match ROM octets
311 case 0x000:
312 reg = rom.perfectMatch[1];
313 reg = reg << 8;
314 reg += rom.perfectMatch[0];
315 break;
316 case 0x002:
317 reg = rom.perfectMatch[3] << 8;
318 reg += rom.perfectMatch[2];
319 break;
320 case 0x004:
321 reg = rom.perfectMatch[5] << 8;
322 reg += rom.perfectMatch[4];
323 break;
324 default:
325 // Read filter hash table
326 if (rfaddr >= FHASH_ADDR &&
327 rfaddr < FHASH_ADDR + FHASH_SIZE) {
328
329 // Only word-aligned reads supported
330 if (rfaddr % 2)
331 panic("unaligned read from filter hash table!");
332
333 reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8;
334 reg += rom.filterHash[rfaddr - FHASH_ADDR];
335 break;
336 }
337
338 panic("reading RFDR for something other than pattern"
339 " matching or hashing! %#x\n", rfaddr);
340 }
341 break;
342
343 case SRR:
344 reg = regs.srr;
345 break;
346
347 case MIBC:
348 reg = regs.mibc;
349 reg &= ~(MIBC_MIBS | MIBC_ACLR);
350 break;
351
352 case VRCR:
353 reg = regs.vrcr;
354 break;
355
356 case VTCR:
357 reg = regs.vtcr;
358 break;
359
360 case VDR:
361 reg = regs.vdr;
362 break;
363
364 case CCSR:
365 reg = regs.ccsr;
366 break;
367
368 case TBICR:
369 reg = regs.tbicr;
370 break;
371
372 case TBISR:
373 reg = regs.tbisr;
374 break;
375
376 case TANAR:
377 reg = regs.tanar;
378 break;
379
380 case TANLPAR:
381 reg = regs.tanlpar;
382 break;
383
384 case TANER:
385 reg = regs.taner;
386 break;
387
388 case TESR:
389 reg = regs.tesr;
390 break;
391
392 case M5REG:
393 reg = 0;
394 if (params()->rx_thread)
395 reg |= M5REG_RX_THREAD;
396 if (params()->tx_thread)
397 reg |= M5REG_TX_THREAD;
398 if (params()->rss)
399 reg |= M5REG_RSS;
400 break;
401
402 default:
403 panic("reading unimplemented register: addr=%#x", daddr);
404 }
405
406 DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
407 daddr, reg, reg);
408
409 pkt->makeAtomicResponse();
410 return pioDelay;
411}
412
413Tick
414NSGigE::write(PacketPtr pkt)
415{
416 assert(ioEnable);
417
418 Addr daddr = pkt->getAddr() & 0xfff;
419 DPRINTF(EthernetPIO, "write da=%#x pa=%#x size=%d\n",
420 daddr, pkt->getAddr(), pkt->getSize());
421
422 if (daddr > LAST && daddr <= RESERVED) {
423 panic("Accessing reserved register");
424 } else if (daddr > RESERVED && daddr <= 0x3FC) {
425 return writeConfig(pkt);
426 } else if (daddr > 0x3FC)
427 panic("Something is messed up!\n");
428
429 if (pkt->getSize() == sizeof(uint32_t)) {
430 uint32_t reg = pkt->getLE<uint32_t>();
431 uint16_t rfaddr;
432
433 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);
434
435 switch (daddr) {
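        // CR is the main command register: the enable bits kick the tx/rx
        // state machines (txKick()/rxKick()) when they are idle, the
        // disable bits stop them, and the reset bits invoke txReset(),
        // rxReset() or a full regsReset().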
436 case CR:
437 regs.command = reg;
438 if (reg & CR_TXD) {
439 txEnable = false;
440 } else if (reg & CR_TXE) {
441 txEnable = true;
442
443 // the kernel is enabling the transmit machine
444 if (txState == txIdle)
445 txKick();
446 }
447
448 if (reg & CR_RXD) {
449 rxEnable = false;
450 } else if (reg & CR_RXE) {
451 rxEnable = true;
452
453 if (rxState == rxIdle)
454 rxKick();
455 }
456
457 if (reg & CR_TXR)
458 txReset();
459
460 if (reg & CR_RXR)
461 rxReset();
462
463 if (reg & CR_SWI)
464 devIntrPost(ISR_SWI);
465
466 if (reg & CR_RST) {
467 txReset();
468 rxReset();
469
470 regsReset();
471 }
472 break;
473
474 case CFGR:
475 if (reg & CFGR_LNKSTS ||
476 reg & CFGR_SPDSTS ||
477 reg & CFGR_DUPSTS ||
478 reg & CFGR_RESERVED ||
479 reg & CFGR_T64ADDR ||
480 reg & CFGR_PCI64_DET) {
481 // First clear all writable bits
482 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
483 CFGR_RESERVED | CFGR_T64ADDR |
484 CFGR_PCI64_DET;
485 // Now set the appropriate writable bits
486 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
487 CFGR_RESERVED | CFGR_T64ADDR |
488 CFGR_PCI64_DET);
489 }
490
// All these #if 0's are here because we don't think the kernel needs these
// implemented. If a problem turns out to relate to one of them, you may
// need to add the functionality in.
494
495// grouped together and #if 0'ed to avoid empty if body and make clang happy
496#if 0
497 if (reg & CFGR_TBI_EN) ;
498 if (reg & CFGR_MODE_1000) ;
499
500 if (reg & CFGR_PINT_DUPSTS ||
501 reg & CFGR_PINT_LNKSTS ||
502 reg & CFGR_PINT_SPDSTS)
503 ;
504
505 if (reg & CFGR_TMRTEST) ;
506 if (reg & CFGR_MRM_DIS) ;
507 if (reg & CFGR_MWI_DIS) ;
508
509 if (reg & CFGR_DATA64_EN) ;
510 if (reg & CFGR_M64ADDR) ;
511 if (reg & CFGR_PHY_RST) ;
512 if (reg & CFGR_PHY_DIS) ;
513
514 if (reg & CFGR_REQALG) ;
515 if (reg & CFGR_SB) ;
516 if (reg & CFGR_POW) ;
517 if (reg & CFGR_EXD) ;
518 if (reg & CFGR_PESEL) ;
519 if (reg & CFGR_BROM_DIS) ;
520 if (reg & CFGR_EXT_125) ;
521 if (reg & CFGR_BEM) ;
522
523 if (reg & CFGR_T64ADDR) ;
524 // panic("CFGR_T64ADDR is read only register!\n");
525#endif
526 if (reg & CFGR_AUTO_1000)
527 panic("CFGR_AUTO_1000 not implemented!\n");
528
529 if (reg & CFGR_PCI64_DET)
530 panic("CFGR_PCI64_DET is read only register!\n");
531
532 if (reg & CFGR_EXTSTS_EN)
533 extstsEnable = true;
534 else
535 extstsEnable = false;
536 break;
537
538 case MEAR:
539 // Clear writable bits
540 regs.mear &= MEAR_EEDO;
541 // Set appropriate writable bits
542 regs.mear |= reg & ~MEAR_EEDO;
543
544 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
545 // even though it could get it through RFDR
546 if (reg & MEAR_EESEL) {
547 // Rising edge of clock
548 if (reg & MEAR_EECLK && !eepromClk)
549 eepromKick();
550 }
551 else {
552 eepromState = eepromStart;
553 regs.mear &= ~MEAR_EEDI;
554 }
555
556 eepromClk = reg & MEAR_EECLK;
557
558 // since phy is completely faked, MEAR_MD* don't matter
559
560// grouped together and #if 0'ed to avoid empty if body and make clang happy
561#if 0
562 if (reg & MEAR_MDIO) ;
563 if (reg & MEAR_MDDIR) ;
564 if (reg & MEAR_MDC) ;
565#endif
566 break;
567
568 case PTSCR:
569 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
            // these control BISTs for various parts of the chip - we
            // don't care, so we just fake that the BIST is done
572 if (reg & PTSCR_RBIST_EN)
573 regs.ptscr |= PTSCR_RBIST_DONE;
574 if (reg & PTSCR_EEBIST_EN)
575 regs.ptscr &= ~PTSCR_EEBIST_EN;
576 if (reg & PTSCR_EELOAD_EN)
577 regs.ptscr &= ~PTSCR_EELOAD_EN;
578 break;
579
580 case ISR: /* writing to the ISR has no effect */
581 panic("ISR is a read only register!\n");
582
583 case IMR:
584 regs.imr = reg;
585 devIntrChangeMask();
586 break;
587
588 case IER:
589 regs.ier = reg;
590 break;
591
592 case IHR:
593 regs.ihr = reg;
594 /* not going to implement real interrupt holdoff */
595 break;
596
597 case TXDP:
598 regs.txdp = (reg & 0xFFFFFFFC);
599 assert(txState == txIdle);
600 CTDD = false;
601 break;
602
603 case TXDP_HI:
604 regs.txdp_hi = reg;
605 break;
606
607 case TX_CFG:
608 regs.txcfg = reg;
609#if 0
610 if (reg & TX_CFG_CSI) ;
611 if (reg & TX_CFG_HBI) ;
612 if (reg & TX_CFG_MLB) ;
613 if (reg & TX_CFG_ATP) ;
614 if (reg & TX_CFG_ECRETRY) {
615 /*
616 * this could easily be implemented, but considering
617 * the network is just a fake pipe, wouldn't make
618 * sense to do this
619 */
620 }
621
622 if (reg & TX_CFG_BRST_DIS) ;
623#endif
624
625#if 0
626 /* we handle our own DMA, ignore the kernel's exhortations */
627 if (reg & TX_CFG_MXDMA) ;
628#endif
629
630 // also, we currently don't care about fill/drain
631 // thresholds though this may change in the future with
632 // more realistic networks or a driver which changes it
633 // according to feedback
634
635 break;
636
637 case GPIOR:
638 // Only write writable bits
639 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
640 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN;
641 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
642 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN);
643 /* these just control general purpose i/o pins, don't matter */
644 break;
645
646 case RXDP:
647 regs.rxdp = reg;
648 CRDD = false;
649 break;
650
651 case RXDP_HI:
652 regs.rxdp_hi = reg;
653 break;
654
655 case RX_CFG:
656 regs.rxcfg = reg;
657#if 0
658 if (reg & RX_CFG_AEP) ;
659 if (reg & RX_CFG_ARP) ;
660 if (reg & RX_CFG_STRIPCRC) ;
661 if (reg & RX_CFG_RX_RD) ;
662 if (reg & RX_CFG_ALP) ;
663 if (reg & RX_CFG_AIRL) ;
664
665 /* we handle our own DMA, ignore what kernel says about it */
666 if (reg & RX_CFG_MXDMA) ;
667
668 //also, we currently don't care about fill/drain thresholds
669 //though this may change in the future with more realistic
670 //networks or a driver which changes it according to feedback
671 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
672#endif
673 break;
674
675 case PQCR:
676 /* there is no priority queueing used in the linux 2.6 driver */
677 regs.pqcr = reg;
678 break;
679
680 case WCSR:
681 /* not going to implement wake on LAN */
682 regs.wcsr = reg;
683 break;
684
685 case PCR:
686 /* not going to implement pause control */
687 regs.pcr = reg;
688 break;
689
690 case RFCR:
691 regs.rfcr = reg;
692
693 rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
694 acceptBroadcast = (reg & RFCR_AAB) ? true : false;
695 acceptMulticast = (reg & RFCR_AAM) ? true : false;
696 acceptUnicast = (reg & RFCR_AAU) ? true : false;
697 acceptPerfect = (reg & RFCR_APM) ? true : false;
698 acceptArp = (reg & RFCR_AARP) ? true : false;
699 multicastHashEnable = (reg & RFCR_MHEN) ? true : false;
700
701#if 0
702 if (reg & RFCR_APAT)
703 panic("RFCR_APAT not implemented!\n");
704#endif
705 if (reg & RFCR_UHEN)
706 panic("Unicast hash filtering not used by drivers!\n");
707
708 if (reg & RFCR_ULM)
709 panic("RFCR_ULM not implemented!\n");
710
711 break;
712
713 case RFDR:
714 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
715 switch (rfaddr) {
716 case 0x000:
717 rom.perfectMatch[0] = (uint8_t)reg;
718 rom.perfectMatch[1] = (uint8_t)(reg >> 8);
719 break;
720 case 0x002:
721 rom.perfectMatch[2] = (uint8_t)reg;
722 rom.perfectMatch[3] = (uint8_t)(reg >> 8);
723 break;
724 case 0x004:
725 rom.perfectMatch[4] = (uint8_t)reg;
726 rom.perfectMatch[5] = (uint8_t)(reg >> 8);
727 break;
728 default:
729
730 if (rfaddr >= FHASH_ADDR &&
731 rfaddr < FHASH_ADDR + FHASH_SIZE) {
732
733 // Only word-aligned writes supported
734 if (rfaddr % 2)
735 panic("unaligned write to filter hash table!");
736
737 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg;
738 rom.filterHash[rfaddr - FHASH_ADDR + 1]
739 = (uint8_t)(reg >> 8);
740 break;
741 }
742 panic("writing RFDR for something other than pattern matching "
743 "or hashing! %#x\n", rfaddr);
744 }
745 break;
746
747 case BRAR:
748 regs.brar = reg;
749 break;
750
751 case BRDR:
752 panic("the driver never uses BRDR, something is wrong!\n");
753
754 case SRR:
755 panic("SRR is read only register!\n");
756
757 case MIBC:
758 panic("the driver never uses MIBC, something is wrong!\n");
759
760 case VRCR:
761 regs.vrcr = reg;
762 break;
763
764 case VTCR:
765 regs.vtcr = reg;
766 break;
767
768 case VDR:
769 panic("the driver never uses VDR, something is wrong!\n");
770
771 case CCSR:
772 /* not going to implement clockrun stuff */
773 regs.ccsr = reg;
774 break;
775
776 case TBICR:
777 regs.tbicr = reg;
778 if (reg & TBICR_MR_LOOPBACK)
779 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
780
781 if (reg & TBICR_MR_AN_ENABLE) {
782 regs.tanlpar = regs.tanar;
783 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
784 }
785
786#if 0
787 if (reg & TBICR_MR_RESTART_AN) ;
788#endif
789
790 break;
791
792 case TBISR:
793 panic("TBISR is read only register!\n");
794
795 case TANAR:
796 // Only write the writable bits
797 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED;
798 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED);
799
800 // Pause capability unimplemented
801#if 0
802 if (reg & TANAR_PS2) ;
803 if (reg & TANAR_PS1) ;
804#endif
805
806 break;
807
808 case TANLPAR:
809 panic("this should only be written to by the fake phy!\n");
810
811 case TANER:
812 panic("TANER is read only register!\n");
813
814 case TESR:
815 regs.tesr = reg;
816 break;
817
818 default:
819 panic("invalid register access daddr=%#x", daddr);
820 }
821 } else {
822 panic("Invalid Request Size");
823 }
824 pkt->makeAtomicResponse();
825 return pioDelay;
826}
827
828void
829NSGigE::devIntrPost(uint32_t interrupts)
830{
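    // Merge the new bits into ISR and, if any of them are unmasked by IMR,
    // schedule a CPU interrupt; the post is delayed by intrDelay ticks
    // unless one of the ISR_NODELAY sources is involved.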
831 if (interrupts & ISR_RESERVE)
832 panic("Cannot set a reserved interrupt");
833
834 if (interrupts & ISR_NOIMPL)
835 warn("interrupt not implemented %#x\n", interrupts);
836
837 interrupts &= ISR_IMPL;
838 regs.isr |= interrupts;
839
840 if (interrupts & regs.imr) {
841 if (interrupts & ISR_SWI) {
842 totalSwi++;
843 }
844 if (interrupts & ISR_RXIDLE) {
845 totalRxIdle++;
846 }
847 if (interrupts & ISR_RXOK) {
848 totalRxOk++;
849 }
850 if (interrupts & ISR_RXDESC) {
851 totalRxDesc++;
852 }
853 if (interrupts & ISR_TXOK) {
854 totalTxOk++;
855 }
856 if (interrupts & ISR_TXIDLE) {
857 totalTxIdle++;
858 }
859 if (interrupts & ISR_TXDESC) {
860 totalTxDesc++;
861 }
862 if (interrupts & ISR_RXORN) {
863 totalRxOrn++;
864 }
865 }
866
867 DPRINTF(EthernetIntr,
868 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
869 interrupts, regs.isr, regs.imr);
870
871 if ((regs.isr & regs.imr)) {
872 Tick when = curTick();
873 if ((regs.isr & regs.imr & ISR_NODELAY) == 0)
874 when += intrDelay;
875 postedInterrupts++;
876 cpuIntrPost(when);
877 }
878}
879
/* Writing the interrupt-counting stats inside this function means it is now
   limited to being used to clear all interrupts when the kernel reads and
   services ISR. Just a note in case you were thinking of expanding its use.
*/
885void
886NSGigE::devIntrClear(uint32_t interrupts)
887{
888 if (interrupts & ISR_RESERVE)
889 panic("Cannot clear a reserved interrupt");
890
891 if (regs.isr & regs.imr & ISR_SWI) {
892 postedSwi++;
893 }
894 if (regs.isr & regs.imr & ISR_RXIDLE) {
895 postedRxIdle++;
896 }
897 if (regs.isr & regs.imr & ISR_RXOK) {
898 postedRxOk++;
899 }
900 if (regs.isr & regs.imr & ISR_RXDESC) {
901 postedRxDesc++;
902 }
903 if (regs.isr & regs.imr & ISR_TXOK) {
904 postedTxOk++;
905 }
906 if (regs.isr & regs.imr & ISR_TXIDLE) {
907 postedTxIdle++;
908 }
909 if (regs.isr & regs.imr & ISR_TXDESC) {
910 postedTxDesc++;
911 }
912 if (regs.isr & regs.imr & ISR_RXORN) {
913 postedRxOrn++;
914 }
915
916 interrupts &= ~ISR_NOIMPL;
917 regs.isr &= ~interrupts;
918
919 DPRINTF(EthernetIntr,
920 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
921 interrupts, regs.isr, regs.imr);
922
923 if (!(regs.isr & regs.imr))
924 cpuIntrClear();
925}
926
927void
928NSGigE::devIntrChangeMask()
929{
930 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
931 regs.isr, regs.imr, regs.isr & regs.imr);
932
933 if (regs.isr & regs.imr)
934 cpuIntrPost(curTick());
935 else
936 cpuIntrClear();
937}
938
939void
940NSGigE::cpuIntrPost(Tick when)
941{
942 // If the interrupt you want to post is later than an interrupt
943 // already scheduled, just let it post in the coming one and don't
944 // schedule another.
945 // HOWEVER, must be sure that the scheduled intrTick is in the
946 // future (this was formerly the source of a bug)
947 /**
948 * @todo this warning should be removed and the intrTick code should
949 * be fixed.
950 */
951 assert(when >= curTick());
952 assert(intrTick >= curTick() || intrTick == 0);
953 if (when > intrTick && intrTick != 0) {
954 DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
955 intrTick);
956 return;
957 }
958
959 intrTick = when;
960 if (intrTick < curTick()) {
961 intrTick = curTick();
962 }
963
964 DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
965 intrTick);
966
967 if (intrEvent)
968 intrEvent->squash();
969
970 intrEvent = new EventFunctionWrapper([this]{ cpuInterrupt(); },
971 name(), true);
972 schedule(intrEvent, intrTick);
973}
974
975void
976NSGigE::cpuInterrupt()
977{
978 assert(intrTick == curTick());
979
980 // Whether or not there's a pending interrupt, we don't care about
981 // it anymore
982 intrEvent = 0;
983 intrTick = 0;
984
985 // Don't send an interrupt if there's already one
986 if (cpuPendingIntr) {
987 DPRINTF(EthernetIntr,
                "would send an interrupt now, but there's already one pending\n");
989 } else {
990 // Send interrupt
991 cpuPendingIntr = true;
992
993 DPRINTF(EthernetIntr, "posting interrupt\n");
994 intrPost();
995 }
996}
997
998void
999NSGigE::cpuIntrClear()
1000{
1001 if (!cpuPendingIntr)
1002 return;
1003
1004 if (intrEvent) {
1005 intrEvent->squash();
1006 intrEvent = 0;
1007 }
1008
1009 intrTick = 0;
1010
1011 cpuPendingIntr = false;
1012
1013 DPRINTF(EthernetIntr, "clearing interrupt\n");
1014 intrClear();
1015}
1016
1017bool
1018NSGigE::cpuIntrPending() const
1019{ return cpuPendingIntr; }
1020
1021void
1022NSGigE::txReset()
1023{
1024
1025 DPRINTF(Ethernet, "transmit reset\n");
1026
1027 CTDD = false;
    txEnable = false;
1029 txFragPtr = 0;
1030 assert(txDescCnt == 0);
1031 txFifo.clear();
1032 txState = txIdle;
1033 assert(txDmaState == dmaIdle);
1034}
1035
1036void
1037NSGigE::rxReset()
1038{
1039 DPRINTF(Ethernet, "receive reset\n");
1040
1041 CRDD = false;
1042 assert(rxPktBytes == 0);
1043 rxEnable = false;
1044 rxFragPtr = 0;
1045 assert(rxDescCnt == 0);
1046 assert(rxDmaState == dmaIdle);
1047 rxFifo.clear();
1048 rxState = rxIdle;
1049}
1050
1051void
1052NSGigE::regsReset()
1053{
1054 memset(&regs, 0, sizeof(regs));
1055 regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
1056 regs.mear = 0x12;
1057 regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
1058 // fill threshold to 32 bytes
1059 regs.rxcfg = 0x4; // set drain threshold to 16 bytes
1060 regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
1061 regs.mibc = MIBC_FRZ;
1062 regs.vdr = 0x81; // set the vlan tag type to 802.1q
1063 regs.tesr = 0xc000; // TBI capable of both full and half duplex
1064 regs.brar = 0xffffffff;
1065
1066 extstsEnable = false;
1067 acceptBroadcast = false;
1068 acceptMulticast = false;
1069 acceptUnicast = false;
1070 acceptPerfect = false;
1071 acceptArp = false;
1072}
1073
1074bool
1075NSGigE::doRxDmaRead()
1076{
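    // Start the descriptor/data read unless another DMA is already in
    // flight or the device is draining; in that case park the engine in
    // dmaReadWaiting and rxKick() will retry it on a later pass.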
1077 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1078 rxDmaState = dmaReading;
1079
1080 if (dmaPending() || drainState() != DrainState::Running)
1081 rxDmaState = dmaReadWaiting;
1082 else
1083 dmaRead(rxDmaAddr, rxDmaLen, &rxDmaReadEvent, (uint8_t*)rxDmaData);
1084
1085 return true;
1086}
1087
1088void
1089NSGigE::rxDmaReadDone()
1090{
1091 assert(rxDmaState == dmaReading);
1092 rxDmaState = dmaIdle;
1093
1094 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
1095 rxDmaAddr, rxDmaLen);
1096 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1097
1098 // If the transmit state machine has a pending DMA, let it go first
1099 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1100 txKick();
1101
1102 rxKick();
1103}
1104
1105bool
1106NSGigE::doRxDmaWrite()
1107{
1108 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1109 rxDmaState = dmaWriting;
1110
1111 if (dmaPending() || drainState() != DrainState::Running)
1112 rxDmaState = dmaWriteWaiting;
1113 else
1114 dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaWriteEvent, (uint8_t*)rxDmaData);
1115 return true;
1116}
1117
1118void
1119NSGigE::rxDmaWriteDone()
1120{
1121 assert(rxDmaState == dmaWriting);
1122 rxDmaState = dmaIdle;
1123
1124 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1125 rxDmaAddr, rxDmaLen);
1126 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1127
1128 // If the transmit state machine has a pending DMA, let it go first
1129 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1130 txKick();
1131
1132 rxKick();
1133}
1134
1135void
1136NSGigE::rxKick()
1137{
1138 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1139
1140 DPRINTF(EthernetSM,
1141 "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
1142 NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32);
1143
1144 Addr link, bufptr;
1145 uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts;
1146 uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts;
1147
1148 next:
1149 if (rxKickTick > curTick()) {
1150 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
1151 rxKickTick);
1152
1153 goto exit;
1154 }
1155
1156 // Go to the next state machine clock tick.
1157 rxKickTick = clockEdge(Cycles(1));
1158
1159 switch(rxDmaState) {
1160 case dmaReadWaiting:
1161 if (doRxDmaRead())
1162 goto exit;
1163 break;
1164 case dmaWriteWaiting:
1165 if (doRxDmaWrite())
1166 goto exit;
1167 break;
1168 default:
1169 break;
1170 }
1171
1172 link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link;
1173 bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr;
1174
1175 // see state machine from spec for details
1176 // the way this works is, if you finish work on one state and can
1177 // go directly to another, you do that through jumping to the
1178 // label "next". however, if you have intermediate work, like DMA
1179 // so that you can't go to the next state yet, you go to exit and
1180 // exit the loop. however, when the DMA is done it will trigger
1181 // an event and come back to this loop.
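    // Typical flow for one packet: rxIdle/rxAdvance fetch the next
    // descriptor (rxDescRead), rxFifoBlock pulls a packet off rxFifo and
    // alternates with rxFragWrite to DMA the payload into the guest buffer,
    // then rxDescWrite posts ISR_RXOK once cmdsts/extsts are written back.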
1182 switch (rxState) {
1183 case rxIdle:
1184 if (!rxEnable) {
1185 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
1186 goto exit;
1187 }
1188
1189 if (CRDD) {
1190 rxState = rxDescRefr;
1191
1192 rxDmaAddr = regs.rxdp & 0x3fffffff;
1193 rxDmaData =
1194 is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link;
1195 rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link);
1196 rxDmaFree = dmaDescFree;
1197
1198 descDmaReads++;
1199 descDmaRdBytes += rxDmaLen;
1200
1201 if (doRxDmaRead())
1202 goto exit;
1203 } else {
1204 rxState = rxDescRead;
1205
1206 rxDmaAddr = regs.rxdp & 0x3fffffff;
1207 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1208 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1209 rxDmaFree = dmaDescFree;
1210
1211 descDmaReads++;
1212 descDmaRdBytes += rxDmaLen;
1213
1214 if (doRxDmaRead())
1215 goto exit;
1216 }
1217 break;
1218
1219 case rxDescRefr:
1220 if (rxDmaState != dmaIdle)
1221 goto exit;
1222
1223 rxState = rxAdvance;
1224 break;
1225
1226 case rxDescRead:
1227 if (rxDmaState != dmaIdle)
1228 goto exit;
1229
1230 DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n",
1231 regs.rxdp & 0x3fffffff);
1232 DPRINTF(EthernetDesc,
1233 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1234 link, bufptr, cmdsts, extsts);
1235
1236 if (cmdsts & CMDSTS_OWN) {
1237 devIntrPost(ISR_RXIDLE);
1238 rxState = rxIdle;
1239 goto exit;
1240 } else {
1241 rxState = rxFifoBlock;
1242 rxFragPtr = bufptr;
1243 rxDescCnt = cmdsts & CMDSTS_LEN_MASK;
1244 }
1245 break;
1246
1247 case rxFifoBlock:
1248 if (!rxPacket) {
1249 /**
1250 * @todo in reality, we should be able to start processing
1251 * the packet as it arrives, and not have to wait for the
             * full packet to be in the receive fifo.
1253 */
1254 if (rxFifo.empty())
1255 goto exit;
1256
1257 DPRINTF(EthernetSM, "****processing receive of new packet****\n");
1258
1259 // If we don't have a packet, grab a new one from the fifo.
1260 rxPacket = rxFifo.front();
1261 rxPktBytes = rxPacket->length;
1262 rxPacketBufPtr = rxPacket->data;
1263
1264#if TRACING_ON
1265 if (DTRACE(Ethernet)) {
1266 IpPtr ip(rxPacket);
1267 if (ip) {
1268 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1269 TcpPtr tcp(ip);
1270 if (tcp) {
1271 DPRINTF(Ethernet,
1272 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1273 tcp->sport(), tcp->dport(), tcp->seq(),
1274 tcp->ack());
1275 }
1276 }
1277 }
1278#endif
1279
            // sanity check - I think the driver behaves like this
1281 assert(rxDescCnt >= rxPktBytes);
1282 rxFifo.pop();
1283 }
1284
1285
        // don't need the && rxDescCnt > 0 if the driver sanity check
        // above holds
1288 if (rxPktBytes > 0) {
1289 rxState = rxFragWrite;
1290 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1291 // check holds
1292 rxXferLen = rxPktBytes;
1293
1294 rxDmaAddr = rxFragPtr & 0x3fffffff;
1295 rxDmaData = rxPacketBufPtr;
1296 rxDmaLen = rxXferLen;
1297 rxDmaFree = dmaDataFree;
1298
1299 if (doRxDmaWrite())
1300 goto exit;
1301
1302 } else {
1303 rxState = rxDescWrite;
1304
1305 //if (rxPktBytes == 0) { /* packet is done */
1306 assert(rxPktBytes == 0);
1307 DPRINTF(EthernetSM, "done with receiving packet\n");
1308
1309 cmdsts |= CMDSTS_OWN;
1310 cmdsts &= ~CMDSTS_MORE;
1311 cmdsts |= CMDSTS_OK;
1312 cmdsts &= 0xffff0000;
1313 cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE
1314
1315#if 0
1316 /*
             * The driver only uses these for its own stats keeping,
             * which we don't care about; they aren't necessary for
             * functionality and doing this would just slow us down.
             * If a later driver version ends up using them for
             * functional purposes, just re-enable this block.
1322 */
1323 if (rxFilterEnable) {
1324 cmdsts &= ~CMDSTS_DEST_MASK;
1325 const EthAddr &dst = rxFifoFront()->dst();
1326 if (dst->unicast())
1327 cmdsts |= CMDSTS_DEST_SELF;
1328 if (dst->multicast())
1329 cmdsts |= CMDSTS_DEST_MULTI;
1330 if (dst->broadcast())
1331 cmdsts |= CMDSTS_DEST_MASK;
1332 }
1333#endif
1334
1335 IpPtr ip(rxPacket);
1336 if (extstsEnable && ip) {
1337 extsts |= EXTSTS_IPPKT;
1338 rxIpChecksums++;
1339 if (cksum(ip) != 0) {
1340 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
1341 extsts |= EXTSTS_IPERR;
1342 }
1343 TcpPtr tcp(ip);
1344 UdpPtr udp(ip);
1345 if (tcp) {
1346 extsts |= EXTSTS_TCPPKT;
1347 rxTcpChecksums++;
1348 if (cksum(tcp) != 0) {
1349 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
1350 extsts |= EXTSTS_TCPERR;
1351
1352 }
1353 } else if (udp) {
1354 extsts |= EXTSTS_UDPPKT;
1355 rxUdpChecksums++;
1356 if (cksum(udp) != 0) {
1357 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
1358 extsts |= EXTSTS_UDPERR;
1359 }
1360 }
1361 }
1362 rxPacket = 0;
1363
1364 /*
1365 * the driver seems to always receive into desc buffers
1366 * of size 1514, so you never have a pkt that is split
1367 * into multiple descriptors on the receive side, so
             * I don't implement that case, hence the assert above.
1369 */
1370
1371 DPRINTF(EthernetDesc,
1372 "rxDesc: addr=%08x writeback cmdsts extsts\n",
1373 regs.rxdp & 0x3fffffff);
1374 DPRINTF(EthernetDesc,
1375 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1376 link, bufptr, cmdsts, extsts);
1377
1378 rxDmaAddr = regs.rxdp & 0x3fffffff;
1379 rxDmaData = &cmdsts;
1380 if (is64bit) {
1381 rxDmaAddr += offsetof(ns_desc64, cmdsts);
1382 rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
1383 } else {
1384 rxDmaAddr += offsetof(ns_desc32, cmdsts);
1385 rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
1386 }
1387 rxDmaFree = dmaDescFree;
1388
1389 descDmaWrites++;
1390 descDmaWrBytes += rxDmaLen;
1391
1392 if (doRxDmaWrite())
1393 goto exit;
1394 }
1395 break;
1396
1397 case rxFragWrite:
1398 if (rxDmaState != dmaIdle)
1399 goto exit;
1400
1401 rxPacketBufPtr += rxXferLen;
1402 rxFragPtr += rxXferLen;
1403 rxPktBytes -= rxXferLen;
1404
1405 rxState = rxFifoBlock;
1406 break;
1407
1408 case rxDescWrite:
1409 if (rxDmaState != dmaIdle)
1410 goto exit;
1411
1412 assert(cmdsts & CMDSTS_OWN);
1413
1414 assert(rxPacket == 0);
1415 devIntrPost(ISR_RXOK);
1416
1417 if (cmdsts & CMDSTS_INTR)
1418 devIntrPost(ISR_RXDESC);
1419
1420 if (!rxEnable) {
1421 DPRINTF(EthernetSM, "Halting the RX state machine\n");
1422 rxState = rxIdle;
1423 goto exit;
1424 } else
1425 rxState = rxAdvance;
1426 break;
1427
1428 case rxAdvance:
1429 if (link == 0) {
1430 devIntrPost(ISR_RXIDLE);
1431 rxState = rxIdle;
1432 CRDD = true;
1433 goto exit;
1434 } else {
1435 if (rxDmaState != dmaIdle)
1436 goto exit;
1437 rxState = rxDescRead;
1438 regs.rxdp = link;
1439 CRDD = false;
1440
1441 rxDmaAddr = regs.rxdp & 0x3fffffff;
1442 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1443 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1444 rxDmaFree = dmaDescFree;
1445
1446 if (doRxDmaRead())
1447 goto exit;
1448 }
1449 break;
1450
1451 default:
1452 panic("Invalid rxState!");
1453 }
1454
1455 DPRINTF(EthernetSM, "entering next rxState=%s\n",
1456 NsRxStateStrings[rxState]);
1457 goto next;
1458
1459 exit:
1460 /**
1461 * @todo do we want to schedule a future kick?
1462 */
1463 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
1464 NsRxStateStrings[rxState]);
1465
1466 if (!rxKickEvent.scheduled())
1467 schedule(rxKickEvent, rxKickTick);
1468}
1469
1470void
1471NSGigE::transmit()
1472{
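    // Hand the packet at the head of txFifo to the attached EtherInt; on a
    // successful send, pop it, count the bytes and post ISR_TXOK. If the
    // link was busy, or more packets remain, retry after retryTime.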
1473 if (txFifo.empty()) {
1474 DPRINTF(Ethernet, "nothing to transmit\n");
1475 return;
1476 }
1477
1478 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
1479 txFifo.size());
1480 if (interface->sendPacket(txFifo.front())) {
1481#if TRACING_ON
1482 if (DTRACE(Ethernet)) {
1483 IpPtr ip(txFifo.front());
1484 if (ip) {
1485 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1486 TcpPtr tcp(ip);
1487 if (tcp) {
1488 DPRINTF(Ethernet,
1489 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1490 tcp->sport(), tcp->dport(), tcp->seq(),
1491 tcp->ack());
1492 }
1493 }
1494 }
1495#endif
1496
1497 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
1498 txBytes += txFifo.front()->length;
1499 txPackets++;
1500
1501 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
1502 txFifo.avail());
1503 txFifo.pop();
1504
1505 /*
1506 * normally do a writeback of the descriptor here, and ONLY
1507 * after that is done, send this interrupt. but since our
1508 * stuff never actually fails, just do this interrupt here,
1509 * otherwise the code has to stray from this nice format.
1510 * besides, it's functionally the same.
1511 */
1512 devIntrPost(ISR_TXOK);
1513 }
1514
1515 if (!txFifo.empty() && !txEvent.scheduled()) {
1516 DPRINTF(Ethernet, "reschedule transmit\n");
1517 schedule(txEvent, curTick() + retryTime);
1518 }
1519}
1520
1521bool
1522NSGigE::doTxDmaRead()
1523{
1524 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1525 txDmaState = dmaReading;
1526
1527 if (dmaPending() || drainState() != DrainState::Running)
1528 txDmaState = dmaReadWaiting;
1529 else
1530 dmaRead(txDmaAddr, txDmaLen, &txDmaReadEvent, (uint8_t*)txDmaData);
1531
1532 return true;
1533}
1534
1535void
1536NSGigE::txDmaReadDone()
1537{
1538 assert(txDmaState == dmaReading);
1539 txDmaState = dmaIdle;
1540
1541 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1542 txDmaAddr, txDmaLen);
1543 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1544
1545 // If the receive state machine has a pending DMA, let it go first
1546 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1547 rxKick();
1548
1549 txKick();
1550}
1551
1552bool
1553NSGigE::doTxDmaWrite()
1554{
1555 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1556 txDmaState = dmaWriting;
1557
1558 if (dmaPending() || drainState() != DrainState::Running)
1559 txDmaState = dmaWriteWaiting;
1560 else
1561 dmaWrite(txDmaAddr, txDmaLen, &txDmaWriteEvent, (uint8_t*)txDmaData);
1562 return true;
1563}
1564
1565void
1566NSGigE::txDmaWriteDone()
1567{
1568 assert(txDmaState == dmaWriting);
1569 txDmaState = dmaIdle;
1570
1571 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1572 txDmaAddr, txDmaLen);
1573 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1574
1575 // If the receive state machine has a pending DMA, let it go first
1576 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1577 rxKick();
1578
1579 txKick();
1580}
1581
1582void
1583NSGigE::txKick()
1584{
1585 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1586
1587 DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n",
1588 NsTxStateStrings[txState], is64bit ? 64 : 32);
1589
1590 Addr link, bufptr;
1591 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts;
1592 uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts;
1593
1594 next:
1595 if (txKickTick > curTick()) {
1596 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
1597 txKickTick);
1598 goto exit;
1599 }
1600
1601 // Go to the next state machine clock tick.
1602 txKickTick = clockEdge(Cycles(1));
1603
1604 switch(txDmaState) {
1605 case dmaReadWaiting:
1606 if (doTxDmaRead())
1607 goto exit;
1608 break;
1609 case dmaWriteWaiting:
1610 if (doTxDmaWrite())
1611 goto exit;
1612 break;
1613 default:
1614 break;
1615 }
1616
1617 link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link;
1618 bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr;
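    // Typical flow for one packet: txIdle/txAdvance fetch a descriptor
    // (txDescRead), txFifoBlock and txFragRead DMA the payload into
    // txPacket, and once the last fragment is in (no CMDSTS_MORE set) the
    // checksums are filled in if VTCR_PPCHK and EXTSTS are enabled, the
    // packet is pushed onto txFifo, transmit() is called, and the
    // descriptor is written back with CMDSTS_OK.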
1619 switch (txState) {
1620 case txIdle:
1621 if (!txEnable) {
1622 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
1623 goto exit;
1624 }
1625
1626 if (CTDD) {
1627 txState = txDescRefr;
1628
1629 txDmaAddr = regs.txdp & 0x3fffffff;
1630 txDmaData =
1631 is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link;
1632 txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link);
1633 txDmaFree = dmaDescFree;
1634
1635 descDmaReads++;
1636 descDmaRdBytes += txDmaLen;
1637
1638 if (doTxDmaRead())
1639 goto exit;
1640
1641 } else {
1642 txState = txDescRead;
1643
1644 txDmaAddr = regs.txdp & 0x3fffffff;
1645 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
1646 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
1647 txDmaFree = dmaDescFree;
1648
1649 descDmaReads++;
1650 descDmaRdBytes += txDmaLen;
1651
1652 if (doTxDmaRead())
1653 goto exit;
1654 }
1655 break;
1656
1657 case txDescRefr:
1658 if (txDmaState != dmaIdle)
1659 goto exit;
1660
1661 txState = txAdvance;
1662 break;
1663
1664 case txDescRead:
1665 if (txDmaState != dmaIdle)
1666 goto exit;
1667
1668 DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n",
1669 regs.txdp & 0x3fffffff);
1670 DPRINTF(EthernetDesc,
1671 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
1672 link, bufptr, cmdsts, extsts);
1673
1674 if (cmdsts & CMDSTS_OWN) {
1675 txState = txFifoBlock;
1676 txFragPtr = bufptr;
1677 txDescCnt = cmdsts & CMDSTS_LEN_MASK;
1678 } else {
1679 devIntrPost(ISR_TXIDLE);
1680 txState = txIdle;
1681 goto exit;
1682 }
1683 break;
1684
1685 case txFifoBlock:
1686 if (!txPacket) {
1687 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
1688 txPacket = make_shared<EthPacketData>(16384);
1689 txPacketBufPtr = txPacket->data;
1690 }
1691
1692 if (txDescCnt == 0) {
1693 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
1694 if (cmdsts & CMDSTS_MORE) {
1695 DPRINTF(EthernetSM, "there are more descriptors to come\n");
1696 txState = txDescWrite;
1697
1698 cmdsts &= ~CMDSTS_OWN;
1699
1700 txDmaAddr = regs.txdp & 0x3fffffff;
1701 txDmaData = &cmdsts;
1702 if (is64bit) {
1703 txDmaAddr += offsetof(ns_desc64, cmdsts);
1704 txDmaLen = sizeof(txDesc64.cmdsts);
1705 } else {
1706 txDmaAddr += offsetof(ns_desc32, cmdsts);
1707 txDmaLen = sizeof(txDesc32.cmdsts);
1708 }
1709 txDmaFree = dmaDescFree;
1710
1711 if (doTxDmaWrite())
1712 goto exit;
1713
1714 } else { /* this packet is totally done */
1715 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
                /* deal with the packet that just finished */
1717 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
1718 IpPtr ip(txPacket);
1719 if (extsts & EXTSTS_UDPPKT) {
1720 UdpPtr udp(ip);
1721 if (udp) {
1722 udp->sum(0);
1723 udp->sum(cksum(udp));
1724 txUdpChecksums++;
1725 } else {
1726 Debug::breakpoint();
1727 warn_once("UDPPKT set, but not UDP!\n");
1728 }
1729 } else if (extsts & EXTSTS_TCPPKT) {
1730 TcpPtr tcp(ip);
1731 if (tcp) {
1732 tcp->sum(0);
1733 tcp->sum(cksum(tcp));
1734 txTcpChecksums++;
1735 } else {
                        warn_once("TCPPKT set, but not TCP!\n");
1737 }
1738 }
1739 if (extsts & EXTSTS_IPPKT) {
1740 if (ip) {
1741 ip->sum(0);
1742 ip->sum(cksum(ip));
1743 txIpChecksums++;
1744 } else {
                        warn_once("IPPKT set, but not IP!\n");
1746 }
1747 }
1748 }
1749
1750 txPacket->simLength = txPacketBufPtr - txPacket->data;
1751 txPacket->length = txPacketBufPtr - txPacket->data;
                // the receive side can't handle a packet any bigger
                // than this, so make sure we never send one
                if (txPacket->length > 1514)
                    panic("transmit packet too large, %d > 1514\n",
                          txPacket->length);
1757
1758#ifndef NDEBUG
1759 bool success =
1760#endif
1761 txFifo.push(txPacket);
1762 assert(success);
1763
1764 /*
                 * the following section is not to spec, but
1766 * functionally shouldn't be any different. normally,
1767 * the chip will wait til the transmit has occurred
1768 * before writing back the descriptor because it has
1769 * to wait to see that it was successfully transmitted
1770 * to decide whether to set CMDSTS_OK or not.
1771 * however, in the simulator since it is always
1772 * successfully transmitted, and writing it exactly to
1773 * spec would complicate the code, we just do it here
1774 */
1775
1776 cmdsts &= ~CMDSTS_OWN;
1777 cmdsts |= CMDSTS_OK;
1778
1779 DPRINTF(EthernetDesc,
1780 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
1781 cmdsts, extsts);
1782
1783 txDmaFree = dmaDescFree;
1784 txDmaAddr = regs.txdp & 0x3fffffff;
1785 txDmaData = &cmdsts;
1786 if (is64bit) {
1787 txDmaAddr += offsetof(ns_desc64, cmdsts);
1788 txDmaLen =
1789 sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts);
1790 } else {
1791 txDmaAddr += offsetof(ns_desc32, cmdsts);
1792 txDmaLen =
1793 sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts);
1794 }
1795
1796 descDmaWrites++;
1797 descDmaWrBytes += txDmaLen;
1798
1799 transmit();
1800 txPacket = 0;
1801
1802 if (!txEnable) {
1803 DPRINTF(EthernetSM, "halting TX state machine\n");
1804 txState = txIdle;
1805 goto exit;
1806 } else
1807 txState = txAdvance;
1808
1809 if (doTxDmaWrite())
1810 goto exit;
1811 }
1812 } else {
1813 DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
1814 if (!txFifo.full()) {
1815 txState = txFragRead;
1816
1817 /*
1818 * The number of bytes transferred is either whatever
1819 * is left in the descriptor (txDescCnt), or if there
1820 * is not enough room in the fifo, just whatever room
1821 * is left in the fifo
1822 */
1823 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());
1824
1825 txDmaAddr = txFragPtr & 0x3fffffff;
1826 txDmaData = txPacketBufPtr;
1827 txDmaLen = txXferLen;
1828 txDmaFree = dmaDataFree;
1829
1830 if (doTxDmaRead())
1831 goto exit;
1832 } else {
1833 txState = txFifoBlock;
1834 transmit();
1835
1836 goto exit;
1837 }
1838
1839 }
1840 break;
1841
1842 case txFragRead:
1843 if (txDmaState != dmaIdle)
1844 goto exit;
1845
1846 txPacketBufPtr += txXferLen;
1847 txFragPtr += txXferLen;
1848 txDescCnt -= txXferLen;
1849 txFifo.reserve(txXferLen);
1850
1851 txState = txFifoBlock;
1852 break;
1853
1854 case txDescWrite:
1855 if (txDmaState != dmaIdle)
1856 goto exit;
1857
1858 if (cmdsts & CMDSTS_INTR)
1859 devIntrPost(ISR_TXDESC);
1860
1861 if (!txEnable) {
1862 DPRINTF(EthernetSM, "halting TX state machine\n");
1863 txState = txIdle;
1864 goto exit;
1865 } else
1866 txState = txAdvance;
1867 break;
1868
1869 case txAdvance:
1870 if (link == 0) {
1871 devIntrPost(ISR_TXIDLE);
1872 txState = txIdle;
1873 goto exit;
1874 } else {
1875 if (txDmaState != dmaIdle)
1876 goto exit;
1877 txState = txDescRead;
1878 regs.txdp = link;
1879 CTDD = false;
1880
1881 txDmaAddr = link & 0x3fffffff;
1882 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
1883 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
1884 txDmaFree = dmaDescFree;
1885
1886 if (doTxDmaRead())
1887 goto exit;
1888 }
1889 break;
1890
1891 default:
1892 panic("invalid state");
1893 }
1894
1895 DPRINTF(EthernetSM, "entering next txState=%s\n",
1896 NsTxStateStrings[txState]);
1897 goto next;
1898
1899 exit:
1900 /**
1901 * @todo do we want to schedule a future kick?
1902 */
1903 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
1904 NsTxStateStrings[txState]);
1905
1906 if (!txKickEvent.scheduled())
1907 schedule(txKickEvent, txKickTick);
1908}
1909
1910/**
1911 * Advance the EEPROM state machine
1912 * Called on rising edge of EEPROM clock bit in MEAR
1913 */
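// The protocol modelled is the usual three-wire serial EEPROM handshake:
// wait for a start bit, shift in a 2-bit opcode (only READ is supported)
// and a 6-bit address, then shift the 16-bit word out MSB-first on EEDO,
// one bit per rising clock edge.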
1914void
1915NSGigE::eepromKick()
1916{
1917 switch (eepromState) {
1918
1919 case eepromStart:
1920
1921 // Wait for start bit
1922 if (regs.mear & MEAR_EEDI) {
1923 // Set up to get 2 opcode bits
1924 eepromState = eepromGetOpcode;
1925 eepromBitsToRx = 2;
1926 eepromOpcode = 0;
1927 }
1928 break;
1929
1930 case eepromGetOpcode:
1931 eepromOpcode <<= 1;
1932 eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
1933 --eepromBitsToRx;
1934
1935 // Done getting opcode
1936 if (eepromBitsToRx == 0) {
1937 if (eepromOpcode != EEPROM_READ)
1938 panic("only EEPROM reads are implemented!");
1939
1940 // Set up to get address
1941 eepromState = eepromGetAddress;
1942 eepromBitsToRx = 6;
1943 eepromAddress = 0;
1944 }
1945 break;
1946
1947 case eepromGetAddress:
1948 eepromAddress <<= 1;
1949 eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
1950 --eepromBitsToRx;
1951
1952 // Done getting address
1953 if (eepromBitsToRx == 0) {
1954
1955 if (eepromAddress >= EEPROM_SIZE)
1956 panic("EEPROM read access out of range!");
1957
1958 switch (eepromAddress) {
1959
1960 case EEPROM_PMATCH2_ADDR:
1961 eepromData = rom.perfectMatch[5];
1962 eepromData <<= 8;
1963 eepromData += rom.perfectMatch[4];
1964 break;
1965
1966 case EEPROM_PMATCH1_ADDR:
1967 eepromData = rom.perfectMatch[3];
1968 eepromData <<= 8;
1969 eepromData += rom.perfectMatch[2];
1970 break;
1971
1972 case EEPROM_PMATCH0_ADDR:
1973 eepromData = rom.perfectMatch[1];
1974 eepromData <<= 8;
1975 eepromData += rom.perfectMatch[0];
1976 break;
1977
1978 default:
1979 panic("FreeBSD driver only uses EEPROM to read PMATCH!");
1980 }
1981 // Set up to read data
1982 eepromState = eepromRead;
1983 eepromBitsToRx = 16;
1984
1985 // Clear data in bit
1986 regs.mear &= ~MEAR_EEDI;
1987 }
1988 break;
1989
1990 case eepromRead:
1991 // Clear Data Out bit
1992 regs.mear &= ~MEAR_EEDO;
1993 // Set bit to value of current EEPROM bit
1994 regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;
1995
1996 eepromData <<= 1;
1997 --eepromBitsToRx;
1998
1999 // All done
2000 if (eepromBitsToRx == 0) {
2001 eepromState = eepromStart;
2002 }
2003 break;
2004
2005 default:
2006 panic("invalid EEPROM state");
2007 }
2008
2009}
2010
2011void
2012NSGigE::transferDone()
2013{
2014 if (txFifo.empty()) {
2015 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2016 return;
2017 }
2018
2019 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2020
2021 reschedule(txEvent, clockEdge(Cycles(1)), true);
2022}
2023
2024bool
2025NSGigE::rxFilter(const EthPacketPtr &packet)
2026{
2027 EthPtr eth = packet;
2028 bool drop = true;
2029 string type;
2030
2031 const EthAddr &dst = eth->dst();
2032 if (dst.unicast()) {
2033 // If we're accepting all unicast addresses
2034 if (acceptUnicast)
2035 drop = false;
2036
2037 // If we make a perfect match
2038 if (acceptPerfect && dst == rom.perfectMatch)
2039 drop = false;
2040
2041 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2042 drop = false;
2043
2044 } else if (dst.broadcast()) {
2045 // if we're accepting broadcasts
2046 if (acceptBroadcast)
2047 drop = false;
2048
2049 } else if (dst.multicast()) {
2050 // if we're accepting all multicasts
2051 if (acceptMulticast)
2052 drop = false;
2053
2054 // Multicast hashing faked - all packets accepted
2055 if (multicastHashEnable)
2056 drop = false;
2057 }
2058
2059 if (drop) {
2060 DPRINTF(Ethernet, "rxFilter drop\n");
2061 DDUMP(EthernetData, packet->data, packet->length);
2062 }
2063
2064 return drop;
2065}
2066
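/*
 * Entry point for packets arriving from the link. Returns false (and
 * posts ISR_RXORN) only when the packet won't fit in rxFifo; packets
 * dropped because receive or filtering is disabled, or rejected by
 * rxFilter(), are still reported as accepted.
 */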
2067bool
2068NSGigE::recvPacket(EthPacketPtr packet)
2069{
2070 rxBytes += packet->length;
2071 rxPackets++;
2072
2073 DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
2074 rxFifo.avail());
2075
2076 if (!rxEnable) {
2077 DPRINTF(Ethernet, "receive disabled...packet dropped\n");
2078 return true;
2079 }
2080
2081 if (!rxFilterEnable) {
2082 DPRINTF(Ethernet,
2083 "receive packet filtering disabled . . . packet dropped\n");
2084 return true;
2085 }
2086
2087 if (rxFilter(packet)) {
2088 DPRINTF(Ethernet, "packet filtered...dropped\n");
2089 return true;
2090 }
2091
2092 if (rxFifo.avail() < packet->length) {
2093#if TRACING_ON
2094 IpPtr ip(packet);
2095 TcpPtr tcp(ip);
2096 if (ip) {
2097 DPRINTF(Ethernet,
2098 "packet won't fit in receive buffer...pkt ID %d dropped\n",
2099 ip->id());
2100 if (tcp) {
2101 DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
2102 }
2103 }
2104#endif
2105 droppedPackets++;
2106 devIntrPost(ISR_RXORN);
2107 return false;
2108 }
2109
2110 rxFifo.push(packet);
2111
2112 rxKick();
2113 return true;
2114}
2115
2116
2117void
2118NSGigE::drainResume()
2119{
2120 Drainable::drainResume();
2121
2122 // During drain we could have left the state machines in a waiting state and
2123 // they wouldn't get out until some other event occurred to kick them.
2124 // This way they'll get out immediately
2125 txKick();
2126 rxKick();
2127}
2128
2129
2130//=====================================================================
2131//
2132//
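// Checkpointing: serialize()/unserialize() save and restore the device
// registers, filter ROM, FIFOs, cached descriptors, the tx/rx/EEPROM
// state machines, and any pending transmit/interrupt event times.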
2133void
2134NSGigE::serialize(CheckpointOut &cp) const
2135{
2136 // Serialize the PciDevice base class
2137 PciDevice::serialize(cp);
2138
2139 /*
2140 * Finalize any DMA events now.
2141 */
2142 // @todo will mem system save pending dma?
2143
2144 /*
2145 * Serialize the device registers
2146 */
2147 SERIALIZE_SCALAR(regs.command);
2148 SERIALIZE_SCALAR(regs.config);
2149 SERIALIZE_SCALAR(regs.mear);
2150 SERIALIZE_SCALAR(regs.ptscr);
2151 SERIALIZE_SCALAR(regs.isr);
2152 SERIALIZE_SCALAR(regs.imr);
2153 SERIALIZE_SCALAR(regs.ier);
2154 SERIALIZE_SCALAR(regs.ihr);
2155 SERIALIZE_SCALAR(regs.txdp);
2156 SERIALIZE_SCALAR(regs.txdp_hi);
2157 SERIALIZE_SCALAR(regs.txcfg);
2158 SERIALIZE_SCALAR(regs.gpior);
2159 SERIALIZE_SCALAR(regs.rxdp);
2160 SERIALIZE_SCALAR(regs.rxdp_hi);
2161 SERIALIZE_SCALAR(regs.rxcfg);
2162 SERIALIZE_SCALAR(regs.pqcr);
2163 SERIALIZE_SCALAR(regs.wcsr);
2164 SERIALIZE_SCALAR(regs.pcr);
2165 SERIALIZE_SCALAR(regs.rfcr);
2166 SERIALIZE_SCALAR(regs.rfdr);
2167 SERIALIZE_SCALAR(regs.brar);
2168 SERIALIZE_SCALAR(regs.brdr);
2169 SERIALIZE_SCALAR(regs.srr);
2170 SERIALIZE_SCALAR(regs.mibc);
2171 SERIALIZE_SCALAR(regs.vrcr);
2172 SERIALIZE_SCALAR(regs.vtcr);
2173 SERIALIZE_SCALAR(regs.vdr);
2174 SERIALIZE_SCALAR(regs.ccsr);
2175 SERIALIZE_SCALAR(regs.tbicr);
2176 SERIALIZE_SCALAR(regs.tbisr);
2177 SERIALIZE_SCALAR(regs.tanar);
2178 SERIALIZE_SCALAR(regs.tanlpar);
2179 SERIALIZE_SCALAR(regs.taner);
2180 SERIALIZE_SCALAR(regs.tesr);
2181
2182 SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2183 SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2184
2185 SERIALIZE_SCALAR(ioEnable);
2186
2187 /*
2188 * Serialize the data Fifos
2189 */
2190 rxFifo.serialize("rxFifo", cp);
2191 txFifo.serialize("txFifo", cp);
2192
2193 /*
2194 * Serialize the various helper variables
2195 */
2196 bool txPacketExists = txPacket != nullptr;
2197 SERIALIZE_SCALAR(txPacketExists);
2198 if (txPacketExists) {
2199 txPacket->simLength = txPacketBufPtr - txPacket->data;
2200 txPacket->length = txPacketBufPtr - txPacket->data;
2201 txPacket->serialize("txPacket", cp);
2202 uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
2203 SERIALIZE_SCALAR(txPktBufPtr);
2204 }
2205
2206 bool rxPacketExists = rxPacket != nullptr;
2207 SERIALIZE_SCALAR(rxPacketExists);
2208 if (rxPacketExists) {
2209 rxPacket->serialize("rxPacket", cp);
2210 uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
2211 SERIALIZE_SCALAR(rxPktBufPtr);
2212 }
2213
2214 SERIALIZE_SCALAR(txXferLen);
2215 SERIALIZE_SCALAR(rxXferLen);
2216
2217 /*
2218 * Serialize Cached Descriptors
2219 */
2220 SERIALIZE_SCALAR(rxDesc64.link);
2221 SERIALIZE_SCALAR(rxDesc64.bufptr);
2222 SERIALIZE_SCALAR(rxDesc64.cmdsts);
2223 SERIALIZE_SCALAR(rxDesc64.extsts);
2224 SERIALIZE_SCALAR(txDesc64.link);
2225 SERIALIZE_SCALAR(txDesc64.bufptr);
2226 SERIALIZE_SCALAR(txDesc64.cmdsts);
2227 SERIALIZE_SCALAR(txDesc64.extsts);
2228 SERIALIZE_SCALAR(rxDesc32.link);
2229 SERIALIZE_SCALAR(rxDesc32.bufptr);
2230 SERIALIZE_SCALAR(rxDesc32.cmdsts);
2231 SERIALIZE_SCALAR(rxDesc32.extsts);
2232 SERIALIZE_SCALAR(txDesc32.link);
2233 SERIALIZE_SCALAR(txDesc32.bufptr);
2234 SERIALIZE_SCALAR(txDesc32.cmdsts);
2235 SERIALIZE_SCALAR(txDesc32.extsts);
2236 SERIALIZE_SCALAR(extstsEnable);
2237
2238 /*
2239 * Serialize tx state machine
2240 */
2241 int txState = this->txState;
2242 SERIALIZE_SCALAR(txState);
2243 SERIALIZE_SCALAR(txEnable);
2244 SERIALIZE_SCALAR(CTDD);
2245 SERIALIZE_SCALAR(txFragPtr);
2246 SERIALIZE_SCALAR(txDescCnt);
2247 int txDmaState = this->txDmaState;
2248 SERIALIZE_SCALAR(txDmaState);
2249 SERIALIZE_SCALAR(txKickTick);
2250
2251 /*
2252 * Serialize rx state machine
2253 */
2254 int rxState = this->rxState;
2255 SERIALIZE_SCALAR(rxState);
2256 SERIALIZE_SCALAR(rxEnable);
2257 SERIALIZE_SCALAR(CRDD);
2258 SERIALIZE_SCALAR(rxPktBytes);
2259 SERIALIZE_SCALAR(rxFragPtr);
2260 SERIALIZE_SCALAR(rxDescCnt);
2261 int rxDmaState = this->rxDmaState;
2262 SERIALIZE_SCALAR(rxDmaState);
2263 SERIALIZE_SCALAR(rxKickTick);
2264
2265 /*
2266 * Serialize EEPROM state machine
2267 */
2268 int eepromState = this->eepromState;
2269 SERIALIZE_SCALAR(eepromState);
2270 SERIALIZE_SCALAR(eepromClk);
2271 SERIALIZE_SCALAR(eepromBitsToRx);
2272 SERIALIZE_SCALAR(eepromOpcode);
2273 SERIALIZE_SCALAR(eepromAddress);
2274 SERIALIZE_SCALAR(eepromData);
2275
2276 /*
2277 * If there's a pending transmit, store the time so we can
2278 * reschedule it later
2279 */
2280 Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick() : 0;
2281 SERIALIZE_SCALAR(transmitTick);
2282
2283 /*
2284 * receive address filter settings
2285 */
2286 SERIALIZE_SCALAR(rxFilterEnable);
2287 SERIALIZE_SCALAR(acceptBroadcast);
2288 SERIALIZE_SCALAR(acceptMulticast);
2289 SERIALIZE_SCALAR(acceptUnicast);
2290 SERIALIZE_SCALAR(acceptPerfect);
2291 SERIALIZE_SCALAR(acceptArp);
2292 SERIALIZE_SCALAR(multicastHashEnable);
2293
2294 /*
2295 * Keep track of pending interrupt status.
2296 */
2297 SERIALIZE_SCALAR(intrTick);
2298 SERIALIZE_SCALAR(cpuPendingIntr);
2299 Tick intrEventTick = 0;
2300 if (intrEvent)
2301 intrEventTick = intrEvent->when();
2302 SERIALIZE_SCALAR(intrEventTick);
2303
2304}
2305
2306void
2307NSGigE::unserialize(CheckpointIn &cp)
2308{
2309 // Unserialize the PciDevice base class
2310 PciDevice::unserialize(cp);
2311
2312 UNSERIALIZE_SCALAR(regs.command);
2313 UNSERIALIZE_SCALAR(regs.config);
2314 UNSERIALIZE_SCALAR(regs.mear);
2315 UNSERIALIZE_SCALAR(regs.ptscr);
2316 UNSERIALIZE_SCALAR(regs.isr);
2317 UNSERIALIZE_SCALAR(regs.imr);
2318 UNSERIALIZE_SCALAR(regs.ier);
2319 UNSERIALIZE_SCALAR(regs.ihr);
2320 UNSERIALIZE_SCALAR(regs.txdp);
2321 UNSERIALIZE_SCALAR(regs.txdp_hi);
2322 UNSERIALIZE_SCALAR(regs.txcfg);
2323 UNSERIALIZE_SCALAR(regs.gpior);
2324 UNSERIALIZE_SCALAR(regs.rxdp);
2325 UNSERIALIZE_SCALAR(regs.rxdp_hi);
2326 UNSERIALIZE_SCALAR(regs.rxcfg);
2327 UNSERIALIZE_SCALAR(regs.pqcr);
2328 UNSERIALIZE_SCALAR(regs.wcsr);
2329 UNSERIALIZE_SCALAR(regs.pcr);
2330 UNSERIALIZE_SCALAR(regs.rfcr);
2331 UNSERIALIZE_SCALAR(regs.rfdr);
2332 UNSERIALIZE_SCALAR(regs.brar);
2333 UNSERIALIZE_SCALAR(regs.brdr);
2334 UNSERIALIZE_SCALAR(regs.srr);
2335 UNSERIALIZE_SCALAR(regs.mibc);
2336 UNSERIALIZE_SCALAR(regs.vrcr);
2337 UNSERIALIZE_SCALAR(regs.vtcr);
2338 UNSERIALIZE_SCALAR(regs.vdr);
2339 UNSERIALIZE_SCALAR(regs.ccsr);
2340 UNSERIALIZE_SCALAR(regs.tbicr);
2341 UNSERIALIZE_SCALAR(regs.tbisr);
2342 UNSERIALIZE_SCALAR(regs.tanar);
2343 UNSERIALIZE_SCALAR(regs.tanlpar);
2344 UNSERIALIZE_SCALAR(regs.taner);
2345 UNSERIALIZE_SCALAR(regs.tesr);
2346
2347 UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2348 UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2349
2350 UNSERIALIZE_SCALAR(ioEnable);
2351
2352 /*
2353 * unserialize the data fifos
2354 */
2355 rxFifo.unserialize("rxFifo", cp);
2356 txFifo.unserialize("txFifo", cp);
2357
2358 /*
2359 * unserialize the various helper variables
2360 */
2361 bool txPacketExists;
2362 UNSERIALIZE_SCALAR(txPacketExists);
2363 if (txPacketExists) {
2364 txPacket = make_shared<EthPacketData>(16384);
2365 txPacket->unserialize("txPacket", cp);
2366 uint32_t txPktBufPtr;
2367 UNSERIALIZE_SCALAR(txPktBufPtr);
2368 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
2369 } else
2370 txPacket = 0;
2371
2372 bool rxPacketExists;
2373 UNSERIALIZE_SCALAR(rxPacketExists);
2374 rxPacket = 0;
2375 if (rxPacketExists) {
2376 rxPacket = make_shared<EthPacketData>();
2377 rxPacket->unserialize("rxPacket", cp);
2378 uint32_t rxPktBufPtr;
2379 UNSERIALIZE_SCALAR(rxPktBufPtr);
2380 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
2381 } else
2382 rxPacket = 0;
2383
2384 UNSERIALIZE_SCALAR(txXferLen);
2385 UNSERIALIZE_SCALAR(rxXferLen);
2386
2387 /*
2388 * Unserialize Cached Descriptors
2389 */
2390 UNSERIALIZE_SCALAR(rxDesc64.link);
2391 UNSERIALIZE_SCALAR(rxDesc64.bufptr);
2392 UNSERIALIZE_SCALAR(rxDesc64.cmdsts);
2393 UNSERIALIZE_SCALAR(rxDesc64.extsts);
2394 UNSERIALIZE_SCALAR(txDesc64.link);
2395 UNSERIALIZE_SCALAR(txDesc64.bufptr);
2396 UNSERIALIZE_SCALAR(txDesc64.cmdsts);
2397 UNSERIALIZE_SCALAR(txDesc64.extsts);
2398 UNSERIALIZE_SCALAR(rxDesc32.link);
2399 UNSERIALIZE_SCALAR(rxDesc32.bufptr);
2400 UNSERIALIZE_SCALAR(rxDesc32.cmdsts);
2401 UNSERIALIZE_SCALAR(rxDesc32.extsts);
2402 UNSERIALIZE_SCALAR(txDesc32.link);
2403 UNSERIALIZE_SCALAR(txDesc32.bufptr);
2404 UNSERIALIZE_SCALAR(txDesc32.cmdsts);
2405 UNSERIALIZE_SCALAR(txDesc32.extsts);
2406 UNSERIALIZE_SCALAR(extstsEnable);
2407
2408 /*
2409 * unserialize tx state machine
2410 */
2411 int txState;
2412 UNSERIALIZE_SCALAR(txState);
2413 this->txState = (TxState) txState;
2414 UNSERIALIZE_SCALAR(txEnable);
2415 UNSERIALIZE_SCALAR(CTDD);
2416 UNSERIALIZE_SCALAR(txFragPtr);
2417 UNSERIALIZE_SCALAR(txDescCnt);
2418 int txDmaState;
2419 UNSERIALIZE_SCALAR(txDmaState);
2420 this->txDmaState = (DmaState) txDmaState;
2421 UNSERIALIZE_SCALAR(txKickTick);
2422 if (txKickTick)
2423 schedule(txKickEvent, txKickTick);
2424
2425 /*
2426 * unserialize rx state machine
2427 */
2428 int rxState;
2429 UNSERIALIZE_SCALAR(rxState);
2430 this->rxState = (RxState) rxState;
2431 UNSERIALIZE_SCALAR(rxEnable);
2432 UNSERIALIZE_SCALAR(CRDD);
2433 UNSERIALIZE_SCALAR(rxPktBytes);
2434 UNSERIALIZE_SCALAR(rxFragPtr);
2435 UNSERIALIZE_SCALAR(rxDescCnt);
2436 int rxDmaState;
2437 UNSERIALIZE_SCALAR(rxDmaState);
2438 this->rxDmaState = (DmaState) rxDmaState;
2439 UNSERIALIZE_SCALAR(rxKickTick);
2440 if (rxKickTick)
2441 schedule(rxKickEvent, rxKickTick);
2442
2443 /*
2444 * Unserialize EEPROM state machine
2445 */
2446 int eepromState;
2447 UNSERIALIZE_SCALAR(eepromState);
2448 this->eepromState = (EEPROMState) eepromState;
2449 UNSERIALIZE_SCALAR(eepromClk);
2450 UNSERIALIZE_SCALAR(eepromBitsToRx);
2451 UNSERIALIZE_SCALAR(eepromOpcode);
2452 UNSERIALIZE_SCALAR(eepromAddress);
2453 UNSERIALIZE_SCALAR(eepromData);
2454
2455 /*
2456 * If there's a pending transmit, reschedule it now
2457 */
2458 Tick transmitTick;
2459 UNSERIALIZE_SCALAR(transmitTick);
2460 if (transmitTick)
2461 schedule(txEvent, curTick() + transmitTick);
2462
2463 /*
2464 * unserialize receive address filter settings
2465 */
2466 UNSERIALIZE_SCALAR(rxFilterEnable);
2467 UNSERIALIZE_SCALAR(acceptBroadcast);
2468 UNSERIALIZE_SCALAR(acceptMulticast);
2469 UNSERIALIZE_SCALAR(acceptUnicast);
2470 UNSERIALIZE_SCALAR(acceptPerfect);
2471 UNSERIALIZE_SCALAR(acceptArp);
2472 UNSERIALIZE_SCALAR(multicastHashEnable);
2473
2474 /*
2475 * Keep track of pending interrupt status.
2476 */
2477 UNSERIALIZE_SCALAR(intrTick);
2478 UNSERIALIZE_SCALAR(cpuPendingIntr);
2479 Tick intrEventTick;
2480 UNSERIALIZE_SCALAR(intrEventTick);
2481 if (intrEventTick) {
2482 intrEvent = new EventFunctionWrapper([this]{ cpuInterrupt(); },
2483 name(), true);
2484 schedule(intrEvent, intrEventTick);
2485 }
2486}
2487
2488NSGigE *
2489NSGigEParams::create()
2490{
2491 return new NSGigE(this);
2492}
182}
183
184/**
185 * This reads the device registers, which are detailed in the NS83820
186 * spec sheet
187 */
188Tick
189NSGigE::read(PacketPtr pkt)
190{
191 assert(ioEnable);
192
193 //The mask is to give you only the offset into the device register file
194 Addr daddr = pkt->getAddr() & 0xfff;
195 DPRINTF(EthernetPIO, "read da=%#x pa=%#x size=%d\n",
196 daddr, pkt->getAddr(), pkt->getSize());
197
198
199 // there are some reserved registers, you can see ns_gige_reg.h and
200 // the spec sheet for details
201 if (daddr > LAST && daddr <= RESERVED) {
202 panic("Accessing reserved register");
203 } else if (daddr > RESERVED && daddr <= 0x3FC) {
204 return readConfig(pkt);
205 } else if (daddr >= MIB_START && daddr <= MIB_END) {
206 // don't implement all the MIBs. hopefully the kernel
207 // doesn't actually DEPEND upon their values
208 // MIBs are just hardware stats keepers
209 pkt->setLE<uint32_t>(0);
210 pkt->makeAtomicResponse();
211 return pioDelay;
212 } else if (daddr > 0x3FC)
213 panic("Something is messed up!\n");
214
215 assert(pkt->getSize() == sizeof(uint32_t));
216 uint32_t &reg = *pkt->getPtr<uint32_t>();
217 uint16_t rfaddr;
218
219 switch (daddr) {
220 case CR:
221 reg = regs.command;
222 //these are supposed to be cleared on a read
223 reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
224 break;
225
226 case CFGR:
227 reg = regs.config;
228 break;
229
230 case MEAR:
231 reg = regs.mear;
232 break;
233
234 case PTSCR:
235 reg = regs.ptscr;
236 break;
237
238 case ISR:
239 reg = regs.isr;
240 devIntrClear(ISR_ALL);
241 break;
242
243 case IMR:
244 reg = regs.imr;
245 break;
246
247 case IER:
248 reg = regs.ier;
249 break;
250
251 case IHR:
252 reg = regs.ihr;
253 break;
254
255 case TXDP:
256 reg = regs.txdp;
257 break;
258
259 case TXDP_HI:
260 reg = regs.txdp_hi;
261 break;
262
263 case TX_CFG:
264 reg = regs.txcfg;
265 break;
266
267 case GPIOR:
268 reg = regs.gpior;
269 break;
270
271 case RXDP:
272 reg = regs.rxdp;
273 break;
274
275 case RXDP_HI:
276 reg = regs.rxdp_hi;
277 break;
278
279 case RX_CFG:
280 reg = regs.rxcfg;
281 break;
282
283 case PQCR:
284 reg = regs.pqcr;
285 break;
286
287 case WCSR:
288 reg = regs.wcsr;
289 break;
290
291 case PCR:
292 reg = regs.pcr;
293 break;
294
295 // see the spec sheet for how RFCR and RFDR work
296 // basically, you write to RFCR to tell the machine
297 // what you want to do next, then you act upon RFDR,
298 // and the device will be prepared b/c of what you
299 // wrote to RFCR
300 case RFCR:
301 reg = regs.rfcr;
302 break;
303
304 case RFDR:
305 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
306 switch (rfaddr) {
307 // Read from perfect match ROM octets
308 case 0x000:
309 reg = rom.perfectMatch[1];
310 reg = reg << 8;
311 reg += rom.perfectMatch[0];
312 break;
313 case 0x002:
314 reg = rom.perfectMatch[3] << 8;
315 reg += rom.perfectMatch[2];
316 break;
317 case 0x004:
318 reg = rom.perfectMatch[5] << 8;
319 reg += rom.perfectMatch[4];
320 break;
321 default:
322 // Read filter hash table
323 if (rfaddr >= FHASH_ADDR &&
324 rfaddr < FHASH_ADDR + FHASH_SIZE) {
325
326 // Only word-aligned reads supported
327 if (rfaddr % 2)
328 panic("unaligned read from filter hash table!");
329
330 reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8;
331 reg += rom.filterHash[rfaddr - FHASH_ADDR];
332 break;
333 }
334
335 panic("reading RFDR for something other than pattern"
336 " matching or hashing! %#x\n", rfaddr);
337 }
338 break;
339
340 case SRR:
341 reg = regs.srr;
342 break;
343
344 case MIBC:
345 reg = regs.mibc;
346 reg &= ~(MIBC_MIBS | MIBC_ACLR);
347 break;
348
349 case VRCR:
350 reg = regs.vrcr;
351 break;
352
353 case VTCR:
354 reg = regs.vtcr;
355 break;
356
357 case VDR:
358 reg = regs.vdr;
359 break;
360
361 case CCSR:
362 reg = regs.ccsr;
363 break;
364
365 case TBICR:
366 reg = regs.tbicr;
367 break;
368
369 case TBISR:
370 reg = regs.tbisr;
371 break;
372
373 case TANAR:
374 reg = regs.tanar;
375 break;
376
377 case TANLPAR:
378 reg = regs.tanlpar;
379 break;
380
381 case TANER:
382 reg = regs.taner;
383 break;
384
385 case TESR:
386 reg = regs.tesr;
387 break;
388
389 case M5REG:
390 reg = 0;
391 if (params()->rx_thread)
392 reg |= M5REG_RX_THREAD;
393 if (params()->tx_thread)
394 reg |= M5REG_TX_THREAD;
395 if (params()->rss)
396 reg |= M5REG_RSS;
397 break;
398
399 default:
400 panic("reading unimplemented register: addr=%#x", daddr);
401 }
402
403 DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
404 daddr, reg, reg);
405
406 pkt->makeAtomicResponse();
407 return pioDelay;
408}
409
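/**
 * This handles writes to the device registers, which are detailed in the
 * NS83820 spec sheet
 */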
410Tick
411NSGigE::write(PacketPtr pkt)
412{
413 assert(ioEnable);
414
415 Addr daddr = pkt->getAddr() & 0xfff;
416 DPRINTF(EthernetPIO, "write da=%#x pa=%#x size=%d\n",
417 daddr, pkt->getAddr(), pkt->getSize());
418
419 if (daddr > LAST && daddr <= RESERVED) {
420 panic("Accessing reserved register");
421 } else if (daddr > RESERVED && daddr <= 0x3FC) {
422 return writeConfig(pkt);
423 } else if (daddr > 0x3FC)
424 panic("Something is messed up!\n");
425
426 if (pkt->getSize() == sizeof(uint32_t)) {
427 uint32_t reg = pkt->getLE<uint32_t>();
428 uint16_t rfaddr;
429
430 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);
431
432 switch (daddr) {
433 case CR:
434 regs.command = reg;
435 if (reg & CR_TXD) {
436 txEnable = false;
437 } else if (reg & CR_TXE) {
438 txEnable = true;
439
440 // the kernel is enabling the transmit machine
441 if (txState == txIdle)
442 txKick();
443 }
444
445 if (reg & CR_RXD) {
446 rxEnable = false;
447 } else if (reg & CR_RXE) {
448 rxEnable = true;
449
450 if (rxState == rxIdle)
451 rxKick();
452 }
453
454 if (reg & CR_TXR)
455 txReset();
456
457 if (reg & CR_RXR)
458 rxReset();
459
460 if (reg & CR_SWI)
461 devIntrPost(ISR_SWI);
462
463 if (reg & CR_RST) {
464 txReset();
465 rxReset();
466
467 regsReset();
468 }
469 break;
470
471 case CFGR:
472 if (reg & CFGR_LNKSTS ||
473 reg & CFGR_SPDSTS ||
474 reg & CFGR_DUPSTS ||
475 reg & CFGR_RESERVED ||
476 reg & CFGR_T64ADDR ||
477 reg & CFGR_PCI64_DET) {
478 // First clear all writable bits
479 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
480 CFGR_RESERVED | CFGR_T64ADDR |
481 CFGR_PCI64_DET;
482 // Now set the appropriate writable bits
483 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
484 CFGR_RESERVED | CFGR_T64ADDR |
485 CFGR_PCI64_DET);
486 }
487
488// all these #if 0's are because I don't think the kernel needs to
489// have these implemented. If there is a problem relating to one of
490// these, you may need to add functionality in.
491
492// grouped together and #if 0'ed to avoid empty if body and make clang happy
493#if 0
494 if (reg & CFGR_TBI_EN) ;
495 if (reg & CFGR_MODE_1000) ;
496
497 if (reg & CFGR_PINT_DUPSTS ||
498 reg & CFGR_PINT_LNKSTS ||
499 reg & CFGR_PINT_SPDSTS)
500 ;
501
502 if (reg & CFGR_TMRTEST) ;
503 if (reg & CFGR_MRM_DIS) ;
504 if (reg & CFGR_MWI_DIS) ;
505
506 if (reg & CFGR_DATA64_EN) ;
507 if (reg & CFGR_M64ADDR) ;
508 if (reg & CFGR_PHY_RST) ;
509 if (reg & CFGR_PHY_DIS) ;
510
511 if (reg & CFGR_REQALG) ;
512 if (reg & CFGR_SB) ;
513 if (reg & CFGR_POW) ;
514 if (reg & CFGR_EXD) ;
515 if (reg & CFGR_PESEL) ;
516 if (reg & CFGR_BROM_DIS) ;
517 if (reg & CFGR_EXT_125) ;
518 if (reg & CFGR_BEM) ;
519
520 if (reg & CFGR_T64ADDR) ;
521 // panic("CFGR_T64ADDR is read only register!\n");
522#endif
523 if (reg & CFGR_AUTO_1000)
524 panic("CFGR_AUTO_1000 not implemented!\n");
525
526 if (reg & CFGR_PCI64_DET)
527 panic("CFGR_PCI64_DET is a read-only register!\n");
528
529 if (reg & CFGR_EXTSTS_EN)
530 extstsEnable = true;
531 else
532 extstsEnable = false;
533 break;
534
535 case MEAR:
536 // Clear writable bits
537 regs.mear &= MEAR_EEDO;
538 // Set appropriate writable bits
539 regs.mear |= reg & ~MEAR_EEDO;
540
541 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
542 // even though it could get it through RFDR
543 if (reg & MEAR_EESEL) {
544 // Rising edge of clock
545 if (reg & MEAR_EECLK && !eepromClk)
546 eepromKick();
547 }
548 else {
549 eepromState = eepromStart;
550 regs.mear &= ~MEAR_EEDI;
551 }
552
553 eepromClk = reg & MEAR_EECLK;
554
555 // since phy is completely faked, MEAR_MD* don't matter
556
557// grouped together and #if 0'ed to avoid empty if body and make clang happy
558#if 0
559 if (reg & MEAR_MDIO) ;
560 if (reg & MEAR_MDDIR) ;
561 if (reg & MEAR_MDC) ;
562#endif
563 break;
564
565 case PTSCR:
566 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
567 // these control BISTs for various parts of the chip - we
568 // don't care, so we just fake that the BIST is done
569 if (reg & PTSCR_RBIST_EN)
570 regs.ptscr |= PTSCR_RBIST_DONE;
571 if (reg & PTSCR_EEBIST_EN)
572 regs.ptscr &= ~PTSCR_EEBIST_EN;
573 if (reg & PTSCR_EELOAD_EN)
574 regs.ptscr &= ~PTSCR_EELOAD_EN;
575 break;
576
577 case ISR: /* writing to the ISR has no effect */
578 panic("ISR is a read-only register!\n");
579
580 case IMR:
581 regs.imr = reg;
582 devIntrChangeMask();
583 break;
584
585 case IER:
586 regs.ier = reg;
587 break;
588
589 case IHR:
590 regs.ihr = reg;
591 /* not going to implement real interrupt holdoff */
592 break;
593
594 case TXDP:
595 regs.txdp = (reg & 0xFFFFFFFC);
596 assert(txState == txIdle);
597 CTDD = false;
598 break;
599
600 case TXDP_HI:
601 regs.txdp_hi = reg;
602 break;
603
604 case TX_CFG:
605 regs.txcfg = reg;
606#if 0
607 if (reg & TX_CFG_CSI) ;
608 if (reg & TX_CFG_HBI) ;
609 if (reg & TX_CFG_MLB) ;
610 if (reg & TX_CFG_ATP) ;
611 if (reg & TX_CFG_ECRETRY) {
612 /*
613 * this could easily be implemented, but considering
614 * the network is just a fake pipe, wouldn't make
615 * sense to do this
616 */
617 }
618
619 if (reg & TX_CFG_BRST_DIS) ;
620#endif
621
622#if 0
623 /* we handle our own DMA, ignore the kernel's exhortations */
624 if (reg & TX_CFG_MXDMA) ;
625#endif
626
627 // also, we currently don't care about fill/drain
628 // thresholds though this may change in the future with
629 // more realistic networks or a driver which changes it
630 // according to feedback
631
632 break;
633
634 case GPIOR:
635 // Only write writable bits
636 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
637 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN;
638 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
639 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN);
640 /* these just control general purpose i/o pins, don't matter */
641 break;
642
643 case RXDP:
644 regs.rxdp = reg;
645 CRDD = false;
646 break;
647
648 case RXDP_HI:
649 regs.rxdp_hi = reg;
650 break;
651
652 case RX_CFG:
653 regs.rxcfg = reg;
654#if 0
655 if (reg & RX_CFG_AEP) ;
656 if (reg & RX_CFG_ARP) ;
657 if (reg & RX_CFG_STRIPCRC) ;
658 if (reg & RX_CFG_RX_RD) ;
659 if (reg & RX_CFG_ALP) ;
660 if (reg & RX_CFG_AIRL) ;
661
662 /* we handle our own DMA, ignore what kernel says about it */
663 if (reg & RX_CFG_MXDMA) ;
664
665 //also, we currently don't care about fill/drain thresholds
666 //though this may change in the future with more realistic
667 //networks or a driver which changes it according to feedback
668 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
669#endif
670 break;
671
672 case PQCR:
673 /* there is no priority queueing used in the linux 2.6 driver */
674 regs.pqcr = reg;
675 break;
676
677 case WCSR:
678 /* not going to implement wake on LAN */
679 regs.wcsr = reg;
680 break;
681
682 case PCR:
683 /* not going to implement pause control */
684 regs.pcr = reg;
685 break;
686
687 case RFCR:
688 regs.rfcr = reg;
689
690 rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
691 acceptBroadcast = (reg & RFCR_AAB) ? true : false;
692 acceptMulticast = (reg & RFCR_AAM) ? true : false;
693 acceptUnicast = (reg & RFCR_AAU) ? true : false;
694 acceptPerfect = (reg & RFCR_APM) ? true : false;
695 acceptArp = (reg & RFCR_AARP) ? true : false;
696 multicastHashEnable = (reg & RFCR_MHEN) ? true : false;
697
698#if 0
699 if (reg & RFCR_APAT)
700 panic("RFCR_APAT not implemented!\n");
701#endif
702 if (reg & RFCR_UHEN)
703 panic("Unicast hash filtering not used by drivers!\n");
704
705 if (reg & RFCR_ULM)
706 panic("RFCR_ULM not implemented!\n");
707
708 break;
709
710 case RFDR:
711 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
712 switch (rfaddr) {
713 case 0x000:
714 rom.perfectMatch[0] = (uint8_t)reg;
715 rom.perfectMatch[1] = (uint8_t)(reg >> 8);
716 break;
717 case 0x002:
718 rom.perfectMatch[2] = (uint8_t)reg;
719 rom.perfectMatch[3] = (uint8_t)(reg >> 8);
720 break;
721 case 0x004:
722 rom.perfectMatch[4] = (uint8_t)reg;
723 rom.perfectMatch[5] = (uint8_t)(reg >> 8);
724 break;
725 default:
726
727 if (rfaddr >= FHASH_ADDR &&
728 rfaddr < FHASH_ADDR + FHASH_SIZE) {
729
730 // Only word-aligned writes supported
731 if (rfaddr % 2)
732 panic("unaligned write to filter hash table!");
733
734 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg;
735 rom.filterHash[rfaddr - FHASH_ADDR + 1]
736 = (uint8_t)(reg >> 8);
737 break;
738 }
739 panic("writing RFDR for something other than pattern matching "
740 "or hashing! %#x\n", rfaddr);
741 }
742 break;
743
744 case BRAR:
745 regs.brar = reg;
746 break;
747
748 case BRDR:
749 panic("the driver never uses BRDR, something is wrong!\n");
750
751 case SRR:
752 panic("SRR is a read-only register!\n");
753
754 case MIBC:
755 panic("the driver never uses MIBC, something is wrong!\n");
756
757 case VRCR:
758 regs.vrcr = reg;
759 break;
760
761 case VTCR:
762 regs.vtcr = reg;
763 break;
764
765 case VDR:
766 panic("the driver never uses VDR, something is wrong!\n");
767
768 case CCSR:
769 /* not going to implement clockrun stuff */
770 regs.ccsr = reg;
771 break;
772
773 case TBICR:
774 regs.tbicr = reg;
775 if (reg & TBICR_MR_LOOPBACK)
776 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
777
778 if (reg & TBICR_MR_AN_ENABLE) {
779 regs.tanlpar = regs.tanar;
780 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
781 }
782
783#if 0
784 if (reg & TBICR_MR_RESTART_AN) ;
785#endif
786
787 break;
788
789 case TBISR:
790 panic("TBISR is a read-only register!\n");
791
792 case TANAR:
793 // Only write the writable bits
794 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED;
795 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED);
796
797 // Pause capability unimplemented
798#if 0
799 if (reg & TANAR_PS2) ;
800 if (reg & TANAR_PS1) ;
801#endif
802
803 break;
804
805 case TANLPAR:
806 panic("this should only be written to by the fake phy!\n");
807
808 case TANER:
809 panic("TANER is a read-only register!\n");
810
811 case TESR:
812 regs.tesr = reg;
813 break;
814
815 default:
816 panic("invalid register access daddr=%#x", daddr);
817 }
818 } else {
819 panic("Invalid Request Size");
820 }
821 pkt->makeAtomicResponse();
822 return pioDelay;
823}
824
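/*
 * Post device-level interrupts: set the requested (implemented) bits in
 * the ISR, count stats for sources that are currently unmasked, and if
 * any unmasked ISR bits are set schedule a CPU interrupt through
 * cpuIntrPost(), delayed by intrDelay unless an ISR_NODELAY source is
 * pending.
 */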
825void
826NSGigE::devIntrPost(uint32_t interrupts)
827{
828 if (interrupts & ISR_RESERVE)
829 panic("Cannot set a reserved interrupt");
830
831 if (interrupts & ISR_NOIMPL)
832 warn("interrupt not implemented %#x\n", interrupts);
833
834 interrupts &= ISR_IMPL;
835 regs.isr |= interrupts;
836
837 if (interrupts & regs.imr) {
838 if (interrupts & ISR_SWI) {
839 totalSwi++;
840 }
841 if (interrupts & ISR_RXIDLE) {
842 totalRxIdle++;
843 }
844 if (interrupts & ISR_RXOK) {
845 totalRxOk++;
846 }
847 if (interrupts & ISR_RXDESC) {
848 totalRxDesc++;
849 }
850 if (interrupts & ISR_TXOK) {
851 totalTxOk++;
852 }
853 if (interrupts & ISR_TXIDLE) {
854 totalTxIdle++;
855 }
856 if (interrupts & ISR_TXDESC) {
857 totalTxDesc++;
858 }
859 if (interrupts & ISR_RXORN) {
860 totalRxOrn++;
861 }
862 }
863
864 DPRINTF(EthernetIntr,
865 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
866 interrupts, regs.isr, regs.imr);
867
868 if ((regs.isr & regs.imr)) {
869 Tick when = curTick();
870 if ((regs.isr & regs.imr & ISR_NODELAY) == 0)
871 when += intrDelay;
872 postedInterrupts++;
873 cpuIntrPost(when);
874 }
875}
876
877/* Writing the interrupt-counting stats inside this function means that it
878 is now limited to being used to clear all interrupts when the kernel
879 reads and services the ISR. Just a note in case you were thinking
880 of expanding its use.
881*/
882void
883NSGigE::devIntrClear(uint32_t interrupts)
884{
885 if (interrupts & ISR_RESERVE)
886 panic("Cannot clear a reserved interrupt");
887
888 if (regs.isr & regs.imr & ISR_SWI) {
889 postedSwi++;
890 }
891 if (regs.isr & regs.imr & ISR_RXIDLE) {
892 postedRxIdle++;
893 }
894 if (regs.isr & regs.imr & ISR_RXOK) {
895 postedRxOk++;
896 }
897 if (regs.isr & regs.imr & ISR_RXDESC) {
898 postedRxDesc++;
899 }
900 if (regs.isr & regs.imr & ISR_TXOK) {
901 postedTxOk++;
902 }
903 if (regs.isr & regs.imr & ISR_TXIDLE) {
904 postedTxIdle++;
905 }
906 if (regs.isr & regs.imr & ISR_TXDESC) {
907 postedTxDesc++;
908 }
909 if (regs.isr & regs.imr & ISR_RXORN) {
910 postedRxOrn++;
911 }
912
913 interrupts &= ~ISR_NOIMPL;
914 regs.isr &= ~interrupts;
915
916 DPRINTF(EthernetIntr,
917 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
918 interrupts, regs.isr, regs.imr);
919
920 if (!(regs.isr & regs.imr))
921 cpuIntrClear();
922}
923
924void
925NSGigE::devIntrChangeMask()
926{
927 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
928 regs.isr, regs.imr, regs.isr & regs.imr);
929
930 if (regs.isr & regs.imr)
931 cpuIntrPost(curTick());
932 else
933 cpuIntrClear();
934}
935
936void
937NSGigE::cpuIntrPost(Tick when)
938{
939 // If the interrupt you want to post is later than an interrupt
940 // already scheduled, just let it post in the coming one and don't
941 // schedule another.
942 // HOWEVER, must be sure that the scheduled intrTick is in the
943 // future (this was formerly the source of a bug)
944 /**
945 * @todo this warning should be removed and the intrTick code should
946 * be fixed.
947 */
948 assert(when >= curTick());
949 assert(intrTick >= curTick() || intrTick == 0);
950 if (when > intrTick && intrTick != 0) {
951 DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
952 intrTick);
953 return;
954 }
955
956 intrTick = when;
957 if (intrTick < curTick()) {
958 intrTick = curTick();
959 }
960
961 DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
962 intrTick);
963
964 if (intrEvent)
965 intrEvent->squash();
966
967 intrEvent = new EventFunctionWrapper([this]{ cpuInterrupt(); },
968 name(), true);
969 schedule(intrEvent, intrTick);
970}
971
972void
973NSGigE::cpuInterrupt()
974{
975 assert(intrTick == curTick());
976
977 // Whether or not there's a pending interrupt, we don't care about
978 // it anymore
979 intrEvent = 0;
980 intrTick = 0;
981
982 // Don't send an interrupt if there's already one
983 if (cpuPendingIntr) {
984 DPRINTF(EthernetIntr,
985 "would send an interrupt now, but there's already pending\n");
986 } else {
987 // Send interrupt
988 cpuPendingIntr = true;
989
990 DPRINTF(EthernetIntr, "posting interrupt\n");
991 intrPost();
992 }
993}
994
995void
996NSGigE::cpuIntrClear()
997{
998 if (!cpuPendingIntr)
999 return;
1000
1001 if (intrEvent) {
1002 intrEvent->squash();
1003 intrEvent = 0;
1004 }
1005
1006 intrTick = 0;
1007
1008 cpuPendingIntr = false;
1009
1010 DPRINTF(EthernetIntr, "clearing interrupt\n");
1011 intrClear();
1012}
1013
1014bool
1015NSGigE::cpuIntrPending() const
1016{ return cpuPendingIntr; }
1017
1018void
1019NSGigE::txReset()
1020{
1021
1022 DPRINTF(Ethernet, "transmit reset\n");
1023
1024 CTDD = false;
1025 txEnable = false;
1026 txFragPtr = 0;
1027 assert(txDescCnt == 0);
1028 txFifo.clear();
1029 txState = txIdle;
1030 assert(txDmaState == dmaIdle);
1031}
1032
1033void
1034NSGigE::rxReset()
1035{
1036 DPRINTF(Ethernet, "receive reset\n");
1037
1038 CRDD = false;
1039 assert(rxPktBytes == 0);
1040 rxEnable = false;
1041 rxFragPtr = 0;
1042 assert(rxDescCnt == 0);
1043 assert(rxDmaState == dmaIdle);
1044 rxFifo.clear();
1045 rxState = rxIdle;
1046}
1047
1048void
1049NSGigE::regsReset()
1050{
1051 memset(&regs, 0, sizeof(regs));
1052 regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
1053 regs.mear = 0x12;
1054 regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
1055 // fill threshold to 32 bytes
1056 regs.rxcfg = 0x4; // set drain threshold to 16 bytes
1057 regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
1058 regs.mibc = MIBC_FRZ;
1059 regs.vdr = 0x81; // set the vlan tag type to 802.1q
1060 regs.tesr = 0xc000; // TBI capable of both full and half duplex
1061 regs.brar = 0xffffffff;
1062
1063 extstsEnable = false;
1064 acceptBroadcast = false;
1065 acceptMulticast = false;
1066 acceptUnicast = false;
1067 acceptPerfect = false;
1068 acceptArp = false;
1069}
1070
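/*
 * DMA helpers for the rx state machine. They either issue the DMA right
 * away or, if another DMA is outstanding or the device is draining, park
 * the state machine in a *Waiting state. They always return true, so the
 * caller exits and is kicked again by the DMA completion event (or, when
 * waiting, by the other state machine or drainResume()).
 */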
1071bool
1072NSGigE::doRxDmaRead()
1073{
1074 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1075 rxDmaState = dmaReading;
1076
1077 if (dmaPending() || drainState() != DrainState::Running)
1078 rxDmaState = dmaReadWaiting;
1079 else
1080 dmaRead(rxDmaAddr, rxDmaLen, &rxDmaReadEvent, (uint8_t*)rxDmaData);
1081
1082 return true;
1083}
1084
1085void
1086NSGigE::rxDmaReadDone()
1087{
1088 assert(rxDmaState == dmaReading);
1089 rxDmaState = dmaIdle;
1090
1091 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
1092 rxDmaAddr, rxDmaLen);
1093 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1094
1095 // If the transmit state machine has a pending DMA, let it go first
1096 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1097 txKick();
1098
1099 rxKick();
1100}
1101
1102bool
1103NSGigE::doRxDmaWrite()
1104{
1105 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1106 rxDmaState = dmaWriting;
1107
1108 if (dmaPending() || drainState() != DrainState::Running)
1109 rxDmaState = dmaWriteWaiting;
1110 else
1111 dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaWriteEvent, (uint8_t*)rxDmaData);
1112 return true;
1113}
1114
1115void
1116NSGigE::rxDmaWriteDone()
1117{
1118 assert(rxDmaState == dmaWriting);
1119 rxDmaState = dmaIdle;
1120
1121 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1122 rxDmaAddr, rxDmaLen);
1123 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1124
1125 // If the transmit state machine has a pending DMA, let it go first
1126 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1127 txKick();
1128
1129 rxKick();
1130}
1131
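/*
 * Receive state machine. rxKick() walks the descriptor ring at regs.rxdp:
 * it reads each descriptor via DMA, copies packet data from rxFifo into
 * the descriptor's buffer, writes back cmdsts/extsts (including checksum
 * status when EXTSTS is enabled), posts ISR_RXOK/ISR_RXDESC/ISR_RXIDLE as
 * appropriate, and follows the link field to the next descriptor.
 */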
1132void
1133NSGigE::rxKick()
1134{
1135 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1136
1137 DPRINTF(EthernetSM,
1138 "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
1139 NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32);
1140
1141 Addr link, bufptr;
1142 uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts;
1143 uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts;
1144
1145 next:
1146 if (rxKickTick > curTick()) {
1147 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
1148 rxKickTick);
1149
1150 goto exit;
1151 }
1152
1153 // Go to the next state machine clock tick.
1154 rxKickTick = clockEdge(Cycles(1));
1155
1156 switch(rxDmaState) {
1157 case dmaReadWaiting:
1158 if (doRxDmaRead())
1159 goto exit;
1160 break;
1161 case dmaWriteWaiting:
1162 if (doRxDmaWrite())
1163 goto exit;
1164 break;
1165 default:
1166 break;
1167 }
1168
1169 link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link;
1170 bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr;
1171
1172 // see state machine from spec for details
1173 // the way this works is, if you finish work on one state and can
1174 // go directly to another, you do that through jumping to the
1175 // label "next". however, if you have intermediate work, like DMA
1176 // so that you can't go to the next state yet, you go to exit and
1177 // exit the loop. however, when the DMA is done it will trigger
1178 // an event and come back to this loop.
1179 switch (rxState) {
1180 case rxIdle:
1181 if (!rxEnable) {
1182 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
1183 goto exit;
1184 }
1185
1186 if (CRDD) {
1187 rxState = rxDescRefr;
1188
1189 rxDmaAddr = regs.rxdp & 0x3fffffff;
1190 rxDmaData =
1191 is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link;
1192 rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link);
1193 rxDmaFree = dmaDescFree;
1194
1195 descDmaReads++;
1196 descDmaRdBytes += rxDmaLen;
1197
1198 if (doRxDmaRead())
1199 goto exit;
1200 } else {
1201 rxState = rxDescRead;
1202
1203 rxDmaAddr = regs.rxdp & 0x3fffffff;
1204 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1205 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1206 rxDmaFree = dmaDescFree;
1207
1208 descDmaReads++;
1209 descDmaRdBytes += rxDmaLen;
1210
1211 if (doRxDmaRead())
1212 goto exit;
1213 }
1214 break;
1215
1216 case rxDescRefr:
1217 if (rxDmaState != dmaIdle)
1218 goto exit;
1219
1220 rxState = rxAdvance;
1221 break;
1222
1223 case rxDescRead:
1224 if (rxDmaState != dmaIdle)
1225 goto exit;
1226
1227 DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n",
1228 regs.rxdp & 0x3fffffff);
1229 DPRINTF(EthernetDesc,
1230 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1231 link, bufptr, cmdsts, extsts);
1232
1233 if (cmdsts & CMDSTS_OWN) {
1234 devIntrPost(ISR_RXIDLE);
1235 rxState = rxIdle;
1236 goto exit;
1237 } else {
1238 rxState = rxFifoBlock;
1239 rxFragPtr = bufptr;
1240 rxDescCnt = cmdsts & CMDSTS_LEN_MASK;
1241 }
1242 break;
1243
1244 case rxFifoBlock:
1245 if (!rxPacket) {
1246 /**
1247 * @todo in reality, we should be able to start processing
1248 * the packet as it arrives, and not have to wait for the
1249 * full packet to be in the receive fifo.
1250 */
1251 if (rxFifo.empty())
1252 goto exit;
1253
1254 DPRINTF(EthernetSM, "****processing receive of new packet****\n");
1255
1256 // If we don't have a packet, grab a new one from the fifo.
1257 rxPacket = rxFifo.front();
1258 rxPktBytes = rxPacket->length;
1259 rxPacketBufPtr = rxPacket->data;
1260
1261#if TRACING_ON
1262 if (DTRACE(Ethernet)) {
1263 IpPtr ip(rxPacket);
1264 if (ip) {
1265 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1266 TcpPtr tcp(ip);
1267 if (tcp) {
1268 DPRINTF(Ethernet,
1269 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1270 tcp->sport(), tcp->dport(), tcp->seq(),
1271 tcp->ack());
1272 }
1273 }
1274 }
1275#endif
1276
1277 // sanity check - I think the driver behaves like this
1278 assert(rxDescCnt >= rxPktBytes);
1279 rxFifo.pop();
1280 }
1281
1282
1283 // don't need the && rxDescCnt > 0 if the driver sanity check
1284 // above holds
1285 if (rxPktBytes > 0) {
1286 rxState = rxFragWrite;
1287 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1288 // check holds
1289 rxXferLen = rxPktBytes;
1290
1291 rxDmaAddr = rxFragPtr & 0x3fffffff;
1292 rxDmaData = rxPacketBufPtr;
1293 rxDmaLen = rxXferLen;
1294 rxDmaFree = dmaDataFree;
1295
1296 if (doRxDmaWrite())
1297 goto exit;
1298
1299 } else {
1300 rxState = rxDescWrite;
1301
1302 //if (rxPktBytes == 0) { /* packet is done */
1303 assert(rxPktBytes == 0);
1304 DPRINTF(EthernetSM, "done with receiving packet\n");
1305
1306 cmdsts |= CMDSTS_OWN;
1307 cmdsts &= ~CMDSTS_MORE;
1308 cmdsts |= CMDSTS_OK;
1309 cmdsts &= 0xffff0000;
1310 cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE
1311
1312#if 0
1313 /*
1314 * all the driver uses these for is its own stats keeping,
1315 * which we don't care about; they aren't necessary for
1316 * functionality and doing this would just slow us down.
1317 * if they end up being used in a later driver version for
1318 * functional purposes, just remove the #if 0
1319 */
1320 if (rxFilterEnable) {
1321 cmdsts &= ~CMDSTS_DEST_MASK;
1322 const EthAddr &dst = rxFifoFront()->dst();
1323 if (dst->unicast())
1324 cmdsts |= CMDSTS_DEST_SELF;
1325 if (dst->multicast())
1326 cmdsts |= CMDSTS_DEST_MULTI;
1327 if (dst->broadcast())
1328 cmdsts |= CMDSTS_DEST_MASK;
1329 }
1330#endif
1331
1332 IpPtr ip(rxPacket);
1333 if (extstsEnable && ip) {
1334 extsts |= EXTSTS_IPPKT;
1335 rxIpChecksums++;
1336 if (cksum(ip) != 0) {
1337 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
1338 extsts |= EXTSTS_IPERR;
1339 }
1340 TcpPtr tcp(ip);
1341 UdpPtr udp(ip);
1342 if (tcp) {
1343 extsts |= EXTSTS_TCPPKT;
1344 rxTcpChecksums++;
1345 if (cksum(tcp) != 0) {
1346 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
1347 extsts |= EXTSTS_TCPERR;
1348
1349 }
1350 } else if (udp) {
1351 extsts |= EXTSTS_UDPPKT;
1352 rxUdpChecksums++;
1353 if (cksum(udp) != 0) {
1354 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
1355 extsts |= EXTSTS_UDPERR;
1356 }
1357 }
1358 }
1359 rxPacket = 0;
1360
1361 /*
1362 * the driver seems to always receive into desc buffers
1363 * of size 1514, so you never have a pkt that is split
1364 * into multiple descriptors on the receive side, so
1365 * I don't implement that case, hence the assert above.
1366 */
1367
1368 DPRINTF(EthernetDesc,
1369 "rxDesc: addr=%08x writeback cmdsts extsts\n",
1370 regs.rxdp & 0x3fffffff);
1371 DPRINTF(EthernetDesc,
1372 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1373 link, bufptr, cmdsts, extsts);
1374
1375 rxDmaAddr = regs.rxdp & 0x3fffffff;
1376 rxDmaData = &cmdsts;
1377 if (is64bit) {
1378 rxDmaAddr += offsetof(ns_desc64, cmdsts);
1379 rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
1380 } else {
1381 rxDmaAddr += offsetof(ns_desc32, cmdsts);
1382 rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
1383 }
1384 rxDmaFree = dmaDescFree;
1385
1386 descDmaWrites++;
1387 descDmaWrBytes += rxDmaLen;
1388
1389 if (doRxDmaWrite())
1390 goto exit;
1391 }
1392 break;
1393
1394 case rxFragWrite:
1395 if (rxDmaState != dmaIdle)
1396 goto exit;
1397
1398 rxPacketBufPtr += rxXferLen;
1399 rxFragPtr += rxXferLen;
1400 rxPktBytes -= rxXferLen;
1401
1402 rxState = rxFifoBlock;
1403 break;
1404
1405 case rxDescWrite:
1406 if (rxDmaState != dmaIdle)
1407 goto exit;
1408
1409 assert(cmdsts & CMDSTS_OWN);
1410
1411 assert(rxPacket == 0);
1412 devIntrPost(ISR_RXOK);
1413
1414 if (cmdsts & CMDSTS_INTR)
1415 devIntrPost(ISR_RXDESC);
1416
1417 if (!rxEnable) {
1418 DPRINTF(EthernetSM, "Halting the RX state machine\n");
1419 rxState = rxIdle;
1420 goto exit;
1421 } else
1422 rxState = rxAdvance;
1423 break;
1424
1425 case rxAdvance:
1426 if (link == 0) {
1427 devIntrPost(ISR_RXIDLE);
1428 rxState = rxIdle;
1429 CRDD = true;
1430 goto exit;
1431 } else {
1432 if (rxDmaState != dmaIdle)
1433 goto exit;
1434 rxState = rxDescRead;
1435 regs.rxdp = link;
1436 CRDD = false;
1437
1438 rxDmaAddr = regs.rxdp & 0x3fffffff;
1439 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1440 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1441 rxDmaFree = dmaDescFree;
1442
1443 if (doRxDmaRead())
1444 goto exit;
1445 }
1446 break;
1447
1448 default:
1449 panic("Invalid rxState!");
1450 }
1451
1452 DPRINTF(EthernetSM, "entering next rxState=%s\n",
1453 NsRxStateStrings[rxState]);
1454 goto next;
1455
1456 exit:
1457 /**
1458 * @todo do we want to schedule a future kick?
1459 */
1460 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
1461 NsRxStateStrings[rxState]);
1462
1463 if (!rxKickEvent.scheduled())
1464 schedule(rxKickEvent, rxKickTick);
1465}
1466
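/*
 * Hand the packet at the head of txFifo to the link. On success the
 * packet is popped, stats are updated, and ISR_TXOK is posted (see the
 * comment below on why this is done before the descriptor writeback).
 * If the fifo is still non-empty and no retry is pending, another
 * attempt is scheduled after retryTime.
 */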
1467void
1468NSGigE::transmit()
1469{
1470 if (txFifo.empty()) {
1471 DPRINTF(Ethernet, "nothing to transmit\n");
1472 return;
1473 }
1474
1475 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
1476 txFifo.size());
1477 if (interface->sendPacket(txFifo.front())) {
1478#if TRACING_ON
1479 if (DTRACE(Ethernet)) {
1480 IpPtr ip(txFifo.front());
1481 if (ip) {
1482 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1483 TcpPtr tcp(ip);
1484 if (tcp) {
1485 DPRINTF(Ethernet,
1486 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1487 tcp->sport(), tcp->dport(), tcp->seq(),
1488 tcp->ack());
1489 }
1490 }
1491 }
1492#endif
1493
1494 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
1495 txBytes += txFifo.front()->length;
1496 txPackets++;
1497
1498 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
1499 txFifo.avail());
1500 txFifo.pop();
1501
1502 /*
1503 * normally do a writeback of the descriptor here, and ONLY
1504 * after that is done, send this interrupt. but since our
1505 * stuff never actually fails, just do this interrupt here,
1506 * otherwise the code has to stray from this nice format.
1507 * besides, it's functionally the same.
1508 */
1509 devIntrPost(ISR_TXOK);
1510 }
1511
1512 if (!txFifo.empty() && !txEvent.scheduled()) {
1513 DPRINTF(Ethernet, "reschedule transmit\n");
1514 schedule(txEvent, curTick() + retryTime);
1515 }
1516}
1517
1518bool
1519NSGigE::doTxDmaRead()
1520{
1521 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1522 txDmaState = dmaReading;
1523
1524 if (dmaPending() || drainState() != DrainState::Running)
1525 txDmaState = dmaReadWaiting;
1526 else
1527 dmaRead(txDmaAddr, txDmaLen, &txDmaReadEvent, (uint8_t*)txDmaData);
1528
1529 return true;
1530}
1531
1532void
1533NSGigE::txDmaReadDone()
1534{
1535 assert(txDmaState == dmaReading);
1536 txDmaState = dmaIdle;
1537
1538 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1539 txDmaAddr, txDmaLen);
1540 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1541
1542 // If the receive state machine has a pending DMA, let it go first
1543 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1544 rxKick();
1545
1546 txKick();
1547}
1548
1549bool
1550NSGigE::doTxDmaWrite()
1551{
1552 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1553 txDmaState = dmaWriting;
1554
1555 if (dmaPending() || drainState() != DrainState::Running)
1556 txDmaState = dmaWriteWaiting;
1557 else
1558 dmaWrite(txDmaAddr, txDmaLen, &txDmaWriteEvent, (uint8_t*)txDmaData);
1559 return true;
1560}
1561
1562void
1563NSGigE::txDmaWriteDone()
1564{
1565 assert(txDmaState == dmaWriting);
1566 txDmaState = dmaIdle;
1567
1568 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1569 txDmaAddr, txDmaLen);
1570 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1571
1572 // If the receive state machine has a pending DMA, let it go first
1573 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1574 rxKick();
1575
1576 txKick();
1577}
1578
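/*
 * Transmit state machine. txKick() walks the descriptor ring at regs.txdp:
 * it reads each descriptor via DMA, gathers the packet fragments into
 * txPacket, fills in IP/TCP/UDP checksums when offload is requested
 * (VTCR_PPCHK with the matching extsts bits), pushes the finished packet
 * into txFifo and calls transmit(), writes back cmdsts, and follows the
 * link field to the next descriptor.
 */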
1579void
1580NSGigE::txKick()
1581{
1582 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1583
1584 DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n",
1585 NsTxStateStrings[txState], is64bit ? 64 : 32);
1586
1587 Addr link, bufptr;
1588 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts;
1589 uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts;
1590
1591 next:
1592 if (txKickTick > curTick()) {
1593 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
1594 txKickTick);
1595 goto exit;
1596 }
1597
1598 // Go to the next state machine clock tick.
1599 txKickTick = clockEdge(Cycles(1));
1600
1601 switch(txDmaState) {
1602 case dmaReadWaiting:
1603 if (doTxDmaRead())
1604 goto exit;
1605 break;
1606 case dmaWriteWaiting:
1607 if (doTxDmaWrite())
1608 goto exit;
1609 break;
1610 default:
1611 break;
1612 }
1613
1614 link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link;
1615 bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr;
1616 switch (txState) {
1617 case txIdle:
1618 if (!txEnable) {
1619 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
1620 goto exit;
1621 }
1622
1623 if (CTDD) {
1624 txState = txDescRefr;
1625
1626 txDmaAddr = regs.txdp & 0x3fffffff;
1627 txDmaData =
1628 is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link;
1629 txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link);
1630 txDmaFree = dmaDescFree;
1631
1632 descDmaReads++;
1633 descDmaRdBytes += txDmaLen;
1634
1635 if (doTxDmaRead())
1636 goto exit;
1637
1638 } else {
1639 txState = txDescRead;
1640
1641 txDmaAddr = regs.txdp & 0x3fffffff;
1642 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
1643 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
1644 txDmaFree = dmaDescFree;
1645
1646 descDmaReads++;
1647 descDmaRdBytes += txDmaLen;
1648
1649 if (doTxDmaRead())
1650 goto exit;
1651 }
1652 break;
1653
1654 case txDescRefr:
1655 if (txDmaState != dmaIdle)
1656 goto exit;
1657
1658 txState = txAdvance;
1659 break;
1660
1661 case txDescRead:
1662 if (txDmaState != dmaIdle)
1663 goto exit;
1664
1665 DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n",
1666 regs.txdp & 0x3fffffff);
1667 DPRINTF(EthernetDesc,
1668 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
1669 link, bufptr, cmdsts, extsts);
1670
1671 if (cmdsts & CMDSTS_OWN) {
1672 txState = txFifoBlock;
1673 txFragPtr = bufptr;
1674 txDescCnt = cmdsts & CMDSTS_LEN_MASK;
1675 } else {
1676 devIntrPost(ISR_TXIDLE);
1677 txState = txIdle;
1678 goto exit;
1679 }
1680 break;
1681
1682 case txFifoBlock:
1683 if (!txPacket) {
1684 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
1685 txPacket = make_shared<EthPacketData>(16384);
1686 txPacketBufPtr = txPacket->data;
1687 }
1688
1689 if (txDescCnt == 0) {
1690 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
1691 if (cmdsts & CMDSTS_MORE) {
1692 DPRINTF(EthernetSM, "there are more descriptors to come\n");
1693 txState = txDescWrite;
1694
1695 cmdsts &= ~CMDSTS_OWN;
1696
1697 txDmaAddr = regs.txdp & 0x3fffffff;
1698 txDmaData = &cmdsts;
1699 if (is64bit) {
1700 txDmaAddr += offsetof(ns_desc64, cmdsts);
1701 txDmaLen = sizeof(txDesc64.cmdsts);
1702 } else {
1703 txDmaAddr += offsetof(ns_desc32, cmdsts);
1704 txDmaLen = sizeof(txDesc32.cmdsts);
1705 }
1706 txDmaFree = dmaDescFree;
1707
1708 if (doTxDmaWrite())
1709 goto exit;
1710
1711 } else { /* this packet is totally done */
1712 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
1713 /* deal with the packet that just finished */
1714 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
1715 IpPtr ip(txPacket);
1716 if (extsts & EXTSTS_UDPPKT) {
1717 UdpPtr udp(ip);
1718 if (udp) {
1719 udp->sum(0);
1720 udp->sum(cksum(udp));
1721 txUdpChecksums++;
1722 } else {
1723 Debug::breakpoint();
1724 warn_once("UDPPKT set, but not UDP!\n");
1725 }
1726 } else if (extsts & EXTSTS_TCPPKT) {
1727 TcpPtr tcp(ip);
1728 if (tcp) {
1729 tcp->sum(0);
1730 tcp->sum(cksum(tcp));
1731 txTcpChecksums++;
1732 } else {
1733 warn_once("TCPPKT set, but not UDP!\n");
1734 }
1735 }
1736 if (extsts & EXTSTS_IPPKT) {
1737 if (ip) {
1738 ip->sum(0);
1739 ip->sum(cksum(ip));
1740 txIpChecksums++;
1741 } else {
1742 warn_once("IPPKT set, but not UDP!\n");
1743 }
1744 }
1745 }
1746
1747 txPacket->simLength = txPacketBufPtr - txPacket->data;
1748 txPacket->length = txPacketBufPtr - txPacket->data;
1749 // this is just because the receive side can't handle a
1750 // packet bigger than 1514; want to make sure
1751 if (txPacket->length > 1514)
1752 panic("transmit packet too large, %d > 1514\n",
1753 txPacket->length);
1754
1755#ifndef NDEBUG
1756 bool success =
1757#endif
1758 txFifo.push(txPacket);
1759 assert(success);
1760
1761 /*
1762 * the following section is not to spec, but
1763 * functionally shouldn't be any different. normally,
1764 * the chip will wait til the transmit has occurred
1765 * before writing back the descriptor because it has
1766 * to wait to see that it was successfully transmitted
1767 * to decide whether to set CMDSTS_OK or not.
1768 * however, in the simulator since it is always
1769 * successfully transmitted, and writing it exactly to
1770 * spec would complicate the code, we just do it here
1771 */
1772
1773 cmdsts &= ~CMDSTS_OWN;
1774 cmdsts |= CMDSTS_OK;
1775
1776 DPRINTF(EthernetDesc,
1777 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
1778 cmdsts, extsts);
1779
1780 txDmaFree = dmaDescFree;
1781 txDmaAddr = regs.txdp & 0x3fffffff;
1782 txDmaData = &cmdsts;
1783 if (is64bit) {
1784 txDmaAddr += offsetof(ns_desc64, cmdsts);
1785 txDmaLen =
1786 sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts);
1787 } else {
1788 txDmaAddr += offsetof(ns_desc32, cmdsts);
1789 txDmaLen =
1790 sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts);
1791 }
1792
1793 descDmaWrites++;
1794 descDmaWrBytes += txDmaLen;
1795
1796 transmit();
1797 txPacket = 0;
1798
1799 if (!txEnable) {
1800 DPRINTF(EthernetSM, "halting TX state machine\n");
1801 txState = txIdle;
1802 goto exit;
1803 } else
1804 txState = txAdvance;
1805
1806 if (doTxDmaWrite())
1807 goto exit;
1808 }
1809 } else {
1810 DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
1811 if (!txFifo.full()) {
1812 txState = txFragRead;
1813
1814 /*
1815 * The number of bytes transferred is either whatever
1816 * is left in the descriptor (txDescCnt), or if there
1817 * is not enough room in the fifo, just whatever room
1818 * is left in the fifo
1819 */
1820 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());
1821
1822 txDmaAddr = txFragPtr & 0x3fffffff;
1823 txDmaData = txPacketBufPtr;
1824 txDmaLen = txXferLen;
1825 txDmaFree = dmaDataFree;
1826
1827 if (doTxDmaRead())
1828 goto exit;
1829 } else {
1830 txState = txFifoBlock;
1831 transmit();
1832
1833 goto exit;
1834 }
1835
1836 }
1837 break;
1838
1839 case txFragRead:
1840 if (txDmaState != dmaIdle)
1841 goto exit;
1842
1843 txPacketBufPtr += txXferLen;
1844 txFragPtr += txXferLen;
1845 txDescCnt -= txXferLen;
1846 txFifo.reserve(txXferLen);
1847
1848 txState = txFifoBlock;
1849 break;
1850
1851 case txDescWrite:
1852 if (txDmaState != dmaIdle)
1853 goto exit;
1854
1855 if (cmdsts & CMDSTS_INTR)
1856 devIntrPost(ISR_TXDESC);
1857
1858 if (!txEnable) {
1859 DPRINTF(EthernetSM, "halting TX state machine\n");
1860 txState = txIdle;
1861 goto exit;
1862 } else
1863 txState = txAdvance;
1864 break;
1865
1866 case txAdvance:
1867 if (link == 0) {
1868 devIntrPost(ISR_TXIDLE);
1869 txState = txIdle;
1870 goto exit;
1871 } else {
1872 if (txDmaState != dmaIdle)
1873 goto exit;
1874 txState = txDescRead;
1875 regs.txdp = link;
1876 CTDD = false;
1877
1878 txDmaAddr = link & 0x3fffffff;
1879 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
1880 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
1881 txDmaFree = dmaDescFree;
1882
1883 if (doTxDmaRead())
1884 goto exit;
1885 }
1886 break;
1887
1888 default:
1889 panic("invalid state");
1890 }
1891
1892 DPRINTF(EthernetSM, "entering next txState=%s\n",
1893 NsTxStateStrings[txState]);
1894 goto next;
1895
1896 exit:
1897 /**
1898 * @todo do we want to schedule a future kick?
1899 */
1900 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
1901 NsTxStateStrings[txState]);
1902
1903 if (!txKickEvent.scheduled())
1904 schedule(txKickEvent, txKickTick);
1905}
1906
1907/**
1908 * Advance the EEPROM state machine
1909 * Called on rising edge of EEPROM clock bit in MEAR
1910 */
1911void
1912NSGigE::eepromKick()
1913{
1914 switch (eepromState) {
1915
1916 case eepromStart:
1917
1918 // Wait for start bit
1919 if (regs.mear & MEAR_EEDI) {
1920 // Set up to get 2 opcode bits
1921 eepromState = eepromGetOpcode;
1922 eepromBitsToRx = 2;
1923 eepromOpcode = 0;
1924 }
1925 break;
1926
1927 case eepromGetOpcode:
1928 eepromOpcode <<= 1;
1929 eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
1930 --eepromBitsToRx;
1931
1932 // Done getting opcode
1933 if (eepromBitsToRx == 0) {
1934 if (eepromOpcode != EEPROM_READ)
1935 panic("only EEPROM reads are implemented!");
1936
1937 // Set up to get address
1938 eepromState = eepromGetAddress;
1939 eepromBitsToRx = 6;
1940 eepromAddress = 0;
1941 }
1942 break;
1943
1944 case eepromGetAddress:
1945 eepromAddress <<= 1;
1946 eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
1947 --eepromBitsToRx;
1948
1949 // Done getting address
1950 if (eepromBitsToRx == 0) {
1951
1952 if (eepromAddress >= EEPROM_SIZE)
1953 panic("EEPROM read access out of range!");
1954
1955 switch (eepromAddress) {
1956
1957 case EEPROM_PMATCH2_ADDR:
1958 eepromData = rom.perfectMatch[5];
1959 eepromData <<= 8;
1960 eepromData += rom.perfectMatch[4];
1961 break;
1962
1963 case EEPROM_PMATCH1_ADDR:
1964 eepromData = rom.perfectMatch[3];
1965 eepromData <<= 8;
1966 eepromData += rom.perfectMatch[2];
1967 break;
1968
1969 case EEPROM_PMATCH0_ADDR:
1970 eepromData = rom.perfectMatch[1];
1971 eepromData <<= 8;
1972 eepromData += rom.perfectMatch[0];
1973 break;
1974
1975 default:
1976 panic("FreeBSD driver only uses EEPROM to read PMATCH!");
1977 }
1978 // Set up to read data
1979 eepromState = eepromRead;
1980 eepromBitsToRx = 16;
1981
1982             // Clear the data-in (EEDI) bit
1983 regs.mear &= ~MEAR_EEDI;
1984 }
1985 break;
1986
1987 case eepromRead:
1988 // Clear Data Out bit
1989 regs.mear &= ~MEAR_EEDO;
1990 // Set bit to value of current EEPROM bit
1991 regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;
1992
1993 eepromData <<= 1;
1994 --eepromBitsToRx;
1995
1996 // All done
1997 if (eepromBitsToRx == 0) {
1998 eepromState = eepromStart;
1999 }
2000 break;
2001
2002 default:
2003 panic("invalid EEPROM state");
2004 }
2005
2006}
2007
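/**
 * If the txFifo still holds data after a transfer finishes, schedule
 * the transmit event for the next clock edge so the remaining data
 * gets sent out.
 */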
2008void
2009NSGigE::transferDone()
2010{
2011 if (txFifo.empty()) {
2012 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2013 return;
2014 }
2015
2016 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2017
2018 reschedule(txEvent, clockEdge(Cycles(1)), true);
2019}
2020
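/**
 * Receive address filter.  Returns true if the packet should be
 * dropped.  Unicast frames pass when acceptUnicast is set, when they
 * perfectly match the ROM address, or when ARP acceptance applies;
 * broadcast and multicast frames pass on their respective accept
 * flags (multicast hashing is faked and accepts everything when
 * enabled).
 */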
2021bool
2022NSGigE::rxFilter(const EthPacketPtr &packet)
2023{
2024 EthPtr eth = packet;
2025 bool drop = true;
2026 string type;
2027
2028 const EthAddr &dst = eth->dst();
2029 if (dst.unicast()) {
2030 // If we're accepting all unicast addresses
2031 if (acceptUnicast)
2032 drop = false;
2033
2034 // If we make a perfect match
2035 if (acceptPerfect && dst == rom.perfectMatch)
2036 drop = false;
2037
2038 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2039 drop = false;
2040
2041 } else if (dst.broadcast()) {
2042 // if we're accepting broadcasts
2043 if (acceptBroadcast)
2044 drop = false;
2045
2046 } else if (dst.multicast()) {
2047 // if we're accepting all multicasts
2048 if (acceptMulticast)
2049 drop = false;
2050
2051 // Multicast hashing faked - all packets accepted
2052 if (multicastHashEnable)
2053 drop = false;
2054 }
2055
2056 if (drop) {
2057 DPRINTF(Ethernet, "rxFilter drop\n");
2058 DDUMP(EthernetData, packet->data, packet->length);
2059 }
2060
2061 return drop;
2062}
2063
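/**
 * Accept a packet from the wire.  Returns true when the packet has
 * been consumed, including packets intentionally dropped because
 * receive is disabled or the filter rejected them; returns false only
 * when the rxFifo has no room, in which case an RX overrun interrupt
 * is posted.
 */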
2064bool
2065NSGigE::recvPacket(EthPacketPtr packet)
2066{
2067 rxBytes += packet->length;
2068 rxPackets++;
2069
2070 DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
2071 rxFifo.avail());
2072
2073 if (!rxEnable) {
2074 DPRINTF(Ethernet, "receive disabled...packet dropped\n");
2075 return true;
2076 }
2077
2078 if (!rxFilterEnable) {
2079 DPRINTF(Ethernet,
2080 "receive packet filtering disabled . . . packet dropped\n");
2081 return true;
2082 }
2083
2084 if (rxFilter(packet)) {
2085 DPRINTF(Ethernet, "packet filtered...dropped\n");
2086 return true;
2087 }
2088
2089 if (rxFifo.avail() < packet->length) {
2090#if TRACING_ON
2091 IpPtr ip(packet);
2092 TcpPtr tcp(ip);
2093 if (ip) {
2094 DPRINTF(Ethernet,
2095 "packet won't fit in receive buffer...pkt ID %d dropped\n",
2096 ip->id());
2097 if (tcp) {
2098 DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
2099 }
2100 }
2101#endif
2102 droppedPackets++;
2103 devIntrPost(ISR_RXORN);
2104 return false;
2105 }
2106
2107 rxFifo.push(packet);
2108
2109 rxKick();
2110 return true;
2111}
2112
2113
2114void
2115NSGigE::drainResume()
2116{
2117 Drainable::drainResume();
2118
2119     // During drain we could have left the state machines in a waiting state,
2120     // and they wouldn't get out until some other event occurred to kick them.
2121     // This way they'll get out immediately.
2122 txKick();
2123 rxKick();
2124}
2125
2126
2127//=====================================================================
2128//
2129// Serialization / checkpointing
2130void
2131NSGigE::serialize(CheckpointOut &cp) const
2132{
2133 // Serialize the PciDevice base class
2134 PciDevice::serialize(cp);
2135
2136 /*
2137 * Finalize any DMA events now.
2138 */
2139 // @todo will mem system save pending dma?
2140
2141 /*
2142 * Serialize the device registers
2143 */
2144 SERIALIZE_SCALAR(regs.command);
2145 SERIALIZE_SCALAR(regs.config);
2146 SERIALIZE_SCALAR(regs.mear);
2147 SERIALIZE_SCALAR(regs.ptscr);
2148 SERIALIZE_SCALAR(regs.isr);
2149 SERIALIZE_SCALAR(regs.imr);
2150 SERIALIZE_SCALAR(regs.ier);
2151 SERIALIZE_SCALAR(regs.ihr);
2152 SERIALIZE_SCALAR(regs.txdp);
2153 SERIALIZE_SCALAR(regs.txdp_hi);
2154 SERIALIZE_SCALAR(regs.txcfg);
2155 SERIALIZE_SCALAR(regs.gpior);
2156 SERIALIZE_SCALAR(regs.rxdp);
2157 SERIALIZE_SCALAR(regs.rxdp_hi);
2158 SERIALIZE_SCALAR(regs.rxcfg);
2159 SERIALIZE_SCALAR(regs.pqcr);
2160 SERIALIZE_SCALAR(regs.wcsr);
2161 SERIALIZE_SCALAR(regs.pcr);
2162 SERIALIZE_SCALAR(regs.rfcr);
2163 SERIALIZE_SCALAR(regs.rfdr);
2164 SERIALIZE_SCALAR(regs.brar);
2165 SERIALIZE_SCALAR(regs.brdr);
2166 SERIALIZE_SCALAR(regs.srr);
2167 SERIALIZE_SCALAR(regs.mibc);
2168 SERIALIZE_SCALAR(regs.vrcr);
2169 SERIALIZE_SCALAR(regs.vtcr);
2170 SERIALIZE_SCALAR(regs.vdr);
2171 SERIALIZE_SCALAR(regs.ccsr);
2172 SERIALIZE_SCALAR(regs.tbicr);
2173 SERIALIZE_SCALAR(regs.tbisr);
2174 SERIALIZE_SCALAR(regs.tanar);
2175 SERIALIZE_SCALAR(regs.tanlpar);
2176 SERIALIZE_SCALAR(regs.taner);
2177 SERIALIZE_SCALAR(regs.tesr);
2178
2179 SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2180 SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2181
2182 SERIALIZE_SCALAR(ioEnable);
2183
2184 /*
2185 * Serialize the data Fifos
2186 */
2187 rxFifo.serialize("rxFifo", cp);
2188 txFifo.serialize("txFifo", cp);
2189
2190 /*
2191 * Serialize the various helper variables
2192 */
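    // A partially assembled packet is checkpointed along with the
    // offset of the write pointer into its data buffer, so the
    // in-flight transfer can resume after a restore.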
2193 bool txPacketExists = txPacket != nullptr;
2194 SERIALIZE_SCALAR(txPacketExists);
2195 if (txPacketExists) {
2196 txPacket->simLength = txPacketBufPtr - txPacket->data;
2197 txPacket->length = txPacketBufPtr - txPacket->data;
2198 txPacket->serialize("txPacket", cp);
2199 uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
2200 SERIALIZE_SCALAR(txPktBufPtr);
2201 }
2202
2203 bool rxPacketExists = rxPacket != nullptr;
2204 SERIALIZE_SCALAR(rxPacketExists);
2205 if (rxPacketExists) {
2206 rxPacket->serialize("rxPacket", cp);
2207 uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
2208 SERIALIZE_SCALAR(rxPktBufPtr);
2209 }
2210
2211 SERIALIZE_SCALAR(txXferLen);
2212 SERIALIZE_SCALAR(rxXferLen);
2213
2214 /*
2215 * Serialize Cached Descriptors
2216 */
2217 SERIALIZE_SCALAR(rxDesc64.link);
2218 SERIALIZE_SCALAR(rxDesc64.bufptr);
2219 SERIALIZE_SCALAR(rxDesc64.cmdsts);
2220 SERIALIZE_SCALAR(rxDesc64.extsts);
2221 SERIALIZE_SCALAR(txDesc64.link);
2222 SERIALIZE_SCALAR(txDesc64.bufptr);
2223 SERIALIZE_SCALAR(txDesc64.cmdsts);
2224 SERIALIZE_SCALAR(txDesc64.extsts);
2225 SERIALIZE_SCALAR(rxDesc32.link);
2226 SERIALIZE_SCALAR(rxDesc32.bufptr);
2227 SERIALIZE_SCALAR(rxDesc32.cmdsts);
2228 SERIALIZE_SCALAR(rxDesc32.extsts);
2229 SERIALIZE_SCALAR(txDesc32.link);
2230 SERIALIZE_SCALAR(txDesc32.bufptr);
2231 SERIALIZE_SCALAR(txDesc32.cmdsts);
2232 SERIALIZE_SCALAR(txDesc32.extsts);
2233 SERIALIZE_SCALAR(extstsEnable);
2234
2235 /*
2236 * Serialize tx state machine
2237 */
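    // The enum-typed state fields are serialized as plain ints and
    // cast back to their enum types in unserialize().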
2238 int txState = this->txState;
2239 SERIALIZE_SCALAR(txState);
2240 SERIALIZE_SCALAR(txEnable);
2241 SERIALIZE_SCALAR(CTDD);
2242 SERIALIZE_SCALAR(txFragPtr);
2243 SERIALIZE_SCALAR(txDescCnt);
2244 int txDmaState = this->txDmaState;
2245 SERIALIZE_SCALAR(txDmaState);
2246 SERIALIZE_SCALAR(txKickTick);
2247
2248 /*
2249 * Serialize rx state machine
2250 */
2251 int rxState = this->rxState;
2252 SERIALIZE_SCALAR(rxState);
2253 SERIALIZE_SCALAR(rxEnable);
2254 SERIALIZE_SCALAR(CRDD);
2255 SERIALIZE_SCALAR(rxPktBytes);
2256 SERIALIZE_SCALAR(rxFragPtr);
2257 SERIALIZE_SCALAR(rxDescCnt);
2258 int rxDmaState = this->rxDmaState;
2259 SERIALIZE_SCALAR(rxDmaState);
2260 SERIALIZE_SCALAR(rxKickTick);
2261
2262 /*
2263 * Serialize EEPROM state machine
2264 */
2265 int eepromState = this->eepromState;
2266 SERIALIZE_SCALAR(eepromState);
2267 SERIALIZE_SCALAR(eepromClk);
2268 SERIALIZE_SCALAR(eepromBitsToRx);
2269 SERIALIZE_SCALAR(eepromOpcode);
2270 SERIALIZE_SCALAR(eepromAddress);
2271 SERIALIZE_SCALAR(eepromData);
2272
2273 /*
2274 * If there's a pending transmit, store the time so we can
2275 * reschedule it later
2276 */
2277 Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick() : 0;
2278 SERIALIZE_SCALAR(transmitTick);
2279
2280 /*
2281 * receive address filter settings
2282 */
2283 SERIALIZE_SCALAR(rxFilterEnable);
2284 SERIALIZE_SCALAR(acceptBroadcast);
2285 SERIALIZE_SCALAR(acceptMulticast);
2286 SERIALIZE_SCALAR(acceptUnicast);
2287 SERIALIZE_SCALAR(acceptPerfect);
2288 SERIALIZE_SCALAR(acceptArp);
2289 SERIALIZE_SCALAR(multicastHashEnable);
2290
2291 /*
2292 * Keep track of pending interrupt status.
2293 */
2294 SERIALIZE_SCALAR(intrTick);
2295 SERIALIZE_SCALAR(cpuPendingIntr);
2296 Tick intrEventTick = 0;
2297 if (intrEvent)
2298 intrEventTick = intrEvent->when();
2299 SERIALIZE_SCALAR(intrEventTick);
2300
2301}
2302
2303void
2304NSGigE::unserialize(CheckpointIn &cp)
2305{
2306 // Unserialize the PciDevice base class
2307 PciDevice::unserialize(cp);
2308
2309 UNSERIALIZE_SCALAR(regs.command);
2310 UNSERIALIZE_SCALAR(regs.config);
2311 UNSERIALIZE_SCALAR(regs.mear);
2312 UNSERIALIZE_SCALAR(regs.ptscr);
2313 UNSERIALIZE_SCALAR(regs.isr);
2314 UNSERIALIZE_SCALAR(regs.imr);
2315 UNSERIALIZE_SCALAR(regs.ier);
2316 UNSERIALIZE_SCALAR(regs.ihr);
2317 UNSERIALIZE_SCALAR(regs.txdp);
2318 UNSERIALIZE_SCALAR(regs.txdp_hi);
2319 UNSERIALIZE_SCALAR(regs.txcfg);
2320 UNSERIALIZE_SCALAR(regs.gpior);
2321 UNSERIALIZE_SCALAR(regs.rxdp);
2322 UNSERIALIZE_SCALAR(regs.rxdp_hi);
2323 UNSERIALIZE_SCALAR(regs.rxcfg);
2324 UNSERIALIZE_SCALAR(regs.pqcr);
2325 UNSERIALIZE_SCALAR(regs.wcsr);
2326 UNSERIALIZE_SCALAR(regs.pcr);
2327 UNSERIALIZE_SCALAR(regs.rfcr);
2328 UNSERIALIZE_SCALAR(regs.rfdr);
2329 UNSERIALIZE_SCALAR(regs.brar);
2330 UNSERIALIZE_SCALAR(regs.brdr);
2331 UNSERIALIZE_SCALAR(regs.srr);
2332 UNSERIALIZE_SCALAR(regs.mibc);
2333 UNSERIALIZE_SCALAR(regs.vrcr);
2334 UNSERIALIZE_SCALAR(regs.vtcr);
2335 UNSERIALIZE_SCALAR(regs.vdr);
2336 UNSERIALIZE_SCALAR(regs.ccsr);
2337 UNSERIALIZE_SCALAR(regs.tbicr);
2338 UNSERIALIZE_SCALAR(regs.tbisr);
2339 UNSERIALIZE_SCALAR(regs.tanar);
2340 UNSERIALIZE_SCALAR(regs.tanlpar);
2341 UNSERIALIZE_SCALAR(regs.taner);
2342 UNSERIALIZE_SCALAR(regs.tesr);
2343
2344 UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2345 UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2346
2347 UNSERIALIZE_SCALAR(ioEnable);
2348
2349 /*
2350 * unserialize the data fifos
2351 */
2352 rxFifo.unserialize("rxFifo", cp);
2353 txFifo.unserialize("txFifo", cp);
2354
2355 /*
2356 * unserialize the various helper variables
2357 */
2358 bool txPacketExists;
2359 UNSERIALIZE_SCALAR(txPacketExists);
2360 if (txPacketExists) {
2361 txPacket = make_shared<EthPacketData>(16384);
2362 txPacket->unserialize("txPacket", cp);
2363 uint32_t txPktBufPtr;
2364 UNSERIALIZE_SCALAR(txPktBufPtr);
2365 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
2366 } else
2367 txPacket = 0;
2368
2369     bool rxPacketExists;
2370     UNSERIALIZE_SCALAR(rxPacketExists);
2372     if (rxPacketExists) {
2373         rxPacket = make_shared<EthPacketData>();
2374         rxPacket->unserialize("rxPacket", cp);
2375         uint32_t rxPktBufPtr;
2376         UNSERIALIZE_SCALAR(rxPktBufPtr);
2377         rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
2378     } else
2379         rxPacket = 0;
2380
2381 UNSERIALIZE_SCALAR(txXferLen);
2382 UNSERIALIZE_SCALAR(rxXferLen);
2383
2384 /*
2385 * Unserialize Cached Descriptors
2386 */
2387 UNSERIALIZE_SCALAR(rxDesc64.link);
2388 UNSERIALIZE_SCALAR(rxDesc64.bufptr);
2389 UNSERIALIZE_SCALAR(rxDesc64.cmdsts);
2390 UNSERIALIZE_SCALAR(rxDesc64.extsts);
2391 UNSERIALIZE_SCALAR(txDesc64.link);
2392 UNSERIALIZE_SCALAR(txDesc64.bufptr);
2393 UNSERIALIZE_SCALAR(txDesc64.cmdsts);
2394 UNSERIALIZE_SCALAR(txDesc64.extsts);
2395 UNSERIALIZE_SCALAR(rxDesc32.link);
2396 UNSERIALIZE_SCALAR(rxDesc32.bufptr);
2397 UNSERIALIZE_SCALAR(rxDesc32.cmdsts);
2398 UNSERIALIZE_SCALAR(rxDesc32.extsts);
2399 UNSERIALIZE_SCALAR(txDesc32.link);
2400 UNSERIALIZE_SCALAR(txDesc32.bufptr);
2401 UNSERIALIZE_SCALAR(txDesc32.cmdsts);
2402 UNSERIALIZE_SCALAR(txDesc32.extsts);
2403 UNSERIALIZE_SCALAR(extstsEnable);
2404
2405 /*
2406 * unserialize tx state machine
2407 */
2408 int txState;
2409 UNSERIALIZE_SCALAR(txState);
2410 this->txState = (TxState) txState;
2411 UNSERIALIZE_SCALAR(txEnable);
2412 UNSERIALIZE_SCALAR(CTDD);
2413 UNSERIALIZE_SCALAR(txFragPtr);
2414 UNSERIALIZE_SCALAR(txDescCnt);
2415 int txDmaState;
2416 UNSERIALIZE_SCALAR(txDmaState);
2417 this->txDmaState = (DmaState) txDmaState;
2418 UNSERIALIZE_SCALAR(txKickTick);
2419 if (txKickTick)
2420 schedule(txKickEvent, txKickTick);
2421
2422 /*
2423 * unserialize rx state machine
2424 */
2425 int rxState;
2426 UNSERIALIZE_SCALAR(rxState);
2427 this->rxState = (RxState) rxState;
2428 UNSERIALIZE_SCALAR(rxEnable);
2429 UNSERIALIZE_SCALAR(CRDD);
2430 UNSERIALIZE_SCALAR(rxPktBytes);
2431 UNSERIALIZE_SCALAR(rxFragPtr);
2432 UNSERIALIZE_SCALAR(rxDescCnt);
2433 int rxDmaState;
2434 UNSERIALIZE_SCALAR(rxDmaState);
2435 this->rxDmaState = (DmaState) rxDmaState;
2436 UNSERIALIZE_SCALAR(rxKickTick);
2437 if (rxKickTick)
2438 schedule(rxKickEvent, rxKickTick);
2439
2440 /*
2441 * Unserialize EEPROM state machine
2442 */
2443 int eepromState;
2444 UNSERIALIZE_SCALAR(eepromState);
2445 this->eepromState = (EEPROMState) eepromState;
2446 UNSERIALIZE_SCALAR(eepromClk);
2447 UNSERIALIZE_SCALAR(eepromBitsToRx);
2448 UNSERIALIZE_SCALAR(eepromOpcode);
2449 UNSERIALIZE_SCALAR(eepromAddress);
2450 UNSERIALIZE_SCALAR(eepromData);
2451
2452 /*
2453 * If there's a pending transmit, reschedule it now
2454 */
2455 Tick transmitTick;
2456 UNSERIALIZE_SCALAR(transmitTick);
2457 if (transmitTick)
2458 schedule(txEvent, curTick() + transmitTick);
2459
2460 /*
2461 * unserialize receive address filter settings
2462 */
2463 UNSERIALIZE_SCALAR(rxFilterEnable);
2464 UNSERIALIZE_SCALAR(acceptBroadcast);
2465 UNSERIALIZE_SCALAR(acceptMulticast);
2466 UNSERIALIZE_SCALAR(acceptUnicast);
2467 UNSERIALIZE_SCALAR(acceptPerfect);
2468 UNSERIALIZE_SCALAR(acceptArp);
2469 UNSERIALIZE_SCALAR(multicastHashEnable);
2470
2471 /*
2472 * Keep track of pending interrupt status.
2473 */
2474 UNSERIALIZE_SCALAR(intrTick);
2475 UNSERIALIZE_SCALAR(cpuPendingIntr);
2476 Tick intrEventTick;
2477 UNSERIALIZE_SCALAR(intrEventTick);
2478 if (intrEventTick) {
2479 intrEvent = new EventFunctionWrapper([this]{ cpuInterrupt(); },
2480 name(), true);
2481 schedule(intrEvent, intrEventTick);
2482 }
2483}
2484
2485NSGigE *
2486NSGigEParams::create()
2487{
2488 return new NSGigE(this);
2489}