// sinic.cc — revision 2008 (recovered from a line-numbered listing dump)
1/* 2 * Copyright (c) 2004-2005 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 */ 28 29#include <cstdio> 30#include <deque> 31#include <string> 32 33#include "base/inet.hh" 34#include "cpu/exec_context.hh" 35#include "cpu/intr_control.hh" 36#include "dev/etherlink.hh" 37#include "dev/sinic.hh" 38#include "dev/pciconfigall.hh" 39#include "mem/bus/bus.hh" 40#include "mem/bus/dma_interface.hh" 41#include "mem/bus/pio_interface.hh" 42#include "mem/bus/pio_interface_impl.hh" 43#include "mem/functional/memory_control.hh" 44#include "mem/functional/physical.hh" 45#include "sim/builder.hh" 46#include "sim/debug.hh" 47#include "sim/eventq.hh" 48#include "sim/host.hh" 49#include "sim/stats.hh" 50#include "targetarch/vtophys.hh" 51 52using namespace Net; 53 54namespace Sinic { 55 56const char *RxStateStrings[] = 57{ 58 "rxIdle", 59 "rxFifoBlock", 60 "rxBeginCopy", 61 "rxCopy", 62 "rxCopyDone" 63}; 64 65const char *TxStateStrings[] = 66{ 67 "txIdle", 68 "txFifoBlock", 69 "txBeginCopy", 70 "txCopy", 71 "txCopyDone" 72}; 73 74 75/////////////////////////////////////////////////////////////////////// 76// 77// Sinic PCI Device 78// 79Base::Base(Params *p) 80 : PciDev(p), rxEnable(false), txEnable(false), clock(p->clock), 81 intrDelay(p->intr_delay), intrTick(0), cpuIntrEnable(false), 82 cpuPendingIntr(false), intrEvent(0), interface(NULL) 83{ 84} 85 86Device::Device(Params *p) 87 : Base(p), plat(p->plat), physmem(p->physmem), 88 rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), 89 rxKickTick(0), txKickTick(0), 90 txEvent(this), rxDmaEvent(this), txDmaEvent(this), 91 dmaReadDelay(p->dma_read_delay), dmaReadFactor(p->dma_read_factor), 92 dmaWriteDelay(p->dma_write_delay), dmaWriteFactor(p->dma_write_factor) 93{ 94 reset(); 95 96 if (p->pio_bus) { 97 pioInterface = newPioInterface(p->name + ".pio", p->hier, p->pio_bus, 98 this, &Device::cacheAccess); 99 pioLatency = p->pio_latency * p->pio_bus->clockRate; 100 } 101 102 if (p->header_bus) { 103 if (p->payload_bus) 104 dmaInterface = new DMAInterface<Bus>(p->name + ".dma", 105 p->header_bus, 106 
p->payload_bus, 1, 107 p->dma_no_allocate); 108 else 109 dmaInterface = new DMAInterface<Bus>(p->name + ".dma", 110 p->header_bus, 111 p->header_bus, 1, 112 p->dma_no_allocate); 113 } else if (p->payload_bus) 114 panic("must define a header bus if defining a payload bus"); 115 116 pioDelayWrite = p->pio_delay_write && pioInterface; 117} 118 119Device::~Device() 120{} 121 122void 123Device::regStats() 124{ 125 rxBytes 126 .name(name() + ".rxBytes") 127 .desc("Bytes Received") 128 .prereq(rxBytes) 129 ; 130 131 rxBandwidth 132 .name(name() + ".rxBandwidth") 133 .desc("Receive Bandwidth (bits/s)") 134 .precision(0) 135 .prereq(rxBytes) 136 ; 137 138 rxPackets 139 .name(name() + ".rxPackets") 140 .desc("Number of Packets Received") 141 .prereq(rxBytes) 142 ; 143 144 rxPacketRate 145 .name(name() + ".rxPPS") 146 .desc("Packet Reception Rate (packets/s)") 147 .precision(0) 148 .prereq(rxBytes) 149 ; 150 151 rxIpPackets 152 .name(name() + ".rxIpPackets") 153 .desc("Number of IP Packets Received") 154 .prereq(rxBytes) 155 ; 156 157 rxTcpPackets 158 .name(name() + ".rxTcpPackets") 159 .desc("Number of Packets Received") 160 .prereq(rxBytes) 161 ; 162 163 rxUdpPackets 164 .name(name() + ".rxUdpPackets") 165 .desc("Number of UDP Packets Received") 166 .prereq(rxBytes) 167 ; 168 169 rxIpChecksums 170 .name(name() + ".rxIpChecksums") 171 .desc("Number of rx IP Checksums done by device") 172 .precision(0) 173 .prereq(rxBytes) 174 ; 175 176 rxTcpChecksums 177 .name(name() + ".rxTcpChecksums") 178 .desc("Number of rx TCP Checksums done by device") 179 .precision(0) 180 .prereq(rxBytes) 181 ; 182 183 rxUdpChecksums 184 .name(name() + ".rxUdpChecksums") 185 .desc("Number of rx UDP Checksums done by device") 186 .precision(0) 187 .prereq(rxBytes) 188 ; 189 190 totBandwidth 191 .name(name() + ".totBandwidth") 192 .desc("Total Bandwidth (bits/s)") 193 .precision(0) 194 .prereq(totBytes) 195 ; 196 197 totPackets 198 .name(name() + ".totPackets") 199 .desc("Total Packets") 200 
.precision(0) 201 .prereq(totBytes) 202 ; 203 204 totBytes 205 .name(name() + ".totBytes") 206 .desc("Total Bytes") 207 .precision(0) 208 .prereq(totBytes) 209 ; 210 211 totPacketRate 212 .name(name() + ".totPPS") 213 .desc("Total Tranmission Rate (packets/s)") 214 .precision(0) 215 .prereq(totBytes) 216 ; 217 218 txBytes 219 .name(name() + ".txBytes") 220 .desc("Bytes Transmitted") 221 .prereq(txBytes) 222 ; 223 224 txBandwidth 225 .name(name() + ".txBandwidth") 226 .desc("Transmit Bandwidth (bits/s)") 227 .precision(0) 228 .prereq(txBytes) 229 ; 230 231 txPackets 232 .name(name() + ".txPackets") 233 .desc("Number of Packets Transmitted") 234 .prereq(txBytes) 235 ; 236 237 txPacketRate 238 .name(name() + ".txPPS") 239 .desc("Packet Tranmission Rate (packets/s)") 240 .precision(0) 241 .prereq(txBytes) 242 ; 243 244 txIpPackets 245 .name(name() + ".txIpPackets") 246 .desc("Number of IP Packets Transmitted") 247 .prereq(txBytes) 248 ; 249 250 txTcpPackets 251 .name(name() + ".txTcpPackets") 252 .desc("Number of TCP Packets Transmitted") 253 .prereq(txBytes) 254 ; 255 256 txUdpPackets 257 .name(name() + ".txUdpPackets") 258 .desc("Number of Packets Transmitted") 259 .prereq(txBytes) 260 ; 261 262 txIpChecksums 263 .name(name() + ".txIpChecksums") 264 .desc("Number of tx IP Checksums done by device") 265 .precision(0) 266 .prereq(txBytes) 267 ; 268 269 txTcpChecksums 270 .name(name() + ".txTcpChecksums") 271 .desc("Number of tx TCP Checksums done by device") 272 .precision(0) 273 .prereq(txBytes) 274 ; 275 276 txUdpChecksums 277 .name(name() + ".txUdpChecksums") 278 .desc("Number of tx UDP Checksums done by device") 279 .precision(0) 280 .prereq(txBytes) 281 ; 282 283 txBandwidth = txBytes * Stats::constant(8) / simSeconds; 284 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds; 285 totBandwidth = txBandwidth + rxBandwidth; 286 totBytes = txBytes + rxBytes; 287 totPackets = txPackets + rxPackets; 288 txPacketRate = txPackets / simSeconds; 289 rxPacketRate = 
rxPackets / simSeconds; 290} 291 292/** 293 * This is to write to the PCI general configuration registers 294 */ 295void 296Device::writeConfig(int offset, int size, const uint8_t *data) 297{ 298 switch (offset) { 299 case PCI0_BASE_ADDR0: 300 // Need to catch writes to BARs to update the PIO interface 301 PciDev::writeConfig(offset, size, data); 302 if (BARAddrs[0] != 0) { 303 if (pioInterface) 304 pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0])); 305 306 BARAddrs[0] &= EV5::PAddrUncachedMask; 307 } 308 break; 309 310 default: 311 PciDev::writeConfig(offset, size, data); 312 } 313} 314 315void 316Device::prepareIO(int cpu, int index) 317{ 318 int size = virtualRegs.size(); 319 if (index < size) 320 return; 321 322 virtualRegs.resize(index + 1); 323 for (int i = size; i <= index; ++i) 324 virtualRegs[i].rxPacket = rxFifo.end(); 325} 326 327void 328Device::prepareRead(int cpu, int index) 329{ 330 using namespace Regs; 331 prepareIO(cpu, index); 332 333 VirtualReg &vnic = virtualRegs[index]; 334 335 // update rx registers 336 uint64_t rxdone = vnic.RxDone; 337 rxdone = set_RxDone_Packets(rxdone, rxFifo.packets()); 338 regs.RxData = vnic.RxData; 339 regs.RxDone = rxdone; 340 regs.RxWait = rxdone; 341 342 // update tx regsiters 343 uint64_t txdone = vnic.TxDone; 344 txdone = set_TxDone_Packets(txdone, txFifo.packets()); 345 txdone = set_TxDone_Full(txdone, txFifo.avail() < regs.TxMaxCopy); 346 txdone = set_TxDone_Low(txdone, txFifo.size() < regs.TxFifoMark); 347 regs.TxData = vnic.TxData; 348 regs.TxDone = txdone; 349 regs.TxWait = txdone; 350} 351 352void 353Device::prepareWrite(int cpu, int index) 354{ 355 if (cpu >= writeQueue.size()) 356 writeQueue.resize(cpu + 1); 357 358 prepareIO(cpu, index); 359} 360 361/** 362 * I/O read of device register 363 */ 364Fault 365Device::read(MemReqPtr &req, uint8_t *data) 366{ 367 assert(config.command & PCI_CMD_MSE); 368 Fault fault = readBar(req, data); 369 370 if (fault == Machine_Check_Fault) { 371 panic("address 
does not map to a BAR pa=%#x va=%#x size=%d", 372 req->paddr, req->vaddr, req->size); 373 374 return Machine_Check_Fault; 375 } 376 377 return fault; 378} 379 380Fault 381Device::readBar0(MemReqPtr &req, Addr daddr, uint8_t *data) 382{ 383 int cpu = (req->xc->regs.ipr[TheISA::IPR_PALtemp16] >> 8) & 0xff; 384 Addr index = daddr >> Regs::VirtualShift; 385 Addr raddr = daddr & Regs::VirtualMask; 386 387 if (!regValid(raddr)) 388 panic("invalid register: cpu=%d, da=%#x pa=%#x va=%#x size=%d", 389 cpu, daddr, req->paddr, req->vaddr, req->size); 390 391 const Regs::Info &info = regInfo(raddr); 392 if (!info.read) 393 panic("reading %s (write only): cpu=%d da=%#x pa=%#x va=%#x size=%d", 394 info.name, cpu, daddr, req->paddr, req->vaddr, req->size); 395 396 if (req->size != info.size) 397 panic("invalid size for reg %s: cpu=%d da=%#x pa=%#x va=%#x size=%d", 398 info.name, cpu, daddr, req->paddr, req->vaddr, req->size); 399 400 prepareRead(cpu, index); 401 402 uint64_t value = 0; 403 if (req->size == 4) { 404 uint32_t ® = *(uint32_t *)data; 405 reg = regData32(raddr); 406 value = reg; 407 } 408 409 if (req->size == 8) { 410 uint64_t ® = *(uint64_t *)data; 411 reg = regData64(raddr); 412 value = reg; 413 } 414 415 DPRINTF(EthernetPIO, 416 "read %s cpu=%d da=%#x pa=%#x va=%#x size=%d val=%#x\n", 417 info.name, cpu, daddr, req->paddr, req->vaddr, req->size, value); 418 419 // reading the interrupt status register has the side effect of 420 // clearing it 421 if (raddr == Regs::IntrStatus) 422 devIntrClear(); 423 424 return No_Fault; 425} 426 427/** 428 * IPR read of device register 429 */ 430Fault 431Device::iprRead(Addr daddr, int cpu, uint64_t &result) 432{ 433 if (!regValid(daddr)) 434 panic("invalid address: da=%#x", daddr); 435 436 const Regs::Info &info = regInfo(daddr); 437 if (!info.read) 438 panic("reading %s (write only): cpu=%d da=%#x", info.name, cpu, daddr); 439 440 DPRINTF(EthernetPIO, "IPR read %s: cpu=%d da=%#x\n", 441 info.name, cpu, daddr); 442 443 
prepareRead(cpu, 0); 444 445 if (info.size == 4) 446 result = regData32(daddr); 447 448 if (info.size == 8) 449 result = regData64(daddr); 450 451 DPRINTF(EthernetPIO, "IPR read %s: cpu=%s da=%#x val=%#x\n", 452 info.name, cpu, result); 453 454 return No_Fault; 455} 456 457/** 458 * I/O write of device register 459 */ 460Fault 461Device::write(MemReqPtr &req, const uint8_t *data) 462{ 463 assert(config.command & PCI_CMD_MSE); 464 Fault fault = writeBar(req, data); 465 466 if (fault == Machine_Check_Fault) { 467 panic("address does not map to a BAR pa=%#x va=%#x size=%d", 468 req->paddr, req->vaddr, req->size); 469 470 return Machine_Check_Fault; 471 } 472 473 return fault; 474} 475 476Fault 477Device::writeBar0(MemReqPtr &req, Addr daddr, const uint8_t *data) 478{ 479 int cpu = (req->xc->regs.ipr[TheISA::IPR_PALtemp16] >> 8) & 0xff; 480 Addr index = daddr >> Regs::VirtualShift; 481 Addr raddr = daddr & Regs::VirtualMask; 482 483 if (!regValid(raddr)) 484 panic("invalid address: cpu=%d da=%#x pa=%#x va=%#x size=%d", 485 cpu, daddr, req->paddr, req->vaddr, req->size); 486 487 const Regs::Info &info = regInfo(raddr); 488 if (!info.write) 489 panic("writing %s (read only): cpu=%d da=%#x", 490 info.name, cpu, daddr); 491 492 if (req->size != info.size) 493 panic("invalid size for %s: cpu=%d da=%#x pa=%#x va=%#x size=%d", 494 info.name, cpu, daddr, req->paddr, req->vaddr, req->size); 495 496 uint32_t reg32 = *(uint32_t *)data; 497 uint64_t reg64 = *(uint64_t *)data; 498 DPRINTF(EthernetPIO, 499 "write %s: cpu=%d val=%#x da=%#x pa=%#x va=%#x size=%d\n", 500 info.name, cpu, info.size == 4 ? 
reg32 : reg64, daddr, 501 req->paddr, req->vaddr, req->size); 502 503 prepareWrite(cpu, index); 504 505 if (pioDelayWrite) 506 writeQueue[cpu].push_back(RegWriteData(daddr, reg64)); 507 508 if (!pioDelayWrite || !info.delay_write) 509 regWrite(daddr, cpu, data); 510 511 return No_Fault; 512} 513 514void 515Device::regWrite(Addr daddr, int cpu, const uint8_t *data) 516{ 517 Addr index = daddr >> Regs::VirtualShift; 518 Addr raddr = daddr & Regs::VirtualMask; 519 520 uint32_t reg32 = *(uint32_t *)data; 521 uint64_t reg64 = *(uint64_t *)data; 522 VirtualReg &vnic = virtualRegs[index]; 523 524 switch (raddr) { 525 case Regs::Config: 526 changeConfig(reg32); 527 break; 528 529 case Regs::Command: 530 command(reg32); 531 break; 532 533 case Regs::IntrStatus: 534 devIntrClear(regs.IntrStatus & reg32); 535 break; 536 537 case Regs::IntrMask: 538 devIntrChangeMask(reg32); 539 break; 540 541 case Regs::RxData: 542 if (Regs::get_RxDone_Busy(vnic.RxDone)) 543 panic("receive machine busy with another request! rxState=%s", 544 RxStateStrings[rxState]); 545 546 vnic.RxDone = Regs::RxDone_Busy; 547 vnic.RxData = reg64; 548 rxList.push_back(index); 549 if (rxEnable && rxState == rxIdle) { 550 rxState = rxFifoBlock; 551 rxKick(); 552 } 553 break; 554 555 case Regs::TxData: 556 if (Regs::get_TxDone_Busy(vnic.TxDone)) 557 panic("transmit machine busy with another request! 
txState=%s", 558 TxStateStrings[txState]); 559 560 vnic.TxDone = Regs::TxDone_Busy; 561 vnic.TxData = reg64; 562 if (txList.empty() || txList.front() != index) 563 txList.push_back(index); 564 if (txEnable && txState == txIdle) { 565 txState = txFifoBlock; 566 txKick(); 567 } 568 break; 569 } 570} 571 572void 573Device::devIntrPost(uint32_t interrupts) 574{ 575 if ((interrupts & Regs::Intr_Res)) 576 panic("Cannot set a reserved interrupt"); 577 578 regs.IntrStatus |= interrupts; 579 580 DPRINTF(EthernetIntr, 581 "interrupt written to intStatus: intr=%#x status=%#x mask=%#x\n", 582 interrupts, regs.IntrStatus, regs.IntrMask); 583 584 interrupts = regs.IntrStatus & regs.IntrMask; 585 586 // Intr_RxHigh is special, we only signal it if we've emptied the fifo 587 // and then filled it above the high watermark 588 if (rxEmpty) 589 rxEmpty = false; 590 else 591 interrupts &= ~Regs::Intr_RxHigh; 592 593 // Intr_TxLow is special, we only signal it if we've filled up the fifo 594 // and then dropped below the low watermark 595 if (txFull) 596 txFull = false; 597 else 598 interrupts &= ~Regs::Intr_TxLow; 599 600 if (interrupts) { 601 Tick when = curTick; 602 if ((interrupts & Regs::Intr_NoDelay) == 0) 603 when += intrDelay; 604 cpuIntrPost(when); 605 } 606} 607 608void 609Device::devIntrClear(uint32_t interrupts) 610{ 611 if ((interrupts & Regs::Intr_Res)) 612 panic("Cannot clear a reserved interrupt"); 613 614 regs.IntrStatus &= ~interrupts; 615 616 DPRINTF(EthernetIntr, 617 "interrupt cleared from intStatus: intr=%x status=%x mask=%x\n", 618 interrupts, regs.IntrStatus, regs.IntrMask); 619 620 if (!(regs.IntrStatus & regs.IntrMask)) 621 cpuIntrClear(); 622} 623 624void 625Device::devIntrChangeMask(uint32_t newmask) 626{ 627 if (regs.IntrMask == newmask) 628 return; 629 630 regs.IntrMask = newmask; 631 632 DPRINTF(EthernetIntr, 633 "interrupt mask changed: intStatus=%x intMask=%x masked=%x\n", 634 regs.IntrStatus, regs.IntrMask, regs.IntrStatus & regs.IntrMask); 635 636 if 
(regs.IntrStatus & regs.IntrMask) 637 cpuIntrPost(curTick); 638 else 639 cpuIntrClear(); 640} 641 642void 643Base::cpuIntrPost(Tick when) 644{ 645 // If the interrupt you want to post is later than an interrupt 646 // already scheduled, just let it post in the coming one and don't 647 // schedule another. 648 // HOWEVER, must be sure that the scheduled intrTick is in the 649 // future (this was formerly the source of a bug) 650 /** 651 * @todo this warning should be removed and the intrTick code should 652 * be fixed. 653 */ 654 assert(when >= curTick); 655 assert(intrTick >= curTick || intrTick == 0); 656 if (!cpuIntrEnable) { 657 DPRINTF(EthernetIntr, "interrupts not enabled.\n", 658 intrTick); 659 return; 660 } 661 662 if (when > intrTick && intrTick != 0) { 663 DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n", 664 intrTick); 665 return; 666 } 667 668 intrTick = when; 669 if (intrTick < curTick) { 670 debug_break(); 671 intrTick = curTick; 672 } 673 674 DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n", 675 intrTick); 676 677 if (intrEvent) 678 intrEvent->squash(); 679 intrEvent = new IntrEvent(this, true); 680 intrEvent->schedule(intrTick); 681} 682 683void 684Base::cpuInterrupt() 685{ 686 assert(intrTick == curTick); 687 688 // Whether or not there's a pending interrupt, we don't care about 689 // it anymore 690 intrEvent = 0; 691 intrTick = 0; 692 693 // Don't send an interrupt if there's already one 694 if (cpuPendingIntr) { 695 DPRINTF(EthernetIntr, 696 "would send an interrupt now, but there's already pending\n"); 697 } else { 698 // Send interrupt 699 cpuPendingIntr = true; 700 701 DPRINTF(EthernetIntr, "posting interrupt\n"); 702 intrPost(); 703 } 704} 705 706void 707Base::cpuIntrClear() 708{ 709 if (!cpuPendingIntr) 710 return; 711 712 if (intrEvent) { 713 intrEvent->squash(); 714 intrEvent = 0; 715 } 716 717 intrTick = 0; 718 719 cpuPendingIntr = false; 720 721 DPRINTF(EthernetIntr, "clearing cchip 
interrupt\n"); 722 intrClear(); 723} 724 725bool 726Base::cpuIntrPending() const 727{ return cpuPendingIntr; } 728 729void 730Device::changeConfig(uint32_t newconf) 731{ 732 uint32_t changed = regs.Config ^ newconf; 733 if (!changed) 734 return; 735 736 regs.Config = newconf; 737 738 if ((changed & Regs::Config_IntEn)) { 739 cpuIntrEnable = regs.Config & Regs::Config_IntEn; 740 if (cpuIntrEnable) { 741 if (regs.IntrStatus & regs.IntrMask) 742 cpuIntrPost(curTick); 743 } else { 744 cpuIntrClear(); 745 } 746 } 747 748 if ((changed & Regs::Config_TxEn)) { 749 txEnable = regs.Config & Regs::Config_TxEn; 750 if (txEnable) 751 txKick(); 752 } 753 754 if ((changed & Regs::Config_RxEn)) { 755 rxEnable = regs.Config & Regs::Config_RxEn; 756 if (rxEnable) 757 rxKick(); 758 } 759} 760 761void 762Device::command(uint32_t command) 763{ 764 if (command & Regs::Command_Intr) 765 devIntrPost(Regs::Intr_Soft); 766 767 if (command & Regs::Command_Reset) 768 reset(); 769} 770 771void 772Device::reset() 773{ 774 using namespace Regs; 775 776 memset(®s, 0, sizeof(regs)); 777 778 regs.Config = 0; 779 if (params()->rx_thread) 780 regs.Config |= Config_RxThread; 781 if (params()->tx_thread) 782 regs.Config |= Config_TxThread; 783 regs.IntrMask = Intr_Soft | Intr_RxHigh | Intr_RxPacket | Intr_TxLow; 784 regs.RxMaxCopy = params()->rx_max_copy; 785 regs.TxMaxCopy = params()->tx_max_copy; 786 regs.RxMaxIntr = params()->rx_max_intr; 787 regs.RxFifoSize = params()->rx_fifo_size; 788 regs.TxFifoSize = params()->tx_fifo_size; 789 regs.RxFifoMark = params()->rx_fifo_threshold; 790 regs.TxFifoMark = params()->tx_fifo_threshold; 791 regs.HwAddr = params()->eaddr; 792 793 rxList.clear(); 794 txList.clear(); 795 796 rxState = rxIdle; 797 txState = txIdle; 798 799 rxFifo.clear(); 800 rxFifoPtr = rxFifo.end(); 801 txFifo.clear(); 802 rxEmpty = false; 803 txFull = false; 804 805 int size = virtualRegs.size(); 806 virtualRegs.clear(); 807 virtualRegs.resize(size); 808 for (int i = 0; i < size; ++i) 809 
virtualRegs[i].rxPacket = rxFifo.end(); 810} 811 812void 813Device::rxDmaCopy() 814{ 815 assert(rxState == rxCopy); 816 rxState = rxCopyDone; 817 physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen); 818 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n", 819 rxDmaAddr, rxDmaLen); 820 DDUMP(EthernetData, rxDmaData, rxDmaLen); 821} 822 823void 824Device::rxDmaDone() 825{ 826 rxDmaCopy(); 827 828 // If the transmit state machine has a pending DMA, let it go first 829 if (txState == txBeginCopy) 830 txKick(); 831 832 rxKick(); 833} 834 835void 836Device::rxKick() 837{ 838 VirtualReg *vnic; 839 840 DPRINTF(EthernetSM, "receive kick rxState=%s (rxFifo.size=%d)\n", 841 RxStateStrings[rxState], rxFifo.size()); 842 843 if (rxKickTick > curTick) { 844 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n", 845 rxKickTick); 846 return; 847 } 848 849 next: 850 if (rxState == rxIdle) 851 goto exit; 852 853 assert(!rxList.empty()); 854 vnic = &virtualRegs[rxList.front()]; 855 856 DPRINTF(EthernetSM, "processing rxState=%s for virtual nic %d\n", 857 RxStateStrings[rxState], rxList.front()); 858 859 switch (rxState) { 860 case rxFifoBlock: 861 if (vnic->rxPacket != rxFifo.end()) { 862 rxState = rxBeginCopy; 863 break; 864 } 865 866 if (rxFifoPtr == rxFifo.end()) { 867 DPRINTF(EthernetSM, "receive waiting for data. Nothing to do.\n"); 868 goto exit; 869 } 870 871 assert(!rxFifo.empty()); 872 873 // Grab a new packet from the fifo. 
874 vnic->rxPacket = rxFifoPtr++; 875 vnic->rxPacketOffset = 0; 876 vnic->rxPacketBytes = (*vnic->rxPacket)->length; 877 assert(vnic->rxPacketBytes); 878 879 vnic->rxDoneData = 0; 880 /* scope for variables */ { 881 IpPtr ip(*vnic->rxPacket); 882 if (ip) { 883 vnic->rxDoneData |= Regs::RxDone_IpPacket; 884 rxIpChecksums++; 885 if (cksum(ip) != 0) { 886 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n"); 887 vnic->rxDoneData |= Regs::RxDone_IpError; 888 } 889 TcpPtr tcp(ip); 890 UdpPtr udp(ip); 891 if (tcp) { 892 vnic->rxDoneData |= Regs::RxDone_TcpPacket; 893 rxTcpChecksums++; 894 if (cksum(tcp) != 0) { 895 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n"); 896 vnic->rxDoneData |= Regs::RxDone_TcpError; 897 } 898 } else if (udp) { 899 vnic->rxDoneData |= Regs::RxDone_UdpPacket; 900 rxUdpChecksums++; 901 if (cksum(udp) != 0) { 902 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n"); 903 vnic->rxDoneData |= Regs::RxDone_UdpError; 904 } 905 } 906 } 907 } 908 rxState = rxBeginCopy; 909 break; 910 911 case rxBeginCopy: 912 if (dmaInterface && dmaInterface->busy()) 913 goto exit; 914 915 rxDmaAddr = plat->pciToDma(Regs::get_RxData_Addr(vnic->RxData)); 916 rxDmaLen = min<int>(Regs::get_RxData_Len(vnic->RxData), 917 vnic->rxPacketBytes); 918 rxDmaData = (*vnic->rxPacket)->data + vnic->rxPacketOffset; 919 rxState = rxCopy; 920 921 if (dmaInterface) { 922 dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, 923 curTick, &rxDmaEvent, true); 924 goto exit; 925 } 926 927 if (dmaWriteDelay != 0 || dmaWriteFactor != 0) { 928 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor; 929 Tick start = curTick + dmaWriteDelay + factor; 930 rxDmaEvent.schedule(start); 931 goto exit; 932 } 933 934 rxDmaCopy(); 935 break; 936 937 case rxCopy: 938 DPRINTF(EthernetSM, "receive machine still copying\n"); 939 goto exit; 940 941 case rxCopyDone: 942 vnic->RxDone = vnic->rxDoneData | rxDmaLen; 943 vnic->RxDone |= Regs::RxDone_Complete; 944 945 if (vnic->rxPacketBytes == rxDmaLen) { 
946 rxFifo.remove(vnic->rxPacket); 947 vnic->rxPacket = rxFifo.end(); 948 } else { 949 vnic->RxDone |= Regs::RxDone_More; 950 vnic->rxPacketBytes -= rxDmaLen; 951 vnic->rxPacketOffset += rxDmaLen; 952 } 953 954 rxList.pop_front(); 955 rxState = rxList.empty() ? rxIdle : rxFifoBlock; 956 957 if (rxFifo.empty()) { 958 devIntrPost(Regs::Intr_RxEmpty); 959 rxEmpty = true; 960 } 961 962 devIntrPost(Regs::Intr_RxDMA); 963 break; 964 965 default: 966 panic("Invalid rxState!"); 967 } 968 969 DPRINTF(EthernetSM, "entering next rxState=%s\n", 970 RxStateStrings[rxState]); 971 972 goto next; 973 974 exit: 975 /** 976 * @todo do we want to schedule a future kick? 977 */ 978 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n", 979 RxStateStrings[rxState]); 980} 981 982void 983Device::txDmaCopy() 984{ 985 assert(txState == txCopy); 986 txState = txCopyDone; 987 physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen); 988 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n", 989 txDmaAddr, txDmaLen); 990 DDUMP(EthernetData, txDmaData, txDmaLen); 991} 992 993void 994Device::txDmaDone() 995{ 996 txDmaCopy(); 997 998 // If the receive state machine has a pending DMA, let it go first 999 if (rxState == rxBeginCopy) 1000 rxKick(); 1001 1002 txKick(); 1003} 1004 1005void 1006Device::transmit() 1007{ 1008 if (txFifo.empty()) { 1009 DPRINTF(Ethernet, "nothing to transmit\n"); 1010 return; 1011 } 1012 1013 uint32_t interrupts; 1014 PacketPtr packet = txFifo.front(); 1015 if (!interface->sendPacket(packet)) { 1016 DPRINTF(Ethernet, "Packet Transmit: failed txFifo available %d\n", 1017 txFifo.avail()); 1018 goto reschedule; 1019 } 1020 1021 txFifo.pop(); 1022#if TRACING_ON 1023 if (DTRACE(Ethernet)) { 1024 IpPtr ip(packet); 1025 if (ip) { 1026 DPRINTF(Ethernet, "ID is %d\n", ip->id()); 1027 TcpPtr tcp(ip); 1028 if (tcp) { 1029 DPRINTF(Ethernet, "Src Port=%d, Dest Port=%d\n", 1030 tcp->sport(), tcp->dport()); 1031 } 1032 } 1033 } 1034#endif 1035 1036 DDUMP(EthernetData, 
packet->data, packet->length); 1037 txBytes += packet->length; 1038 txPackets++; 1039 1040 DPRINTF(Ethernet, "Packet Transmit: successful txFifo Available %d\n", 1041 txFifo.avail()); 1042 1043 interrupts = Regs::Intr_TxPacket; 1044 if (txFifo.size() < regs.TxFifoMark) 1045 interrupts |= Regs::Intr_TxLow; 1046 devIntrPost(interrupts); 1047 1048 reschedule: 1049 if (!txFifo.empty() && !txEvent.scheduled()) { 1050 DPRINTF(Ethernet, "reschedule transmit\n"); 1051 txEvent.schedule(curTick + retryTime); 1052 } 1053} 1054 1055void 1056Device::txKick() 1057{ 1058 VirtualReg *vnic; 1059 DPRINTF(EthernetSM, "transmit kick txState=%s (txFifo.size=%d)\n", 1060 TxStateStrings[txState], txFifo.size()); 1061 1062 if (txKickTick > curTick) { 1063 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n", 1064 txKickTick); 1065 return; 1066 } 1067 1068 next: 1069 if (txState == txIdle) 1070 goto exit; 1071 1072 assert(!txList.empty()); 1073 vnic = &virtualRegs[txList.front()]; 1074 1075 switch (txState) { 1076 case txFifoBlock: 1077 if (!txPacket) { 1078 // Grab a new packet from the fifo. 1079 txPacket = new PacketData(16384); 1080 txPacketOffset = 0; 1081 } 1082 1083 if (txFifo.avail() - txPacket->length < 1084 Regs::get_TxData_Len(vnic->TxData)) { 1085 DPRINTF(EthernetSM, "transmit fifo full. 
Nothing to do.\n"); 1086 goto exit; 1087 } 1088 1089 txState = txBeginCopy; 1090 break; 1091 1092 case txBeginCopy: 1093 if (dmaInterface && dmaInterface->busy()) 1094 goto exit; 1095 1096 txDmaAddr = plat->pciToDma(Regs::get_TxData_Addr(vnic->TxData)); 1097 txDmaLen = Regs::get_TxData_Len(vnic->TxData); 1098 txDmaData = txPacket->data + txPacketOffset; 1099 txState = txCopy; 1100 1101 if (dmaInterface) { 1102 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, 1103 curTick, &txDmaEvent, true); 1104 goto exit; 1105 } 1106 1107 if (dmaReadDelay != 0 || dmaReadFactor != 0) { 1108 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor; 1109 Tick start = curTick + dmaReadDelay + factor; 1110 txDmaEvent.schedule(start); 1111 goto exit; 1112 } 1113 1114 txDmaCopy(); 1115 break; 1116 1117 case txCopy: 1118 DPRINTF(EthernetSM, "transmit machine still copying\n"); 1119 goto exit; 1120 1121 case txCopyDone: 1122 vnic->TxDone = txDmaLen | Regs::TxDone_Complete; 1123 txPacket->length += txDmaLen; 1124 if ((vnic->TxData & Regs::TxData_More)) { 1125 txPacketOffset += txDmaLen; 1126 txState = txIdle; 1127 devIntrPost(Regs::Intr_TxDMA); 1128 break; 1129 } 1130 1131 assert(txPacket->length <= txFifo.avail()); 1132 if ((vnic->TxData & Regs::TxData_Checksum)) { 1133 IpPtr ip(txPacket); 1134 if (ip) { 1135 TcpPtr tcp(ip); 1136 if (tcp) { 1137 tcp->sum(0); 1138 tcp->sum(cksum(tcp)); 1139 txTcpChecksums++; 1140 } 1141 1142 UdpPtr udp(ip); 1143 if (udp) { 1144 udp->sum(0); 1145 udp->sum(cksum(udp)); 1146 txUdpChecksums++; 1147 } 1148 1149 ip->sum(0); 1150 ip->sum(cksum(ip)); 1151 txIpChecksums++; 1152 } 1153 } 1154 1155 txFifo.push(txPacket); 1156 if (txFifo.avail() < regs.TxMaxCopy) { 1157 devIntrPost(Regs::Intr_TxFull); 1158 txFull = true; 1159 } 1160 txPacket = 0; 1161 transmit(); 1162 txList.pop_front(); 1163 txState = txList.empty() ? 
txIdle : txFifoBlock; 1164 devIntrPost(Regs::Intr_TxDMA); 1165 break; 1166 1167 default: 1168 panic("Invalid txState!"); 1169 } 1170 1171 DPRINTF(EthernetSM, "entering next txState=%s\n", 1172 TxStateStrings[txState]); 1173 1174 goto next; 1175 1176 exit: 1177 /** 1178 * @todo do we want to schedule a future kick? 1179 */ 1180 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n", 1181 TxStateStrings[txState]); 1182} 1183 1184void 1185Device::transferDone() 1186{ 1187 if (txFifo.empty()) { 1188 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n"); 1189 return; 1190 } 1191 1192 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n"); 1193 1194 if (txEvent.scheduled()) 1195 txEvent.reschedule(curTick + cycles(1)); 1196 else 1197 txEvent.schedule(curTick + cycles(1)); 1198} 1199 1200bool 1201Device::rxFilter(const PacketPtr &packet) 1202{ 1203 if (!Regs::get_Config_Filter(regs.Config)) 1204 return false; 1205 1206 panic("receive filter not implemented\n"); 1207 bool drop = true; 1208 1209#if 0 1210 string type; 1211 1212 EthHdr *eth = packet->eth(); 1213 if (eth->unicast()) { 1214 // If we're accepting all unicast addresses 1215 if (acceptUnicast) 1216 drop = false; 1217 1218 // If we make a perfect match 1219 if (acceptPerfect && params->eaddr == eth.dst()) 1220 drop = false; 1221 1222 if (acceptArp && eth->type() == ETH_TYPE_ARP) 1223 drop = false; 1224 1225 } else if (eth->broadcast()) { 1226 // if we're accepting broadcasts 1227 if (acceptBroadcast) 1228 drop = false; 1229 1230 } else if (eth->multicast()) { 1231 // if we're accepting all multicasts 1232 if (acceptMulticast) 1233 drop = false; 1234 1235 } 1236 1237 if (drop) { 1238 DPRINTF(Ethernet, "rxFilter drop\n"); 1239 DDUMP(EthernetData, packet->data, packet->length); 1240 } 1241#endif 1242 return drop; 1243} 1244 1245bool 1246Device::recvPacket(PacketPtr packet) 1247{ 1248 rxBytes += packet->length; 1249 rxPackets++; 1250 1251 DPRINTF(Ethernet, "Receiving packet from 
wire, rxFifo Available is %d\n", 1252 rxFifo.avail()); 1253 1254 if (!rxEnable) { 1255 DPRINTF(Ethernet, "receive disabled...packet dropped\n"); 1256 return true; 1257 } 1258 1259 if (rxFilter(packet)) { 1260 DPRINTF(Ethernet, "packet filtered...dropped\n"); 1261 return true; 1262 } 1263 1264 if (rxFifo.size() >= regs.RxFifoMark) 1265 devIntrPost(Regs::Intr_RxHigh); 1266 1267 if (!rxFifo.push(packet)) { 1268 DPRINTF(Ethernet, 1269 "packet will not fit in receive buffer...packet dropped\n"); 1270 return false; 1271 } 1272 1273 // If we were at the last element, back up one ot go to the new 1274 // last element of the list. 1275 if (rxFifoPtr == rxFifo.end()) 1276 --rxFifoPtr; 1277 1278 devIntrPost(Regs::Intr_RxPacket); 1279 rxKick(); 1280 return true; 1281} 1282 1283//===================================================================== 1284// 1285// 1286void 1287Base::serialize(ostream &os) 1288{ 1289 // Serialize the PciDev base class 1290 PciDev::serialize(os); 1291 1292 SERIALIZE_SCALAR(rxEnable); 1293 SERIALIZE_SCALAR(txEnable); 1294 SERIALIZE_SCALAR(cpuIntrEnable); 1295 1296 /* 1297 * Keep track of pending interrupt status. 1298 */ 1299 SERIALIZE_SCALAR(intrTick); 1300 SERIALIZE_SCALAR(cpuPendingIntr); 1301 Tick intrEventTick = 0; 1302 if (intrEvent) 1303 intrEventTick = intrEvent->when(); 1304 SERIALIZE_SCALAR(intrEventTick); 1305} 1306 1307void 1308Base::unserialize(Checkpoint *cp, const std::string §ion) 1309{ 1310 // Unserialize the PciDev base class 1311 PciDev::unserialize(cp, section); 1312 1313 UNSERIALIZE_SCALAR(rxEnable); 1314 UNSERIALIZE_SCALAR(txEnable); 1315 UNSERIALIZE_SCALAR(cpuIntrEnable); 1316 1317 /* 1318 * Keep track of pending interrupt status. 
1319 */ 1320 UNSERIALIZE_SCALAR(intrTick); 1321 UNSERIALIZE_SCALAR(cpuPendingIntr); 1322 Tick intrEventTick; 1323 UNSERIALIZE_SCALAR(intrEventTick); 1324 if (intrEventTick) { 1325 intrEvent = new IntrEvent(this, true); 1326 intrEvent->schedule(intrEventTick); 1327 } 1328} 1329 1330void 1331Device::serialize(ostream &os) 1332{ 1333 // Serialize the PciDev base class 1334 Base::serialize(os); 1335 1336 if (rxState == rxCopy) 1337 panic("can't serialize with an in flight dma request rxState=%s", 1338 RxStateStrings[rxState]); 1339 1340 if (txState == txCopy) 1341 panic("can't serialize with an in flight dma request txState=%s", 1342 TxStateStrings[txState]); 1343 1344 /* 1345 * Serialize the device registers 1346 */ 1347 SERIALIZE_SCALAR(regs.Config); 1348 SERIALIZE_SCALAR(regs.IntrStatus); 1349 SERIALIZE_SCALAR(regs.IntrMask); 1350 SERIALIZE_SCALAR(regs.RxMaxCopy); 1351 SERIALIZE_SCALAR(regs.TxMaxCopy); 1352 SERIALIZE_SCALAR(regs.RxMaxIntr); 1353 SERIALIZE_SCALAR(regs.RxData); 1354 SERIALIZE_SCALAR(regs.RxDone); 1355 SERIALIZE_SCALAR(regs.TxData); 1356 SERIALIZE_SCALAR(regs.TxDone); 1357 1358 /* 1359 * Serialize the virtual nic state 1360 */ 1361 int virtualRegsSize = virtualRegs.size(); 1362 SERIALIZE_SCALAR(virtualRegsSize); 1363 for (int i = 0; i < virtualRegsSize; ++i) { 1364 VirtualReg *vnic = &virtualRegs[i]; 1365 1366 string reg = csprintf("vnic%d", i); 1367 paramOut(os, reg + ".RxData", vnic->RxData); 1368 paramOut(os, reg + ".RxDone", vnic->RxDone); 1369 paramOut(os, reg + ".TxData", vnic->TxData); 1370 paramOut(os, reg + ".TxDone", vnic->TxDone); 1371 1372 PacketFifo::iterator rxFifoPtr; 1373 1374 bool rxPacketExists = vnic->rxPacket != rxFifo.end(); 1375 paramOut(os, reg + ".rxPacketExists", rxPacketExists); 1376 if (rxPacketExists) { 1377 int rxPacket = 0; 1378 PacketFifo::iterator i = rxFifo.begin(); 1379 while (i != vnic->rxPacket) { 1380 assert(i != rxFifo.end()); 1381 ++i; 1382 ++rxPacket; 1383 } 1384 1385 paramOut(os, reg + ".rxPacket", rxPacket); 
            paramOut(os, reg + ".rxPacketOffset", vnic->rxPacketOffset);
            paramOut(os, reg + ".rxPacketBytes", vnic->rxPacketBytes);
        }
        paramOut(os, reg + ".rxDoneData", vnic->rxDoneData);
    }

    VirtualList::iterator i, end;
    int count;

    // vnic lists are written as length + one numbered entry per element.
    int rxListSize = rxList.size();
    SERIALIZE_SCALAR(rxListSize);
    for (count = 0, i = rxList.begin(), end = rxList.end(); i != end; ++i)
        paramOut(os, csprintf("rxList%d", count++), *i);

    int txListSize = txList.size();
    SERIALIZE_SCALAR(txListSize);
    for (count = 0, i = txList.begin(), end = txList.end(); i != end; ++i)
        paramOut(os, csprintf("txList%d", count++), *i);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEmpty);
    rxFifo.serialize("rxFifo", os);

    /*
     * Serialize tx state machine
     */
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txFull);
    txFifo.serialize("txFifo", os);
    // txPacket is only written when one is in progress.
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket->serialize("txPacket", os);
        SERIALIZE_SCALAR(txPacketOffset);
        SERIALIZE_SCALAR(txPacketBytes);
    }

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later (stored relative to curTick; 0 means no
     * event was scheduled).
     */
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);
}

/**
 * Restore the full device state from a checkpoint.  The read order
 * must exactly mirror Device::serialize: registers, vnic lists, rx/tx
 * state machines and fifos, then the per-vnic state (whose rxFifo
 * iterators are rebuilt from stored indices), and finally any pending
 * transmit event.
 */
void
Device::unserialize(Checkpoint *cp, const std::string &section)
{
    // Unserialize the Base class (which also handles PciDev)
    Base::unserialize(cp, section);

    /*
     * Unserialize the device registers
     */
    UNSERIALIZE_SCALAR(regs.Config);
    UNSERIALIZE_SCALAR(regs.IntrStatus);
    UNSERIALIZE_SCALAR(regs.IntrMask);
    UNSERIALIZE_SCALAR(regs.RxMaxCopy);
    UNSERIALIZE_SCALAR(regs.TxMaxCopy);
    UNSERIALIZE_SCALAR(regs.RxMaxIntr);
    UNSERIALIZE_SCALAR(regs.RxData);
    UNSERIALIZE_SCALAR(regs.RxDone);
    UNSERIALIZE_SCALAR(regs.TxData);
    UNSERIALIZE_SCALAR(regs.TxDone);

    int rxListSize;
    UNSERIALIZE_SCALAR(rxListSize);
    rxList.clear();
    for (int i = 0; i < rxListSize; ++i) {
        int value;
        paramIn(cp, section, csprintf("rxList%d", i), value);
        rxList.push_back(value);
    }

    int txListSize;
    UNSERIALIZE_SCALAR(txListSize);
    txList.clear();
    for (int i = 0; i < txListSize; ++i) {
        int value;
        paramIn(cp, section, csprintf("txList%d", i), value);
        txList.push_back(value);
    }

    /*
     * Unserialize rx state machine
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    UNSERIALIZE_SCALAR(rxEmpty);
    this->rxState = (RxState) rxState;
    rxFifo.unserialize("rxFifo", cp, section);

    /*
     * Unserialize tx state machine
     */
    int txState;
    UNSERIALIZE_SCALAR(txState);
    UNSERIALIZE_SCALAR(txFull);
    this->txState = (TxState) txState;
    txFifo.unserialize("txFifo", cp, section);
    bool txPacketExists;
    UNSERIALIZE_SCALAR(txPacketExists);
    txPacket = 0;
    if (txPacketExists) {
        // 16384 matches the buffer size used when the packet was built
        // at transmit time -- TODO confirm against the tx copy path.
        txPacket = new PacketData(16384);
        txPacket->unserialize("txPacket", cp, section);
        UNSERIALIZE_SCALAR(txPacketOffset);
        UNSERIALIZE_SCALAR(txPacketBytes);
    }

    /*
     * unserialize the virtual nic registers/state
     *
     * this must be done after the unserialization of the rxFifo
     * because the packet iterators depend on the fifo being populated
     */
    int virtualRegsSize;
    UNSERIALIZE_SCALAR(virtualRegsSize);
    virtualRegs.clear();
    virtualRegs.resize(virtualRegsSize);
    for (int i = 0; i < virtualRegsSize; ++i) {
        VirtualReg *vnic = &virtualRegs[i];
        string reg = csprintf("vnic%d", i);

        paramIn(cp, section, reg + ".RxData", vnic->RxData);
        paramIn(cp, section, reg + ".RxDone", vnic->RxDone);
        paramIn(cp, section, reg + ".TxData", vnic->TxData);
        paramIn(cp, section, reg + ".TxDone", vnic->TxDone);

        // Rebuild the rxFifo iterator from the stored index; end()
        // means the vnic had no in-progress packet.
        bool rxPacketExists;
        paramIn(cp, section, reg + ".rxPacketExists", rxPacketExists);
        if (rxPacketExists) {
            int rxPacket;
            paramIn(cp, section, reg + ".rxPacket", rxPacket);
            vnic->rxPacket = rxFifo.begin();
            while (rxPacket--)
                ++vnic->rxPacket;

            paramIn(cp, section, reg + ".rxPacketOffset",
                    vnic->rxPacketOffset);
            paramIn(cp, section, reg + ".rxPacketBytes", vnic->rxPacketBytes);
        } else {
            vnic->rxPacket = rxFifo.end();
        }
        paramIn(cp, section, reg + ".rxDoneData", vnic->rxDoneData);
    }

    /*
     * If there's a pending transmit, reschedule it now
     */
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        txEvent.schedule(curTick + transmitTick);

    /*
     * re-add addrRanges to bus bridges
     */
    if (pioInterface) {
        pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
        pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));
    }
}

/**
 * Timing hook for programmed I/O accesses.  Returns the tick at which
 * the access completes (curTick + pioLatency).  When delayed register
 * writes are enabled, a BAR0 write is matched against the entry the
 * earlier functional access queued for this CPU and, if the register
 * is marked delay_write, the write is performed here instead.
 */
Tick
Device::cacheAccess(MemReqPtr &req)
{
    Addr daddr;
    int bar;
    if (!getBAR(req->paddr, daddr, bar))
        panic("address does not map to a BAR pa=%#x va=%#x size=%d",
              req->paddr, req->vaddr, req->size);

    DPRINTF(EthernetPIO, "timing %s to paddr=%#x bar=%d daddr=%#x\n",
            req->cmd.toString(), req->paddr, bar, daddr);

    // Reads, and writes when delaying is disabled, just pay the latency.
    if (!pioDelayWrite || !req->cmd.isWrite())
        return curTick + pioLatency;

    if (bar == 0) {
        // CPU id is stashed in PALtemp16 -- Alpha/PAL-specific;
        // presumably set up by the PAL code (verify against the PALcode).
        int cpu = (req->xc->regs.ipr[TheISA::IPR_PALtemp16] >> 8) & 0xff;
        std::list<RegWriteData> &wq = writeQueue[cpu];
        if (wq.empty())
            panic("WriteQueue for cpu %d empty timing daddr=%#x", cpu, daddr);

        // The timing access must target the same register the
        // functional access recorded.
        const RegWriteData &data = wq.front();
        if (data.daddr != daddr)
            panic("read mismatch on cpu %d, daddr functional=%#x timing=%#x",
                  cpu, data.daddr, daddr);

        const Regs::Info &info = regInfo(data.daddr);
        if (info.delay_write)
            regWrite(daddr, cpu, (uint8_t *)&data.value);

        wq.pop_front();
    }

    return curTick + pioLatency;
}

//
// Sim object configuration glue for the Ethernet interface object.
//
BEGIN_DECLARE_SIM_OBJECT_PARAMS(Interface)

    SimObjectParam<EtherInt *> peer;
    SimObjectParam<Device *> device;

END_DECLARE_SIM_OBJECT_PARAMS(Interface)

BEGIN_INIT_SIM_OBJECT_PARAMS(Interface)

    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(Interface)

CREATE_SIM_OBJECT(Interface)
{
    Interface *dev_int = new Interface(getInstanceName(), device);

    // Wire both ends of the link if a peer was configured.
    EtherInt *p = (EtherInt *)peer;
    if (p) {
        dev_int->setPeer(p);
        p->setPeer(dev_int);
    }

    return dev_int;
}

REGISTER_SIM_OBJECT("SinicInt", Interface)


//
// Sim object configuration glue for the Sinic device itself.
//
BEGIN_DECLARE_SIM_OBJECT_PARAMS(Device)

    Param<Tick> clock;

    Param<Addr> addr;
    SimObjectParam<MemoryController *> mmu;
    SimObjectParam<PhysicalMemory *> physmem;
    SimObjectParam<PciConfigAll *> configspace;
    SimObjectParam<PciConfigData *> configdata;
    SimObjectParam<Platform *> platform;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;

    SimObjectParam<HierParams *> hier;
    SimObjectParam<Bus*> pio_bus;
SimObjectParam<Bus*> dma_bus; 1638 SimObjectParam<Bus*> payload_bus; 1639 Param<Tick> dma_read_delay; 1640 Param<Tick> dma_read_factor; 1641 Param<Tick> dma_write_delay; 1642 Param<Tick> dma_write_factor; 1643 Param<bool> dma_no_allocate; 1644 Param<Tick> pio_latency; 1645 Param<bool> pio_delay_write; 1646 Param<Tick> intr_delay; 1647 1648 Param<Tick> rx_delay; 1649 Param<Tick> tx_delay; 1650 Param<uint32_t> rx_max_copy; 1651 Param<uint32_t> tx_max_copy; 1652 Param<uint32_t> rx_max_intr; 1653 Param<uint32_t> rx_fifo_size; 1654 Param<uint32_t> tx_fifo_size; 1655 Param<uint32_t> rx_fifo_threshold; 1656 Param<uint32_t> tx_fifo_threshold; 1657 1658 Param<bool> rx_filter; 1659 Param<string> hardware_address; 1660 Param<bool> rx_thread; 1661 Param<bool> tx_thread; 1662 1663END_DECLARE_SIM_OBJECT_PARAMS(Device) 1664 1665BEGIN_INIT_SIM_OBJECT_PARAMS(Device) 1666 1667 INIT_PARAM(clock, "State machine cycle time"), 1668 1669 INIT_PARAM(addr, "Device Address"), 1670 INIT_PARAM(mmu, "Memory Controller"), 1671 INIT_PARAM(physmem, "Physical Memory"), 1672 INIT_PARAM(configspace, "PCI Configspace"), 1673 INIT_PARAM(configdata, "PCI Config data"), 1674 INIT_PARAM(platform, "Platform"), 1675 INIT_PARAM(pci_bus, "PCI bus"), 1676 INIT_PARAM(pci_dev, "PCI device number"), 1677 INIT_PARAM(pci_func, "PCI function code"), 1678 1679 INIT_PARAM(hier, "Hierarchy global variables"), 1680 INIT_PARAM(pio_bus, ""), 1681 INIT_PARAM(dma_bus, ""), 1682 INIT_PARAM(payload_bus, "The IO Bus to attach to for payload"), 1683 INIT_PARAM(dma_read_delay, "fixed delay for dma reads"), 1684 INIT_PARAM(dma_read_factor, "multiplier for dma reads"), 1685 INIT_PARAM(dma_write_delay, "fixed delay for dma writes"), 1686 INIT_PARAM(dma_write_factor, "multiplier for dma writes"), 1687 INIT_PARAM(dma_no_allocate, "Should we allocat on read in cache"), 1688 INIT_PARAM(pio_latency, "Programmed IO latency in bus cycles"), 1689 INIT_PARAM(pio_delay_write, ""), 1690 INIT_PARAM(intr_delay, "Interrupt Delay"), 1691 1692 
INIT_PARAM(rx_delay, "Receive Delay"), 1693 INIT_PARAM(tx_delay, "Transmit Delay"), 1694 INIT_PARAM(rx_max_copy, "rx max copy"), 1695 INIT_PARAM(tx_max_copy, "rx max copy"), 1696 INIT_PARAM(rx_max_intr, "rx max intr"), 1697 INIT_PARAM(rx_fifo_size, "max size in bytes of rxFifo"), 1698 INIT_PARAM(tx_fifo_size, "max size in bytes of txFifo"), 1699 INIT_PARAM(rx_fifo_threshold, "max size in bytes of rxFifo"), 1700 INIT_PARAM(tx_fifo_threshold, "max size in bytes of txFifo"), 1701 1702 INIT_PARAM(rx_filter, "Enable Receive Filter"), 1703 INIT_PARAM(hardware_address, "Ethernet Hardware Address"), 1704 INIT_PARAM(rx_thread, ""), 1705 INIT_PARAM(tx_thread, "") 1706 1707END_INIT_SIM_OBJECT_PARAMS(Device) 1708 1709 1710CREATE_SIM_OBJECT(Device) 1711{ 1712 Device::Params *params = new Device::Params; 1713 1714 params->name = getInstanceName(); 1715 1716 params->clock = clock; 1717 1718 params->mmu = mmu; 1719 params->physmem = physmem; 1720 params->configSpace = configspace; 1721 params->configData = configdata; 1722 params->plat = platform; 1723 params->busNum = pci_bus; 1724 params->deviceNum = pci_dev; 1725 params->functionNum = pci_func; 1726 1727 params->hier = hier; 1728 params->pio_bus = pio_bus; 1729 params->header_bus = dma_bus; 1730 params->payload_bus = payload_bus; 1731 params->dma_read_delay = dma_read_delay; 1732 params->dma_read_factor = dma_read_factor; 1733 params->dma_write_delay = dma_write_delay; 1734 params->dma_write_factor = dma_write_factor; 1735 params->dma_no_allocate = dma_no_allocate; 1736 params->pio_latency = pio_latency; 1737 params->pio_delay_write = pio_delay_write; 1738 params->intr_delay = intr_delay; 1739 1740 params->tx_delay = tx_delay; 1741 params->rx_delay = rx_delay; 1742 params->rx_max_copy = rx_max_copy; 1743 params->tx_max_copy = tx_max_copy; 1744 params->rx_max_intr = rx_max_intr; 1745 params->rx_fifo_size = rx_fifo_size; 1746 params->tx_fifo_size = tx_fifo_size; 1747 params->rx_fifo_threshold = rx_fifo_threshold; 1748 
params->tx_fifo_threshold = tx_fifo_threshold; 1749 1750 params->rx_filter = rx_filter; 1751 params->eaddr = hardware_address; 1752 params->rx_thread = rx_thread; 1753 params->tx_thread = tx_thread; 1754 1755 return new Device(params); 1756} 1757 1758REGISTER_SIM_OBJECT("Sinic", Device) 1759 1760/* namespace Sinic */ } 1761