1/* 2 * Copyright (c) 2004-2005 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 * 28 * Authors: Nathan Binkert 29 */ 30 31#include "dev/net/sinic.hh" 32 33#include <deque> 34#include <limits> 35#include <string> 36 37#ifdef SINIC_VTOPHYS 38#include "arch/vtophys.hh" 39 40#endif 41#include "base/compiler.hh" 42#include "base/debug.hh" 43#include "base/inet.hh" 44#include "base/types.hh" 45#include "config/the_isa.hh" 46#include "debug/EthernetAll.hh" 47#include "dev/net/etherlink.hh" 48#include "mem/packet.hh" 49#include "mem/packet_access.hh" 50#include "sim/eventq.hh" 51#include "sim/stats.hh" 52 53using namespace std; 54using namespace Net; 55using namespace TheISA; 56 57namespace Sinic { 58 59const char *RxStateStrings[] = 60{ 61 "rxIdle", 62 "rxFifoBlock", 63 "rxBeginCopy", 64 "rxCopy", 65 "rxCopyDone" 66}; 67 68const char *TxStateStrings[] = 69{ 70 "txIdle", 71 "txFifoBlock", 72 "txBeginCopy", 73 "txCopy", 74 "txCopyDone" 75}; 76 77 78/////////////////////////////////////////////////////////////////////// 79// 80// Sinic PCI Device 81// 82Base::Base(const Params *p) 83 : EtherDevBase(p), rxEnable(false), txEnable(false), 84 intrDelay(p->intr_delay), intrTick(0), cpuIntrEnable(false), 85 cpuPendingIntr(false), intrEvent(0), interface(NULL) 86{ 87} 88 89Device::Device(const Params *p) 90 : Base(p), rxUnique(0), txUnique(0), 91 virtualRegs(p->virtual_count < 1 ? 1 : p->virtual_count), 92 rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), 93 rxKickTick(0), txKickTick(0),
| 1/* 2 * Copyright (c) 2004-2005 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 * 28 * Authors: Nathan Binkert 29 */ 30 31#include "dev/net/sinic.hh" 32 33#include <deque> 34#include <limits> 35#include <string> 36 37#ifdef SINIC_VTOPHYS 38#include "arch/vtophys.hh" 39 40#endif 41#include "base/compiler.hh" 42#include "base/debug.hh" 43#include "base/inet.hh" 44#include "base/types.hh" 45#include "config/the_isa.hh" 46#include "debug/EthernetAll.hh" 47#include "dev/net/etherlink.hh" 48#include "mem/packet.hh" 49#include "mem/packet_access.hh" 50#include "sim/eventq.hh" 51#include "sim/stats.hh" 52 53using namespace std; 54using namespace Net; 55using namespace TheISA; 56 57namespace Sinic { 58 59const char *RxStateStrings[] = 60{ 61 "rxIdle", 62 "rxFifoBlock", 63 "rxBeginCopy", 64 "rxCopy", 65 "rxCopyDone" 66}; 67 68const char *TxStateStrings[] = 69{ 70 "txIdle", 71 "txFifoBlock", 72 "txBeginCopy", 73 "txCopy", 74 "txCopyDone" 75}; 76 77 78/////////////////////////////////////////////////////////////////////// 79// 80// Sinic PCI Device 81// 82Base::Base(const Params *p) 83 : EtherDevBase(p), rxEnable(false), txEnable(false), 84 intrDelay(p->intr_delay), intrTick(0), cpuIntrEnable(false), 85 cpuPendingIntr(false), intrEvent(0), interface(NULL) 86{ 87} 88 89Device::Device(const Params *p) 90 : Base(p), rxUnique(0), txUnique(0), 91 virtualRegs(p->virtual_count < 1 ? 1 : p->virtual_count), 92 rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), 93 rxKickTick(0), txKickTick(0),
|
// Device ctor member-initializers (old form): construct the transmit and
// rx/tx DMA-completion event objects bound to this device instance.
// NOTE(review): this is the pre-refactor side of the diff — the `|`-marked
// lines replace these with EventFunctionWrapper lambdas; confirm which
// version is current before editing.
94 txEvent(this), rxDmaEvent(this), txDmaEvent(this),
// Device ctor member-initializers (new form): events built from lambdas
// that invoke the corresponding handler methods (txEventTransmit,
// rxDmaDone, txDmaDone). name() is passed as the second ctor argument —
// presumably used as the event's debug/trace name; TODO confirm against
// the EventFunctionWrapper constructor.
| 94 txEvent([this]{ txEventTransmit(); }, name()), 95 rxDmaEvent([this]{ rxDmaDone(); }, name()), 96 txDmaEvent([this]{ txDmaDone(); }, name()),
|
95 dmaReadDelay(p->dma_read_delay), dmaReadFactor(p->dma_read_factor), 96 dmaWriteDelay(p->dma_write_delay), dmaWriteFactor(p->dma_write_factor) 97{ 98 interface = new Interface(name() + ".int0", this); 99 reset(); 100 101} 102 103Device::~Device() 104{} 105 106void 107Device::regStats() 108{ 109 Base::regStats(); 110 111 _maxVnicDistance = 0; 112 113 maxVnicDistance 114 .name(name() + ".maxVnicDistance") 115 .desc("maximum vnic distance") 116 ; 117 118 totalVnicDistance 119 .name(name() + ".totalVnicDistance") 120 .desc("total vnic distance") 121 ; 122 numVnicDistance 123 .name(name() + ".numVnicDistance") 124 .desc("number of vnic distance measurements") 125 ; 126 127 avgVnicDistance 128 .name(name() + ".avgVnicDistance") 129 .desc("average vnic distance") 130 ; 131 132 avgVnicDistance = totalVnicDistance / numVnicDistance; 133} 134 135void 136Device::resetStats() 137{ 138 Base::resetStats(); 139 140 _maxVnicDistance = 0; 141} 142 143EtherInt* 144Device::getEthPort(const std::string &if_name, int idx) 145{ 146 if (if_name == "interface") { 147 if (interface->getPeer()) 148 panic("interface already connected to\n"); 149 150 return interface; 151 } 152 return NULL; 153} 154 155 156void 157Device::prepareIO(ContextID cpu, int index) 158{ 159 int size = virtualRegs.size(); 160 if (index > size) 161 panic("Trying to access a vnic that doesn't exist %d > %d\n", 162 index, size); 163} 164 165//add stats for head of line blocking 166//add stats for average fifo length 167//add stats for average number of vnics busy 168 169void 170Device::prepareRead(ContextID cpu, int index) 171{ 172 using namespace Regs; 173 prepareIO(cpu, index); 174 175 VirtualReg &vnic = virtualRegs[index]; 176 177 // update rx registers 178 uint64_t rxdone = vnic.RxDone; 179 rxdone = set_RxDone_Packets(rxdone, rxFifo.countPacketsAfter(rxFifoPtr)); 180 rxdone = set_RxDone_Empty(rxdone, rxFifo.empty()); 181 rxdone = set_RxDone_High(rxdone, rxFifo.size() > regs.RxFifoHigh); 182 rxdone = 
set_RxDone_NotHigh(rxdone, rxLow); 183 regs.RxData = vnic.RxData; 184 regs.RxDone = rxdone; 185 regs.RxWait = rxdone; 186 187 // update tx regsiters 188 uint64_t txdone = vnic.TxDone; 189 txdone = set_TxDone_Packets(txdone, txFifo.packets()); 190 txdone = set_TxDone_Full(txdone, txFifo.avail() < regs.TxMaxCopy); 191 txdone = set_TxDone_Low(txdone, txFifo.size() < regs.TxFifoLow); 192 regs.TxData = vnic.TxData; 193 regs.TxDone = txdone; 194 regs.TxWait = txdone; 195 196 int head = 0xffff; 197 198 if (!rxFifo.empty()) { 199 int vnic = rxFifo.begin()->priv; 200 if (vnic != -1 && virtualRegs[vnic].rxPacketOffset > 0) 201 head = vnic; 202 } 203 204 regs.RxStatus = set_RxStatus_Head(regs.RxStatus, head); 205 regs.RxStatus = set_RxStatus_Busy(regs.RxStatus, rxBusyCount); 206 regs.RxStatus = set_RxStatus_Mapped(regs.RxStatus, rxMappedCount); 207 regs.RxStatus = set_RxStatus_Dirty(regs.RxStatus, rxDirtyCount); 208} 209 210void 211Device::prepareWrite(ContextID cpu, int index) 212{ 213 prepareIO(cpu, index); 214} 215 216/** 217 * I/O read of device register 218 */ 219Tick 220Device::read(PacketPtr pkt) 221{ 222 assert(config.command & PCI_CMD_MSE); 223 assert(pkt->getAddr() >= BARAddrs[0] && pkt->getSize() < BARSize[0]); 224 225 ContextID cpu = pkt->req->contextId(); 226 Addr daddr = pkt->getAddr() - BARAddrs[0]; 227 Addr index = daddr >> Regs::VirtualShift; 228 Addr raddr = daddr & Regs::VirtualMask; 229 230 if (!regValid(raddr)) 231 panic("invalid register: cpu=%d vnic=%d da=%#x pa=%#x size=%d", 232 cpu, index, daddr, pkt->getAddr(), pkt->getSize()); 233 234 const Regs::Info &info = regInfo(raddr); 235 if (!info.read) 236 panic("read %s (write only): " 237 "cpu=%d vnic=%d da=%#x pa=%#x size=%d", 238 info.name, cpu, index, daddr, pkt->getAddr(), pkt->getSize()); 239 240 panic("read %s (invalid size): " 241 "cpu=%d vnic=%d da=%#x pa=%#x size=%d", 242 info.name, cpu, index, daddr, pkt->getAddr(), pkt->getSize()); 243 244 prepareRead(cpu, index); 245 246 uint64_t value 
M5_VAR_USED = 0; 247 if (pkt->getSize() == 4) { 248 uint32_t reg = regData32(raddr); 249 pkt->set(reg); 250 value = reg; 251 } 252 253 if (pkt->getSize() == 8) { 254 uint64_t reg = regData64(raddr); 255 pkt->set(reg); 256 value = reg; 257 } 258 259 DPRINTF(EthernetPIO, 260 "read %s: cpu=%d vnic=%d da=%#x pa=%#x size=%d val=%#x\n", 261 info.name, cpu, index, daddr, pkt->getAddr(), pkt->getSize(), value); 262 263 // reading the interrupt status register has the side effect of 264 // clearing it 265 if (raddr == Regs::IntrStatus) 266 devIntrClear(); 267 268 return pioDelay; 269} 270 271/** 272 * IPR read of device register 273 274 Fault 275Device::iprRead(Addr daddr, ContextID cpu, uint64_t &result) 276{ 277 if (!regValid(daddr)) 278 panic("invalid address: da=%#x", daddr); 279 280 const Regs::Info &info = regInfo(daddr); 281 if (!info.read) 282 panic("reading %s (write only): cpu=%d da=%#x", info.name, cpu, daddr); 283 284 DPRINTF(EthernetPIO, "IPR read %s: cpu=%d da=%#x\n", 285 info.name, cpu, daddr); 286 287 prepareRead(cpu, 0); 288 289 if (info.size == 4) 290 result = regData32(daddr); 291 292 if (info.size == 8) 293 result = regData64(daddr); 294 295 DPRINTF(EthernetPIO, "IPR read %s: cpu=%s da=%#x val=%#x\n", 296 info.name, cpu, result); 297 298 return NoFault; 299} 300*/ 301/** 302 * I/O write of device register 303 */ 304Tick 305Device::write(PacketPtr pkt) 306{ 307 assert(config.command & PCI_CMD_MSE); 308 assert(pkt->getAddr() >= BARAddrs[0] && pkt->getSize() < BARSize[0]); 309 310 ContextID cpu = pkt->req->contextId(); 311 Addr daddr = pkt->getAddr() - BARAddrs[0]; 312 Addr index = daddr >> Regs::VirtualShift; 313 Addr raddr = daddr & Regs::VirtualMask; 314 315 if (!regValid(raddr)) 316 panic("invalid register: cpu=%d, da=%#x pa=%#x size=%d", 317 cpu, daddr, pkt->getAddr(), pkt->getSize()); 318 319 const Regs::Info &info = regInfo(raddr); 320 if (!info.write) 321 panic("write %s (read only): " 322 "cpu=%d vnic=%d da=%#x pa=%#x size=%d", 323 info.name, cpu, 
index, daddr, pkt->getAddr(), pkt->getSize()); 324 325 if (pkt->getSize() != info.size) 326 panic("write %s (invalid size): " 327 "cpu=%d vnic=%d da=%#x pa=%#x size=%d", 328 info.name, cpu, index, daddr, pkt->getAddr(), pkt->getSize()); 329 330 VirtualReg &vnic = virtualRegs[index]; 331 332 DPRINTF(EthernetPIO, 333 "write %s vnic %d: cpu=%d val=%#x da=%#x pa=%#x size=%d\n", 334 info.name, index, cpu, info.size == 4 ? pkt->get<uint32_t>() : 335 pkt->get<uint64_t>(), daddr, pkt->getAddr(), pkt->getSize()); 336 337 prepareWrite(cpu, index); 338 339 switch (raddr) { 340 case Regs::Config: 341 changeConfig(pkt->get<uint32_t>()); 342 break; 343 344 case Regs::Command: 345 command(pkt->get<uint32_t>()); 346 break; 347 348 case Regs::IntrStatus: 349 devIntrClear(regs.IntrStatus & pkt->get<uint32_t>()); 350 break; 351 352 case Regs::IntrMask: 353 devIntrChangeMask(pkt->get<uint32_t>()); 354 break; 355 356 case Regs::RxData: 357 if (Regs::get_RxDone_Busy(vnic.RxDone)) 358 panic("receive machine busy with another request! 
rxState=%s", 359 RxStateStrings[rxState]); 360 361 vnic.rxUnique = rxUnique++; 362 vnic.RxDone = Regs::RxDone_Busy; 363 vnic.RxData = pkt->get<uint64_t>(); 364 rxBusyCount++; 365 366 if (Regs::get_RxData_Vaddr(pkt->get<uint64_t>())) { 367 panic("vtophys not implemented in newmem"); 368#ifdef SINIC_VTOPHYS 369 Addr vaddr = Regs::get_RxData_Addr(reg64); 370 Addr paddr = vtophys(req->xc, vaddr); 371 DPRINTF(EthernetPIO, "write RxData vnic %d (rxunique %d): " 372 "vaddr=%#x, paddr=%#x\n", 373 index, vnic.rxUnique, vaddr, paddr); 374 375 vnic.RxData = Regs::set_RxData_Addr(vnic.RxData, paddr); 376#endif 377 } else { 378 DPRINTF(EthernetPIO, "write RxData vnic %d (rxunique %d)\n", 379 index, vnic.rxUnique); 380 } 381 382 if (vnic.rxIndex == rxFifo.end()) { 383 DPRINTF(EthernetPIO, "request new packet...appending to rxList\n"); 384 rxList.push_back(index); 385 } else { 386 DPRINTF(EthernetPIO, "packet exists...appending to rxBusy\n"); 387 rxBusy.push_back(index); 388 } 389 390 if (rxEnable && (rxState == rxIdle || rxState == rxFifoBlock)) { 391 rxState = rxFifoBlock; 392 rxKick(); 393 } 394 break; 395 396 case Regs::TxData: 397 if (Regs::get_TxDone_Busy(vnic.TxDone)) 398 panic("transmit machine busy with another request! 
txState=%s", 399 TxStateStrings[txState]); 400 401 vnic.txUnique = txUnique++; 402 vnic.TxDone = Regs::TxDone_Busy; 403 404 if (Regs::get_TxData_Vaddr(pkt->get<uint64_t>())) { 405 panic("vtophys won't work here in newmem.\n"); 406#ifdef SINIC_VTOPHYS 407 Addr vaddr = Regs::get_TxData_Addr(reg64); 408 Addr paddr = vtophys(req->xc, vaddr); 409 DPRINTF(EthernetPIO, "write TxData vnic %d (txunique %d): " 410 "vaddr=%#x, paddr=%#x\n", 411 index, vnic.txUnique, vaddr, paddr); 412 413 vnic.TxData = Regs::set_TxData_Addr(vnic.TxData, paddr); 414#endif 415 } else { 416 DPRINTF(EthernetPIO, "write TxData vnic %d (txunique %d)\n", 417 index, vnic.txUnique); 418 } 419 420 if (txList.empty() || txList.front() != index) 421 txList.push_back(index); 422 if (txEnable && txState == txIdle && txList.front() == index) { 423 txState = txFifoBlock; 424 txKick(); 425 } 426 break; 427 } 428 429 return pioDelay; 430} 431 432void 433Device::devIntrPost(uint32_t interrupts) 434{ 435 if ((interrupts & Regs::Intr_Res)) 436 panic("Cannot set a reserved interrupt"); 437 438 regs.IntrStatus |= interrupts; 439 440 DPRINTF(EthernetIntr, 441 "interrupt written to intStatus: intr=%#x status=%#x mask=%#x\n", 442 interrupts, regs.IntrStatus, regs.IntrMask); 443 444 interrupts = regs.IntrStatus & regs.IntrMask; 445 446 // Intr_RxHigh is special, we only signal it if we've emptied the fifo 447 // and then filled it above the high watermark 448 if (rxEmpty) 449 rxEmpty = false; 450 else 451 interrupts &= ~Regs::Intr_RxHigh; 452 453 // Intr_TxLow is special, we only signal it if we've filled up the fifo 454 // and then dropped below the low watermark 455 if (txFull) 456 txFull = false; 457 else 458 interrupts &= ~Regs::Intr_TxLow; 459 460 if (interrupts) { 461 Tick when = curTick(); 462 if ((interrupts & Regs::Intr_NoDelay) == 0) 463 when += intrDelay; 464 cpuIntrPost(when); 465 } 466} 467 468void 469Device::devIntrClear(uint32_t interrupts) 470{ 471 if ((interrupts & Regs::Intr_Res)) 472 panic("Cannot 
clear a reserved interrupt"); 473 474 regs.IntrStatus &= ~interrupts; 475 476 DPRINTF(EthernetIntr, 477 "interrupt cleared from intStatus: intr=%x status=%x mask=%x\n", 478 interrupts, regs.IntrStatus, regs.IntrMask); 479 480 if (!(regs.IntrStatus & regs.IntrMask)) 481 cpuIntrClear(); 482} 483 484void 485Device::devIntrChangeMask(uint32_t newmask) 486{ 487 if (regs.IntrMask == newmask) 488 return; 489 490 regs.IntrMask = newmask; 491 492 DPRINTF(EthernetIntr, 493 "interrupt mask changed: intStatus=%x intMask=%x masked=%x\n", 494 regs.IntrStatus, regs.IntrMask, regs.IntrStatus & regs.IntrMask); 495 496 if (regs.IntrStatus & regs.IntrMask) 497 cpuIntrPost(curTick()); 498 else 499 cpuIntrClear(); 500} 501 502void 503Base::cpuIntrPost(Tick when) 504{ 505 // If the interrupt you want to post is later than an interrupt 506 // already scheduled, just let it post in the coming one and don't 507 // schedule another. 508 // HOWEVER, must be sure that the scheduled intrTick is in the 509 // future (this was formerly the source of a bug) 510 /** 511 * @todo this warning should be removed and the intrTick code should 512 * be fixed. 513 */ 514 assert(when >= curTick()); 515 assert(intrTick >= curTick() || intrTick == 0); 516 if (!cpuIntrEnable) { 517 DPRINTF(EthernetIntr, "interrupts not enabled.\n", 518 intrTick); 519 return; 520 } 521 522 if (when > intrTick && intrTick != 0) { 523 DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n", 524 intrTick); 525 return; 526 } 527 528 intrTick = when; 529 if (intrTick < curTick()) { 530 intrTick = curTick(); 531 } 532 533 DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n", 534 intrTick); 535 536 if (intrEvent) 537 intrEvent->squash();
| 97 dmaReadDelay(p->dma_read_delay), dmaReadFactor(p->dma_read_factor), 98 dmaWriteDelay(p->dma_write_delay), dmaWriteFactor(p->dma_write_factor) 99{ 100 interface = new Interface(name() + ".int0", this); 101 reset(); 102 103} 104 105Device::~Device() 106{} 107 108void 109Device::regStats() 110{ 111 Base::regStats(); 112 113 _maxVnicDistance = 0; 114 115 maxVnicDistance 116 .name(name() + ".maxVnicDistance") 117 .desc("maximum vnic distance") 118 ; 119 120 totalVnicDistance 121 .name(name() + ".totalVnicDistance") 122 .desc("total vnic distance") 123 ; 124 numVnicDistance 125 .name(name() + ".numVnicDistance") 126 .desc("number of vnic distance measurements") 127 ; 128 129 avgVnicDistance 130 .name(name() + ".avgVnicDistance") 131 .desc("average vnic distance") 132 ; 133 134 avgVnicDistance = totalVnicDistance / numVnicDistance; 135} 136 137void 138Device::resetStats() 139{ 140 Base::resetStats(); 141 142 _maxVnicDistance = 0; 143} 144 145EtherInt* 146Device::getEthPort(const std::string &if_name, int idx) 147{ 148 if (if_name == "interface") { 149 if (interface->getPeer()) 150 panic("interface already connected to\n"); 151 152 return interface; 153 } 154 return NULL; 155} 156 157 158void 159Device::prepareIO(ContextID cpu, int index) 160{ 161 int size = virtualRegs.size(); 162 if (index > size) 163 panic("Trying to access a vnic that doesn't exist %d > %d\n", 164 index, size); 165} 166 167//add stats for head of line blocking 168//add stats for average fifo length 169//add stats for average number of vnics busy 170 171void 172Device::prepareRead(ContextID cpu, int index) 173{ 174 using namespace Regs; 175 prepareIO(cpu, index); 176 177 VirtualReg &vnic = virtualRegs[index]; 178 179 // update rx registers 180 uint64_t rxdone = vnic.RxDone; 181 rxdone = set_RxDone_Packets(rxdone, rxFifo.countPacketsAfter(rxFifoPtr)); 182 rxdone = set_RxDone_Empty(rxdone, rxFifo.empty()); 183 rxdone = set_RxDone_High(rxdone, rxFifo.size() > regs.RxFifoHigh); 184 rxdone = 
set_RxDone_NotHigh(rxdone, rxLow); 185 regs.RxData = vnic.RxData; 186 regs.RxDone = rxdone; 187 regs.RxWait = rxdone; 188 189 // update tx regsiters 190 uint64_t txdone = vnic.TxDone; 191 txdone = set_TxDone_Packets(txdone, txFifo.packets()); 192 txdone = set_TxDone_Full(txdone, txFifo.avail() < regs.TxMaxCopy); 193 txdone = set_TxDone_Low(txdone, txFifo.size() < regs.TxFifoLow); 194 regs.TxData = vnic.TxData; 195 regs.TxDone = txdone; 196 regs.TxWait = txdone; 197 198 int head = 0xffff; 199 200 if (!rxFifo.empty()) { 201 int vnic = rxFifo.begin()->priv; 202 if (vnic != -1 && virtualRegs[vnic].rxPacketOffset > 0) 203 head = vnic; 204 } 205 206 regs.RxStatus = set_RxStatus_Head(regs.RxStatus, head); 207 regs.RxStatus = set_RxStatus_Busy(regs.RxStatus, rxBusyCount); 208 regs.RxStatus = set_RxStatus_Mapped(regs.RxStatus, rxMappedCount); 209 regs.RxStatus = set_RxStatus_Dirty(regs.RxStatus, rxDirtyCount); 210} 211 212void 213Device::prepareWrite(ContextID cpu, int index) 214{ 215 prepareIO(cpu, index); 216} 217 218/** 219 * I/O read of device register 220 */ 221Tick 222Device::read(PacketPtr pkt) 223{ 224 assert(config.command & PCI_CMD_MSE); 225 assert(pkt->getAddr() >= BARAddrs[0] && pkt->getSize() < BARSize[0]); 226 227 ContextID cpu = pkt->req->contextId(); 228 Addr daddr = pkt->getAddr() - BARAddrs[0]; 229 Addr index = daddr >> Regs::VirtualShift; 230 Addr raddr = daddr & Regs::VirtualMask; 231 232 if (!regValid(raddr)) 233 panic("invalid register: cpu=%d vnic=%d da=%#x pa=%#x size=%d", 234 cpu, index, daddr, pkt->getAddr(), pkt->getSize()); 235 236 const Regs::Info &info = regInfo(raddr); 237 if (!info.read) 238 panic("read %s (write only): " 239 "cpu=%d vnic=%d da=%#x pa=%#x size=%d", 240 info.name, cpu, index, daddr, pkt->getAddr(), pkt->getSize()); 241 242 panic("read %s (invalid size): " 243 "cpu=%d vnic=%d da=%#x pa=%#x size=%d", 244 info.name, cpu, index, daddr, pkt->getAddr(), pkt->getSize()); 245 246 prepareRead(cpu, index); 247 248 uint64_t value 
M5_VAR_USED = 0; 249 if (pkt->getSize() == 4) { 250 uint32_t reg = regData32(raddr); 251 pkt->set(reg); 252 value = reg; 253 } 254 255 if (pkt->getSize() == 8) { 256 uint64_t reg = regData64(raddr); 257 pkt->set(reg); 258 value = reg; 259 } 260 261 DPRINTF(EthernetPIO, 262 "read %s: cpu=%d vnic=%d da=%#x pa=%#x size=%d val=%#x\n", 263 info.name, cpu, index, daddr, pkt->getAddr(), pkt->getSize(), value); 264 265 // reading the interrupt status register has the side effect of 266 // clearing it 267 if (raddr == Regs::IntrStatus) 268 devIntrClear(); 269 270 return pioDelay; 271} 272 273/** 274 * IPR read of device register 275 276 Fault 277Device::iprRead(Addr daddr, ContextID cpu, uint64_t &result) 278{ 279 if (!regValid(daddr)) 280 panic("invalid address: da=%#x", daddr); 281 282 const Regs::Info &info = regInfo(daddr); 283 if (!info.read) 284 panic("reading %s (write only): cpu=%d da=%#x", info.name, cpu, daddr); 285 286 DPRINTF(EthernetPIO, "IPR read %s: cpu=%d da=%#x\n", 287 info.name, cpu, daddr); 288 289 prepareRead(cpu, 0); 290 291 if (info.size == 4) 292 result = regData32(daddr); 293 294 if (info.size == 8) 295 result = regData64(daddr); 296 297 DPRINTF(EthernetPIO, "IPR read %s: cpu=%s da=%#x val=%#x\n", 298 info.name, cpu, result); 299 300 return NoFault; 301} 302*/ 303/** 304 * I/O write of device register 305 */ 306Tick 307Device::write(PacketPtr pkt) 308{ 309 assert(config.command & PCI_CMD_MSE); 310 assert(pkt->getAddr() >= BARAddrs[0] && pkt->getSize() < BARSize[0]); 311 312 ContextID cpu = pkt->req->contextId(); 313 Addr daddr = pkt->getAddr() - BARAddrs[0]; 314 Addr index = daddr >> Regs::VirtualShift; 315 Addr raddr = daddr & Regs::VirtualMask; 316 317 if (!regValid(raddr)) 318 panic("invalid register: cpu=%d, da=%#x pa=%#x size=%d", 319 cpu, daddr, pkt->getAddr(), pkt->getSize()); 320 321 const Regs::Info &info = regInfo(raddr); 322 if (!info.write) 323 panic("write %s (read only): " 324 "cpu=%d vnic=%d da=%#x pa=%#x size=%d", 325 info.name, cpu, 
index, daddr, pkt->getAddr(), pkt->getSize()); 326 327 if (pkt->getSize() != info.size) 328 panic("write %s (invalid size): " 329 "cpu=%d vnic=%d da=%#x pa=%#x size=%d", 330 info.name, cpu, index, daddr, pkt->getAddr(), pkt->getSize()); 331 332 VirtualReg &vnic = virtualRegs[index]; 333 334 DPRINTF(EthernetPIO, 335 "write %s vnic %d: cpu=%d val=%#x da=%#x pa=%#x size=%d\n", 336 info.name, index, cpu, info.size == 4 ? pkt->get<uint32_t>() : 337 pkt->get<uint64_t>(), daddr, pkt->getAddr(), pkt->getSize()); 338 339 prepareWrite(cpu, index); 340 341 switch (raddr) { 342 case Regs::Config: 343 changeConfig(pkt->get<uint32_t>()); 344 break; 345 346 case Regs::Command: 347 command(pkt->get<uint32_t>()); 348 break; 349 350 case Regs::IntrStatus: 351 devIntrClear(regs.IntrStatus & pkt->get<uint32_t>()); 352 break; 353 354 case Regs::IntrMask: 355 devIntrChangeMask(pkt->get<uint32_t>()); 356 break; 357 358 case Regs::RxData: 359 if (Regs::get_RxDone_Busy(vnic.RxDone)) 360 panic("receive machine busy with another request! 
rxState=%s", 361 RxStateStrings[rxState]); 362 363 vnic.rxUnique = rxUnique++; 364 vnic.RxDone = Regs::RxDone_Busy; 365 vnic.RxData = pkt->get<uint64_t>(); 366 rxBusyCount++; 367 368 if (Regs::get_RxData_Vaddr(pkt->get<uint64_t>())) { 369 panic("vtophys not implemented in newmem"); 370#ifdef SINIC_VTOPHYS 371 Addr vaddr = Regs::get_RxData_Addr(reg64); 372 Addr paddr = vtophys(req->xc, vaddr); 373 DPRINTF(EthernetPIO, "write RxData vnic %d (rxunique %d): " 374 "vaddr=%#x, paddr=%#x\n", 375 index, vnic.rxUnique, vaddr, paddr); 376 377 vnic.RxData = Regs::set_RxData_Addr(vnic.RxData, paddr); 378#endif 379 } else { 380 DPRINTF(EthernetPIO, "write RxData vnic %d (rxunique %d)\n", 381 index, vnic.rxUnique); 382 } 383 384 if (vnic.rxIndex == rxFifo.end()) { 385 DPRINTF(EthernetPIO, "request new packet...appending to rxList\n"); 386 rxList.push_back(index); 387 } else { 388 DPRINTF(EthernetPIO, "packet exists...appending to rxBusy\n"); 389 rxBusy.push_back(index); 390 } 391 392 if (rxEnable && (rxState == rxIdle || rxState == rxFifoBlock)) { 393 rxState = rxFifoBlock; 394 rxKick(); 395 } 396 break; 397 398 case Regs::TxData: 399 if (Regs::get_TxDone_Busy(vnic.TxDone)) 400 panic("transmit machine busy with another request! 
txState=%s", 401 TxStateStrings[txState]); 402 403 vnic.txUnique = txUnique++; 404 vnic.TxDone = Regs::TxDone_Busy; 405 406 if (Regs::get_TxData_Vaddr(pkt->get<uint64_t>())) { 407 panic("vtophys won't work here in newmem.\n"); 408#ifdef SINIC_VTOPHYS 409 Addr vaddr = Regs::get_TxData_Addr(reg64); 410 Addr paddr = vtophys(req->xc, vaddr); 411 DPRINTF(EthernetPIO, "write TxData vnic %d (txunique %d): " 412 "vaddr=%#x, paddr=%#x\n", 413 index, vnic.txUnique, vaddr, paddr); 414 415 vnic.TxData = Regs::set_TxData_Addr(vnic.TxData, paddr); 416#endif 417 } else { 418 DPRINTF(EthernetPIO, "write TxData vnic %d (txunique %d)\n", 419 index, vnic.txUnique); 420 } 421 422 if (txList.empty() || txList.front() != index) 423 txList.push_back(index); 424 if (txEnable && txState == txIdle && txList.front() == index) { 425 txState = txFifoBlock; 426 txKick(); 427 } 428 break; 429 } 430 431 return pioDelay; 432} 433 434void 435Device::devIntrPost(uint32_t interrupts) 436{ 437 if ((interrupts & Regs::Intr_Res)) 438 panic("Cannot set a reserved interrupt"); 439 440 regs.IntrStatus |= interrupts; 441 442 DPRINTF(EthernetIntr, 443 "interrupt written to intStatus: intr=%#x status=%#x mask=%#x\n", 444 interrupts, regs.IntrStatus, regs.IntrMask); 445 446 interrupts = regs.IntrStatus & regs.IntrMask; 447 448 // Intr_RxHigh is special, we only signal it if we've emptied the fifo 449 // and then filled it above the high watermark 450 if (rxEmpty) 451 rxEmpty = false; 452 else 453 interrupts &= ~Regs::Intr_RxHigh; 454 455 // Intr_TxLow is special, we only signal it if we've filled up the fifo 456 // and then dropped below the low watermark 457 if (txFull) 458 txFull = false; 459 else 460 interrupts &= ~Regs::Intr_TxLow; 461 462 if (interrupts) { 463 Tick when = curTick(); 464 if ((interrupts & Regs::Intr_NoDelay) == 0) 465 when += intrDelay; 466 cpuIntrPost(when); 467 } 468} 469 470void 471Device::devIntrClear(uint32_t interrupts) 472{ 473 if ((interrupts & Regs::Intr_Res)) 474 panic("Cannot 
clear a reserved interrupt"); 475 476 regs.IntrStatus &= ~interrupts; 477 478 DPRINTF(EthernetIntr, 479 "interrupt cleared from intStatus: intr=%x status=%x mask=%x\n", 480 interrupts, regs.IntrStatus, regs.IntrMask); 481 482 if (!(regs.IntrStatus & regs.IntrMask)) 483 cpuIntrClear(); 484} 485 486void 487Device::devIntrChangeMask(uint32_t newmask) 488{ 489 if (regs.IntrMask == newmask) 490 return; 491 492 regs.IntrMask = newmask; 493 494 DPRINTF(EthernetIntr, 495 "interrupt mask changed: intStatus=%x intMask=%x masked=%x\n", 496 regs.IntrStatus, regs.IntrMask, regs.IntrStatus & regs.IntrMask); 497 498 if (regs.IntrStatus & regs.IntrMask) 499 cpuIntrPost(curTick()); 500 else 501 cpuIntrClear(); 502} 503 504void 505Base::cpuIntrPost(Tick when) 506{ 507 // If the interrupt you want to post is later than an interrupt 508 // already scheduled, just let it post in the coming one and don't 509 // schedule another. 510 // HOWEVER, must be sure that the scheduled intrTick is in the 511 // future (this was formerly the source of a bug) 512 /** 513 * @todo this warning should be removed and the intrTick code should 514 * be fixed. 515 */ 516 assert(when >= curTick()); 517 assert(intrTick >= curTick() || intrTick == 0); 518 if (!cpuIntrEnable) { 519 DPRINTF(EthernetIntr, "interrupts not enabled.\n", 520 intrTick); 521 return; 522 } 523 524 if (when > intrTick && intrTick != 0) { 525 DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n", 526 intrTick); 527 return; 528 } 529 530 intrTick = when; 531 if (intrTick < curTick()) { 532 intrTick = curTick(); 533 } 534 535 DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n", 536 intrTick); 537 538 if (intrEvent) 539 intrEvent->squash();
|
// Inside Base::cpuIntrPost (old form): allocate a fresh interrupt event to
// be scheduled at intrTick immediately below. The second argument (true)
// presumably marks the event for automatic deletion after it fires —
// TODO confirm against the IntrEvent/Event flags.
538 intrEvent = new IntrEvent(this, true);
// Inside Base::cpuIntrPost (new form): allocate the interrupt event as an
// EventFunctionWrapper whose lambda calls cpuInterrupt(); name() supplies
// the event's name and the trailing true presumably requests auto-delete
// after the event fires — TODO confirm against the EventFunctionWrapper
// constructor signature.
| 540 541 intrEvent = new EventFunctionWrapper([this]{ cpuInterrupt(); }, 542 name(), true);
|
539 schedule(intrEvent, intrTick); 540} 541 542void 543Base::cpuInterrupt() 544{ 545 assert(intrTick == curTick()); 546 547 // Whether or not there's a pending interrupt, we don't care about 548 // it anymore 549 intrEvent = 0; 550 intrTick = 0; 551 552 // Don't send an interrupt if there's already one 553 if (cpuPendingIntr) { 554 DPRINTF(EthernetIntr, 555 "would send an interrupt now, but there's already pending\n"); 556 } else { 557 // Send interrupt 558 cpuPendingIntr = true; 559 560 DPRINTF(EthernetIntr, "posting interrupt\n"); 561 intrPost(); 562 } 563} 564 565void 566Base::cpuIntrClear() 567{ 568 if (!cpuPendingIntr) 569 return; 570 571 if (intrEvent) { 572 intrEvent->squash(); 573 intrEvent = 0; 574 } 575 576 intrTick = 0; 577 578 cpuPendingIntr = false; 579 580 DPRINTF(EthernetIntr, "clearing cchip interrupt\n"); 581 intrClear(); 582} 583 584bool 585Base::cpuIntrPending() const 586{ return cpuPendingIntr; } 587 588void 589Device::changeConfig(uint32_t newconf) 590{ 591 uint32_t changed = regs.Config ^ newconf; 592 if (!changed) 593 return; 594 595 regs.Config = newconf; 596 597 if ((changed & Regs::Config_IntEn)) { 598 cpuIntrEnable = regs.Config & Regs::Config_IntEn; 599 if (cpuIntrEnable) { 600 if (regs.IntrStatus & regs.IntrMask) 601 cpuIntrPost(curTick()); 602 } else { 603 cpuIntrClear(); 604 } 605 } 606 607 if ((changed & Regs::Config_TxEn)) { 608 txEnable = regs.Config & Regs::Config_TxEn; 609 if (txEnable) 610 txKick(); 611 } 612 613 if ((changed & Regs::Config_RxEn)) { 614 rxEnable = regs.Config & Regs::Config_RxEn; 615 if (rxEnable) 616 rxKick(); 617 } 618} 619 620void 621Device::command(uint32_t command) 622{ 623 if (command & Regs::Command_Intr) 624 devIntrPost(Regs::Intr_Soft); 625 626 if (command & Regs::Command_Reset) 627 reset(); 628} 629 630void 631Device::reset() 632{ 633 using namespace Regs; 634 635 memset(®s, 0, sizeof(regs)); 636 637 regs.Config = 0; 638 if (params()->rx_thread) 639 regs.Config |= Config_RxThread; 640 if 
(params()->tx_thread) 641 regs.Config |= Config_TxThread; 642 if (params()->rss) 643 regs.Config |= Config_RSS; 644 if (params()->zero_copy) 645 regs.Config |= Config_ZeroCopy; 646 if (params()->delay_copy) 647 regs.Config |= Config_DelayCopy; 648 if (params()->virtual_addr) 649 regs.Config |= Config_Vaddr; 650 651 if (params()->delay_copy && params()->zero_copy) 652 panic("Can't delay copy and zero copy"); 653 654 regs.IntrMask = Intr_Soft | Intr_RxHigh | Intr_RxPacket | Intr_TxLow; 655 regs.RxMaxCopy = params()->rx_max_copy; 656 regs.TxMaxCopy = params()->tx_max_copy; 657 regs.ZeroCopySize = params()->zero_copy_size; 658 regs.ZeroCopyMark = params()->zero_copy_threshold; 659 regs.VirtualCount = params()->virtual_count; 660 regs.RxMaxIntr = params()->rx_max_intr; 661 regs.RxFifoSize = params()->rx_fifo_size; 662 regs.TxFifoSize = params()->tx_fifo_size; 663 regs.RxFifoLow = params()->rx_fifo_low_mark; 664 regs.TxFifoLow = params()->tx_fifo_threshold; 665 regs.RxFifoHigh = params()->rx_fifo_threshold; 666 regs.TxFifoHigh = params()->tx_fifo_high_mark; 667 regs.HwAddr = params()->hardware_address; 668 669 if (regs.RxMaxCopy < regs.ZeroCopyMark) 670 panic("Must be able to copy at least as many bytes as the threshold"); 671 672 if (regs.ZeroCopySize >= regs.ZeroCopyMark) 673 panic("The number of bytes to copy must be less than the threshold"); 674 675 rxList.clear(); 676 rxBusy.clear(); 677 rxActive = -1; 678 txList.clear(); 679 rxBusyCount = 0; 680 rxDirtyCount = 0; 681 rxMappedCount = 0; 682 683 rxState = rxIdle; 684 txState = txIdle; 685 686 rxFifo.clear(); 687 rxFifoPtr = rxFifo.end(); 688 txFifo.clear(); 689 rxEmpty = false; 690 rxLow = true; 691 txFull = false; 692 693 int size = virtualRegs.size(); 694 virtualRegs.clear(); 695 virtualRegs.resize(size); 696 for (int i = 0; i < size; ++i) 697 virtualRegs[i].rxIndex = rxFifo.end(); 698} 699 700void 701Device::rxDmaDone() 702{ 703 assert(rxState == rxCopy); 704 rxState = rxCopyDone; 705 DPRINTF(EthernetDMA, "end rx 
dma write paddr=%#x len=%d\n", 706 rxDmaAddr, rxDmaLen); 707 DDUMP(EthernetData, rxDmaData, rxDmaLen); 708 709 // If the transmit state machine has a pending DMA, let it go first 710 if (txState == txBeginCopy) 711 txKick(); 712 713 rxKick(); 714} 715 716void 717Device::rxKick() 718{ 719 VirtualReg *vnic = NULL; 720 721 DPRINTF(EthernetSM, "rxKick: rxState=%s (rxFifo.size=%d)\n", 722 RxStateStrings[rxState], rxFifo.size()); 723 724 if (rxKickTick > curTick()) { 725 DPRINTF(EthernetSM, "rxKick: exiting, can't run till %d\n", 726 rxKickTick); 727 return; 728 } 729 730 next: 731 rxFifo.check(); 732 if (rxState == rxIdle) 733 goto exit; 734 735 if (rxActive == -1) { 736 if (rxState != rxFifoBlock) 737 panic("no active vnic while in state %s", RxStateStrings[rxState]); 738 739 DPRINTF(EthernetSM, "processing rxState=%s\n", 740 RxStateStrings[rxState]); 741 } else { 742 vnic = &virtualRegs[rxActive]; 743 DPRINTF(EthernetSM, 744 "processing rxState=%s for vnic %d (rxunique %d)\n", 745 RxStateStrings[rxState], rxActive, vnic->rxUnique); 746 } 747 748 switch (rxState) { 749 case rxFifoBlock: 750 if (DTRACE(EthernetSM)) { 751 PacketFifo::iterator end = rxFifo.end(); 752 int size = virtualRegs.size(); 753 for (int i = 0; i < size; ++i) { 754 VirtualReg *vn = &virtualRegs[i]; 755 bool busy = Regs::get_RxDone_Busy(vn->RxDone); 756 if (vn->rxIndex != end) { 757#ifndef NDEBUG 758 bool dirty = vn->rxPacketOffset > 0; 759 const char *status; 760 761 if (busy && dirty) 762 status = "busy,dirty"; 763 else if (busy) 764 status = "busy"; 765 else if (dirty) 766 status = "dirty"; 767 else 768 status = "mapped"; 769 770 DPRINTF(EthernetSM, 771 "vnic %d %s (rxunique %d), packet %d, slack %d\n", 772 i, status, vn->rxUnique, 773 rxFifo.countPacketsBefore(vn->rxIndex), 774 vn->rxIndex->slack); 775#endif 776 } else if (busy) { 777 DPRINTF(EthernetSM, "vnic %d unmapped (rxunique %d)\n", 778 i, vn->rxUnique); 779 } 780 } 781 } 782 783 if (!rxBusy.empty()) { 784 rxActive = rxBusy.front(); 785 
rxBusy.pop_front(); 786 vnic = &virtualRegs[rxActive]; 787 788 if (vnic->rxIndex == rxFifo.end()) 789 panic("continuing vnic without packet\n"); 790 791 DPRINTF(EthernetSM, 792 "continue processing for vnic %d (rxunique %d)\n", 793 rxActive, vnic->rxUnique); 794 795 rxState = rxBeginCopy; 796 797 int vnic_distance = rxFifo.countPacketsBefore(vnic->rxIndex); 798 totalVnicDistance += vnic_distance; 799 numVnicDistance += 1; 800 if (vnic_distance > _maxVnicDistance) { 801 maxVnicDistance = vnic_distance; 802 _maxVnicDistance = vnic_distance; 803 } 804 805 break; 806 } 807 808 if (rxFifoPtr == rxFifo.end()) { 809 DPRINTF(EthernetSM, "receive waiting for data. Nothing to do.\n"); 810 goto exit; 811 } 812 813 if (rxList.empty()) 814 panic("Not idle, but nothing to do!"); 815 816 assert(!rxFifo.empty()); 817 818 rxActive = rxList.front(); 819 rxList.pop_front(); 820 vnic = &virtualRegs[rxActive]; 821 822 DPRINTF(EthernetSM, 823 "processing new packet for vnic %d (rxunique %d)\n", 824 rxActive, vnic->rxUnique); 825 826 // Grab a new packet from the fifo. 
827 vnic->rxIndex = rxFifoPtr++; 828 vnic->rxIndex->priv = rxActive; 829 vnic->rxPacketOffset = 0; 830 vnic->rxPacketBytes = vnic->rxIndex->packet->length; 831 assert(vnic->rxPacketBytes); 832 rxMappedCount++; 833 834 vnic->rxDoneData = 0; 835 /* scope for variables */ { 836 IpPtr ip(vnic->rxIndex->packet); 837 if (ip) { 838 DPRINTF(Ethernet, "ID is %d\n", ip->id()); 839 vnic->rxDoneData |= Regs::RxDone_IpPacket; 840 rxIpChecksums++; 841 if (cksum(ip) != 0) { 842 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n"); 843 vnic->rxDoneData |= Regs::RxDone_IpError; 844 } 845 TcpPtr tcp(ip); 846 UdpPtr udp(ip); 847 if (tcp) { 848 DPRINTF(Ethernet, 849 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n", 850 tcp->sport(), tcp->dport(), tcp->seq(), 851 tcp->ack()); 852 vnic->rxDoneData |= Regs::RxDone_TcpPacket; 853 rxTcpChecksums++; 854 if (cksum(tcp) != 0) { 855 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n"); 856 vnic->rxDoneData |= Regs::RxDone_TcpError; 857 } 858 } else if (udp) { 859 vnic->rxDoneData |= Regs::RxDone_UdpPacket; 860 rxUdpChecksums++; 861 if (cksum(udp) != 0) { 862 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n"); 863 vnic->rxDoneData |= Regs::RxDone_UdpError; 864 } 865 } 866 } 867 } 868 rxState = rxBeginCopy; 869 break; 870 871 case rxBeginCopy: 872 if (dmaPending() || drainState() != DrainState::Running) 873 goto exit; 874 875 rxDmaAddr = pciToDma(Regs::get_RxData_Addr(vnic->RxData)); 876 rxDmaLen = min<unsigned>(Regs::get_RxData_Len(vnic->RxData), 877 vnic->rxPacketBytes); 878 879 /* 880 * if we're doing zero/delay copy and we're below the fifo 881 * threshold, see if we should try to do the zero/defer copy 882 */ 883 if ((Regs::get_Config_ZeroCopy(regs.Config) || 884 Regs::get_Config_DelayCopy(regs.Config)) && 885 !Regs::get_RxData_NoDelay(vnic->RxData) && rxLow) { 886 if (rxDmaLen > regs.ZeroCopyMark) 887 rxDmaLen = regs.ZeroCopySize; 888 } 889 rxDmaData = vnic->rxIndex->packet->data + vnic->rxPacketOffset; 890 rxState = rxCopy; 891 if (rxDmaAddr == 1LL) 
{ 892 rxState = rxCopyDone; 893 break; 894 } 895 896 dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaEvent, rxDmaData); 897 break; 898 899 case rxCopy: 900 DPRINTF(EthernetSM, "receive machine still copying\n"); 901 goto exit; 902 903 case rxCopyDone: 904 vnic->RxDone = vnic->rxDoneData; 905 vnic->RxDone |= Regs::RxDone_Complete; 906 rxBusyCount--; 907 908 if (vnic->rxPacketBytes == rxDmaLen) { 909 if (vnic->rxPacketOffset) 910 rxDirtyCount--; 911 912 // Packet is complete. Indicate how many bytes were copied 913 vnic->RxDone = Regs::set_RxDone_CopyLen(vnic->RxDone, rxDmaLen); 914 915 DPRINTF(EthernetSM, 916 "rxKick: packet complete on vnic %d (rxunique %d)\n", 917 rxActive, vnic->rxUnique); 918 rxFifo.remove(vnic->rxIndex); 919 vnic->rxIndex = rxFifo.end(); 920 rxMappedCount--; 921 } else { 922 if (!vnic->rxPacketOffset) 923 rxDirtyCount++; 924 925 vnic->rxPacketBytes -= rxDmaLen; 926 vnic->rxPacketOffset += rxDmaLen; 927 vnic->RxDone |= Regs::RxDone_More; 928 vnic->RxDone = Regs::set_RxDone_CopyLen(vnic->RxDone, 929 vnic->rxPacketBytes); 930 DPRINTF(EthernetSM, 931 "rxKick: packet not complete on vnic %d (rxunique %d): " 932 "%d bytes left\n", 933 rxActive, vnic->rxUnique, vnic->rxPacketBytes); 934 } 935 936 rxActive = -1; 937 rxState = rxBusy.empty() && rxList.empty() ? rxIdle : rxFifoBlock; 938 939 if (rxFifo.empty()) { 940 devIntrPost(Regs::Intr_RxEmpty); 941 rxEmpty = true; 942 } 943 944 if (rxFifo.size() < regs.RxFifoLow) 945 rxLow = true; 946 947 if (rxFifo.size() > regs.RxFifoHigh) 948 rxLow = false; 949 950 devIntrPost(Regs::Intr_RxDMA); 951 break; 952 953 default: 954 panic("Invalid rxState!"); 955 } 956 957 DPRINTF(EthernetSM, "entering next rxState=%s\n", 958 RxStateStrings[rxState]); 959 960 goto next; 961 962 exit: 963 /** 964 * @todo do we want to schedule a future kick? 
965 */ 966 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n", 967 RxStateStrings[rxState]); 968} 969 970void 971Device::txDmaDone() 972{ 973 assert(txState == txCopy); 974 txState = txCopyDone; 975 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n", 976 txDmaAddr, txDmaLen); 977 DDUMP(EthernetData, txDmaData, txDmaLen); 978 979 // If the receive state machine has a pending DMA, let it go first 980 if (rxState == rxBeginCopy) 981 rxKick(); 982 983 txKick(); 984} 985 986void 987Device::transmit() 988{ 989 if (txFifo.empty()) { 990 DPRINTF(Ethernet, "nothing to transmit\n"); 991 return; 992 } 993 994 uint32_t interrupts; 995 EthPacketPtr packet = txFifo.front(); 996 if (!interface->sendPacket(packet)) { 997 DPRINTF(Ethernet, "Packet Transmit: failed txFifo available %d\n", 998 txFifo.avail()); 999 return; 1000 } 1001 1002 txFifo.pop(); 1003#if TRACING_ON 1004 if (DTRACE(Ethernet)) { 1005 IpPtr ip(packet); 1006 if (ip) { 1007 DPRINTF(Ethernet, "ID is %d\n", ip->id()); 1008 TcpPtr tcp(ip); 1009 if (tcp) { 1010 DPRINTF(Ethernet, 1011 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n", 1012 tcp->sport(), tcp->dport(), tcp->seq(), 1013 tcp->ack()); 1014 } 1015 } 1016 } 1017#endif 1018 1019 DDUMP(EthernetData, packet->data, packet->length); 1020 txBytes += packet->length; 1021 txPackets++; 1022 1023 DPRINTF(Ethernet, "Packet Transmit: successful txFifo Available %d\n", 1024 txFifo.avail()); 1025 1026 interrupts = Regs::Intr_TxPacket; 1027 if (txFifo.size() < regs.TxFifoLow) 1028 interrupts |= Regs::Intr_TxLow; 1029 devIntrPost(interrupts); 1030} 1031 1032void 1033Device::txKick() 1034{ 1035 VirtualReg *vnic; 1036 DPRINTF(EthernetSM, "txKick: txState=%s (txFifo.size=%d)\n", 1037 TxStateStrings[txState], txFifo.size()); 1038 1039 if (txKickTick > curTick()) { 1040 DPRINTF(EthernetSM, "txKick: exiting, can't run till %d\n", 1041 txKickTick); 1042 return; 1043 } 1044 1045 next: 1046 if (txState == txIdle) 1047 goto exit; 1048 1049 assert(!txList.empty()); 1050 vnic = 
&virtualRegs[txList.front()]; 1051 1052 switch (txState) { 1053 case txFifoBlock: 1054 assert(Regs::get_TxDone_Busy(vnic->TxDone)); 1055 if (!txPacket) { 1056 // Grab a new packet from the fifo. 1057 txPacket = make_shared<EthPacketData>(16384); 1058 txPacketOffset = 0; 1059 } 1060 1061 if (txFifo.avail() - txPacket->length < 1062 Regs::get_TxData_Len(vnic->TxData)) { 1063 DPRINTF(EthernetSM, "transmit fifo full. Nothing to do.\n"); 1064 goto exit; 1065 } 1066 1067 txState = txBeginCopy; 1068 break; 1069 1070 case txBeginCopy: 1071 if (dmaPending() || drainState() != DrainState::Running) 1072 goto exit; 1073 1074 txDmaAddr = pciToDma(Regs::get_TxData_Addr(vnic->TxData)); 1075 txDmaLen = Regs::get_TxData_Len(vnic->TxData); 1076 txDmaData = txPacket->data + txPacketOffset; 1077 txState = txCopy; 1078 1079 dmaRead(txDmaAddr, txDmaLen, &txDmaEvent, txDmaData); 1080 break; 1081 1082 case txCopy: 1083 DPRINTF(EthernetSM, "transmit machine still copying\n"); 1084 goto exit; 1085 1086 case txCopyDone: 1087 vnic->TxDone = txDmaLen | Regs::TxDone_Complete; 1088 txPacket->simLength += txDmaLen; 1089 txPacket->length += txDmaLen; 1090 if ((vnic->TxData & Regs::TxData_More)) { 1091 txPacketOffset += txDmaLen; 1092 txState = txIdle; 1093 devIntrPost(Regs::Intr_TxDMA); 1094 break; 1095 } 1096 1097 assert(txPacket->length <= txFifo.avail()); 1098 if ((vnic->TxData & Regs::TxData_Checksum)) { 1099 IpPtr ip(txPacket); 1100 if (ip) { 1101 TcpPtr tcp(ip); 1102 if (tcp) { 1103 tcp->sum(0); 1104 tcp->sum(cksum(tcp)); 1105 txTcpChecksums++; 1106 } 1107 1108 UdpPtr udp(ip); 1109 if (udp) { 1110 udp->sum(0); 1111 udp->sum(cksum(udp)); 1112 txUdpChecksums++; 1113 } 1114 1115 ip->sum(0); 1116 ip->sum(cksum(ip)); 1117 txIpChecksums++; 1118 } 1119 } 1120 1121 txFifo.push(txPacket); 1122 if (txFifo.avail() < regs.TxMaxCopy) { 1123 devIntrPost(Regs::Intr_TxFull); 1124 txFull = true; 1125 } 1126 txPacket = 0; 1127 transmit(); 1128 txList.pop_front(); 1129 txState = txList.empty() ? 
txIdle : txFifoBlock; 1130 devIntrPost(Regs::Intr_TxDMA); 1131 break; 1132 1133 default: 1134 panic("Invalid txState!"); 1135 } 1136 1137 DPRINTF(EthernetSM, "entering next txState=%s\n", 1138 TxStateStrings[txState]); 1139 1140 goto next; 1141 1142 exit: 1143 /** 1144 * @todo do we want to schedule a future kick? 1145 */ 1146 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n", 1147 TxStateStrings[txState]); 1148} 1149 1150void 1151Device::transferDone() 1152{ 1153 if (txFifo.empty()) { 1154 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n"); 1155 return; 1156 } 1157 1158 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n"); 1159 1160 reschedule(txEvent, clockEdge(Cycles(1)), true); 1161} 1162 1163bool 1164Device::rxFilter(const EthPacketPtr &packet) 1165{ 1166 if (!Regs::get_Config_Filter(regs.Config)) 1167 return false; 1168 1169 panic("receive filter not implemented\n"); 1170 bool drop = true; 1171 1172#if 0 1173 string type; 1174 1175 EthHdr *eth = packet->eth(); 1176 if (eth->unicast()) { 1177 // If we're accepting all unicast addresses 1178 if (acceptUnicast) 1179 drop = false; 1180 1181 // If we make a perfect match 1182 if (acceptPerfect && params->eaddr == eth.dst()) 1183 drop = false; 1184 1185 if (acceptArp && eth->type() == ETH_TYPE_ARP) 1186 drop = false; 1187 1188 } else if (eth->broadcast()) { 1189 // if we're accepting broadcasts 1190 if (acceptBroadcast) 1191 drop = false; 1192 1193 } else if (eth->multicast()) { 1194 // if we're accepting all multicasts 1195 if (acceptMulticast) 1196 drop = false; 1197 1198 } 1199 1200 if (drop) { 1201 DPRINTF(Ethernet, "rxFilter drop\n"); 1202 DDUMP(EthernetData, packet->data, packet->length); 1203 } 1204#endif 1205 return drop; 1206} 1207 1208bool 1209Device::recvPacket(EthPacketPtr packet) 1210{ 1211 rxBytes += packet->length; 1212 rxPackets++; 1213 1214 DPRINTF(Ethernet, "Receiving packet from wire, rxFifo Available is %d\n", 1215 rxFifo.avail()); 1216 1217 if 
(!rxEnable) { 1218 DPRINTF(Ethernet, "receive disabled...packet dropped\n"); 1219 return true; 1220 } 1221 1222 if (rxFilter(packet)) { 1223 DPRINTF(Ethernet, "packet filtered...dropped\n"); 1224 return true; 1225 } 1226 1227 if (rxFifo.size() >= regs.RxFifoHigh) 1228 devIntrPost(Regs::Intr_RxHigh); 1229 1230 if (!rxFifo.push(packet)) { 1231 DPRINTF(Ethernet, 1232 "packet will not fit in receive buffer...packet dropped\n"); 1233 return false; 1234 } 1235 1236 // If we were at the last element, back up one ot go to the new 1237 // last element of the list. 1238 if (rxFifoPtr == rxFifo.end()) 1239 --rxFifoPtr; 1240 1241 devIntrPost(Regs::Intr_RxPacket); 1242 rxKick(); 1243 return true; 1244} 1245 1246void 1247Device::drainResume() 1248{ 1249 Drainable::drainResume(); 1250 1251 // During drain we could have left the state machines in a waiting state and 1252 // they wouldn't get out until some other event occured to kick them. 1253 // This way they'll get out immediately 1254 txKick(); 1255 rxKick(); 1256} 1257 1258//===================================================================== 1259// 1260// 1261void 1262Base::serialize(CheckpointOut &cp) const 1263{ 1264 // Serialize the PciDevice base class 1265 PciDevice::serialize(cp); 1266 1267 SERIALIZE_SCALAR(rxEnable); 1268 SERIALIZE_SCALAR(txEnable); 1269 SERIALIZE_SCALAR(cpuIntrEnable); 1270 1271 /* 1272 * Keep track of pending interrupt status. 1273 */ 1274 SERIALIZE_SCALAR(intrTick); 1275 SERIALIZE_SCALAR(cpuPendingIntr); 1276 Tick intrEventTick = 0; 1277 if (intrEvent) 1278 intrEventTick = intrEvent->when(); 1279 SERIALIZE_SCALAR(intrEventTick); 1280} 1281 1282void 1283Base::unserialize(CheckpointIn &cp) 1284{ 1285 // Unserialize the PciDevice base class 1286 PciDevice::unserialize(cp); 1287 1288 UNSERIALIZE_SCALAR(rxEnable); 1289 UNSERIALIZE_SCALAR(txEnable); 1290 UNSERIALIZE_SCALAR(cpuIntrEnable); 1291 1292 /* 1293 * Keep track of pending interrupt status. 
1294 */ 1295 UNSERIALIZE_SCALAR(intrTick); 1296 UNSERIALIZE_SCALAR(cpuPendingIntr); 1297 Tick intrEventTick; 1298 UNSERIALIZE_SCALAR(intrEventTick); 1299 if (intrEventTick) {
| 543 schedule(intrEvent, intrTick); 544} 545 546void 547Base::cpuInterrupt() 548{ 549 assert(intrTick == curTick()); 550 551 // Whether or not there's a pending interrupt, we don't care about 552 // it anymore 553 intrEvent = 0; 554 intrTick = 0; 555 556 // Don't send an interrupt if there's already one 557 if (cpuPendingIntr) { 558 DPRINTF(EthernetIntr, 559 "would send an interrupt now, but there's already pending\n"); 560 } else { 561 // Send interrupt 562 cpuPendingIntr = true; 563 564 DPRINTF(EthernetIntr, "posting interrupt\n"); 565 intrPost(); 566 } 567} 568 569void 570Base::cpuIntrClear() 571{ 572 if (!cpuPendingIntr) 573 return; 574 575 if (intrEvent) { 576 intrEvent->squash(); 577 intrEvent = 0; 578 } 579 580 intrTick = 0; 581 582 cpuPendingIntr = false; 583 584 DPRINTF(EthernetIntr, "clearing cchip interrupt\n"); 585 intrClear(); 586} 587 588bool 589Base::cpuIntrPending() const 590{ return cpuPendingIntr; } 591 592void 593Device::changeConfig(uint32_t newconf) 594{ 595 uint32_t changed = regs.Config ^ newconf; 596 if (!changed) 597 return; 598 599 regs.Config = newconf; 600 601 if ((changed & Regs::Config_IntEn)) { 602 cpuIntrEnable = regs.Config & Regs::Config_IntEn; 603 if (cpuIntrEnable) { 604 if (regs.IntrStatus & regs.IntrMask) 605 cpuIntrPost(curTick()); 606 } else { 607 cpuIntrClear(); 608 } 609 } 610 611 if ((changed & Regs::Config_TxEn)) { 612 txEnable = regs.Config & Regs::Config_TxEn; 613 if (txEnable) 614 txKick(); 615 } 616 617 if ((changed & Regs::Config_RxEn)) { 618 rxEnable = regs.Config & Regs::Config_RxEn; 619 if (rxEnable) 620 rxKick(); 621 } 622} 623 624void 625Device::command(uint32_t command) 626{ 627 if (command & Regs::Command_Intr) 628 devIntrPost(Regs::Intr_Soft); 629 630 if (command & Regs::Command_Reset) 631 reset(); 632} 633 634void 635Device::reset() 636{ 637 using namespace Regs; 638 639 memset(®s, 0, sizeof(regs)); 640 641 regs.Config = 0; 642 if (params()->rx_thread) 643 regs.Config |= Config_RxThread; 644 if 
(params()->tx_thread) 645 regs.Config |= Config_TxThread; 646 if (params()->rss) 647 regs.Config |= Config_RSS; 648 if (params()->zero_copy) 649 regs.Config |= Config_ZeroCopy; 650 if (params()->delay_copy) 651 regs.Config |= Config_DelayCopy; 652 if (params()->virtual_addr) 653 regs.Config |= Config_Vaddr; 654 655 if (params()->delay_copy && params()->zero_copy) 656 panic("Can't delay copy and zero copy"); 657 658 regs.IntrMask = Intr_Soft | Intr_RxHigh | Intr_RxPacket | Intr_TxLow; 659 regs.RxMaxCopy = params()->rx_max_copy; 660 regs.TxMaxCopy = params()->tx_max_copy; 661 regs.ZeroCopySize = params()->zero_copy_size; 662 regs.ZeroCopyMark = params()->zero_copy_threshold; 663 regs.VirtualCount = params()->virtual_count; 664 regs.RxMaxIntr = params()->rx_max_intr; 665 regs.RxFifoSize = params()->rx_fifo_size; 666 regs.TxFifoSize = params()->tx_fifo_size; 667 regs.RxFifoLow = params()->rx_fifo_low_mark; 668 regs.TxFifoLow = params()->tx_fifo_threshold; 669 regs.RxFifoHigh = params()->rx_fifo_threshold; 670 regs.TxFifoHigh = params()->tx_fifo_high_mark; 671 regs.HwAddr = params()->hardware_address; 672 673 if (regs.RxMaxCopy < regs.ZeroCopyMark) 674 panic("Must be able to copy at least as many bytes as the threshold"); 675 676 if (regs.ZeroCopySize >= regs.ZeroCopyMark) 677 panic("The number of bytes to copy must be less than the threshold"); 678 679 rxList.clear(); 680 rxBusy.clear(); 681 rxActive = -1; 682 txList.clear(); 683 rxBusyCount = 0; 684 rxDirtyCount = 0; 685 rxMappedCount = 0; 686 687 rxState = rxIdle; 688 txState = txIdle; 689 690 rxFifo.clear(); 691 rxFifoPtr = rxFifo.end(); 692 txFifo.clear(); 693 rxEmpty = false; 694 rxLow = true; 695 txFull = false; 696 697 int size = virtualRegs.size(); 698 virtualRegs.clear(); 699 virtualRegs.resize(size); 700 for (int i = 0; i < size; ++i) 701 virtualRegs[i].rxIndex = rxFifo.end(); 702} 703 704void 705Device::rxDmaDone() 706{ 707 assert(rxState == rxCopy); 708 rxState = rxCopyDone; 709 DPRINTF(EthernetDMA, "end rx 
dma write paddr=%#x len=%d\n", 710 rxDmaAddr, rxDmaLen); 711 DDUMP(EthernetData, rxDmaData, rxDmaLen); 712 713 // If the transmit state machine has a pending DMA, let it go first 714 if (txState == txBeginCopy) 715 txKick(); 716 717 rxKick(); 718} 719 720void 721Device::rxKick() 722{ 723 VirtualReg *vnic = NULL; 724 725 DPRINTF(EthernetSM, "rxKick: rxState=%s (rxFifo.size=%d)\n", 726 RxStateStrings[rxState], rxFifo.size()); 727 728 if (rxKickTick > curTick()) { 729 DPRINTF(EthernetSM, "rxKick: exiting, can't run till %d\n", 730 rxKickTick); 731 return; 732 } 733 734 next: 735 rxFifo.check(); 736 if (rxState == rxIdle) 737 goto exit; 738 739 if (rxActive == -1) { 740 if (rxState != rxFifoBlock) 741 panic("no active vnic while in state %s", RxStateStrings[rxState]); 742 743 DPRINTF(EthernetSM, "processing rxState=%s\n", 744 RxStateStrings[rxState]); 745 } else { 746 vnic = &virtualRegs[rxActive]; 747 DPRINTF(EthernetSM, 748 "processing rxState=%s for vnic %d (rxunique %d)\n", 749 RxStateStrings[rxState], rxActive, vnic->rxUnique); 750 } 751 752 switch (rxState) { 753 case rxFifoBlock: 754 if (DTRACE(EthernetSM)) { 755 PacketFifo::iterator end = rxFifo.end(); 756 int size = virtualRegs.size(); 757 for (int i = 0; i < size; ++i) { 758 VirtualReg *vn = &virtualRegs[i]; 759 bool busy = Regs::get_RxDone_Busy(vn->RxDone); 760 if (vn->rxIndex != end) { 761#ifndef NDEBUG 762 bool dirty = vn->rxPacketOffset > 0; 763 const char *status; 764 765 if (busy && dirty) 766 status = "busy,dirty"; 767 else if (busy) 768 status = "busy"; 769 else if (dirty) 770 status = "dirty"; 771 else 772 status = "mapped"; 773 774 DPRINTF(EthernetSM, 775 "vnic %d %s (rxunique %d), packet %d, slack %d\n", 776 i, status, vn->rxUnique, 777 rxFifo.countPacketsBefore(vn->rxIndex), 778 vn->rxIndex->slack); 779#endif 780 } else if (busy) { 781 DPRINTF(EthernetSM, "vnic %d unmapped (rxunique %d)\n", 782 i, vn->rxUnique); 783 } 784 } 785 } 786 787 if (!rxBusy.empty()) { 788 rxActive = rxBusy.front(); 789 
rxBusy.pop_front(); 790 vnic = &virtualRegs[rxActive]; 791 792 if (vnic->rxIndex == rxFifo.end()) 793 panic("continuing vnic without packet\n"); 794 795 DPRINTF(EthernetSM, 796 "continue processing for vnic %d (rxunique %d)\n", 797 rxActive, vnic->rxUnique); 798 799 rxState = rxBeginCopy; 800 801 int vnic_distance = rxFifo.countPacketsBefore(vnic->rxIndex); 802 totalVnicDistance += vnic_distance; 803 numVnicDistance += 1; 804 if (vnic_distance > _maxVnicDistance) { 805 maxVnicDistance = vnic_distance; 806 _maxVnicDistance = vnic_distance; 807 } 808 809 break; 810 } 811 812 if (rxFifoPtr == rxFifo.end()) { 813 DPRINTF(EthernetSM, "receive waiting for data. Nothing to do.\n"); 814 goto exit; 815 } 816 817 if (rxList.empty()) 818 panic("Not idle, but nothing to do!"); 819 820 assert(!rxFifo.empty()); 821 822 rxActive = rxList.front(); 823 rxList.pop_front(); 824 vnic = &virtualRegs[rxActive]; 825 826 DPRINTF(EthernetSM, 827 "processing new packet for vnic %d (rxunique %d)\n", 828 rxActive, vnic->rxUnique); 829 830 // Grab a new packet from the fifo. 
831 vnic->rxIndex = rxFifoPtr++; 832 vnic->rxIndex->priv = rxActive; 833 vnic->rxPacketOffset = 0; 834 vnic->rxPacketBytes = vnic->rxIndex->packet->length; 835 assert(vnic->rxPacketBytes); 836 rxMappedCount++; 837 838 vnic->rxDoneData = 0; 839 /* scope for variables */ { 840 IpPtr ip(vnic->rxIndex->packet); 841 if (ip) { 842 DPRINTF(Ethernet, "ID is %d\n", ip->id()); 843 vnic->rxDoneData |= Regs::RxDone_IpPacket; 844 rxIpChecksums++; 845 if (cksum(ip) != 0) { 846 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n"); 847 vnic->rxDoneData |= Regs::RxDone_IpError; 848 } 849 TcpPtr tcp(ip); 850 UdpPtr udp(ip); 851 if (tcp) { 852 DPRINTF(Ethernet, 853 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n", 854 tcp->sport(), tcp->dport(), tcp->seq(), 855 tcp->ack()); 856 vnic->rxDoneData |= Regs::RxDone_TcpPacket; 857 rxTcpChecksums++; 858 if (cksum(tcp) != 0) { 859 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n"); 860 vnic->rxDoneData |= Regs::RxDone_TcpError; 861 } 862 } else if (udp) { 863 vnic->rxDoneData |= Regs::RxDone_UdpPacket; 864 rxUdpChecksums++; 865 if (cksum(udp) != 0) { 866 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n"); 867 vnic->rxDoneData |= Regs::RxDone_UdpError; 868 } 869 } 870 } 871 } 872 rxState = rxBeginCopy; 873 break; 874 875 case rxBeginCopy: 876 if (dmaPending() || drainState() != DrainState::Running) 877 goto exit; 878 879 rxDmaAddr = pciToDma(Regs::get_RxData_Addr(vnic->RxData)); 880 rxDmaLen = min<unsigned>(Regs::get_RxData_Len(vnic->RxData), 881 vnic->rxPacketBytes); 882 883 /* 884 * if we're doing zero/delay copy and we're below the fifo 885 * threshold, see if we should try to do the zero/defer copy 886 */ 887 if ((Regs::get_Config_ZeroCopy(regs.Config) || 888 Regs::get_Config_DelayCopy(regs.Config)) && 889 !Regs::get_RxData_NoDelay(vnic->RxData) && rxLow) { 890 if (rxDmaLen > regs.ZeroCopyMark) 891 rxDmaLen = regs.ZeroCopySize; 892 } 893 rxDmaData = vnic->rxIndex->packet->data + vnic->rxPacketOffset; 894 rxState = rxCopy; 895 if (rxDmaAddr == 1LL) 
{ 896 rxState = rxCopyDone; 897 break; 898 } 899 900 dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaEvent, rxDmaData); 901 break; 902 903 case rxCopy: 904 DPRINTF(EthernetSM, "receive machine still copying\n"); 905 goto exit; 906 907 case rxCopyDone: 908 vnic->RxDone = vnic->rxDoneData; 909 vnic->RxDone |= Regs::RxDone_Complete; 910 rxBusyCount--; 911 912 if (vnic->rxPacketBytes == rxDmaLen) { 913 if (vnic->rxPacketOffset) 914 rxDirtyCount--; 915 916 // Packet is complete. Indicate how many bytes were copied 917 vnic->RxDone = Regs::set_RxDone_CopyLen(vnic->RxDone, rxDmaLen); 918 919 DPRINTF(EthernetSM, 920 "rxKick: packet complete on vnic %d (rxunique %d)\n", 921 rxActive, vnic->rxUnique); 922 rxFifo.remove(vnic->rxIndex); 923 vnic->rxIndex = rxFifo.end(); 924 rxMappedCount--; 925 } else { 926 if (!vnic->rxPacketOffset) 927 rxDirtyCount++; 928 929 vnic->rxPacketBytes -= rxDmaLen; 930 vnic->rxPacketOffset += rxDmaLen; 931 vnic->RxDone |= Regs::RxDone_More; 932 vnic->RxDone = Regs::set_RxDone_CopyLen(vnic->RxDone, 933 vnic->rxPacketBytes); 934 DPRINTF(EthernetSM, 935 "rxKick: packet not complete on vnic %d (rxunique %d): " 936 "%d bytes left\n", 937 rxActive, vnic->rxUnique, vnic->rxPacketBytes); 938 } 939 940 rxActive = -1; 941 rxState = rxBusy.empty() && rxList.empty() ? rxIdle : rxFifoBlock; 942 943 if (rxFifo.empty()) { 944 devIntrPost(Regs::Intr_RxEmpty); 945 rxEmpty = true; 946 } 947 948 if (rxFifo.size() < regs.RxFifoLow) 949 rxLow = true; 950 951 if (rxFifo.size() > regs.RxFifoHigh) 952 rxLow = false; 953 954 devIntrPost(Regs::Intr_RxDMA); 955 break; 956 957 default: 958 panic("Invalid rxState!"); 959 } 960 961 DPRINTF(EthernetSM, "entering next rxState=%s\n", 962 RxStateStrings[rxState]); 963 964 goto next; 965 966 exit: 967 /** 968 * @todo do we want to schedule a future kick? 
969 */ 970 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n", 971 RxStateStrings[rxState]); 972} 973 974void 975Device::txDmaDone() 976{ 977 assert(txState == txCopy); 978 txState = txCopyDone; 979 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n", 980 txDmaAddr, txDmaLen); 981 DDUMP(EthernetData, txDmaData, txDmaLen); 982 983 // If the receive state machine has a pending DMA, let it go first 984 if (rxState == rxBeginCopy) 985 rxKick(); 986 987 txKick(); 988} 989 990void 991Device::transmit() 992{ 993 if (txFifo.empty()) { 994 DPRINTF(Ethernet, "nothing to transmit\n"); 995 return; 996 } 997 998 uint32_t interrupts; 999 EthPacketPtr packet = txFifo.front(); 1000 if (!interface->sendPacket(packet)) { 1001 DPRINTF(Ethernet, "Packet Transmit: failed txFifo available %d\n", 1002 txFifo.avail()); 1003 return; 1004 } 1005 1006 txFifo.pop(); 1007#if TRACING_ON 1008 if (DTRACE(Ethernet)) { 1009 IpPtr ip(packet); 1010 if (ip) { 1011 DPRINTF(Ethernet, "ID is %d\n", ip->id()); 1012 TcpPtr tcp(ip); 1013 if (tcp) { 1014 DPRINTF(Ethernet, 1015 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n", 1016 tcp->sport(), tcp->dport(), tcp->seq(), 1017 tcp->ack()); 1018 } 1019 } 1020 } 1021#endif 1022 1023 DDUMP(EthernetData, packet->data, packet->length); 1024 txBytes += packet->length; 1025 txPackets++; 1026 1027 DPRINTF(Ethernet, "Packet Transmit: successful txFifo Available %d\n", 1028 txFifo.avail()); 1029 1030 interrupts = Regs::Intr_TxPacket; 1031 if (txFifo.size() < regs.TxFifoLow) 1032 interrupts |= Regs::Intr_TxLow; 1033 devIntrPost(interrupts); 1034} 1035 1036void 1037Device::txKick() 1038{ 1039 VirtualReg *vnic; 1040 DPRINTF(EthernetSM, "txKick: txState=%s (txFifo.size=%d)\n", 1041 TxStateStrings[txState], txFifo.size()); 1042 1043 if (txKickTick > curTick()) { 1044 DPRINTF(EthernetSM, "txKick: exiting, can't run till %d\n", 1045 txKickTick); 1046 return; 1047 } 1048 1049 next: 1050 if (txState == txIdle) 1051 goto exit; 1052 1053 assert(!txList.empty()); 1054 vnic = 
&virtualRegs[txList.front()]; 1055 1056 switch (txState) { 1057 case txFifoBlock: 1058 assert(Regs::get_TxDone_Busy(vnic->TxDone)); 1059 if (!txPacket) { 1060 // Grab a new packet from the fifo. 1061 txPacket = make_shared<EthPacketData>(16384); 1062 txPacketOffset = 0; 1063 } 1064 1065 if (txFifo.avail() - txPacket->length < 1066 Regs::get_TxData_Len(vnic->TxData)) { 1067 DPRINTF(EthernetSM, "transmit fifo full. Nothing to do.\n"); 1068 goto exit; 1069 } 1070 1071 txState = txBeginCopy; 1072 break; 1073 1074 case txBeginCopy: 1075 if (dmaPending() || drainState() != DrainState::Running) 1076 goto exit; 1077 1078 txDmaAddr = pciToDma(Regs::get_TxData_Addr(vnic->TxData)); 1079 txDmaLen = Regs::get_TxData_Len(vnic->TxData); 1080 txDmaData = txPacket->data + txPacketOffset; 1081 txState = txCopy; 1082 1083 dmaRead(txDmaAddr, txDmaLen, &txDmaEvent, txDmaData); 1084 break; 1085 1086 case txCopy: 1087 DPRINTF(EthernetSM, "transmit machine still copying\n"); 1088 goto exit; 1089 1090 case txCopyDone: 1091 vnic->TxDone = txDmaLen | Regs::TxDone_Complete; 1092 txPacket->simLength += txDmaLen; 1093 txPacket->length += txDmaLen; 1094 if ((vnic->TxData & Regs::TxData_More)) { 1095 txPacketOffset += txDmaLen; 1096 txState = txIdle; 1097 devIntrPost(Regs::Intr_TxDMA); 1098 break; 1099 } 1100 1101 assert(txPacket->length <= txFifo.avail()); 1102 if ((vnic->TxData & Regs::TxData_Checksum)) { 1103 IpPtr ip(txPacket); 1104 if (ip) { 1105 TcpPtr tcp(ip); 1106 if (tcp) { 1107 tcp->sum(0); 1108 tcp->sum(cksum(tcp)); 1109 txTcpChecksums++; 1110 } 1111 1112 UdpPtr udp(ip); 1113 if (udp) { 1114 udp->sum(0); 1115 udp->sum(cksum(udp)); 1116 txUdpChecksums++; 1117 } 1118 1119 ip->sum(0); 1120 ip->sum(cksum(ip)); 1121 txIpChecksums++; 1122 } 1123 } 1124 1125 txFifo.push(txPacket); 1126 if (txFifo.avail() < regs.TxMaxCopy) { 1127 devIntrPost(Regs::Intr_TxFull); 1128 txFull = true; 1129 } 1130 txPacket = 0; 1131 transmit(); 1132 txList.pop_front(); 1133 txState = txList.empty() ? 
txIdle : txFifoBlock; 1134 devIntrPost(Regs::Intr_TxDMA); 1135 break; 1136 1137 default: 1138 panic("Invalid txState!"); 1139 } 1140 1141 DPRINTF(EthernetSM, "entering next txState=%s\n", 1142 TxStateStrings[txState]); 1143 1144 goto next; 1145 1146 exit: 1147 /** 1148 * @todo do we want to schedule a future kick? 1149 */ 1150 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n", 1151 TxStateStrings[txState]); 1152} 1153 1154void 1155Device::transferDone() 1156{ 1157 if (txFifo.empty()) { 1158 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n"); 1159 return; 1160 } 1161 1162 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n"); 1163 1164 reschedule(txEvent, clockEdge(Cycles(1)), true); 1165} 1166 1167bool 1168Device::rxFilter(const EthPacketPtr &packet) 1169{ 1170 if (!Regs::get_Config_Filter(regs.Config)) 1171 return false; 1172 1173 panic("receive filter not implemented\n"); 1174 bool drop = true; 1175 1176#if 0 1177 string type; 1178 1179 EthHdr *eth = packet->eth(); 1180 if (eth->unicast()) { 1181 // If we're accepting all unicast addresses 1182 if (acceptUnicast) 1183 drop = false; 1184 1185 // If we make a perfect match 1186 if (acceptPerfect && params->eaddr == eth.dst()) 1187 drop = false; 1188 1189 if (acceptArp && eth->type() == ETH_TYPE_ARP) 1190 drop = false; 1191 1192 } else if (eth->broadcast()) { 1193 // if we're accepting broadcasts 1194 if (acceptBroadcast) 1195 drop = false; 1196 1197 } else if (eth->multicast()) { 1198 // if we're accepting all multicasts 1199 if (acceptMulticast) 1200 drop = false; 1201 1202 } 1203 1204 if (drop) { 1205 DPRINTF(Ethernet, "rxFilter drop\n"); 1206 DDUMP(EthernetData, packet->data, packet->length); 1207 } 1208#endif 1209 return drop; 1210} 1211 1212bool 1213Device::recvPacket(EthPacketPtr packet) 1214{ 1215 rxBytes += packet->length; 1216 rxPackets++; 1217 1218 DPRINTF(Ethernet, "Receiving packet from wire, rxFifo Available is %d\n", 1219 rxFifo.avail()); 1220 1221 if 
(!rxEnable) { 1222 DPRINTF(Ethernet, "receive disabled...packet dropped\n"); 1223 return true; 1224 } 1225 1226 if (rxFilter(packet)) { 1227 DPRINTF(Ethernet, "packet filtered...dropped\n"); 1228 return true; 1229 } 1230 1231 if (rxFifo.size() >= regs.RxFifoHigh) 1232 devIntrPost(Regs::Intr_RxHigh); 1233 1234 if (!rxFifo.push(packet)) { 1235 DPRINTF(Ethernet, 1236 "packet will not fit in receive buffer...packet dropped\n"); 1237 return false; 1238 } 1239 1240 // If we were at the last element, back up one ot go to the new 1241 // last element of the list. 1242 if (rxFifoPtr == rxFifo.end()) 1243 --rxFifoPtr; 1244 1245 devIntrPost(Regs::Intr_RxPacket); 1246 rxKick(); 1247 return true; 1248} 1249 1250void 1251Device::drainResume() 1252{ 1253 Drainable::drainResume(); 1254 1255 // During drain we could have left the state machines in a waiting state and 1256 // they wouldn't get out until some other event occured to kick them. 1257 // This way they'll get out immediately 1258 txKick(); 1259 rxKick(); 1260} 1261 1262//===================================================================== 1263// 1264// 1265void 1266Base::serialize(CheckpointOut &cp) const 1267{ 1268 // Serialize the PciDevice base class 1269 PciDevice::serialize(cp); 1270 1271 SERIALIZE_SCALAR(rxEnable); 1272 SERIALIZE_SCALAR(txEnable); 1273 SERIALIZE_SCALAR(cpuIntrEnable); 1274 1275 /* 1276 * Keep track of pending interrupt status. 1277 */ 1278 SERIALIZE_SCALAR(intrTick); 1279 SERIALIZE_SCALAR(cpuPendingIntr); 1280 Tick intrEventTick = 0; 1281 if (intrEvent) 1282 intrEventTick = intrEvent->when(); 1283 SERIALIZE_SCALAR(intrEventTick); 1284} 1285 1286void 1287Base::unserialize(CheckpointIn &cp) 1288{ 1289 // Unserialize the PciDevice base class 1290 PciDevice::unserialize(cp); 1291 1292 UNSERIALIZE_SCALAR(rxEnable); 1293 UNSERIALIZE_SCALAR(txEnable); 1294 UNSERIALIZE_SCALAR(cpuIntrEnable); 1295 1296 /* 1297 * Keep track of pending interrupt status. 
1298 */ 1299 UNSERIALIZE_SCALAR(intrTick); 1300 UNSERIALIZE_SCALAR(cpuPendingIntr); 1301 Tick intrEventTick; 1302 UNSERIALIZE_SCALAR(intrEventTick); 1303 if (intrEventTick) {
|
1300 intrEvent = new IntrEvent(this, true);
| 1304 intrEvent = new EventFunctionWrapper([this]{ cpuInterrupt(); }, 1305 name(), true);
|
1301 schedule(intrEvent, intrEventTick); 1302 } 1303} 1304 1305void 1306Device::serialize(CheckpointOut &cp) const 1307{ 1308 int count; 1309 1310 // Serialize the PciDevice base class 1311 Base::serialize(cp); 1312 1313 if (rxState == rxCopy) 1314 panic("can't serialize with an in flight dma request rxState=%s", 1315 RxStateStrings[rxState]); 1316 1317 if (txState == txCopy) 1318 panic("can't serialize with an in flight dma request txState=%s", 1319 TxStateStrings[txState]); 1320 1321 /* 1322 * Serialize the device registers that could be modified by the OS. 1323 */ 1324 SERIALIZE_SCALAR(regs.Config); 1325 SERIALIZE_SCALAR(regs.IntrStatus); 1326 SERIALIZE_SCALAR(regs.IntrMask); 1327 SERIALIZE_SCALAR(regs.RxData); 1328 SERIALIZE_SCALAR(regs.TxData); 1329 1330 /* 1331 * Serialize the virtual nic state 1332 */ 1333 int virtualRegsSize = virtualRegs.size(); 1334 SERIALIZE_SCALAR(virtualRegsSize); 1335 for (int i = 0; i < virtualRegsSize; ++i) { 1336 const VirtualReg *vnic = &virtualRegs[i]; 1337 1338 std::string reg = csprintf("vnic%d", i); 1339 paramOut(cp, reg + ".RxData", vnic->RxData); 1340 paramOut(cp, reg + ".RxDone", vnic->RxDone); 1341 paramOut(cp, reg + ".TxData", vnic->TxData); 1342 paramOut(cp, reg + ".TxDone", vnic->TxDone); 1343 1344 bool rxPacketExists = vnic->rxIndex != rxFifo.end(); 1345 paramOut(cp, reg + ".rxPacketExists", rxPacketExists); 1346 if (rxPacketExists) { 1347 int rxPacket = 0; 1348 auto i = rxFifo.begin(); 1349 while (i != vnic->rxIndex) { 1350 assert(i != rxFifo.end()); 1351 ++i; 1352 ++rxPacket; 1353 } 1354 1355 paramOut(cp, reg + ".rxPacket", rxPacket); 1356 paramOut(cp, reg + ".rxPacketOffset", vnic->rxPacketOffset); 1357 paramOut(cp, reg + ".rxPacketBytes", vnic->rxPacketBytes); 1358 } 1359 paramOut(cp, reg + ".rxDoneData", vnic->rxDoneData); 1360 } 1361 1362 int rxFifoPtr = -1; 1363 if (this->rxFifoPtr != rxFifo.end()) 1364 rxFifoPtr = rxFifo.countPacketsBefore(this->rxFifoPtr); 1365 SERIALIZE_SCALAR(rxFifoPtr); 1366 1367 
SERIALIZE_SCALAR(rxActive); 1368 SERIALIZE_SCALAR(rxBusyCount); 1369 SERIALIZE_SCALAR(rxDirtyCount); 1370 SERIALIZE_SCALAR(rxMappedCount); 1371 1372 VirtualList::const_iterator i, end; 1373 for (count = 0, i = rxList.begin(), end = rxList.end(); i != end; ++i) 1374 paramOut(cp, csprintf("rxList%d", count++), *i); 1375 int rxListSize = count; 1376 SERIALIZE_SCALAR(rxListSize); 1377 1378 for (count = 0, i = rxBusy.begin(), end = rxBusy.end(); i != end; ++i) 1379 paramOut(cp, csprintf("rxBusy%d", count++), *i); 1380 int rxBusySize = count; 1381 SERIALIZE_SCALAR(rxBusySize); 1382 1383 for (count = 0, i = txList.begin(), end = txList.end(); i != end; ++i) 1384 paramOut(cp, csprintf("txList%d", count++), *i); 1385 int txListSize = count; 1386 SERIALIZE_SCALAR(txListSize); 1387 1388 /* 1389 * Serialize rx state machine 1390 */ 1391 int rxState = this->rxState; 1392 SERIALIZE_SCALAR(rxState); 1393 SERIALIZE_SCALAR(rxEmpty); 1394 SERIALIZE_SCALAR(rxLow); 1395 rxFifo.serialize("rxFifo", cp); 1396 1397 /* 1398 * Serialize tx state machine 1399 */ 1400 int txState = this->txState; 1401 SERIALIZE_SCALAR(txState); 1402 SERIALIZE_SCALAR(txFull); 1403 txFifo.serialize("txFifo", cp); 1404 bool txPacketExists = txPacket != nullptr; 1405 SERIALIZE_SCALAR(txPacketExists); 1406 if (txPacketExists) { 1407 txPacket->serialize("txPacket", cp); 1408 SERIALIZE_SCALAR(txPacketOffset); 1409 SERIALIZE_SCALAR(txPacketBytes); 1410 } 1411 1412 /* 1413 * If there's a pending transmit, store the time so we can 1414 * reschedule it later 1415 */ 1416 Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick() : 0; 1417 SERIALIZE_SCALAR(transmitTick); 1418} 1419 1420void 1421Device::unserialize(CheckpointIn &cp) 1422{ 1423 // Unserialize the PciDevice base class 1424 Base::unserialize(cp); 1425 1426 /* 1427 * Unserialize the device registers that may have been written by the OS. 
1428 */ 1429 UNSERIALIZE_SCALAR(regs.Config); 1430 UNSERIALIZE_SCALAR(regs.IntrStatus); 1431 UNSERIALIZE_SCALAR(regs.IntrMask); 1432 UNSERIALIZE_SCALAR(regs.RxData); 1433 UNSERIALIZE_SCALAR(regs.TxData); 1434 1435 UNSERIALIZE_SCALAR(rxActive); 1436 UNSERIALIZE_SCALAR(rxBusyCount); 1437 UNSERIALIZE_SCALAR(rxDirtyCount); 1438 UNSERIALIZE_SCALAR(rxMappedCount); 1439 1440 int rxListSize; 1441 UNSERIALIZE_SCALAR(rxListSize); 1442 rxList.clear(); 1443 for (int i = 0; i < rxListSize; ++i) { 1444 int value; 1445 paramIn(cp, csprintf("rxList%d", i), value); 1446 rxList.push_back(value); 1447 } 1448 1449 int rxBusySize; 1450 UNSERIALIZE_SCALAR(rxBusySize); 1451 rxBusy.clear(); 1452 for (int i = 0; i < rxBusySize; ++i) { 1453 int value; 1454 paramIn(cp, csprintf("rxBusy%d", i), value); 1455 rxBusy.push_back(value); 1456 } 1457 1458 int txListSize; 1459 UNSERIALIZE_SCALAR(txListSize); 1460 txList.clear(); 1461 for (int i = 0; i < txListSize; ++i) { 1462 int value; 1463 paramIn(cp, csprintf("txList%d", i), value); 1464 txList.push_back(value); 1465 } 1466 1467 /* 1468 * Unserialize rx state machine 1469 */ 1470 int rxState; 1471 UNSERIALIZE_SCALAR(rxState); 1472 UNSERIALIZE_SCALAR(rxEmpty); 1473 UNSERIALIZE_SCALAR(rxLow); 1474 this->rxState = (RxState) rxState; 1475 rxFifo.unserialize("rxFifo", cp); 1476 1477 int rxFifoPtr; 1478 UNSERIALIZE_SCALAR(rxFifoPtr); 1479 if (rxFifoPtr >= 0) { 1480 this->rxFifoPtr = rxFifo.begin(); 1481 for (int i = 0; i < rxFifoPtr; ++i) 1482 ++this->rxFifoPtr; 1483 } else { 1484 this->rxFifoPtr = rxFifo.end(); 1485 } 1486 1487 /* 1488 * Unserialize tx state machine 1489 */ 1490 int txState; 1491 UNSERIALIZE_SCALAR(txState); 1492 UNSERIALIZE_SCALAR(txFull); 1493 this->txState = (TxState) txState; 1494 txFifo.unserialize("txFifo", cp); 1495 bool txPacketExists; 1496 UNSERIALIZE_SCALAR(txPacketExists); 1497 txPacket = 0; 1498 if (txPacketExists) { 1499 txPacket = make_shared<EthPacketData>(16384); 1500 txPacket->unserialize("txPacket", cp); 1501 
UNSERIALIZE_SCALAR(txPacketOffset); 1502 UNSERIALIZE_SCALAR(txPacketBytes); 1503 } 1504 1505 /* 1506 * unserialize the virtual nic registers/state 1507 * 1508 * this must be done after the unserialization of the rxFifo 1509 * because the packet iterators depend on the fifo being populated 1510 */ 1511 int virtualRegsSize; 1512 UNSERIALIZE_SCALAR(virtualRegsSize); 1513 virtualRegs.clear(); 1514 virtualRegs.resize(virtualRegsSize); 1515 for (int i = 0; i < virtualRegsSize; ++i) { 1516 VirtualReg *vnic = &virtualRegs[i]; 1517 std::string reg = csprintf("vnic%d", i); 1518 1519 paramIn(cp, reg + ".RxData", vnic->RxData); 1520 paramIn(cp, reg + ".RxDone", vnic->RxDone); 1521 paramIn(cp, reg + ".TxData", vnic->TxData); 1522 paramIn(cp, reg + ".TxDone", vnic->TxDone); 1523 1524 vnic->rxUnique = rxUnique++; 1525 vnic->txUnique = txUnique++; 1526 1527 bool rxPacketExists; 1528 paramIn(cp, reg + ".rxPacketExists", rxPacketExists); 1529 if (rxPacketExists) { 1530 int rxPacket; 1531 paramIn(cp, reg + ".rxPacket", rxPacket); 1532 vnic->rxIndex = rxFifo.begin(); 1533 while (rxPacket--) 1534 ++vnic->rxIndex; 1535 1536 paramIn(cp, reg + ".rxPacketOffset", 1537 vnic->rxPacketOffset); 1538 paramIn(cp, reg + ".rxPacketBytes", vnic->rxPacketBytes); 1539 } else { 1540 vnic->rxIndex = rxFifo.end(); 1541 } 1542 paramIn(cp, reg + ".rxDoneData", vnic->rxDoneData); 1543 } 1544 1545 /* 1546 * If there's a pending transmit, reschedule it now 1547 */ 1548 Tick transmitTick; 1549 UNSERIALIZE_SCALAR(transmitTick); 1550 if (transmitTick) 1551 schedule(txEvent, curTick() + transmitTick); 1552 1553 pioPort.sendRangeChange(); 1554 1555} 1556 1557} // namespace Sinic 1558 1559Sinic::Device * 1560SinicParams::create() 1561{ 1562 return new Sinic::Device(this); 1563}
// NOTE(review): a second, byte-duplicated copy of Device::serialize(),
// Device::unserialize() and SinicParams::create() appeared here — an
// artifact of a bad merge/diff paste that would cause redefinition
// errors at link/compile time. It has been removed; the canonical
// definitions are the ones that appear earlier in this file.