// i8254xGBe.hh revision 4263
/*
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 */

/* @file
 * Device model for Intel's 8254x line of gigabit ethernet controllers.
33 */ 34 35#ifndef __DEV_I8254XGBE_HH__ 36#define __DEV_I8254XGBE_HH__ 37 38#include <deque> 39#include <string> 40 41#include "base/inet.hh" 42#include "base/statistics.hh" 43#include "dev/etherint.hh" 44#include "dev/etherpkt.hh" 45#include "dev/i8254xGBe_defs.hh" 46#include "dev/pcidev.hh" 47#include "dev/pktfifo.hh" 48#include "sim/eventq.hh" 49 50class IGbEInt; 51 52class IGbE : public PciDev 53{ 54 private: 55 IGbEInt *etherInt; 56 57 // device registers 58 iGbReg::Regs regs; 59 60 // eeprom data, status and control bits 61 int eeOpBits, eeAddrBits, eeDataBits; 62 uint8_t eeOpcode, eeAddr; 63 uint16_t flash[iGbReg::EEPROM_SIZE]; 64 65 // cached parameters from params struct 66 Tick tickRate; 67 bool useFlowControl; 68 69 // packet fifos 70 PacketFifo rxFifo; 71 PacketFifo txFifo; 72 73 // Packet that we are currently putting into the txFifo 74 EthPacketPtr txPacket; 75 76 // Should to Rx/Tx State machine tick? 77 bool rxTick; 78 bool txTick; 79 80 // Event and function to deal with RDTR timer expiring 81 void rdtrProcess() { postInterrupt(iGbReg::IT_RXDMT, true); } 82 //friend class EventWrapper<IGbE, &IGbE::rdtrProcess>; 83 EventWrapper<IGbE, &IGbE::rdtrProcess> rdtrEvent; 84 85 // Event and function to deal with RADV timer expiring 86 void radvProcess() { postInterrupt(iGbReg::IT_RXDMT, true); } 87 //friend class EventWrapper<IGbE, &IGbE::radvProcess>; 88 EventWrapper<IGbE, &IGbE::radvProcess> radvEvent; 89 90 // Event and function to deal with TADV timer expiring 91 void tadvProcess() { postInterrupt(iGbReg::IT_TXDW, true); } 92 //friend class EventWrapper<IGbE, &IGbE::tadvProcess>; 93 EventWrapper<IGbE, &IGbE::tadvProcess> tadvEvent; 94 95 // Event and function to deal with TIDV timer expiring 96 void tidvProcess() { postInterrupt(iGbReg::IT_TXDW, true); }; 97 //friend class EventWrapper<IGbE, &IGbE::tidvProcess>; 98 EventWrapper<IGbE, &IGbE::tidvProcess> tidvEvent; 99 100 // Main event to tick the device 101 void tick(); 102 //friend class 
EventWrapper<IGbE, &IGbE::tick>; 103 EventWrapper<IGbE, &IGbE::tick> tickEvent; 104 105 106 void rxStateMachine(); 107 void txStateMachine(); 108 void txWire(); 109 110 /** Write an interrupt into the interrupt pending register and check mask 111 * and interrupt limit timer before sending interrupt to CPU 112 * @param t the type of interrupt we are posting 113 * @param now should we ignore the interrupt limiting timer 114 */ 115 void postInterrupt(iGbReg::IntTypes t, bool now = false); 116 117 /** Check and see if changes to the mask register have caused an interrupt 118 * to need to be sent or perhaps removed an interrupt cause. 119 */ 120 void chkInterrupt(); 121 122 /** Send an interrupt to the cpu 123 */ 124 void cpuPostInt(); 125 // Event to moderate interrupts 126 EventWrapper<IGbE, &IGbE::cpuPostInt> interEvent; 127 128 /** Clear the interupt line to the cpu 129 */ 130 void cpuClearInt(); 131 132 Tick intClock() { return Clock::Int::ns * 1024; } 133 134 template<class T> 135 class DescCache 136 { 137 protected: 138 virtual Addr descBase() const = 0; 139 virtual long descHead() const = 0; 140 virtual long descTail() const = 0; 141 virtual long descLen() const = 0; 142 virtual void updateHead(long h) = 0; 143 virtual void enableSm() = 0; 144 145 std::deque<T*> usedCache; 146 std::deque<T*> unusedCache; 147 148 T *fetchBuf; 149 T *wbBuf; 150 151 // Pointer to the device we cache for 152 IGbE *igbe; 153 154 // Name of this descriptor cache 155 std::string _name; 156 157 // How far we've cached 158 int cachePnt; 159 160 // The size of the descriptor cache 161 int size; 162 163 // How many descriptors we are currently fetching 164 int curFetching; 165 166 // How many descriptors we are currently writing back 167 int wbOut; 168 169 // if the we wrote back to the end of the descriptor ring and are going 170 // to have to wrap and write more 171 bool moreToWb; 172 173 // What the alignment is of the next descriptor writeback 174 Addr wbAlignment; 175 176 /** The 
packet that is currently being dmad to memory if any 177 */ 178 EthPacketPtr pktPtr; 179 180 public: 181 DescCache(IGbE *i, const std::string n, int s) 182 : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0), wbOut(0), 183 pktPtr(NULL), fetchEvent(this), wbEvent(this) 184 { 185 fetchBuf = new T[size]; 186 wbBuf = new T[size]; 187 } 188 189 virtual ~DescCache() 190 { 191 reset(); 192 } 193 194 std::string name() { return _name; } 195 196 /** If the address/len/head change when we've got descriptors that are 197 * dirty that is very bad. This function checks that we don't and if we 198 * do panics. 199 */ 200 void areaChanged() 201 { 202 if (usedCache.size() > 0 || unusedCache.size() > 0) 203 panic("Descriptor Address, Length or Head changed. Bad\n"); 204 } 205 206 void writeback(Addr aMask) 207 { 208 int curHead = descHead(); 209 int max_to_wb = usedCache.size() + curHead; 210 211 DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: " 212 "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n", 213 curHead, descTail(), descLen(), cachePnt, max_to_wb, 214 descLeft()); 215 216 // Check if this writeback is less restrictive that the previous 217 // and if so setup another one immediately following it 218 if (wbOut && (aMask < wbAlignment)) { 219 moreToWb = true; 220 wbAlignment = aMask; 221 DPRINTF(EthernetDesc, "Writing back already in process, returning\n"); 222 return; 223 } 224 225 226 moreToWb = false; 227 wbAlignment = aMask; 228 229 if (max_to_wb > descLen()) { 230 max_to_wb = descLen() - curHead; 231 moreToWb = true; 232 // this is by definition aligned correctly 233 } else if (aMask != 0) { 234 // align the wb point to the mask 235 max_to_wb = max_to_wb & ~(aMask>>4); 236 } 237 238 DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb); 239 240 if (max_to_wb <= 0 || wbOut) 241 return; 242 243 wbOut = max_to_wb - curHead; 244 245 for (int x = 0; x < wbOut; x++) 246 memcpy(&wbBuf[x], usedCache[x], sizeof(T)); 247 248 for (int x = 0; x < 
wbOut; x++) { 249 assert(usedCache.size()); 250 delete usedCache[0]; 251 usedCache.pop_front(); 252 }; 253 254 igbe->dmaWrite(descBase() + curHead * sizeof(T), wbOut * sizeof(T), 255 &wbEvent, (uint8_t*)wbBuf); 256 } 257 258 /** Fetch a chunk of descriptors into the descriptor cache. 259 * Calls fetchComplete when the memory system returns the data 260 */ 261 void fetchDescriptors() 262 { 263 size_t max_to_fetch = cachePnt - descTail(); 264 if (max_to_fetch < 0) 265 max_to_fetch = descLen() - cachePnt; 266 267 max_to_fetch = std::min(max_to_fetch, (size - usedCache.size() - 268 unusedCache.size())); 269 270 DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: " 271 "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n", 272 descHead(), descTail(), descLen(), cachePnt, 273 max_to_fetch, descLeft()); 274 275 // Nothing to do 276 if (max_to_fetch == 0 || curFetching) 277 return; 278 279 // So we don't have two descriptor fetches going on at once 280 curFetching = max_to_fetch; 281 282 igbe->dmaRead(descBase() + cachePnt * sizeof(T), 283 curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf); 284 } 285 286 287 /** Called by event when dma to read descriptors is completed 288 */ 289 void fetchComplete() 290 { 291 T *newDesc; 292 for (int x = 0; x < curFetching; x++) { 293 newDesc = new T; 294 memcpy(newDesc, &fetchBuf[x], sizeof(T)); 295 unusedCache.push_back(newDesc); 296 } 297 298#ifndef NDEBUG 299 int oldCp = cachePnt; 300#endif 301 302 cachePnt += curFetching; 303 if (cachePnt > descLen()) 304 cachePnt -= descLen(); 305 306 DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n", 307 oldCp, cachePnt); 308 309 enableSm(); 310 311 } 312 313 EventWrapper<DescCache, &DescCache::fetchComplete> fetchEvent; 314 315 /** Called by event when dma to writeback descriptors is completed 316 */ 317 void wbComplete() 318 { 319 long curHead = descHead(); 320#ifndef NDEBUG 321 long oldHead = curHead; 322#endif 323 324 curHead += wbOut; 325 wbOut = 0; 326 327 if 
(curHead > descLen()) 328 curHead = 0; 329 330 // Update the head 331 updateHead(curHead); 332 333 DPRINTF(EthernetDesc, "Writeback complete cachePnt %d -> %d\n", 334 oldHead, curHead); 335 336 // If we still have more to wb, call wb now 337 if (moreToWb) { 338 DPRINTF(EthernetDesc, "Writeback has more todo\n"); 339 writeback(wbAlignment); 340 } 341 } 342 343 344 EventWrapper<DescCache, &DescCache::wbComplete> wbEvent; 345 346 /* Return the number of descriptors left in the ring, so the device has 347 * a way to figure out if it needs to interrupt. 348 */ 349 int descLeft() const 350 { 351 int left = unusedCache.size(); 352 if (cachePnt - descTail() >= 0) 353 left += (cachePnt - descTail()); 354 else 355 left += (descLen() - cachePnt); 356 357 return left; 358 } 359 360 /* Return the number of descriptors used and not written back. 361 */ 362 int descUsed() const { return usedCache.size(); } 363 364 /* Return the number of cache unused descriptors we have. */ 365 int descUnused() const {return unusedCache.size(); } 366 367 /* Get into a state where the descriptor address/head/etc colud be 368 * changed */ 369 void reset() 370 { 371 DPRINTF(EthernetDesc, "Reseting descriptor cache\n"); 372 for (int x = 0; x < usedCache.size(); x++) 373 delete usedCache[x]; 374 for (int x = 0; x < unusedCache.size(); x++) 375 delete unusedCache[x]; 376 377 usedCache.clear(); 378 unusedCache.clear(); 379 } 380 381 }; 382 383 384 class RxDescCache : public DescCache<iGbReg::RxDesc> 385 { 386 protected: 387 virtual Addr descBase() const { return igbe->regs.rdba(); } 388 virtual long descHead() const { return igbe->regs.rdh(); } 389 virtual long descLen() const { return igbe->regs.rdlen() >> 4; } 390 virtual long descTail() const { return igbe->regs.rdt(); } 391 virtual void updateHead(long h) { igbe->regs.rdh(h); } 392 virtual void enableSm(); 393 394 bool pktDone; 395 396 public: 397 RxDescCache(IGbE *i, std::string n, int s); 398 399 /** Write the given packet into the buffer(s) 
pointed to by the 400 * descriptor and update the book keeping. Should only be called when 401 * there are no dma's pending. 402 * @param packet ethernet packet to write 403 * @return if the packet could be written (there was a free descriptor) 404 */ 405 bool writePacket(EthPacketPtr packet); 406 /** Called by event when dma to write packet is completed 407 */ 408 void pktComplete(); 409 410 /** Check if the dma on the packet has completed. 411 */ 412 413 bool packetDone(); 414 415 EventWrapper<RxDescCache, &RxDescCache::pktComplete> pktEvent; 416 417 }; 418 friend class RxDescCache; 419 420 RxDescCache rxDescCache; 421 422 class TxDescCache : public DescCache<iGbReg::TxDesc> 423 { 424 protected: 425 virtual Addr descBase() const { return igbe->regs.tdba(); } 426 virtual long descHead() const { return igbe->regs.tdh(); } 427 virtual long descTail() const { return igbe->regs.tdt(); } 428 virtual long descLen() const { return igbe->regs.tdlen() >> 4; } 429 virtual void updateHead(long h) { igbe->regs.tdh(h); } 430 virtual void enableSm(); 431 432 bool pktDone; 433 bool isTcp; 434 bool pktWaiting; 435 436 public: 437 TxDescCache(IGbE *i, std::string n, int s); 438 439 /** Tell the cache to DMA a packet from main memory into its buffer and 440 * return the size the of the packet to reserve space in tx fifo. 441 * @return size of the packet 442 */ 443 int getPacketSize(); 444 void getPacketData(EthPacketPtr p); 445 446 /** Ask if the packet has been transfered so the state machine can give 447 * it to the fifo. 448 * @return packet available in descriptor cache 449 */ 450 bool packetAvailable(); 451 452 /** Ask if we are still waiting for the packet to be transfered. 453 * @return packet still in transit. 
454 */ 455 bool packetWaiting() { return pktWaiting; } 456 457 /** Called by event when dma to write packet is completed 458 */ 459 void pktComplete(); 460 EventWrapper<TxDescCache, &TxDescCache::pktComplete> pktEvent; 461 462 }; 463 friend class TxDescCache; 464 465 TxDescCache txDescCache; 466 467 public: 468 struct Params : public PciDev::Params 469 { 470 bool use_flow_control; 471 int rx_fifo_size; 472 int tx_fifo_size; 473 int rx_desc_cache_size; 474 int tx_desc_cache_size; 475 Tick clock; 476 }; 477 478 IGbE(Params *params); 479 ~IGbE() {;} 480 481 Tick clock; 482 inline Tick cycles(int numCycles) const { return numCycles * clock; } 483 484 virtual Tick read(PacketPtr pkt); 485 virtual Tick write(PacketPtr pkt); 486 487 virtual Tick writeConfig(PacketPtr pkt); 488 489 bool ethRxPkt(EthPacketPtr packet); 490 void ethTxDone(); 491 492 void setEthInt(IGbEInt *i) { assert(!etherInt); etherInt = i; } 493 494 495 const Params *params() const {return (const Params *)_params; } 496 497 virtual void serialize(std::ostream &os); 498 virtual void unserialize(Checkpoint *cp, const std::string §ion); 499 500 501}; 502 503class IGbEInt : public EtherInt 504{ 505 private: 506 IGbE *dev; 507 508 public: 509 IGbEInt(const std::string &name, IGbE *d) 510 : EtherInt(name), dev(d) 511 { dev->setEthInt(this); } 512 513 virtual bool recvPacket(EthPacketPtr pkt) { return dev->ethRxPkt(pkt); } 514 virtual void sendDone() { dev->ethTxDone(); } 515}; 516 517 518 519 520 521#endif //__DEV_I8254XGBE_HH__ 522 523