case REG_LEDCTL:
case REG_FCAL:
case REG_FCAH:
case REG_FCT:
case REG_VET:
case REG_AIFS:
case REG_TIPG:
    ; // We don't care, so don't store anything
    break;
case REG_IVAR0:
    // Interrupt vector allocation is not modelled; accept and drop.
    warn("Writing to IVAR0, ignoring...\n");
    break;
case REG_FCRTL:
    regs.fcrtl = val;
    break;
case REG_FCRTH:
    regs.fcrth = val;
    break;
case REG_RDBAL:
    // Low half of RX descriptor base; low 4 bits cleared (16B aligned).
    regs.rdba.rdbal( val & ~mask(4));
    rxDescCache.areaChanged();
    break;
case REG_RDBAH:
    regs.rdba.rdbah(val);
    rxDescCache.areaChanged();
    break;
case REG_RDLEN:
    // Ring length; low 7 bits cleared (128B granularity).
    regs.rdlen = val & ~mask(7);
    rxDescCache.areaChanged();
    break;
case REG_SRRCTL:
    regs.srrctl = val;
    break;
case REG_RDH:
    regs.rdh = val;
    rxDescCache.areaChanged();
    break;
case REG_RDT:
    // Driver advanced the RX tail: new free descriptors are available,
    // so kick off a fetch unless we are draining for a checkpoint.
    regs.rdt = val;
    DPRINTF(EthernetSM, "RXS: RDT Updated.\n");
    if (drainState() == DrainState::Running) {
        DPRINTF(EthernetSM, "RXS: RDT Fetching Descriptors!\n");
        rxDescCache.fetchDescriptors();
    } else {
        DPRINTF(EthernetSM, "RXS: RDT NOT Fetching Desc b/c draining!\n");
    }
    break;
case REG_RDTR:
    regs.rdtr = val;
    break;
case REG_RADV:
    regs.radv = val;
    break;
case REG_RXDCTL:
    regs.rxdctl = val;
    break;
case REG_TDBAL:
    regs.tdba.tdbal( val & ~mask(4));
    txDescCache.areaChanged();
    break;
case REG_TDBAH:
    regs.tdba.tdbah(val);
    txDescCache.areaChanged();
    break;
case REG_TDLEN:
    regs.tdlen = val & ~mask(7);
    txDescCache.areaChanged();
    break;
case REG_TDH:
    regs.tdh = val;
    txDescCache.areaChanged();
    break;
case REG_TXDCA_CTL:
    regs.txdca_ctl = val;
    if (regs.txdca_ctl.enabled())
        panic("No support for DCA\n");
    break;
case REG_TDT:
    // Driver advanced the TX tail: new packets are queued for transmit.
    regs.tdt = val;
    DPRINTF(EthernetSM, "TXS: TX Tail pointer updated\n");
    if (drainState() == DrainState::Running) {
        DPRINTF(EthernetSM, "TXS: TDT Fetching Descriptors!\n");
        txDescCache.fetchDescriptors();
    } else {
        DPRINTF(EthernetSM, "TXS: TDT NOT Fetching Desc b/c draining!\n");
    }
    break;
case REG_TIDV:
    regs.tidv = val;
    break;
case REG_TXDCTL:
    regs.txdctl = val;
    break;
case REG_TADV:
    regs.tadv = val;
    break;
case REG_TDWBAL:
    // tdwba is one 64-bit value split across two registers; bit 0 of
    // the combined value enables head writeback, the rest is the address.
    regs.tdwba &= ~mask(32);
    regs.tdwba |= val;
    txDescCache.completionWriteback(regs.tdwba & ~mask(1),
                                    regs.tdwba & mask(1));
    break;
case REG_TDWBAH:
    regs.tdwba &= mask(32);
    regs.tdwba |= (uint64_t)val << 32;
    txDescCache.completionWriteback(regs.tdwba & ~mask(1),
                                    regs.tdwba & mask(1));
    break;
case REG_RXCSUM:
    regs.rxcsum = val;
    break;
case REG_RLPML:
    regs.rlpml = val;
    break;
case REG_RFCTL:
    regs.rfctl = val;
    if (regs.rfctl.exsten())
        panic("Extended RX descriptors not implemented\n");
    break;
case REG_MANC:
    regs.manc = val;
    break;
case REG_SWSM:
    // Software semaphore: if firmware already holds the EEPROM
    // semaphore, software cannot take the SWESMBI bit.
    regs.swsm = val;
    if (regs.fwsm.eep_fw_semaphore())
        regs.swsm.swesmbi(0);
    break;
case REG_SWFWSYNC:
    regs.sw_fw_sync = val;
    break;
default:
    // Writes into the VLAN filter, receive address, and multicast
    // tables are accepted but not stored; anything else is a
    // modelling error.
    if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) &&
        !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) &&
        !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4))
        panic("Write request to unknown register number: %#x\n", daddr);
};

pkt->makeAtomicResponse();
return pioDelay;
}

/**
 * Raise (or coalesce) an interrupt of type @p t. If an ITR throttle
 * interval is programmed and has not yet elapsed since the last
 * interrupt, the post is deferred via interEvent; @p now bypasses
 * throttling entirely.
 */
void
IGbE::postInterrupt(IntTypes t, bool now)
{
    assert(t);

    // Interrupt is already pending
    if (t & regs.icr() && !now)
        return;

    regs.icr = regs.icr() | t;

    // ITR interval field is in units of 256 ns.
    Tick itr_interval = SimClock::Int::ns * 256 * regs.itr.interval();
    DPRINTF(EthernetIntr,
            "EINT: postInterrupt() curTick(): %d itr: %d interval: %d\n",
            curTick(), regs.itr.interval(), itr_interval);

    if (regs.itr.interval() == 0 || now ||
        lastInterrupt + itr_interval <= curTick()) {
        // Throttle window already satisfied (or bypassed): post now and
        // cancel any pending delayed post.
        if (interEvent.scheduled()) {
            deschedule(interEvent);
        }
        cpuPostInt();
    } else {
        // Defer until the throttle interval expires.
        Tick int_time = lastInterrupt + itr_interval;
        assert(int_time > 0);
        DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for tick %d\n",
                int_time);
        if (!interEvent.scheduled()) {
            schedule(interEvent, int_time);
        }
    }
}

/// Callback for the deferred (ITR-throttled) interrupt event.
void
IGbE::delayIntEvent()
{
    cpuPostInt();
}


/**
 * Actually assert the interrupt line to the CPU. Cancels any pending
 * delay-timer events (folding their causes into ICR first), sets
 * ICR.INT_ASSERT, and records the time for ITR throttling.
 */
void
IGbE::cpuPostInt()
{

    // NOTE: counted even when the mask suppresses the post below.
    postedInterrupts++;

    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Interrupt Masked. Not Posting\n");
        return;
    }

    DPRINTF(Ethernet, "Posting Interrupt\n");


    if (interEvent.scheduled()) {
        deschedule(interEvent);
    }

    // Pending RX/TX delay timers are folded into this interrupt.
    if (rdtrEvent.scheduled()) {
        regs.icr.rxt0(1);
        deschedule(rdtrEvent);
    }
    if (radvEvent.scheduled()) {
        regs.icr.rxt0(1);
        deschedule(radvEvent);
    }
    if (tadvEvent.scheduled()) {
        regs.icr.txdw(1);
        deschedule(tadvEvent);
    }
    if (tidvEvent.scheduled()) {
        regs.icr.txdw(1);
        deschedule(tidvEvent);
    }

    regs.icr.int_assert(1);
    DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n",
            regs.icr());

    intrPost();

    lastInterrupt = curTick();
}

/// Deassert the interrupt line if it is currently asserted.
void
IGbE::cpuClearInt()
{
    if (regs.icr.int_assert()) {
        regs.icr.int_assert(0);
        DPRINTF(EthernetIntr,
                "EINT: Clearing interrupt to CPU now. Vector %#x\n",
                regs.icr());
        intrClear();
    }
}

/**
 * Re-evaluate ICR against IMR after either changes: clear the CPU
 * interrupt if nothing unmasked remains, or (re)post/schedule one if
 * an unmasked cause is pending.
 */
void
IGbE::chkInterrupt()
{
    DPRINTF(Ethernet, "Checking interrupts icr: %#x imr: %#x\n", regs.icr(),
            regs.imr);
    // Check if we need to clear the cpu interrupt
    if (!(regs.icr() & regs.imr)) {
        DPRINTF(Ethernet, "Mask cleaned all interrupts\n");
        if (interEvent.scheduled())
            deschedule(interEvent);
        if (regs.icr.int_assert())
            cpuClearInt();
    }
    DPRINTF(Ethernet, "ITR = %#X itr.interval = %#X\n",
            regs.itr(), regs.itr.interval());

    if (regs.icr() & regs.imr) {
        if (regs.itr.interval() == 0) {
            cpuPostInt();
        } else {
            DPRINTF(Ethernet,
                    "Possibly scheduling interrupt because of imr write\n");
            if (!interEvent.scheduled()) {
                // Respect the ITR throttle (256 ns units).
                Tick t = curTick() + SimClock::Int::ns * 256 * regs.itr.interval();
                DPRINTF(Ethernet, "Scheduling for %d\n", t);
                schedule(interEvent, t);
            }
        }
    }
}


///////////////////////////// IGbE::DescCache //////////////////////////////

/**
 * Generic descriptor cache shared by the RX and TX paths. Holds up to
 * @p s descriptors split between an unused (fetched, not yet consumed)
 * and a used (consumed, awaiting writeback) deque, plus staging buffers
 * for the fetch/writeback DMAs.
 */
template<class T>
IGbE::DescCache<T>::DescCache(IGbE *i, const std::string n, int s)
    : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0),
      wbOut(0), moreToWb(false), wbAlignment(0), pktPtr(NULL),
      wbDelayEvent(this), fetchDelayEvent(this), fetchEvent(this),
      wbEvent(this)
{
    fetchBuf = new T[size];
    wbBuf = new T[size];
}

template<class T>
IGbE::DescCache<T>::~DescCache()
{
    reset();
    delete[] fetchBuf;
    delete[] wbBuf;
}

/**
 * Called when the driver rewrites the ring base/length/head registers.
 * Any in-flight state at that point indicates a guest driver bug.
 */
template<class T>
void
IGbE::DescCache<T>::areaChanged()
{
    if (usedCache.size() > 0 || curFetching || wbOut)
        panic("Descriptor Address, Length or Head changed. Bad\n");
    reset();

}

/**
 * Start writing consumed descriptors back to guest memory. @p aMask
 * is an alignment mask for the writeback point; a smaller (less
 * restrictive) mask arriving while a writeback is in flight queues a
 * follow-up pass.
 */
template<class T>
void
IGbE::DescCache<T>::writeback(Addr aMask)
{
    int curHead = descHead();
    int max_to_wb = usedCache.size();

    // Check if this writeback is less restrictive that the previous
    // and if so setup another one immediately following it
    if (wbOut) {
        if (aMask < wbAlignment) {
            moreToWb = true;
            wbAlignment = aMask;
        }
        DPRINTF(EthernetDesc,
                "Writing back already in process, returning\n");
        return;
    }

    moreToWb = false;
    wbAlignment = aMask;


    DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
            "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
            curHead, descTail(), descLen(), cachePnt, max_to_wb,
            descLeft());

    if (max_to_wb + curHead >= descLen()) {
        // Would wrap the ring: write up to the end now, rest in a
        // second pass.
        max_to_wb = descLen() - curHead;
        moreToWb = true;
        // this is by definition aligned correctly
    } else if (wbAlignment != 0) {
        // align the wb point to the mask
        max_to_wb = max_to_wb & ~wbAlignment;
    }

    DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);

    if (max_to_wb <= 0) {
        if (usedCache.size())
            igbe->anBegin(annSmWb, "Wait Alignment", CPA::FL_WAIT);
        else
            igbe->anWe(annSmWb, annUsedCacheQ);
        return;
    }

    wbOut = max_to_wb;

    assert(!wbDelayEvent.scheduled());
    igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
    igbe->anBegin(annSmWb, "Prepare Writeback Desc");
}

/**
 * Second stage of writeback (after wbDelay): copy the used descriptors
 * into the staging buffer and issue the DMA write. Re-arms itself while
 * the device is draining.
 */
template<class T>
void
IGbE::DescCache<T>::writeback1()
{
    // If we're draining delay issuing this DMA
    if (igbe->drainState() != DrainState::Running) {
        igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
        return;
    }

    DPRINTF(EthernetDesc, "Begining DMA of %d descriptors\n", wbOut);

    for (int x = 0; x < wbOut; x++) {
        assert(usedCache.size());
        memcpy(&wbBuf[x], usedCache[x], sizeof(T));
        igbe->anPq(annSmWb, annUsedCacheQ);
        igbe->anPq(annSmWb, annDescQ);
        igbe->anQ(annSmWb, annUsedDescQ);
    }


    igbe->anBegin(annSmWb, "Writeback Desc DMA");

    assert(wbOut);
    igbe->dmaWrite(pciToDma(descBase() + descHead() * sizeof(T)),
                   wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf,
                   igbe->wbCompDelay);
}

/**
 * Start fetching descriptors between cachePnt and the ring tail into
 * the cache, bounded by free cache slots. No-op if a fetch is already
 * outstanding or there is nothing to fetch.
 */
template<class T>
void
IGbE::DescCache<T>::fetchDescriptors()
{
    size_t max_to_fetch;

    if (curFetching) {
        DPRINTF(EthernetDesc,
                "Currently fetching %d descriptors, returning\n",
                curFetching);
        return;
    }

    // Only fetch up to the end of the ring here; the wrap is handled
    // by a subsequent call after cachePnt resets to 0.
    if (descTail() >= cachePnt)
        max_to_fetch = descTail() - cachePnt;
    else
        max_to_fetch = descLen() - cachePnt;

    size_t free_cache = size - usedCache.size() - unusedCache.size();

    if (!max_to_fetch)
        igbe->anWe(annSmFetch, annUnusedDescQ);
    else
        igbe->anPq(annSmFetch, annUnusedDescQ, max_to_fetch);

    if (max_to_fetch) {
        if (!free_cache)
            igbe->anWf(annSmFetch, annDescQ);
        else
            igbe->anRq(annSmFetch, annDescQ, free_cache);
    }

    max_to_fetch = std::min(max_to_fetch, free_cache);


    DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
            "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
            descHead(), descTail(), descLen(), cachePnt,
            max_to_fetch, descLeft());

    // Nothing to do
    if (max_to_fetch == 0)
        return;

    // So we don't have two descriptor fetches going on at once
    curFetching = max_to_fetch;

    assert(!fetchDelayEvent.scheduled());
    igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
    igbe->anBegin(annSmFetch, "Prepare Fetch Desc");
}

/**
 * Second stage of the fetch (after fetchDelay): issue the DMA read of
 * curFetching descriptors into fetchBuf. Re-arms itself while draining.
 */
template<class T>
void
IGbE::DescCache<T>::fetchDescriptors1()
{
    // If we're draining delay issuing this DMA
    if (igbe->drainState() != DrainState::Running) {
        igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
        return;
    }

    igbe->anBegin(annSmFetch, "Fetch Desc");

    DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
            descBase() + cachePnt * sizeof(T),
            pciToDma(descBase() + cachePnt * sizeof(T)),
            curFetching * sizeof(T));
    assert(curFetching);
    igbe->dmaRead(pciToDma(descBase() + cachePnt * sizeof(T)),
                  curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf,
                  igbe->fetchCompDelay);
}

/**
 * DMA-read completion: copy the fetched descriptors into heap-allocated
 * entries on the unused deque, advance (and wrap) cachePnt, then wake
 * the owning state machine.
 */
template<class T>
void
IGbE::DescCache<T>::fetchComplete()
{
    T *newDesc;
    igbe->anBegin(annSmFetch, "Fetch Complete");
    for (int x = 0; x < curFetching; x++) {
        newDesc = new T;
        memcpy(newDesc, &fetchBuf[x], sizeof(T));
        unusedCache.push_back(newDesc);
        igbe->anDq(annSmFetch, annUnusedDescQ);
        igbe->anQ(annSmFetch, annUnusedCacheQ);
        igbe->anQ(annSmFetch, annDescQ);
    }


#ifndef NDEBUG
    int oldCp = cachePnt;
#endif

    cachePnt += curFetching;
    assert(cachePnt <= descLen());
    if (cachePnt == descLen())
        cachePnt = 0;

    curFetching = 0;

    DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
            oldCp, cachePnt);

    // Annotate why we are now idle: out of descriptors, cache full,
    // or simply waiting for more work.
    if ((descTail() >= cachePnt ? (descTail() - cachePnt) : (descLen() -
                                                             cachePnt)) == 0)
    {
        igbe->anWe(annSmFetch, annUnusedDescQ);
    } else if (!(size - usedCache.size() - unusedCache.size())) {
        igbe->anWf(annSmFetch, annDescQ);
    } else {
        igbe->anBegin(annSmFetch, "Wait", CPA::FL_WAIT);
    }

    enableSm();
    igbe->checkDrain();
}

/**
 * DMA-write completion for a descriptor writeback: free the written
 * entries, advance the ring head (with wrap), and start any follow-up
 * writeback/fetch that accumulated meanwhile.
 */
template<class T>
void
IGbE::DescCache<T>::wbComplete()
{

    igbe->anBegin(annSmWb, "Finish Writeback");

    long curHead = descHead();
#ifndef NDEBUG
    long oldHead = curHead;
#endif

    for (int x = 0; x < wbOut; x++) {
        assert(usedCache.size());
        delete usedCache[0];
        usedCache.pop_front();

        igbe->anDq(annSmWb, annUsedCacheQ);
        igbe->anDq(annSmWb, annDescQ);
    }

    curHead += wbOut;
    wbOut = 0;

    if (curHead >= descLen())
        curHead -= descLen();

    // Update the head
    updateHead(curHead);

    DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
            oldHead, curHead);

    // If we still have more to wb, call wb now
    actionAfterWb();
    if (moreToWb) {
        moreToWb = false;
        DPRINTF(EthernetDesc, "Writeback has more todo\n");
        writeback(wbAlignment);
    }

    if (!wbOut) {
        igbe->checkDrain();
        if (usedCache.size())
            igbe->anBegin(annSmWb, "Wait", CPA::FL_WAIT);
        else
            igbe->anWe(annSmWb, annUsedCacheQ);
    }
    fetchAfterWb();
}

/// Drop all cached descriptors and rewind the fetch pointer.
template<class T>
void
IGbE::DescCache<T>::reset()
{
    DPRINTF(EthernetDesc, "Reseting descriptor cache\n");
    for (typename CacheType::size_type x = 0; x < usedCache.size(); x++)
        delete usedCache[x];
    for (typename CacheType::size_type x = 0; x < unusedCache.size(); x++)
        delete unusedCache[x];

    usedCache.clear();
    unusedCache.clear();

    cachePnt = 0;

}

/**
 * Checkpoint the cache: scalar state, the raw bytes of both descriptor
 * deques, and the (absolute-tick) schedule times of the two delay
 * events (0 meaning "not scheduled").
 */
template<class T>
void
IGbE::DescCache<T>::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(cachePnt);
    SERIALIZE_SCALAR(curFetching);
    SERIALIZE_SCALAR(wbOut);
    SERIALIZE_SCALAR(moreToWb);
    SERIALIZE_SCALAR(wbAlignment);

    typename CacheType::size_type usedCacheSize = usedCache.size();
    SERIALIZE_SCALAR(usedCacheSize);
    for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
        arrayParamOut(cp, csprintf("usedCache_%d", x),
                      (uint8_t*)usedCache[x],sizeof(T));
    }

    typename CacheType::size_type unusedCacheSize = unusedCache.size();
    SERIALIZE_SCALAR(unusedCacheSize);
    for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
        arrayParamOut(cp, csprintf("unusedCache_%d", x),
                      (uint8_t*)unusedCache[x],sizeof(T));
    }

    Tick fetch_delay = 0, wb_delay = 0;
    if (fetchDelayEvent.scheduled())
        fetch_delay = fetchDelayEvent.when();
    SERIALIZE_SCALAR(fetch_delay);
    if (wbDelayEvent.scheduled())
        wb_delay = wbDelayEvent.when();
    SERIALIZE_SCALAR(wb_delay);


}

/// Inverse of serialize(): rebuild both deques and re-arm delay events.
template<class T>
void
IGbE::DescCache<T>::unserialize(CheckpointIn &cp)
{
    UNSERIALIZE_SCALAR(cachePnt);
    UNSERIALIZE_SCALAR(curFetching);
    UNSERIALIZE_SCALAR(wbOut);
    UNSERIALIZE_SCALAR(moreToWb);
    UNSERIALIZE_SCALAR(wbAlignment);

    typename CacheType::size_type usedCacheSize;
    UNSERIALIZE_SCALAR(usedCacheSize);
    T *temp;
    for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
        temp = new T;
        arrayParamIn(cp, csprintf("usedCache_%d", x),
                     (uint8_t*)temp,sizeof(T));
        usedCache.push_back(temp);
    }

    typename CacheType::size_type unusedCacheSize;
    UNSERIALIZE_SCALAR(unusedCacheSize);
    for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
        temp = new T;
        arrayParamIn(cp, csprintf("unusedCache_%d", x),
                     (uint8_t*)temp,sizeof(T));
        unusedCache.push_back(temp);
    }
    Tick fetch_delay = 0, wb_delay = 0;
    UNSERIALIZE_SCALAR(fetch_delay);
    UNSERIALIZE_SCALAR(wb_delay);
    if (fetch_delay)
        igbe->schedule(fetchDelayEvent, fetch_delay);
    if (wb_delay)
        igbe->schedule(wbDelayEvent, wb_delay);


}

///////////////////////////// IGbE::RxDescCache //////////////////////////////

IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<RxDesc>(i, n, s), pktDone(false), splitCount(0),
      pktEvent(this), pktHdrEvent(this), pktDataEvent(this)

{
    annSmFetch = "RX Desc Fetch";
    annSmWb = "RX Desc Writeback";
    annUnusedDescQ = "RX Unused Descriptors";
    annUnusedCacheQ = "RX Unused Descriptor Cache";
    annUsedCacheQ = "RX Used Descriptor Cache";
    annUsedDescQ = "RX Used Descriptors";
    annDescQ = "RX Descriptors";
}

/**
 * Rendezvous for header-split receives: the header and data halves
 * complete as separate DMAs, and only when both are done do we finish
 * the descriptor.
 */
void
IGbE::RxDescCache::pktSplitDone()
{
    splitCount++;
    DPRINTF(EthernetDesc,
            "Part of split packet done: splitcount now %d\n", splitCount);
    assert(splitCount <= 2);
    if (splitCount != 2)
        return;
    splitCount = 0;
    DPRINTF(EthernetDesc,
            "Part of split packet done: calling pktComplete()\n");
    pktComplete();
}

/**
 * DMA (part of) @p packet into the buffer(s) named by the front unused
 * descriptor. @p pkt_offset is nonzero only when continuing a
 * header-split packet that spans multiple descriptors.
 *
 * @return total bytes of this packet copied so far (bytesCopied).
 */
int
IGbE::RxDescCache::writePacket(EthPacketPtr packet, int pkt_offset)
{
    assert(unusedCache.size());
    //if (!unusedCache.size())
    //    return false;

    pktPtr = packet;
    pktDone = false;
    unsigned buf_len, hdr_len;

    RxDesc *desc = unusedCache.front();
    switch (igbe->regs.srrctl.desctype()) {
      case RXDT_LEGACY:
        // One buffer per packet; the whole frame must fit.
        assert(pkt_offset == 0);
        bytesCopied = packet->length;
        DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n",
                packet->length, igbe->regs.rctl.descSize());
        assert(packet->length < igbe->regs.rctl.descSize());
        igbe->dmaWrite(pciToDma(desc->legacy.buf),
                       packet->length, &pktEvent, packet->data,
                       igbe->rxWriteDelay);
        break;
      case RXDT_ADV_ONEBUF:
        // Advanced one-buffer: buffer size comes from SRRCTL when long
        // packet enable is set, otherwise from RCTL.
        assert(pkt_offset == 0);
        bytesCopied = packet->length;
        buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
            igbe->regs.rctl.descSize();
        DPRINTF(EthernetDesc, "Packet Length: %d srrctl: %#x Desc Size: %d\n",
                packet->length, igbe->regs.srrctl(), buf_len);
        assert(packet->length < buf_len);
        igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
                       packet->length, &pktEvent, packet->data,
                       igbe->rxWriteDelay);
        desc->adv_wb.header_len = htole(0);
        desc->adv_wb.sph = htole(0);
        desc->adv_wb.pkt_len = htole((uint16_t)(pktPtr->length));
        break;
      case RXDT_ADV_SPLIT_A:
        // Advanced header-split: protocol headers land in the header
        // buffer, payload in the packet buffer.
        int split_point;

        buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
            igbe->regs.rctl.descSize();
        hdr_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.hdrLen() : 0;
        DPRINTF(EthernetDesc,
                "lpe: %d Packet Length: %d offset: %d srrctl: %#x "
                "hdr addr: %#x Hdr Size: %d desc addr: %#x Desc Size: %d\n",
                igbe->regs.rctl.lpe(), packet->length, pkt_offset,
                igbe->regs.srrctl(), desc->adv_read.hdr, hdr_len,
                desc->adv_read.pkt, buf_len);

        split_point = hsplit(pktPtr);

        if (packet->length <= hdr_len) {
            bytesCopied = packet->length;
            assert(pkt_offset == 0);
            DPRINTF(EthernetDesc, "Hdr split: Entire packet in header\n");
            igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
                           packet->length, &pktEvent, packet->data,
                           igbe->rxWriteDelay);
            desc->adv_wb.header_len = htole((uint16_t)packet->length);
            desc->adv_wb.sph = htole(0);
            desc->adv_wb.pkt_len = htole(0);
        } else if (split_point) {
            if (pkt_offset) {
                // we are only copying some data, header/data has already been
                // copied
                int max_to_copy =
                    std::min(packet->length - pkt_offset, buf_len);
                bytesCopied += max_to_copy;
                DPRINTF(EthernetDesc,
                        "Hdr split: Continuing data buffer copy\n");
                igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
                               max_to_copy, &pktEvent,
                               packet->data + pkt_offset, igbe->rxWriteDelay);
                desc->adv_wb.header_len = htole(0);
                desc->adv_wb.pkt_len = htole((uint16_t)max_to_copy);
                desc->adv_wb.sph = htole(0);
            } else {
                // First descriptor of a split packet: two DMAs (header
                // and data) that rendezvous in pktSplitDone().
                int max_to_copy =
                    std::min(packet->length - split_point, buf_len);
                bytesCopied += max_to_copy + split_point;

                DPRINTF(EthernetDesc, "Hdr split: splitting at %d\n",
                        split_point);
                igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
                               split_point, &pktHdrEvent,
                               packet->data, igbe->rxWriteDelay);
                igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
                               max_to_copy, &pktDataEvent,
                               packet->data + split_point, igbe->rxWriteDelay);
                desc->adv_wb.header_len = htole(split_point);
                // NOTE(review): no htole() here unlike every sibling
                // field assignment — confirm intentional.
                desc->adv_wb.sph = 1;
                desc->adv_wb.pkt_len = htole((uint16_t)(max_to_copy));
            }
        } else {
            panic("Header split not fitting within header buffer or "
                  "undecodable packet not fitting in header unsupported\n");
        }
        break;
      default:
        panic("Unimplemnted RX receive buffer type: %d\n",
              igbe->regs.srrctl.desctype());
    }
    return bytesCopied;

}

/**
 * Packet-data DMA finished: fill in the writeback half of the front
 * descriptor (length, status, checksum results), fire the appropriate
 * receive interrupts once the whole packet has been copied, and move
 * the descriptor to the used queue.
 */
void
IGbE::RxDescCache::pktComplete()
{
    assert(unusedCache.size());
    RxDesc *desc;
    desc = unusedCache.front();

    igbe->anBegin("RXS", "Update Desc");

    // Unless CRC stripping is enabled, report 4 extra bytes of length.
    uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4 ;
    DPRINTF(EthernetDesc, "pktPtr->length: %d bytesCopied: %d "
            "stripcrc offset: %d value written: %d %d\n",
            pktPtr->length, bytesCopied, crcfixup,
            htole((uint16_t)(pktPtr->length + crcfixup)),
            (uint16_t)(pktPtr->length + crcfixup));

    // no support for anything but starting at 0
    assert(igbe->regs.rxcsum.pcss() == 0);

    DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n");

    uint16_t status = RXDS_DD;
    uint8_t err = 0;
    uint16_t ext_err = 0;
    uint16_t csum = 0;
    uint16_t ptype = 0;
    uint16_t ip_id = 0;

    assert(bytesCopied <= pktPtr->length);
    // EOP only once the entire packet has been placed in buffers.
    if (bytesCopied == pktPtr->length)
        status |= RXDS_EOP;

    IpPtr ip(pktPtr);

    if (ip) {
        DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n", ip->id());
        ptype |= RXDP_IPV4;
        ip_id = ip->id();

        if (igbe->regs.rxcsum.ipofld()) {
            DPRINTF(EthernetDesc, "Checking IP checksum\n");
            status |= RXDS_IPCS;
            csum = htole(cksum(ip));
            igbe->rxIpChecksums++;
            if (cksum(ip) != 0) {
                err |= RXDE_IPE;
                ext_err |= RXDEE_IPE;
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
            }
        }
        TcpPtr tcp(ip);
        if (tcp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking TCP checksum\n");
            status |= RXDS_TCPCS;
            ptype |= RXDP_TCP;
            csum = htole(cksum(tcp));
            igbe->rxTcpChecksums++;
            if (cksum(tcp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                err |= RXDE_TCPE;
                ext_err |= RXDEE_TCPE;
            }
        }

        UdpPtr udp(ip);
        if (udp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking UDP checksum\n");
            status |= RXDS_UDPCS;
            ptype |= RXDP_UDP;
            csum = htole(cksum(udp));
            igbe->rxUdpChecksums++;
            if (cksum(udp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                // UDP checksum failures are reported through the TCP
                // error bits here.
                ext_err |= RXDEE_TCPE;
                err |= RXDE_TCPE;
            }
        }
    } else { // if ip
        DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
    }

    // Write the result into whichever descriptor layout is in use.
    switch (igbe->regs.srrctl.desctype()) {
      case RXDT_LEGACY:
        desc->legacy.len = htole((uint16_t)(pktPtr->length + crcfixup));
        desc->legacy.status = htole(status);
        desc->legacy.errors = htole(err);
        // No vlan support at this point... just set it to 0
        desc->legacy.vlan = 0;
        break;
      case RXDT_ADV_SPLIT_A:
      case RXDT_ADV_ONEBUF:
        desc->adv_wb.rss_type = htole(0);
        desc->adv_wb.pkt_type = htole(ptype);
        if (igbe->regs.rxcsum.pcsd()) {
            // no rss support right now
            desc->adv_wb.rss_hash = htole(0);
        } else {
            desc->adv_wb.id = htole(ip_id);
            desc->adv_wb.csum = htole(csum);
        }
        desc->adv_wb.status = htole(status);
        desc->adv_wb.errors = htole(ext_err);
        // no vlan support
        desc->adv_wb.vlan_tag = htole(0);
        break;
      default:
        panic("Unimplemnted RX receive buffer type %d\n",
              igbe->regs.srrctl.desctype());
    }

    DPRINTF(EthernetDesc, "Descriptor complete w0: %#x w1: %#x\n",
            desc->adv_read.pkt, desc->adv_read.hdr);

    if (bytesCopied == pktPtr->length) {
        DPRINTF(EthernetDesc,
                "Packet completely written to descriptor buffers\n");
        // Deal with the rx timer interrupts
        if (igbe->regs.rdtr.delay()) {
            // Packet timer: restarted on every received packet.
            Tick delay = igbe->regs.rdtr.delay() * igbe->intClock();
            DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n", delay);
            igbe->reschedule(igbe->rdtrEvent, curTick() + delay);
        }

        if (igbe->regs.radv.idv()) {
            // Absolute timer: only armed once, not pushed back.
            Tick delay = igbe->regs.radv.idv() * igbe->intClock();
            DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n", delay);
            if (!igbe->radvEvent.scheduled()) {
                igbe->schedule(igbe->radvEvent, curTick() + delay);
            }
        }

        // if neither radv or rdtr, maybe itr is set...
        if (!igbe->regs.rdtr.delay() && !igbe->regs.radv.idv()) {
            DPRINTF(EthernetSM,
                    "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
            igbe->postInterrupt(IT_RXT);
        }

        // If the packet is small enough, interrupt appropriately
        // I wonder if this is delayed or not?!
        if (pktPtr->length <= igbe->regs.rsrpd.idv()) {
            DPRINTF(EthernetSM,
                    "RXS: Posting IT_SRPD beacuse small packet received\n");
            igbe->postInterrupt(IT_SRPD);
        }
        bytesCopied = 0;
    }

    pktPtr = NULL;
    igbe->checkDrain();
    enableSm();
    pktDone = true;

    igbe->anBegin("RXS", "Done Updating Desc");
    DPRINTF(EthernetDesc, "Processing of this descriptor complete\n");
    igbe->anDq("RXS", annUnusedCacheQ);
    unusedCache.pop_front();
    igbe->anQ("RXS", annUsedCacheQ);
    usedCache.push_back(desc);
}

/// Wake the RX state machine (unless the device is draining).
void
IGbE::RxDescCache::enableSm()
{
    if (igbe->drainState() != DrainState::Draining) {
        igbe->rxTick = true;
        igbe->restartClock();
    }
}

/// One-shot poll: returns true exactly once per completed packet.
bool
IGbE::RxDescCache::packetDone()
{
    if (pktDone) {
        pktDone = false;
        return true;
    }
    return false;
}

/// True while any RX-side DMA/event is still in flight (drain check).
bool
IGbE::RxDescCache::hasOutstandingEvents()
{
    return pktEvent.scheduled() || wbEvent.scheduled() ||
        fetchEvent.scheduled() || pktHdrEvent.scheduled() ||
        pktDataEvent.scheduled();

}

void
IGbE::RxDescCache::serialize(CheckpointOut &cp) const
{
    DescCache<RxDesc>::serialize(cp);
    SERIALIZE_SCALAR(pktDone);
    SERIALIZE_SCALAR(splitCount);
    SERIALIZE_SCALAR(bytesCopied);
}

void
IGbE::RxDescCache::unserialize(CheckpointIn &cp)
{
    DescCache<RxDesc>::unserialize(cp);
    UNSERIALIZE_SCALAR(pktDone);
    UNSERIALIZE_SCALAR(splitCount);
    UNSERIALIZE_SCALAR(bytesCopied);
}


///////////////////////////// IGbE::TxDescCache //////////////////////////////
1541IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s) 1542 : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false), 1543 pktWaiting(false), pktMultiDesc(false), 1544 completionAddress(0), completionEnabled(false), 1545 useTso(false), tsoHeaderLen(0), tsoMss(0), tsoTotalLen(0), tsoUsedLen(0), 1546 tsoPrevSeq(0), tsoPktPayloadBytes(0), tsoLoadedHeader(false), 1547 tsoPktHasHeader(false), tsoDescBytesUsed(0), tsoCopyBytes(0), tsoPkts(0), 1548 pktEvent(this), headerEvent(this), nullEvent(this) 1549{ 1550 annSmFetch = "TX Desc Fetch"; 1551 annSmWb = "TX Desc Writeback"; 1552 annUnusedDescQ = "TX Unused Descriptors"; 1553 annUnusedCacheQ = "TX Unused Descriptor Cache"; 1554 annUsedCacheQ = "TX Used Descriptor Cache"; 1555 annUsedDescQ = "TX Used Descriptors"; 1556 annDescQ = "TX Descriptors"; 1557} 1558 1559void 1560IGbE::TxDescCache::processContextDesc() 1561{ 1562 assert(unusedCache.size()); 1563 TxDesc *desc; 1564 1565 DPRINTF(EthernetDesc, "Checking and processing context descriptors\n"); 1566 1567 while (!useTso && unusedCache.size() && 1568 TxdOp::isContext(unusedCache.front())) { 1569 DPRINTF(EthernetDesc, "Got context descriptor type...\n"); 1570 1571 desc = unusedCache.front(); 1572 DPRINTF(EthernetDesc, "Descriptor upper: %#x lower: %#X\n", 1573 desc->d1, desc->d2); 1574 1575 1576 // is this going to be a tcp or udp packet? 1577 isTcp = TxdOp::tcp(desc) ? 
            true : false;

        // Latch TSO parameters from the context descriptor; they'll be
        // ignored unless a later descriptor actually enables TSO.
        tsoHeaderLen = TxdOp::hdrlen(desc);
        tsoMss = TxdOp::mss(desc);

        if (TxdOp::isType(desc, TxdOp::TXD_CNXT) && TxdOp::tse(desc)) {
            DPRINTF(EthernetDesc, "TCP offload enabled for packet hdrlen: "
                    "%d mss: %d paylen %d\n", TxdOp::hdrlen(desc),
                    TxdOp::mss(desc), TxdOp::getLen(desc));
            useTso = true;
            tsoTotalLen = TxdOp::getLen(desc);
            tsoLoadedHeader = false;
            tsoDescBytesUsed = 0;
            tsoUsedLen = 0;
            tsoPrevSeq = 0;
            tsoPktHasHeader = false;
            tsoPkts = 0;
            tsoCopyBytes = 0;
        }

        // Context descriptors carry no data: mark them done immediately
        // and move them to the used (writeback-pending) queue.
        TxdOp::setDd(desc);
        unusedCache.pop_front();
        igbe->anDq("TXS", annUnusedCacheQ);
        usedCache.push_back(desc);
        igbe->anQ("TXS", annUsedCacheQ);
    }

    if (!unusedCache.size())
        return;

    desc = unusedCache.front();
    // Advanced data descriptors can also enable TSO directly.
    if (!useTso && TxdOp::isType(desc, TxdOp::TXD_ADVDATA) &&
        TxdOp::tse(desc)) {
        DPRINTF(EthernetDesc, "TCP offload(adv) enabled for packet "
                "hdrlen: %d mss: %d paylen %d\n",
                tsoHeaderLen, tsoMss, TxdOp::getTsoLen(desc));
        useTso = true;
        tsoTotalLen = TxdOp::getTsoLen(desc);
        tsoLoadedHeader = false;
        tsoDescBytesUsed = 0;
        tsoUsedLen = 0;
        tsoPrevSeq = 0;
        tsoPktHasHeader = false;
        tsoPkts = 0;
    }

    if (useTso && !tsoLoadedHeader) {
        // we need to fetch a header
        DPRINTF(EthernetDesc, "Starting DMA of TSO header\n");
        assert(TxdOp::isData(desc) && TxdOp::getLen(desc) >= tsoHeaderLen);
        pktWaiting = true;
        // tsoHeader is a fixed 256-byte buffer (see serialize())
        assert(tsoHeaderLen <= 256);
        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
                      tsoHeaderLen, &headerEvent, tsoHeader, 0);
    }
}

// Completion callback for the TSO-header DMA started above: decide
// whether the header consumed its whole descriptor and restart the SM.
void
IGbE::TxDescCache::headerComplete()
{
    DPRINTF(EthernetDesc, "TSO: Fetching TSO header complete\n");
    pktWaiting = false;

    assert(unusedCache.size());
    TxDesc *desc = unusedCache.front();
    DPRINTF(EthernetDesc, "TSO: len: %d tsoHeaderLen: %d\n",
            TxdOp::getLen(desc), tsoHeaderLen);

    if (TxdOp::getLen(desc) == tsoHeaderLen) {
        // Descriptor contained only the header; retire it now.
        tsoDescBytesUsed = 0;
        tsoLoadedHeader = true;
        unusedCache.pop_front();
        usedCache.push_back(desc);
    } else {
        // Header shares its descriptor with payload: remember how much
        // of the descriptor the header consumed so payload DMA resumes
        // at the right offset.
        DPRINTF(EthernetDesc, "TSO: header part of larger payload\n");
        tsoDescBytesUsed = tsoHeaderLen;
        tsoLoadedHeader = true;
    }
    enableSm();
    igbe->checkDrain();
}

// Returns the size in bytes of the next packet (or TSO segment chunk)
// to be DMA'd, or 0 if no descriptor is cached.
unsigned
IGbE::TxDescCache::getPacketSize(EthPacketPtr p)
{
    if (!unusedCache.size())
        return 0;

    DPRINTF(EthernetDesc, "Starting processing of descriptor\n");

    assert(!useTso || tsoLoadedHeader);
    TxDesc *desc = unusedCache.front();

    if (useTso) {
        DPRINTF(EthernetDesc, "getPacket(): TxDescriptor data "
                "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
        DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
                "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
                tsoTotalLen, tsoUsedLen, tsoLoadedHeader);

        // Copy size is capped both by the segment size (mss, plus header
        // room if this segment doesn't have its header yet) and by the
        // unread bytes remaining in the current descriptor.
        if (tsoPktHasHeader)
            tsoCopyBytes = std::min((tsoMss + tsoHeaderLen) - p->length,
                                    TxdOp::getLen(desc) - tsoDescBytesUsed);
        else
            tsoCopyBytes = std::min(tsoMss,
                                    TxdOp::getLen(desc) - tsoDescBytesUsed);
        unsigned pkt_size =
            tsoCopyBytes + (tsoPktHasHeader ? 0 : tsoHeaderLen);

        DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d "
                "this descLen: %d\n",
                tsoDescBytesUsed, tsoCopyBytes, TxdOp::getLen(desc));
        DPRINTF(EthernetDesc, "TSO: pktHasHeader: %d\n", tsoPktHasHeader);
        DPRINTF(EthernetDesc, "TSO: Next packet is %d bytes\n", pkt_size);
        return pkt_size;
    }

    DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
            TxdOp::getLen(unusedCache.front()));
    return TxdOp::getLen(desc);
}

// Start the DMA that copies the next chunk of packet data into p; for
// the first chunk of a TSO segment the cached header is prepended.
void
IGbE::TxDescCache::getPacketData(EthPacketPtr p)
{
    assert(unusedCache.size());

    TxDesc *desc;
    desc = unusedCache.front();

    DPRINTF(EthernetDesc, "getPacketData(): TxDescriptor data "
            "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
           TxdOp::getLen(desc));

    pktPtr = p;

    pktWaiting = true;

    DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d\n", p->length);

    if (useTso) {
        assert(tsoLoadedHeader);
        if (!tsoPktHasHeader) {
            DPRINTF(EthernetDesc,
                    "Loading TSO header (%d bytes) into start of packet\n",
                    tsoHeaderLen);
            memcpy(p->data, &tsoHeader, tsoHeaderLen);
            p->length += tsoHeaderLen;
            tsoPktHasHeader = true;
        }
    }

    if (useTso) {
        DPRINTF(EthernetDesc,
                "Starting DMA of packet at offset %d length: %d\n",
                p->length, tsoCopyBytes);
        // Resume reading the descriptor's buffer where the previous
        // chunk (or the header) left off.
        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc))
                      + tsoDescBytesUsed,
                      tsoCopyBytes, &pktEvent, p->data + p->length,
                      igbe->txReadDelay);
        tsoDescBytesUsed += tsoCopyBytes;
        assert(tsoDescBytesUsed <= TxdOp::getLen(desc));
    } else {
        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
                      TxdOp::getLen(desc), &pktEvent, p->data + p->length,
                      igbe->txReadDelay);
    }
}

// Completion callback for the packet-data DMA: update lengths, fix up
// TSO/checksum header fields, retire descriptors, schedule writebacks.
void
IGbE::TxDescCache::pktComplete()
{

    TxDesc *desc;
    assert(unusedCache.size());
    assert(pktPtr);
    igbe->anBegin("TXS", "Update Desc");

    DPRINTF(EthernetDesc, "DMA of packet complete\n");


    desc = unusedCache.front();
    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
           TxdOp::getLen(desc));

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
            desc->d1, desc->d2);

    // Set the length of the data in the EtherPacket
    if (useTso) {
        DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
                "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
                tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
        pktPtr->simLength += tsoCopyBytes;
        pktPtr->length += tsoCopyBytes;
        tsoUsedLen += tsoCopyBytes;
        DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d\n",
                tsoDescBytesUsed, tsoCopyBytes);
    } else {
        pktPtr->simLength += TxdOp::getLen(desc);
        pktPtr->length += TxdOp::getLen(desc);
    }



    // If this isn't the end of the packet (multi-descriptor packet, or a
    // TSO segment that still needs more payload), retire the descriptor
    // and signal a partial completion so the SM fetches the rest.
    if ((!TxdOp::eop(desc) && !useTso) ||
        (pktPtr->length < ( tsoMss + tsoHeaderLen) &&
         tsoTotalLen != tsoUsedLen && useTso)) {
        assert(!useTso || (tsoDescBytesUsed == TxdOp::getLen(desc)));
        igbe->anDq("TXS", annUnusedCacheQ);
        unusedCache.pop_front();
        igbe->anQ("TXS", annUsedCacheQ);
        usedCache.push_back(desc);

        tsoDescBytesUsed = 0;
        pktDone = true;
        pktWaiting = false;
        pktMultiDesc = true;

        DPRINTF(EthernetDesc, "Partial Packet Descriptor of %d bytes Done\n",
                pktPtr->length);
        pktPtr = NULL;

        enableSm();
        igbe->checkDrain();
        return;
    }


    pktMultiDesc = false;
    // no support for vlans
    assert(!TxdOp::vle(desc));

    // we only support single packet descriptors at this point
    if (!useTso)
        assert(TxdOp::eop(desc));

    // set that this packet is done
    if (TxdOp::rs(desc))
        TxdOp::setDd(desc);

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
            desc->d1, desc->d2);

    if (useTso) {
        // Patch this segment's IP/TCP/UDP headers so each generated
        // segment looks like an independent datagram on the wire.
        IpPtr ip(pktPtr);
        if (ip) {
            DPRINTF(EthernetDesc, "TSO: Modifying IP header. Id + %d\n",
                    tsoPkts);
            ip->id(ip->id() + tsoPkts++);
            ip->len(pktPtr->length - EthPtr(pktPtr)->size());

            TcpPtr tcp(ip);
            if (tcp) {
                DPRINTF(EthernetDesc,
                        "TSO: Modifying TCP header. old seq %d + %d\n",
                        tcp->seq(), tsoPrevSeq);
                tcp->seq(tcp->seq() + tsoPrevSeq);
                // Only the final segment keeps FIN/PSH
                if (tsoUsedLen != tsoTotalLen)
                    tcp->flags(tcp->flags() & ~9); // clear fin & psh
            }
            UdpPtr udp(ip);
            if (udp) {
                DPRINTF(EthernetDesc, "TSO: Modifying UDP header.\n");
                udp->len(pktPtr->length - EthPtr(pktPtr)->size());
            }
        }
        tsoPrevSeq = tsoUsedLen;
    }

    if (DTRACE(EthernetDesc)) {
        IpPtr ip(pktPtr);
        if (ip)
            DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n",
                    ip->id());
        else
            DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
    }

    // Checksums are only ofloaded for new descriptor types
    if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) {
        DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
        IpPtr ip(pktPtr);
        assert(ip);
        if (TxdOp::ixsm(desc)) {
            ip->sum(0);
            ip->sum(cksum(ip));
            igbe->txIpChecksums++;
            DPRINTF(EthernetDesc, "Calculated IP checksum\n");
        }
        if (TxdOp::txsm(desc)) {
            TcpPtr tcp(ip);
            UdpPtr udp(ip);
            if (tcp) {
                tcp->sum(0);
                tcp->sum(cksum(tcp));
                igbe->txTcpChecksums++;
                DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
            } else if (udp) {
                assert(udp);
                udp->sum(0);
                udp->sum(cksum(udp));
                igbe->txUdpChecksums++;
                DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
            } else {
                panic("Told to checksum, but don't know how\n");
            }
        }
    }

    if (TxdOp::ide(desc)) {
        // Deal with the rx timer interrupts
        DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
        if (igbe->regs.tidv.idv()) {
            Tick delay = igbe->regs.tidv.idv() * igbe->intClock();
            DPRINTF(EthernetDesc, "setting tidv\n");
            igbe->reschedule(igbe->tidvEvent, curTick() + delay, true);
        }

        if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
            Tick delay = igbe->regs.tadv.idv() * igbe->intClock();
            DPRINTF(EthernetDesc, "setting tadv\n");
            if (!igbe->tadvEvent.scheduled()) {
                igbe->schedule(igbe->tadvEvent, curTick() + delay);
            }
        }
    }


    // Retire the descriptor unless a TSO flow still has unread bytes
    // left in it (it will be reused for the next segment).
    if (!useTso || TxdOp::getLen(desc) == tsoDescBytesUsed) {
        DPRINTF(EthernetDesc, "Descriptor Done\n");
        igbe->anDq("TXS", annUnusedCacheQ);
        unusedCache.pop_front();
        igbe->anQ("TXS", annUsedCacheQ);
        usedCache.push_back(desc);
        tsoDescBytesUsed = 0;
    }

    if (useTso && tsoUsedLen == tsoTotalLen)
        useTso = false;


    DPRINTF(EthernetDesc,
            "------Packet of %d bytes ready for transmission-------\n",
            pktPtr->length);
    pktDone = true;
    pktWaiting = false;
    pktPtr = NULL;
    tsoPktHasHeader = false;

    // Decide whether to write descriptors back now, based on WTHRESH
    // and (when granularity is cache-block) the block-aligned count.
    if (igbe->regs.txdctl.wthresh() == 0) {
        igbe->anBegin("TXS", "Desc Writeback");
        DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
        writeback(0);
    } else if (!igbe->regs.txdctl.gran() && igbe->regs.txdctl.wthresh() <=
               descInBlock(usedCache.size())) {
        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
        igbe->anBegin("TXS", "Desc Writeback");
        writeback((igbe->cacheBlockSize()-1)>>4);
    } else if (igbe->regs.txdctl.wthresh() <= usedCache.size()) {
        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
        igbe->anBegin("TXS", "Desc Writeback");
        writeback((igbe->cacheBlockSize()-1)>>4);
    }

    enableSm();
    igbe->checkDrain();
}

// Runs after a descriptor writeback completes: post the TX
// descriptor-written interrupt and, if head writeback is enabled, DMA
// the new head value to the guest-supplied completion address.
void
IGbE::TxDescCache::actionAfterWb()
{
    DPRINTF(EthernetDesc, "actionAfterWb() completionEnabled: %d\n",
            completionEnabled);
    igbe->postInterrupt(iGbReg::IT_TXDW);
    if (completionEnabled) {
        descEnd = igbe->regs.tdh();
        DPRINTF(EthernetDesc,
                "Completion writing back value: %d to addr: %#x\n", descEnd,
                completionAddress);
        // Bits 1:0 of completionAddress hold flags, so mask them off
        // before using it as a DMA target.
        igbe->dmaWrite(pciToDma(mbits(completionAddress, 63, 2)),
                       sizeof(descEnd), &nullEvent, (uint8_t*)&descEnd, 0);
    }
}

// Checkpoint the TX descriptor cache (packet/TSO progress state plus
// the base DescCache state).
void
IGbE::TxDescCache::serialize(CheckpointOut &cp) const
{
    DescCache<TxDesc>::serialize(cp);

    SERIALIZE_SCALAR(pktDone);
    SERIALIZE_SCALAR(isTcp);
    SERIALIZE_SCALAR(pktWaiting);
    SERIALIZE_SCALAR(pktMultiDesc);

    SERIALIZE_SCALAR(useTso);
    SERIALIZE_SCALAR(tsoHeaderLen);
    SERIALIZE_SCALAR(tsoMss);
    SERIALIZE_SCALAR(tsoTotalLen);
    SERIALIZE_SCALAR(tsoUsedLen);
    SERIALIZE_SCALAR(tsoPrevSeq);;
    SERIALIZE_SCALAR(tsoPktPayloadBytes);
    SERIALIZE_SCALAR(tsoLoadedHeader);
    SERIALIZE_SCALAR(tsoPktHasHeader);
    SERIALIZE_ARRAY(tsoHeader, 256);
    SERIALIZE_SCALAR(tsoDescBytesUsed);
    SERIALIZE_SCALAR(tsoCopyBytes);
    SERIALIZE_SCALAR(tsoPkts);

    SERIALIZE_SCALAR(completionAddress);
    SERIALIZE_SCALAR(completionEnabled);
    SERIALIZE_SCALAR(descEnd);
}

// Restore the TX descriptor cache state; mirrors serialize() above.
void
IGbE::TxDescCache::unserialize(CheckpointIn &cp)
{
    DescCache<TxDesc>::unserialize(cp);

    UNSERIALIZE_SCALAR(pktDone);
    UNSERIALIZE_SCALAR(isTcp);
    UNSERIALIZE_SCALAR(pktWaiting);
    UNSERIALIZE_SCALAR(pktMultiDesc);

    UNSERIALIZE_SCALAR(useTso);
    UNSERIALIZE_SCALAR(tsoHeaderLen);
    UNSERIALIZE_SCALAR(tsoMss);
    UNSERIALIZE_SCALAR(tsoTotalLen);
    UNSERIALIZE_SCALAR(tsoUsedLen);
    UNSERIALIZE_SCALAR(tsoPrevSeq);;
    UNSERIALIZE_SCALAR(tsoPktPayloadBytes);
    UNSERIALIZE_SCALAR(tsoLoadedHeader);
    UNSERIALIZE_SCALAR(tsoPktHasHeader);
    UNSERIALIZE_ARRAY(tsoHeader, 256);
    UNSERIALIZE_SCALAR(tsoDescBytesUsed);
    UNSERIALIZE_SCALAR(tsoCopyBytes);
    UNSERIALIZE_SCALAR(tsoPkts);

    UNSERIALIZE_SCALAR(completionAddress);
    UNSERIALIZE_SCALAR(completionEnabled);
    UNSERIALIZE_SCALAR(descEnd);
}

// Returns true exactly once per completed packet (consumes the flag).
bool
IGbE::TxDescCache::packetAvailable()
{
    if (pktDone) {
        pktDone = false;
        return true;
    }
    return false;
}

// Restart the TX state machine, unless the device is draining.
void
IGbE::TxDescCache::enableSm()
{
    if (igbe->drainState() != DrainState::Draining) {
        igbe->txTick = true;
        igbe->restartClock();
    }
}

// True while any packet DMA, writeback, or fetch event is in flight;
// used by drain() to decide whether the device is quiescent.
bool
IGbE::TxDescCache::hasOutstandingEvents()
{
    return pktEvent.scheduled() || wbEvent.scheduled() ||
        fetchEvent.scheduled();
}


///////////////////////////////////// IGbE /////////////////////////////////

// (Re)schedule the main tick event if any state machine wants to run
// and we aren't draining.
void
IGbE::restartClock()
{
    if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) &&
        drainState() == DrainState::Running)
        schedule(tickEvent, clockEdge(Cycles(1)));
}

// Stop all state machines; report Draining while descriptor-cache DMA
// events are still outstanding, Drained otherwise.
DrainState
IGbE::drain()
{
    unsigned int count(0);
    if (rxDescCache.hasOutstandingEvents() ||
        txDescCache.hasOutstandingEvents()) {
        count++;
    }

    txFifoTick = false;
    txTick = false;
    rxTick = false;

    if (tickEvent.scheduled())
        deschedule(tickEvent);

    if (count) {
        DPRINTF(Drain, "IGbE not drained\n");
        return DrainState::Draining;
    } else
        return DrainState::Drained;
}

// Resume after a drain: re-enable every state machine and restart the
// clock (idle machines will stop themselves again).
void
IGbE::drainResume()
{
    Drainable::drainResume();

    txFifoTick = true;
    txTick = true;
    rxTick = true;

    restartClock();
    DPRINTF(EthernetSM, "resuming from drain");
}

// Signal drain completion once all outstanding DMA events retire.
void
IGbE::checkDrain()
{
    if (drainState() != DrainState::Draining)
        return;

    txFifoTick = false;
    txTick = false;
    rxTick = false;
    if (!rxDescCache.hasOutstandingEvents() &&
        !txDescCache.hasOutstandingEvents()) {
        DPRINTF(Drain, "IGbE done draining, processing drain event\n");
        signalDrainDone();
    }
}

void
IGbE::txStateMachine()
{
    if (!regs.tctl.en()) {
        txTick = false;
        DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
        return;
    }

    // If we have a packet available and it's length is not 0 (meaning it's not
    // a multidescriptor packet) put it in the fifo, otherwise an the next
    // iteration we'll get the rest of the data
    if (txPacket && txDescCache.packetAvailable()
        && !txDescCache.packetMultiDesc() && txPacket->length) {
        anQ("TXS", "TX FIFO Q");
        DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
#ifndef NDEBUG
        bool success =
#endif
            txFifo.push(txPacket);
        txFifoTick = true && drainState() != DrainState::Draining;
        assert(success);
        txPacket = NULL;
        anBegin("TXS", "Desc Writeback");
        txDescCache.writeback((cacheBlockSize()-1)>>4);
        return;
    }

    // Only support descriptor granularity
    if (regs.txdctl.lwthresh() &&
        txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
        DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
        postInterrupt(IT_TXDLOW);
    }

    if (!txPacket) {
        // 16384 = maximum buffer we ever assemble a TX packet into
        txPacket = std::make_shared<EthPacketData>(16384);
    }

    if (!txDescCache.packetWaiting()) {
        if (txDescCache.descLeft() == 0) {
            postInterrupt(IT_TXQE);
            anBegin("TXS", "Desc Writeback");
            txDescCache.writeback(0);
            anBegin("TXS", "Desc Fetch");
            anWe("TXS", txDescCache.annUnusedCacheQ);
            txDescCache.fetchDescriptors();
            DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
                    "writeback stopping ticking and posting TXQE\n");
            txTick = false;
            return;
        }


        if (!(txDescCache.descUnused())) {
            anBegin("TXS", "Desc Fetch");
            txDescCache.fetchDescriptors();
            anWe("TXS", txDescCache.annUnusedCacheQ);
            DPRINTF(EthernetSM, "TXS: No descriptors available in cache, "
                    "fetching and stopping ticking\n");
            txTick = false;
            return;
        }
        anPq("TXS", txDescCache.annUnusedCacheQ);


        // Consume any context descriptors (may start a TSO header DMA)
        txDescCache.processContextDesc();
        if (txDescCache.packetWaiting()) {
            DPRINTF(EthernetSM,
                    "TXS: Fetching TSO header, stopping ticking\n");
            txTick = false;
            return;
        }

        unsigned size = txDescCache.getPacketSize(txPacket);
        if (size > 0 && txFifo.avail() > size) {
            anRq("TXS", "TX FIFO Q");
            anBegin("TXS", "DMA Packet");
            DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and "
                    "beginning DMA of next packet\n", size);
            txFifo.reserve(size);
            txDescCache.getPacketData(txPacket);
        } else if (size == 0) {
            DPRINTF(EthernetSM, "TXS: getPacketSize returned: %d\n", size);
            DPRINTF(EthernetSM,
                    "TXS: No packets to get, writing back used descriptors\n");
            anBegin("TXS", "Desc Writeback");
            txDescCache.writeback(0);
        } else {
            anWf("TXS", "TX FIFO Q");
            DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
                    "available in FIFO\n");
            txTick = false;
        }


        return;
    }
    DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
    txTick = false;
}

// Receive a packet from the wire into the RX FIFO. Returns false only
// when the FIFO overflows (packet dropped, RXO interrupt posted);
// dropping because RX is disabled still returns true.
bool
IGbE::ethRxPkt(EthPacketPtr pkt)
{
    rxBytes += pkt->length;
    rxPackets++;

    DPRINTF(Ethernet, "RxFIFO: Receiving pcakte from wire\n");
    anBegin("RXQ", "Wire Recv");


    if (!regs.rctl.en()) {
        DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
        anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
        return true;
    }

    // restart the state machines if they are stopped
    rxTick = true && drainState() != DrainState::Draining;
    if ((rxTick || txTick) && !tickEvent.scheduled()) {
        DPRINTF(EthernetSM,
                "RXS: received packet into fifo, starting ticking\n");
        restartClock();
    }

    if (!rxFifo.push(pkt)) {
        DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
        postInterrupt(IT_RXO, true);
        anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
        return false;
    }

    // Cross-system annotation bookkeeping for dual-system simulations
    if (CPA::available() && cpa->enabled()) {
        assert(sys->numSystemsRunning <= 2);
        System *other_sys;
        if (sys->systemList[0] == sys)
            other_sys = sys->systemList[1];
        else
            other_sys = sys->systemList[0];

        cpa->hwDq(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
        anQ("RXQ", "RX FIFO Q");
        cpa->hwWe(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
    }

    return true;
}


// RX state machine: DMA received packets from the FIFO into guest
// memory via the descriptor cache, managing interrupts and descriptor
// fetch/writeback along the way.
void
IGbE::rxStateMachine()
{
    if (!regs.rctl.en()) {
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
        return;
    }

    // If the packet is done check for interrupts/descriptors/etc
    if (rxDescCache.packetDone()) {
        rxDmaPacket = false;
        DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
        int descLeft = rxDescCache.descLeft();
        DPRINTF(EthernetSM, "RXS: descLeft: %d rdmts: %d rdlen: %d\n",
                descLeft, regs.rctl.rdmts(), regs.rdlen());
        // RDMTS selects the low-water mark (1/8, 1/4, 1/2 of the ring);
        // the cases deliberately fall through to the interrupt post once
        // the selected threshold is crossed.
        switch (regs.rctl.rdmts()) {
          case 2: if (descLeft > .125 * regs.rdlen()) break;
          case 1: if (descLeft > .250 * regs.rdlen()) break;
          case 0: if (descLeft > .500 * regs.rdlen()) break;
            DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) "
                    "because of descriptors left\n");
            postInterrupt(IT_RXDMT);
            break;
        }

        if (rxFifo.empty())
            rxDescCache.writeback(0);

        if (descLeft == 0) {
            anBegin("RXS", "Writeback Descriptors");
            rxDescCache.writeback(0);
            DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
                    " writeback and stopping ticking\n");
            rxTick = false;
        }

        // only support descriptor granulaties
        assert(regs.rxdctl.gran());

        if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
            DPRINTF(EthernetSM,
                    "RXS: Writing back because WTHRESH >= descUsed\n");
            anBegin("RXS", "Writeback Descriptors");
            if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
                rxDescCache.writeback(regs.rxdctl.wthresh()-1);
            else
                rxDescCache.writeback((cacheBlockSize()-1)>>4);
        }

        if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
            ((rxDescCache.descLeft() - rxDescCache.descUnused()) >
             regs.rxdctl.hthresh())) {
            DPRINTF(EthernetSM, "RXS: Fetching descriptors because "
                    "descUnused < PTHRESH\n");
            anBegin("RXS", "Fetch Descriptors");
            rxDescCache.fetchDescriptors();
        }

        if (rxDescCache.descUnused() == 0) {
            anBegin("RXS", "Fetch Descriptors");
            rxDescCache.fetchDescriptors();
            anWe("RXS", rxDescCache.annUnusedCacheQ);
            DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
                    "fetching descriptors and stopping ticking\n");
            rxTick = false;
        }
        return;
    }

    if (rxDmaPacket) {
        DPRINTF(EthernetSM,
                "RXS: stopping ticking until packet DMA completes\n");
        rxTick = false;
        return;
    }

    if (!rxDescCache.descUnused()) {
        anBegin("RXS", "Fetch Descriptors");
        rxDescCache.fetchDescriptors();
        anWe("RXS", rxDescCache.annUnusedCacheQ);
        DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
                "stopping ticking\n");
        rxTick = false;
        DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n");
        return;
    }
    anPq("RXS", rxDescCache.annUnusedCacheQ);

    if (rxFifo.empty()) {
        anWe("RXS", "RX FIFO Q");
        DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
        rxTick = false;
        return;
    }
    anPq("RXS", "RX FIFO Q");
    anBegin("RXS", "Get Desc");

    EthPacketPtr pkt;
    pkt = rxFifo.front();


    // Write as much of the packet as fits the current descriptor;
    // pktOffset tracks progress across multiple descriptors.
    pktOffset = rxDescCache.writePacket(pkt, pktOffset);
    DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
    if (pktOffset == pkt->length) {
        anBegin( "RXS", "FIFO Dequeue");
        DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
        pktOffset = 0;
        anDq("RXS", "RX FIFO Q");
        rxFifo.pop();
    }

    DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
    rxTick = false;
    rxDmaPacket = true;
    anBegin("RXS", "DMA Packet");
}

// Try to transmit the packet at the head of the TX FIFO onto the wire.
void
IGbE::txWire()
{
    if (txFifo.empty()) {
        anWe("TXQ", "TX FIFO Q");
        txFifoTick = false;
        return;
    }


    anPq("TXQ", "TX FIFO Q");
    if (etherInt->sendPacket(txFifo.front())) {
        anQ("TXQ", "WireQ");
        if (DTRACE(EthernetSM)) {
            IpPtr ip(txFifo.front());
            if (ip)
                DPRINTF(EthernetSM, "Transmitting Ip packet with Id=%d\n",
                        ip->id());
            else
                DPRINTF(EthernetSM, "Transmitting Non-Ip packet\n");
        }
        anDq("TXQ", "TX FIFO Q");
        anBegin("TXQ", "Wire Send");
        DPRINTF(EthernetSM,
                "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
                txFifo.avail());

        txBytes += txFifo.front()->length;
        txPackets++;
        txFifoTick = false;

        txFifo.pop();
    } else {
        // We'll get woken up when the packet ethTxDone() gets called
        txFifoTick = false;
    }
}

// Main clock tick: run whichever state machines are enabled and
// reschedule if any of them still want to run next cycle.
void
IGbE::tick()
{
    DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");

    if (rxTick)
        rxStateMachine();

    if (txTick)
        txStateMachine();

    if (txFifoTick)
        txWire();


    if (rxTick || txTick || txFifoTick)
        schedule(tickEvent, curTick() + clockPeriod());
}

// Called by the interface when the wire finishes sending our packet.
void
IGbE::ethTxDone()
{
    anBegin("TXQ", "Send Done");
    // restart the tx state machines if they are stopped
    // fifo to send another packet
    // tx sm to put more data into the fifo
    txFifoTick = true && drainState() != DrainState::Draining;
    if (txDescCache.descLeft() != 0 && drainState() != DrainState::Draining)
        txTick = true;

    restartClock();
    txWire();
    DPRINTF(EthernetSM,
            "TxFIFO: Transmission complete\n");
}

// Checkpoint the whole device: registers, EEPROM interface state, both
// FIFOs, any in-flight TX packet, pending timer/interrupt events, and
// both descriptor caches.
void
IGbE::serialize(CheckpointOut &cp) const
{
    PciDevice::serialize(cp);

    regs.serialize(cp);
    SERIALIZE_SCALAR(eeOpBits);
    SERIALIZE_SCALAR(eeAddrBits);
    SERIALIZE_SCALAR(eeDataBits);
    SERIALIZE_SCALAR(eeOpcode);
    SERIALIZE_SCALAR(eeAddr);
    SERIALIZE_SCALAR(lastInterrupt);
    SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.serialize("rxfifo", cp);
    txFifo.serialize("txfifo", cp);

    bool txPktExists = txPacket != nullptr;
    SERIALIZE_SCALAR(txPktExists);
    if (txPktExists)
        txPacket->serialize("txpacket", cp);

    // Events are recorded as their absolute scheduled tick; 0 means the
    // event was not scheduled.
    Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
        inter_time = 0;

    if (rdtrEvent.scheduled())
        rdtr_time = rdtrEvent.when();
    SERIALIZE_SCALAR(rdtr_time);

    if (radvEvent.scheduled())
        radv_time = radvEvent.when();
    SERIALIZE_SCALAR(radv_time);

    if (tidvEvent.scheduled())
        tidv_time = tidvEvent.when();
    SERIALIZE_SCALAR(tidv_time);

    if (tadvEvent.scheduled())
        tadv_time = tadvEvent.when();
    SERIALIZE_SCALAR(tadv_time);

    if (interEvent.scheduled())
        inter_time = interEvent.when();
    SERIALIZE_SCALAR(inter_time);

    SERIALIZE_SCALAR(pktOffset);

    txDescCache.serializeSection(cp, "TxDescCache");
    rxDescCache.serializeSection(cp, "RxDescCache");
}

// Restore device state from a checkpoint; mirrors serialize() above.
void
IGbE::unserialize(CheckpointIn &cp)
{
    PciDevice::unserialize(cp);

    regs.unserialize(cp);
    UNSERIALIZE_SCALAR(eeOpBits);
    UNSERIALIZE_SCALAR(eeAddrBits);
    UNSERIALIZE_SCALAR(eeDataBits);
    UNSERIALIZE_SCALAR(eeOpcode);
    UNSERIALIZE_SCALAR(eeAddr);
    UNSERIALIZE_SCALAR(lastInterrupt);
    UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE);

    rxFifo.unserialize("rxfifo", cp);
    txFifo.unserialize("txfifo", cp);

    bool txPktExists;
    UNSERIALIZE_SCALAR(txPktExists);
    if (txPktExists) {
        // 16384 matches the allocation in txStateMachine()
        txPacket = std::make_shared<EthPacketData>(16384);
        txPacket->unserialize("txpacket", cp);
    }

    // Re-enable all state machines; idle ones stop themselves again
    rxTick = true;
    txTick = true;
    txFifoTick = true;

    Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
    UNSERIALIZE_SCALAR(rdtr_time);
    UNSERIALIZE_SCALAR(radv_time);
    UNSERIALIZE_SCALAR(tidv_time);
    UNSERIALIZE_SCALAR(tadv_time);
    UNSERIALIZE_SCALAR(inter_time);

    if (rdtr_time)
        schedule(rdtrEvent, rdtr_time);

    if (radv_time)
        schedule(radvEvent, radv_time);

    if (tidv_time)
        schedule(tidvEvent, tidv_time);

    if (tadv_time)
        schedule(tadvEvent, tadv_time);

    if (inter_time)
        schedule(interEvent, inter_time);

    UNSERIALIZE_SCALAR(pktOffset);

    txDescCache.unserializeSection(cp, "TxDescCache");
    rxDescCache.unserializeSection(cp, "RxDescCache");
}

// Python-config factory method for the IGbE SimObject.
IGbE *
IGbEParams::create()
{
    return new IGbE(this);
}
| 553 case REG_LEDCTL: 554 case REG_FCAL: 555 case REG_FCAH: 556 case REG_FCT: 557 case REG_VET: 558 case REG_AIFS: 559 case REG_TIPG: 560 ; // We don't care, so don't store anything 561 break; 562 case REG_IVAR0: 563 warn("Writing to IVAR0, ignoring...\n"); 564 break; 565 case REG_FCRTL: 566 regs.fcrtl = val; 567 break; 568 case REG_FCRTH: 569 regs.fcrth = val; 570 break; 571 case REG_RDBAL: 572 regs.rdba.rdbal( val & ~mask(4)); 573 rxDescCache.areaChanged(); 574 break; 575 case REG_RDBAH: 576 regs.rdba.rdbah(val); 577 rxDescCache.areaChanged(); 578 break; 579 case REG_RDLEN: 580 regs.rdlen = val & ~mask(7); 581 rxDescCache.areaChanged(); 582 break; 583 case REG_SRRCTL: 584 regs.srrctl = val; 585 break; 586 case REG_RDH: 587 regs.rdh = val; 588 rxDescCache.areaChanged(); 589 break; 590 case REG_RDT: 591 regs.rdt = val; 592 DPRINTF(EthernetSM, "RXS: RDT Updated.\n"); 593 if (drainState() == DrainState::Running) { 594 DPRINTF(EthernetSM, "RXS: RDT Fetching Descriptors!\n"); 595 rxDescCache.fetchDescriptors(); 596 } else { 597 DPRINTF(EthernetSM, "RXS: RDT NOT Fetching Desc b/c draining!\n"); 598 } 599 break; 600 case REG_RDTR: 601 regs.rdtr = val; 602 break; 603 case REG_RADV: 604 regs.radv = val; 605 break; 606 case REG_RXDCTL: 607 regs.rxdctl = val; 608 break; 609 case REG_TDBAL: 610 regs.tdba.tdbal( val & ~mask(4)); 611 txDescCache.areaChanged(); 612 break; 613 case REG_TDBAH: 614 regs.tdba.tdbah(val); 615 txDescCache.areaChanged(); 616 break; 617 case REG_TDLEN: 618 regs.tdlen = val & ~mask(7); 619 txDescCache.areaChanged(); 620 break; 621 case REG_TDH: 622 regs.tdh = val; 623 txDescCache.areaChanged(); 624 break; 625 case REG_TXDCA_CTL: 626 regs.txdca_ctl = val; 627 if (regs.txdca_ctl.enabled()) 628 panic("No support for DCA\n"); 629 break; 630 case REG_TDT: 631 regs.tdt = val; 632 DPRINTF(EthernetSM, "TXS: TX Tail pointer updated\n"); 633 if (drainState() == DrainState::Running) { 634 DPRINTF(EthernetSM, "TXS: TDT Fetching Descriptors!\n"); 635 
txDescCache.fetchDescriptors(); 636 } else { 637 DPRINTF(EthernetSM, "TXS: TDT NOT Fetching Desc b/c draining!\n"); 638 } 639 break; 640 case REG_TIDV: 641 regs.tidv = val; 642 break; 643 case REG_TXDCTL: 644 regs.txdctl = val; 645 break; 646 case REG_TADV: 647 regs.tadv = val; 648 break; 649 case REG_TDWBAL: 650 regs.tdwba &= ~mask(32); 651 regs.tdwba |= val; 652 txDescCache.completionWriteback(regs.tdwba & ~mask(1), 653 regs.tdwba & mask(1)); 654 break; 655 case REG_TDWBAH: 656 regs.tdwba &= mask(32); 657 regs.tdwba |= (uint64_t)val << 32; 658 txDescCache.completionWriteback(regs.tdwba & ~mask(1), 659 regs.tdwba & mask(1)); 660 break; 661 case REG_RXCSUM: 662 regs.rxcsum = val; 663 break; 664 case REG_RLPML: 665 regs.rlpml = val; 666 break; 667 case REG_RFCTL: 668 regs.rfctl = val; 669 if (regs.rfctl.exsten()) 670 panic("Extended RX descriptors not implemented\n"); 671 break; 672 case REG_MANC: 673 regs.manc = val; 674 break; 675 case REG_SWSM: 676 regs.swsm = val; 677 if (regs.fwsm.eep_fw_semaphore()) 678 regs.swsm.swesmbi(0); 679 break; 680 case REG_SWFWSYNC: 681 regs.sw_fw_sync = val; 682 break; 683 default: 684 if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) && 685 !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) && 686 !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4)) 687 panic("Write request to unknown register number: %#x\n", daddr); 688 }; 689 690 pkt->makeAtomicResponse(); 691 return pioDelay; 692} 693 694void 695IGbE::postInterrupt(IntTypes t, bool now) 696{ 697 assert(t); 698 699 // Interrupt is already pending 700 if (t & regs.icr() && !now) 701 return; 702 703 regs.icr = regs.icr() | t; 704 705 Tick itr_interval = SimClock::Int::ns * 256 * regs.itr.interval(); 706 DPRINTF(EthernetIntr, 707 "EINT: postInterrupt() curTick(): %d itr: %d interval: %d\n", 708 curTick(), regs.itr.interval(), itr_interval); 709 710 if (regs.itr.interval() == 0 || now || 711 lastInterrupt + itr_interval <= curTick()) { 712 if (interEvent.scheduled()) { 713 
deschedule(interEvent); 714 } 715 cpuPostInt(); 716 } else { 717 Tick int_time = lastInterrupt + itr_interval; 718 assert(int_time > 0); 719 DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for tick %d\n", 720 int_time); 721 if (!interEvent.scheduled()) { 722 schedule(interEvent, int_time); 723 } 724 } 725} 726 727void 728IGbE::delayIntEvent() 729{ 730 cpuPostInt(); 731} 732 733 734void 735IGbE::cpuPostInt() 736{ 737 738 postedInterrupts++; 739 740 if (!(regs.icr() & regs.imr)) { 741 DPRINTF(Ethernet, "Interrupt Masked. Not Posting\n"); 742 return; 743 } 744 745 DPRINTF(Ethernet, "Posting Interrupt\n"); 746 747 748 if (interEvent.scheduled()) { 749 deschedule(interEvent); 750 } 751 752 if (rdtrEvent.scheduled()) { 753 regs.icr.rxt0(1); 754 deschedule(rdtrEvent); 755 } 756 if (radvEvent.scheduled()) { 757 regs.icr.rxt0(1); 758 deschedule(radvEvent); 759 } 760 if (tadvEvent.scheduled()) { 761 regs.icr.txdw(1); 762 deschedule(tadvEvent); 763 } 764 if (tidvEvent.scheduled()) { 765 regs.icr.txdw(1); 766 deschedule(tidvEvent); 767 } 768 769 regs.icr.int_assert(1); 770 DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n", 771 regs.icr()); 772 773 intrPost(); 774 775 lastInterrupt = curTick(); 776} 777 778void 779IGbE::cpuClearInt() 780{ 781 if (regs.icr.int_assert()) { 782 regs.icr.int_assert(0); 783 DPRINTF(EthernetIntr, 784 "EINT: Clearing interrupt to CPU now. 
Vector %#x\n", 785 regs.icr()); 786 intrClear(); 787 } 788} 789 790void 791IGbE::chkInterrupt() 792{ 793 DPRINTF(Ethernet, "Checking interrupts icr: %#x imr: %#x\n", regs.icr(), 794 regs.imr); 795 // Check if we need to clear the cpu interrupt 796 if (!(regs.icr() & regs.imr)) { 797 DPRINTF(Ethernet, "Mask cleaned all interrupts\n"); 798 if (interEvent.scheduled()) 799 deschedule(interEvent); 800 if (regs.icr.int_assert()) 801 cpuClearInt(); 802 } 803 DPRINTF(Ethernet, "ITR = %#X itr.interval = %#X\n", 804 regs.itr(), regs.itr.interval()); 805 806 if (regs.icr() & regs.imr) { 807 if (regs.itr.interval() == 0) { 808 cpuPostInt(); 809 } else { 810 DPRINTF(Ethernet, 811 "Possibly scheduling interrupt because of imr write\n"); 812 if (!interEvent.scheduled()) { 813 Tick t = curTick() + SimClock::Int::ns * 256 * regs.itr.interval(); 814 DPRINTF(Ethernet, "Scheduling for %d\n", t); 815 schedule(interEvent, t); 816 } 817 } 818 } 819} 820 821 822///////////////////////////// IGbE::DescCache ////////////////////////////// 823 824template<class T> 825IGbE::DescCache<T>::DescCache(IGbE *i, const std::string n, int s) 826 : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0), 827 wbOut(0), moreToWb(false), wbAlignment(0), pktPtr(NULL), 828 wbDelayEvent(this), fetchDelayEvent(this), fetchEvent(this), 829 wbEvent(this) 830{ 831 fetchBuf = new T[size]; 832 wbBuf = new T[size]; 833} 834 835template<class T> 836IGbE::DescCache<T>::~DescCache() 837{ 838 reset(); 839 delete[] fetchBuf; 840 delete[] wbBuf; 841} 842 843template<class T> 844void 845IGbE::DescCache<T>::areaChanged() 846{ 847 if (usedCache.size() > 0 || curFetching || wbOut) 848 panic("Descriptor Address, Length or Head changed. 
// Called when the guest rewrites the ring base/length/head registers.
// The model cannot cope with the ring moving while descriptors are
// cached or in flight, so that is fatal; otherwise drop the cache.
template<class T>
void
IGbE::DescCache<T>::areaChanged()
{
    if (usedCache.size() > 0 || curFetching || wbOut)
        panic("Descriptor Address, Length or Head changed. Bad\n");
    reset();

}

// Write completed ("used") descriptors back to guest memory. `aMask`
// aligns the writeback count (e.g. to cache-block granularity); 0 means
// no alignment restriction.
template<class T>
void
IGbE::DescCache<T>::writeback(Addr aMask)
{
    int curHead = descHead();
    int max_to_wb = usedCache.size();

    // Check if this writeback is less restrictive that the previous
    // and if so setup another one immediately following it
    if (wbOut) {
        if (aMask < wbAlignment) {
            moreToWb = true;
            wbAlignment = aMask;
        }
        DPRINTF(EthernetDesc,
                "Writing back already in process, returning\n");
        return;
    }

    moreToWb = false;
    wbAlignment = aMask;


    DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
            "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
            curHead, descTail(), descLen(), cachePnt, max_to_wb,
            descLeft());

    // Never write past the end of the ring; the wrapped remainder is
    // handled by a follow-up writeback (moreToWb) after this one completes.
    if (max_to_wb + curHead >= descLen()) {
        max_to_wb = descLen() - curHead;
        moreToWb = true;
        // this is by definition aligned correctly
    } else if (wbAlignment != 0) {
        // align the wb point to the mask
        max_to_wb = max_to_wb & ~wbAlignment;
    }

    DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);

    // Alignment may have rounded the count to zero; annotate and wait.
    if (max_to_wb <= 0) {
        if (usedCache.size())
            igbe->anBegin(annSmWb, "Wait Alignment", CPA::FL_WAIT);
        else
            igbe->anWe(annSmWb, annUsedCacheQ);
        return;
    }

    wbOut = max_to_wb;

    // Model a fixed preparation latency before issuing the DMA.
    assert(!wbDelayEvent.scheduled());
    igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
    igbe->anBegin(annSmWb, "Prepare Writeback Desc");
}

// Second stage of writeback: stage `wbOut` descriptors into wbBuf and
// start the DMA. Re-arms itself while the device is draining.
template<class T>
void
IGbE::DescCache<T>::writeback1()
{
    // If we're draining delay issuing this DMA
    if (igbe->drainState() != DrainState::Running) {
        igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
        return;
    }

    DPRINTF(EthernetDesc, "Begining DMA of %d descriptors\n", wbOut);

    for (int x = 0; x < wbOut; x++) {
        assert(usedCache.size());
        memcpy(&wbBuf[x], usedCache[x], sizeof(T));
        igbe->anPq(annSmWb, annUsedCacheQ);
        igbe->anPq(annSmWb, annDescQ);
        igbe->anQ(annSmWb, annUsedDescQ);
    }


    igbe->anBegin(annSmWb, "Writeback Desc DMA");

    assert(wbOut);
    // wbEvent fires wbComplete() when the DMA finishes.
    igbe->dmaWrite(pciToDma(descBase() + descHead() * sizeof(T)),
                   wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf,
                   igbe->wbCompDelay);
}

// Fetch new descriptors from guest memory into the cache, bounded by
// how many the guest has published (tail) and free cache slots.
template<class T>
void
IGbE::DescCache<T>::fetchDescriptors()
{
    size_t max_to_fetch;

    if (curFetching) {
        DPRINTF(EthernetDesc,
                "Currently fetching %d descriptors, returning\n",
                curFetching);
        return;
    }

    // Fetch up to the tail, or to the end of the ring if the tail has
    // wrapped (the wrapped part is picked up by a later fetch).
    if (descTail() >= cachePnt)
        max_to_fetch = descTail() - cachePnt;
    else
        max_to_fetch = descLen() - cachePnt;

    size_t free_cache = size - usedCache.size() - unusedCache.size();

    // CPA annotations: waiting-empty vs. peek, waiting-full vs. reserve.
    if (!max_to_fetch)
        igbe->anWe(annSmFetch, annUnusedDescQ);
    else
        igbe->anPq(annSmFetch, annUnusedDescQ, max_to_fetch);

    if (max_to_fetch) {
        if (!free_cache)
            igbe->anWf(annSmFetch, annDescQ);
        else
            igbe->anRq(annSmFetch, annDescQ, free_cache);
    }

    max_to_fetch = std::min(max_to_fetch, free_cache);


    DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
            "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
            descHead(), descTail(), descLen(), cachePnt,
            max_to_fetch, descLeft());

    // Nothing to do
    if (max_to_fetch == 0)
        return;

    // So we don't have two descriptor fetches going on at once
    curFetching = max_to_fetch;

    // Model a fixed preparation latency before issuing the DMA.
    assert(!fetchDelayEvent.scheduled());
    igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
    igbe->anBegin(annSmFetch, "Prepare Fetch Desc");
}
// Second stage of a fetch: issue the DMA read of `curFetching`
// descriptors into fetchBuf. Re-arms itself while the device is draining.
template<class T>
void
IGbE::DescCache<T>::fetchDescriptors1()
{
    // If we're draining delay issuing this DMA
    if (igbe->drainState() != DrainState::Running) {
        igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
        return;
    }

    igbe->anBegin(annSmFetch, "Fetch Desc");

    DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
            descBase() + cachePnt * sizeof(T),
            pciToDma(descBase() + cachePnt * sizeof(T)),
            curFetching * sizeof(T));
    assert(curFetching);
    // fetchEvent fires fetchComplete() when the DMA finishes.
    igbe->dmaRead(pciToDma(descBase() + cachePnt * sizeof(T)),
                  curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf,
                  igbe->fetchCompDelay);
}

// DMA-read completion: copy the fetched descriptors into the unused
// cache, advance (and wrap) cachePnt, and kick the state machine.
template<class T>
void
IGbE::DescCache<T>::fetchComplete()
{
    T *newDesc;
    igbe->anBegin(annSmFetch, "Fetch Complete");
    for (int x = 0; x < curFetching; x++) {
        newDesc = new T;
        memcpy(newDesc, &fetchBuf[x], sizeof(T));
        unusedCache.push_back(newDesc);
        igbe->anDq(annSmFetch, annUnusedDescQ);
        igbe->anQ(annSmFetch, annUnusedCacheQ);
        igbe->anQ(annSmFetch, annDescQ);
    }


#ifndef NDEBUG
    // Only needed for the trace message below.
    int oldCp = cachePnt;
#endif

    cachePnt += curFetching;
    assert(cachePnt <= descLen());
    if (cachePnt == descLen())
        cachePnt = 0;

    curFetching = 0;

    DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
            oldCp, cachePnt);

    // Annotate the next state: no more published descriptors, cache
    // full, or simply waiting for the next event.
    if ((descTail() >= cachePnt ? (descTail() - cachePnt) : (descLen() -
                                                            cachePnt)) == 0)
    {
        igbe->anWe(annSmFetch, annUnusedDescQ);
    } else if (!(size - usedCache.size() - unusedCache.size())) {
        igbe->anWf(annSmFetch, annDescQ);
    } else {
        igbe->anBegin(annSmFetch, "Wait", CPA::FL_WAIT);
    }

    enableSm();
    igbe->checkDrain();
}

// DMA-write completion for a descriptor writeback: retire the written
// descriptors, advance the head pointer (with wrap), and chain any
// remaining writeback work.
template<class T>
void
IGbE::DescCache<T>::wbComplete()
{

    igbe->anBegin(annSmWb, "Finish Writeback");

    long curHead = descHead();
#ifndef NDEBUG
    // Only needed for the trace message below.
    long oldHead = curHead;
#endif

    for (int x = 0; x < wbOut; x++) {
        assert(usedCache.size());
        delete usedCache[0];
        usedCache.pop_front();

        igbe->anDq(annSmWb, annUsedCacheQ);
        igbe->anDq(annSmWb, annDescQ);
    }

    curHead += wbOut;
    wbOut = 0;

    if (curHead >= descLen())
        curHead -= descLen();

    // Update the head
    updateHead(curHead);

    DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
            oldHead, curHead);

    // If we still have more to wb, call wb now
    actionAfterWb();
    if (moreToWb) {
        moreToWb = false;
        DPRINTF(EthernetDesc, "Writeback has more todo\n");
        writeback(wbAlignment);
    }

    if (!wbOut) {
        igbe->checkDrain();
        if (usedCache.size())
            igbe->anBegin(annSmWb, "Wait", CPA::FL_WAIT);
        else
            igbe->anWe(annSmWb, annUsedCacheQ);
    }
    fetchAfterWb();
}

// Drop all cached descriptors and rewind the fetch pointer.
template<class T>
void
IGbE::DescCache<T>::reset()
{
    DPRINTF(EthernetDesc, "Reseting descriptor cache\n");
    for (typename CacheType::size_type x = 0; x < usedCache.size(); x++)
        delete usedCache[x];
    for (typename CacheType::size_type x = 0; x < unusedCache.size(); x++)
        delete unusedCache[x];

    usedCache.clear();
    unusedCache.clear();

    cachePnt = 0;

}
// Checkpoint the cache state, including raw descriptor contents and
// the scheduled times of any pending fetch/writeback delay events.
template<class T>
void
IGbE::DescCache<T>::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(cachePnt);
    SERIALIZE_SCALAR(curFetching);
    SERIALIZE_SCALAR(wbOut);
    SERIALIZE_SCALAR(moreToWb);
    SERIALIZE_SCALAR(wbAlignment);

    typename CacheType::size_type usedCacheSize = usedCache.size();
    SERIALIZE_SCALAR(usedCacheSize);
    for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
        arrayParamOut(cp, csprintf("usedCache_%d", x),
                      (uint8_t*)usedCache[x],sizeof(T));
    }

    typename CacheType::size_type unusedCacheSize = unusedCache.size();
    SERIALIZE_SCALAR(unusedCacheSize);
    for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
        arrayParamOut(cp, csprintf("unusedCache_%d", x),
                      (uint8_t*)unusedCache[x],sizeof(T));
    }

    // A value of 0 means "not scheduled" on restore.
    Tick fetch_delay = 0, wb_delay = 0;
    if (fetchDelayEvent.scheduled())
        fetch_delay = fetchDelayEvent.when();
    SERIALIZE_SCALAR(fetch_delay);
    if (wbDelayEvent.scheduled())
        wb_delay = wbDelayEvent.when();
    SERIALIZE_SCALAR(wb_delay);


}

// Restore the cache state written by serialize(); re-schedules the
// delay events only if they were pending at checkpoint time.
template<class T>
void
IGbE::DescCache<T>::unserialize(CheckpointIn &cp)
{
    UNSERIALIZE_SCALAR(cachePnt);
    UNSERIALIZE_SCALAR(curFetching);
    UNSERIALIZE_SCALAR(wbOut);
    UNSERIALIZE_SCALAR(moreToWb);
    UNSERIALIZE_SCALAR(wbAlignment);

    typename CacheType::size_type usedCacheSize;
    UNSERIALIZE_SCALAR(usedCacheSize);
    T *temp;
    for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
        temp = new T;
        arrayParamIn(cp, csprintf("usedCache_%d", x),
                     (uint8_t*)temp,sizeof(T));
        usedCache.push_back(temp);
    }

    typename CacheType::size_type unusedCacheSize;
    UNSERIALIZE_SCALAR(unusedCacheSize);
    for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
        temp = new T;
        arrayParamIn(cp, csprintf("unusedCache_%d", x),
                     (uint8_t*)temp,sizeof(T));
        unusedCache.push_back(temp);
    }
    Tick fetch_delay = 0, wb_delay = 0;
    UNSERIALIZE_SCALAR(fetch_delay);
    UNSERIALIZE_SCALAR(wb_delay);
    if (fetch_delay)
        igbe->schedule(fetchDelayEvent, fetch_delay);
    if (wb_delay)
        igbe->schedule(wbDelayEvent, wb_delay);


}

///////////////////////////// IGbE::RxDescCache //////////////////////////////

// RX specialization: names the CPA annotation queues and tracks
// split-header packet completion state.
IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<RxDesc>(i, n, s), pktDone(false), splitCount(0),
    pktEvent(this), pktHdrEvent(this), pktDataEvent(this)

{
    annSmFetch = "RX Desc Fetch";
    annSmWb = "RX Desc Writeback";
    annUnusedDescQ = "RX Unused Descriptors";
    annUnusedCacheQ = "RX Unused Descriptor Cache";
    annUsedCacheQ = "RX Used Descriptor Cache";
    annUsedDescQ = "RX Used Descriptors";
    annDescQ = "RX Descriptors";
}

// Header-split packets issue two DMAs (header + payload); only when
// both have completed is the descriptor finalized.
void
IGbE::RxDescCache::pktSplitDone()
{
    splitCount++;
    DPRINTF(EthernetDesc,
            "Part of split packet done: splitcount now %d\n", splitCount);
    assert(splitCount <= 2);
    if (splitCount != 2)
        return;
    splitCount = 0;
    DPRINTF(EthernetDesc,
            "Part of split packet done: calling pktComplete()\n");
    pktComplete();
}
1252 bytesCopied = packet->length; 1253 buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() : 1254 igbe->regs.rctl.descSize(); 1255 DPRINTF(EthernetDesc, "Packet Length: %d srrctl: %#x Desc Size: %d\n", 1256 packet->length, igbe->regs.srrctl(), buf_len); 1257 assert(packet->length < buf_len); 1258 igbe->dmaWrite(pciToDma(desc->adv_read.pkt), 1259 packet->length, &pktEvent, packet->data, 1260 igbe->rxWriteDelay); 1261 desc->adv_wb.header_len = htole(0); 1262 desc->adv_wb.sph = htole(0); 1263 desc->adv_wb.pkt_len = htole((uint16_t)(pktPtr->length)); 1264 break; 1265 case RXDT_ADV_SPLIT_A: 1266 int split_point; 1267 1268 buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() : 1269 igbe->regs.rctl.descSize(); 1270 hdr_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.hdrLen() : 0; 1271 DPRINTF(EthernetDesc, 1272 "lpe: %d Packet Length: %d offset: %d srrctl: %#x " 1273 "hdr addr: %#x Hdr Size: %d desc addr: %#x Desc Size: %d\n", 1274 igbe->regs.rctl.lpe(), packet->length, pkt_offset, 1275 igbe->regs.srrctl(), desc->adv_read.hdr, hdr_len, 1276 desc->adv_read.pkt, buf_len); 1277 1278 split_point = hsplit(pktPtr); 1279 1280 if (packet->length <= hdr_len) { 1281 bytesCopied = packet->length; 1282 assert(pkt_offset == 0); 1283 DPRINTF(EthernetDesc, "Hdr split: Entire packet in header\n"); 1284 igbe->dmaWrite(pciToDma(desc->adv_read.hdr), 1285 packet->length, &pktEvent, packet->data, 1286 igbe->rxWriteDelay); 1287 desc->adv_wb.header_len = htole((uint16_t)packet->length); 1288 desc->adv_wb.sph = htole(0); 1289 desc->adv_wb.pkt_len = htole(0); 1290 } else if (split_point) { 1291 if (pkt_offset) { 1292 // we are only copying some data, header/data has already been 1293 // copied 1294 int max_to_copy = 1295 std::min(packet->length - pkt_offset, buf_len); 1296 bytesCopied += max_to_copy; 1297 DPRINTF(EthernetDesc, 1298 "Hdr split: Continuing data buffer copy\n"); 1299 igbe->dmaWrite(pciToDma(desc->adv_read.pkt), 1300 max_to_copy, &pktEvent, 1301 packet->data + 
pkt_offset, igbe->rxWriteDelay); 1302 desc->adv_wb.header_len = htole(0); 1303 desc->adv_wb.pkt_len = htole((uint16_t)max_to_copy); 1304 desc->adv_wb.sph = htole(0); 1305 } else { 1306 int max_to_copy = 1307 std::min(packet->length - split_point, buf_len); 1308 bytesCopied += max_to_copy + split_point; 1309 1310 DPRINTF(EthernetDesc, "Hdr split: splitting at %d\n", 1311 split_point); 1312 igbe->dmaWrite(pciToDma(desc->adv_read.hdr), 1313 split_point, &pktHdrEvent, 1314 packet->data, igbe->rxWriteDelay); 1315 igbe->dmaWrite(pciToDma(desc->adv_read.pkt), 1316 max_to_copy, &pktDataEvent, 1317 packet->data + split_point, igbe->rxWriteDelay); 1318 desc->adv_wb.header_len = htole(split_point); 1319 desc->adv_wb.sph = 1; 1320 desc->adv_wb.pkt_len = htole((uint16_t)(max_to_copy)); 1321 } 1322 } else { 1323 panic("Header split not fitting within header buffer or " 1324 "undecodable packet not fitting in header unsupported\n"); 1325 } 1326 break; 1327 default: 1328 panic("Unimplemnted RX receive buffer type: %d\n", 1329 igbe->regs.srrctl.desctype()); 1330 } 1331 return bytesCopied; 1332 1333} 1334 1335void 1336IGbE::RxDescCache::pktComplete() 1337{ 1338 assert(unusedCache.size()); 1339 RxDesc *desc; 1340 desc = unusedCache.front(); 1341 1342 igbe->anBegin("RXS", "Update Desc"); 1343 1344 uint16_t crcfixup = igbe->regs.rctl.secrc() ? 
// All DMAs for the current descriptor have finished: compute checksum
// offload results, fill in the descriptor write-back fields, retire the
// descriptor, and raise/schedule receive interrupts as configured.
void
IGbE::RxDescCache::pktComplete()
{
    assert(unusedCache.size());
    RxDesc *desc;
    desc = unusedCache.front();

    igbe->anBegin("RXS", "Update Desc");

    // Unless "strip ethernet CRC" is enabled, the reported length
    // includes the 4-byte FCS.
    uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4 ;
    DPRINTF(EthernetDesc, "pktPtr->length: %d bytesCopied: %d "
            "stripcrc offset: %d value written: %d %d\n",
            pktPtr->length, bytesCopied, crcfixup,
            htole((uint16_t)(pktPtr->length + crcfixup)),
            (uint16_t)(pktPtr->length + crcfixup));

    // no support for anything but starting at 0
    assert(igbe->regs.rxcsum.pcss() == 0);

    DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n");

    uint16_t status = RXDS_DD;
    uint8_t err = 0;
    uint16_t ext_err = 0;
    uint16_t csum = 0;
    uint16_t ptype = 0;
    uint16_t ip_id = 0;

    // EOP only when this descriptor holds the last bytes of the frame.
    assert(bytesCopied <= pktPtr->length);
    if (bytesCopied == pktPtr->length)
        status |= RXDS_EOP;

    IpPtr ip(pktPtr);

    if (ip) {
        DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n", ip->id());
        ptype |= RXDP_IPV4;
        ip_id = ip->id();

        // A non-zero recomputed checksum marks the packet as bad.
        if (igbe->regs.rxcsum.ipofld()) {
            DPRINTF(EthernetDesc, "Checking IP checksum\n");
            status |= RXDS_IPCS;
            csum = htole(cksum(ip));
            igbe->rxIpChecksums++;
            if (cksum(ip) != 0) {
                err |= RXDE_IPE;
                ext_err |= RXDEE_IPE;
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
            }
        }
        TcpPtr tcp(ip);
        if (tcp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking TCP checksum\n");
            status |= RXDS_TCPCS;
            ptype |= RXDP_TCP;
            csum = htole(cksum(tcp));
            igbe->rxTcpChecksums++;
            if (cksum(tcp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                err |= RXDE_TCPE;
                ext_err |= RXDEE_TCPE;
            }
        }

        UdpPtr udp(ip);
        if (udp && igbe->regs.rxcsum.tuofld()) {
            DPRINTF(EthernetDesc, "Checking UDP checksum\n");
            status |= RXDS_UDPCS;
            ptype |= RXDP_UDP;
            csum = htole(cksum(udp));
            igbe->rxUdpChecksums++;
            if (cksum(udp) != 0) {
                DPRINTF(EthernetDesc, "Checksum is bad!!\n");
                // NOTE: the TCP error bits are reused for UDP checksum
                // failures (there is no dedicated UDP error bit here).
                ext_err |= RXDEE_TCPE;
                err |= RXDE_TCPE;
            }
        }
    } else { // if ip
        DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
    }

    // Write the computed fields into the descriptor in the layout the
    // configured descriptor type expects.
    switch (igbe->regs.srrctl.desctype()) {
      case RXDT_LEGACY:
        desc->legacy.len = htole((uint16_t)(pktPtr->length + crcfixup));
        desc->legacy.status = htole(status);
        desc->legacy.errors = htole(err);
        // No vlan support at this point... just set it to 0
        desc->legacy.vlan = 0;
        break;
      case RXDT_ADV_SPLIT_A:
      case RXDT_ADV_ONEBUF:
        desc->adv_wb.rss_type = htole(0);
        desc->adv_wb.pkt_type = htole(ptype);
        if (igbe->regs.rxcsum.pcsd()) {
            // no rss support right now
            desc->adv_wb.rss_hash = htole(0);
        } else {
            desc->adv_wb.id = htole(ip_id);
            desc->adv_wb.csum = htole(csum);
        }
        desc->adv_wb.status = htole(status);
        desc->adv_wb.errors = htole(ext_err);
        // no vlan support
        desc->adv_wb.vlan_tag = htole(0);
        break;
      default:
        panic("Unimplemnted RX receive buffer type %d\n",
              igbe->regs.srrctl.desctype());
    }

    DPRINTF(EthernetDesc, "Descriptor complete w0: %#x w1: %#x\n",
            desc->adv_read.pkt, desc->adv_read.hdr);

    if (bytesCopied == pktPtr->length) {
        DPRINTF(EthernetDesc,
                "Packet completely written to descriptor buffers\n");
        // Deal with the rx timer interrupts
        if (igbe->regs.rdtr.delay()) {
            Tick delay = igbe->regs.rdtr.delay() * igbe->intClock();
            DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n", delay);
            igbe->reschedule(igbe->rdtrEvent, curTick() + delay);
        }

        if (igbe->regs.radv.idv()) {
            Tick delay = igbe->regs.radv.idv() * igbe->intClock();
            DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n", delay);
            if (!igbe->radvEvent.scheduled()) {
                igbe->schedule(igbe->radvEvent, curTick() + delay);
            }
        }

        // if neither radv or rdtr, maybe itr is set...
        if (!igbe->regs.rdtr.delay() && !igbe->regs.radv.idv()) {
            DPRINTF(EthernetSM,
                    "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
            igbe->postInterrupt(IT_RXT);
        }

        // If the packet is small enough, interrupt appropriately
        // I wonder if this is delayed or not?!
        if (pktPtr->length <= igbe->regs.rsrpd.idv()) {
            DPRINTF(EthernetSM,
                    "RXS: Posting IT_SRPD beacuse small packet received\n");
            igbe->postInterrupt(IT_SRPD);
        }
        bytesCopied = 0;
    }

    pktPtr = NULL;
    igbe->checkDrain();
    enableSm();
    pktDone = true;

    // Move the finished descriptor from the unused to the used cache so
    // it is eligible for writeback.
    igbe->anBegin("RXS", "Done Updating Desc");
    DPRINTF(EthernetDesc, "Processing of this descriptor complete\n");
    igbe->anDq("RXS", annUnusedCacheQ);
    unusedCache.pop_front();
    igbe->anQ("RXS", annUsedCacheQ);
    usedCache.push_back(desc);
}

// Wake the RX state machine unless the device is draining.
void
IGbE::RxDescCache::enableSm()
{
    if (igbe->drainState() != DrainState::Draining) {
        igbe->rxTick = true;
        igbe->restartClock();
    }
}

// One-shot flag: reports (and clears) whether a packet finished since
// the last call.
bool
IGbE::RxDescCache::packetDone()
{
    if (pktDone) {
        pktDone = false;
        return true;
    }
    return false;
}

// True while any RX-side DMA/event is still in flight (used by drain).
bool
IGbE::RxDescCache::hasOutstandingEvents()
{
    return pktEvent.scheduled() || wbEvent.scheduled() ||
        fetchEvent.scheduled() || pktHdrEvent.scheduled() ||
        pktDataEvent.scheduled();

}

// Checkpoint RX-specific state on top of the generic cache state.
void
IGbE::RxDescCache::serialize(CheckpointOut &cp) const
{
    DescCache<RxDesc>::serialize(cp);
    SERIALIZE_SCALAR(pktDone);
    SERIALIZE_SCALAR(splitCount);
    SERIALIZE_SCALAR(bytesCopied);
}

void
IGbE::RxDescCache::unserialize(CheckpointIn &cp)
{
    DescCache<RxDesc>::unserialize(cp);
    UNSERIALIZE_SCALAR(pktDone);
    UNSERIALIZE_SCALAR(splitCount);
    UNSERIALIZE_SCALAR(bytesCopied);
}


///////////////////////////// IGbE::TxDescCache //////////////////////////////
// TX specialization: names the CPA annotation queues and initializes
// the TSO (TCP segmentation offload) bookkeeping state.
IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s)
    : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false),
      pktWaiting(false), pktMultiDesc(false),
      completionAddress(0), completionEnabled(false),
      useTso(false), tsoHeaderLen(0), tsoMss(0), tsoTotalLen(0), tsoUsedLen(0),
      tsoPrevSeq(0), tsoPktPayloadBytes(0), tsoLoadedHeader(false),
      tsoPktHasHeader(false), tsoDescBytesUsed(0), tsoCopyBytes(0), tsoPkts(0),
      pktEvent(this), headerEvent(this), nullEvent(this)
{
    annSmFetch = "TX Desc Fetch";
    annSmWb = "TX Desc Writeback";
    annUnusedDescQ = "TX Unused Descriptors";
    annUnusedCacheQ = "TX Unused Descriptor Cache";
    annUsedCacheQ = "TX Used Descriptor Cache";
    annUsedDescQ = "TX Used Descriptors";
    annDescQ = "TX Descriptors";
}

// Consume any context descriptors at the front of the cache, latching
// their TSO parameters, then (for TSO) start the DMA of the packet
// header from the first data descriptor.
void
IGbE::TxDescCache::processContextDesc()
{
    assert(unusedCache.size());
    TxDesc *desc;

    DPRINTF(EthernetDesc, "Checking and processing context descriptors\n");

    while (!useTso && unusedCache.size() &&
           TxdOp::isContext(unusedCache.front())) {
        DPRINTF(EthernetDesc, "Got context descriptor type...\n");

        desc = unusedCache.front();
        DPRINTF(EthernetDesc, "Descriptor upper: %#x lower: %#X\n",
                desc->d1, desc->d2);


        // is this going to be a tcp or udp packet?
        isTcp = TxdOp::tcp(desc) ? true : false;

        // setup all the TSO variables, they'll be ignored if we don't use
        // tso for this connection
        tsoHeaderLen = TxdOp::hdrlen(desc);
        tsoMss  = TxdOp::mss(desc);

        if (TxdOp::isType(desc, TxdOp::TXD_CNXT) && TxdOp::tse(desc)) {
            DPRINTF(EthernetDesc, "TCP offload enabled for packet hdrlen: "
                    "%d mss: %d paylen %d\n", TxdOp::hdrlen(desc),
                    TxdOp::mss(desc), TxdOp::getLen(desc));
            useTso = true;
            tsoTotalLen = TxdOp::getLen(desc);
            tsoLoadedHeader = false;
            tsoDescBytesUsed = 0;
            tsoUsedLen = 0;
            tsoPrevSeq = 0;
            tsoPktHasHeader = false;
            tsoPkts = 0;
            tsoCopyBytes = 0;
        }

        // Context descriptors are retired immediately -- they carry no
        // packet data.
        TxdOp::setDd(desc);
        unusedCache.pop_front();
        igbe->anDq("TXS", annUnusedCacheQ);
        usedCache.push_back(desc);
        igbe->anQ("TXS", annUsedCacheQ);
    }

    if (!unusedCache.size())
        return;

    // Advanced data descriptors can also enable TSO directly.
    desc = unusedCache.front();
    if (!useTso && TxdOp::isType(desc, TxdOp::TXD_ADVDATA) &&
        TxdOp::tse(desc)) {
        DPRINTF(EthernetDesc, "TCP offload(adv) enabled for packet "
                "hdrlen: %d mss: %d paylen %d\n",
                tsoHeaderLen, tsoMss, TxdOp::getTsoLen(desc));
        useTso = true;
        tsoTotalLen = TxdOp::getTsoLen(desc);
        tsoLoadedHeader = false;
        tsoDescBytesUsed = 0;
        tsoUsedLen = 0;
        tsoPrevSeq = 0;
        tsoPktHasHeader = false;
        tsoPkts = 0;
    }

    if (useTso && !tsoLoadedHeader) {
        // we need to fetch a header
        DPRINTF(EthernetDesc, "Starting DMA of TSO header\n");
        assert(TxdOp::isData(desc) && TxdOp::getLen(desc) >= tsoHeaderLen);
        pktWaiting = true;
        // tsoHeader is a fixed 256-byte staging buffer.
        assert(tsoHeaderLen <= 256);
        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
                      tsoHeaderLen, &headerEvent, tsoHeader, 0);
    }
}

// DMA of the TSO header finished: retire the descriptor if it held only
// the header, otherwise keep it and account the header bytes as used.
void
IGbE::TxDescCache::headerComplete()
{
    DPRINTF(EthernetDesc, "TSO: Fetching TSO header complete\n");
    pktWaiting = false;

    assert(unusedCache.size());
    TxDesc *desc = unusedCache.front();
    DPRINTF(EthernetDesc, "TSO: len: %d tsoHeaderLen: %d\n",
            TxdOp::getLen(desc), tsoHeaderLen);

    if (TxdOp::getLen(desc) == tsoHeaderLen) {
        tsoDescBytesUsed = 0;
        tsoLoadedHeader = true;
        unusedCache.pop_front();
        usedCache.push_back(desc);
    } else {
        DPRINTF(EthernetDesc, "TSO: header part of larger payload\n");
        tsoDescBytesUsed = tsoHeaderLen;
        tsoLoadedHeader = true;
    }
    enableSm();
    igbe->checkDrain();
}
// Size of the next packet (or TSO segment) that would be built from the
// descriptor at the front of the cache; 0 if no descriptor is cached.
unsigned
IGbE::TxDescCache::getPacketSize(EthPacketPtr p)
{
    if (!unusedCache.size())
        return 0;

    DPRINTF(EthernetDesc, "Starting processing of descriptor\n");

    assert(!useTso || tsoLoadedHeader);
    TxDesc *desc = unusedCache.front();

    if (useTso) {
        DPRINTF(EthernetDesc, "getPacket(): TxDescriptor data "
                "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
        DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
                "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
                tsoTotalLen, tsoUsedLen, tsoLoadedHeader);

        // Copy up to one MSS of payload, bounded by what remains in the
        // current descriptor; account for the header if the packet
        // under construction already contains it.
        if (tsoPktHasHeader)
            tsoCopyBytes = std::min((tsoMss + tsoHeaderLen) - p->length,
                                    TxdOp::getLen(desc) - tsoDescBytesUsed);
        else
            tsoCopyBytes = std::min(tsoMss,
                                    TxdOp::getLen(desc) - tsoDescBytesUsed);
        unsigned pkt_size =
            tsoCopyBytes + (tsoPktHasHeader ? 0 : tsoHeaderLen);

        DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d "
                "this descLen: %d\n",
                tsoDescBytesUsed, tsoCopyBytes, TxdOp::getLen(desc));
        DPRINTF(EthernetDesc, "TSO: pktHasHeader: %d\n", tsoPktHasHeader);
        DPRINTF(EthernetDesc, "TSO: Next packet is %d bytes\n", pkt_size);
        return pkt_size;
    }

    DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
            TxdOp::getLen(unusedCache.front()));
    return TxdOp::getLen(desc);
}

// Start the DMA that appends this descriptor's data (or the next TSO
// chunk) to packet `p`; for TSO, prepend the cached header first.
void
IGbE::TxDescCache::getPacketData(EthPacketPtr p)
{
    assert(unusedCache.size());

    TxDesc *desc;
    desc = unusedCache.front();

    DPRINTF(EthernetDesc, "getPacketData(): TxDescriptor data "
            "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
           TxdOp::getLen(desc));

    pktPtr = p;

    pktWaiting = true;

    DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d\n", p->length);

    if (useTso) {
        assert(tsoLoadedHeader);
        if (!tsoPktHasHeader) {
            DPRINTF(EthernetDesc,
                    "Loading TSO header (%d bytes) into start of packet\n",
                    tsoHeaderLen);
            memcpy(p->data, &tsoHeader,tsoHeaderLen);
            p->length +=tsoHeaderLen;
            tsoPktHasHeader = true;
        }
    }

    if (useTso) {
        DPRINTF(EthernetDesc,
                "Starting DMA of packet at offset %d length: %d\n",
                p->length, tsoCopyBytes);
        // Resume reading the descriptor buffer where the previous
        // segment left off.
        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc))
                      + tsoDescBytesUsed,
                      tsoCopyBytes, &pktEvent, p->data + p->length,
                      igbe->txReadDelay);
        tsoDescBytesUsed += tsoCopyBytes;
        assert(tsoDescBytesUsed <= TxdOp::getLen(desc));
    } else {
        igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
                      TxdOp::getLen(desc), &pktEvent, p->data + p->length,
                      igbe->txReadDelay);
    }
}

// DMA of descriptor data into the packet finished: update lengths,
// handle multi-descriptor packets, patch TSO IP/TCP/UDP headers, apply
// checksum offload, schedule TX interrupt timers, retire the descriptor
// and trigger writeback per TXDCTL.WTHRESH.
void
IGbE::TxDescCache::pktComplete()
{

    TxDesc *desc;
    assert(unusedCache.size());
    assert(pktPtr);

    igbe->anBegin("TXS", "Update Desc");

    DPRINTF(EthernetDesc, "DMA of packet complete\n");


    desc = unusedCache.front();
    assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
           TxdOp::getLen(desc));

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
            desc->d1, desc->d2);

    // Set the length of the data in the EtherPacket
    if (useTso) {
        DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
                "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
                tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
        pktPtr->simLength += tsoCopyBytes;
        pktPtr->length += tsoCopyBytes;
        tsoUsedLen += tsoCopyBytes;
        DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d\n",
                tsoDescBytesUsed, tsoCopyBytes);
    } else {
        pktPtr->simLength += TxdOp::getLen(desc);
        pktPtr->length += TxdOp::getLen(desc);
    }



    // Packet not finished yet: either a multi-descriptor packet without
    // EOP, or a TSO segment that still has room before reaching MSS.
    if ((!TxdOp::eop(desc) && !useTso) ||
        (pktPtr->length < ( tsoMss + tsoHeaderLen) &&
         tsoTotalLen != tsoUsedLen && useTso)) {
        assert(!useTso || (tsoDescBytesUsed == TxdOp::getLen(desc)));
        igbe->anDq("TXS", annUnusedCacheQ);
        unusedCache.pop_front();
        igbe->anQ("TXS", annUsedCacheQ);
        usedCache.push_back(desc);

        tsoDescBytesUsed = 0;
        pktDone = true;
        pktWaiting = false;
        pktMultiDesc = true;

        DPRINTF(EthernetDesc, "Partial Packet Descriptor of %d bytes Done\n",
                pktPtr->length);
        pktPtr = NULL;

        enableSm();
        igbe->checkDrain();
        return;
    }


    pktMultiDesc = false;
    // no support for vlans
    assert(!TxdOp::vle(desc));

    // we only support single packet descriptors at this point
    if (!useTso)
        assert(TxdOp::eop(desc));

    // set that this packet is done
    if (TxdOp::rs(desc))
        TxdOp::setDd(desc);

    DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
            desc->d1, desc->d2);

    if (useTso) {
        // Rewrite per-segment header fields: bump the IP id, fix the
        // IP/UDP lengths, advance the TCP sequence number, and strip
        // FIN/PSH from all but the last segment.
        IpPtr ip(pktPtr);
        if (ip) {
            DPRINTF(EthernetDesc, "TSO: Modifying IP header. Id + %d\n",
                    tsoPkts);
            ip->id(ip->id() + tsoPkts++);
            ip->len(pktPtr->length - EthPtr(pktPtr)->size());

            TcpPtr tcp(ip);
            if (tcp) {
                DPRINTF(EthernetDesc,
                        "TSO: Modifying TCP header. old seq %d + %d\n",
                        tcp->seq(), tsoPrevSeq);
                tcp->seq(tcp->seq() + tsoPrevSeq);
                if (tsoUsedLen != tsoTotalLen)
                    tcp->flags(tcp->flags() & ~9); // clear fin & psh
            }
            UdpPtr udp(ip);
            if (udp) {
                DPRINTF(EthernetDesc, "TSO: Modifying UDP header.\n");
                udp->len(pktPtr->length - EthPtr(pktPtr)->size());
            }
        }
        tsoPrevSeq = tsoUsedLen;
    }

    if (DTRACE(EthernetDesc)) {
        IpPtr ip(pktPtr);
        if (ip)
            DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n",
                    ip->id());
        else
            DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
    }

    // Checksums are only ofloaded for new descriptor types
    if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) {
        DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
        IpPtr ip(pktPtr);
        assert(ip);
        if (TxdOp::ixsm(desc)) {
            ip->sum(0);
            ip->sum(cksum(ip));
            igbe->txIpChecksums++;
            DPRINTF(EthernetDesc, "Calculated IP checksum\n");
        }
        if (TxdOp::txsm(desc)) {
            TcpPtr tcp(ip);
            UdpPtr udp(ip);
            if (tcp) {
                tcp->sum(0);
                tcp->sum(cksum(tcp));
                igbe->txTcpChecksums++;
                DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
            } else if (udp) {
                assert(udp);
                udp->sum(0);
                udp->sum(cksum(udp));
                igbe->txUdpChecksums++;
                DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
            } else {
                panic("Told to checksum, but don't know how\n");
            }
        }
    }

    if (TxdOp::ide(desc)) {
        // Deal with the rx timer interrupts
        DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
        if (igbe->regs.tidv.idv()) {
            Tick delay = igbe->regs.tidv.idv() * igbe->intClock();
            DPRINTF(EthernetDesc, "setting tidv\n");
            igbe->reschedule(igbe->tidvEvent, curTick() + delay, true);
        }

        if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
            Tick delay = igbe->regs.tadv.idv() * igbe->intClock();
            DPRINTF(EthernetDesc, "setting tadv\n");
            if (!igbe->tadvEvent.scheduled()) {
                igbe->schedule(igbe->tadvEvent, curTick() + delay);
            }
        }
    }


    // Retire the descriptor unless TSO still has bytes left in it.
    if (!useTso || TxdOp::getLen(desc) == tsoDescBytesUsed) {
        DPRINTF(EthernetDesc, "Descriptor Done\n");
        igbe->anDq("TXS", annUnusedCacheQ);
        unusedCache.pop_front();
        igbe->anQ("TXS", annUsedCacheQ);
        usedCache.push_back(desc);
        tsoDescBytesUsed = 0;
    }

    if (useTso && tsoUsedLen == tsoTotalLen)
        useTso = false;


    DPRINTF(EthernetDesc,
            "------Packet of %d bytes ready for transmission-------\n",
            pktPtr->length);
    pktDone = true;
    pktWaiting = false;
    pktPtr = NULL;
    tsoPktHasHeader = false;

    // WTHRESH controls how many used descriptors accumulate before a
    // writeback; gran() selects cache-block vs. descriptor granularity.
    if (igbe->regs.txdctl.wthresh() == 0) {
        igbe->anBegin("TXS", "Desc Writeback");
        DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
        writeback(0);
    } else if (!igbe->regs.txdctl.gran() && igbe->regs.txdctl.wthresh() <=
               descInBlock(usedCache.size())) {
        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
        igbe->anBegin("TXS", "Desc Writeback");
        writeback((igbe->cacheBlockSize()-1)>>4);
    } else if (igbe->regs.txdctl.wthresh() <= usedCache.size()) {
        DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
        igbe->anBegin("TXS", "Desc Writeback");
        writeback((igbe->cacheBlockSize()-1)>>4);
    }

    enableSm();
    igbe->checkDrain();
}
igbe->postInterrupt(iGbReg::IT_TXDW); 1960 if (completionEnabled) { 1961 descEnd = igbe->regs.tdh(); 1962 DPRINTF(EthernetDesc, 1963 "Completion writing back value: %d to addr: %#x\n", descEnd, 1964 completionAddress); 1965 igbe->dmaWrite(pciToDma(mbits(completionAddress, 63, 2)), 1966 sizeof(descEnd), &nullEvent, (uint8_t*)&descEnd, 0); 1967 } 1968} 1969 1970void 1971IGbE::TxDescCache::serialize(CheckpointOut &cp) const 1972{ 1973 DescCache<TxDesc>::serialize(cp); 1974 1975 SERIALIZE_SCALAR(pktDone); 1976 SERIALIZE_SCALAR(isTcp); 1977 SERIALIZE_SCALAR(pktWaiting); 1978 SERIALIZE_SCALAR(pktMultiDesc); 1979 1980 SERIALIZE_SCALAR(useTso); 1981 SERIALIZE_SCALAR(tsoHeaderLen); 1982 SERIALIZE_SCALAR(tsoMss); 1983 SERIALIZE_SCALAR(tsoTotalLen); 1984 SERIALIZE_SCALAR(tsoUsedLen); 1985 SERIALIZE_SCALAR(tsoPrevSeq);; 1986 SERIALIZE_SCALAR(tsoPktPayloadBytes); 1987 SERIALIZE_SCALAR(tsoLoadedHeader); 1988 SERIALIZE_SCALAR(tsoPktHasHeader); 1989 SERIALIZE_ARRAY(tsoHeader, 256); 1990 SERIALIZE_SCALAR(tsoDescBytesUsed); 1991 SERIALIZE_SCALAR(tsoCopyBytes); 1992 SERIALIZE_SCALAR(tsoPkts); 1993 1994 SERIALIZE_SCALAR(completionAddress); 1995 SERIALIZE_SCALAR(completionEnabled); 1996 SERIALIZE_SCALAR(descEnd); 1997} 1998 1999void 2000IGbE::TxDescCache::unserialize(CheckpointIn &cp) 2001{ 2002 DescCache<TxDesc>::unserialize(cp); 2003 2004 UNSERIALIZE_SCALAR(pktDone); 2005 UNSERIALIZE_SCALAR(isTcp); 2006 UNSERIALIZE_SCALAR(pktWaiting); 2007 UNSERIALIZE_SCALAR(pktMultiDesc); 2008 2009 UNSERIALIZE_SCALAR(useTso); 2010 UNSERIALIZE_SCALAR(tsoHeaderLen); 2011 UNSERIALIZE_SCALAR(tsoMss); 2012 UNSERIALIZE_SCALAR(tsoTotalLen); 2013 UNSERIALIZE_SCALAR(tsoUsedLen); 2014 UNSERIALIZE_SCALAR(tsoPrevSeq);; 2015 UNSERIALIZE_SCALAR(tsoPktPayloadBytes); 2016 UNSERIALIZE_SCALAR(tsoLoadedHeader); 2017 UNSERIALIZE_SCALAR(tsoPktHasHeader); 2018 UNSERIALIZE_ARRAY(tsoHeader, 256); 2019 UNSERIALIZE_SCALAR(tsoDescBytesUsed); 2020 UNSERIALIZE_SCALAR(tsoCopyBytes); 2021 UNSERIALIZE_SCALAR(tsoPkts); 2022 2023 
UNSERIALIZE_SCALAR(completionAddress); 2024 UNSERIALIZE_SCALAR(completionEnabled); 2025 UNSERIALIZE_SCALAR(descEnd); 2026} 2027 2028bool 2029IGbE::TxDescCache::packetAvailable() 2030{ 2031 if (pktDone) { 2032 pktDone = false; 2033 return true; 2034 } 2035 return false; 2036} 2037 2038void 2039IGbE::TxDescCache::enableSm() 2040{ 2041 if (igbe->drainState() != DrainState::Draining) { 2042 igbe->txTick = true; 2043 igbe->restartClock(); 2044 } 2045} 2046 2047bool 2048IGbE::TxDescCache::hasOutstandingEvents() 2049{ 2050 return pktEvent.scheduled() || wbEvent.scheduled() || 2051 fetchEvent.scheduled(); 2052} 2053 2054 2055///////////////////////////////////// IGbE ///////////////////////////////// 2056 2057void 2058IGbE::restartClock() 2059{ 2060 if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) && 2061 drainState() == DrainState::Running) 2062 schedule(tickEvent, clockEdge(Cycles(1))); 2063} 2064 2065DrainState 2066IGbE::drain() 2067{ 2068 unsigned int count(0); 2069 if (rxDescCache.hasOutstandingEvents() || 2070 txDescCache.hasOutstandingEvents()) { 2071 count++; 2072 } 2073 2074 txFifoTick = false; 2075 txTick = false; 2076 rxTick = false; 2077 2078 if (tickEvent.scheduled()) 2079 deschedule(tickEvent); 2080 2081 if (count) { 2082 DPRINTF(Drain, "IGbE not drained\n"); 2083 return DrainState::Draining; 2084 } else 2085 return DrainState::Drained; 2086} 2087 2088void 2089IGbE::drainResume() 2090{ 2091 Drainable::drainResume(); 2092 2093 txFifoTick = true; 2094 txTick = true; 2095 rxTick = true; 2096 2097 restartClock(); 2098 DPRINTF(EthernetSM, "resuming from drain"); 2099} 2100 2101void 2102IGbE::checkDrain() 2103{ 2104 if (drainState() != DrainState::Draining) 2105 return; 2106 2107 txFifoTick = false; 2108 txTick = false; 2109 rxTick = false; 2110 if (!rxDescCache.hasOutstandingEvents() && 2111 !txDescCache.hasOutstandingEvents()) { 2112 DPRINTF(Drain, "IGbE done draining, processing drain event\n"); 2113 signalDrainDone(); 2114 } 2115} 2116 2117void 
2118IGbE::txStateMachine() 2119{ 2120 if (!regs.tctl.en()) { 2121 txTick = false; 2122 DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n"); 2123 return; 2124 } 2125 2126 // If we have a packet available and it's length is not 0 (meaning it's not 2127 // a multidescriptor packet) put it in the fifo, otherwise an the next 2128 // iteration we'll get the rest of the data 2129 if (txPacket && txDescCache.packetAvailable() 2130 && !txDescCache.packetMultiDesc() && txPacket->length) { 2131 anQ("TXS", "TX FIFO Q"); 2132 DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n"); 2133#ifndef NDEBUG 2134 bool success = 2135#endif 2136 txFifo.push(txPacket); 2137 txFifoTick = true && drainState() != DrainState::Draining; 2138 assert(success); 2139 txPacket = NULL; 2140 anBegin("TXS", "Desc Writeback"); 2141 txDescCache.writeback((cacheBlockSize()-1)>>4); 2142 return; 2143 } 2144 2145 // Only support descriptor granularity 2146 if (regs.txdctl.lwthresh() && 2147 txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) { 2148 DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n"); 2149 postInterrupt(IT_TXDLOW); 2150 } 2151 2152 if (!txPacket) { 2153 txPacket = std::make_shared<EthPacketData>(16384); 2154 } 2155 2156 if (!txDescCache.packetWaiting()) { 2157 if (txDescCache.descLeft() == 0) { 2158 postInterrupt(IT_TXQE); 2159 anBegin("TXS", "Desc Writeback"); 2160 txDescCache.writeback(0); 2161 anBegin("TXS", "Desc Fetch"); 2162 anWe("TXS", txDescCache.annUnusedCacheQ); 2163 txDescCache.fetchDescriptors(); 2164 DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing " 2165 "writeback stopping ticking and posting TXQE\n"); 2166 txTick = false; 2167 return; 2168 } 2169 2170 2171 if (!(txDescCache.descUnused())) { 2172 anBegin("TXS", "Desc Fetch"); 2173 txDescCache.fetchDescriptors(); 2174 anWe("TXS", txDescCache.annUnusedCacheQ); 2175 DPRINTF(EthernetSM, "TXS: No descriptors available in cache, " 2176 "fetching and stopping ticking\n"); 2177 txTick = false; 2178 
return; 2179 } 2180 anPq("TXS", txDescCache.annUnusedCacheQ); 2181 2182 2183 txDescCache.processContextDesc(); 2184 if (txDescCache.packetWaiting()) { 2185 DPRINTF(EthernetSM, 2186 "TXS: Fetching TSO header, stopping ticking\n"); 2187 txTick = false; 2188 return; 2189 } 2190 2191 unsigned size = txDescCache.getPacketSize(txPacket); 2192 if (size > 0 && txFifo.avail() > size) { 2193 anRq("TXS", "TX FIFO Q"); 2194 anBegin("TXS", "DMA Packet"); 2195 DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and " 2196 "beginning DMA of next packet\n", size); 2197 txFifo.reserve(size); 2198 txDescCache.getPacketData(txPacket); 2199 } else if (size == 0) { 2200 DPRINTF(EthernetSM, "TXS: getPacketSize returned: %d\n", size); 2201 DPRINTF(EthernetSM, 2202 "TXS: No packets to get, writing back used descriptors\n"); 2203 anBegin("TXS", "Desc Writeback"); 2204 txDescCache.writeback(0); 2205 } else { 2206 anWf("TXS", "TX FIFO Q"); 2207 DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space " 2208 "available in FIFO\n"); 2209 txTick = false; 2210 } 2211 2212 2213 return; 2214 } 2215 DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n"); 2216 txTick = false; 2217} 2218 2219bool 2220IGbE::ethRxPkt(EthPacketPtr pkt) 2221{ 2222 rxBytes += pkt->length; 2223 rxPackets++; 2224 2225 DPRINTF(Ethernet, "RxFIFO: Receiving pcakte from wire\n"); 2226 anBegin("RXQ", "Wire Recv"); 2227 2228 2229 if (!regs.rctl.en()) { 2230 DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n"); 2231 anBegin("RXQ", "FIFO Drop", CPA::FL_BAD); 2232 return true; 2233 } 2234 2235 // restart the state machines if they are stopped 2236 rxTick = true && drainState() != DrainState::Draining; 2237 if ((rxTick || txTick) && !tickEvent.scheduled()) { 2238 DPRINTF(EthernetSM, 2239 "RXS: received packet into fifo, starting ticking\n"); 2240 restartClock(); 2241 } 2242 2243 if (!rxFifo.push(pkt)) { 2244 DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... 
dropped\n"); 2245 postInterrupt(IT_RXO, true); 2246 anBegin("RXQ", "FIFO Drop", CPA::FL_BAD); 2247 return false; 2248 } 2249 2250 if (CPA::available() && cpa->enabled()) { 2251 assert(sys->numSystemsRunning <= 2); 2252 System *other_sys; 2253 if (sys->systemList[0] == sys) 2254 other_sys = sys->systemList[1]; 2255 else 2256 other_sys = sys->systemList[0]; 2257 2258 cpa->hwDq(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys); 2259 anQ("RXQ", "RX FIFO Q"); 2260 cpa->hwWe(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys); 2261 } 2262 2263 return true; 2264} 2265 2266 2267void 2268IGbE::rxStateMachine() 2269{ 2270 if (!regs.rctl.en()) { 2271 rxTick = false; 2272 DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n"); 2273 return; 2274 } 2275 2276 // If the packet is done check for interrupts/descriptors/etc 2277 if (rxDescCache.packetDone()) { 2278 rxDmaPacket = false; 2279 DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n"); 2280 int descLeft = rxDescCache.descLeft(); 2281 DPRINTF(EthernetSM, "RXS: descLeft: %d rdmts: %d rdlen: %d\n", 2282 descLeft, regs.rctl.rdmts(), regs.rdlen()); 2283 switch (regs.rctl.rdmts()) { 2284 case 2: if (descLeft > .125 * regs.rdlen()) break; 2285 case 1: if (descLeft > .250 * regs.rdlen()) break; 2286 case 0: if (descLeft > .500 * regs.rdlen()) break; 2287 DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) " 2288 "because of descriptors left\n"); 2289 postInterrupt(IT_RXDMT); 2290 break; 2291 } 2292 2293 if (rxFifo.empty()) 2294 rxDescCache.writeback(0); 2295 2296 if (descLeft == 0) { 2297 anBegin("RXS", "Writeback Descriptors"); 2298 rxDescCache.writeback(0); 2299 DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing" 2300 " writeback and stopping ticking\n"); 2301 rxTick = false; 2302 } 2303 2304 // only support descriptor granulaties 2305 assert(regs.rxdctl.gran()); 2306 2307 if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) { 2308 DPRINTF(EthernetSM, 2309 "RXS: Writing back because WTHRESH >= 
descUsed\n"); 2310 anBegin("RXS", "Writeback Descriptors"); 2311 if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4)) 2312 rxDescCache.writeback(regs.rxdctl.wthresh()-1); 2313 else 2314 rxDescCache.writeback((cacheBlockSize()-1)>>4); 2315 } 2316 2317 if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) && 2318 ((rxDescCache.descLeft() - rxDescCache.descUnused()) > 2319 regs.rxdctl.hthresh())) { 2320 DPRINTF(EthernetSM, "RXS: Fetching descriptors because " 2321 "descUnused < PTHRESH\n"); 2322 anBegin("RXS", "Fetch Descriptors"); 2323 rxDescCache.fetchDescriptors(); 2324 } 2325 2326 if (rxDescCache.descUnused() == 0) { 2327 anBegin("RXS", "Fetch Descriptors"); 2328 rxDescCache.fetchDescriptors(); 2329 anWe("RXS", rxDescCache.annUnusedCacheQ); 2330 DPRINTF(EthernetSM, "RXS: No descriptors available in cache, " 2331 "fetching descriptors and stopping ticking\n"); 2332 rxTick = false; 2333 } 2334 return; 2335 } 2336 2337 if (rxDmaPacket) { 2338 DPRINTF(EthernetSM, 2339 "RXS: stopping ticking until packet DMA completes\n"); 2340 rxTick = false; 2341 return; 2342 } 2343 2344 if (!rxDescCache.descUnused()) { 2345 anBegin("RXS", "Fetch Descriptors"); 2346 rxDescCache.fetchDescriptors(); 2347 anWe("RXS", rxDescCache.annUnusedCacheQ); 2348 DPRINTF(EthernetSM, "RXS: No descriptors available in cache, " 2349 "stopping ticking\n"); 2350 rxTick = false; 2351 DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n"); 2352 return; 2353 } 2354 anPq("RXS", rxDescCache.annUnusedCacheQ); 2355 2356 if (rxFifo.empty()) { 2357 anWe("RXS", "RX FIFO Q"); 2358 DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n"); 2359 rxTick = false; 2360 return; 2361 } 2362 anPq("RXS", "RX FIFO Q"); 2363 anBegin("RXS", "Get Desc"); 2364 2365 EthPacketPtr pkt; 2366 pkt = rxFifo.front(); 2367 2368 2369 pktOffset = rxDescCache.writePacket(pkt, pktOffset); 2370 DPRINTF(EthernetSM, "RXS: Writing packet into memory\n"); 2371 if (pktOffset == pkt->length) { 2372 anBegin( "RXS", "FIFO Dequeue"); 2373 
DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n"); 2374 pktOffset = 0; 2375 anDq("RXS", "RX FIFO Q"); 2376 rxFifo.pop(); 2377 } 2378 2379 DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n"); 2380 rxTick = false; 2381 rxDmaPacket = true; 2382 anBegin("RXS", "DMA Packet"); 2383} 2384 2385void 2386IGbE::txWire() 2387{ 2388 if (txFifo.empty()) { 2389 anWe("TXQ", "TX FIFO Q"); 2390 txFifoTick = false; 2391 return; 2392 } 2393 2394 2395 anPq("TXQ", "TX FIFO Q"); 2396 if (etherInt->sendPacket(txFifo.front())) { 2397 anQ("TXQ", "WireQ"); 2398 if (DTRACE(EthernetSM)) { 2399 IpPtr ip(txFifo.front()); 2400 if (ip) 2401 DPRINTF(EthernetSM, "Transmitting Ip packet with Id=%d\n", 2402 ip->id()); 2403 else 2404 DPRINTF(EthernetSM, "Transmitting Non-Ip packet\n"); 2405 } 2406 anDq("TXQ", "TX FIFO Q"); 2407 anBegin("TXQ", "Wire Send"); 2408 DPRINTF(EthernetSM, 2409 "TxFIFO: Successful transmit, bytes available in fifo: %d\n", 2410 txFifo.avail()); 2411 2412 txBytes += txFifo.front()->length; 2413 txPackets++; 2414 txFifoTick = false; 2415 2416 txFifo.pop(); 2417 } else { 2418 // We'll get woken up when the packet ethTxDone() gets called 2419 txFifoTick = false; 2420 } 2421} 2422 2423void 2424IGbE::tick() 2425{ 2426 DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n"); 2427 2428 if (rxTick) 2429 rxStateMachine(); 2430 2431 if (txTick) 2432 txStateMachine(); 2433 2434 if (txFifoTick) 2435 txWire(); 2436 2437 2438 if (rxTick || txTick || txFifoTick) 2439 schedule(tickEvent, curTick() + clockPeriod()); 2440} 2441 2442void 2443IGbE::ethTxDone() 2444{ 2445 anBegin("TXQ", "Send Done"); 2446 // restart the tx state machines if they are stopped 2447 // fifo to send another packet 2448 // tx sm to put more data into the fifo 2449 txFifoTick = true && drainState() != DrainState::Draining; 2450 if (txDescCache.descLeft() != 0 && drainState() != DrainState::Draining) 2451 txTick = true; 2452 2453 restartClock(); 2454 txWire(); 2455 DPRINTF(EthernetSM, 
"TxFIFO: Transmission complete\n"); 2456} 2457 2458void 2459IGbE::serialize(CheckpointOut &cp) const 2460{ 2461 PciDevice::serialize(cp); 2462 2463 regs.serialize(cp); 2464 SERIALIZE_SCALAR(eeOpBits); 2465 SERIALIZE_SCALAR(eeAddrBits); 2466 SERIALIZE_SCALAR(eeDataBits); 2467 SERIALIZE_SCALAR(eeOpcode); 2468 SERIALIZE_SCALAR(eeAddr); 2469 SERIALIZE_SCALAR(lastInterrupt); 2470 SERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE); 2471 2472 rxFifo.serialize("rxfifo", cp); 2473 txFifo.serialize("txfifo", cp); 2474 2475 bool txPktExists = txPacket != nullptr; 2476 SERIALIZE_SCALAR(txPktExists); 2477 if (txPktExists) 2478 txPacket->serialize("txpacket", cp); 2479 2480 Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0, 2481 inter_time = 0; 2482 2483 if (rdtrEvent.scheduled()) 2484 rdtr_time = rdtrEvent.when(); 2485 SERIALIZE_SCALAR(rdtr_time); 2486 2487 if (radvEvent.scheduled()) 2488 radv_time = radvEvent.when(); 2489 SERIALIZE_SCALAR(radv_time); 2490 2491 if (tidvEvent.scheduled()) 2492 tidv_time = tidvEvent.when(); 2493 SERIALIZE_SCALAR(tidv_time); 2494 2495 if (tadvEvent.scheduled()) 2496 tadv_time = tadvEvent.when(); 2497 SERIALIZE_SCALAR(tadv_time); 2498 2499 if (interEvent.scheduled()) 2500 inter_time = interEvent.when(); 2501 SERIALIZE_SCALAR(inter_time); 2502 2503 SERIALIZE_SCALAR(pktOffset); 2504 2505 txDescCache.serializeSection(cp, "TxDescCache"); 2506 rxDescCache.serializeSection(cp, "RxDescCache"); 2507} 2508 2509void 2510IGbE::unserialize(CheckpointIn &cp) 2511{ 2512 PciDevice::unserialize(cp); 2513 2514 regs.unserialize(cp); 2515 UNSERIALIZE_SCALAR(eeOpBits); 2516 UNSERIALIZE_SCALAR(eeAddrBits); 2517 UNSERIALIZE_SCALAR(eeDataBits); 2518 UNSERIALIZE_SCALAR(eeOpcode); 2519 UNSERIALIZE_SCALAR(eeAddr); 2520 UNSERIALIZE_SCALAR(lastInterrupt); 2521 UNSERIALIZE_ARRAY(flash,iGbReg::EEPROM_SIZE); 2522 2523 rxFifo.unserialize("rxfifo", cp); 2524 txFifo.unserialize("txfifo", cp); 2525 2526 bool txPktExists; 2527 UNSERIALIZE_SCALAR(txPktExists); 2528 if 
(txPktExists) { 2529 txPacket = std::make_shared<EthPacketData>(16384); 2530 txPacket->unserialize("txpacket", cp); 2531 } 2532 2533 rxTick = true; 2534 txTick = true; 2535 txFifoTick = true; 2536 2537 Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time; 2538 UNSERIALIZE_SCALAR(rdtr_time); 2539 UNSERIALIZE_SCALAR(radv_time); 2540 UNSERIALIZE_SCALAR(tidv_time); 2541 UNSERIALIZE_SCALAR(tadv_time); 2542 UNSERIALIZE_SCALAR(inter_time); 2543 2544 if (rdtr_time) 2545 schedule(rdtrEvent, rdtr_time); 2546 2547 if (radv_time) 2548 schedule(radvEvent, radv_time); 2549 2550 if (tidv_time) 2551 schedule(tidvEvent, tidv_time); 2552 2553 if (tadv_time) 2554 schedule(tadvEvent, tadv_time); 2555 2556 if (inter_time) 2557 schedule(interEvent, inter_time); 2558 2559 UNSERIALIZE_SCALAR(pktOffset); 2560 2561 txDescCache.unserializeSection(cp, "TxDescCache"); 2562 rxDescCache.unserializeSection(cp, "RxDescCache"); 2563} 2564 2565IGbE * 2566IGbEParams::create() 2567{ 2568 return new IGbE(this); 2569}
|