X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fi40e%2Fi40e_rxtx.c;h=e7af655c6afb3ecd79c383005bf9f90441a01b04;hb=c8b9a3e3fe1bf161003b57af6a5bce17daf37a4e;hp=fcacd340ae079d619a9fb385a69e7995f2f1ab25;hpb=72514b5d55431c4798ad25bcf6c67bc62b0a5800;p=dpdk.git diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c index fcacd340ae..e7af655c6a 100644 --- a/drivers/net/i40e/i40e_rxtx.c +++ b/drivers/net/i40e/i40e_rxtx.c @@ -94,18 +94,44 @@ static uint16_t i40e_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); +static inline void +i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp) +{ + if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) & + (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) { + mb->ol_flags |= PKT_RX_VLAN_PKT; + mb->vlan_tci = + rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1); + PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u", + rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1)); + } else { + mb->vlan_tci = 0; + } +#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC + if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) & + (1 << I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) { + mb->ol_flags |= PKT_RX_QINQ_PKT; + mb->vlan_tci_outer = mb->vlan_tci; + mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2); + PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u", + rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_1), + rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2)); + } else { + mb->vlan_tci_outer = 0; + } +#endif + PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u", + mb->vlan_tci, mb->vlan_tci_outer); +} + /* Translate the rx descriptor status to pkt flags */ static inline uint64_t i40e_rxd_status_to_pkt_flags(uint64_t qword) { uint64_t flags; - /* Check if VLAN packet */ - flags = qword & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ? - PKT_RX_VLAN_PKT : 0; - /* Check if RSS_HASH */ - flags |= (((qword >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) & + flags = (((qword >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) & I40E_RX_DESC_FLTSTAT_RSS_HASH) == I40E_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0; @@ -150,272 +176,561 @@ i40e_rxd_error_to_pkt_flags(uint64_t qword) return flags; } -/* Translate pkt types to pkt flags */ +/* Function to check and set the ieee1588 timesync index and get the + * appropriate flags. 
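The new i40e_rxd_to_vlan_tci() helper above centralizes RX VLAN tag extraction: L2TAG1 from qword0 always feeds vlan_tci, and with 32-byte descriptors an L2TAG2P hit promotes that value into vlan_tci_outer while L2TAG2_2 takes over vlan_tci. A minimal consumer sketch of what the mbuf ends up carrying, using the 2.1-era flag names from this patch (dump_vlan_tags() is a hypothetical illustration, not driver code):

#include <stdio.h>
#include <rte_mbuf.h>

static void
dump_vlan_tags(const struct rte_mbuf *m)
{
	if (m->ol_flags & PKT_RX_QINQ_PKT)
		/* outer tag came from L2TAG1, inner from L2TAG2_2 */
		printf("outer=%u inner=%u\n", m->vlan_tci_outer, m->vlan_tci);
	else if (m->ol_flags & PKT_RX_VLAN_PKT)
		printf("vlan=%u\n", m->vlan_tci);
}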
+ */ +#ifdef RTE_LIBRTE_IEEE1588 static inline uint64_t -i40e_rxd_ptype_to_pkt_flags(uint64_t qword) +i40e_get_iee15888_flags(struct rte_mbuf *mb, uint64_t qword) { - uint8_t ptype = (uint8_t)((qword & I40E_RXD_QW1_PTYPE_MASK) >> - I40E_RXD_QW1_PTYPE_SHIFT); - static const uint64_t ip_ptype_map[I40E_MAX_PKT_TYPE] = { - 0, /* PTYPE 0 */ - 0, /* PTYPE 1 */ - 0, /* PTYPE 2 */ - 0, /* PTYPE 3 */ - 0, /* PTYPE 4 */ - 0, /* PTYPE 5 */ - 0, /* PTYPE 6 */ - 0, /* PTYPE 7 */ - 0, /* PTYPE 8 */ - 0, /* PTYPE 9 */ - 0, /* PTYPE 10 */ - 0, /* PTYPE 11 */ - 0, /* PTYPE 12 */ - 0, /* PTYPE 13 */ - 0, /* PTYPE 14 */ - 0, /* PTYPE 15 */ - 0, /* PTYPE 16 */ - 0, /* PTYPE 17 */ - 0, /* PTYPE 18 */ - 0, /* PTYPE 19 */ - 0, /* PTYPE 20 */ - 0, /* PTYPE 21 */ - PKT_RX_IPV4_HDR, /* PTYPE 22 */ - PKT_RX_IPV4_HDR, /* PTYPE 23 */ - PKT_RX_IPV4_HDR, /* PTYPE 24 */ - 0, /* PTYPE 25 */ - PKT_RX_IPV4_HDR, /* PTYPE 26 */ - PKT_RX_IPV4_HDR, /* PTYPE 27 */ - PKT_RX_IPV4_HDR, /* PTYPE 28 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 29 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 30 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 31 */ - 0, /* PTYPE 32 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 33 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 34 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 35 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 36 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 37 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 38 */ - 0, /* PTYPE 39 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 40 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 41 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 42 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 43 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 44 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 45 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 46 */ - 0, /* PTYPE 47 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 48 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 49 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 50 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 51 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 52 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 53 */ - 0, /* PTYPE 54 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 55 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 56 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 57 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 58 */ - PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 59 */ - PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 60 */ - PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 61 */ - 0, /* PTYPE 62 */ - PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 63 */ - PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 64 */ - PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 65 */ - PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 66 */ - PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 67 */ - PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 68 */ - 0, /* PTYPE 69 */ - PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 70 */ - PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 71 */ - PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 72 */ - PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 73 */ - PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 74 */ - PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 75 */ - PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 76 */ - 0, /* PTYPE 77 */ - PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 78 */ - PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 79 */ - PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 80 */ - PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 81 */ - PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 82 */ - PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 83 */ - 0, /* PTYPE 84 */ - PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 85 */ - PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 86 */ - PKT_RX_IPV4_HDR_EXT, /* PTYPE 87 */ - PKT_RX_IPV6_HDR, /* PTYPE 88 */ - PKT_RX_IPV6_HDR, /* PTYPE 89 */ - PKT_RX_IPV6_HDR, /* PTYPE 90 */ - 0, /* PTYPE 91 */ - PKT_RX_IPV6_HDR, /* PTYPE 92 */ - PKT_RX_IPV6_HDR, /* PTYPE 93 */ - PKT_RX_IPV6_HDR, /* PTYPE 94 */ - PKT_RX_IPV6_HDR_EXT, /* PTYPE 95 */ - PKT_RX_IPV6_HDR_EXT, /* PTYPE 96 */ - PKT_RX_IPV6_HDR_EXT, /* PTYPE 97 */ - 0, /* PTYPE 98 */ - PKT_RX_IPV6_HDR_EXT, /* PTYPE 99 */ - 
PKT_RX_IPV6_HDR_EXT, /* PTYPE 100 */ - PKT_RX_IPV6_HDR_EXT, /* PTYPE 101 */ - PKT_RX_IPV6_HDR_EXT, /* PTYPE 102 */ - PKT_RX_IPV6_HDR_EXT, /* PTYPE 103 */ - PKT_RX_IPV6_HDR_EXT, /* PTYPE 104 */ - 0, /* PTYPE 105 */ - PKT_RX_IPV6_HDR_EXT, /* PTYPE 106 */ - PKT_RX_IPV6_HDR_EXT, /* PTYPE 107 */ - PKT_RX_IPV6_HDR_EXT, /* PTYPE 108 */ - PKT_RX_IPV6_HDR_EXT, /* PTYPE 109 */ - PKT_RX_IPV6_HDR_EXT, /* PTYPE 110 */ - PKT_RX_IPV6_HDR_EXT, /* PTYPE 111 */ - PKT_RX_IPV6_HDR_EXT, /* PTYPE 112 */ - 0, /* PTYPE 113 */ - PKT_RX_IPV6_HDR_EXT, /* PTYPE 114 */ - PKT_RX_IPV6_HDR_EXT, /* PTYPE 115 */ - PKT_RX_IPV6_HDR_EXT, /* PTYPE 116 */ - PKT_RX_IPV6_HDR_EXT, /* PTYPE 117 */ - PKT_RX_IPV6_HDR_EXT, /* PTYPE 118 */ - PKT_RX_IPV6_HDR_EXT, /* PTYPE 119 */ - 0, /* PTYPE 120 */ - PKT_RX_IPV6_HDR_EXT, /* PTYPE 121 */ - PKT_RX_IPV6_HDR_EXT, /* PTYPE 122 */ - PKT_RX_IPV6_HDR_EXT, /* PTYPE 123 */ - PKT_RX_IPV6_HDR_EXT, /* PTYPE 124 */ - PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 125 */ - PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 126 */ - PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 127 */ - 0, /* PTYPE 128 */ - PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 129 */ - PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 130 */ - PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 131 */ - PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 132 */ - PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 133 */ - PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 134 */ - 0, /* PTYPE 135 */ - PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 136 */ - PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 137 */ - PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 138 */ - PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 139 */ - PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 140 */ - PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 141 */ - PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 142 */ - 0, /* PTYPE 143 */ - PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 144 */ - PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 145 */ - PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 146 */ - PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 147 */ - PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 148 */ - PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 149 */ - 0, /* PTYPE 150 */ - PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 151 */ - PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 152 */ - PKT_RX_IPV6_HDR_EXT, /* PTYPE 153 */ - 0, /* PTYPE 154 */ - 0, /* PTYPE 155 */ - 0, /* PTYPE 156 */ - 0, /* PTYPE 157 */ - 0, /* PTYPE 158 */ - 0, /* PTYPE 159 */ - 0, /* PTYPE 160 */ - 0, /* PTYPE 161 */ - 0, /* PTYPE 162 */ - 0, /* PTYPE 163 */ - 0, /* PTYPE 164 */ - 0, /* PTYPE 165 */ - 0, /* PTYPE 166 */ - 0, /* PTYPE 167 */ - 0, /* PTYPE 168 */ - 0, /* PTYPE 169 */ - 0, /* PTYPE 170 */ - 0, /* PTYPE 171 */ - 0, /* PTYPE 172 */ - 0, /* PTYPE 173 */ - 0, /* PTYPE 174 */ - 0, /* PTYPE 175 */ - 0, /* PTYPE 176 */ - 0, /* PTYPE 177 */ - 0, /* PTYPE 178 */ - 0, /* PTYPE 179 */ - 0, /* PTYPE 180 */ - 0, /* PTYPE 181 */ - 0, /* PTYPE 182 */ - 0, /* PTYPE 183 */ - 0, /* PTYPE 184 */ - 0, /* PTYPE 185 */ - 0, /* PTYPE 186 */ - 0, /* PTYPE 187 */ - 0, /* PTYPE 188 */ - 0, /* PTYPE 189 */ - 0, /* PTYPE 190 */ - 0, /* PTYPE 191 */ - 0, /* PTYPE 192 */ - 0, /* PTYPE 193 */ - 0, /* PTYPE 194 */ - 0, /* PTYPE 195 */ - 0, /* PTYPE 196 */ - 0, /* PTYPE 197 */ - 0, /* PTYPE 198 */ - 0, /* PTYPE 199 */ - 0, /* PTYPE 200 */ - 0, /* PTYPE 201 */ - 0, /* PTYPE 202 */ - 0, /* PTYPE 203 */ - 0, /* PTYPE 204 */ - 0, /* PTYPE 205 */ - 0, /* PTYPE 206 */ - 0, /* PTYPE 207 */ - 0, /* PTYPE 208 */ - 0, /* PTYPE 209 */ - 0, /* PTYPE 210 */ - 0, /* PTYPE 211 */ - 0, /* PTYPE 212 */ - 0, /* PTYPE 213 */ - 0, /* PTYPE 214 */ - 0, /* PTYPE 215 */ - 0, /* PTYPE 216 */ - 0, /* PTYPE 217 */ - 0, /* PTYPE 218 */ - 0, /* PTYPE 219 */ - 0, /* PTYPE 220 */ - 0, /* PTYPE 221 */ - 0, /* PTYPE 222 */ - 0, /* PTYPE 223 */ - 0, /* PTYPE 224 */ - 0, /* PTYPE 
225 */ - 0, /* PTYPE 226 */ - 0, /* PTYPE 227 */ - 0, /* PTYPE 228 */ - 0, /* PTYPE 229 */ - 0, /* PTYPE 230 */ - 0, /* PTYPE 231 */ - 0, /* PTYPE 232 */ - 0, /* PTYPE 233 */ - 0, /* PTYPE 234 */ - 0, /* PTYPE 235 */ - 0, /* PTYPE 236 */ - 0, /* PTYPE 237 */ - 0, /* PTYPE 238 */ - 0, /* PTYPE 239 */ - 0, /* PTYPE 240 */ - 0, /* PTYPE 241 */ - 0, /* PTYPE 242 */ - 0, /* PTYPE 243 */ - 0, /* PTYPE 244 */ - 0, /* PTYPE 245 */ - 0, /* PTYPE 246 */ - 0, /* PTYPE 247 */ - 0, /* PTYPE 248 */ - 0, /* PTYPE 249 */ - 0, /* PTYPE 250 */ - 0, /* PTYPE 251 */ - 0, /* PTYPE 252 */ - 0, /* PTYPE 253 */ - 0, /* PTYPE 254 */ - 0, /* PTYPE 255 */ + uint64_t pkt_flags = 0; + uint16_t tsyn = (qword & (I40E_RXD_QW1_STATUS_TSYNVALID_MASK + | I40E_RXD_QW1_STATUS_TSYNINDX_MASK)) + >> I40E_RX_DESC_STATUS_TSYNINDX_SHIFT; + + if ((mb->packet_type & RTE_PTYPE_L2_MASK) + == RTE_PTYPE_L2_ETHER_TIMESYNC) + pkt_flags = PKT_RX_IEEE1588_PTP; + if (tsyn & 0x04) { + pkt_flags |= PKT_RX_IEEE1588_TMST; + mb->timesync = tsyn & 0x03; + } + + return pkt_flags; +} +#endif + +/* For each value it means, datasheet of hardware can tell more details */ +static inline uint32_t +i40e_rxd_pkt_type_mapping(uint8_t ptype) +{ + static const uint32_t ptype_table[UINT8_MAX] __rte_cache_aligned = { + /* L2 types */ + /* [0] reserved */ + [1] = RTE_PTYPE_L2_ETHER, + [2] = RTE_PTYPE_L2_ETHER_TIMESYNC, + /* [3] - [5] reserved */ + [6] = RTE_PTYPE_L2_ETHER_LLDP, + /* [7] - [10] reserved */ + [11] = RTE_PTYPE_L2_ETHER_ARP, + /* [12] - [21] reserved */ + + /* Non tunneled IPv4 */ + [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG, + [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP, + /* [25] reserved */ + [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP, + [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_SCTP, + [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_ICMP, + + /* IPv4 --> IPv4 */ + [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [32] reserved */ + [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> IPv6 */ + [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [39] reserved */ + [40] = 
RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN */ + [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT, + + /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */ + [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [47] reserved */ + [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */ + [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [54] reserved */ + [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC */ + [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ + [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [62] reserved */ + [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + 
RTE_PTYPE_INNER_L4_TCP, + [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ + [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [69] reserved */ + [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */ + [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ + [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [77] reserved */ + [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ + [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + 
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [84] reserved */ + [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* Non tunneled IPv6 */ + [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG, + [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP, + /* [91] reserved */ + [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP, + [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_SCTP, + [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_ICMP, + + /* IPv6 --> IPv4 */ + [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [98] reserved */ + [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> IPv6 */ + [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [105] reserved */ + [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN */ + [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT, + + /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */ + [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + 
RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [113] reserved */ + [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */ + [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [120] reserved */ + [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC */ + [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ + [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [128] reserved */ + [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ + [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [134] = 
RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [135] reserved */ + [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */ + [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ + [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [143] reserved */ + [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ + [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [150] reserved */ + [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* All others reserved */ }; - return ip_ptype_map[ptype]; + return ptype_table[ptype]; } #define I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK 0x03 @@ -574,8 +889,9 @@ 
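The 256-entry flag map removed above is replaced by i40e_rxd_pkt_type_mapping(), which turns the 8-bit hardware ptype into a composed RTE_PTYPE_* value stored in mbuf->packet_type rather than into ol_flags bits. One thing worth double-checking: ptype_table[UINT8_MAX] declares 255 elements, so a hardware ptype of 255 would read one past the end; UINT8_MAX + 1 looks like the intended size. Applications decode the composed value with the layer masks from rte_mbuf.h; a hedged sketch (dump_layers() is hypothetical, not driver code):

static void
dump_layers(uint32_t ptype)	/* value taken from mbuf->packet_type */
{
	if ((ptype & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4_EXT_UNKNOWN)
		printf("outer IPv4\n");
	if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_GRENAT)
		printf("GRE/Teredo/VXLAN tunnel\n");
	if ((ptype & RTE_PTYPE_INNER_L4_MASK) == RTE_PTYPE_INNER_L4_UDP)
		printf("inner UDP\n");
}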
i40e_xmit_cleanup(struct i40e_tx_queue *txq) desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc); desc_to_clean_to = sw_ring[desc_to_clean_to].last_id; - if (!(txd[desc_to_clean_to].cmd_type_offset_bsz & - rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))) { + if ((txd[desc_to_clean_to].cmd_type_offset_bsz & + rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) != + rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)) { PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done " "(port=%d queue=%d)", desc_to_clean_to, txq->port_id, txq->queue_id); @@ -696,30 +1012,29 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq) mb = rxep[j].mbuf; qword1 = rte_le_to_cpu_64(\ rxdp[j].wb.qword1.status_error_len); - rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; pkt_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len; mb->data_len = pkt_len; mb->pkt_len = pkt_len; - mb->vlan_tci = rx_status & - (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ? - rte_le_to_cpu_16(\ - rxdp[j].wb.qword0.lo_dword.l2tag1) : 0; + mb->ol_flags = 0; + i40e_rxd_to_vlan_tci(mb, &rxdp[j]); pkt_flags = i40e_rxd_status_to_pkt_flags(qword1); pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1); - pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1); - - mb->packet_type = (uint16_t)((qword1 & - I40E_RXD_QW1_PTYPE_MASK) >> - I40E_RXD_QW1_PTYPE_SHIFT); + mb->packet_type = + i40e_rxd_pkt_type_mapping((uint8_t)((qword1 & + I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT)); if (pkt_flags & PKT_RX_RSS_HASH) mb->hash.rss = rte_le_to_cpu_32(\ rxdp[j].wb.qword0.hi_dword.rss); if (pkt_flags & PKT_RX_FDIR) pkt_flags |= i40e_rxd_build_fdir(&rxdp[j], mb); - mb->ol_flags = pkt_flags; +#ifdef RTE_LIBRTE_IEEE1588 + pkt_flags |= i40e_get_iee15888_flags(mb, qword1); +#endif + mb->ol_flags |= pkt_flags; + } for (j = 0; j < I40E_LOOK_AHEAD; j++) @@ -778,6 +1093,10 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq) rxdp = &rxq->rx_ring[alloc_idx]; for (i = 0; i < rxq->rx_free_thresh; i++) { + if (likely(i < (rxq->rx_free_thresh - 1))) + /* Prefetch next mbuf */ + rte_prefetch0(rxep[i + 1].mbuf); + mb = rxep[i].mbuf; rte_mbuf_refcnt_set(mb, 1); mb->next = NULL; @@ -786,7 +1105,7 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq) mb->port = rxq->port_id; dma_addr = rte_cpu_to_le_64(\ RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb)); - rxdp[i].read.hdr_addr = dma_addr; + rxdp[i].read.hdr_addr = 0; rxdp[i].read.pkt_addr = dma_addr; } @@ -901,6 +1220,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len); rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT; + /* Check the DD bit first */ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) break; @@ -932,7 +1252,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) rxe->mbuf = nmb; dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb)); - rxdp->read.hdr_addr = dma_addr; + rxdp->read.hdr_addr = 0; rxdp->read.pkt_addr = dma_addr; rx_packet_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> @@ -945,22 +1265,23 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) rxm->pkt_len = rx_packet_len; rxm->data_len = rx_packet_len; rxm->port = rxq->port_id; - - rxm->vlan_tci = rx_status & - (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ? 
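Two details in this region deserve a note. First, the TX "descriptor done" test in i40e_xmit_cleanup() (and in i40e_tx_free_bufs() below) becomes a masked comparison: DTYPE is a four-bit field and DESC_DONE is the specific value 0xF, so the old single-AND form would also fire on a descriptor whose DTYPE bits were only partially set, e.g. a context descriptor (DTYPE 0x1) not yet written back. A sketch of the corrected predicate, with constants as defined in i40e_type.h (illustrative only):

	uint64_t qw = rte_le_to_cpu_64(txd->cmd_type_offset_bsz);
	/* done only when the whole DTYPE field equals DESC_DONE (0xF) */
	int done = (qw & I40E_TXD_QW1_DTYPE_MASK) ==
			I40E_TX_DESC_DTYPE_DESC_DONE;

Second, rxdp[i].read.hdr_addr is now primed to 0 instead of the buffer address: header split is unused, and in the writeback layout those bytes are reused for status, so the usual rationale is to avoid stale address bits being read back as status.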
- rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0; + rxm->ol_flags = 0; + i40e_rxd_to_vlan_tci(rxm, &rxd); pkt_flags = i40e_rxd_status_to_pkt_flags(qword1); pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1); - pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1); - rxm->packet_type = (uint16_t)((qword1 & I40E_RXD_QW1_PTYPE_MASK) >> - I40E_RXD_QW1_PTYPE_SHIFT); + rxm->packet_type = + i40e_rxd_pkt_type_mapping((uint8_t)((qword1 & + I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT)); if (pkt_flags & PKT_RX_RSS_HASH) rxm->hash.rss = rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss); if (pkt_flags & PKT_RX_FDIR) pkt_flags |= i40e_rxd_build_fdir(&rxd, rxm); - rxm->ol_flags = pkt_flags; +#ifdef RTE_LIBRTE_IEEE1588 + pkt_flags |= i40e_get_iee15888_flags(rxm, qword1); +#endif + rxm->ol_flags |= pkt_flags; rx_pkts[nb_rx++] = rxm; } @@ -1010,6 +1331,7 @@ i40e_recv_scattered_pkts(void *rx_queue, qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len); rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT; + /* Check the DD bit */ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) break; @@ -1043,7 +1365,7 @@ i40e_recv_scattered_pkts(void *rx_queue, rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb)); /* Set data buffer address and data length of the mbuf */ - rxdp->read.hdr_addr = dma_addr; + rxdp->read.hdr_addr = 0; rxdp->read.pkt_addr = dma_addr; rx_packet_len = (qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT; @@ -1105,22 +1427,23 @@ i40e_recv_scattered_pkts(void *rx_queue, } first_seg->port = rxq->port_id; - first_seg->vlan_tci = (rx_status & - (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ? - rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0; + first_seg->ol_flags = 0; + i40e_rxd_to_vlan_tci(first_seg, &rxd); pkt_flags = i40e_rxd_status_to_pkt_flags(qword1); pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1); - pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1); - first_seg->packet_type = (uint16_t)((qword1 & - I40E_RXD_QW1_PTYPE_MASK) >> - I40E_RXD_QW1_PTYPE_SHIFT); + first_seg->packet_type = + i40e_rxd_pkt_type_mapping((uint8_t)((qword1 & + I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT)); if (pkt_flags & PKT_RX_RSS_HASH) rxm->hash.rss = rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss); if (pkt_flags & PKT_RX_FDIR) pkt_flags |= i40e_rxd_build_fdir(&rxd, rxm); - first_seg->ol_flags = pkt_flags; +#ifdef RTE_LIBRTE_IEEE1588 + pkt_flags |= i40e_get_iee15888_flags(first_seg, qword1); +#endif + first_seg->ol_flags |= pkt_flags; /* Prefetch data of first segment, if configured to do so. */ rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr, @@ -1158,17 +1481,15 @@ i40e_recv_scattered_pkts(void *rx_queue, static inline uint16_t i40e_calc_context_desc(uint64_t flags) { - uint64_t mask = 0ULL; - - mask |= (PKT_TX_OUTER_IP_CKSUM | PKT_TX_TCP_SEG); + static uint64_t mask = PKT_TX_OUTER_IP_CKSUM | + PKT_TX_TCP_SEG | + PKT_TX_QINQ_PKT; #ifdef RTE_LIBRTE_IEEE1588 mask |= PKT_TX_IEEE1588_TMST; #endif - if (flags & mask) - return 1; - return 0; + return ((flags & mask) ? 
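The RX paths now fold in i40e_get_iee15888_flags() under RTE_LIBRTE_IEEE1588: bit 2 of the extracted TSYN field signals that a timestamp was latched, and bits 0-1 select which RX timestamp register holds it (saved in mb->timesync). A hedged sketch of the application side, assuming the rte_eth_timesync_* API of this DPDK generation:

	if (m->ol_flags & PKT_RX_IEEE1588_TMST) {
		struct timespec ts;

		/* m->timesync carries the RX timestamp register index */
		if (rte_eth_timesync_read_rx_timestamp(port_id, &ts,
						       m->timesync) == 0)
			printf("rx ts: %ld.%09ld\n",
			       (long)ts.tv_sec, ts.tv_nsec);
	}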
1 : 0); } /* set i40e TSO context descriptor */ @@ -1289,9 +1610,9 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) } /* Descriptor based VLAN insertion */ - if (ol_flags & PKT_TX_VLAN_PKT) { + if (ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) { tx_flags |= tx_pkt->vlan_tci << - I40E_TX_FLAG_L2TAG1_SHIFT; + I40E_TX_FLAG_L2TAG1_SHIFT; tx_flags |= I40E_TX_FLAG_INSERT_VLAN; td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; td_tag = (tx_flags & I40E_TX_FLAG_L2TAG1_MASK) >> @@ -1303,12 +1624,12 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) /* Enable checksum offloading */ cd_tunneling_params = 0; - if (unlikely(ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK)) { + if (ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK) { i40e_txd_enable_checksum(ol_flags, &td_cmd, &td_offset, tx_offload, &cd_tunneling_params); } - if (unlikely(nb_ctx)) { + if (nb_ctx) { /* Setup TX context descriptor if required */ volatile struct i40e_tx_context_desc *ctx_txd = (volatile struct i40e_tx_context_desc *)\ @@ -1339,6 +1660,12 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) ctx_txd->tunneling_params = rte_cpu_to_le_32(cd_tunneling_params); + if (ol_flags & PKT_TX_QINQ_PKT) { + cd_l2tag2 = tx_pkt->vlan_tci_outer; + cd_type_cmd_tso_mss |= + ((uint64_t)I40E_TX_CTX_DESC_IL2TAG2 << + I40E_TXD_CTX_QW1_CMD_SHIFT); + } ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2); ctx_txd->type_cmd_tso_mss = rte_cpu_to_le_64(cd_type_cmd_tso_mss); @@ -1431,8 +1758,9 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq) struct i40e_tx_entry *txep; uint16_t i; - if (!(txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz & - rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))) + if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz & + rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) != + rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)) return 0; txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]); @@ -1460,9 +1788,6 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq) return txq->tx_rs_thresh; } -#define I40E_TD_CMD (I40E_TX_DESC_CMD_ICRC |\ - I40E_TX_DESC_CMD_EOP) - /* Populate 4 descriptors with data from 4 mbufs */ static inline void tx4(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts) @@ -1780,10 +2105,13 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev, struct i40e_vsi *vsi; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); struct i40e_rx_queue *rxq; const struct rte_memzone *rz; uint32_t ring_size; - uint16_t len; + uint16_t len, i; + uint16_t base, bsf, tc_mapping; int use_def_burst_func = 1; if (hw->mac.type == I40E_MAC_VF) { @@ -1888,13 +2216,12 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev, use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq); - if (!use_def_burst_func && !dev->data->scattered_rx) { + if (!use_def_burst_func) { #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " "satisfied. 
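On the TX side, PKT_TX_QINQ_PKT now counts toward nb_ctx (see i40e_calc_context_desc() above) and both tags get programmed: the inner tag travels in the data descriptor via IL2TAG1 from vlan_tci, the outer tag in the context descriptor via IL2TAG2 from vlan_tci_outer. What a sender sets up, sketched under the same field names (the tag values are arbitrary examples):

	m->vlan_tci = 100;		/* inner tag -> data desc L2TAG1 */
	m->vlan_tci_outer = 200;	/* outer tag -> ctx desc L2TAG2 */
	m->ol_flags |= PKT_TX_QINQ_PKT;	/* forces a context descriptor */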
Rx Burst Bulk Alloc function will be " "used on port=%d, queue=%d.", rxq->port_id, rxq->queue_id); - dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc; #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */ } else { PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " @@ -1902,6 +2229,20 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev, "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is " "not enabled on port=%d, queue=%d.", rxq->port_id, rxq->queue_id); + ad->rx_bulk_alloc_allowed = false; + } + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (!(vsi->enabled_tc & (1 << i))) + continue; + tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]); + base = (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >> + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT; + bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >> + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT; + + if (queue_idx >= base && queue_idx < (base + BIT(bsf))) + rxq->dcb_tc = i; } return 0; @@ -1996,6 +2337,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev, const struct rte_memzone *tz; uint32_t ring_size; uint16_t tx_rs_thresh, tx_free_thresh; + uint16_t i, base, bsf, tc_mapping; if (hw->mac.type == I40E_MAC_VF) { struct i40e_vf *vf = @@ -2163,13 +2505,19 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev, dev->data->tx_queues[queue_idx] = txq; /* Use a simple TX queue without offloads or multi segs if possible */ - if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS) && - (txq->tx_rs_thresh >= I40E_TX_MAX_BURST)) { - PMD_INIT_LOG(INFO, "Using simple tx path"); - dev->tx_pkt_burst = i40e_xmit_pkts_simple; - } else { - PMD_INIT_LOG(INFO, "Using full-featured tx path"); - dev->tx_pkt_burst = i40e_xmit_pkts; + i40e_set_tx_function_flag(dev, txq); + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (!(vsi->enabled_tc & (1 << i))) + continue; + tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]); + base = (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >> + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT; + bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >> + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT; + + if (queue_idx >= base && queue_idx < (base + BIT(bsf))) + txq->dcb_tc = i; } return 0; @@ -2239,6 +2587,12 @@ i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq) { uint16_t i; + /* SSE Vector driver has a different way of releasing mbufs. 
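Both queue-setup paths above derive the queue's traffic class from the VSI's tc_mapping words: base is the TC's first queue and BIT(bsf) its queue count. A worked example, assuming the usual adminq layout (queue offset in bits 0-8, queue-number exponent in bits 9-11; verify against i40e_adminq_cmd.h):

	/* tc_mapping = 0x0608:
	 *   base = 0x0608 & I40E_AQ_VSI_TC_QUE_OFFSET_MASK       = 8
	 *   bsf  = (0x0608 & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >> 9 = 3
	 * so queues 8..8+2^3-1 = 8..15 get dcb_tc set to this TC. */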
 */
+	if (rxq->rx_using_sse) {
+		i40e_rx_queue_release_mbufs_vec(rxq);
+		return;
+	}
+
 	if (!rxq || !rxq->sw_ring) {
 		PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
 		return;
@@ -2269,6 +2623,11 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
 	unsigned i;
 	uint16_t len;
 
+	if (!rxq) {
+		PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
+		return;
+	}
+
 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
 	if (check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
 		len = (uint16_t)(rxq->nb_rx_desc + RTE_PMD_I40E_RX_MAX_BURST);
@@ -2292,6 +2651,9 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
 	rxq->nb_rx_hold = 0;
 	rxq->pkt_first_seg = NULL;
 	rxq->pkt_last_seg = NULL;
+
+	rxq->rxrearm_start = 0;
+	rxq->rxrearm_nb = 0;
 }
 
 void
@@ -2366,7 +2728,11 @@ i40e_tx_queue_init(struct i40e_tx_queue *txq)
 	tx_ctx.new_context = 1;
 	tx_ctx.base = txq->tx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
 	tx_ctx.qlen = txq->nb_tx_desc;
-	tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[0]);
+
+#ifdef RTE_LIBRTE_IEEE1588
+	tx_ctx.timesync_ena = 1;
+#endif
+	tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[txq->dcb_tc]);
 	if (vsi->type == I40E_VSI_FDIR)
 		tx_ctx.fd_ena = TRUE;
@@ -2421,7 +2787,7 @@ i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq)
 		rxd = &rxq->rx_ring[i];
 		rxd->read.pkt_addr = dma_addr;
-		rxd->read.hdr_addr = dma_addr;
+		rxd->read.hdr_addr = 0;
 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
 		rxd->read.rsvd1 = 0;
 		rxd->read.rsvd2 = 0;
@@ -2500,7 +2866,6 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
 	int err = I40E_SUCCESS;
 	struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
 	struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(rxq->vsi);
-	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
 	uint16_t pf_q = rxq->reg_idx;
 	uint16_t buf_size;
 	struct i40e_hmc_obj_rxq rx_ctx;
@@ -2556,7 +2921,6 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
 	/* Check if scattered RX needs to be used. */
 	if ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
 		dev_data->scattered_rx = 1;
-		dev->rx_pkt_burst = i40e_recv_scattered_pkts;
 	}
 
 	/* Init the RX tail register. */
@@ -2583,6 +2947,26 @@ i40e_dev_clear_queues(struct rte_eth_dev *dev)
 	}
 }
 
+void
+i40e_dev_free_queues(struct rte_eth_dev *dev)
+{
+	uint16_t i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		i40e_dev_rx_queue_release(dev->data->rx_queues[i]);
+		dev->data->rx_queues[i] = NULL;
+	}
+	dev->data->nb_rx_queues = 0;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		i40e_dev_tx_queue_release(dev->data->tx_queues[i]);
+		dev->data->tx_queues[i] = NULL;
+	}
+	dev->data->nb_tx_queues = 0;
+}
+
 #define I40E_FDIR_NUM_TX_DESC  I40E_MIN_RING_DESC
 #define I40E_FDIR_NUM_RX_DESC  I40E_MIN_RING_DESC
@@ -2706,3 +3090,201 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
 
 	return I40E_SUCCESS;
 }
+
+void __attribute__((cold))
+i40e_set_rx_function(struct rte_eth_dev *dev)
+{
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint16_t rx_using_sse, i;
+	/* In order to allow Vector Rx there are a few configuration
+	 * conditions to be met and Rx Bulk Allocation should be allowed.
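For the scattered-RX decision kept in i40e_rx_queue_init() (only the callback assignment moved out, into i40e_set_rx_function() below), a worked example of the threshold arithmetic, assuming a typical mempool with 2048-byte data rooms and the default 128-byte headroom (assumed values, not from this patch):

	uint16_t buf_size = 2048 - RTE_PKTMBUF_HEADROOM;  /* = 1920 */
	uint32_t need = 1518 + 2 * I40E_VLAN_TAG_SIZE;    /* = 1526 */
	/* 1526 <= 1920: a standard frame fits one buffer; a 9000-byte
	 * jumbo max_pkt_len would exceed buf_size and set scattered_rx */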
+ */ + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + if (i40e_rx_vec_dev_conf_condition_check(dev) || + !ad->rx_bulk_alloc_allowed) { + PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet" + " Vector Rx preconditions", + dev->data->port_id); + + ad->rx_vec_allowed = false; + } + if (ad->rx_vec_allowed) { + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct i40e_rx_queue *rxq = + dev->data->rx_queues[i]; + + if (i40e_rxq_vec_setup(rxq)) { + ad->rx_vec_allowed = false; + break; + } + } + } + } + + if (dev->data->scattered_rx) { + /* Set the non-LRO scattered callback: there are Vector and + * single allocation versions. + */ + if (ad->rx_vec_allowed) { + PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx " + "callback (port=%d).", + dev->data->port_id); + + dev->rx_pkt_burst = i40e_recv_scattered_pkts_vec; + } else { + PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk " + "allocation callback (port=%d).", + dev->data->port_id); + dev->rx_pkt_burst = i40e_recv_scattered_pkts; + } + /* If parameters allow we are going to choose between the following + * callbacks: + * - Vector + * - Bulk Allocation + * - Single buffer allocation (the simplest one) + */ + } else if (ad->rx_vec_allowed) { + PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX " + "burst size no less than %d (port=%d).", + RTE_I40E_DESCS_PER_LOOP, + dev->data->port_id); + + dev->rx_pkt_burst = i40e_recv_pkts_vec; + } else if (ad->rx_bulk_alloc_allowed) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " + "satisfied. Rx Burst Bulk Alloc function " + "will be used on port=%d.", + dev->data->port_id); + + dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc; + } else { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not " + "satisfied, or Scattered Rx is requested " + "(port=%d).", + dev->data->port_id); + + dev->rx_pkt_burst = i40e_recv_pkts; + } + + /* Propagate information about RX function choice through all queues. 
 */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		rx_using_sse =
+			(dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
+			 dev->rx_pkt_burst == i40e_recv_pkts_vec);
+
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
+
+			rxq->rx_using_sse = rx_using_sse;
+		}
+	}
+}
+
+void __attribute__((cold))
+i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
+{
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
+	if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS)
+			&& (txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST)) {
+		if (txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ) {
+			PMD_INIT_LOG(DEBUG, "Vector tx"
+					" can be enabled on this txq.");
+
+		} else {
+			ad->tx_vec_allowed = false;
+		}
+	} else {
+		ad->tx_simple_allowed = false;
+	}
+}
+
+void __attribute__((cold))
+i40e_set_tx_function(struct rte_eth_dev *dev)
+{
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	int i;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		if (ad->tx_vec_allowed) {
+			for (i = 0; i < dev->data->nb_tx_queues; i++) {
+				struct i40e_tx_queue *txq =
+					dev->data->tx_queues[i];
+
+				if (i40e_txq_vec_setup(txq)) {
+					ad->tx_vec_allowed = false;
+					break;
+				}
+			}
+		}
+	}
+
+	if (ad->tx_simple_allowed) {
+		if (ad->tx_vec_allowed) {
+			PMD_INIT_LOG(DEBUG, "Vector tx will be used.");
+			dev->tx_pkt_burst = i40e_xmit_pkts_vec;
+		} else {
+			PMD_INIT_LOG(DEBUG, "Simple tx will be used.");
+			dev->tx_pkt_burst = i40e_xmit_pkts_simple;
+		}
+	} else {
+		PMD_INIT_LOG(DEBUG, "Full-featured tx will be used.");
+		dev->tx_pkt_burst = i40e_xmit_pkts;
+	}
+}
+
+/* Stubs needed for linkage when CONFIG_RTE_I40E_INC_VECTOR is set to 'n' */
+int __attribute__((weak))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
+{
+	return -1;
+}
+
+uint16_t __attribute__((weak))
+i40e_recv_pkts_vec(
+	void __rte_unused *rx_queue,
+	struct rte_mbuf __rte_unused **rx_pkts,
+	uint16_t __rte_unused nb_pkts)
+{
+	return 0;
+}
+
+uint16_t __attribute__((weak))
+i40e_recv_scattered_pkts_vec(
+	void __rte_unused *rx_queue,
+	struct rte_mbuf __rte_unused **rx_pkts,
+	uint16_t __rte_unused nb_pkts)
+{
+	return 0;
+}
+
+int __attribute__((weak))
+i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
+{
+	return -1;
+}
+
+int __attribute__((weak))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
+{
+	return -1;
+}
+
+void __attribute__((weak))
+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused *rxq)
+{
+	return;
+}
+
+uint16_t __attribute__((weak))
+i40e_xmit_pkts_vec(void __rte_unused *tx_queue,
+		   struct rte_mbuf __rte_unused **tx_pkts,
+		   uint16_t __rte_unused nb_pkts)
+{
+	return 0;
+}
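The weak stubs above let this file link when CONFIG_RTE_I40E_INC_VECTOR is disabled: if the vector object file is compiled in, its strong definitions override the weak ones at link time; otherwise the stubs report "not supported" and the setup code falls back to the scalar paths. A minimal illustration of the pattern with hypothetical symbols, not part of the driver:

/* default.c: weak fallback, linked unconditionally */
int __attribute__((weak))
fast_path_setup(void)
{
	return -1;	/* "not available" */
}

/* vec_sse.c: built only when vector support is on; this strong
 * definition silently replaces the weak one at link time */
int
fast_path_setup(void)
{
	return 0;
}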