/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/queue.h>

#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_udp.h>

54 #include "i40e_logs.h"
55 #include "base/i40e_prototype.h"
56 #include "base/i40e_type.h"
57 #include "i40e_ethdev.h"
58 #include "i40e_rxtx.h"
#define I40E_MIN_RING_DESC      64
#define I40E_MAX_RING_DESC      4096
#define I40E_ALIGN              128
#define DEFAULT_TX_RS_THRESH    32
#define DEFAULT_TX_FREE_THRESH  32
#define I40E_MAX_PKT_TYPE       256

#define I40E_TX_MAX_BURST       32

#define I40E_DMA_MEM_ALIGN      4096

#define I40E_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
                    ETH_TXQ_FLAGS_NOOFFLOADS)

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)

#define I40E_TX_CKSUM_OFFLOAD_MASK (    \
        PKT_TX_IP_CKSUM |               \
        PKT_TX_L4_MASK |                \
        PKT_TX_OUTER_IP_CKSUM)

#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
    ((uint64_t)((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM))

#define RTE_MBUF_DATA_DMA_ADDR(mb) \
    ((uint64_t)((mb)->buf_physaddr + (mb)->data_off))

static const struct rte_memzone *
i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
               const char *ring_name,
               uint16_t queue_id,
               uint32_t ring_size,
               int socket_id);
static uint16_t i40e_xmit_pkts_simple(void *tx_queue,
                      struct rte_mbuf **tx_pkts,
                      uint16_t nb_pkts);
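
/* Extract the VLAN tag(s) from an RX descriptor into the mbuf: the
 * first tag comes from L2TAG1; with 32-byte descriptors, a second
 * (QinQ) tag moves the first one to vlan_tci_outer and takes the
 * inner tag from L2TAG2.
 */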
static inline void
i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp)
{
    if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
        (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
        mb->ol_flags |= PKT_RX_VLAN_PKT;
        mb->vlan_tci =
            rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
        PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
               rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1));
    } else {
        mb->vlan_tci = 0;
    }
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
    if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) &
        (1 << I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) {
        mb->ol_flags |= PKT_RX_QINQ_PKT;
        mb->vlan_tci_outer = mb->vlan_tci;
        mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2);
        PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
               rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_1),
               rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2));
    } else {
        mb->vlan_tci_outer = 0;
    }
#endif
    PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
           mb->vlan_tci, mb->vlan_tci_outer);
}

/* Translate the rx descriptor status to pkt flags */
static inline uint64_t
i40e_rxd_status_to_pkt_flags(uint64_t qword)
{
    uint64_t flags;

    /* Check if RSS_HASH */
    flags = (((qword >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
          I40E_RX_DESC_FLTSTAT_RSS_HASH) ==
         I40E_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;

    /* Check if FDIR Match */
    flags |= (qword & (1 << I40E_RX_DESC_STATUS_FLM_SHIFT) ?
          PKT_RX_FDIR : 0);

    return flags;
}
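
/* Translate the rx descriptor error bits to pkt flags */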
static inline uint64_t
i40e_rxd_error_to_pkt_flags(uint64_t qword)
{
    uint64_t flags = 0;
    uint64_t error_bits = (qword >> I40E_RXD_QW1_ERROR_SHIFT);

#define I40E_RX_ERR_BITS 0x3f
    if (likely((error_bits & I40E_RX_ERR_BITS) == 0))
        return flags;
    /* If RXE bit set, all other status bits are meaningless */
    if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
        flags |= PKT_RX_MAC_ERR;
        return flags;
    }

    /* If RECIPE bit set, all other status indications should be ignored */
    if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_RECIPE_SHIFT))) {
        flags |= PKT_RX_RECIP_ERR;
        return flags;
    }
    if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT)))
        flags |= PKT_RX_HBUF_OVERFLOW;
    if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_IPE_SHIFT)))
        flags |= PKT_RX_IP_CKSUM_BAD;
    if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)))
        flags |= PKT_RX_L4_CKSUM_BAD;
    if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT)))
        flags |= PKT_RX_EIP_CKSUM_BAD;
    if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_OVERSIZE_SHIFT)))
        flags |= PKT_RX_OVERSIZE;

    return flags;
}

/* Function to check and set the ieee1588 timesync index and get the
 * timestamp if necessary.
 */
#ifdef RTE_LIBRTE_IEEE1588
static inline uint64_t
i40e_get_iee15888_flags(struct rte_mbuf *mb, uint64_t qword)
{
    uint64_t pkt_flags = 0;
    uint16_t tsyn = (qword & (I40E_RXD_QW1_STATUS_TSYNVALID_MASK
                  | I40E_RXD_QW1_STATUS_TSYNINDX_MASK))
                 >> I40E_RX_DESC_STATUS_TSYNINDX_SHIFT;

#ifdef RTE_NEXT_ABI
    if ((mb->packet_type & RTE_PTYPE_L2_MASK)
            == RTE_PTYPE_L2_ETHER_TIMESYNC)
        pkt_flags = PKT_RX_IEEE1588_PTP;
#endif
    if (tsyn & 0x04) {
        pkt_flags |= PKT_RX_IEEE1588_TMST;
        mb->timesync = tsyn & 0x03;
    }

    return pkt_flags;
}
#endif

#ifdef RTE_NEXT_ABI
/* The hardware datasheet describes the meaning of each ptype value in
 * more detail.
 */
static inline uint32_t
i40e_rxd_pkt_type_mapping(uint8_t ptype)
{
    /* Sized UINT8_MAX + 1 so every possible uint8_t index is in bounds;
     * unnamed entries are implicitly zero (RTE_PTYPE_UNKNOWN).
     */
    static const uint32_t ptype_table[UINT8_MAX + 1] __rte_cache_aligned = {
        /* L2 types */
        /* [0] reserved */
        [1] = RTE_PTYPE_L2_ETHER,
        [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
        /* [3] - [5] reserved */
        [6] = RTE_PTYPE_L2_ETHER_LLDP,
        /* [7] - [10] reserved */
        [11] = RTE_PTYPE_L2_ETHER_ARP,
        /* [12] - [21] reserved */

        /* Non tunneled IPv4 */
        [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_L4_FRAG,
        [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_L4_NONFRAG,
        [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_L4_UDP,
        /* [25] reserved */
        [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_L4_TCP,
        [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_L4_SCTP,
        [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_L4_ICMP,

        /* IPv4 --> IPv4 */
        [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_FRAG,
        [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_NONFRAG,
        [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_UDP,
        /* [32] reserved */
        [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_TCP,
        [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_SCTP,
        [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_ICMP,

        /* IPv4 --> IPv6 */
        [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_FRAG,
        [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_NONFRAG,
        [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_UDP,
        /* [39] reserved */
        [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_TCP,
        [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_SCTP,
        [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_ICMP,

        /* IPv4 --> GRE/Teredo/VXLAN */
        [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT,

        /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
        [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_FRAG,
        [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_NONFRAG,
        [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_UDP,
        /* [47] reserved */
        [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_TCP,
        [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_SCTP,
        [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_ICMP,

        /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
        [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_FRAG,
        [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_NONFRAG,
        [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_UDP,
        /* [54] reserved */
        [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_TCP,
        [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_SCTP,
        [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_ICMP,

        /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
        [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,

        /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
        [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_FRAG,
        [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_NONFRAG,
        [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_UDP,
        /* [62] reserved */
        [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_TCP,
        [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_SCTP,
        [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_ICMP,

        /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
        [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_FRAG,
        [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_NONFRAG,
        [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_UDP,
        /* [69] reserved */
        [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_TCP,
        [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_SCTP,
        [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_ICMP,

        /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */
        [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L2_ETHER_VLAN,

        /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
        [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L2_ETHER_VLAN |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_FRAG,
        [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L2_ETHER_VLAN |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_NONFRAG,
        [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L2_ETHER_VLAN |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_UDP,
        /* [77] reserved */
        [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L2_ETHER_VLAN |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_TCP,
        [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L2_ETHER_VLAN |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_SCTP,
        [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L2_ETHER_VLAN |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_ICMP,

        /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
        [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L2_ETHER_VLAN |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_FRAG,
        [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L2_ETHER_VLAN |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_NONFRAG,
        [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L2_ETHER_VLAN |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_UDP,
        /* [84] reserved */
        [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L2_ETHER_VLAN |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_TCP,
        [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L2_ETHER_VLAN |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_SCTP,
        [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L2_ETHER_VLAN |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_ICMP,

        /* Non tunneled IPv6 */
        [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_L4_FRAG,
        [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_L4_NONFRAG,
        [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_L4_UDP,
        /* [91] reserved */
        [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_L4_TCP,
        [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_L4_SCTP,
        [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_L4_ICMP,

        /* IPv6 --> IPv4 */
        [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_FRAG,
        [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_NONFRAG,
        [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_UDP,
        /* [98] reserved */
        [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_TCP,
        [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_SCTP,
        [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_ICMP,

        /* IPv6 --> IPv6 */
        [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_FRAG,
        [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_NONFRAG,
        [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_UDP,
        /* [105] reserved */
        [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_TCP,
        [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_SCTP,
        [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_ICMP,

        /* IPv6 --> GRE/Teredo/VXLAN */
        [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT,

        /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
        [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_FRAG,
        [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_NONFRAG,
        [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_UDP,
        /* [113] reserved */
        [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_TCP,
        [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_SCTP,
        [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_ICMP,

        /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
        [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_FRAG,
        [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_NONFRAG,
        [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_UDP,
        /* [120] reserved */
        [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_TCP,
        [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_SCTP,
        [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_ICMP,

        /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
        [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,

        /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
        [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_FRAG,
        [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_NONFRAG,
        [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_UDP,
        /* [128] reserved */
        [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_TCP,
        [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_SCTP,
        [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_ICMP,

        /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
        [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_FRAG,
        [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_NONFRAG,
        [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_UDP,
        /* [135] reserved */
        [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_TCP,
        [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_SCTP,
        [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_ICMP,

        /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */
        [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L2_ETHER_VLAN,

        /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
        [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L2_ETHER_VLAN |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_FRAG,
        [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L2_ETHER_VLAN |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_NONFRAG,
        [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L2_ETHER_VLAN |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_UDP,
        /* [143] reserved */
        [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L2_ETHER_VLAN |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_TCP,
        [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L2_ETHER_VLAN |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_SCTP,
        [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L2_ETHER_VLAN |
            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_ICMP,

        /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
        [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L2_ETHER_VLAN |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_FRAG,
        [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L2_ETHER_VLAN |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_NONFRAG,
        [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L2_ETHER_VLAN |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_UDP,
        /* [150] reserved */
        [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L2_ETHER_VLAN |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_TCP,
        [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L2_ETHER_VLAN |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_SCTP,
        [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_TUNNEL_GRENAT |
            RTE_PTYPE_INNER_L2_ETHER_VLAN |
            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
            RTE_PTYPE_INNER_L4_ICMP,

        /* All others reserved */
    };

    return ptype_table[ptype];
}
#else /* RTE_NEXT_ABI */
/* Translate pkt types to pkt flags */
static inline uint64_t
i40e_rxd_ptype_to_pkt_flags(uint64_t qword)
{
    uint8_t ptype = (uint8_t)((qword & I40E_RXD_QW1_PTYPE_MASK) >>
                  I40E_RXD_QW1_PTYPE_SHIFT);
    static const uint64_t ip_ptype_map[I40E_MAX_PKT_TYPE] = {
        0, /* PTYPE 0 */
        0, /* PTYPE 1 */
        PKT_RX_IEEE1588_PTP, /* PTYPE 2 */
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* PTYPE 3 - 12 */
        0, 0, 0, 0, 0, 0, 0, 0, 0,    /* PTYPE 13 - 21 */
        PKT_RX_IPV4_HDR, /* PTYPE 22 */
        PKT_RX_IPV4_HDR, /* PTYPE 23 */
        PKT_RX_IPV4_HDR, /* PTYPE 24 */
        0, /* PTYPE 25 */
        PKT_RX_IPV4_HDR, /* PTYPE 26 */
        PKT_RX_IPV4_HDR, /* PTYPE 27 */
        PKT_RX_IPV4_HDR, /* PTYPE 28 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 29 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 30 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 31 */
        0, /* PTYPE 32 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 33 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 34 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 35 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 36 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 37 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 38 */
        0, /* PTYPE 39 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 40 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 41 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 42 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 43 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 44 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 45 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 46 */
        0, /* PTYPE 47 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 48 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 49 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 50 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 51 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 52 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 53 */
        0, /* PTYPE 54 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 55 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 56 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 57 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 58 */
        PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 59 */
        PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 60 */
        PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 61 */
        0, /* PTYPE 62 */
        PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 63 */
        PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 64 */
        PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 65 */
        PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 66 */
        PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 67 */
        PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 68 */
        0, /* PTYPE 69 */
        PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 70 */
        PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 71 */
        PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 72 */
        PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 73 */
        PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 74 */
        PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 75 */
        PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 76 */
        0, /* PTYPE 77 */
        PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 78 */
        PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 79 */
        PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 80 */
        PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 81 */
        PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 82 */
        PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 83 */
        0, /* PTYPE 84 */
        PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 85 */
        PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 86 */
        PKT_RX_IPV4_HDR_EXT, /* PTYPE 87 */
        PKT_RX_IPV6_HDR, /* PTYPE 88 */
        PKT_RX_IPV6_HDR, /* PTYPE 89 */
        PKT_RX_IPV6_HDR, /* PTYPE 90 */
        0, /* PTYPE 91 */
        PKT_RX_IPV6_HDR, /* PTYPE 92 */
        PKT_RX_IPV6_HDR, /* PTYPE 93 */
        PKT_RX_IPV6_HDR, /* PTYPE 94 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 95 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 96 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 97 */
        0, /* PTYPE 98 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 99 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 100 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 101 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 102 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 103 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 104 */
        0, /* PTYPE 105 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 106 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 107 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 108 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 109 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 110 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 111 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 112 */
        0, /* PTYPE 113 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 114 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 115 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 116 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 117 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 118 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 119 */
        0, /* PTYPE 120 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 121 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 122 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 123 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 124 */
        PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 125 */
        PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 126 */
        PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 127 */
        0, /* PTYPE 128 */
        PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 129 */
        PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 130 */
        PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 131 */
        PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 132 */
        PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 133 */
        PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 134 */
        0, /* PTYPE 135 */
        PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 136 */
        PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 137 */
        PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 138 */
        PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 139 */
        PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 140 */
        PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 141 */
        PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 142 */
        0, /* PTYPE 143 */
        PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 144 */
        PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 145 */
        PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 146 */
        PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 147 */
        PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 148 */
        PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 149 */
        0, /* PTYPE 150 */
        PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 151 */
        PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 152 */
        PKT_RX_IPV6_HDR_EXT, /* PTYPE 153 */
        /* PTYPE 154 - 255 reserved: remaining entries are
         * implicitly zero-initialized.
         */
    };

    return ip_ptype_map[ptype];
}
#endif /* RTE_NEXT_ABI */

#define I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK   0x03
#define I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID  0x01
#define I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX   0x02
#define I40E_RX_DESC_EXT_STATUS_FLEXBL_MASK   0x03
#define I40E_RX_DESC_EXT_STATUS_FLEXBL_FLEX   0x01
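
/* Extract flow director (FDIR) data, either the filter ID or flexible
 * payload bytes, from the RX descriptor into the mbuf and return the
 * matching pkt flags.
 */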
static inline uint64_t
i40e_rxd_build_fdir(volatile union i40e_rx_desc *rxdp, struct rte_mbuf *mb)
{
    uint64_t flags = 0;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
    uint16_t flexbh, flexbl;

    flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
        I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
        I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK;
    flexbl = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
        I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT) &
        I40E_RX_DESC_EXT_STATUS_FLEXBL_MASK;

    if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
        mb->hash.fdir.hi =
            rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
        flags |= PKT_RX_FDIR_ID;
    } else if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX) {
        mb->hash.fdir.hi =
            rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.flex_bytes_hi);
        flags |= PKT_RX_FDIR_FLX;
    }
    if (flexbl == I40E_RX_DESC_EXT_STATUS_FLEXBL_FLEX) {
        mb->hash.fdir.lo =
            rte_le_to_cpu_32(rxdp->wb.qword3.lo_dword.flex_bytes_lo);
        flags |= PKT_RX_FDIR_FLX;
    }
#else
    mb->hash.fdir.hi =
        rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
    flags |= PKT_RX_FDIR_ID;
#endif
    return flags;
}
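
/* Fill in the checksum offload fields of the TX data descriptor
 * (MACLEN/IPLEN/L4LEN in td_offset, command bits in td_cmd) and,
 * for tunneled packets, the context descriptor tunneling parameters.
 */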
static inline void
i40e_txd_enable_checksum(uint64_t ol_flags,
            uint32_t *td_cmd,
            uint32_t *td_offset,
            union i40e_tx_offload tx_offload,
            uint32_t *cd_tunneling)
{
    /* UDP tunneling packet TX checksum offload */
    if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {

        *td_offset |= (tx_offload.outer_l2_len >> 1)
                << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;

        if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
            *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
        else if (ol_flags & PKT_TX_OUTER_IPV4)
            *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
        else if (ol_flags & PKT_TX_OUTER_IPV6)
            *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;

        /* Now set the ctx descriptor fields */
        *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
                I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
                (tx_offload.l2_len >> 1) <<
                I40E_TXD_CTX_QW0_NATLEN_SHIFT;
    } else
        *td_offset |= (tx_offload.l2_len >> 1)
            << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;

    /* Enable L3 checksum offloads */
    if (ol_flags & PKT_TX_IP_CKSUM) {
        *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
        *td_offset |= (tx_offload.l3_len >> 2)
                << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
    } else if (ol_flags & PKT_TX_IPV4) {
        *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
        *td_offset |= (tx_offload.l3_len >> 2)
                << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
    } else if (ol_flags & PKT_TX_IPV6) {
        *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
        *td_offset |= (tx_offload.l3_len >> 2)
                << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
    }

    if (ol_flags & PKT_TX_TCP_SEG) {
        *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
        *td_offset |= (tx_offload.l4_len >> 2)
            << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
        return;
    }

    /* Enable L4 checksum offloads */
    switch (ol_flags & PKT_TX_L4_MASK) {
    case PKT_TX_TCP_CKSUM:
        *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
        *td_offset |= (sizeof(struct tcp_hdr) >> 2) <<
                I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
        break;
    case PKT_TX_SCTP_CKSUM:
        *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
        *td_offset |= (sizeof(struct sctp_hdr) >> 2) <<
                I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
        break;
    case PKT_TX_UDP_CKSUM:
        *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
        *td_offset |= (sizeof(struct udp_hdr) >> 2) <<
                I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
        break;
    default:
        break;
    }
}
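
/* Allocate a raw mbuf for RX from the given mempool. Unlike
 * rte_pktmbuf_alloc(), the returned mbuf is not fully reset; the RX
 * routines initialize only the fields they need.
 */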
static inline struct rte_mbuf *
rte_rxmbuf_alloc(struct rte_mempool *mp)
{
    struct rte_mbuf *m;

    m = __rte_mbuf_raw_alloc(mp);
    __rte_mbuf_sanity_check_raw(m, 0);

    return m;
}

/* Construct the tx flags */
static inline uint64_t
i40e_build_ctob(uint32_t td_cmd,
        uint32_t td_offset,
        unsigned int size,
        uint32_t td_tag)
{
    return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA |
            ((uint64_t)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
            ((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
            ((uint64_t)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
            ((uint64_t)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
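
/* Free TX descriptors up to the descriptor that closes the oldest
 * outstanding RS-threshold batch, once the hardware marks it done.
 * Returns 0 on success, -1 if that descriptor is not yet done.
 */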
static inline int
i40e_xmit_cleanup(struct i40e_tx_queue *txq)
{
    struct i40e_tx_entry *sw_ring = txq->sw_ring;
    volatile struct i40e_tx_desc *txd = txq->tx_ring;
    uint16_t last_desc_cleaned = txq->last_desc_cleaned;
    uint16_t nb_tx_desc = txq->nb_tx_desc;
    uint16_t desc_to_clean_to;
    uint16_t nb_tx_to_clean;

    desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
    if (desc_to_clean_to >= nb_tx_desc)
        desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);

    desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
    if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
        rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
        rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)) {
        PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
            "(port=%d queue=%d)", desc_to_clean_to,
            txq->port_id, txq->queue_id);
        return -1;
    }

    if (last_desc_cleaned > desc_to_clean_to)
        nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
                    desc_to_clean_to);
    else
        nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
                    last_desc_cleaned);

    txd[desc_to_clean_to].cmd_type_offset_bsz = 0;

    txq->last_desc_cleaned = desc_to_clean_to;
    txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);

    return 0;
}
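
/* Verify that an RX queue configuration satisfies the preconditions of
 * the bulk-allocation RX burst function. Returns 0 when usable,
 * -EINVAL otherwise.
 */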
static int
#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
check_rx_burst_bulk_alloc_preconditions(struct i40e_rx_queue *rxq)
#else
check_rx_burst_bulk_alloc_preconditions(__rte_unused struct i40e_rx_queue *rxq)
#endif
{
    int ret = 0;

#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
    if (!(rxq->rx_free_thresh >= RTE_PMD_I40E_RX_MAX_BURST)) {
        PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
                 "rxq->rx_free_thresh=%d, "
                 "RTE_PMD_I40E_RX_MAX_BURST=%d",
                 rxq->rx_free_thresh, RTE_PMD_I40E_RX_MAX_BURST);
        ret = -EINVAL;
    } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
        PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
                 "rxq->rx_free_thresh=%d, "
                 "rxq->nb_rx_desc=%d",
                 rxq->rx_free_thresh, rxq->nb_rx_desc);
        ret = -EINVAL;
    } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
        PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
                 "rxq->nb_rx_desc=%d, "
                 "rxq->rx_free_thresh=%d",
                 rxq->nb_rx_desc, rxq->rx_free_thresh);
        ret = -EINVAL;
    } else if (!(rxq->nb_rx_desc < (I40E_MAX_RING_DESC -
                RTE_PMD_I40E_RX_MAX_BURST))) {
        PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
                 "rxq->nb_rx_desc=%d, "
                 "I40E_MAX_RING_DESC=%d, "
                 "RTE_PMD_I40E_RX_MAX_BURST=%d",
                 rxq->nb_rx_desc, I40E_MAX_RING_DESC,
                 RTE_PMD_I40E_RX_MAX_BURST);
        ret = -EINVAL;
    }
#else
    ret = -EINVAL;
#endif

    return ret;
}

#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
#define I40E_LOOK_AHEAD 8
#if (I40E_LOOK_AHEAD != 8)
#error "PMD I40E: I40E_LOOK_AHEAD must be 8\n"
#endif
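
/* Scan the RX ring in batches of I40E_LOOK_AHEAD descriptors, convert
 * the completed descriptors into mbufs in the staging array, and return
 * the number of packets found ready.
 */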
static inline int
i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
{
    volatile union i40e_rx_desc *rxdp;
    struct i40e_rx_entry *rxep;
    struct rte_mbuf *mb;
    uint16_t pkt_len;
    uint64_t qword1;
    uint32_t rx_status;
    int32_t s[I40E_LOOK_AHEAD], nb_dd;
    int32_t i, j, nb_rx = 0;
    uint64_t pkt_flags;

    rxdp = &rxq->rx_ring[rxq->rx_tail];
    rxep = &rxq->sw_ring[rxq->rx_tail];

    qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
    rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) >>
            I40E_RXD_QW1_STATUS_SHIFT;

    /* Make sure there is at least 1 packet to receive */
    if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
        return 0;

    /**
     * Scan LOOK_AHEAD descriptors at a time to determine which
     * descriptors reference packets that are ready to be received.
     */
    for (i = 0; i < RTE_PMD_I40E_RX_MAX_BURST; i += I40E_LOOK_AHEAD,
            rxdp += I40E_LOOK_AHEAD, rxep += I40E_LOOK_AHEAD) {
        /* Read desc statuses backwards to avoid race condition */
        for (j = I40E_LOOK_AHEAD - 1; j >= 0; j--) {
            qword1 = rte_le_to_cpu_64(
                rxdp[j].wb.qword1.status_error_len);
            s[j] = (qword1 & I40E_RXD_QW1_STATUS_MASK) >>
                    I40E_RXD_QW1_STATUS_SHIFT;
        }

        /* Compute how many status bits were set */
        for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++)
            nb_dd += s[j] & (1 << I40E_RX_DESC_STATUS_DD_SHIFT);

        nb_rx += nb_dd;

        /* Translate descriptor info to mbuf parameters */
        for (j = 0; j < nb_dd; j++) {
            mb = rxep[j].mbuf;
            qword1 = rte_le_to_cpu_64(
                rxdp[j].wb.qword1.status_error_len);
            pkt_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
                I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
            mb->data_len = pkt_len;
            mb->pkt_len = pkt_len;
            mb->ol_flags = 0;
            i40e_rxd_to_vlan_tci(mb, &rxdp[j]);
            pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
            pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
#ifdef RTE_NEXT_ABI
            mb->packet_type =
                i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
                        I40E_RXD_QW1_PTYPE_MASK) >>
                        I40E_RXD_QW1_PTYPE_SHIFT));
#else
            pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
            mb->packet_type = (uint16_t)((qword1 &
                    I40E_RXD_QW1_PTYPE_MASK) >>
                    I40E_RXD_QW1_PTYPE_SHIFT);
#endif /* RTE_NEXT_ABI */
            if (pkt_flags & PKT_RX_RSS_HASH)
                mb->hash.rss = rte_le_to_cpu_32(
                    rxdp[j].wb.qword0.hi_dword.rss);
            if (pkt_flags & PKT_RX_FDIR)
                pkt_flags |= i40e_rxd_build_fdir(&rxdp[j], mb);

#ifdef RTE_LIBRTE_IEEE1588
            pkt_flags |= i40e_get_iee15888_flags(mb, qword1);
#endif
            mb->ol_flags |= pkt_flags;
        }

        for (j = 0; j < I40E_LOOK_AHEAD; j++)
            rxq->rx_stage[i + j] = rxep[j].mbuf;

        if (nb_dd != I40E_LOOK_AHEAD)
            break;
    }

    /* Clear software ring entries */
    for (i = 0; i < nb_rx; i++)
        rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;

    return nb_rx;
}
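
/* Copy up to nb_pkts staged mbufs into the caller's rx_pkts array */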
static inline uint16_t
i40e_rx_fill_from_stage(struct i40e_rx_queue *rxq,
            struct rte_mbuf **rx_pkts,
            uint16_t nb_pkts)
{
    uint16_t i;
    struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];

    nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);

    for (i = 0; i < nb_pkts; i++)
        rx_pkts[i] = stage[i];

    rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
    rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);

    return nb_pkts;
}
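
/* Refill the RX ring with rx_free_thresh new mbufs allocated in bulk */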
static inline int
i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
{
    volatile union i40e_rx_desc *rxdp;
    struct i40e_rx_entry *rxep;
    struct rte_mbuf *mb;
    uint16_t alloc_idx, i;
    uint64_t dma_addr;
    int diag;

    /* Allocate buffers in bulk */
    alloc_idx = (uint16_t)(rxq->rx_free_trigger -
                (rxq->rx_free_thresh - 1));
    rxep = &(rxq->sw_ring[alloc_idx]);
    diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
                    rxq->rx_free_thresh);
    if (unlikely(diag != 0)) {
        PMD_DRV_LOG(ERR, "Failed to get mbufs in bulk");
        return -ENOMEM;
    }

    rxdp = &rxq->rx_ring[alloc_idx];
    for (i = 0; i < rxq->rx_free_thresh; i++) {
        if (likely(i < (rxq->rx_free_thresh - 1)))
            /* Prefetch next mbuf */
            rte_prefetch0(rxep[i + 1].mbuf);

        mb = rxep[i].mbuf;
        rte_mbuf_refcnt_set(mb, 1);
        mb->next = NULL;
        mb->data_off = RTE_PKTMBUF_HEADROOM;
        mb->nb_segs = 1;
        mb->port = rxq->port_id;
        dma_addr = rte_cpu_to_le_64(
            RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
        rxdp[i].read.hdr_addr = 0;
        rxdp[i].read.pkt_addr = dma_addr;
    }

    /* Update rx tail register */
    rte_wmb();
    I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);

    rxq->rx_free_trigger =
        (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
    if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
        rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);

    return 0;
}
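
/* Receive at most RTE_PMD_I40E_RX_MAX_BURST packets, serving them from
 * the staging array and rescanning/refilling the HW ring as needed.
 */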
static inline uint16_t
rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
    struct i40e_rx_queue *rxq = (struct i40e_rx_queue *)rx_queue;
    uint16_t nb_rx = 0;

    if (!nb_pkts)
        return 0;

    if (rxq->rx_nb_avail)
        return i40e_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);

    nb_rx = (uint16_t)i40e_rx_scan_hw_ring(rxq);
    rxq->rx_next_avail = 0;
    rxq->rx_nb_avail = nb_rx;
    rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);

    if (rxq->rx_tail > rxq->rx_free_trigger) {
        if (i40e_rx_alloc_bufs(rxq) != 0) {
            uint16_t i, j;

            PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
                   "port_id=%u, queue_id=%u",
                   rxq->port_id, rxq->queue_id);
            rxq->rx_nb_avail = 0;
            rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
            for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
                rxq->sw_ring[j].mbuf = rxq->rx_stage[i];

            return 0;
        }
    }

    if (rxq->rx_tail >= rxq->nb_rx_desc)
        rxq->rx_tail = 0;

    if (rxq->rx_nb_avail)
        return i40e_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);

    return 0;
}
static uint16_t
i40e_recv_pkts_bulk_alloc(void *rx_queue,
              struct rte_mbuf **rx_pkts,
              uint16_t nb_pkts)
{
    uint16_t nb_rx = 0, n, count;

    if (unlikely(nb_pkts == 0))
        return 0;

    if (likely(nb_pkts <= RTE_PMD_I40E_RX_MAX_BURST))
        return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);

    while (nb_pkts) {
        n = RTE_MIN(nb_pkts, RTE_PMD_I40E_RX_MAX_BURST);
        count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
        nb_rx = (uint16_t)(nb_rx + count);
        nb_pkts = (uint16_t)(nb_pkts - count);
        if (count < n)
            break;
    }

    return nb_rx;
}
#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
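
/* Receive a burst of packets, one descriptor (single mbuf) per packet */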
uint16_t
i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
    struct i40e_rx_queue *rxq;
    volatile union i40e_rx_desc *rx_ring;
    volatile union i40e_rx_desc *rxdp;
    union i40e_rx_desc rxd;
    struct i40e_rx_entry *sw_ring;
    struct i40e_rx_entry *rxe;
    struct rte_mbuf *rxm;
    struct rte_mbuf *nmb;
    uint16_t nb_rx;
    uint32_t rx_status;
    uint64_t qword1;
    uint16_t rx_packet_len;
    uint16_t rx_id, nb_hold;
    uint64_t dma_addr;
    uint64_t pkt_flags;

    nb_rx = 0;
    nb_hold = 0;
    rxq = rx_queue;
    rx_id = rxq->rx_tail;
    rx_ring = rxq->rx_ring;
    sw_ring = rxq->sw_ring;

    while (nb_rx < nb_pkts) {
        rxdp = &rx_ring[rx_id];
        qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
        rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK)
                >> I40E_RXD_QW1_STATUS_SHIFT;

        /* Check the DD bit first */
        if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
            break;

        nmb = rte_rxmbuf_alloc(rxq->mp);
        if (unlikely(!nmb))
            break;
        rxd = *rxdp;

        nb_hold++;
        rxe = &sw_ring[rx_id];
        rx_id++;
        if (unlikely(rx_id == rxq->nb_rx_desc))
            rx_id = 0;

        /* Prefetch next mbuf */
        rte_prefetch0(sw_ring[rx_id].mbuf);

        /**
         * When next RX descriptor is on a cache line boundary,
         * prefetch the next 4 RX descriptors and next 8 pointers
         * to mbufs.
         */
        if ((rx_id & 0x3) == 0) {
            rte_prefetch0(&rx_ring[rx_id]);
            rte_prefetch0(&sw_ring[rx_id]);
        }
        rxm = rxe->mbuf;
        rxe->mbuf = nmb;
        dma_addr =
            rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
        rxdp->read.hdr_addr = 0;
        rxdp->read.pkt_addr = dma_addr;

        rx_packet_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
                I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;

        rxm->data_off = RTE_PKTMBUF_HEADROOM;
        rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
        rxm->nb_segs = 1;
        rxm->next = NULL;
        rxm->pkt_len = rx_packet_len;
        rxm->data_len = rx_packet_len;
        rxm->port = rxq->port_id;

        rxm->ol_flags = 0;
        i40e_rxd_to_vlan_tci(rxm, &rxd);
        pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
        pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
#ifdef RTE_NEXT_ABI
        rxm->packet_type =
            i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
                I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT));
#else
        pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
        rxm->packet_type = (uint16_t)((qword1 & I40E_RXD_QW1_PTYPE_MASK) >>
                I40E_RXD_QW1_PTYPE_SHIFT);
#endif /* RTE_NEXT_ABI */
        if (pkt_flags & PKT_RX_RSS_HASH)
            rxm->hash.rss =
                rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
        if (pkt_flags & PKT_RX_FDIR)
            pkt_flags |= i40e_rxd_build_fdir(&rxd, rxm);

#ifdef RTE_LIBRTE_IEEE1588
        pkt_flags |= i40e_get_iee15888_flags(rxm, qword1);
#endif
        rxm->ol_flags |= pkt_flags;

        rx_pkts[nb_rx++] = rxm;
    }
    rxq->rx_tail = rx_id;

    /**
     * If the number of free RX descriptors is greater than the RX free
     * threshold of the queue, advance the receive tail register of queue.
     * Update that register with the value of the last processed RX
     * descriptor minus 1.
     */
    nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
    if (nb_hold > rxq->rx_free_thresh) {
        rx_id = (uint16_t)((rx_id == 0) ?
            (rxq->nb_rx_desc - 1) : (rx_id - 1));
        I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
        nb_hold = 0;
    }
    rxq->nb_rx_hold = nb_hold;

    return nb_rx;
}
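
/* Receive a burst of packets that may each span multiple descriptors,
 * chaining the mbuf segments and handling CRC stripping on the last one.
 */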
uint16_t
i40e_recv_scattered_pkts(void *rx_queue,
             struct rte_mbuf **rx_pkts,
             uint16_t nb_pkts)
{
    struct i40e_rx_queue *rxq = rx_queue;
    volatile union i40e_rx_desc *rx_ring = rxq->rx_ring;
    volatile union i40e_rx_desc *rxdp;
    union i40e_rx_desc rxd;
    struct i40e_rx_entry *sw_ring = rxq->sw_ring;
    struct i40e_rx_entry *rxe;
    struct rte_mbuf *first_seg = rxq->pkt_first_seg;
    struct rte_mbuf *last_seg = rxq->pkt_last_seg;
    struct rte_mbuf *nmb, *rxm;
    uint16_t rx_id = rxq->rx_tail;
    uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
    uint32_t rx_status;
    uint64_t qword1;
    uint64_t dma_addr;
    uint64_t pkt_flags;

    while (nb_rx < nb_pkts) {
        rxdp = &rx_ring[rx_id];
        qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
        rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) >>
                I40E_RXD_QW1_STATUS_SHIFT;

        /* Check the DD bit */
        if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
            break;

        nmb = rte_rxmbuf_alloc(rxq->mp);
        if (unlikely(!nmb))
            break;
        rxd = *rxdp;
        nb_hold++;
        rxe = &sw_ring[rx_id];
        rx_id++;
        if (rx_id == rxq->nb_rx_desc)
            rx_id = 0;

        /* Prefetch next mbuf */
        rte_prefetch0(sw_ring[rx_id].mbuf);

        /**
         * When next RX descriptor is on a cache line boundary,
         * prefetch the next 4 RX descriptors and next 8 pointers
         * to mbufs.
         */
        if ((rx_id & 0x3) == 0) {
            rte_prefetch0(&rx_ring[rx_id]);
            rte_prefetch0(&sw_ring[rx_id]);
        }

        rxm = rxe->mbuf;
        rxe->mbuf = nmb;
        dma_addr =
            rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));

        /* Set data buffer address and data length of the mbuf */
        rxdp->read.hdr_addr = 0;
        rxdp->read.pkt_addr = dma_addr;
        rx_packet_len = (qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
                    I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
        rxm->data_len = rx_packet_len;
        rxm->data_off = RTE_PKTMBUF_HEADROOM;

        /**
         * If this is the first buffer of the received packet, set the
         * pointer to the first mbuf of the packet and initialize its
         * context. Otherwise, update the total length and the number
         * of segments of the current scattered packet, and update the
         * pointer to the last mbuf of the current packet.
         */
        if (!first_seg) {
            first_seg = rxm;
            first_seg->nb_segs = 1;
            first_seg->pkt_len = rx_packet_len;
        } else {
            first_seg->pkt_len =
                (uint16_t)(first_seg->pkt_len +
                        rx_packet_len);
            first_seg->nb_segs++;
            last_seg->next = rxm;
        }

        /**
         * If this is not the last buffer of the received packet,
         * update the pointer to the last mbuf of the current scattered
         * packet and continue to parse the RX ring.
         */
        if (!(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT))) {
            last_seg = rxm;
            continue;
        }

        /**
         * This is the last buffer of the received packet. If the CRC
         * is not stripped by the hardware:
         *  - Subtract the CRC length from the total packet length.
         *  - If the last buffer only contains the whole CRC or a part
         *  of it, free the mbuf associated to the last buffer. If part
         *  of the CRC is also contained in the previous mbuf, subtract
         *  the length of that CRC part from the data length of the
         *  previous mbuf.
         */
        rxm->next = NULL;
        if (unlikely(rxq->crc_len > 0)) {
            first_seg->pkt_len -= ETHER_CRC_LEN;
            if (rx_packet_len <= ETHER_CRC_LEN) {
                rte_pktmbuf_free_seg(rxm);
                first_seg->nb_segs--;
                last_seg->data_len =
                    (uint16_t)(last_seg->data_len -
                    (ETHER_CRC_LEN - rx_packet_len));
                last_seg->next = NULL;
            } else
                rxm->data_len = (uint16_t)(rx_packet_len -
                            ETHER_CRC_LEN);
        }

        first_seg->port = rxq->port_id;
        first_seg->ol_flags = 0;
        i40e_rxd_to_vlan_tci(first_seg, &rxd);
        pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
        pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
#ifdef RTE_NEXT_ABI
        first_seg->packet_type =
            i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
                I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT));
#else
        pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
        first_seg->packet_type = (uint16_t)((qword1 &
                I40E_RXD_QW1_PTYPE_MASK) >>
                I40E_RXD_QW1_PTYPE_SHIFT);
#endif /* RTE_NEXT_ABI */
        if (pkt_flags & PKT_RX_RSS_HASH)
            first_seg->hash.rss =
                rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
        if (pkt_flags & PKT_RX_FDIR)
            pkt_flags |= i40e_rxd_build_fdir(&rxd, rxm);

#ifdef RTE_LIBRTE_IEEE1588
        pkt_flags |= i40e_get_iee15888_flags(first_seg, qword1);
#endif
        first_seg->ol_flags |= pkt_flags;

        /* Prefetch data of first segment, if configured to do so. */
        rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
                      first_seg->data_off));
        rx_pkts[nb_rx++] = first_seg;
        first_seg = NULL;
    }

    /* Record index of the next RX descriptor to probe. */
    rxq->rx_tail = rx_id;
    rxq->pkt_first_seg = first_seg;
    rxq->pkt_last_seg = last_seg;

    /**
     * If the number of free RX descriptors is greater than the RX free
     * threshold of the queue, advance the Receive Descriptor Tail (RDT)
     * register. Update the RDT with the value of the last processed RX
     * descriptor minus 1, to guarantee that the RDT register is never
     * equal to the RDH register, which creates a "full" ring situation
     * from the hardware point of view.
     */
    nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
    if (nb_hold > rxq->rx_free_thresh) {
        rx_id = (uint16_t)(rx_id == 0 ?
            (rxq->nb_rx_desc - 1) : (rx_id - 1));
        I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
        nb_hold = 0;
    }
    rxq->nb_rx_hold = nb_hold;

    return nb_rx;
}

/* Check if the context descriptor is needed for TX offloading */
static inline uint16_t
i40e_calc_context_desc(uint64_t flags)
{
    static uint64_t mask = PKT_TX_OUTER_IP_CKSUM |
        PKT_TX_TCP_SEG |
        PKT_TX_QINQ_PKT;

#ifdef RTE_LIBRTE_IEEE1588
    mask |= PKT_TX_IEEE1588_TMST;
#endif
    return ((flags & mask) ? 1 : 0);
}

/* Set the i40e TSO context descriptor */
static inline uint64_t
i40e_set_tso_ctx(struct rte_mbuf *mbuf, union i40e_tx_offload tx_offload)
{
    uint64_t ctx_desc = 0;
    uint32_t cd_cmd, hdr_len, cd_tso_len;

    if (!tx_offload.l4_len) {
        PMD_DRV_LOG(DEBUG, "L4 length set to 0");
        return ctx_desc;
    }

    /**
     * in case of tunneling packet, the outer_l2_len and
     * outer_l3_len must be 0.
     */
    hdr_len = tx_offload.outer_l2_len +
        tx_offload.outer_l3_len +
        tx_offload.l2_len +
        tx_offload.l3_len +
        tx_offload.l4_len;

    cd_cmd = I40E_TX_CTX_DESC_TSO;
    cd_tso_len = mbuf->pkt_len - hdr_len;
    ctx_desc |= ((uint64_t)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
        ((uint64_t)cd_tso_len <<
         I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
        ((uint64_t)mbuf->tso_segsz <<
         I40E_TXD_CTX_QW1_MSS_SHIFT);

    return ctx_desc;
}
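
/* Full-featured transmit function: supports multi-segment packets,
 * VLAN/QinQ insertion, and checksum/TSO offloads via an optional
 * context descriptor.
 */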
1822 i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1824 struct i40e_tx_queue *txq;
1825 struct i40e_tx_entry *sw_ring;
1826 struct i40e_tx_entry *txe, *txn;
1827 volatile struct i40e_tx_desc *txd;
1828 volatile struct i40e_tx_desc *txr;
1829 struct rte_mbuf *tx_pkt;
1830 struct rte_mbuf *m_seg;
1831 uint32_t cd_tunneling_params;
1843 uint64_t buf_dma_addr;
1844 union i40e_tx_offload tx_offload = {0};
1847 sw_ring = txq->sw_ring;
1849 tx_id = txq->tx_tail;
1850 txe = &sw_ring[tx_id];
1852 /* Check if the descriptor ring needs to be cleaned. */
1853 if (txq->nb_tx_free < txq->tx_free_thresh)
1854 i40e_xmit_cleanup(txq);
1856 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1862 tx_pkt = *tx_pkts++;
1863 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
1865 ol_flags = tx_pkt->ol_flags;
1866 tx_offload.l2_len = tx_pkt->l2_len;
1867 tx_offload.l3_len = tx_pkt->l3_len;
1868 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
1869 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
1870 tx_offload.l4_len = tx_pkt->l4_len;
1871 tx_offload.tso_segsz = tx_pkt->tso_segsz;
1873 /* Calculate the number of context descriptors needed. */
1874 nb_ctx = i40e_calc_context_desc(ol_flags);
1877 * The number of descriptors that must be allocated for
1878 * a packet equals to the number of the segments of that
1879 * packet plus 1 context descriptor if needed.
1881 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
1882 tx_last = (uint16_t)(tx_id + nb_used - 1);
1885 if (tx_last >= txq->nb_tx_desc)
1886 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
1888 if (nb_used > txq->nb_tx_free) {
1889 if (i40e_xmit_cleanup(txq) != 0) {
1894 if (unlikely(nb_used > txq->tx_rs_thresh)) {
1895 while (nb_used > txq->nb_tx_free) {
1896 if (i40e_xmit_cleanup(txq) != 0) {
1905 /* Descriptor based VLAN insertion */
1906 if (ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
1907 tx_flags |= tx_pkt->vlan_tci <<
1908 I40E_TX_FLAG_L2TAG1_SHIFT;
1909 tx_flags |= I40E_TX_FLAG_INSERT_VLAN;
1910 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
1911 td_tag = (tx_flags & I40E_TX_FLAG_L2TAG1_MASK) >>
1912 I40E_TX_FLAG_L2TAG1_SHIFT;
1915 /* Always enable CRC offload insertion */
1916 td_cmd |= I40E_TX_DESC_CMD_ICRC;
1918 /* Enable checksum offloading */
1919 cd_tunneling_params = 0;
1920 if (ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK) {
1921 i40e_txd_enable_checksum(ol_flags, &td_cmd, &td_offset,
1922 tx_offload, &cd_tunneling_params);
1926 /* Setup TX context descriptor if required */
1927 volatile struct i40e_tx_context_desc *ctx_txd =
1928 (volatile struct i40e_tx_context_desc *)\
1930 uint16_t cd_l2tag2 = 0;
1931 uint64_t cd_type_cmd_tso_mss =
1932 I40E_TX_DESC_DTYPE_CONTEXT;
1934 txn = &sw_ring[txe->next_id];
1935 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
1936 if (txe->mbuf != NULL) {
1937 rte_pktmbuf_free_seg(txe->mbuf);
1941 /* TSO enabled means no timestamp */
1942 if (ol_flags & PKT_TX_TCP_SEG)
1943 cd_type_cmd_tso_mss |=
1944 i40e_set_tso_ctx(tx_pkt, tx_offload);
1946 #ifdef RTE_LIBRTE_IEEE1588
1947 if (ol_flags & PKT_TX_IEEE1588_TMST)
1948 cd_type_cmd_tso_mss |=
1949 ((uint64_t)I40E_TX_CTX_DESC_TSYN <<
1950 I40E_TXD_CTX_QW1_CMD_SHIFT);
1954 ctx_txd->tunneling_params =
1955 rte_cpu_to_le_32(cd_tunneling_params);
1956 if (ol_flags & PKT_TX_QINQ_PKT) {
1957 cd_l2tag2 = tx_pkt->vlan_tci_outer;
1958 cd_type_cmd_tso_mss |=
1959 ((uint64_t)I40E_TX_CTX_DESC_IL2TAG2 <<
1960 I40E_TXD_CTX_QW1_CMD_SHIFT);
1962 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
1963 ctx_txd->type_cmd_tso_mss =
1964 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
1966 PMD_TX_LOG(DEBUG, "mbuf: %p, TCD[%u]:\n"
1967 "tunneling_params: %#x;\n"
1970 "type_cmd_tso_mss: %#"PRIx64";\n",
1972 ctx_txd->tunneling_params,
1975 ctx_txd->type_cmd_tso_mss);
1977 txe->last_id = tx_last;
1978 tx_id = txe->next_id;
1985 txn = &sw_ring[txe->next_id];
1988 rte_pktmbuf_free_seg(txe->mbuf);
1991 /* Setup TX Descriptor */
1992 slen = m_seg->data_len;
1993 buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
1995 PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
1996 "buf_dma_addr: %#"PRIx64";\n"
2001 tx_pkt, tx_id, buf_dma_addr,
2002 td_cmd, td_offset, slen, td_tag);
2004 txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
2005 txd->cmd_type_offset_bsz = i40e_build_ctob(td_cmd,
2006 td_offset, slen, td_tag);
2007 txe->last_id = tx_last;
2008 tx_id = txe->next_id;
2010 m_seg = m_seg->next;
2011 } while (m_seg != NULL);
2013 /* The last packet data descriptor needs End Of Packet (EOP) */
2014 td_cmd |= I40E_TX_DESC_CMD_EOP;
2015 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
2016 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
2018 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
2019 PMD_TX_FREE_LOG(DEBUG,
2020 "Setting RS bit on TXD id="
2021 "%4u (port=%d queue=%d)",
2022 tx_last, txq->port_id, txq->queue_id);
2024 td_cmd |= I40E_TX_DESC_CMD_RS;
2026 /* Update txq RS bit counters */
2027 txq->nb_tx_used = 0;
2030 txd->cmd_type_offset_bsz |=
2031 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2032 I40E_TXD_QW1_CMD_SHIFT);
2038 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
2039 (unsigned) txq->port_id, (unsigned) txq->queue_id,
2040 (unsigned) tx_id, (unsigned) nb_tx);
2042 I40E_PCI_REG_WRITE(txq->qtx_tail, tx_id);
2043 txq->tx_tail = tx_id;
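
/*
 * Hedged usage sketch, not part of the driver: applications normally reach
 * the transmit path above through rte_eth_tx_burst(). The helper name and
 * the retry policy below are illustrative assumptions only.
 */
static inline uint16_t
example_send_burst(uint8_t port_id, uint16_t queue_id,
		   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t sent = 0;

	/*
	 * rte_eth_tx_burst() may accept fewer packets than requested when
	 * the descriptor ring runs short of free entries; retry the rest.
	 */
	while (sent < nb_pkts)
		sent += rte_eth_tx_burst(port_id, queue_id,
					 &pkts[sent], nb_pkts - sent);
	return sent;
}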
2048 static inline int __attribute__((always_inline))
2049 i40e_tx_free_bufs(struct i40e_tx_queue *txq)
2051 struct i40e_tx_entry *txep;
2054 if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
2055 rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
2056 rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
2059 txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
2061 for (i = 0; i < txq->tx_rs_thresh; i++)
2062 rte_prefetch0((txep + i)->mbuf);
2064 if (!(txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT)) {
2065 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2066 rte_mempool_put(txep->mbuf->pool, txep->mbuf);
2070 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2071 rte_pktmbuf_free_seg(txep->mbuf);
2076 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
2077 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
2078 if (txq->tx_next_dd >= txq->nb_tx_desc)
2079 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2081 return txq->tx_rs_thresh;
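
/*
 * Illustrative arithmetic for the cleanup above (ring size assumed, not
 * taken from this file): with tx_rs_thresh = 32 and a 512-entry ring,
 * tx_next_dd visits descriptors 31, 63, ..., 511 and then wraps back to
 * 31, so only one DD bit is polled per 32 transmitted descriptors and
 * mbufs are recycled in batches of 32.
 */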
2084 #define I40E_TD_CMD (I40E_TX_DESC_CMD_ICRC |\
2085 I40E_TX_DESC_CMD_EOP)
2087 /* Populate 4 descriptors with data from 4 mbufs */
2089 tx4(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
2094 for (i = 0; i < 4; i++, txdp++, pkts++) {
2095 dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
2096 txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
2097 txdp->cmd_type_offset_bsz =
2098 i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
2099 (*pkts)->data_len, 0);
2103 /* Populate 1 descriptor with data from 1 mbuf */
2105 tx1(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
2109 dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
2110 txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
2111 txdp->cmd_type_offset_bsz =
2112 i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
2113 (*pkts)->data_len, 0);
2116 /* Fill hardware descriptor ring with mbuf data */
2118 i40e_tx_fill_hw_ring(struct i40e_tx_queue *txq,
2119 struct rte_mbuf **pkts,
2122 volatile struct i40e_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
2123 struct i40e_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
2124 const int N_PER_LOOP = 4;
2125 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
2126 int mainpart, leftover;
2129 mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
2130 leftover = (nb_pkts & ((uint32_t) N_PER_LOOP_MASK));
2131 for (i = 0; i < mainpart; i += N_PER_LOOP) {
2132 for (j = 0; j < N_PER_LOOP; ++j) {
2133 (txep + i + j)->mbuf = *(pkts + i + j);
2135 tx4(txdp + i, pkts + i);
2137 if (unlikely(leftover > 0)) {
2138 for (i = 0; i < leftover; ++i) {
2139 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
2140 tx1(txdp + mainpart + i, pkts + mainpart + i);
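
/*
 * Worked example for the split above: with nb_pkts = 7, mainpart =
 * 7 & ~3 = 4 and leftover = 7 & 3 = 3, so one tx4() call fills
 * descriptors 0-3 and three tx1() calls fill descriptors 4-6.
 */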
2145 static inline uint16_t
2146 tx_xmit_pkts(struct i40e_tx_queue *txq,
2147 struct rte_mbuf **tx_pkts,
2150 volatile struct i40e_tx_desc *txr = txq->tx_ring;
2154 * Begin scanning the H/W ring for done descriptors when the number
2155 * of available descriptors drops below tx_free_thresh. For each done
2156 * descriptor, free the associated buffer.
2158 if (txq->nb_tx_free < txq->tx_free_thresh)
2159 i40e_tx_free_bufs(txq);
2161 /* Use only as many descriptors as are available */
2162 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
2163 if (unlikely(!nb_pkts))
2166 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
2167 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
2168 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
2169 i40e_tx_fill_hw_ring(txq, tx_pkts, n);
2170 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2171 rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
2172 I40E_TXD_QW1_CMD_SHIFT);
2173 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2177 /* Fill hardware descriptor ring with mbuf data */
2178 i40e_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
2179 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
2181 /* Determine whether the RS bit needs to be set */
2182 if (txq->tx_tail > txq->tx_next_rs) {
2183 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2184 rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
2185 I40E_TXD_QW1_CMD_SHIFT);
2187 (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
2188 if (txq->tx_next_rs >= txq->nb_tx_desc)
2189 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2192 if (txq->tx_tail >= txq->nb_tx_desc)
2195 /* Update the tx tail register */
2197 I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
2203 i40e_xmit_pkts_simple(void *tx_queue,
2204 struct rte_mbuf **tx_pkts,
2209 if (likely(nb_pkts <= I40E_TX_MAX_BURST))
2210 return tx_xmit_pkts((struct i40e_tx_queue *)tx_queue,
2214 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
2217 ret = tx_xmit_pkts((struct i40e_tx_queue *)tx_queue,
2218 &tx_pkts[nb_tx], num);
2219 nb_tx = (uint16_t)(nb_tx + ret);
2220 nb_pkts = (uint16_t)(nb_pkts - ret);
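
/*
 * Illustrative example of the chunking above (assuming each burst is
 * fully accepted): a request for 100 packets is split into bursts of
 * 32, 32, 32 and 4, so that tx_xmit_pkts() never sees more than
 * I40E_TX_MAX_BURST packets at once.
 */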
2229 * Find the VSI the queue belongs to. 'queue_idx' is the queue index the
2230 * application uses, assuming the indexes are sequential. From the
2231 * driver's perspective they are not. For example, q0 belongs to the FDIR
2232 * VSI, q1-q64 to the MAIN VSI, q65-q96 to SRIOV VSIs and q97-q128 to
2233 * VMDQ VSIs. An application running on the host can use q1-q64 and
2234 * q97-q128, 96 queues in total, addressed as queue_idx 0 to 95, while
2235 * the real queue indexes differ. This function maps a queue_idx to the
2236 * VSI the queue belongs to.
2238 static struct i40e_vsi*
2239 i40e_pf_get_vsi_by_qindex(struct i40e_pf *pf, uint16_t queue_idx)
2241 /* the queue in MAIN VSI range */
2242 if (queue_idx < pf->main_vsi->nb_qps)
2243 return pf->main_vsi;
2245 queue_idx -= pf->main_vsi->nb_qps;
2247 /* queue_idx is beyond the VMDQ VSIs range */
2248 if (queue_idx > pf->nb_cfg_vmdq_vsi * pf->vmdq_nb_qps - 1) {
2249 PMD_INIT_LOG(ERR, "queue_idx out of range. VMDQ configured?");
2253 return pf->vmdq[queue_idx / pf->vmdq_nb_qps].vsi;
2257 i40e_get_queue_offset_by_qindex(struct i40e_pf *pf, uint16_t queue_idx)
2259 /* the queue in MAIN VSI range */
2260 if (queue_idx < pf->main_vsi->nb_qps)
2263 /* It's VMDQ queues */
2264 queue_idx -= pf->main_vsi->nb_qps;
2266 if (pf->nb_cfg_vmdq_vsi)
2267 return queue_idx % pf->vmdq_nb_qps;
2269 PMD_INIT_LOG(ERR, "Failed to get queue offset");
2270 return (uint16_t)(-1);
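
/*
 * Worked example for the two mappings above (values assumed for
 * illustration): with main_vsi->nb_qps = 64 and vmdq_nb_qps = 4,
 * application queue_idx 70 falls past the MAIN VSI range, so
 * i40e_pf_get_vsi_by_qindex() returns pf->vmdq[(70 - 64) / 4].vsi, i.e.
 * pf->vmdq[1].vsi, and i40e_get_queue_offset_by_qindex() returns
 * (70 - 64) % 4 = 2.
 */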
2275 i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2277 struct i40e_rx_queue *rxq;
2279 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2281 PMD_INIT_FUNC_TRACE();
2283 if (rx_queue_id < dev->data->nb_rx_queues) {
2284 rxq = dev->data->rx_queues[rx_queue_id];
2286 err = i40e_alloc_rx_queue_mbufs(rxq);
2288 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
2294 /* Init the RX tail register. */
2295 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
2297 err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);
2300 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
2303 i40e_rx_queue_release_mbufs(rxq);
2304 i40e_reset_rx_queue(rxq);
2312 i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2314 struct i40e_rx_queue *rxq;
2316 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2318 if (rx_queue_id < dev->data->nb_rx_queues) {
2319 rxq = dev->data->rx_queues[rx_queue_id];
2322 * rx_queue_id is the queue id the application refers to,
2323 * while rxq->reg_idx is the real queue index.
2325 err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE);
2328 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
2332 i40e_rx_queue_release_mbufs(rxq);
2333 i40e_reset_rx_queue(rxq);
2340 i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
2343 struct i40e_tx_queue *txq;
2344 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2346 PMD_INIT_FUNC_TRACE();
2348 if (tx_queue_id < dev->data->nb_tx_queues) {
2349 txq = dev->data->tx_queues[tx_queue_id];
2352 * tx_queue_id is the queue id the application refers to,
2353 * while txq->reg_idx is the real queue index.
2355 err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE);
2357 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
2365 i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
2367 struct i40e_tx_queue *txq;
2369 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2371 if (tx_queue_id < dev->data->nb_tx_queues) {
2372 txq = dev->data->tx_queues[tx_queue_id];
2375 * tx_queue_id is the queue id the application refers to,
2376 * while txq->reg_idx is the real queue index.
2378 err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE);
2381 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
2386 i40e_tx_queue_release_mbufs(txq);
2387 i40e_reset_tx_queue(txq);
2394 i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
2397 unsigned int socket_id,
2398 const struct rte_eth_rxconf *rx_conf,
2399 struct rte_mempool *mp)
2401 struct i40e_vsi *vsi;
2402 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2403 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2404 struct i40e_rx_queue *rxq;
2405 const struct rte_memzone *rz;
2408 int use_def_burst_func = 1;
2410 if (hw->mac.type == I40E_MAC_VF) {
2411 struct i40e_vf *vf =
2412 I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2415 vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
2418 PMD_DRV_LOG(ERR, "VSI not available or queue "
2419 "index exceeds the maximum");
2420 return I40E_ERR_PARAM;
2422 if (((nb_desc * sizeof(union i40e_rx_desc)) % I40E_ALIGN) != 0 ||
2423 (nb_desc > I40E_MAX_RING_DESC) ||
2424 (nb_desc < I40E_MIN_RING_DESC)) {
2425 PMD_DRV_LOG(ERR, "Number (%u) of receive descriptors is "
2426 "invalid", nb_desc);
2427 return I40E_ERR_PARAM;
2430 /* Free memory if needed */
2431 if (dev->data->rx_queues[queue_idx]) {
2432 i40e_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
2433 dev->data->rx_queues[queue_idx] = NULL;
2436 /* Allocate the rx queue data structure */
2437 rxq = rte_zmalloc_socket("i40e rx queue",
2438 sizeof(struct i40e_rx_queue),
2439 RTE_CACHE_LINE_SIZE,
2442 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2443 "rx queue data structure");
2447 rxq->nb_rx_desc = nb_desc;
2448 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2449 rxq->queue_id = queue_idx;
2450 if (hw->mac.type == I40E_MAC_VF)
2451 rxq->reg_idx = queue_idx;
2452 else /* PF device */
2453 rxq->reg_idx = vsi->base_queue +
2454 i40e_get_queue_offset_by_qindex(pf, queue_idx);
2456 rxq->port_id = dev->data->port_id;
2457 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
2459 rxq->drop_en = rx_conf->rx_drop_en;
2461 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2463 /* Allocate the maximum number of RX ring hardware descriptors. */
2464 ring_size = sizeof(union i40e_rx_desc) * I40E_MAX_RING_DESC;
2465 ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
2466 rz = i40e_ring_dma_zone_reserve(dev,
2472 i40e_dev_rx_queue_release(rxq);
2473 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
2477 /* Zero all the descriptors in the ring. */
2478 memset(rz->addr, 0, ring_size);
2480 #ifdef RTE_LIBRTE_XEN_DOM0
2481 rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
2483 rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
2486 rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
2488 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
2489 len = (uint16_t)(nb_desc + RTE_PMD_I40E_RX_MAX_BURST);
2494 /* Allocate the software ring. */
2496 rte_zmalloc_socket("i40e rx sw ring",
2497 sizeof(struct i40e_rx_entry) * len,
2498 RTE_CACHE_LINE_SIZE,
2500 if (!rxq->sw_ring) {
2501 i40e_dev_rx_queue_release(rxq);
2502 PMD_DRV_LOG(ERR, "Failed to allocate memory for SW ring");
2506 i40e_reset_rx_queue(rxq);
2508 dev->data->rx_queues[queue_idx] = rxq;
2510 use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
2512 if (!use_def_burst_func && !dev->data->scattered_rx) {
2513 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
2514 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
2515 "satisfied. Rx Burst Bulk Alloc function will be "
2516 "used on port=%d, queue=%d.",
2517 rxq->port_id, rxq->queue_id);
2518 dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
2519 #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
2521 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
2522 "not satisfied, Scattered Rx is requested, "
2523 "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
2524 "not enabled on port=%d, queue=%d.",
2525 rxq->port_id, rxq->queue_id);
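
/*
 * Hedged usage sketch, not part of the driver: an application reaches
 * i40e_dev_rx_queue_setup() through rte_eth_rx_queue_setup() as below.
 * The helper name, pool sizing and descriptor count are illustrative
 * assumptions.
 */
static inline int
example_setup_rx_queue(uint8_t port_id, uint16_t queue_id,
		       unsigned int socket_id)
{
	struct rte_mempool *mp;

	/* 2048-byte data rooms plus standard headroom for the PMD. */
	mp = rte_pktmbuf_pool_create("example_rx_pool", 4096, 256, 0,
				     2048 + RTE_PKTMBUF_HEADROOM, socket_id);
	if (mp == NULL)
		return -1;

	/* 1024 descriptors; a NULL rx_conf selects the driver defaults. */
	return rte_eth_rx_queue_setup(port_id, queue_id, 1024,
				      socket_id, NULL, mp);
}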
2532 i40e_dev_rx_queue_release(void *rxq)
2534 struct i40e_rx_queue *q = (struct i40e_rx_queue *)rxq;
2537 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
2541 i40e_rx_queue_release_mbufs(q);
2542 rte_free(q->sw_ring);
2547 i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2549 #define I40E_RXQ_SCAN_INTERVAL 4
2550 volatile union i40e_rx_desc *rxdp;
2551 struct i40e_rx_queue *rxq;
2554 if (unlikely(rx_queue_id >= dev->data->nb_rx_queues)) {
2555 PMD_DRV_LOG(ERR, "Invalid RX queue id %u", rx_queue_id);
2559 rxq = dev->data->rx_queues[rx_queue_id];
2560 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
2561 while ((desc < rxq->nb_rx_desc) &&
2562 ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
2563 I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT) &
2564 (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
2566 * Check the DD bit of one RX descriptor in each group of 4,
2567 * to avoid checking too frequently and degrading performance.
2570 desc += I40E_RXQ_SCAN_INTERVAL;
2571 rxdp += I40E_RXQ_SCAN_INTERVAL;
2572 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2573 rxdp = &(rxq->rx_ring[rxq->rx_tail +
2574 desc - rxq->nb_rx_desc]);
2581 i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
2583 volatile union i40e_rx_desc *rxdp;
2584 struct i40e_rx_queue *rxq = rx_queue;
2588 if (unlikely(offset >= rxq->nb_rx_desc)) {
2589 PMD_DRV_LOG(ERR, "Invalid RX descriptor offset %u", offset);
2593 desc = rxq->rx_tail + offset;
2594 if (desc >= rxq->nb_rx_desc)
2595 desc -= rxq->nb_rx_desc;
2597 rxdp = &(rxq->rx_ring[desc]);
2599 ret = !!(((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
2600 I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT) &
2601 (1 << I40E_RX_DESC_STATUS_DD_SHIFT));
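
/*
 * Hedged usage sketch: this handler is reached through the ethdev API
 * rte_eth_rx_descriptor_done(), e.g.
 *
 *	if (rte_eth_rx_descriptor_done(port_id, queue_id, 64) == 1)
 *		; /-* the descriptor 64 entries past the current
 *		      software position has been written back *-/
 *
 * which lets an application estimate how far the NIC has written into
 * the ring without actually receiving the packets.
 */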
2607 i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
2610 unsigned int socket_id,
2611 const struct rte_eth_txconf *tx_conf)
2613 struct i40e_vsi *vsi;
2614 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2615 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2616 struct i40e_tx_queue *txq;
2617 const struct rte_memzone *tz;
2619 uint16_t tx_rs_thresh, tx_free_thresh;
2621 if (hw->mac.type == I40E_MAC_VF) {
2622 struct i40e_vf *vf =
2623 I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2626 vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
2629 PMD_DRV_LOG(ERR, "VSI is NULL, or queue index (%u) "
2630 "exceeds the maximum", queue_idx);
2631 return I40E_ERR_PARAM;
2634 if (((nb_desc * sizeof(struct i40e_tx_desc)) % I40E_ALIGN) != 0 ||
2635 (nb_desc > I40E_MAX_RING_DESC) ||
2636 (nb_desc < I40E_MIN_RING_DESC)) {
2637 PMD_DRV_LOG(ERR, "Number (%u) of transmit descriptors is "
2638 "invalid", nb_desc);
2639 return I40E_ERR_PARAM;
2643 * The following two parameters control the setting of the RS bit on
2644 * transmit descriptors. TX descriptors will have their RS bit set
2645 * after txq->tx_rs_thresh descriptors have been used. The TX
2646 * descriptor ring will be cleaned after txq->tx_free_thresh
2647 * descriptors are used or if the number of descriptors required to
2648 * transmit a packet is greater than the number of free TX descriptors.
2650 * The following constraints must be satisfied:
2651 * - tx_rs_thresh must be greater than 0.
2652 * - tx_rs_thresh must be less than the size of the ring minus 2.
2653 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
2654 * - tx_rs_thresh must be a divisor of the ring size.
2655 * - tx_free_thresh must be greater than 0.
2656 * - tx_free_thresh must be less than the size of the ring minus 3.
2658 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
2659 * race condition, hence the maximum threshold constraints. When set
2660 * to zero, the default values are used.
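 *
 * Illustrative check of these constraints (ring size assumed): with a
 * 1024-entry ring, the defaults tx_rs_thresh = 32 and tx_free_thresh = 32
 * satisfy every rule: 0 < 32, 32 < 1022, 32 <= 32, 1024 % 32 == 0 and
 * 32 < 1021.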
2662 tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
2663 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
2664 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
2665 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
2666 if (tx_rs_thresh >= (nb_desc - 2)) {
2667 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
2668 "number of TX descriptors minus 2. "
2669 "(tx_rs_thresh=%u port=%d queue=%d)",
2670 (unsigned int)tx_rs_thresh,
2671 (int)dev->data->port_id,
2673 return I40E_ERR_PARAM;
2675 if (tx_free_thresh >= (nb_desc - 3)) {
2676 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
2678 "number of TX descriptors minus 3. "
2679 "(tx_free_thresh=%u port=%d queue=%d)",
2680 (unsigned int)tx_free_thresh,
2681 (int)dev->data->port_id,
2683 return I40E_ERR_PARAM;
2685 if (tx_rs_thresh > tx_free_thresh) {
2686 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
2687 "equal to tx_free_thresh. (tx_free_thresh=%u"
2688 " tx_rs_thresh=%u port=%d queue=%d)",
2689 (unsigned int)tx_free_thresh,
2690 (unsigned int)tx_rs_thresh,
2691 (int)dev->data->port_id,
2693 return I40E_ERR_PARAM;
2695 if ((nb_desc % tx_rs_thresh) != 0) {
2696 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
2697 "number of TX descriptors. (tx_rs_thresh=%u"
2698 " port=%d queue=%d)",
2699 (unsigned int)tx_rs_thresh,
2700 (int)dev->data->port_id,
2702 return I40E_ERR_PARAM;
2704 if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
2705 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
2706 "tx_rs_thresh is greater than 1. "
2707 "(tx_rs_thresh=%u port=%d queue=%d)",
2708 (unsigned int)tx_rs_thresh,
2709 (int)dev->data->port_id,
2711 return I40E_ERR_PARAM;
2714 /* Free memory if needed. */
2715 if (dev->data->tx_queues[queue_idx]) {
2716 i40e_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
2717 dev->data->tx_queues[queue_idx] = NULL;
2720 /* Allocate the TX queue data structure. */
2721 txq = rte_zmalloc_socket("i40e tx queue",
2722 sizeof(struct i40e_tx_queue),
2723 RTE_CACHE_LINE_SIZE,
2726 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2727 "tx queue structure");
2731 /* Allocate TX hardware ring descriptors. */
2732 ring_size = sizeof(struct i40e_tx_desc) * I40E_MAX_RING_DESC;
2733 ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
2734 tz = i40e_ring_dma_zone_reserve(dev,
2740 i40e_dev_tx_queue_release(txq);
2741 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
2745 txq->nb_tx_desc = nb_desc;
2746 txq->tx_rs_thresh = tx_rs_thresh;
2747 txq->tx_free_thresh = tx_free_thresh;
2748 txq->pthresh = tx_conf->tx_thresh.pthresh;
2749 txq->hthresh = tx_conf->tx_thresh.hthresh;
2750 txq->wthresh = tx_conf->tx_thresh.wthresh;
2751 txq->queue_id = queue_idx;
2752 if (hw->mac.type == I40E_MAC_VF)
2753 txq->reg_idx = queue_idx;
2754 else /* PF device */
2755 txq->reg_idx = vsi->base_queue +
2756 i40e_get_queue_offset_by_qindex(pf, queue_idx);
2758 txq->port_id = dev->data->port_id;
2759 txq->txq_flags = tx_conf->txq_flags;
2761 txq->tx_deferred_start = tx_conf->tx_deferred_start;
2763 #ifdef RTE_LIBRTE_XEN_DOM0
2764 txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
2766 txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
2768 txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
2770 /* Allocate software ring */
2772 rte_zmalloc_socket("i40e tx sw ring",
2773 sizeof(struct i40e_tx_entry) * nb_desc,
2774 RTE_CACHE_LINE_SIZE,
2776 if (!txq->sw_ring) {
2777 i40e_dev_tx_queue_release(txq);
2778 PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring");
2782 i40e_reset_tx_queue(txq);
2784 dev->data->tx_queues[queue_idx] = txq;
2786 /* Use a simple TX queue without offloads or multi-segment packets, if possible */
2787 if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS) &&
2788 (txq->tx_rs_thresh >= I40E_TX_MAX_BURST)) {
2789 PMD_INIT_LOG(INFO, "Using simple tx path");
2790 dev->tx_pkt_burst = i40e_xmit_pkts_simple;
2792 PMD_INIT_LOG(INFO, "Using full-featured tx path");
2793 dev->tx_pkt_burst = i40e_xmit_pkts;
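
/*
 * Hedged usage sketch, not part of the driver: to land on the simple TX
 * path selected above, an application can request it via txq_flags when
 * calling rte_eth_tx_queue_setup(). The helper name and descriptor count
 * are illustrative assumptions.
 */
static inline int
example_setup_simple_tx_queue(uint8_t port_id, uint16_t queue_id,
			      unsigned int socket_id)
{
	struct rte_eth_txconf txconf = {
		/*
		 * No multi-segment packets and no offloads: together these
		 * match I40E_SIMPLE_FLAGS, so i40e_xmit_pkts_simple is used.
		 */
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
			     ETH_TXQ_FLAGS_NOOFFLOADS,
	};

	/*
	 * Zero thresholds select the driver defaults; DEFAULT_TX_RS_THRESH
	 * (32) satisfies the >= I40E_TX_MAX_BURST condition above.
	 */
	return rte_eth_tx_queue_setup(port_id, queue_id, 1024,
				      socket_id, &txconf);
}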
2800 i40e_dev_tx_queue_release(void *txq)
2802 struct i40e_tx_queue *q = (struct i40e_tx_queue *)txq;
2805 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
2809 i40e_tx_queue_release_mbufs(q);
2810 rte_free(q->sw_ring);
2814 static const struct rte_memzone *
2815 i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
2816 const char *ring_name,
2821 char z_name[RTE_MEMZONE_NAMESIZE];
2822 const struct rte_memzone *mz;
2824 snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
2825 dev->driver->pci_drv.name, ring_name,
2826 dev->data->port_id, queue_id);
2827 mz = rte_memzone_lookup(z_name);
2831 #ifdef RTE_LIBRTE_XEN_DOM0
2832 return rte_memzone_reserve_bounded(z_name, ring_size,
2833 socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
2835 return rte_memzone_reserve_aligned(z_name, ring_size,
2836 socket_id, 0, I40E_ALIGN);
2840 const struct rte_memzone *
2841 i40e_memzone_reserve(const char *name, uint32_t len, int socket_id)
2843 const struct rte_memzone *mz = NULL;
2845 mz = rte_memzone_lookup(name);
2848 #ifdef RTE_LIBRTE_XEN_DOM0
2849 mz = rte_memzone_reserve_bounded(name, len,
2850 socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
2852 mz = rte_memzone_reserve_aligned(name, len,
2853 socket_id, 0, I40E_ALIGN);
2859 i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq)
2863 if (!rxq || !rxq->sw_ring) {
2864 PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
2868 for (i = 0; i < rxq->nb_rx_desc; i++) {
2869 if (rxq->sw_ring[i].mbuf) {
2870 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2871 rxq->sw_ring[i].mbuf = NULL;
2874 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
2875 if (rxq->rx_nb_avail == 0)
2877 for (i = 0; i < rxq->rx_nb_avail; i++) {
2878 struct rte_mbuf *mbuf;
2880 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
2881 rte_pktmbuf_free_seg(mbuf);
2883 rxq->rx_nb_avail = 0;
2884 #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
2888 i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
2894 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
2898 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
2899 if (check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
2900 len = (uint16_t)(rxq->nb_rx_desc + RTE_PMD_I40E_RX_MAX_BURST);
2902 #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
2903 len = rxq->nb_rx_desc;
2905 for (i = 0; i < len * sizeof(union i40e_rx_desc); i++)
2906 ((volatile char *)rxq->rx_ring)[i] = 0;
2908 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
2909 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2910 for (i = 0; i < RTE_PMD_I40E_RX_MAX_BURST; ++i)
2911 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
2913 rxq->rx_nb_avail = 0;
2914 rxq->rx_next_avail = 0;
2915 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2916 #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
2918 rxq->nb_rx_hold = 0;
2919 rxq->pkt_first_seg = NULL;
2920 rxq->pkt_last_seg = NULL;
2924 i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
2928 if (!txq || !txq->sw_ring) {
2929 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
2933 for (i = 0; i < txq->nb_tx_desc; i++) {
2934 if (txq->sw_ring[i].mbuf) {
2935 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
2936 txq->sw_ring[i].mbuf = NULL;
2942 i40e_reset_tx_queue(struct i40e_tx_queue *txq)
2944 struct i40e_tx_entry *txe;
2945 uint16_t i, prev, size;
2948 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
2953 size = sizeof(struct i40e_tx_desc) * txq->nb_tx_desc;
2954 for (i = 0; i < size; i++)
2955 ((volatile char *)txq->tx_ring)[i] = 0;
2957 prev = (uint16_t)(txq->nb_tx_desc - 1);
2958 for (i = 0; i < txq->nb_tx_desc; i++) {
2959 volatile struct i40e_tx_desc *txd = &txq->tx_ring[i];
2961 txd->cmd_type_offset_bsz =
2962 rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE);
2965 txe[prev].next_id = i;
2969 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2970 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2973 txq->nb_tx_used = 0;
2975 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
2976 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
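
/*
 * Note on the arithmetic above: nb_tx_free is nb_tx_desc - 1, not
 * nb_tx_desc, because one descriptor is kept as a sentinel to avoid the
 * H/W race condition described in i40e_dev_tx_queue_setup().
 */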
2979 /* Init the TX queue in hardware */
2981 i40e_tx_queue_init(struct i40e_tx_queue *txq)
2983 enum i40e_status_code err = I40E_SUCCESS;
2984 struct i40e_vsi *vsi = txq->vsi;
2985 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2986 uint16_t pf_q = txq->reg_idx;
2987 struct i40e_hmc_obj_txq tx_ctx;
2990 /* clear the context structure first */
2991 memset(&tx_ctx, 0, sizeof(tx_ctx));
2992 tx_ctx.new_context = 1;
2993 tx_ctx.base = txq->tx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
2994 tx_ctx.qlen = txq->nb_tx_desc;
2996 #ifdef RTE_LIBRTE_IEEE1588
2997 tx_ctx.timesync_ena = 1;
2999 tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[0]);
3000 if (vsi->type == I40E_VSI_FDIR)
3001 tx_ctx.fd_ena = TRUE;
3003 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3004 if (err != I40E_SUCCESS) {
3005 PMD_DRV_LOG(ERR, "Failed to clear LAN TX queue context");
3009 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3010 if (err != I40E_SUCCESS) {
3011 PMD_DRV_LOG(ERR, "Failed to set LAN TX queue context");
3015 /* Now associate this queue with this PCI function */
3016 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3017 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3018 I40E_QTX_CTL_PF_INDX_MASK);
3019 I40E_WRITE_REG(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3020 I40E_WRITE_FLUSH(hw);
3022 txq->qtx_tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3028 i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq)
3030 struct i40e_rx_entry *rxe = rxq->sw_ring;
3034 for (i = 0; i < rxq->nb_rx_desc; i++) {
3035 volatile union i40e_rx_desc *rxd;
3036 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mp);
3038 if (unlikely(!mbuf)) {
3039 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
3043 rte_mbuf_refcnt_set(mbuf, 1);
3045 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
3047 mbuf->port = rxq->port_id;
3050 rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
3052 rxd = &rxq->rx_ring[i];
3053 rxd->read.pkt_addr = dma_addr;
3054 rxd->read.hdr_addr = 0;
3055 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
3056 rxd->read.rsvd1 = 0;
3057 rxd->read.rsvd2 = 0;
3058 #endif /* RTE_LIBRTE_I40E_16BYTE_RX_DESC */
3067 * Calculate the buffer length, and check the jumbo frame
3068 * and maximum packet length.
3071 i40e_rx_queue_config(struct i40e_rx_queue *rxq)
3073 struct i40e_pf *pf = I40E_VSI_TO_PF(rxq->vsi);
3074 struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
3075 struct rte_eth_dev_data *data = pf->dev_data;
3076 uint16_t buf_size, len;
3078 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
3079 RTE_PKTMBUF_HEADROOM);
3081 switch (pf->flags & (I40E_FLAG_HEADER_SPLIT_DISABLED |
3082 I40E_FLAG_HEADER_SPLIT_ENABLED)) {
3083 case I40E_FLAG_HEADER_SPLIT_ENABLED: /* Not supported */
3084 rxq->rx_hdr_len = RTE_ALIGN(I40E_RXBUF_SZ_1024,
3085 (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
3086 rxq->rx_buf_len = RTE_ALIGN(I40E_RXBUF_SZ_2048,
3087 (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
3088 rxq->hs_mode = i40e_header_split_enabled;
3090 case I40E_FLAG_HEADER_SPLIT_DISABLED:
3092 rxq->rx_hdr_len = 0;
3093 rxq->rx_buf_len = RTE_ALIGN(buf_size,
3094 (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
3095 rxq->hs_mode = i40e_header_split_none;
3099 len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
3100 rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
3101 if (data->dev_conf.rxmode.jumbo_frame == 1) {
3102 if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
3103 rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
3104 PMD_DRV_LOG(ERR, "maximum packet length must "
3105 "be larger than %u and smaller than %u,"
3106 "as jumbo frame is enabled",
3107 (uint32_t)ETHER_MAX_LEN,
3108 (uint32_t)I40E_FRAME_SIZE_MAX);
3109 return I40E_ERR_CONFIG;
3112 if (rxq->max_pkt_len < ETHER_MIN_LEN ||
3113 rxq->max_pkt_len > ETHER_MAX_LEN) {
3114 PMD_DRV_LOG(ERR, "maximum packet length must be "
3115 "larger than %u and smaller than %u, "
3116 "as jumbo frame is disabled",
3117 (uint32_t)ETHER_MIN_LEN,
3118 (uint32_t)ETHER_MAX_LEN);
3119 return I40E_ERR_CONFIG;
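
/*
 * Worked example of the sizing above (mempool and capability values
 * assumed): a data room of 2176 bytes gives buf_size = 2176 -
 * RTE_PKTMBUF_HEADROOM = 2048, already a multiple of
 * 1 << I40E_RXQ_CTX_DBUFF_SHIFT, so rx_buf_len = 2048; with
 * rx_buf_chain_len = 5 and max_rx_pkt_len = 9000, max_pkt_len =
 * RTE_MIN(5 * 2048, 9000) = 9000, which passes the jumbo-frame range
 * check.
 */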
3126 /* Init the RX queue in hardware */
3128 i40e_rx_queue_init(struct i40e_rx_queue *rxq)
3130 int err = I40E_SUCCESS;
3131 struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
3132 struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(rxq->vsi);
3133 struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
3134 uint16_t pf_q = rxq->reg_idx;
3136 struct i40e_hmc_obj_rxq rx_ctx;
3138 err = i40e_rx_queue_config(rxq);
3140 PMD_DRV_LOG(ERR, "Failed to configure RX queue");
3144 /* Clear the context structure first */
3145 memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
3146 rx_ctx.dbuff = rxq->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
3147 rx_ctx.hbuff = rxq->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
3149 rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
3150 rx_ctx.qlen = rxq->nb_rx_desc;
3151 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
3154 rx_ctx.dtype = rxq->hs_mode;
3156 rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_ALL;
3158 rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
3159 rx_ctx.rxmax = rxq->max_pkt_len;
3160 rx_ctx.tphrdesc_ena = 1;
3161 rx_ctx.tphwdesc_ena = 1;
3162 rx_ctx.tphdata_ena = 1;
3163 rx_ctx.tphhead_ena = 1;
3164 rx_ctx.lrxqthresh = 2;
3165 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
3170 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3171 if (err != I40E_SUCCESS) {
3172 PMD_DRV_LOG(ERR, "Failed to clear LAN RX queue context");
3175 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3176 if (err != I40E_SUCCESS) {
3177 PMD_DRV_LOG(ERR, "Failed to set LAN RX queue context");
3181 rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3183 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
3184 RTE_PKTMBUF_HEADROOM);
3186 /* Check if scattered RX needs to be used. */
3187 if ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
3188 dev_data->scattered_rx = 1;
3189 dev->rx_pkt_burst = i40e_recv_scattered_pkts;
3192 /* Init the RX tail register. */
3193 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
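
/*
 * Illustrative consequence of the scattered RX check above (values
 * assumed): with buf_size = 2048 and max_pkt_len = 9000,
 * 9000 + 2 * I40E_VLAN_TAG_SIZE > 2048, so frames span several mbufs
 * and the scattered receive function replaces the regular one.
 */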
3199 i40e_dev_clear_queues(struct rte_eth_dev *dev)
3203 PMD_INIT_FUNC_TRACE();
3205 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3206 i40e_tx_queue_release_mbufs(dev->data->tx_queues[i]);
3207 i40e_reset_tx_queue(dev->data->tx_queues[i]);
3210 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3211 i40e_rx_queue_release_mbufs(dev->data->rx_queues[i]);
3212 i40e_reset_rx_queue(dev->data->rx_queues[i]);
3217 i40e_dev_free_queues(struct rte_eth_dev *dev)
3221 PMD_INIT_FUNC_TRACE();
3223 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3224 i40e_dev_rx_queue_release(dev->data->rx_queues[i]);
3225 dev->data->rx_queues[i] = NULL;
3227 dev->data->nb_rx_queues = 0;
3229 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3230 i40e_dev_tx_queue_release(dev->data->tx_queues[i]);
3231 dev->data->tx_queues[i] = NULL;
3233 dev->data->nb_tx_queues = 0;
3236 #define I40E_FDIR_NUM_TX_DESC I40E_MIN_RING_DESC
3237 #define I40E_FDIR_NUM_RX_DESC I40E_MIN_RING_DESC
3239 enum i40e_status_code
3240 i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
3242 struct i40e_tx_queue *txq;
3243 const struct rte_memzone *tz = NULL;
3245 struct rte_eth_dev *dev = pf->adapter->eth_dev;
3248 PMD_DRV_LOG(ERR, "PF is not available");
3249 return I40E_ERR_BAD_PTR;
3252 /* Allocate the TX queue data structure. */
3253 txq = rte_zmalloc_socket("i40e fdir tx queue",
3254 sizeof(struct i40e_tx_queue),
3255 RTE_CACHE_LINE_SIZE,
3258 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
3259 "tx queue structure.");
3260 return I40E_ERR_NO_MEMORY;
3263 /* Allocate TX hardware ring descriptors. */
3264 ring_size = sizeof(struct i40e_tx_desc) * I40E_FDIR_NUM_TX_DESC;
3265 ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
3267 tz = i40e_ring_dma_zone_reserve(dev,
3273 i40e_dev_tx_queue_release(txq);
3274 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
3275 return I40E_ERR_NO_MEMORY;
3278 txq->nb_tx_desc = I40E_FDIR_NUM_TX_DESC;
3279 txq->queue_id = I40E_FDIR_QUEUE_ID;
3280 txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
3281 txq->vsi = pf->fdir.fdir_vsi;
3283 #ifdef RTE_LIBRTE_XEN_DOM0
3284 txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
3286 txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
3288 txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
3290 * No need to allocate a software ring or reset the FDIR program
3291 * queue; just mark the queue as configured.
3296 return I40E_SUCCESS;
3299 enum i40e_status_code
3300 i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
3302 struct i40e_rx_queue *rxq;
3303 const struct rte_memzone *rz = NULL;
3305 struct rte_eth_dev *dev = pf->adapter->eth_dev;
3308 PMD_DRV_LOG(ERR, "PF is not available");
3309 return I40E_ERR_BAD_PTR;
3312 /* Allocate the RX queue data structure. */
3313 rxq = rte_zmalloc_socket("i40e fdir rx queue",
3314 sizeof(struct i40e_rx_queue),
3315 RTE_CACHE_LINE_SIZE,
3318 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
3319 "rx queue structure.");
3320 return I40E_ERR_NO_MEMORY;
3323 /* Allocate RX hardware ring descriptors. */
3324 ring_size = sizeof(union i40e_rx_desc) * I40E_FDIR_NUM_RX_DESC;
3325 ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
3327 rz = i40e_ring_dma_zone_reserve(dev,
3333 i40e_dev_rx_queue_release(rxq);
3334 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
3335 return I40E_ERR_NO_MEMORY;
3338 rxq->nb_rx_desc = I40E_FDIR_NUM_RX_DESC;
3339 rxq->queue_id = I40E_FDIR_QUEUE_ID;
3340 rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
3341 rxq->vsi = pf->fdir.fdir_vsi;
3343 #ifdef RTE_LIBRTE_XEN_DOM0
3344 rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
3346 rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
3348 rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
3351 * No need to allocate a software ring or reset the FDIR RX
3352 * queue; just mark the queue as configured.
3357 return I40E_SUCCESS;