1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
6 #include <ethdev_driver.h>
7 #include <rte_malloc.h>
9 #include "base/i40e_prototype.h"
10 #include "base/i40e_type.h"
11 #include "i40e_ethdev.h"
12 #include "i40e_rxtx.h"
13 #include "i40e_rxtx_vec_common.h"
17 #ifndef __INTEL_COMPILER
18 #pragma GCC diagnostic ignored "-Wcast-qual"
21 #define RTE_I40E_DESCS_PER_LOOP_AVX 8
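/* The AVX-512 receive path below consumes the descriptor ring 8 descriptors
 * at a time, combining them into two 512-bit registers (raw_desc0_3 and
 * raw_desc4_7) before converting them to mbuf metadata.
 */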
23 static __rte_always_inline void
24 i40e_rxq_rearm(struct i40e_rx_queue *rxq)
28 volatile union i40e_rx_desc *rxdp;
29 struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
30 struct rte_mempool_cache *cache = rte_mempool_default_cache(rxq->mp,
33 rxdp = rxq->rx_ring + rxq->rxrearm_start;
36 return i40e_rxq_rearm_common(rxq, true);
/* We need to pull 'n' more MBUFs into the software ring from the mempool.
 * We inline the mempool function here, so we can vectorize the copy
 * from the cache into the shadow ring.
 */
43 if (cache->len < RTE_I40E_RXQ_REARM_THRESH) {
44 /* No. Backfill the cache first, and then fill from it */
45 uint32_t req = RTE_I40E_RXQ_REARM_THRESH + (cache->size -
/* How many do we require?
 * i.e. the number to fill the cache plus the request.
 */
51 int ret = rte_mempool_ops_dequeue_bulk(rxq->mp,
52 &cache->objs[cache->len], req);
56 if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
60 dma_addr0 = _mm_setzero_si128();
61 for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
62 rxep[i].mbuf = &rxq->fake_mbuf;
_mm_store_si128((__m128i *)&rxdp[i].read, dma_addr0);
68 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
69 RTE_I40E_RXQ_REARM_THRESH;
74 const __m512i iova_offsets = _mm512_set1_epi64
75 (offsetof(struct rte_mbuf, buf_iova));
76 const __m512i headroom = _mm512_set1_epi64(RTE_PKTMBUF_HEADROOM);
78 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
/* To shuffle the addresses into the correct slots. Values 4-7 will contain
 * zeros, so use 7 for a zero-value.
 */
82 const __m512i permute_idx = _mm512_set_epi64(7, 7, 3, 1, 7, 7, 2, 0);
84 const __m512i permute_idx = _mm512_set_epi64(7, 3, 6, 2, 5, 1, 4, 0);
/* Initialize the mbufs in vector, processing 8 mbufs per loop iteration,
 * taking them from the mempool cache and populating both the shadow and HW rings.
 */
90 for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH / 8; i++) {
91 const __m512i mbuf_ptrs = _mm512_loadu_si512
92 (&cache->objs[cache->len - 8]);
93 _mm512_store_si512(rxep, mbuf_ptrs);
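/* Each iteration copies 8 mbuf pointers (64 bytes) from the tail of the
 * mempool cache into the software ring with a single load/store pair, so
 * the loop runs RTE_I40E_RXQ_REARM_THRESH / 8 times per rearm.
 */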
95 /* gather iova of mbuf0-7 into one zmm reg */
96 const __m512i iova_base_addrs = _mm512_i64gather_epi64
97 (_mm512_add_epi64(mbuf_ptrs, iova_offsets),
const __m512i iova_addrs = _mm512_add_epi64(iova_base_addrs, headroom);
102 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
103 const __m512i iovas0 = _mm512_castsi256_si512
104 (_mm512_extracti64x4_epi64(iova_addrs, 0));
105 const __m512i iovas1 = _mm512_castsi256_si512
106 (_mm512_extracti64x4_epi64(iova_addrs, 1));
/* permute leaves desc 2-3 addresses in header address slots 0-1
 * but these are ignored by the driver since header split is not
 * enabled. Similarly for desc 6 & 7.
 */
112 const __m512i desc_rd_0_1 = _mm512_permutexvar_epi64
113 (permute_idx, iovas0);
114 const __m512i desc_rd_2_3 = _mm512_bsrli_epi128(desc_rd_0_1, 8);
116 const __m512i desc_rd_4_5 = _mm512_permutexvar_epi64
117 (permute_idx, iovas1);
118 const __m512i desc_rd_6_7 = _mm512_bsrli_epi128(desc_rd_4_5, 8);
120 _mm512_store_si512((void *)rxdp, desc_rd_0_1);
121 _mm512_store_si512((void *)(rxdp + 2), desc_rd_2_3);
122 _mm512_store_si512((void *)(rxdp + 4), desc_rd_4_5);
123 _mm512_store_si512((void *)(rxdp + 6), desc_rd_6_7);
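/* Each 64-byte store above covers two 32-byte descriptors: the mbuf data
 * addresses land in the pkt_addr slots of the read format, while the
 * hdr_addr slots receive zeros or leftover addresses that the hardware
 * ignores with header split disabled.
 */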
/* permute leaves desc 4-7 addresses in header address slots 0-3
 * but these are ignored by the driver since header split is not
 * enabled.
 */
129 const __m512i desc_rd_0_3 = _mm512_permutexvar_epi64
130 (permute_idx, iova_addrs);
131 const __m512i desc_rd_4_7 = _mm512_bsrli_epi128(desc_rd_0_3, 8);
133 _mm512_store_si512((void *)rxdp, desc_rd_0_3);
134 _mm512_store_si512((void *)(rxdp + 4), desc_rd_4_7);
136 rxep += 8, rxdp += 8, cache->len -= 8;
139 rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
140 if (rxq->rxrearm_start >= rxq->nb_rx_desc)
141 rxq->rxrearm_start = 0;
143 rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;
145 rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
146 (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
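/* rx_id is the index of the last descriptor just rearmed (one position
 * behind rxrearm_start, wrapping at the ring size), which is the value
 * the hardware expects in the tail register.
 */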
148 /* Update the tail pointer on the NIC */
149 I40E_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
152 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
153 /* Handles 32B descriptor FDIR ID processing:
154 * rxdp: receive descriptor ring, required to load 2nd 16B half of each desc
155 * rx_pkts: required to store metadata back to mbufs
156 * pkt_idx: offset into the burst, increments in vector widths
157 * desc_idx: required to select the correct shift at compile time
159 static inline __m256i
160 desc_fdir_processing_32b(volatile union i40e_rx_desc *rxdp,
161 struct rte_mbuf **rx_pkts,
162 const uint32_t pkt_idx,
const uint32_t desc_idx)
{
165 /* 32B desc path: load rxdp.wb.qword2 for EXT_STATUS and FLEXBH_STAT */
166 __m128i *rxdp_desc_0 = (void *)(&rxdp[desc_idx + 0].wb.qword2);
167 __m128i *rxdp_desc_1 = (void *)(&rxdp[desc_idx + 1].wb.qword2);
168 const __m128i desc_qw2_0 = _mm_load_si128(rxdp_desc_0);
169 const __m128i desc_qw2_1 = _mm_load_si128(rxdp_desc_1);
171 /* Mask for FLEXBH_STAT, and the FDIR_ID value to compare against. The
172 * remaining data is set to all 1's to pass through data.
const __m256i flexbh_mask = _mm256_set_epi32(-1, -1, -1, 3 << 4,
                                             -1, -1, -1, 3 << 4);
const __m256i flexbh_id   = _mm256_set_epi32(-1, -1, -1, 1 << 4,
                                             -1, -1, -1, 1 << 4);
179 /* Load descriptor, check for FLEXBH bits, generate a mask for both
180 * packets in the register.
182 __m256i desc_qw2_0_1 =
_mm256_inserti128_si256(_mm256_castsi128_si256(desc_qw2_0), desc_qw2_1, 1);
185 __m256i desc_tmp_msk = _mm256_and_si256(flexbh_mask, desc_qw2_0_1);
186 __m256i fdir_mask = _mm256_cmpeq_epi32(flexbh_id, desc_tmp_msk);
187 __m256i fdir_data = _mm256_alignr_epi8(desc_qw2_0_1, desc_qw2_0_1, 12);
188 __m256i desc_fdir_data = _mm256_and_si256(fdir_mask, fdir_data);
190 /* Write data out to the mbuf. There is no store to this area of the
191 * mbuf today, so we cannot combine it with another store.
193 const uint32_t idx_0 = pkt_idx + desc_idx;
194 const uint32_t idx_1 = pkt_idx + desc_idx + 1;
196 rx_pkts[idx_0]->hash.fdir.hi = _mm256_extract_epi32(desc_fdir_data, 0);
197 rx_pkts[idx_1]->hash.fdir.hi = _mm256_extract_epi32(desc_fdir_data, 4);
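/* Lanes 0 and 4 of desc_fdir_data hold the (masked) FDIR IDs of the two
 * descriptors handled by this call, one in each 128-bit half of the
 * register, hence the two scalar extracts above.
 */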
199 /* Create mbuf flags as required for mbuf_flags layout
200 * (That's high lane [1,3,5,7, 0,2,4,6] as u32 lanes).
202 * - Mask away bits not required from the fdir_mask
203 * - Leave the PKT_FDIR_ID bit (1 << 13)
204 * - Position that bit correctly based on packet number
205 * - OR in the resulting bit to mbuf_flags
207 RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
__m256i mbuf_flag_mask = _mm256_set_epi32(0, 0, 0, 1 << 13,
                                          0, 0, 0, 1 << 13);
210 __m256i desc_flag_bit = _mm256_and_si256(mbuf_flag_mask, fdir_mask);
/* For a static-inline function, this switch will be stripped out
 * at compile time, as desc_idx is a hard-coded constant.
 */
switch (desc_idx) {
case 0: return _mm256_alignr_epi8(desc_flag_bit, desc_flag_bit, 4);
case 2: return _mm256_alignr_epi8(desc_flag_bit, desc_flag_bit, 8);
case 4: return _mm256_alignr_epi8(desc_flag_bit, desc_flag_bit, 12);
case 6: return desc_flag_bit;
}
228 /* NOT REACHED, see above switch returns */
229 return _mm256_setzero_si256();
231 #endif /* RTE_LIBRTE_I40E_16BYTE_RX_DESC */
233 #define PKTLEN_SHIFT 10
235 /* Force inline as some compilers will not inline by default. */
236 static __rte_always_inline uint16_t
237 _recv_raw_pkts_vec_avx512(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
238 uint16_t nb_pkts, uint8_t *split_packet)
240 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
241 const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
242 0, rxq->mbuf_initializer);
243 struct i40e_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail];
244 volatile union i40e_rx_desc *rxdp = rxq->rx_ring + rxq->rx_tail;
248 /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP_AVX */
249 nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP_AVX);
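/* e.g. a request for 34 packets is floored to 32 so the 8-descriptor loop
 * below never processes more descriptors than the caller asked for.
 */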
/* See if we need to rearm the RX queue - gives the prefetch a bit
 * more time to act.
 */
254 if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
/* Before we start moving massive data around, check to see if
 * there is actually a packet available.
 */
260 if (!(rxdp->wb.qword1.status_error_len &
261 rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
264 /* constants used in processing loop */
const __m512i crc_adjust = _mm512_set4_epi32
(0, /* ignore non-length fields */
-rxq->crc_len, /* sub crc on data_len */
-rxq->crc_len, /* sub crc on pkt_len */
0); /* ignore non-length fields */
273 /* 8 packets DD mask, LSB in each 32-bit value */
274 const __m256i dd_check = _mm256_set1_epi32(1);
276 /* 8 packets EOP mask, second-LSB in each 32-bit value */
277 const __m256i eop_check = _mm256_slli_epi32(dd_check,
278 I40E_RX_DESC_STATUS_EOF_SHIFT);
280 /* mask to shuffle from desc. to mbuf (2 descriptors)*/
281 const __m512i shuf_msk =
283 (/* rss hash parsed separately */
284 /* octet 4~7, 32bits rss */
285 7 << 24 | 6 << 16 | 5 << 8 | 4,
286 /* octet 2~3, low 16 bits vlan_macip */
287 /* octet 14~15, 16 bits data_len */
288 3 << 24 | 2 << 16 | 15 << 8 | 14,
289 /* skip hi 16 bits pkt_len, zero out */
290 /* octet 14~15, 16 bits pkt_len */
291 0xFFFF << 16 | 15 << 8 | 14,
292 /* pkt_type set as unknown */
/* compile-time check that the above crc and shuffle layouts are correct.
 * NOTE: the first field (lowest address) is given last in the set_epi
 * argument order.
 */
299 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
300 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
301 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
302 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
303 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
304 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
305 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
306 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
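/* The checks above pin the rx_descriptor_fields1 layout (packet_type,
 * pkt_len, data_len/vlan_tci, rss hash) that the shuffle mask and
 * crc_adjust constants rely on; a layout change would otherwise break the
 * vector conversion silently.
 */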
308 /* Status/Error flag masks */
/* mask everything except RSS, flow director and VLAN flags:
 * bit 2 is for the VLAN tag, bit 11 for the flow director indication,
 * bits 13:12 for the RSS indication. Bits 3-5 of the error
 * field (bits 22-24) are for IP/L4 checksum errors.
 */
314 const __m256i flags_mask = _mm256_set1_epi32
315 ((1 << 2) | (1 << 11) | (3 << 12) | (7 << 22));
/* data to be shuffled by result of flag mask. If VLAN bit is set
 * (bit 2), then position 4 in this array will be used in the
 * shuffle.
 */
321 const __m256i vlan_flags_shuf = _mm256_set_epi32
322 (0, 0, RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0,
323 0, 0, RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0);
/* data to be shuffled by result of flag mask, shifted down 11.
 * If RSS/FDIR bits are set, the shuffle moves the appropriate flags
 * into place.
 */
329 const __m256i rss_flags_shuf = _mm256_set_epi8
330 (0, 0, 0, 0, 0, 0, 0, 0,
331 RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR,
332 RTE_MBUF_F_RX_RSS_HASH,
334 0, 0, RTE_MBUF_F_RX_FDIR, 0, /* end up 128-bits */
335 0, 0, 0, 0, 0, 0, 0, 0,
336 RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR,
337 RTE_MBUF_F_RX_RSS_HASH,
339 0, 0, RTE_MBUF_F_RX_FDIR, 0);
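/* The table above is indexed by the FLM/FLTSTAT bits after the flag word
 * is shifted right by 11: index 1 (FLM set) yields RTE_MBUF_F_RX_FDIR,
 * while indexes 6-7 (FLTSTAT indicating a valid RSS hash) yield
 * RTE_MBUF_F_RX_RSS_HASH, optionally combined with the FDIR flag.
 */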
/* data to be shuffled by the result of the flags mask shifted by 22
 * bits. This gives us the l3_l4 flags.
 */
344 const __m256i l3_l4_flags_shuf = _mm256_set_epi8
345 (0, 0, 0, 0, 0, 0, 0, 0,
/* shift right 1 bit to make sure it does not exceed 255 */
347 (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
348 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
349 (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
350 RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
351 (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
352 (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
353 (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
354 (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
355 RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
356 (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1,
357 /* second 128-bits */
358 0, 0, 0, 0, 0, 0, 0, 0,
359 (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
360 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
361 (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
362 RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
363 (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
364 (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
365 (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
366 (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
367 RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
368 (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1);
370 const __m256i cksum_mask = _mm256_set1_epi32
371 (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
372 RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
373 RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);
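/* The l3_l4 table stores its flag combinations shifted right by one so
 * each entry fits in a byte for the epi8 shuffle; the result is shifted
 * back left by one and masked with cksum_mask in the loop below.
 */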
375 uint16_t i, received;
377 for (i = 0, received = 0; i < nb_pkts;
378 i += RTE_I40E_DESCS_PER_LOOP_AVX,
379 rxdp += RTE_I40E_DESCS_PER_LOOP_AVX) {
380 /* step 1, copy over 8 mbuf pointers to rx_pkts array */
381 _mm256_storeu_si256((void *)&rx_pkts[i],
382 _mm256_loadu_si256((void *)&sw_ring[i]));
383 #ifdef RTE_ARCH_X86_64
384 _mm256_storeu_si256((void *)&rx_pkts[i + 4],
385 _mm256_loadu_si256((void *)&sw_ring[i + 4]));
388 __m512i raw_desc0_3, raw_desc4_7;
389 __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7;
391 /* load in descriptors, in reverse order */
392 const __m128i raw_desc7 =
393 _mm_load_si128((void *)(rxdp + 7));
394 rte_compiler_barrier();
395 const __m128i raw_desc6 =
396 _mm_load_si128((void *)(rxdp + 6));
397 rte_compiler_barrier();
398 const __m128i raw_desc5 =
399 _mm_load_si128((void *)(rxdp + 5));
400 rte_compiler_barrier();
401 const __m128i raw_desc4 =
402 _mm_load_si128((void *)(rxdp + 4));
403 rte_compiler_barrier();
404 const __m128i raw_desc3 =
405 _mm_load_si128((void *)(rxdp + 3));
406 rte_compiler_barrier();
407 const __m128i raw_desc2 =
408 _mm_load_si128((void *)(rxdp + 2));
409 rte_compiler_barrier();
410 const __m128i raw_desc1 =
411 _mm_load_si128((void *)(rxdp + 1));
412 rte_compiler_barrier();
413 const __m128i raw_desc0 =
414 _mm_load_si128((void *)(rxdp + 0));
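/* Descriptors are loaded newest-first (7 down to 0) with compiler
 * barriers in between: hardware completes descriptors in order, so any
 * descriptor seen here with its DD bit set also has every lower-numbered
 * descriptor read as done, avoiding holes in the DD count taken below.
 */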
417 _mm256_inserti128_si256
418 (_mm256_castsi128_si256(raw_desc6),
421 _mm256_inserti128_si256
422 (_mm256_castsi128_si256(raw_desc4),
425 _mm256_inserti128_si256
426 (_mm256_castsi128_si256(raw_desc2),
429 _mm256_inserti128_si256
430 (_mm256_castsi128_si256(raw_desc0),
435 (_mm512_castsi256_si512(raw_desc4_5),
439 (_mm512_castsi256_si512(raw_desc0_1),
445 for (j = 0; j < RTE_I40E_DESCS_PER_LOOP_AVX; j++)
446 rte_mbuf_prefetch_part2(rx_pkts[i + j]);
449 /* convert descriptors 0-7 into mbufs, adjusting length and
450 * re-arranging fields. Then write into the mbuf
452 const __m512i len4_7 = _mm512_slli_epi32
453 (raw_desc4_7, PKTLEN_SHIFT);
454 const __m512i len0_3 = _mm512_slli_epi32
455 (raw_desc0_3, PKTLEN_SHIFT);
456 const __m512i desc4_7 = _mm512_mask_blend_epi16
457 (0x80808080, raw_desc4_7, len4_7);
458 const __m512i desc0_3 = _mm512_mask_blend_epi16
459 (0x80808080, raw_desc0_3, len0_3);
460 __m512i mb4_7 = _mm512_shuffle_epi8(desc4_7, shuf_msk);
461 __m512i mb0_3 = _mm512_shuffle_epi8(desc0_3, shuf_msk);
463 mb4_7 = _mm512_add_epi32(mb4_7, crc_adjust);
464 mb0_3 = _mm512_add_epi32(mb0_3, crc_adjust);
/* to get packet types, shift 64-bit values down 30 bits, so the
 * ptype is in the lower 8 bits of each 64-bit lane.
 */
469 const __m512i ptypes4_7 = _mm512_srli_epi64(desc4_7, 30);
470 const __m512i ptypes0_3 = _mm512_srli_epi64(desc0_3, 30);
471 const __m256i ptypes6_7 =
472 _mm512_extracti64x4_epi64(ptypes4_7, 1);
473 const __m256i ptypes4_5 =
474 _mm512_extracti64x4_epi64(ptypes4_7, 0);
475 const __m256i ptypes2_3 =
476 _mm512_extracti64x4_epi64(ptypes0_3, 1);
477 const __m256i ptypes0_1 =
478 _mm512_extracti64x4_epi64(ptypes0_3, 0);
479 const uint8_t ptype7 = _mm256_extract_epi8(ptypes6_7, 24);
480 const uint8_t ptype6 = _mm256_extract_epi8(ptypes6_7, 8);
481 const uint8_t ptype5 = _mm256_extract_epi8(ptypes4_5, 24);
482 const uint8_t ptype4 = _mm256_extract_epi8(ptypes4_5, 8);
483 const uint8_t ptype3 = _mm256_extract_epi8(ptypes2_3, 24);
484 const uint8_t ptype2 = _mm256_extract_epi8(ptypes2_3, 8);
485 const uint8_t ptype1 = _mm256_extract_epi8(ptypes0_1, 24);
486 const uint8_t ptype0 = _mm256_extract_epi8(ptypes0_1, 8);
488 const __m512i ptype4_7 = _mm512_set_epi32
489 (0, 0, 0, ptype_tbl[ptype7],
490 0, 0, 0, ptype_tbl[ptype6],
491 0, 0, 0, ptype_tbl[ptype5],
492 0, 0, 0, ptype_tbl[ptype4]);
493 const __m512i ptype0_3 = _mm512_set_epi32
494 (0, 0, 0, ptype_tbl[ptype3],
495 0, 0, 0, ptype_tbl[ptype2],
496 0, 0, 0, ptype_tbl[ptype1],
497 0, 0, 0, ptype_tbl[ptype0]);
499 mb4_7 = _mm512_mask_blend_epi32(0x1111, mb4_7, ptype4_7);
500 mb0_3 = _mm512_mask_blend_epi32(0x1111, mb0_3, ptype0_3);
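/* The hardware ptype indexes extracted above are translated through
 * ptype_tbl and blended into dword 0 of each descriptor's mbuf fields;
 * mask 0x1111 selects the packet_type slot of each 128-bit lane.
 */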
502 __m256i mb4_5 = _mm512_extracti64x4_epi64(mb4_7, 0);
503 __m256i mb6_7 = _mm512_extracti64x4_epi64(mb4_7, 1);
504 __m256i mb0_1 = _mm512_extracti64x4_epi64(mb0_3, 0);
505 __m256i mb2_3 = _mm512_extracti64x4_epi64(mb0_3, 1);
/* use permute/extract to get status content.
 * After the operations, the packets status flags are in the
 * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
 */
512 /* merge the status bits into one register */
513 const __m512i status_permute_msk = _mm512_set_epi32
518 const __m512i raw_status0_7 = _mm512_permutex2var_epi32
519 (desc4_7, status_permute_msk, desc0_3);
520 __m256i status0_7 = _mm512_extracti64x4_epi64
523 /* now do flag manipulation */
525 /* get only flag/error bits we want */
526 const __m256i flag_bits =
527 _mm256_and_si256(status0_7, flags_mask);
528 /* set vlan and rss flags */
529 const __m256i vlan_flags =
530 _mm256_shuffle_epi8(vlan_flags_shuf, flag_bits);
531 const __m256i rss_fdir_bits = _mm256_srli_epi32(flag_bits, 11);
532 const __m256i rss_flags = _mm256_shuffle_epi8(rss_flags_shuf,
/* l3_l4_error flags: shuffle, then shift to correct the adjustment
 * of flags in l3_l4_flags_shuf, and finally mask out extra bits.
 */
538 __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
539 _mm256_srli_epi32(flag_bits, 22));
540 l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
541 l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
544 __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
545 _mm256_or_si256(rss_flags, vlan_flags));
/* If the rxq has FDIR enabled, read and process the FDIR info
 * from the descriptor. This can cause more loads/stores, so it is
 * not always performed. Branch over the code when not enabled.
 */
551 if (rxq->fdir_enabled) {
552 #ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC
553 /* 16B descriptor code path:
554 * RSS and FDIR ID use the same offset in the desc, so
555 * only one can be present at a time. The code below
556 * identifies an FDIR ID match, and zeros the RSS value
557 * in the mbuf on FDIR match to keep mbuf data clean.
559 #define FDIR_BLEND_MASK ((1 << 3) | (1 << 7))
562 * - Take flags, shift bits to null out
563 * - CMPEQ with known FDIR ID, to get 0xFFFF or 0 mask
564 * - Strip bits from mask, leaving 0 or 1 for FDIR ID
565 * - Merge with mbuf_flags
567 /* FLM = 1, FLTSTAT = 0b01, (FLM | FLTSTAT) == 3.
568 * Shift left by 28 to avoid having to mask.
571 _mm256_slli_epi32(rss_fdir_bits, 28);
572 const __m256i fdir_id = _mm256_set1_epi32(3 << 28);
574 /* As above, the fdir_mask to packet mapping is this:
575 * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
576 * Then OR FDIR flags to mbuf_flags on FDIR ID hit.
578 RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
579 const __m256i pkt_fdir_bit = _mm256_set1_epi32(1 << 13);
580 const __m256i fdir_mask =
581 _mm256_cmpeq_epi32(fdir, fdir_id);
583 _mm256_and_si256(fdir_mask, pkt_fdir_bit);
585 mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_bits);
587 /* Based on FDIR_MASK, clear the RSS or FDIR value.
588 * The FDIR ID value is masked to zero if not a hit,
589 * otherwise the mb0_1 register RSS field is zeroed.
591 const __m256i fdir_zero_mask = _mm256_setzero_si256();
592 __m256i tmp0_1 = _mm256_blend_epi32(fdir_zero_mask,
593 fdir_mask, FDIR_BLEND_MASK);
594 __m256i fdir_mb0_1 = _mm256_and_si256(mb0_1, fdir_mask);
596 mb0_1 = _mm256_andnot_si256(tmp0_1, mb0_1);
598 /* Write to mbuf: no stores to combine with, so just a
599 * scalar store to push data here.
601 rx_pkts[i + 0]->hash.fdir.hi =
602 _mm256_extract_epi32(fdir_mb0_1, 3);
603 rx_pkts[i + 1]->hash.fdir.hi =
604 _mm256_extract_epi32(fdir_mb0_1, 7);
606 /* Same as above, only shift the fdir_mask to align
607 * the packet FDIR mask with the FDIR_ID desc lane.
610 _mm256_alignr_epi8(fdir_mask, fdir_mask, 12);
611 __m256i fdir_mb2_3 = _mm256_and_si256(mb2_3, tmp2_3);
613 tmp2_3 = _mm256_blend_epi32(fdir_zero_mask, tmp2_3,
615 mb2_3 = _mm256_andnot_si256(tmp2_3, mb2_3);
616 rx_pkts[i + 2]->hash.fdir.hi =
617 _mm256_extract_epi32(fdir_mb2_3, 3);
618 rx_pkts[i + 3]->hash.fdir.hi =
619 _mm256_extract_epi32(fdir_mb2_3, 7);
622 _mm256_alignr_epi8(fdir_mask, fdir_mask, 8);
623 __m256i fdir_mb4_5 = _mm256_and_si256(mb4_5, tmp4_5);
625 tmp4_5 = _mm256_blend_epi32(fdir_zero_mask, tmp4_5,
627 mb4_5 = _mm256_andnot_si256(tmp4_5, mb4_5);
628 rx_pkts[i + 4]->hash.fdir.hi =
629 _mm256_extract_epi32(fdir_mb4_5, 3);
630 rx_pkts[i + 5]->hash.fdir.hi =
631 _mm256_extract_epi32(fdir_mb4_5, 7);
634 _mm256_alignr_epi8(fdir_mask, fdir_mask, 4);
635 __m256i fdir_mb6_7 = _mm256_and_si256(mb6_7, tmp6_7);
637 tmp6_7 = _mm256_blend_epi32(fdir_zero_mask, tmp6_7,
639 mb6_7 = _mm256_andnot_si256(tmp6_7, mb6_7);
640 rx_pkts[i + 6]->hash.fdir.hi =
641 _mm256_extract_epi32(fdir_mb6_7, 3);
642 rx_pkts[i + 7]->hash.fdir.hi =
643 _mm256_extract_epi32(fdir_mb6_7, 7);
645 /* End of 16B descriptor handling */
647 /* 32B descriptor FDIR ID mark handling. Returns bits
648 * to be OR-ed into the mbuf olflags.
650 __m256i fdir_add_flags;
fdir_add_flags = desc_fdir_processing_32b(rxdp, rx_pkts, i, 0);
mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_add_flags);

fdir_add_flags = desc_fdir_processing_32b(rxdp, rx_pkts, i, 2);
mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_add_flags);

fdir_add_flags = desc_fdir_processing_32b(rxdp, rx_pkts, i, 4);
mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_add_flags);

fdir_add_flags = desc_fdir_processing_32b(rxdp, rx_pkts, i, 6);
mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_add_flags);
671 /* End 32B desc handling */
672 #endif /* RTE_LIBRTE_I40E_16BYTE_RX_DESC */
674 } /* if() on FDIR enabled */
/* At this point, we have the 8 sets of flags in the low 16-bits
 * of each 32-bit value in mbuf_flags.
678 * We want to extract these, and merge them with the mbuf init data
679 * so we can do a single write to the mbuf to set the flags
680 * and all the other initialization fields. Extracting the
681 * appropriate flags means that we have to do a shift and blend for
682 * each mbuf before we do the write. However, we can also
683 * add in the previously computed rx_descriptor fields to
684 * make a single 256-bit write per mbuf
686 /* check the structure matches expectations */
687 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
688 offsetof(struct rte_mbuf, rearm_data) + 8);
689 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
690 RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
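/* Each rearm write below is a single 32-byte store starting at
 * rearm_data: the low 16 bytes carry mbuf_init with ol_flags blended in
 * at dword 2, and the high 16 bytes carry the rx_descriptor_fields1
 * content prepared in mb0_1..mb6_7.
 */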
691 /* build up data and do writes */
692 __m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
694 rearm6 = _mm256_blend_epi32
695 (mbuf_init, _mm256_slli_si256(mbuf_flags, 8), 0x04);
696 rearm4 = _mm256_blend_epi32
697 (mbuf_init, _mm256_slli_si256(mbuf_flags, 4), 0x04);
698 rearm2 = _mm256_blend_epi32
699 (mbuf_init, mbuf_flags, 0x04);
700 rearm0 = _mm256_blend_epi32
701 (mbuf_init, _mm256_srli_si256(mbuf_flags, 4), 0x04);
702 /* permute to add in the rx_descriptor e.g. rss fields */
703 rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
704 rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
705 rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
706 rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
709 ((__m256i *)&rx_pkts[i + 6]->rearm_data, rearm6);
711 ((__m256i *)&rx_pkts[i + 4]->rearm_data, rearm4);
713 ((__m256i *)&rx_pkts[i + 2]->rearm_data, rearm2);
715 ((__m256i *)&rx_pkts[i + 0]->rearm_data, rearm0);
717 /* repeat for the odd mbufs */
718 const __m256i odd_flags = _mm256_castsi128_si256
719 (_mm256_extracti128_si256(mbuf_flags, 1));
720 rearm7 = _mm256_blend_epi32
721 (mbuf_init, _mm256_slli_si256(odd_flags, 8), 0x04);
722 rearm5 = _mm256_blend_epi32
723 (mbuf_init, _mm256_slli_si256(odd_flags, 4), 0x04);
724 rearm3 = _mm256_blend_epi32
725 (mbuf_init, odd_flags, 0x04);
726 rearm1 = _mm256_blend_epi32
727 (mbuf_init, _mm256_srli_si256(odd_flags, 4), 0x04);
728 /* since odd mbufs are already in hi 128-bits use blend */
729 rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
730 rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
731 rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
732 rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
733 /* again write to mbufs */
735 ((__m256i *)&rx_pkts[i + 7]->rearm_data, rearm7);
737 ((__m256i *)&rx_pkts[i + 5]->rearm_data, rearm5);
739 ((__m256i *)&rx_pkts[i + 3]->rearm_data, rearm3);
741 ((__m256i *)&rx_pkts[i + 1]->rearm_data, rearm1);
743 /* extract and record EOP bit */
const __m128i eop_mask =
_mm_set1_epi16(1 << I40E_RX_DESC_STATUS_EOF_SHIFT);
748 const __m256i eop_bits256 =
749 _mm256_and_si256(status0_7, eop_check);
750 /* pack status bits into a single 128-bit register */
751 const __m128i eop_bits =
753 (_mm256_castsi256_si128(eop_bits256),
754 _mm256_extractf128_si256(eop_bits256, 1));
/* flip bits, and mask out the EOP bit, which is now
 * a split-packet bit, i.e. !EOP rather than EOP.
 */
__m128i split_bits = _mm_andnot_si128(eop_bits, eop_mask);
760 /* eop bits are out of order, so we need to shuffle them
761 * back into order again. In doing so, only use low 8
762 * bits, which acts like another pack instruction
763 * The original order is (hi->lo): 1,3,5,7,0,2,4,6
764 * [Since we use epi8, the 16-bit positions are
765 * multiplied by 2 in the eop_shuffle value.]
767 __m128i eop_shuffle = _mm_set_epi8
768 (0xFF, 0xFF, 0xFF, 0xFF, /* zero hi 64b */
769 0xFF, 0xFF, 0xFF, 0xFF,
770 8, 0, 10, 2, /* move values to lo 64b */
772 split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
773 *(uint64_t *)split_packet =
774 _mm_cvtsi128_si64(split_bits);
775 split_packet += RTE_I40E_DESCS_PER_LOOP_AVX;
778 /* perform dd_check */
779 status0_7 = _mm256_and_si256(status0_7, dd_check);
780 status0_7 = _mm256_packs_epi32
781 (status0_7, _mm256_setzero_si256());
uint64_t burst = __builtin_popcountll(_mm_cvtsi128_si64
(_mm256_extracti128_si256(status0_7, 1)));
787 burst += __builtin_popcountll(_mm_cvtsi128_si64
788 (_mm256_castsi256_si128(status0_7)));
790 if (burst != RTE_I40E_DESCS_PER_LOOP_AVX)
794 /* update tail pointers */
795 rxq->rx_tail += received;
796 rxq->rx_tail &= (rxq->nb_rx_desc - 1);
797 if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */
801 rxq->rxrearm_nb += received;
807 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
810 i40e_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
813 return _recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL);
817 * vPMD receive routine that reassembles single burst of 32 scattered packets
819 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
822 i40e_recv_scattered_burst_vec_avx512(void *rx_queue,
823 struct rte_mbuf **rx_pkts,
826 struct i40e_rx_queue *rxq = rx_queue;
827 uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
829 /* get some new buffers */
830 uint16_t nb_bufs = _recv_raw_pkts_vec_avx512(rxq, rx_pkts, nb_pkts,
835 /* happy day case, full burst + no packets to be joined */
836 const uint64_t *split_fl64 = (uint64_t *)split_flags;
838 if (!rxq->pkt_first_seg &&
839 split_fl64[0] == 0 && split_fl64[1] == 0 &&
840 split_fl64[2] == 0 && split_fl64[3] == 0)
843 /* reassemble any packets that need reassembly*/
846 if (!rxq->pkt_first_seg) {
847 /* find the first split flag, and only reassemble then*/
848 while (i < nb_bufs && !split_flags[i])
852 rxq->pkt_first_seg = rx_pkts[i];
854 return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
859 * vPMD receive routine that reassembles scattered packets.
860 * Main receive routine that can handle arbitrary burst sizes
862 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
865 i40e_recv_scattered_pkts_vec_avx512(void *rx_queue,
866 struct rte_mbuf **rx_pkts,
871 while (nb_pkts > RTE_I40E_VPMD_RX_BURST) {
872 uint16_t burst = i40e_recv_scattered_burst_vec_avx512(rx_queue,
873 rx_pkts + retval, RTE_I40E_VPMD_RX_BURST);
876 if (burst < RTE_I40E_VPMD_RX_BURST)
879 return retval + i40e_recv_scattered_burst_vec_avx512(rx_queue,
880 rx_pkts + retval, nb_pkts);
883 static __rte_always_inline int
884 i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq)
886 struct i40e_vec_tx_entry *txep;
890 struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];
892 /* check DD bits on threshold descriptor */
893 if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
894 rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
895 rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
898 n = txq->tx_rs_thresh;
900 /* first buffer to free from S/W ring is at index
901 * tx_next_dd - (tx_rs_thresh-1)
903 txep = (void *)txq->sw_ring;
904 txep += txq->tx_next_dd - (n - 1);
if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
907 struct rte_mempool *mp = txep[0].mbuf->pool;
909 struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
912 if (!cache || cache->len == 0)
915 cache_objs = &cache->objs[cache->len];
917 if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
918 rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n);
/* The cache is refilled as follows:
 *   1. Add the objects to the cache.
 *   2. If the cache length crosses the flush threshold, anything
 *      above the nominal cache size is flushed to the mempool ring.
 */
927 /* Add elements back into the cache */
929 /* n is multiple of 32 */
931 const __m512i a = _mm512_load_si512(&txep[copied]);
932 const __m512i b = _mm512_load_si512(&txep[copied + 8]);
933 const __m512i c = _mm512_load_si512(&txep[copied + 16]);
934 const __m512i d = _mm512_load_si512(&txep[copied + 24]);
936 _mm512_storeu_si512(&cache_objs[copied], a);
937 _mm512_storeu_si512(&cache_objs[copied + 8], b);
938 _mm512_storeu_si512(&cache_objs[copied + 16], c);
939 _mm512_storeu_si512(&cache_objs[copied + 24], d);
944 if (cache->len >= cache->flushthresh) {
945 rte_mempool_ops_enqueue_bulk
946 (mp, &cache->objs[cache->size],
947 cache->len - cache->size);
948 cache->len = cache->size;
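/* After the bulk copy the cache may exceed its flush threshold; in that
 * case everything above the nominal cache size is pushed back to the
 * mempool ring so the cache ends up holding exactly cache->size objects.
 */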
954 m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
958 for (i = 1; i < n; i++) {
959 rte_prefetch0(&txep[i + 3].mbuf->cacheline1);
960 m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
962 if (likely(m->pool == free[0]->pool)) {
965 rte_mempool_put_bulk(free[0]->pool,
973 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
975 for (i = 1; i < n; i++) {
976 m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
978 rte_mempool_put(m->pool, m);
983 /* buffers were freed, update counters */
984 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
985 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
986 if (txq->tx_next_dd >= txq->nb_tx_desc)
987 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
989 return txq->tx_rs_thresh;
993 vtx1(volatile struct i40e_tx_desc *txdp, struct rte_mbuf *pkt, uint64_t flags)
995 uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
996 ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
997 ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
999 __m128i descriptor = _mm_set_epi64x(high_qw,
1000 pkt->buf_iova + pkt->data_off);
1001 _mm_store_si128((__m128i *)txdp, descriptor);
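/* vtx1() emits one 16-byte data descriptor: the high quadword carries the
 * descriptor type, command flags and buffer length, the low quadword the
 * buffer DMA address (buf_iova + data_off).
 */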
1005 vtx(volatile struct i40e_tx_desc *txdp,
1006 struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
1008 const uint64_t hi_qw_tmpl = (I40E_TX_DESC_DTYPE_DATA |
1009 ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT));
1011 for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) {
uint64_t hi_qw3 = hi_qw_tmpl |
((uint64_t)pkt[3]->data_len <<
I40E_TXD_QW1_TX_BUF_SZ_SHIFT);
uint64_t hi_qw2 = hi_qw_tmpl |
((uint64_t)pkt[2]->data_len <<
I40E_TXD_QW1_TX_BUF_SZ_SHIFT);
uint64_t hi_qw1 = hi_qw_tmpl |
((uint64_t)pkt[1]->data_len <<
I40E_TXD_QW1_TX_BUF_SZ_SHIFT);
uint64_t hi_qw0 = hi_qw_tmpl |
((uint64_t)pkt[0]->data_len <<
I40E_TXD_QW1_TX_BUF_SZ_SHIFT);

const __m512i desc0_3 = _mm512_set_epi64
(hi_qw3, pkt[3]->buf_iova + pkt[3]->data_off,
hi_qw2, pkt[2]->buf_iova + pkt[2]->data_off,
hi_qw1, pkt[1]->buf_iova + pkt[1]->data_off,
hi_qw0, pkt[0]->buf_iova + pkt[0]->data_off);
1035 _mm512_storeu_si512((void *)txdp, desc0_3);
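/* One unaligned 64-byte store writes four TX descriptors at once;
 * _mm512_set_epi64 lists the quadwords from highest (descriptor 3's
 * command/length word) down to lowest (descriptor 0's buffer address).
 */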
1038 /* do any last ones */
1040 vtx1(txdp, *pkt, flags);
1041 txdp++, pkt++, nb_pkts--;
1045 static __rte_always_inline void
1046 tx_backlog_entry_avx512(struct i40e_vec_tx_entry *txep,
1047 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1051 for (i = 0; i < (int)nb_pkts; ++i)
1052 txep[i].mbuf = tx_pkts[i];
1055 static inline uint16_t
1056 i40e_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
1059 struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
1060 volatile struct i40e_tx_desc *txdp;
1061 struct i40e_vec_tx_entry *txep;
1062 uint16_t n, nb_commit, tx_id;
1063 uint64_t flags = I40E_TD_CMD;
1064 uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
/* crossing the tx_rs_thresh boundary is not allowed */
1067 nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
1069 if (txq->nb_tx_free < txq->tx_free_thresh)
1070 i40e_tx_free_bufs_avx512(txq);
1072 nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
1073 if (unlikely(nb_pkts == 0))
1076 tx_id = txq->tx_tail;
1077 txdp = &txq->tx_ring[tx_id];
1078 txep = (void *)txq->sw_ring;
1081 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
1083 n = (uint16_t)(txq->nb_tx_desc - tx_id);
1084 if (nb_commit >= n) {
1085 tx_backlog_entry_avx512(txep, tx_pkts, n);
1087 vtx(txdp, tx_pkts, n - 1, flags);
1091 vtx1(txdp, *tx_pkts++, rs);
1093 nb_commit = (uint16_t)(nb_commit - n);
1096 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
1098 /* avoid reach the end of ring */
1099 txdp = txq->tx_ring;
1100 txep = (void *)txq->sw_ring;
1103 tx_backlog_entry_avx512(txep, tx_pkts, nb_commit);
1105 vtx(txdp, tx_pkts, nb_commit, flags);
1107 tx_id = (uint16_t)(tx_id + nb_commit);
1108 if (tx_id > txq->tx_next_rs) {
1109 txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
1110 rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
1111 I40E_TXD_QW1_CMD_SHIFT);
1113 (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
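/* The RS bit is requested only once every tx_rs_thresh descriptors, so
 * i40e_tx_free_bufs_avx512() needs to poll the DD bit only on those
 * marker descriptors rather than on every transmitted descriptor.
 */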
1116 txq->tx_tail = tx_id;
1118 I40E_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
1124 i40e_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
1128 struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
1133 num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
1134 ret = i40e_xmit_fixed_burst_vec_avx512
1135 (tx_queue, &tx_pkts[nb_tx], num);