/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <ethdev_driver.h>
#include <rte_malloc.h>

#include "base/i40e_prototype.h"
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_rxtx_vec_common.h"
#include "i40e_rxtx_common_avx.h"

#include <rte_vect.h>

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

#define RTE_I40E_DESCS_PER_LOOP_AVX 8
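
/* Rearm RTE_I40E_RXQ_REARM_THRESH RX descriptors starting at rxq->rxrearm_start:
 * refill the software ring with mbufs taken from the per-lcore mempool cache
 * and write their buffer addresses into the hardware descriptors.
 */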
static __rte_always_inline void
i40e_rxq_rearm(struct i40e_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union i40e_rx_desc *rxdp;
	struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
	struct rte_mempool_cache *cache = rte_mempool_default_cache(rxq->mp,
			rte_lcore_id());

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	if (unlikely(!cache))
		return i40e_rxq_rearm_common(rxq, true);

	/* We need to pull 'n' more MBUFs into the software ring from mempool
	 * We inline the mempool function here, so we can vectorize the copy
	 * from the cache into the shadow ring.
	 */
	if (cache->len < RTE_I40E_RXQ_REARM_THRESH) {
		/* No. Backfill the cache first, and then fill from it */
		uint32_t req = RTE_I40E_RXQ_REARM_THRESH + (cache->size -
				cache->len);

		/* How many do we require
		 * i.e. number to fill the cache + the request
		 */
		int ret = rte_mempool_ops_dequeue_bulk(rxq->mp,
				&cache->objs[cache->len], req);
		if (ret == 0) {
			cache->len += req;
		} else {
			if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
					rxq->nb_rx_desc) {
				__m128i dma_addr0 = _mm_setzero_si128();

				for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
					rxep[i].mbuf = &rxq->fake_mbuf;
					_mm_store_si128((__m128i *)&rxdp[i].read,
							dma_addr0);
				}
			}
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
					RTE_I40E_RXQ_REARM_THRESH;
			return;
		}
	}

	const __m512i iova_offsets = _mm512_set1_epi64
		(offsetof(struct rte_mbuf, buf_iova));
	const __m512i headroom = _mm512_set1_epi64(RTE_PKTMBUF_HEADROOM);

#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	/* to shuffle the addresses to correct slots. Values 4-7 will contain
	 * zeros, so use 7 for a zero-value.
	 */
	const __m512i permute_idx = _mm512_set_epi64(7, 7, 3, 1, 7, 7, 2, 0);
#else
	const __m512i permute_idx = _mm512_set_epi64(7, 3, 6, 2, 5, 1, 4, 0);
#endif

	/* Initialize the mbufs in vector, process 8 mbufs in one loop, taking
	 * from mempool cache and populating both shadow and HW rings
	 */
	for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH / 8; i++) {
		const __m512i mbuf_ptrs = _mm512_loadu_si512
			(&cache->objs[cache->len - 8]);
		_mm512_store_si512(rxep, mbuf_ptrs);

		/* gather iova of mbuf0-7 into one zmm reg */
		const __m512i iova_base_addrs = _mm512_i64gather_epi64
			(_mm512_add_epi64(mbuf_ptrs, iova_offsets),
			 0, /* base */
			 1  /* scale */);
		const __m512i iova_addrs = _mm512_add_epi64(iova_base_addrs,
				headroom);
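
		/* scatter the 8 buffer addresses into the packet-buffer slots
		 * of the next descriptors (16B or 32B descriptor layout)
		 */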
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
		const __m512i iovas0 = _mm512_castsi256_si512
			(_mm512_extracti64x4_epi64(iova_addrs, 0));
		const __m512i iovas1 = _mm512_castsi256_si512
			(_mm512_extracti64x4_epi64(iova_addrs, 1));

		/* permute leaves desc 2-3 addresses in header address slots 0-1
		 * but these are ignored by driver since header split not
		 * enabled. Similarly for desc 4 & 5.
		 */
		const __m512i desc_rd_0_1 = _mm512_permutexvar_epi64
			(permute_idx, iovas0);
		const __m512i desc_rd_2_3 = _mm512_bsrli_epi128(desc_rd_0_1, 8);

		const __m512i desc_rd_4_5 = _mm512_permutexvar_epi64
			(permute_idx, iovas1);
		const __m512i desc_rd_6_7 = _mm512_bsrli_epi128(desc_rd_4_5, 8);

		_mm512_store_si512((void *)rxdp, desc_rd_0_1);
		_mm512_store_si512((void *)(rxdp + 2), desc_rd_2_3);
		_mm512_store_si512((void *)(rxdp + 4), desc_rd_4_5);
		_mm512_store_si512((void *)(rxdp + 6), desc_rd_6_7);
#else
		/* permute leaves desc 4-7 addresses in header address slots 0-3
		 * but these are ignored by driver since header split not
		 * enabled.
		 */
		const __m512i desc_rd_0_3 = _mm512_permutexvar_epi64
			(permute_idx, iova_addrs);
		const __m512i desc_rd_4_7 = _mm512_bsrli_epi128(desc_rd_0_3, 8);

		_mm512_store_si512((void *)rxdp, desc_rd_0_3);
		_mm512_store_si512((void *)(rxdp + 4), desc_rd_4_7);
#endif
		rxep += 8, rxdp += 8, cache->len -= 8;
	}

	rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			(rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	I40E_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
}

#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
/* Handles 32B descriptor FDIR ID processing:
 * rxdp: receive descriptor ring, required to load 2nd 16B half of each desc
 * rx_pkts: required to store metadata back to mbufs
 * pkt_idx: offset into the burst, increments in vector widths
 * desc_idx: required to select the correct shift at compile time
 */
static inline __m256i
desc_fdir_processing_32b(volatile union i40e_rx_desc *rxdp,
			 struct rte_mbuf **rx_pkts,
			 const uint32_t pkt_idx,
			 const uint32_t desc_idx)
{
	/* 32B desc path: load rxdp.wb.qword2 for EXT_STATUS and FLEXBH_STAT */
	__m128i *rxdp_desc_0 = (void *)(&rxdp[desc_idx + 0].wb.qword2);
	__m128i *rxdp_desc_1 = (void *)(&rxdp[desc_idx + 1].wb.qword2);
	const __m128i desc_qw2_0 = _mm_load_si128(rxdp_desc_0);
	const __m128i desc_qw2_1 = _mm_load_si128(rxdp_desc_1);

	/* Mask for FLEXBH_STAT, and the FDIR_ID value to compare against. The
	 * remaining data is set to all 1's to pass through data.
	 */
	const __m256i flexbh_mask = _mm256_set_epi32(-1, -1, -1, 3 << 4,
						     -1, -1, -1, 3 << 4);
	const __m256i flexbh_id   = _mm256_set_epi32(-1, -1, -1, 1 << 4,
						     -1, -1, -1, 1 << 4);

	/* Load descriptor, check for FLEXBH bits, generate a mask for both
	 * packets in the register.
	 */
	__m256i desc_qw2_0_1 =
		_mm256_inserti128_si256(_mm256_castsi128_si256(desc_qw2_0),
					desc_qw2_1, 1);
	__m256i desc_tmp_msk = _mm256_and_si256(flexbh_mask, desc_qw2_0_1);
	__m256i fdir_mask = _mm256_cmpeq_epi32(flexbh_id, desc_tmp_msk);
	__m256i fdir_data = _mm256_alignr_epi8(desc_qw2_0_1, desc_qw2_0_1, 12);
	__m256i desc_fdir_data = _mm256_and_si256(fdir_mask, fdir_data);

	/* Write data out to the mbuf. There is no store to this area of the
	 * mbuf today, so we cannot combine it with another store.
	 */
	const uint32_t idx_0 = pkt_idx + desc_idx;
	const uint32_t idx_1 = pkt_idx + desc_idx + 1;

	rx_pkts[idx_0]->hash.fdir.hi = _mm256_extract_epi32(desc_fdir_data, 0);
	rx_pkts[idx_1]->hash.fdir.hi = _mm256_extract_epi32(desc_fdir_data, 4);

	/* Create mbuf flags as required for mbuf_flags layout
	 * (That's high lane [1,3,5,7, 0,2,4,6] as u32 lanes).
	 *
	 * - Mask away bits not required from the fdir_mask
	 * - Leave the PKT_FDIR_ID bit (1 << 13)
	 * - Position that bit correctly based on packet number
	 * - OR in the resulting bit to mbuf_flags
	 */
	RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
	__m256i mbuf_flag_mask = _mm256_set_epi32(0, 0, 0, 1 << 13,
						  0, 0, 0, 1 << 13);
	__m256i desc_flag_bit = _mm256_and_si256(mbuf_flag_mask, fdir_mask);

	/* For static-inline function, this will be stripped out
	 * as the desc_idx is a hard-coded constant.
	 */
	switch (desc_idx) {
	case 0:
		return _mm256_alignr_epi8(desc_flag_bit, desc_flag_bit, 4);
	case 2:
		return _mm256_alignr_epi8(desc_flag_bit, desc_flag_bit, 8);
	case 4:
		return _mm256_alignr_epi8(desc_flag_bit, desc_flag_bit, 12);
	case 6:
		return desc_flag_bit;
	default:
		break;
	}

	/* NOT REACHED, see above switch returns */
	return _mm256_setzero_si256();
}
#endif /* RTE_LIBRTE_I40E_16BYTE_RX_DESC */

#define PKTLEN_SHIFT 10

/* Force inline as some compilers will not inline by default. */
static __rte_always_inline uint16_t
_recv_raw_pkts_vec_avx512(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
			  uint16_t nb_pkts, uint8_t *split_packet)
{
	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
	const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
			0, rxq->mbuf_initializer);
	struct i40e_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail];
	volatile union i40e_rx_desc *rxdp = rxq->rx_ring + rxq->rx_tail;

	/* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP_AVX */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP_AVX);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * more time to act
	 */
	if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
		i40e_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.qword1.status_error_len &
			rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
		return 0;

	/* constants used in processing loop */
	const __m512i crc_adjust =
		_mm512_set4_epi32
			(0,             /* ignore non-length fields */
			 -rxq->crc_len, /* sub crc on data_len */
			 -rxq->crc_len, /* sub crc on pkt_len */
			 0              /* ignore non-length fields */
			);

	/* 8 packets DD mask, LSB in each 32-bit value */
	const __m256i dd_check = _mm256_set1_epi32(1);

	/* 8 packets EOP mask, second-LSB in each 32-bit value */
	const __m256i eop_check = _mm256_slli_epi32(dd_check,
			I40E_RX_DESC_STATUS_EOF_SHIFT);

	/* mask to shuffle from desc. to mbuf (2 descriptors)*/
	const __m512i shuf_msk =
		_mm512_set4_epi32
			(/* rss hash parsed separately */
			 /* octet 4~7, 32bits rss */
			 7 << 24 | 6 << 16 | 5 << 8 | 4,
			 /* octet 2~3, low 16 bits vlan_macip */
			 /* octet 14~15, 16 bits data_len */
			 3 << 24 | 2 << 16 | 15 << 8 | 14,
			 /* skip hi 16 bits pkt_len, zero out */
			 /* octet 14~15, 16 bits pkt_len */
			 0xFFFF << 16 | 15 << 8 | 14,
			 /* pkt_type set as unknown */
			 0xFFFFFFFF
			);
	/* compile-time check the above crc and shuffle layout is correct.
	 * NOTE: the first field (lowest address) is given last in set_epi
	 * calls above.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

	/* Status/Error flag masks */
	/* mask everything except RSS, flow director and VLAN flags
	 * bit2 is for VLAN tag, bit11 for flow director indication
	 * bit13:12 for RSS indication. Bits 3-5 of error
	 * field (bits 22-24) are for IP/L4 checksum errors
	 */
	const __m256i flags_mask = _mm256_set1_epi32
		((1 << 2) | (1 << 11) | (3 << 12) | (7 << 22));

	/* data to be shuffled by result of flag mask. If VLAN bit is set,
	 * (bit 2), then position 4 in this array will be used in the
	 * shuffle
	 */
	const __m256i vlan_flags_shuf = _mm256_set_epi32
		(0, 0, RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0,
		 0, 0, RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0);

	/* data to be shuffled by result of flag mask, shifted down 11.
	 * If RSS/FDIR bits are set, shuffle moves appropriate flags in
	 * place.
	 */
	const __m256i rss_flags_shuf = _mm256_set_epi8
		(0, 0, 0, 0, 0, 0, 0, 0,
		 RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_RSS_HASH, 0, 0,
		 0, 0, RTE_MBUF_F_RX_FDIR, 0, /* end up 128-bits */
		 0, 0, 0, 0, 0, 0, 0, 0,
		 RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_RSS_HASH, 0, 0,
		 0, 0, RTE_MBUF_F_RX_FDIR, 0);

	/* data to be shuffled by the result of the flags mask shifted by 22
	 * bits. This gives us the l3_l4 flags.
	 */
	const __m256i l3_l4_flags_shuf = _mm256_set_epi8
		(0, 0, 0, 0, 0, 0, 0, 0,
		 /* shift right 1 bit to make sure it not exceed 255 */
		 (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
		  RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		 (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		  RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
		 (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		 (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
		 (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		 (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
		 RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
		 (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1,
		 /* second 128-bits */
		 0, 0, 0, 0, 0, 0, 0, 0,
		 (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
		  RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		 (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		  RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
		 (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		 (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
		 (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		 (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
		 RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
		 (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1);

	const __m256i cksum_mask = _mm256_set1_epi32
		(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
		 RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);
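
	/* the l3_l4 flag values above are stored pre-shifted right by one so
	 * each fits in a byte for the epi8 shuffle; they are shifted back left
	 * by one after the shuffle in the loop below
	 */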

	uint16_t i, received;

	for (i = 0, received = 0; i < nb_pkts;
	     i += RTE_I40E_DESCS_PER_LOOP_AVX,
	     rxdp += RTE_I40E_DESCS_PER_LOOP_AVX) {
		/* step 1, copy over 8 mbuf pointers to rx_pkts array */
		_mm256_storeu_si256((void *)&rx_pkts[i],
				_mm256_loadu_si256((void *)&sw_ring[i]));
#ifdef RTE_ARCH_X86_64
		_mm256_storeu_si256((void *)&rx_pkts[i + 4],
				_mm256_loadu_si256((void *)&sw_ring[i + 4]));
#endif

		__m512i raw_desc0_3, raw_desc4_7;
		__m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7;

		/* load in descriptors, in reverse order */
		const __m128i raw_desc7 =
			_mm_load_si128((void *)(rxdp + 7));
		rte_compiler_barrier();
		const __m128i raw_desc6 =
			_mm_load_si128((void *)(rxdp + 6));
		rte_compiler_barrier();
		const __m128i raw_desc5 =
			_mm_load_si128((void *)(rxdp + 5));
		rte_compiler_barrier();
		const __m128i raw_desc4 =
			_mm_load_si128((void *)(rxdp + 4));
		rte_compiler_barrier();
		const __m128i raw_desc3 =
			_mm_load_si128((void *)(rxdp + 3));
		rte_compiler_barrier();
		const __m128i raw_desc2 =
			_mm_load_si128((void *)(rxdp + 2));
		rte_compiler_barrier();
		const __m128i raw_desc1 =
			_mm_load_si128((void *)(rxdp + 1));
		rte_compiler_barrier();
		const __m128i raw_desc0 =
			_mm_load_si128((void *)(rxdp + 0));
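
		/* combine the eight 16B loads into 256-bit pairs and then two
		 * 512-bit registers holding four descriptors each
		 */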
		raw_desc6_7 = _mm256_inserti128_si256
			(_mm256_castsi128_si256(raw_desc6), raw_desc7, 1);
		raw_desc4_5 = _mm256_inserti128_si256
			(_mm256_castsi128_si256(raw_desc4), raw_desc5, 1);
		raw_desc2_3 = _mm256_inserti128_si256
			(_mm256_castsi128_si256(raw_desc2), raw_desc3, 1);
		raw_desc0_1 = _mm256_inserti128_si256
			(_mm256_castsi128_si256(raw_desc0), raw_desc1, 1);

		raw_desc4_7 = _mm512_inserti64x4
			(_mm512_castsi256_si512(raw_desc4_5), raw_desc6_7, 1);
		raw_desc0_3 = _mm512_inserti64x4
			(_mm512_castsi256_si512(raw_desc0_1), raw_desc2_3, 1);

		if (split_packet) {
			int j;

			for (j = 0; j < RTE_I40E_DESCS_PER_LOOP_AVX; j++)
				rte_mbuf_prefetch_part2(rx_pkts[i + j]);
		}

		/* convert descriptors 0-7 into mbufs, adjusting length and
		 * re-arranging fields. Then write into the mbuf
		 */
		const __m512i len4_7 = _mm512_slli_epi32
				(raw_desc4_7, PKTLEN_SHIFT);
		const __m512i len0_3 = _mm512_slli_epi32
				(raw_desc0_3, PKTLEN_SHIFT);
		const __m512i desc4_7 = _mm512_mask_blend_epi16
				(0x80808080, raw_desc4_7, len4_7);
		const __m512i desc0_3 = _mm512_mask_blend_epi16
				(0x80808080, raw_desc0_3, len0_3);
		__m512i mb4_7 = _mm512_shuffle_epi8(desc4_7, shuf_msk);
		__m512i mb0_3 = _mm512_shuffle_epi8(desc0_3, shuf_msk);

		mb4_7 = _mm512_add_epi32(mb4_7, crc_adjust);
		mb0_3 = _mm512_add_epi32(mb0_3, crc_adjust);
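
		/* crc_adjust subtracts the FCS length from pkt_len and
		 * data_len when the queue is configured to keep the CRC
		 * (rxq->crc_len != 0)
		 */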

		/* to get packet types, shift 64-bit values down 30 bits
		 * and so ptype is in lower 8-bits in each
		 */
		const __m512i ptypes4_7 = _mm512_srli_epi64(desc4_7, 30);
		const __m512i ptypes0_3 = _mm512_srli_epi64(desc0_3, 30);
		const __m256i ptypes6_7 =
			_mm512_extracti64x4_epi64(ptypes4_7, 1);
		const __m256i ptypes4_5 =
			_mm512_extracti64x4_epi64(ptypes4_7, 0);
		const __m256i ptypes2_3 =
			_mm512_extracti64x4_epi64(ptypes0_3, 1);
		const __m256i ptypes0_1 =
			_mm512_extracti64x4_epi64(ptypes0_3, 0);
		const uint8_t ptype7 = _mm256_extract_epi8(ptypes6_7, 24);
		const uint8_t ptype6 = _mm256_extract_epi8(ptypes6_7, 8);
		const uint8_t ptype5 = _mm256_extract_epi8(ptypes4_5, 24);
		const uint8_t ptype4 = _mm256_extract_epi8(ptypes4_5, 8);
		const uint8_t ptype3 = _mm256_extract_epi8(ptypes2_3, 24);
		const uint8_t ptype2 = _mm256_extract_epi8(ptypes2_3, 8);
		const uint8_t ptype1 = _mm256_extract_epi8(ptypes0_1, 24);
		const uint8_t ptype0 = _mm256_extract_epi8(ptypes0_1, 8);

		const __m512i ptype4_7 = _mm512_set_epi32
			(0, 0, 0, ptype_tbl[ptype7],
			 0, 0, 0, ptype_tbl[ptype6],
			 0, 0, 0, ptype_tbl[ptype5],
			 0, 0, 0, ptype_tbl[ptype4]);
		const __m512i ptype0_3 = _mm512_set_epi32
			(0, 0, 0, ptype_tbl[ptype3],
			 0, 0, 0, ptype_tbl[ptype2],
			 0, 0, 0, ptype_tbl[ptype1],
			 0, 0, 0, ptype_tbl[ptype0]);

		mb4_7 = _mm512_mask_blend_epi32(0x1111, mb4_7, ptype4_7);
		mb0_3 = _mm512_mask_blend_epi32(0x1111, mb0_3, ptype0_3);

		__m256i mb4_5 = _mm512_extracti64x4_epi64(mb4_7, 0);
		__m256i mb6_7 = _mm512_extracti64x4_epi64(mb4_7, 1);
		__m256i mb0_1 = _mm512_extracti64x4_epi64(mb0_3, 0);
		__m256i mb2_3 = _mm512_extracti64x4_epi64(mb0_3, 1);
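
		/* mb0_1 .. mb6_7 each hold the rx_descriptor_fields1 contents
		 * for one pair of packets, one packet per 128-bit lane
		 */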

		/**
		 * use permute/extract to get status content
		 * After the operations, the packets status flags are in the
		 * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
		 */
		/* merge the status bits into one register */
		const __m512i status_permute_msk = _mm512_set_epi32
			(0, 0, 0, 0,
			 0, 0, 0, 0,
			 22, 30, 6, 14,
			 18, 26, 2, 10);
		const __m512i raw_status0_7 = _mm512_permutex2var_epi32
			(desc4_7, status_permute_msk, desc0_3);
		__m256i status0_7 = _mm512_extracti64x4_epi64
			(raw_status0_7, 0);

		/* now do flag manipulation */

		/* get only flag/error bits we want */
		const __m256i flag_bits =
			_mm256_and_si256(status0_7, flags_mask);
		/* set vlan and rss flags */
		const __m256i vlan_flags =
			_mm256_shuffle_epi8(vlan_flags_shuf, flag_bits);
		const __m256i rss_fdir_bits = _mm256_srli_epi32(flag_bits, 11);
		const __m256i rss_flags = _mm256_shuffle_epi8(rss_flags_shuf,
				rss_fdir_bits);

		/* l3_l4_error flags, shuffle, then shift to correct adjustment
		 * of flags in flags_shuf, and finally mask out extra bits
		 */
		__m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
				_mm256_srli_epi32(flag_bits, 22));
		l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
		l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);

		__m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
				_mm256_or_si256(rss_flags, vlan_flags));

		/* If the rxq has FDIR enabled, read and process the FDIR info
		 * from the descriptor. This can cause more loads/stores, so is
		 * not always performed. Branch over the code when not enabled.
		 */
		if (rxq->fdir_enabled) {
#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC
			/* 16B descriptor code path:
			 * RSS and FDIR ID use the same offset in the desc, so
			 * only one can be present at a time. The code below
			 * identifies an FDIR ID match, and zeros the RSS value
			 * in the mbuf on FDIR match to keep mbuf data clean.
			 */
#define FDIR_BLEND_MASK ((1 << 3) | (1 << 7))

			/* Flags:
			 * - Take flags, shift bits to null out
			 * - CMPEQ with known FDIR ID, to get 0xFFFF or 0 mask
			 * - Strip bits from mask, leaving 0 or 1 for FDIR ID
			 * - Merge with mbuf_flags
			 */
			/* FLM = 1, FLTSTAT = 0b01, (FLM | FLTSTAT) == 3.
			 * Shift left by 28 to avoid having to mask.
			 */
			const __m256i fdir =
				_mm256_slli_epi32(rss_fdir_bits, 28);
			const __m256i fdir_id = _mm256_set1_epi32(3 << 28);

			/* As above, the fdir_mask to packet mapping is this:
			 * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
			 * Then OR FDIR flags to mbuf_flags on FDIR ID hit.
			 */
			RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
			const __m256i pkt_fdir_bit = _mm256_set1_epi32(1 << 13);
			const __m256i fdir_mask =
				_mm256_cmpeq_epi32(fdir, fdir_id);
			const __m256i fdir_bits =
				_mm256_and_si256(fdir_mask, pkt_fdir_bit);

			mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_bits);

			/* Based on FDIR_MASK, clear the RSS or FDIR value.
			 * The FDIR ID value is masked to zero if not a hit,
			 * otherwise the mb0_1 register RSS field is zeroed.
			 */
			const __m256i fdir_zero_mask = _mm256_setzero_si256();
			__m256i tmp0_1 = _mm256_blend_epi32(fdir_zero_mask,
						fdir_mask, FDIR_BLEND_MASK);
			__m256i fdir_mb0_1 = _mm256_and_si256(mb0_1, fdir_mask);

			mb0_1 = _mm256_andnot_si256(tmp0_1, mb0_1);

			/* Write to mbuf: no stores to combine with, so just a
			 * scalar store to push data here.
			 */
			rx_pkts[i + 0]->hash.fdir.hi =
					_mm256_extract_epi32(fdir_mb0_1, 3);
			rx_pkts[i + 1]->hash.fdir.hi =
					_mm256_extract_epi32(fdir_mb0_1, 7);

			/* Same as above, only shift the fdir_mask to align
			 * the packet FDIR mask with the FDIR_ID desc lane.
			 */
			__m256i tmp2_3 =
				_mm256_alignr_epi8(fdir_mask, fdir_mask, 12);
			__m256i fdir_mb2_3 = _mm256_and_si256(mb2_3, tmp2_3);

			tmp2_3 = _mm256_blend_epi32(fdir_zero_mask, tmp2_3,
						    FDIR_BLEND_MASK);
			mb2_3 = _mm256_andnot_si256(tmp2_3, mb2_3);
			rx_pkts[i + 2]->hash.fdir.hi =
					_mm256_extract_epi32(fdir_mb2_3, 3);
			rx_pkts[i + 3]->hash.fdir.hi =
					_mm256_extract_epi32(fdir_mb2_3, 7);

			__m256i tmp4_5 =
				_mm256_alignr_epi8(fdir_mask, fdir_mask, 8);
			__m256i fdir_mb4_5 = _mm256_and_si256(mb4_5, tmp4_5);

			tmp4_5 = _mm256_blend_epi32(fdir_zero_mask, tmp4_5,
						    FDIR_BLEND_MASK);
			mb4_5 = _mm256_andnot_si256(tmp4_5, mb4_5);
			rx_pkts[i + 4]->hash.fdir.hi =
					_mm256_extract_epi32(fdir_mb4_5, 3);
			rx_pkts[i + 5]->hash.fdir.hi =
					_mm256_extract_epi32(fdir_mb4_5, 7);

			__m256i tmp6_7 =
				_mm256_alignr_epi8(fdir_mask, fdir_mask, 4);
			__m256i fdir_mb6_7 = _mm256_and_si256(mb6_7, tmp6_7);

			tmp6_7 = _mm256_blend_epi32(fdir_zero_mask, tmp6_7,
						    FDIR_BLEND_MASK);
			mb6_7 = _mm256_andnot_si256(tmp6_7, mb6_7);
			rx_pkts[i + 6]->hash.fdir.hi =
					_mm256_extract_epi32(fdir_mb6_7, 3);
			rx_pkts[i + 7]->hash.fdir.hi =
					_mm256_extract_epi32(fdir_mb6_7, 7);

			/* End of 16B descriptor handling */
#else
			/* 32B descriptor FDIR ID mark handling. Returns bits
			 * to be OR-ed into the mbuf olflags.
			 */
			__m256i fdir_add_flags;

			fdir_add_flags = desc_fdir_processing_32b(rxdp, rx_pkts, i, 0);
			mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_add_flags);

			fdir_add_flags = desc_fdir_processing_32b(rxdp, rx_pkts, i, 2);
			mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_add_flags);

			fdir_add_flags = desc_fdir_processing_32b(rxdp, rx_pkts, i, 4);
			mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_add_flags);

			fdir_add_flags = desc_fdir_processing_32b(rxdp, rx_pkts, i, 6);
			mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_add_flags);
			/* End 32B desc handling */
#endif /* RTE_LIBRTE_I40E_16BYTE_RX_DESC */

		} /* if() on FDIR enabled */

		/* At this point, we have the 8 sets of flags in the low 16-bits
		 * of each 32-bit value in vlan0.
		 * We want to extract these, and merge them with the mbuf init data
		 * so we can do a single write to the mbuf to set the flags
		 * and all the other initialization fields. Extracting the
		 * appropriate flags means that we have to do a shift and blend for
		 * each mbuf before we do the write. However, we can also
		 * add in the previously computed rx_descriptor fields to
		 * make a single 256-bit write per mbuf
		 */
		/* check the structure matches expectations */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
				offsetof(struct rte_mbuf, rearm_data) + 8);
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
				RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
		/* build up data and do writes */
		__m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
			rearm6, rearm7;
		rearm6 = _mm256_blend_epi32
			(mbuf_init, _mm256_slli_si256(mbuf_flags, 8), 0x04);
		rearm4 = _mm256_blend_epi32
			(mbuf_init, _mm256_slli_si256(mbuf_flags, 4), 0x04);
		rearm2 = _mm256_blend_epi32
			(mbuf_init, mbuf_flags, 0x04);
		rearm0 = _mm256_blend_epi32
			(mbuf_init, _mm256_srli_si256(mbuf_flags, 4), 0x04);
		/* permute to add in the rx_descriptor e.g. rss fields */
		rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
		rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
		rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
		rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);

		_mm256_storeu_si256
			((__m256i *)&rx_pkts[i + 6]->rearm_data, rearm6);
		_mm256_storeu_si256
			((__m256i *)&rx_pkts[i + 4]->rearm_data, rearm4);
		_mm256_storeu_si256
			((__m256i *)&rx_pkts[i + 2]->rearm_data, rearm2);
		_mm256_storeu_si256
			((__m256i *)&rx_pkts[i + 0]->rearm_data, rearm0);

		/* repeat for the odd mbufs */
		const __m256i odd_flags = _mm256_castsi128_si256
			(_mm256_extracti128_si256(mbuf_flags, 1));
		rearm7 = _mm256_blend_epi32
			(mbuf_init, _mm256_slli_si256(odd_flags, 8), 0x04);
		rearm5 = _mm256_blend_epi32
			(mbuf_init, _mm256_slli_si256(odd_flags, 4), 0x04);
		rearm3 = _mm256_blend_epi32
			(mbuf_init, odd_flags, 0x04);
		rearm1 = _mm256_blend_epi32
			(mbuf_init, _mm256_srli_si256(odd_flags, 4), 0x04);
		/* since odd mbufs are already in hi 128-bits use blend */
		rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
		rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
		rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
		rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
		/* again write to mbufs */
		_mm256_storeu_si256
			((__m256i *)&rx_pkts[i + 7]->rearm_data, rearm7);
		_mm256_storeu_si256
			((__m256i *)&rx_pkts[i + 5]->rearm_data, rearm5);
		_mm256_storeu_si256
			((__m256i *)&rx_pkts[i + 3]->rearm_data, rearm3);
		_mm256_storeu_si256
			((__m256i *)&rx_pkts[i + 1]->rearm_data, rearm1);

		/* extract and record EOP bit */
		if (split_packet) {
			const __m128i eop_mask =
				_mm_set1_epi16
					(1 << I40E_RX_DESC_STATUS_EOF_SHIFT);
			const __m256i eop_bits256 =
				_mm256_and_si256(status0_7, eop_check);
			/* pack status bits into a single 128-bit register */
			const __m128i eop_bits =
				_mm_packus_epi32
					(_mm256_castsi256_si128(eop_bits256),
					 _mm256_extractf128_si256(eop_bits256, 1));
			/* flip bits, and mask out the EOP bit, which is now
			 * a split-packet bit i.e. !EOP, rather than EOP one.
			 */
			__m128i split_bits = _mm_andnot_si128(eop_bits,
					eop_mask);
			/* eop bits are out of order, so we need to shuffle them
			 * back into order again. In doing so, only use low 8
			 * bits, which acts like another pack instruction
			 * The original order is (hi->lo): 1,3,5,7,0,2,4,6
			 * [Since we use epi8, the 16-bit positions are
			 * multiplied by 2 in the eop_shuffle value.]
			 */
			__m128i eop_shuffle = _mm_set_epi8
				(0xFF, 0xFF, 0xFF, 0xFF, /* zero hi 64b */
				 0xFF, 0xFF, 0xFF, 0xFF,
				 8, 0, 10, 2, /* move values to lo 64b */
				 12, 4, 14, 6);
			split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
			*(uint64_t *)split_packet =
				_mm_cvtsi128_si64(split_bits);
			split_packet += RTE_I40E_DESCS_PER_LOOP_AVX;
		}

		/* perform dd_check */
		status0_7 = _mm256_and_si256(status0_7, dd_check);
		status0_7 = _mm256_packs_epi32
			(status0_7, _mm256_setzero_si256());

		uint64_t burst = __builtin_popcountll
				(_mm_cvtsi128_si64
					(_mm256_extracti128_si256
						(status0_7, 1)));
		burst += __builtin_popcountll(_mm_cvtsi128_si64
				(_mm256_castsi256_si128(status0_7)));
		received += burst;
		if (burst != RTE_I40E_DESCS_PER_LOOP_AVX)
			break;
	}

	/* update tail pointers */
	rxq->rx_tail += received;
	rxq->rx_tail &= (rxq->nb_rx_desc - 1);
	if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */
		rxq->rx_tail--;
		received--;
	}
	rxq->rxrearm_nb += received;
	return received;
}

/* Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 */
uint16_t
i40e_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
			  uint16_t nb_pkts)
{
	return _recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL);
}

/**
 * vPMD receive routine that reassembles single burst of 32 scattered packets
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 */
static uint16_t
i40e_recv_scattered_burst_vec_avx512(void *rx_queue,
				     struct rte_mbuf **rx_pkts,
				     uint16_t nb_pkts)
{
	struct i40e_rx_queue *rxq = rx_queue;
	uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _recv_raw_pkts_vec_avx512(rxq, rx_pkts, nb_pkts,
			split_flags);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;

	if (!rxq->pkt_first_seg &&
	    split_fl64[0] == 0 && split_fl64[1] == 0 &&
	    split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	unsigned int i = 0;

	if (!rxq->pkt_first_seg) {
		/* find the first split flag, and only reassemble then */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
		rxq->pkt_first_seg = rx_pkts[i];
	}
	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
			&split_flags[i]);
}

/**
 * vPMD receive routine that reassembles scattered packets.
 * Main receive routine that can handle arbitrary burst sizes
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 */
uint16_t
i40e_recv_scattered_pkts_vec_avx512(void *rx_queue,
				    struct rte_mbuf **rx_pkts,
				    uint16_t nb_pkts)
{
	uint16_t retval = 0;

	while (nb_pkts > RTE_I40E_VPMD_RX_BURST) {
		uint16_t burst = i40e_recv_scattered_burst_vec_avx512(rx_queue,
				rx_pkts + retval, RTE_I40E_VPMD_RX_BURST);
		retval += burst;
		nb_pkts -= burst;
		if (burst < RTE_I40E_VPMD_RX_BURST)
			return retval;
	}
	return retval + i40e_recv_scattered_burst_vec_avx512(rx_queue,
			rx_pkts + retval, nb_pkts);
}
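
/* Free the mbufs of up to tx_rs_thresh completed TX descriptors: either push
 * the raw pointers straight back into the per-lcore mempool cache in bulk
 * (MBUF_FAST_FREE path) or release them via rte_pktmbuf_prefree_seg() and
 * rte_mempool_put_bulk().
 */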
static __rte_always_inline int
i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq)
{
	struct i40e_vec_tx_entry *txep;
	uint32_t n;
	uint32_t i;
	int nb_free = 0;
	struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];

	/* check DD bits on threshold descriptor */
	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
			rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
			rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
		return 0;

	n = txq->tx_rs_thresh;

	/* first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_rs_thresh-1)
	 */
	txep = (void *)txq->sw_ring;
	txep += txq->tx_next_dd - (n - 1);
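
	/* RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE: the application guarantees that
	 * all mbufs on this queue come from one mempool and have refcnt 1, so
	 * the raw pointers can be recycled into the pool cache directly.
	 */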
	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
		struct rte_mempool *mp = txep[0].mbuf->pool;
		void **cache_objs;
		struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
				rte_lcore_id());

		if (!cache || cache->len == 0)
			goto normal;

		cache_objs = &cache->objs[cache->len];
		if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
			rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n);
			goto done;
		}

		/* The cache follows the following algorithm
		 *   1. Add the objects to the cache
		 *   2. Anything greater than the cache min value (if it
		 *   crosses the cache flush threshold) is flushed to the ring.
		 */
		/* Add elements back into the cache */
		uint32_t copied = 0;
		/* n is multiple of 32 */
		while (copied < n) {
			const __m512i a = _mm512_load_si512(&txep[copied]);
			const __m512i b = _mm512_load_si512(&txep[copied + 8]);
			const __m512i c = _mm512_load_si512(&txep[copied + 16]);
			const __m512i d = _mm512_load_si512(&txep[copied + 24]);

			_mm512_storeu_si512(&cache_objs[copied], a);
			_mm512_storeu_si512(&cache_objs[copied + 8], b);
			_mm512_storeu_si512(&cache_objs[copied + 16], c);
			_mm512_storeu_si512(&cache_objs[copied + 24], d);
			copied += 32;
		}
		cache->len += n;

		if (cache->len >= cache->flushthresh) {
			rte_mempool_ops_enqueue_bulk
				(mp, &cache->objs[cache->size],
				 cache->len - cache->size);
			cache->len = cache->size;
		}
		goto done;
	}

normal:
	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
	if (likely(m)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < n; i++) {
			rte_prefetch0(&txep[i + 3].mbuf->cacheline1);
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (likely(m)) {
				if (likely(m->pool == free[0]->pool)) {
					free[nb_free++] = m;
				} else {
					rte_mempool_put_bulk(free[0]->pool,
							     (void *)free,
							     nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (m)
				rte_mempool_put(m->pool, m);
		}
	}

done:
	/* buffers were freed, update counters */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
	if (txq->tx_next_dd >= txq->nb_tx_desc)
		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

	return txq->tx_rs_thresh;
}
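
/* Write one TX data descriptor: the high quadword carries the descriptor type,
 * command flags and buffer length, the low quadword the buffer DMA address.
 */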
static inline void
vtx1(volatile struct i40e_tx_desc *txdp, struct rte_mbuf *pkt, uint64_t flags)
{
	uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
			((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
			((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));

	__m128i descriptor = _mm_set_epi64x(high_qw,
			pkt->buf_iova + pkt->data_off);
	_mm_store_si128((__m128i *)txdp, descriptor);
}
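
/* Write nb_pkts data descriptors, four at a time with a single 512-bit store
 * where possible, falling back to vtx1() for any remainder.
 */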
static inline void
vtx(volatile struct i40e_tx_desc *txdp,
    struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
	const uint64_t hi_qw_tmpl = (I40E_TX_DESC_DTYPE_DATA |
			((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT));

	for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) {
		uint64_t hi_qw3 = hi_qw_tmpl |
			((uint64_t)pkt[3]->data_len <<
			 I40E_TXD_QW1_TX_BUF_SZ_SHIFT);
		uint64_t hi_qw2 = hi_qw_tmpl |
			((uint64_t)pkt[2]->data_len <<
			 I40E_TXD_QW1_TX_BUF_SZ_SHIFT);
		uint64_t hi_qw1 = hi_qw_tmpl |
			((uint64_t)pkt[1]->data_len <<
			 I40E_TXD_QW1_TX_BUF_SZ_SHIFT);
		uint64_t hi_qw0 = hi_qw_tmpl |
			((uint64_t)pkt[0]->data_len <<
			 I40E_TXD_QW1_TX_BUF_SZ_SHIFT);

		__m512i desc0_3 = _mm512_set_epi64
			(hi_qw3, pkt[3]->buf_iova + pkt[3]->data_off,
			 hi_qw2, pkt[2]->buf_iova + pkt[2]->data_off,
			 hi_qw1, pkt[1]->buf_iova + pkt[1]->data_off,
			 hi_qw0, pkt[0]->buf_iova + pkt[0]->data_off);
		_mm512_storeu_si512((void *)txdp, desc0_3);
	}

	/* do any last ones */
	while (nb_pkts) {
		vtx1(txdp, *pkt, flags);
		txdp++, pkt++, nb_pkts--;
	}
}
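
/* Record the transmitted mbufs in the software ring so they can be freed once
 * the hardware reports the corresponding descriptors as done.
 */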
static __rte_always_inline void
tx_backlog_entry_avx512(struct i40e_vec_tx_entry *txep,
			struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	int i;

	for (i = 0; i < (int)nb_pkts; ++i)
		txep[i].mbuf = tx_pkts[i];
}
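
/* Transmit a fixed burst: reserve descriptors from the free count, fill them
 * with vtx()/vtx1(), request a completion report (RS bit) when tx_next_rs is
 * crossed and finally bump the queue tail register.
 */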
static inline uint16_t
i40e_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
				 uint16_t nb_pkts)
{
	struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
	volatile struct i40e_tx_desc *txdp;
	struct i40e_vec_tx_entry *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = I40E_TD_CMD;
	uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;

	if (txq->nb_tx_free < txq->tx_free_thresh)
		i40e_tx_free_bufs_avx512(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = (void *)txq->sw_ring;
	txep += tx_id;

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {
		tx_backlog_entry_avx512(txep, tx_pkts, n);

		vtx(txdp, tx_pkts, n - 1, flags);
		tx_pkts += (n - 1);
		txdp += (n - 1);

		vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* avoid reaching the end of the ring */
		txdp = txq->tx_ring;
		txep = (void *)txq->sw_ring;
	}

	tx_backlog_entry_avx512(txep, tx_pkts, nb_commit);

	vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
			rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
					 I40E_TXD_QW1_CMD_SHIFT);
		txq->tx_next_rs =
			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
	}

	txq->tx_tail = tx_id;

	I40E_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);

	return nb_pkts;
}

uint16_t
i40e_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
	uint16_t nb_tx = 0;
	struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;

	while (nb_pkts) {
		uint16_t ret, num;

		/* cross rs_thresh boundary is not allowed */
		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
		ret = i40e_xmit_fixed_burst_vec_avx512
			(tx_queue, &tx_pkts[nb_tx], num);