1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
5 #include "ice_rxtx_vec_common.h"
9 #ifndef __INTEL_COMPILER
10 #pragma GCC diagnostic ignored "-Wcast-qual"
13 #define ICE_DESCS_PER_LOOP_AVX 8
15 static __rte_always_inline void
16 ice_rxq_rearm(struct ice_rx_queue *rxq)
20 volatile union ice_rx_flex_desc *rxdp;
21 struct ice_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
22 struct rte_mempool_cache *cache = rte_mempool_default_cache(rxq->mp,
25 rxdp = rxq->rx_ring + rxq->rxrearm_start;
28 return ice_rxq_rearm_common(rxq, true);
30 /* We need to pull 'n' more MBUFs into the software ring */
31 if (cache->len < ICE_RXQ_REARM_THRESH) {
32 uint32_t req = ICE_RXQ_REARM_THRESH + (cache->size -
35 int ret = rte_mempool_ops_dequeue_bulk(rxq->mp,
36 &cache->objs[cache->len], req);
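/*
 * Note: the buffers are dequeued straight into the mempool cache's object
 * array; the fill loop below then consumes them from the tail of
 * cache->objs, so no intermediate copy of the mbuf pointers is needed.
 */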
40 if (rxq->rxrearm_nb + ICE_RXQ_REARM_THRESH >=
44 dma_addr0 = _mm_setzero_si128();
45 for (i = 0; i < ICE_DESCS_PER_LOOP; i++) {
46 rxep[i].mbuf = &rxq->fake_mbuf;
48 ((__m128i *)&rxdp[i].read,
52 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
58 const __m512i iova_offsets = _mm512_set1_epi64
59 (offsetof(struct rte_mbuf, buf_iova));
60 const __m512i headroom = _mm512_set1_epi64(RTE_PKTMBUF_HEADROOM);
62 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
63 /* shuffle the iovas into the correct descriptor slots. Lanes 4-7 will
64 * contain zeros, so lane 7 is used as a zero value.
66 const __m512i permute_idx = _mm512_set_epi64(7, 7, 3, 1, 7, 7, 2, 0);
68 const __m512i permute_idx = _mm512_set_epi64(7, 3, 6, 2, 5, 1, 4, 0);
71 /* fill up the rxd in vector, process 8 mbufs in one loop */
72 for (i = 0; i < ICE_RXQ_REARM_THRESH / 8; i++) {
73 const __m512i mbuf_ptrs = _mm512_loadu_si512
74 (&cache->objs[cache->len - 8]);
75 _mm512_store_si512(rxep, mbuf_ptrs);
77 /* gather iova of mbuf0-7 into one zmm reg */
78 const __m512i iova_base_addrs = _mm512_i64gather_epi64
79 (_mm512_add_epi64(mbuf_ptrs, iova_offsets),
82 const __m512i iova_addrs = _mm512_add_epi64(iova_base_addrs,
84 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
85 const __m512i iovas0 = _mm512_castsi256_si512
86 (_mm512_extracti64x4_epi64(iova_addrs, 0));
87 const __m512i iovas1 = _mm512_castsi256_si512
88 (_mm512_extracti64x4_epi64(iova_addrs, 1));
90 /* the permute leaves iova 2-3 in the hdr_addr of desc 0-1,
91 * but these are ignored by the driver since header split is not
92 * enabled. Similarly for desc 4 & 5.
94 const __m512i desc0_1 = _mm512_permutexvar_epi64
95 (permute_idx, iovas0);
96 const __m512i desc2_3 = _mm512_bsrli_epi128(desc0_1, 8);
98 const __m512i desc4_5 = _mm512_permutexvar_epi64
99 (permute_idx, iovas1);
100 const __m512i desc6_7 = _mm512_bsrli_epi128(desc4_5, 8);
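/*
 * Worked example for the 32B descriptor layout: after the permute,
 * desc0_1 holds (lanes 0..7) [iova0, iova2, 0, 0, iova1, iova3, 0, 0],
 * i.e. the pkt_addr/hdr_addr pairs of descriptors 0 and 1.  The 8-byte
 * shift within each 128-bit lane then yields
 * [iova2, 0, 0, 0, iova3, 0, 0, 0] for descriptors 2 and 3, so the four
 * stores below arm all eight descriptors of this iteration.
 */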
102 _mm512_store_si512((void *)rxdp, desc0_1);
103 _mm512_store_si512((void *)(rxdp + 2), desc2_3);
104 _mm512_store_si512((void *)(rxdp + 4), desc4_5);
105 _mm512_store_si512((void *)(rxdp + 6), desc6_7);
107 /* the permute leaves iova 4-7 in the hdr_addr of desc 0-3,
108 * but these are ignored by the driver since header split is not enabled.
111 const __m512i desc0_3 = _mm512_permutexvar_epi64
112 (permute_idx, iova_addrs);
113 const __m512i desc4_7 = _mm512_bsrli_epi128(desc0_3, 8);
115 _mm512_store_si512((void *)rxdp, desc0_3);
116 _mm512_store_si512((void *)(rxdp + 4), desc4_7);
118 rxep += 8, rxdp += 8, cache->len -= 8;
121 rxq->rxrearm_start += ICE_RXQ_REARM_THRESH;
122 if (rxq->rxrearm_start >= rxq->nb_rx_desc)
123 rxq->rxrearm_start = 0;
125 rxq->rxrearm_nb -= ICE_RXQ_REARM_THRESH;
127 rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
128 (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
130 /* Update the tail pointer on the NIC */
131 ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
134 static inline __m256i
135 ice_flex_rxd_to_fdir_flags_vec_avx512(const __m256i fdir_id0_7)
137 #define FDID_MIS_MAGIC 0xFFFFFFFF
138 RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
139 RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
140 const __m256i pkt_fdir_bit = _mm256_set1_epi32(PKT_RX_FDIR |
142 /* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
143 const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
144 __m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
146 /* XOR with the all-ones mismatch pattern inverts fdir_mask (bit-wise NOT) */
147 fdir_mask = _mm256_xor_si256(fdir_mask, fdir_mis_mask);
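/*
 * Per 32-bit lane: flow_id == FDID_MIS_MAGIC -> cmpeq gave all-ones, the
 * XOR clears the lane and no flag is set; any other flow_id -> the lane
 * becomes all-ones and PKT_RX_FDIR | PKT_RX_FDIR_ID are reported below.
 */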
148 const __m256i fdir_flags = _mm256_and_si256(fdir_mask, pkt_fdir_bit);
153 static inline uint16_t
154 _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
155 struct rte_mbuf **rx_pkts,
156 uint16_t nb_pkts, uint8_t *split_packet)
158 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
159 const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
160 0, rxq->mbuf_initializer);
161 struct ice_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail];
162 volatile union ice_rx_flex_desc *rxdp = rxq->rx_ring + rxq->rx_tail;
166 /* nb_pkts has to be floor-aligned to ICE_DESCS_PER_LOOP_AVX */
167 nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, ICE_DESCS_PER_LOOP_AVX);
169 /* See if we need to rearm the RX queue - gives the prefetch a bit of time to complete */
172 if (rxq->rxrearm_nb > ICE_RXQ_REARM_THRESH)
175 /* Before we start moving massive data around, check to see if
176 * there is actually a packet available
178 if (!(rxdp->wb.status_error0 &
179 rte_cpu_to_le_32(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
182 /* constants used in processing loop */
183 const __m512i crc_adjust =
185 (0, /* ignore non-length fields */
186 -rxq->crc_len, /* sub crc on data_len */
187 -rxq->crc_len, /* sub crc on pkt_len */
188 0 /* ignore non-length fields */
191 /* 8 packets DD mask, LSB in each 32-bit value */
192 const __m256i dd_check = _mm256_set1_epi32(1);
194 /* 8 packets EOP mask, second-LSB in each 32-bit value */
195 const __m256i eop_check = _mm256_slli_epi32(dd_check,
196 ICE_RX_DESC_STATUS_EOF_S);
198 /* mask to shuffle from desc. to mbuf (4 descriptors)*/
199 const __m512i shuf_msk =
201 (/* rss hash parsed separately */
203 /* octet 10~11, 16 bits vlan_macip */
204 /* octet 4~5, 16 bits data_len */
205 11 << 24 | 10 << 16 | 5 << 8 | 4,
206 /* skip hi 16 bits pkt_len, zero out */
207 /* octet 4~5, 16 bits pkt_len */
208 0xFFFF << 16 | 5 << 8 | 4,
209 /* pkt_type set as unknown */
214 * compile-time check that the crc and shuffle layout above is correct.
215 * NOTE: the first field (lowest address) is given last in the set_epi* calls.
218 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
219 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
220 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
221 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
222 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
223 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
224 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
225 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
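/*
 * In other words, within each 128-bit descriptor lane: bytes 4-5
 * (pkt_len) are copied into both mbuf pkt_len and data_len, bytes 10-11
 * (l2tag1) land in vlan_tci, while the packet type and RSS hash slots
 * are filled in separately later.  crc_adjust then subtracts the CRC
 * length from both length fields in the same vector operation.
 */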
227 /* Status/Error flag masks */
229 * mask everything except Checksum Reports, RSS indication
230 * and VLAN indication.
231 * bits 7:4 are the IP/L4 (inner and outer) checksum error bits.
232 * bit12 is for RSS indication.
233 * bit13 is for VLAN indication.
235 const __m256i flags_mask =
236 _mm256_set1_epi32((0xF << 4) | (1 << 12) | (1 << 13));
238 * data to be shuffled by the result of the flags mask shifted by 4
239 * bits. This gives us the l3_l4 flags.
241 const __m256i l3_l4_flags_shuf =
242 _mm256_set_epi8((PKT_RX_OUTER_L4_CKSUM_BAD >> 20 |
243 PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
244 PKT_RX_IP_CKSUM_BAD) >> 1,
245 (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
246 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
247 (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
248 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
249 (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
250 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
251 (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
252 PKT_RX_IP_CKSUM_BAD) >> 1,
253 (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
254 PKT_RX_IP_CKSUM_GOOD) >> 1,
255 (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
256 PKT_RX_IP_CKSUM_BAD) >> 1,
257 (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
258 PKT_RX_IP_CKSUM_GOOD) >> 1,
259 (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
260 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
261 (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
262 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
263 (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
264 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
265 (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
266 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
267 (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
268 PKT_RX_IP_CKSUM_BAD) >> 1,
269 (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
270 PKT_RX_IP_CKSUM_GOOD) >> 1,
271 (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
272 PKT_RX_IP_CKSUM_BAD) >> 1,
273 (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
274 PKT_RX_IP_CKSUM_GOOD) >> 1,
277 * shift right 20 bits so the low two bits hold the
278 * outer L4 checksum status;
279 * shift right 1 bit so the value does not exceed 255
281 (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
282 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
283 (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
284 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
285 (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
286 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
287 (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
288 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
289 (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
290 PKT_RX_IP_CKSUM_BAD) >> 1,
291 (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
292 PKT_RX_IP_CKSUM_GOOD) >> 1,
293 (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
294 PKT_RX_IP_CKSUM_BAD) >> 1,
295 (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
296 PKT_RX_IP_CKSUM_GOOD) >> 1,
297 (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
298 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
299 (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
300 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
301 (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
302 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
303 (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
304 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
305 (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
306 PKT_RX_IP_CKSUM_BAD) >> 1,
307 (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
308 PKT_RX_IP_CKSUM_GOOD) >> 1,
309 (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
310 PKT_RX_IP_CKSUM_BAD) >> 1,
311 (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
312 PKT_RX_IP_CKSUM_GOOD) >> 1);
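/*
 * The table above is indexed by the four checksum status bits
 * (status_error0 bits 7:4).  Each entry is the matching ol_flags value
 * shifted right by 1 so it fits in a byte, with the
 * PKT_RX_OUTER_L4_CKSUM_* bits further compressed by the >> 20; the
 * shifts applied after the shuffle in the main loop undo this packing.
 */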
313 const __m256i cksum_mask =
314 _mm256_set1_epi32(PKT_RX_IP_CKSUM_MASK |
315 PKT_RX_L4_CKSUM_MASK |
316 PKT_RX_OUTER_IP_CKSUM_BAD |
317 PKT_RX_OUTER_L4_CKSUM_MASK);
319 * data to be shuffled by the result of the flags mask shifted down by 12
320 * bits. If RSS (bit 12) and/or VLAN (bit 13) are set, the
321 * shuffle moves the appropriate flags into place.
323 const __m256i rss_vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
326 PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
327 PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
333 PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
334 PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
337 uint16_t i, received;
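/*
 * rss_vlan_flags_shuf above is indexed by status_error0 bits 13:12
 * (after the shift by 12 in the main loop) and returns the matching
 * combination of RSS and VLAN ol_flags for each packet.
 */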
339 for (i = 0, received = 0; i < nb_pkts;
340 i += ICE_DESCS_PER_LOOP_AVX,
341 rxdp += ICE_DESCS_PER_LOOP_AVX) {
342 /* step 1, copy over 8 mbuf pointers to rx_pkts array */
343 _mm256_storeu_si256((void *)&rx_pkts[i],
344 _mm256_loadu_si256((void *)&sw_ring[i]));
345 #ifdef RTE_ARCH_X86_64
347 ((void *)&rx_pkts[i + 4],
348 _mm256_loadu_si256((void *)&sw_ring[i + 4]));
351 __m512i raw_desc0_3, raw_desc4_7;
352 __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7;
354 /* load in descriptors, in reverse order */
355 const __m128i raw_desc7 =
356 _mm_load_si128((void *)(rxdp + 7));
357 rte_compiler_barrier();
358 const __m128i raw_desc6 =
359 _mm_load_si128((void *)(rxdp + 6));
360 rte_compiler_barrier();
361 const __m128i raw_desc5 =
362 _mm_load_si128((void *)(rxdp + 5));
363 rte_compiler_barrier();
364 const __m128i raw_desc4 =
365 _mm_load_si128((void *)(rxdp + 4));
366 rte_compiler_barrier();
367 const __m128i raw_desc3 =
368 _mm_load_si128((void *)(rxdp + 3));
369 rte_compiler_barrier();
370 const __m128i raw_desc2 =
371 _mm_load_si128((void *)(rxdp + 2));
372 rte_compiler_barrier();
373 const __m128i raw_desc1 =
374 _mm_load_si128((void *)(rxdp + 1));
375 rte_compiler_barrier();
376 const __m128i raw_desc0 =
377 _mm_load_si128((void *)(rxdp + 0));
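/*
 * Descriptors are loaded from the highest index down, with compiler
 * barriers between the loads.  Since the NIC completes descriptors in
 * ascending order, any descriptor whose DD bit is set in this snapshot
 * was finished before the lower-numbered descriptors were read, so the
 * data consumed below is consistent with the DD-based burst count.
 */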
380 _mm256_inserti128_si256
381 (_mm256_castsi128_si256(raw_desc6),
384 _mm256_inserti128_si256
385 (_mm256_castsi128_si256(raw_desc4),
388 _mm256_inserti128_si256
389 (_mm256_castsi128_si256(raw_desc2),
392 _mm256_inserti128_si256
393 (_mm256_castsi128_si256(raw_desc0),
398 (_mm512_castsi256_si512(raw_desc4_5),
402 (_mm512_castsi256_si512(raw_desc0_1),
408 for (j = 0; j < ICE_DESCS_PER_LOOP_AVX; j++)
409 rte_mbuf_prefetch_part2(rx_pkts[i + j]);
413 * convert descriptors 0-7 into mbufs, re-arrange fields.
414 * Then write into the mbuf.
416 __m512i mb4_7 = _mm512_shuffle_epi8(raw_desc4_7, shuf_msk);
417 __m512i mb0_3 = _mm512_shuffle_epi8(raw_desc0_3, shuf_msk);
419 mb4_7 = _mm512_add_epi32(mb4_7, crc_adjust);
420 mb0_3 = _mm512_add_epi32(mb0_3, crc_adjust);
423 * to get packet types: the ptype field sits in bits 16-25 of each 128-bit descriptor
426 const __m512i ptype_mask =
427 _mm512_set1_epi16(ICE_RX_FLEX_DESC_PTYPE_M);
433 const __m512i ptypes4_7 =
434 _mm512_and_si512(raw_desc4_7, ptype_mask);
435 const __m512i ptypes0_3 =
436 _mm512_and_si512(raw_desc0_3, ptype_mask);
438 const __m256i ptypes6_7 =
439 _mm512_extracti64x4_epi64(ptypes4_7, 1);
440 const __m256i ptypes4_5 =
441 _mm512_extracti64x4_epi64(ptypes4_7, 0);
442 const __m256i ptypes2_3 =
443 _mm512_extracti64x4_epi64(ptypes0_3, 1);
444 const __m256i ptypes0_1 =
445 _mm512_extracti64x4_epi64(ptypes0_3, 0);
446 const uint16_t ptype7 = _mm256_extract_epi16(ptypes6_7, 9);
447 const uint16_t ptype6 = _mm256_extract_epi16(ptypes6_7, 1);
448 const uint16_t ptype5 = _mm256_extract_epi16(ptypes4_5, 9);
449 const uint16_t ptype4 = _mm256_extract_epi16(ptypes4_5, 1);
450 const uint16_t ptype3 = _mm256_extract_epi16(ptypes2_3, 9);
451 const uint16_t ptype2 = _mm256_extract_epi16(ptypes2_3, 1);
452 const uint16_t ptype1 = _mm256_extract_epi16(ptypes0_1, 9);
453 const uint16_t ptype0 = _mm256_extract_epi16(ptypes0_1, 1);
455 const __m512i ptype4_7 = _mm512_set_epi32
456 (0, 0, 0, ptype_tbl[ptype7],
457 0, 0, 0, ptype_tbl[ptype6],
458 0, 0, 0, ptype_tbl[ptype5],
459 0, 0, 0, ptype_tbl[ptype4]);
460 const __m512i ptype0_3 = _mm512_set_epi32
461 (0, 0, 0, ptype_tbl[ptype3],
462 0, 0, 0, ptype_tbl[ptype2],
463 0, 0, 0, ptype_tbl[ptype1],
464 0, 0, 0, ptype_tbl[ptype0]);
466 mb4_7 = _mm512_mask_blend_epi32(0x1111, mb4_7, ptype4_7);
467 mb0_3 = _mm512_mask_blend_epi32(0x1111, mb0_3, ptype0_3);
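/*
 * The 0x1111 blend mask selects dword 0 of each 128-bit lane, i.e. the
 * packet_type slot that shuf_msk left as "unknown"; the raw hardware
 * ptype has already been translated through ptype_tbl.
 */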
469 __m256i mb4_5 = _mm512_extracti64x4_epi64(mb4_7, 0);
470 __m256i mb6_7 = _mm512_extracti64x4_epi64(mb4_7, 1);
471 __m256i mb0_1 = _mm512_extracti64x4_epi64(mb0_3, 0);
472 __m256i mb2_3 = _mm512_extracti64x4_epi64(mb0_3, 1);
475 * use permute/extract to get status content
476 * After the operations, the packet status flags are in the
477 * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
479 /* merge the status bits into one register */
480 const __m512i status_permute_msk = _mm512_set_epi32
485 const __m512i raw_status0_7 = _mm512_permutex2var_epi32
486 (raw_desc4_7, status_permute_msk, raw_desc0_3);
487 __m256i status0_7 = _mm512_extracti64x4_epi64
490 /* now do flag manipulation */
492 /* get only flag/error bits we want */
493 const __m256i flag_bits =
494 _mm256_and_si256(status0_7, flags_mask);
496 * l3_l4_error flags: shuffle, then shift to undo the adjustment
497 * encoded in l3_l4_flags_shuf, and finally mask out extra bits
499 __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
500 _mm256_srli_epi32(flag_bits, 4));
501 l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
502 __m256i l4_outer_mask = _mm256_set1_epi32(0x6);
503 __m256i l4_outer_flags =
504 _mm256_and_si256(l3_l4_flags, l4_outer_mask);
505 l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20);
507 __m256i l3_l4_mask = _mm256_set1_epi32(~0x6);
508 l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask);
509 l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags);
510 l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
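/*
 * Undo the table packing: << 1 restores the inner checksum flags, the
 * 0x6 bits are moved up by 20 to rebuild PKT_RX_OUTER_L4_CKSUM_*, and
 * cksum_mask drops everything that is not a checksum flag.
 */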
511 /* set rss and vlan flags */
512 const __m256i rss_vlan_flag_bits =
513 _mm256_srli_epi32(flag_bits, 12);
514 const __m256i rss_vlan_flags =
515 _mm256_shuffle_epi8(rss_vlan_flags_shuf,
519 __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
522 if (rxq->fdir_enabled) {
523 const __m256i fdir_id4_7 =
524 _mm256_unpackhi_epi32(raw_desc6_7, raw_desc4_5);
526 const __m256i fdir_id0_3 =
527 _mm256_unpackhi_epi32(raw_desc2_3, raw_desc0_1);
529 const __m256i fdir_id0_7 =
530 _mm256_unpackhi_epi64(fdir_id4_7, fdir_id0_3);
532 const __m256i fdir_flags =
533 ice_flex_rxd_to_fdir_flags_vec_avx512
536 /* merge with fdir_flags */
537 mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_flags);
539 /* write to mbuf: have to use scalar store here */
540 rx_pkts[i + 0]->hash.fdir.hi =
541 _mm256_extract_epi32(fdir_id0_7, 3);
543 rx_pkts[i + 1]->hash.fdir.hi =
544 _mm256_extract_epi32(fdir_id0_7, 7);
546 rx_pkts[i + 2]->hash.fdir.hi =
547 _mm256_extract_epi32(fdir_id0_7, 2);
549 rx_pkts[i + 3]->hash.fdir.hi =
550 _mm256_extract_epi32(fdir_id0_7, 6);
552 rx_pkts[i + 4]->hash.fdir.hi =
553 _mm256_extract_epi32(fdir_id0_7, 1);
555 rx_pkts[i + 5]->hash.fdir.hi =
556 _mm256_extract_epi32(fdir_id0_7, 5);
558 rx_pkts[i + 6]->hash.fdir.hi =
559 _mm256_extract_epi32(fdir_id0_7, 0);
561 rx_pkts[i + 7]->hash.fdir.hi =
562 _mm256_extract_epi32(fdir_id0_7, 4);
563 } /* if() on fdir_enabled */
565 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
567 * need to load the 2nd 16B of each descriptor for RSS hash parsing;
568 * entering this path causes a performance drop.
570 if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
571 DEV_RX_OFFLOAD_RSS_HASH) {
572 /* load bottom half of every 32B desc */
573 const __m128i raw_desc_bh7 =
575 ((void *)(&rxdp[7].wb.status_error1));
576 rte_compiler_barrier();
577 const __m128i raw_desc_bh6 =
579 ((void *)(&rxdp[6].wb.status_error1));
580 rte_compiler_barrier();
581 const __m128i raw_desc_bh5 =
583 ((void *)(&rxdp[5].wb.status_error1));
584 rte_compiler_barrier();
585 const __m128i raw_desc_bh4 =
587 ((void *)(&rxdp[4].wb.status_error1));
588 rte_compiler_barrier();
589 const __m128i raw_desc_bh3 =
591 ((void *)(&rxdp[3].wb.status_error1));
592 rte_compiler_barrier();
593 const __m128i raw_desc_bh2 =
595 ((void *)(&rxdp[2].wb.status_error1));
596 rte_compiler_barrier();
597 const __m128i raw_desc_bh1 =
599 ((void *)(&rxdp[1].wb.status_error1));
600 rte_compiler_barrier();
601 const __m128i raw_desc_bh0 =
603 ((void *)(&rxdp[0].wb.status_error1));
605 __m256i raw_desc_bh6_7 =
606 _mm256_inserti128_si256
607 (_mm256_castsi128_si256(raw_desc_bh6),
609 __m256i raw_desc_bh4_5 =
610 _mm256_inserti128_si256
611 (_mm256_castsi128_si256(raw_desc_bh4),
613 __m256i raw_desc_bh2_3 =
614 _mm256_inserti128_si256
615 (_mm256_castsi128_si256(raw_desc_bh2),
617 __m256i raw_desc_bh0_1 =
618 _mm256_inserti128_si256
619 (_mm256_castsi128_si256(raw_desc_bh0),
623 * shift the 32-bit RSS hash value to the
624 * highest 32 bits of each 128-bit lane before masking
626 __m256i rss_hash6_7 =
627 _mm256_slli_epi64(raw_desc_bh6_7, 32);
628 __m256i rss_hash4_5 =
629 _mm256_slli_epi64(raw_desc_bh4_5, 32);
630 __m256i rss_hash2_3 =
631 _mm256_slli_epi64(raw_desc_bh2_3, 32);
632 __m256i rss_hash0_1 =
633 _mm256_slli_epi64(raw_desc_bh0_1, 32);
635 __m256i rss_hash_msk =
636 _mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
637 0xFFFFFFFF, 0, 0, 0);
639 rss_hash6_7 = _mm256_and_si256
640 (rss_hash6_7, rss_hash_msk);
641 rss_hash4_5 = _mm256_and_si256
642 (rss_hash4_5, rss_hash_msk);
643 rss_hash2_3 = _mm256_and_si256
644 (rss_hash2_3, rss_hash_msk);
645 rss_hash0_1 = _mm256_and_si256
646 (rss_hash0_1, rss_hash_msk);
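/*
 * The RSS hash is the low dword of the second qword loaded from each
 * descriptor; shifting every 64-bit lane left by 32 moves it into the
 * top dword of each 128-bit lane, which is where mbuf->hash.rss sits in
 * the mb registers, and the mask clears everything else before the
 * OR-merge below.
 */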
648 mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
649 mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
650 mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
651 mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
652 } /* if() on RSS hash parsing */
656 * At this point, we have the 8 sets of flags in the low 16-bits
657 * of each 32-bit value in mbuf_flags.
658 * We want to extract these, and merge them with the mbuf init
659 * data so we can do a single write to the mbuf to set the flags
660 * and all the other initialization fields. Extracting the
661 * appropriate flags means that we have to do a shift and blend
662 * for each mbuf before we do the write. However, we can also
663 * add in the previously computed rx_descriptor fields to
664 * make a single 256-bit write per mbuf
666 /* check the structure matches expectations */
667 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
668 offsetof(struct rte_mbuf, rearm_data) + 8);
669 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
670 RTE_ALIGN(offsetof(struct rte_mbuf,
673 /* build up data and do writes */
674 __m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
677 rearm6 = _mm256_blend_epi32(mbuf_init,
678 _mm256_slli_si256(mbuf_flags, 8),
680 rearm4 = _mm256_blend_epi32(mbuf_init,
681 _mm256_slli_si256(mbuf_flags, 4),
683 rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04);
684 rearm0 = _mm256_blend_epi32(mbuf_init,
685 _mm256_srli_si256(mbuf_flags, 4),
688 /* permute to add in the rx_descriptor e.g. rss fields */
689 rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
690 rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
691 rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
692 rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
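/*
 * Each rearm store below writes 32 bytes starting at mbuf->rearm_data:
 * the first 16 bytes are mbuf_init with the packet's ol_flags blended
 * in, the second 16 bytes are the rx_descriptor_fields1 half taken from
 * the mb registers (lengths, vlan_tci, ptype, RSS hash).
 */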
695 _mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data,
697 _mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data,
699 _mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data,
701 _mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data,
704 /* repeat for the odd mbufs */
705 const __m256i odd_flags =
706 _mm256_castsi128_si256
707 (_mm256_extracti128_si256(mbuf_flags, 1));
708 rearm7 = _mm256_blend_epi32(mbuf_init,
709 _mm256_slli_si256(odd_flags, 8),
711 rearm5 = _mm256_blend_epi32(mbuf_init,
712 _mm256_slli_si256(odd_flags, 4),
714 rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04);
715 rearm1 = _mm256_blend_epi32(mbuf_init,
716 _mm256_srli_si256(odd_flags, 4),
719 /* since odd mbufs are already in hi 128-bits use blend */
720 rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
721 rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
722 rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
723 rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
724 /* again write to mbufs */
725 _mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data,
727 _mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data,
729 _mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data,
731 _mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data,
734 /* extract and record EOP bit */
736 const __m128i eop_mask =
737 _mm_set1_epi16(1 << ICE_RX_DESC_STATUS_EOF_S);
738 const __m256i eop_bits256 = _mm256_and_si256(status0_7,
740 /* pack status bits into a single 128-bit register */
741 const __m128i eop_bits =
743 (_mm256_castsi256_si128(eop_bits256),
744 _mm256_extractf128_si256(eop_bits256,
747 * flip the bits and mask out the EOP bit, which is now
748 * a split-packet bit (i.e. !EOP) rather than an EOP bit.
750 __m128i split_bits = _mm_andnot_si128(eop_bits,
753 * eop bits are out of order, so we need to shuffle them
754 * back into order again. In doing so, we use only the low 8
755 * bits of each value, which acts like another pack instruction.
756 * The original order is (hi->lo): 1,3,5,7,0,2,4,6
757 * [Since we use epi8, the 16-bit positions are
758 * multiplied by 2 in the eop_shuffle value.]
760 __m128i eop_shuffle =
761 _mm_set_epi8(/* zero hi 64b */
762 0xFF, 0xFF, 0xFF, 0xFF,
763 0xFF, 0xFF, 0xFF, 0xFF,
764 /* move values to lo 64b */
767 split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
768 *(uint64_t *)split_packet =
769 _mm_cvtsi128_si64(split_bits);
770 split_packet += ICE_DESCS_PER_LOOP_AVX;
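/*
 * Each of the 8 packets gets one byte in the caller's split_packet
 * array; a non-zero byte means the descriptor was not EOP, which the
 * scattered Rx path uses later to stitch multi-segment packets back
 * together.
 */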
773 /* perform dd_check */
774 status0_7 = _mm256_and_si256(status0_7, dd_check);
775 status0_7 = _mm256_packs_epi32(status0_7,
776 _mm256_setzero_si256());
778 uint64_t burst = __builtin_popcountll
780 (_mm256_extracti128_si256
782 burst += __builtin_popcountll
784 (_mm256_castsi256_si128(status0_7)));
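/*
 * Every completed descriptor contributes exactly one set bit after the
 * AND with dd_check and the pack, so 'burst' is the number of packets
 * finished in this group of 8; a partial group ends the receive loop.
 */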
786 if (burst != ICE_DESCS_PER_LOOP_AVX)
790 /* update tail pointers */
791 rxq->rx_tail += received;
792 rxq->rx_tail &= (rxq->nb_rx_desc - 1);
793 if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep the tail 2-descriptor aligned */
797 rxq->rxrearm_nb += received;
803 * - if nb_pkts < ICE_DESCS_PER_LOOP, no packets are returned
806 ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
809 return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL);
813 * vPMD receive routine that reassembles a single burst of 32 scattered packets
815 * - if nb_pkts < ICE_DESCS_PER_LOOP, no packets are returned
818 ice_recv_scattered_burst_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
821 struct ice_rx_queue *rxq = rx_queue;
822 uint8_t split_flags[ICE_VPMD_RX_BURST] = {0};
824 /* get some new buffers */
825 uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx512(rxq, rx_pkts, nb_pkts,
830 /* happy day case, full burst + no packets to be joined */
831 const uint64_t *split_fl64 = (uint64_t *)split_flags;
833 if (!rxq->pkt_first_seg &&
834 split_fl64[0] == 0 && split_fl64[1] == 0 &&
835 split_fl64[2] == 0 && split_fl64[3] == 0)
838 /* reassemble any packets that need reassembly */
841 if (!rxq->pkt_first_seg) {
842 /* find the first split flag, and reassemble only from that point on */
843 while (i < nb_bufs && !split_flags[i])
847 rxq->pkt_first_seg = rx_pkts[i];
849 return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
854 * vPMD receive routine that reassembles scattered packets.
855 * Main receive routine that can handle arbitrary burst sizes
857 * - if nb_pkts < ICE_DESCS_PER_LOOP, no packets are returned
860 ice_recv_scattered_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
865 while (nb_pkts > ICE_VPMD_RX_BURST) {
866 uint16_t burst = ice_recv_scattered_burst_vec_avx512(rx_queue,
867 rx_pkts + retval, ICE_VPMD_RX_BURST);
870 if (burst < ICE_VPMD_RX_BURST)
873 return retval + ice_recv_scattered_burst_vec_avx512(rx_queue,
874 rx_pkts + retval, nb_pkts);
877 static __rte_always_inline int
878 ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
880 struct ice_vec_tx_entry *txep;
884 struct rte_mbuf *m, *free[ICE_TX_MAX_FREE_BUF_SZ];
886 /* check DD bits on threshold descriptor */
887 if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
888 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
889 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
892 n = txq->tx_rs_thresh;
894 /* first buffer to free from S/W ring is at index
895 * tx_next_dd - (tx_rs_thresh - 1)
897 txep = (void *)txq->sw_ring;
898 txep += txq->tx_next_dd - (n - 1);
900 if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
901 struct rte_mempool *mp = txep[0].mbuf->pool;
903 struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
906 if (!cache || cache->len == 0)
909 cache_objs = &cache->objs[cache->len];
911 if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
912 rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n);
916 /* The cache works as follows:
917 * 1. Add the objects to the cache.
918 * 2. Once the cache flush threshold is crossed, anything above the
919 * cache size is flushed back to the ring.
921 /* Add elements back into the cache */
923 /* n is multiple of 32 */
925 const __m512i a = _mm512_loadu_si512(&txep[copied]);
926 const __m512i b = _mm512_loadu_si512(&txep[copied + 8]);
927 const __m512i c = _mm512_loadu_si512(&txep[copied + 16]);
928 const __m512i d = _mm512_loadu_si512(&txep[copied + 24]);
930 _mm512_storeu_si512(&cache_objs[copied], a);
931 _mm512_storeu_si512(&cache_objs[copied + 8], b);
932 _mm512_storeu_si512(&cache_objs[copied + 16], c);
933 _mm512_storeu_si512(&cache_objs[copied + 24], d);
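/*
 * Each ice_vec_tx_entry holds just the mbuf pointer, so the four
 * 64-byte loads/stores above move 32 mbuf pointers per iteration
 * directly into the mempool cache.
 */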
938 if (cache->len >= cache->flushthresh) {
939 rte_mempool_ops_enqueue_bulk
940 (mp, &cache->objs[cache->size],
941 cache->len - cache->size);
942 cache->len = cache->size;
948 m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
952 for (i = 1; i < n; i++) {
953 m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
955 if (likely(m->pool == free[0]->pool)) {
958 rte_mempool_put_bulk(free[0]->pool,
966 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
968 for (i = 1; i < n; i++) {
969 m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
971 rte_mempool_put(m->pool, m);
976 /* buffers were freed, update counters */
977 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
978 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
979 if (txq->tx_next_dd >= txq->nb_tx_desc)
980 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
982 return txq->tx_rs_thresh;
985 static __rte_always_inline void
986 ice_vtx1(volatile struct ice_tx_desc *txdp,
987 struct rte_mbuf *pkt, uint64_t flags, bool do_offload)
990 (ICE_TX_DESC_DTYPE_DATA |
991 ((uint64_t)flags << ICE_TXD_QW1_CMD_S) |
992 ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S));
995 ice_txd_enable_offload(pkt, &high_qw);
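/*
 * The 16-byte store below writes the complete data descriptor: the low
 * qword is the buffer DMA address (buf_iova + data_off), the high qword
 * packs the descriptor type, command flags and buffer size via the
 * ICE_TXD_QW1_* shifts, plus any offload fields added above when
 * do_offload is set.
 */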
997 __m128i descriptor = _mm_set_epi64x(high_qw,
998 pkt->buf_iova + pkt->data_off);
999 _mm_store_si128((__m128i *)txdp, descriptor);
1002 static __rte_always_inline void
1003 ice_vtx(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkt,
1004 uint16_t nb_pkts, uint64_t flags, bool do_offload)
1006 const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA |
1007 ((uint64_t)flags << ICE_TXD_QW1_CMD_S));
1009 for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) {
1012 ((uint64_t)pkt[3]->data_len <<
1013 ICE_TXD_QW1_TX_BUF_SZ_S);
1015 ice_txd_enable_offload(pkt[3], &hi_qw3);
1018 ((uint64_t)pkt[2]->data_len <<
1019 ICE_TXD_QW1_TX_BUF_SZ_S);
1021 ice_txd_enable_offload(pkt[2], &hi_qw2);
1024 ((uint64_t)pkt[1]->data_len <<
1025 ICE_TXD_QW1_TX_BUF_SZ_S);
1027 ice_txd_enable_offload(pkt[1], &hi_qw1);
1030 ((uint64_t)pkt[0]->data_len <<
1031 ICE_TXD_QW1_TX_BUF_SZ_S);
1033 ice_txd_enable_offload(pkt[0], &hi_qw0);
1038 pkt[3]->buf_iova + pkt[3]->data_off,
1040 pkt[2]->buf_iova + pkt[2]->data_off,
1042 pkt[1]->buf_iova + pkt[1]->data_off,
1044 pkt[0]->buf_iova + pkt[0]->data_off);
1045 _mm512_storeu_si512((void *)txdp, desc0_3);
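/*
 * Four Tx descriptors are built in one zmm register (qword1/address
 * pairs, listed high-to-low in _mm512_set_epi64) and written back with
 * a single 64-byte unaligned store.
 */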
1048 /* do any last ones */
1050 ice_vtx1(txdp, *pkt, flags, do_offload);
1051 txdp++, pkt++, nb_pkts--;
1055 static __rte_always_inline void
1056 ice_tx_backlog_entry_avx512(struct ice_vec_tx_entry *txep,
1057 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1061 for (i = 0; i < (int)nb_pkts; ++i)
1062 txep[i].mbuf = tx_pkts[i];
1065 static __rte_always_inline uint16_t
1066 ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
1067 uint16_t nb_pkts, bool do_offload)
1069 struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
1070 volatile struct ice_tx_desc *txdp;
1071 struct ice_vec_tx_entry *txep;
1072 uint16_t n, nb_commit, tx_id;
1073 uint64_t flags = ICE_TD_CMD;
1074 uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD;
1076 /* crossing the tx_rs_thresh boundary is not allowed */
1077 nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
1079 if (txq->nb_tx_free < txq->tx_free_thresh)
1080 ice_tx_free_bufs_avx512(txq);
1082 nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
1083 if (unlikely(nb_pkts == 0))
1086 tx_id = txq->tx_tail;
1087 txdp = &txq->tx_ring[tx_id];
1088 txep = (void *)txq->sw_ring;
1091 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
1093 n = (uint16_t)(txq->nb_tx_desc - tx_id);
1094 if (nb_commit >= n) {
1095 ice_tx_backlog_entry_avx512(txep, tx_pkts, n);
1097 ice_vtx(txdp, tx_pkts, n - 1, flags, do_offload);
1101 ice_vtx1(txdp, *tx_pkts++, rs, do_offload);
1103 nb_commit = (uint16_t)(nb_commit - n);
1106 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
1108 /* avoid reaching the end of the ring: wrap back to the start */
1109 txdp = txq->tx_ring;
1110 txep = (void *)txq->sw_ring;
1113 ice_tx_backlog_entry_avx512(txep, tx_pkts, nb_commit);
1115 ice_vtx(txdp, tx_pkts, nb_commit, flags, do_offload);
1117 tx_id = (uint16_t)(tx_id + nb_commit);
1118 if (tx_id > txq->tx_next_rs) {
1119 txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
1120 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
1123 (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
1126 txq->tx_tail = tx_id;
1128 ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
1134 ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
1138 struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
1143 num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
1144 ret = ice_xmit_fixed_burst_vec_avx512(tx_queue,
1145 &tx_pkts[nb_tx], num, false);
1156 ice_xmit_pkts_vec_avx512_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
1160 struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
1165 num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
1166 ret = ice_xmit_fixed_burst_vec_avx512(tx_queue,
1167 &tx_pkts[nb_tx], num, true);