1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
5 #include "iavf_rxtx_vec_common.h"
9 #ifndef __INTEL_COMPILER
10 #pragma GCC diagnostic ignored "-Wcast-qual"
13 #define IAVF_DESCS_PER_LOOP_AVX 8
14 #define PKTLEN_SHIFT 10
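/* IAVF_DESCS_PER_LOOP_AVX is the number of descriptors handled per iteration
 * of the AVX512 inner loops. PKTLEN_SHIFT moves the packet-length field of
 * qword1 up into the top 16-bit word of each descriptor lane so it can later
 * be merged with a 16-bit blend (see IAVF_RX_LEN_MASK below).
 */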
16 static __rte_always_inline void
17 iavf_rxq_rearm(struct iavf_rx_queue *rxq)
21 volatile union iavf_rx_desc *rxdp;
22 struct rte_mempool_cache *cache =
23 rte_mempool_default_cache(rxq->mp, rte_lcore_id());
24 struct rte_mbuf **rxp = &rxq->sw_ring[rxq->rxrearm_start];
26 rxdp = rxq->rx_ring + rxq->rxrearm_start;
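/* without a usable per-lcore mempool cache the vectorized cache copy below
 * is not possible, so fall back to the common rearm helper
 */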
29 return iavf_rxq_rearm_common(rxq, true);
31 /* We need to pull 'n' more mbufs into the software ring from the mempool.
32 * We inline the mempool function here so we can vectorize the copy
33 * from the cache into the shadow ring.
36 /* Can this be satisfied from the cache? */
37 if (cache->len < IAVF_RXQ_REARM_THRESH) {
38 /* No. Backfill the cache first, and then fill from it */
39 uint32_t req = IAVF_RXQ_REARM_THRESH + (cache->size -
42 /* How many do we require, i.e. the number to fill the cache plus the request */
43 int ret = rte_mempool_ops_dequeue_bulk
44 (rxq->mp, &cache->objs[cache->len], req);
48 if (rxq->rxrearm_nb + IAVF_RXQ_REARM_THRESH >=
52 dma_addr0 = _mm_setzero_si128();
53 for (i = 0; i < IAVF_VPMD_DESCS_PER_LOOP; i++) {
54 rxp[i] = &rxq->fake_mbuf;
55 _mm_storeu_si128((__m128i *)&rxdp[i].read,
59 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
60 IAVF_RXQ_REARM_THRESH;
65 const __m512i iova_offsets = _mm512_set1_epi64(offsetof
66 (struct rte_mbuf, buf_iova));
67 const __m512i headroom = _mm512_set1_epi64(RTE_PKTMBUF_HEADROOM);
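/* iova_offsets lets one 64-bit gather fetch buf_iova from eight mbuf
 * pointers at a time; headroom is then added so each descriptor points at
 * the start of the packet data area rather than the buffer start.
 */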
69 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
70 /* to shuffle the addresses into the correct slots. Values 4-7 will contain
71 * zeros, so use 7 for a zero-value.
73 const __m512i permute_idx = _mm512_set_epi64(7, 7, 3, 1, 7, 7, 2, 0);
75 const __m512i permute_idx = _mm512_set_epi64(7, 3, 6, 2, 5, 1, 4, 0);
78 /* Initialize the mbufs in vector form, processing 8 mbufs per loop, taking
79 * them from the mempool cache and populating both the shadow and HW rings
81 for (i = 0; i < IAVF_RXQ_REARM_THRESH / IAVF_DESCS_PER_LOOP_AVX; i++) {
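/* take the next 8 mbuf pointers from the top of the mempool cache (LIFO)
 * and copy them straight into the software ring
 */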
82 const __m512i mbuf_ptrs = _mm512_loadu_si512
83 (&cache->objs[cache->len - IAVF_DESCS_PER_LOOP_AVX]);
84 _mm512_storeu_si512(rxp, mbuf_ptrs);
86 const __m512i iova_base_addrs = _mm512_i64gather_epi64
87 (_mm512_add_epi64(mbuf_ptrs, iova_offsets),
90 const __m512i iova_addrs = _mm512_add_epi64(iova_base_addrs,
92 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
93 const __m512i iovas0 = _mm512_castsi256_si512
94 (_mm512_extracti64x4_epi64(iova_addrs, 0));
95 const __m512i iovas1 = _mm512_castsi256_si512
96 (_mm512_extracti64x4_epi64(iova_addrs, 1));
98 /* permute leaves desc 2-3 addresses in header address slots 0-1
99 * but these are ignored by the driver since header split is not
100 * enabled. Similarly for desc 6 & 7.
102 const __m512i desc0_1 = _mm512_permutexvar_epi64
105 const __m512i desc2_3 = _mm512_bsrli_epi128(desc0_1, 8);
107 const __m512i desc4_5 = _mm512_permutexvar_epi64
110 const __m512i desc6_7 = _mm512_bsrli_epi128(desc4_5, 8);
112 _mm512_storeu_si512((void *)rxdp, desc0_1);
113 _mm512_storeu_si512((void *)(rxdp + 2), desc2_3);
114 _mm512_storeu_si512((void *)(rxdp + 4), desc4_5);
115 _mm512_storeu_si512((void *)(rxdp + 6), desc6_7);
117 /* permute leaves desc 4-7 addresses in header address slots 0-3
118 * but these are ignored by the driver since header split is not
121 const __m512i desc0_3 = _mm512_permutexvar_epi64(permute_idx,
123 const __m512i desc4_7 = _mm512_bsrli_epi128(desc0_3, 8);
125 _mm512_storeu_si512((void *)rxdp, desc0_3);
126 _mm512_storeu_si512((void *)(rxdp + 4), desc4_7);
128 rxp += IAVF_DESCS_PER_LOOP_AVX;
129 rxdp += IAVF_DESCS_PER_LOOP_AVX;
130 cache->len -= IAVF_DESCS_PER_LOOP_AVX;
133 rxq->rxrearm_start += IAVF_RXQ_REARM_THRESH;
134 if (rxq->rxrearm_start >= rxq->nb_rx_desc)
135 rxq->rxrearm_start = 0;
137 rxq->rxrearm_nb -= IAVF_RXQ_REARM_THRESH;
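/* the tail register must point at the last descriptor given back to HW,
 * i.e. one before rxrearm_start, wrapping to the end of the ring
 */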
139 rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
140 (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
142 /* Update the tail pointer on the NIC */
143 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
146 #define IAVF_RX_LEN_MASK 0x80808080
147 static inline uint16_t
148 _iavf_recv_raw_pkts_vec_avx512(struct iavf_rx_queue *rxq,
149 struct rte_mbuf **rx_pkts,
150 uint16_t nb_pkts, uint8_t *split_packet)
152 const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl;
154 const __m256i mbuf_init = _mm256_set_epi64x(0, 0, 0,
155 rxq->mbuf_initializer);
156 struct rte_mbuf **sw_ring = &rxq->sw_ring[rxq->rx_tail];
157 volatile union iavf_rx_desc *rxdp = rxq->rx_ring + rxq->rx_tail;
161 /* nb_pkts has to be floor-aligned to IAVF_DESCS_PER_LOOP_AVX */
162 nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_DESCS_PER_LOOP_AVX);
164 /* See if we need to rearm the RX queue - gives the prefetch a bit
167 if (rxq->rxrearm_nb > IAVF_RXQ_REARM_THRESH)
170 /* Before we start moving massive data around, check to see if
171 * there is actually a packet available
173 if (!(rxdp->wb.qword1.status_error_len &
174 rte_cpu_to_le_32(1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
177 /* constants used in processing loop */
178 const __m512i crc_adjust =
180 (/* 1st descriptor */
181 0, /* ignore non-length fields */
182 -rxq->crc_len, /* sub crc on data_len */
183 -rxq->crc_len, /* sub crc on pkt_len */
184 0, /* ignore pkt_type field */
186 0, /* ignore non-length fields */
187 -rxq->crc_len, /* sub crc on data_len */
188 -rxq->crc_len, /* sub crc on pkt_len */
189 0, /* ignore pkt_type field */
191 0, /* ignore non-length fields */
192 -rxq->crc_len, /* sub crc on data_len */
193 -rxq->crc_len, /* sub crc on pkt_len */
194 0, /* ignore pkt_type field */
196 0, /* ignore non-length fields */
197 -rxq->crc_len, /* sub crc on data_len */
198 -rxq->crc_len, /* sub crc on pkt_len */
199 0 /* ignore pkt_type field */
202 /* 8 packets DD mask, LSB in each 32-bit value */
203 const __m256i dd_check = _mm256_set1_epi32(1);
205 /* 8 packets EOP mask, second-LSB in each 32-bit value */
206 const __m256i eop_check = _mm256_slli_epi32(dd_check,
207 IAVF_RX_DESC_STATUS_EOF_SHIFT);
209 /* mask to shuffle from desc. to mbuf (4 descriptors)*/
210 const __m512i shuf_msk =
212 (/* 1st descriptor */
213 0x07060504, /* octet 4~7, 32bits rss */
214 0x03020F0E, /* octet 2~3, low 16 bits vlan_macip */
215 /* octet 15~14, 16 bits data_len */
216 0xFFFF0F0E, /* skip high 16 bits pkt_len, zero out */
217 /* octet 15~14, low 16 bits pkt_len */
218 0xFFFFFFFF, /* pkt_type set as unknown */
220 0x07060504, /* octet 4~7, 32bits rss */
221 0x03020F0E, /* octet 2~3, low 16 bits vlan_macip */
222 /* octet 15~14, 16 bits data_len */
223 0xFFFF0F0E, /* skip high 16 bits pkt_len, zero out */
224 /* octet 15~14, low 16 bits pkt_len */
225 0xFFFFFFFF, /* pkt_type set as unknown */
227 0x07060504, /* octet 4~7, 32bits rss */
228 0x03020F0E, /* octet 2~3, low 16 bits vlan_macip */
229 /* octet 15~14, 16 bits data_len */
230 0xFFFF0F0E, /* skip high 16 bits pkt_len, zero out */
231 /* octet 15~14, low 16 bits pkt_len */
232 0xFFFFFFFF, /* pkt_type set as unknown */
234 0x07060504, /* octet 4~7, 32bits rss */
235 0x03020F0E, /* octet 2~3, low 16 bits vlan_macip */
236 /* octet 15~14, 16 bits data_len */
237 0xFFFF0F0E, /* skip high 16 bits pkt_len, zero out */
238 /* octet 15~14, low 16 bits pkt_len */
239 0xFFFFFFFF /* pkt_type set as unknown */
242 * compile-time check the above crc and shuffle layout is correct.
243 * NOTE: the first field (lowest address) is given last in set_epi
246 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
247 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
248 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
249 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
250 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
251 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
252 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
253 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
255 /* Status/Error flag masks */
257 * mask everything except RSS, flow director and VLAN flags
258 * bit2 is for VLAN tag, bit11 for flow director indication
259 * bit13:12 for RSS indication. Bits 3-5 of error
260 * field (bits 22-24) are for IP/L4 checksum errors
262 const __m256i flags_mask =
263 _mm256_set1_epi32((1 << 2) | (1 << 11) |
264 (3 << 12) | (7 << 22));
266 * data to be shuffled by the result of the flag mask. If the VLAN bit
267 * (bit 2) is set, then position 4 in this array will be used in the
270 const __m256i vlan_flags_shuf =
271 _mm256_set_epi32(0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0,
272 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0);
274 * data to be shuffled by the result of the flag mask, shifted down by 11.
275 * If the RSS/FDIR bits are set, the shuffle moves the appropriate flags in
278 const __m256i rss_flags_shuf =
279 _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
280 PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH,
281 0, 0, 0, 0, PKT_RX_FDIR, 0,/* end up 128-bits */
282 0, 0, 0, 0, 0, 0, 0, 0,
283 PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH,
284 0, 0, 0, 0, PKT_RX_FDIR, 0);
287 * data to be shuffled by the result of the flags mask shifted by 22
288 * bits. This gives us the l3_l4 flags.
290 const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
291 /* shift right 1 bit to make sure it does not exceed 255 */
292 (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
293 PKT_RX_IP_CKSUM_BAD) >> 1,
294 (PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD |
295 PKT_RX_L4_CKSUM_BAD) >> 1,
296 (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
297 (PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD) >> 1,
298 (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
299 (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
300 PKT_RX_IP_CKSUM_BAD >> 1,
301 (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1,
302 /* second 128-bits */
303 0, 0, 0, 0, 0, 0, 0, 0,
304 (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
305 PKT_RX_IP_CKSUM_BAD) >> 1,
306 (PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD |
307 PKT_RX_L4_CKSUM_BAD) >> 1,
308 (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
309 (PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD) >> 1,
310 (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
311 (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
312 PKT_RX_IP_CKSUM_BAD >> 1,
313 (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);
315 const __m256i cksum_mask =
316 _mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
317 PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
318 PKT_RX_OUTER_IP_CKSUM_BAD);
320 uint16_t i, received;
322 for (i = 0, received = 0; i < nb_pkts;
323 i += IAVF_DESCS_PER_LOOP_AVX,
324 rxdp += IAVF_DESCS_PER_LOOP_AVX) {
325 /* step 1, copy over 8 mbuf pointers to rx_pkts array */
326 _mm256_storeu_si256((void *)&rx_pkts[i],
327 _mm256_loadu_si256((void *)&sw_ring[i]));
328 #ifdef RTE_ARCH_X86_64
330 ((void *)&rx_pkts[i + 4],
331 _mm256_loadu_si256((void *)&sw_ring[i + 4]));
334 __m512i raw_desc0_3, raw_desc4_7;
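/* Load the eight 16B descriptors back-to-front with compiler barriers in
 * between, so the reads cannot be reordered or merged. Hardware writes
 * descriptors in order, so if descriptor N is observed as done, the earlier
 * descriptors (read after it) are seen as done too, keeping the DD bits
 * counted below contiguous from descriptor 0.
 */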
335 const __m128i raw_desc7 =
336 _mm_load_si128((void *)(rxdp + 7));
337 rte_compiler_barrier();
338 const __m128i raw_desc6 =
339 _mm_load_si128((void *)(rxdp + 6));
340 rte_compiler_barrier();
341 const __m128i raw_desc5 =
342 _mm_load_si128((void *)(rxdp + 5));
343 rte_compiler_barrier();
344 const __m128i raw_desc4 =
345 _mm_load_si128((void *)(rxdp + 4));
346 rte_compiler_barrier();
347 const __m128i raw_desc3 =
348 _mm_load_si128((void *)(rxdp + 3));
349 rte_compiler_barrier();
350 const __m128i raw_desc2 =
351 _mm_load_si128((void *)(rxdp + 2));
352 rte_compiler_barrier();
353 const __m128i raw_desc1 =
354 _mm_load_si128((void *)(rxdp + 1));
355 rte_compiler_barrier();
356 const __m128i raw_desc0 =
357 _mm_load_si128((void *)(rxdp + 0));
359 raw_desc4_7 = _mm512_broadcast_i32x4(raw_desc4);
360 raw_desc4_7 = _mm512_inserti32x4(raw_desc4_7, raw_desc5, 1);
361 raw_desc4_7 = _mm512_inserti32x4(raw_desc4_7, raw_desc6, 2);
362 raw_desc4_7 = _mm512_inserti32x4(raw_desc4_7, raw_desc7, 3);
363 raw_desc0_3 = _mm512_broadcast_i32x4(raw_desc0);
364 raw_desc0_3 = _mm512_inserti32x4(raw_desc0_3, raw_desc1, 1);
365 raw_desc0_3 = _mm512_inserti32x4(raw_desc0_3, raw_desc2, 2);
366 raw_desc0_3 = _mm512_inserti32x4(raw_desc0_3, raw_desc3, 3);
371 for (j = 0; j < IAVF_DESCS_PER_LOOP_AVX; j++)
372 rte_mbuf_prefetch_part2(rx_pkts[i + j]);
376 * convert descriptors 4-7 into mbufs, adjusting length and
377 * re-arranging fields. Then write into the mbuf
379 const __m512i len4_7 = _mm512_slli_epi32(raw_desc4_7,
381 const __m512i desc4_7 = _mm512_mask_blend_epi16(IAVF_RX_LEN_MASK,
384 __m512i mb4_7 = _mm512_shuffle_epi8(desc4_7, shuf_msk);
386 mb4_7 = _mm512_add_epi32(mb4_7, crc_adjust);
388 * to get packet types, shift 64-bit values down 30 bits
389 * and so ptype is in lower 8-bits in each
391 const __m512i ptypes4_7 = _mm512_srli_epi64(desc4_7, 30);
392 const __m256i ptypes6_7 = _mm512_extracti64x4_epi64(ptypes4_7, 1);
393 const __m256i ptypes4_5 = _mm512_extracti64x4_epi64(ptypes4_7, 0);
394 const uint8_t ptype7 = _mm256_extract_epi8(ptypes6_7, 24);
395 const uint8_t ptype6 = _mm256_extract_epi8(ptypes6_7, 8);
396 const uint8_t ptype5 = _mm256_extract_epi8(ptypes4_5, 24);
397 const uint8_t ptype4 = _mm256_extract_epi8(ptypes4_5, 8);
399 const __m512i ptype4_7 = _mm512_set_epi32
400 (0, 0, 0, type_table[ptype7],
401 0, 0, 0, type_table[ptype6],
402 0, 0, 0, type_table[ptype5],
403 0, 0, 0, type_table[ptype4]);
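/* blend mask 0x1111 selects dword 0 of each 128-bit lane, i.e. the
 * packet_type slot that shuf_msk left as zero/unknown
 */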
404 mb4_7 = _mm512_mask_blend_epi32(0x1111, mb4_7, ptype4_7);
407 * convert descriptors 0-3 into mbufs, adjusting length and
408 * re-arranging fields. Then write into the mbuf
410 const __m512i len0_3 = _mm512_slli_epi32(raw_desc0_3,
412 const __m512i desc0_3 = _mm512_mask_blend_epi16(IAVF_RX_LEN_MASK,
415 __m512i mb0_3 = _mm512_shuffle_epi8(desc0_3, shuf_msk);
417 mb0_3 = _mm512_add_epi32(mb0_3, crc_adjust);
418 /* get the packet types */
419 const __m512i ptypes0_3 = _mm512_srli_epi64(desc0_3, 30);
420 const __m256i ptypes2_3 = _mm512_extracti64x4_epi64(ptypes0_3, 1);
421 const __m256i ptypes0_1 = _mm512_extracti64x4_epi64(ptypes0_3, 0);
422 const uint8_t ptype3 = _mm256_extract_epi8(ptypes2_3, 24);
423 const uint8_t ptype2 = _mm256_extract_epi8(ptypes2_3, 8);
424 const uint8_t ptype1 = _mm256_extract_epi8(ptypes0_1, 24);
425 const uint8_t ptype0 = _mm256_extract_epi8(ptypes0_1, 8);
427 const __m512i ptype0_3 = _mm512_set_epi32
428 (0, 0, 0, type_table[ptype3],
429 0, 0, 0, type_table[ptype2],
430 0, 0, 0, type_table[ptype1],
431 0, 0, 0, type_table[ptype0]);
432 mb0_3 = _mm512_mask_blend_epi32(0x1111, mb0_3, ptype0_3);
435 * use permute/extract to get status content
436 * After the operations, the packets' status flags are in the
437 * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
439 /* merge the status bits into one register */
440 const __m512i status_permute_msk = _mm512_set_epi32
445 const __m512i raw_status0_7 = _mm512_permutex2var_epi32
446 (raw_desc4_7, status_permute_msk, raw_desc0_3);
447 __m256i status0_7 = _mm512_extracti64x4_epi64
450 /* now do flag manipulation */
452 /* get only flag/error bits we want */
453 const __m256i flag_bits =
454 _mm256_and_si256(status0_7, flags_mask);
455 /* set vlan and rss flags */
456 const __m256i vlan_flags =
457 _mm256_shuffle_epi8(vlan_flags_shuf, flag_bits);
458 const __m256i rss_flags =
459 _mm256_shuffle_epi8(rss_flags_shuf,
460 _mm256_srli_epi32(flag_bits, 11));
462 * l3_l4 error flags: shuffle, then shift to correct the adjustment
463 * of the flags in flags_shuf, and finally mask out the extra bits
465 __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
466 _mm256_srli_epi32(flag_bits, 22));
467 l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
468 l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
471 const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
472 _mm256_or_si256(rss_flags, vlan_flags));
474 * At this point, we have the 8 sets of flags in the low 16 bits
475 * of each 32-bit value in mbuf_flags.
476 * We want to extract these, and merge them with the mbuf init
477 * data so we can do a single write to the mbuf to set the flags
478 * and all the other initialization fields. Extracting the
479 * appropriate flags means that we have to do a shift and blend
480 * for each mbuf before we do the write. However, we can also
481 * add in the previously computed rx_descriptor fields to
482 * make a single 256-bit write per mbuf
484 /* check the structure matches expectations */
485 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
486 offsetof(struct rte_mbuf, rearm_data) + 8);
487 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
488 RTE_ALIGN(offsetof(struct rte_mbuf,
491 /* build up data and do writes */
492 __m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
494 const __m256i mb4_5 = _mm512_extracti64x4_epi64(mb4_7, 0);
495 const __m256i mb6_7 = _mm512_extracti64x4_epi64(mb4_7, 1);
496 const __m256i mb0_1 = _mm512_extracti64x4_epi64(mb0_3, 0);
497 const __m256i mb2_3 = _mm512_extracti64x4_epi64(mb0_3, 1);
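/* each mbX_Y register holds rx_descriptor_fields1 for two packets: the
 * even-numbered packet in the low 128 bits and the odd one in the high
 * 128 bits, hence the permute for the even mbufs and the blend for the
 * odd mbufs below
 */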
499 rearm6 = _mm256_blend_epi32(mbuf_init,
500 _mm256_slli_si256(mbuf_flags, 8),
502 rearm4 = _mm256_blend_epi32(mbuf_init,
503 _mm256_slli_si256(mbuf_flags, 4),
505 rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04);
506 rearm0 = _mm256_blend_epi32(mbuf_init,
507 _mm256_srli_si256(mbuf_flags, 4),
509 /* permute to add in the rx_descriptor e.g. rss fields */
510 rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
511 rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
512 rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
513 rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
515 _mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data,
517 _mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data,
519 _mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data,
521 _mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data,
524 /* repeat for the odd mbufs */
525 const __m256i odd_flags =
526 _mm256_castsi128_si256
527 (_mm256_extracti128_si256(mbuf_flags, 1));
528 rearm7 = _mm256_blend_epi32(mbuf_init,
529 _mm256_slli_si256(odd_flags, 8),
531 rearm5 = _mm256_blend_epi32(mbuf_init,
532 _mm256_slli_si256(odd_flags, 4),
534 rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04);
535 rearm1 = _mm256_blend_epi32(mbuf_init,
536 _mm256_srli_si256(odd_flags, 4),
538 /* since odd mbufs are already in hi 128-bits use blend */
539 rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
540 rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
541 rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
542 rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
543 /* again write to mbufs */
544 _mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data,
546 _mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data,
548 _mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data,
550 _mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data,
553 /* extract and record EOP bit */
555 const __m128i eop_mask =
556 _mm_set1_epi16(1 << IAVF_RX_DESC_STATUS_EOF_SHIFT);
557 const __m256i eop_bits256 = _mm256_and_si256(status0_7,
559 /* pack status bits into a single 128-bit register */
560 const __m128i eop_bits =
562 (_mm256_castsi256_si128(eop_bits256),
563 _mm256_extractf128_si256(eop_bits256,
566 * flip bits, and mask out the EOP bit, which is now
567 * a split-packet bit i.e. !EOP, rather than EOP one.
569 __m128i split_bits = _mm_andnot_si128(eop_bits,
572 * eop bits are out of order, so we need to shuffle them
573 * back into order again. In doing so, only use low 8
574 * bits, which acts like another pack instruction
575 * The original order is (hi->lo): 1,3,5,7,0,2,4,6
576 * [Since we use epi8, the 16-bit positions are
577 * multiplied by 2 in the eop_shuffle value.]
579 __m128i eop_shuffle =
580 _mm_set_epi8(/* zero hi 64b */
581 0xFF, 0xFF, 0xFF, 0xFF,
582 0xFF, 0xFF, 0xFF, 0xFF,
583 /* move values to lo 64b */
586 split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
587 *(uint64_t *)split_packet =
588 _mm_cvtsi128_si64(split_bits);
589 split_packet += IAVF_DESCS_PER_LOOP_AVX;
592 /* perform dd_check */
593 status0_7 = _mm256_and_si256(status0_7, dd_check);
594 status0_7 = _mm256_packs_epi32(status0_7,
595 _mm256_setzero_si256());
597 uint64_t burst = __builtin_popcountll
599 (_mm256_extracti128_si256
601 burst += __builtin_popcountll
603 (_mm256_castsi256_si128(status0_7)));
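/* the two popcounts count the DD bits set in the high and low halves;
 * descriptors are completed in order, so fewer than 8 set bits means the
 * receive burst ends inside this group of descriptors
 */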
605 if (burst != IAVF_DESCS_PER_LOOP_AVX)
609 /* update tail pointers */
610 rxq->rx_tail += received;
611 rxq->rx_tail &= (rxq->nb_rx_desc - 1);
612 if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep aligned */
616 rxq->rxrearm_nb += received;
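/* Convert the 8 flow-director ID dwords into ol_flags: an ID of
 * 0xFFFFFFFF marks an FDIR miss and yields no flags, any other value sets
 * PKT_RX_FDIR and PKT_RX_FDIR_ID.
 */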
620 static inline __m256i
621 flex_rxd_to_fdir_flags_vec_avx512(const __m256i fdir_id0_7)
623 #define FDID_MIS_MAGIC 0xFFFFFFFF
624 RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
625 RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
626 const __m256i pkt_fdir_bit = _mm256_set1_epi32(PKT_RX_FDIR |
628 /* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
629 const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
630 __m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
632 /* this XOR with all-ones inverts fdir_mask (bitwise NOT) */
633 fdir_mask = _mm256_xor_si256(fdir_mask, fdir_mis_mask);
634 const __m256i fdir_flags = _mm256_and_si256(fdir_mask, pkt_fdir_bit);
639 static inline uint16_t
640 _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
641 struct rte_mbuf **rx_pkts,
642 uint16_t nb_pkts, uint8_t *split_packet)
644 const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl;
646 const __m256i mbuf_init = _mm256_set_epi64x(0, 0, 0,
647 rxq->mbuf_initializer);
648 struct rte_mbuf **sw_ring = &rxq->sw_ring[rxq->rx_tail];
649 volatile union iavf_rx_flex_desc *rxdp =
650 (union iavf_rx_flex_desc *)rxq->rx_ring + rxq->rx_tail;
654 /* nb_pkts has to be floor-aligned to IAVF_DESCS_PER_LOOP_AVX */
655 nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_DESCS_PER_LOOP_AVX);
657 /* See if we need to rearm the RX queue - gives the prefetch a bit
660 if (rxq->rxrearm_nb > IAVF_RXQ_REARM_THRESH)
663 /* Before we start moving massive data around, check to see if
664 * there is actually a packet available
666 if (!(rxdp->wb.status_error0 &
667 rte_cpu_to_le_32(1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
670 /* constants used in processing loop */
671 const __m512i crc_adjust =
673 (/* 1st descriptor */
674 0, /* ignore non-length fields */
675 -rxq->crc_len, /* sub crc on data_len */
676 -rxq->crc_len, /* sub crc on pkt_len */
677 0, /* ignore pkt_type field */
679 0, /* ignore non-length fields */
680 -rxq->crc_len, /* sub crc on data_len */
681 -rxq->crc_len, /* sub crc on pkt_len */
682 0, /* ignore pkt_type field */
684 0, /* ignore non-length fields */
685 -rxq->crc_len, /* sub crc on data_len */
686 -rxq->crc_len, /* sub crc on pkt_len */
687 0, /* ignore pkt_type field */
689 0, /* ignore non-length fields */
690 -rxq->crc_len, /* sub crc on data_len */
691 -rxq->crc_len, /* sub crc on pkt_len */
692 0 /* ignore pkt_type field */
695 /* 8 packets DD mask, LSB in each 32-bit value */
696 const __m256i dd_check = _mm256_set1_epi32(1);
698 /* 8 packets EOP mask, second-LSB in each 32-bit value */
699 const __m256i eop_check = _mm256_slli_epi32(dd_check,
700 IAVF_RX_FLEX_DESC_STATUS0_EOF_S);
702 /* mask to shuffle from desc. to mbuf (4 descriptors)*/
703 const __m512i shuf_msk =
705 (/* 1st descriptor */
706 0xFFFFFFFF, /* rss hash parsed separately */
707 0x0B0A0504, /* octet 10~11, 16 bits vlan_macip */
708 /* octet 4~5, 16 bits data_len */
709 0xFFFF0504, /* skip hi 16 bits pkt_len, zero out */
710 /* octet 4~5, 16 bits pkt_len */
711 0xFFFFFFFF, /* pkt_type set as unknown */
713 0xFFFFFFFF, /* rss hash parsed separately */
714 0x0B0A0504, /* octet 10~11, 16 bits vlan_macip */
715 /* octet 4~5, 16 bits data_len */
716 0xFFFF0504, /* skip hi 16 bits pkt_len, zero out */
717 /* octet 4~5, 16 bits pkt_len */
718 0xFFFFFFFF, /* pkt_type set as unknown */
720 0xFFFFFFFF, /* rss hash parsed separately */
721 0x0B0A0504, /* octet 10~11, 16 bits vlan_macip */
722 /* octet 4~5, 16 bits data_len */
723 0xFFFF0504, /* skip hi 16 bits pkt_len, zero out */
724 /* octet 4~5, 16 bits pkt_len */
725 0xFFFFFFFF, /* pkt_type set as unknown */
727 0xFFFFFFFF, /* rss hash parsed separately */
728 0x0B0A0504, /* octet 10~11, 16 bits vlan_macip */
729 /* octet 4~5, 16 bits data_len */
730 0xFFFF0504, /* skip hi 16 bits pkt_len, zero out */
731 /* octet 4~5, 16 bits pkt_len */
732 0xFFFFFFFF /* pkt_type set as unknown */
735 * compile-time check the above crc and shuffle layout is correct.
736 * NOTE: the first field (lowest address) is given last in set_epi
739 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
740 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
741 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
742 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
743 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
744 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
745 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
746 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
748 /* Status/Error flag masks */
750 * mask everything except Checksum Reports, RSS indication
751 * and VLAN indication.
752 * bit6:4 for IP/L4 checksum errors.
753 * bit12 is for RSS indication.
754 * bit13 is for VLAN indication.
756 const __m256i flags_mask =
757 _mm256_set1_epi32((7 << 4) | (1 << 12) | (1 << 13));
759 * data to be shuffled by the result of the flags mask shifted by 4
760 * bits. This gives us the l3_l4 flags.
762 const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
763 /* shift right 1 bit to make sure it does not exceed 255 */
764 (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
765 PKT_RX_IP_CKSUM_BAD) >> 1,
766 (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
767 PKT_RX_IP_CKSUM_GOOD) >> 1,
768 (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
769 PKT_RX_IP_CKSUM_BAD) >> 1,
770 (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
771 PKT_RX_IP_CKSUM_GOOD) >> 1,
772 (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
773 (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
774 (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
775 (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
776 /* second 128-bits */
777 0, 0, 0, 0, 0, 0, 0, 0,
778 (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
779 PKT_RX_IP_CKSUM_BAD) >> 1,
780 (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
781 PKT_RX_IP_CKSUM_GOOD) >> 1,
782 (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
783 PKT_RX_IP_CKSUM_BAD) >> 1,
784 (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
785 PKT_RX_IP_CKSUM_GOOD) >> 1,
786 (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
787 (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
788 (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
789 (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1);
790 const __m256i cksum_mask =
791 _mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
792 PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
793 PKT_RX_OUTER_IP_CKSUM_BAD);
795 * data to be shuffled by the result of the flag mask, shifted down by 12.
796 * If RSS (bit 12) / VLAN (bit 13) are set,
797 * the shuffle moves the appropriate flags into place.
799 const __m256i rss_vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
802 PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
803 PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
805 /* end up 128-bits */
809 PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
810 PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
813 uint16_t i, received;
815 for (i = 0, received = 0; i < nb_pkts;
816 i += IAVF_DESCS_PER_LOOP_AVX,
817 rxdp += IAVF_DESCS_PER_LOOP_AVX) {
818 /* step 1, copy over 8 mbuf pointers to rx_pkts array */
819 _mm256_storeu_si256((void *)&rx_pkts[i],
820 _mm256_loadu_si256((void *)&sw_ring[i]));
821 #ifdef RTE_ARCH_X86_64
823 ((void *)&rx_pkts[i + 4],
824 _mm256_loadu_si256((void *)&sw_ring[i + 4]));
827 __m512i raw_desc0_3, raw_desc4_7;
829 const __m128i raw_desc7 =
830 _mm_load_si128((void *)(rxdp + 7));
831 rte_compiler_barrier();
832 const __m128i raw_desc6 =
833 _mm_load_si128((void *)(rxdp + 6));
834 rte_compiler_barrier();
835 const __m128i raw_desc5 =
836 _mm_load_si128((void *)(rxdp + 5));
837 rte_compiler_barrier();
838 const __m128i raw_desc4 =
839 _mm_load_si128((void *)(rxdp + 4));
840 rte_compiler_barrier();
841 const __m128i raw_desc3 =
842 _mm_load_si128((void *)(rxdp + 3));
843 rte_compiler_barrier();
844 const __m128i raw_desc2 =
845 _mm_load_si128((void *)(rxdp + 2));
846 rte_compiler_barrier();
847 const __m128i raw_desc1 =
848 _mm_load_si128((void *)(rxdp + 1));
849 rte_compiler_barrier();
850 const __m128i raw_desc0 =
851 _mm_load_si128((void *)(rxdp + 0));
853 raw_desc4_7 = _mm512_broadcast_i32x4(raw_desc4);
854 raw_desc4_7 = _mm512_inserti32x4(raw_desc4_7, raw_desc5, 1);
855 raw_desc4_7 = _mm512_inserti32x4(raw_desc4_7, raw_desc6, 2);
856 raw_desc4_7 = _mm512_inserti32x4(raw_desc4_7, raw_desc7, 3);
857 raw_desc0_3 = _mm512_broadcast_i32x4(raw_desc0);
858 raw_desc0_3 = _mm512_inserti32x4(raw_desc0_3, raw_desc1, 1);
859 raw_desc0_3 = _mm512_inserti32x4(raw_desc0_3, raw_desc2, 2);
860 raw_desc0_3 = _mm512_inserti32x4(raw_desc0_3, raw_desc3, 3);
865 for (j = 0; j < IAVF_DESCS_PER_LOOP_AVX; j++)
866 rte_mbuf_prefetch_part2(rx_pkts[i + j]);
870 * convert descriptors 4-7 into mbufs, re-arrange fields.
871 * Then write into the mbuf.
873 __m512i mb4_7 = _mm512_shuffle_epi8(raw_desc4_7, shuf_msk);
875 mb4_7 = _mm512_add_epi32(mb4_7, crc_adjust);
877 * to get packet types, the ptype is located in bits 16-25
880 const __m512i ptype_mask =
881 _mm512_set1_epi16(IAVF_RX_FLEX_DESC_PTYPE_M);
882 const __m512i ptypes4_7 =
883 _mm512_and_si512(raw_desc4_7, ptype_mask);
884 const __m256i ptypes6_7 = _mm512_extracti64x4_epi64(ptypes4_7, 1);
885 const __m256i ptypes4_5 = _mm512_extracti64x4_epi64(ptypes4_7, 0);
886 const uint16_t ptype7 = _mm256_extract_epi16(ptypes6_7, 9);
887 const uint16_t ptype6 = _mm256_extract_epi16(ptypes6_7, 1);
888 const uint16_t ptype5 = _mm256_extract_epi16(ptypes4_5, 9);
889 const uint16_t ptype4 = _mm256_extract_epi16(ptypes4_5, 1);
891 const __m512i ptype4_7 = _mm512_set_epi32
892 (0, 0, 0, type_table[ptype7],
893 0, 0, 0, type_table[ptype6],
894 0, 0, 0, type_table[ptype5],
895 0, 0, 0, type_table[ptype4]);
896 mb4_7 = _mm512_mask_blend_epi32(0x1111, mb4_7, ptype4_7);
899 * convert descriptors 0-3 into mbufs, re-arrange fields.
900 * Then write into the mbuf.
902 __m512i mb0_3 = _mm512_shuffle_epi8(raw_desc0_3, shuf_msk);
904 mb0_3 = _mm512_add_epi32(mb0_3, crc_adjust);
906 * to get packet types, the ptype is located in bits 16-25
909 const __m512i ptypes0_3 =
910 _mm512_and_si512(raw_desc0_3, ptype_mask);
911 const __m256i ptypes2_3 = _mm512_extracti64x4_epi64(ptypes0_3, 1);
912 const __m256i ptypes0_1 = _mm512_extracti64x4_epi64(ptypes0_3, 0);
913 const uint16_t ptype3 = _mm256_extract_epi16(ptypes2_3, 9);
914 const uint16_t ptype2 = _mm256_extract_epi16(ptypes2_3, 1);
915 const uint16_t ptype1 = _mm256_extract_epi16(ptypes0_1, 9);
916 const uint16_t ptype0 = _mm256_extract_epi16(ptypes0_1, 1);
918 const __m512i ptype0_3 = _mm512_set_epi32
919 (0, 0, 0, type_table[ptype3],
920 0, 0, 0, type_table[ptype2],
921 0, 0, 0, type_table[ptype1],
922 0, 0, 0, type_table[ptype0]);
923 mb0_3 = _mm512_mask_blend_epi32(0x1111, mb0_3, ptype0_3);
926 * use permute/extract to get status content
927 * After the operations, the packets status flags are in the
928 * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
930 /* merge the status bits into one register */
931 const __m512i status_permute_msk = _mm512_set_epi32
936 const __m512i raw_status0_7 = _mm512_permutex2var_epi32
937 (raw_desc4_7, status_permute_msk, raw_desc0_3);
938 __m256i status0_7 = _mm512_extracti64x4_epi64
941 /* now do flag manipulation */
943 /* get only flag/error bits we want */
944 const __m256i flag_bits =
945 _mm256_and_si256(status0_7, flags_mask);
947 * l3_l4_error flags, shuffle, then shift to correct adjustment
948 * of flags in flags_shuf, and finally mask out extra bits
950 __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
951 _mm256_srli_epi32(flag_bits, 4));
952 l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
953 l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
954 /* set rss and vlan flags */
955 const __m256i rss_vlan_flag_bits =
956 _mm256_srli_epi32(flag_bits, 12);
957 const __m256i rss_vlan_flags =
958 _mm256_shuffle_epi8(rss_vlan_flags_shuf,
962 __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
965 if (rxq->fdir_enabled) {
966 const __m512i fdir_permute_mask = _mm512_set_epi32
971 __m512i fdir_tmp = _mm512_permutex2var_epi32
972 (raw_desc0_3, fdir_permute_mask, raw_desc4_7);
973 const __m256i fdir_id0_7 = _mm512_extracti64x4_epi64
975 const __m256i fdir_flags =
976 flex_rxd_to_fdir_flags_vec_avx512(fdir_id0_7);
978 /* merge with fdir_flags */
979 mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_flags);
981 /* write to mbuf: have to use scalar store here */
982 rx_pkts[i + 0]->hash.fdir.hi =
983 _mm256_extract_epi32(fdir_id0_7, 3);
985 rx_pkts[i + 1]->hash.fdir.hi =
986 _mm256_extract_epi32(fdir_id0_7, 7);
988 rx_pkts[i + 2]->hash.fdir.hi =
989 _mm256_extract_epi32(fdir_id0_7, 2);
991 rx_pkts[i + 3]->hash.fdir.hi =
992 _mm256_extract_epi32(fdir_id0_7, 6);
994 rx_pkts[i + 4]->hash.fdir.hi =
995 _mm256_extract_epi32(fdir_id0_7, 1);
997 rx_pkts[i + 5]->hash.fdir.hi =
998 _mm256_extract_epi32(fdir_id0_7, 5);
1000 rx_pkts[i + 6]->hash.fdir.hi =
1001 _mm256_extract_epi32(fdir_id0_7, 0);
1003 rx_pkts[i + 7]->hash.fdir.hi =
1004 _mm256_extract_epi32(fdir_id0_7, 4);
1005 } /* if() on fdir_enabled */
1007 __m256i mb4_5 = _mm512_extracti64x4_epi64(mb4_7, 0);
1008 __m256i mb6_7 = _mm512_extracti64x4_epi64(mb4_7, 1);
1009 __m256i mb0_1 = _mm512_extracti64x4_epi64(mb0_3, 0);
1010 __m256i mb2_3 = _mm512_extracti64x4_epi64(mb0_3, 1);
1012 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1014 * need to load the 2nd 16B of each desc for RSS hash parsing;
1015 * entering this branch will cause a performance drop.
1017 if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
1018 DEV_RX_OFFLOAD_RSS_HASH) {
1019 /* load bottom half of every 32B desc */
1020 const __m128i raw_desc_bh7 =
1022 ((void *)(&rxdp[7].wb.status_error1));
1023 rte_compiler_barrier();
1024 const __m128i raw_desc_bh6 =
1026 ((void *)(&rxdp[6].wb.status_error1));
1027 rte_compiler_barrier();
1028 const __m128i raw_desc_bh5 =
1030 ((void *)(&rxdp[5].wb.status_error1));
1031 rte_compiler_barrier();
1032 const __m128i raw_desc_bh4 =
1034 ((void *)(&rxdp[4].wb.status_error1));
1035 rte_compiler_barrier();
1036 const __m128i raw_desc_bh3 =
1038 ((void *)(&rxdp[3].wb.status_error1));
1039 rte_compiler_barrier();
1040 const __m128i raw_desc_bh2 =
1042 ((void *)(&rxdp[2].wb.status_error1));
1043 rte_compiler_barrier();
1044 const __m128i raw_desc_bh1 =
1046 ((void *)(&rxdp[1].wb.status_error1));
1047 rte_compiler_barrier();
1048 const __m128i raw_desc_bh0 =
1050 ((void *)(&rxdp[0].wb.status_error1));
1052 __m256i raw_desc_bh6_7 =
1053 _mm256_inserti128_si256
1054 (_mm256_castsi128_si256(raw_desc_bh6),
1056 __m256i raw_desc_bh4_5 =
1057 _mm256_inserti128_si256
1058 (_mm256_castsi128_si256(raw_desc_bh4),
1060 __m256i raw_desc_bh2_3 =
1061 _mm256_inserti128_si256
1062 (_mm256_castsi128_si256(raw_desc_bh2),
1064 __m256i raw_desc_bh0_1 =
1065 _mm256_inserti128_si256
1066 (_mm256_castsi128_si256(raw_desc_bh0),
1070 * to shift the 32b RSS hash value to the
1071 * highest 32b of each 128b before mask
1073 __m256i rss_hash6_7 =
1074 _mm256_slli_epi64(raw_desc_bh6_7, 32);
1075 __m256i rss_hash4_5 =
1076 _mm256_slli_epi64(raw_desc_bh4_5, 32);
1077 __m256i rss_hash2_3 =
1078 _mm256_slli_epi64(raw_desc_bh2_3, 32);
1079 __m256i rss_hash0_1 =
1080 _mm256_slli_epi64(raw_desc_bh0_1, 32);
1082 __m256i rss_hash_msk =
1083 _mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
1084 0xFFFFFFFF, 0, 0, 0);
1086 rss_hash6_7 = _mm256_and_si256
1087 (rss_hash6_7, rss_hash_msk);
1088 rss_hash4_5 = _mm256_and_si256
1089 (rss_hash4_5, rss_hash_msk);
1090 rss_hash2_3 = _mm256_and_si256
1091 (rss_hash2_3, rss_hash_msk);
1092 rss_hash0_1 = _mm256_and_si256
1093 (rss_hash0_1, rss_hash_msk);
1095 mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
1096 mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
1097 mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
1098 mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
1099 } /* if() on RSS hash parsing */
1103 * At this point, we have the 8 sets of flags in the low 16 bits
1104 * of each 32-bit value in mbuf_flags.
1105 * We want to extract these, and merge them with the mbuf init
1106 * data so we can do a single write to the mbuf to set the flags
1107 * and all the other initialization fields. Extracting the
1108 * appropriate flags means that we have to do a shift and blend
1109 * for each mbuf before we do the write. However, we can also
1110 * add in the previously computed rx_descriptor fields to
1111 * make a single 256-bit write per mbuf
1113 /* check the structure matches expectations */
1114 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
1115 offsetof(struct rte_mbuf, rearm_data) + 8);
1116 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
1117 RTE_ALIGN(offsetof(struct rte_mbuf,
1120 /* build up data and do writes */
1121 __m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
1123 rearm6 = _mm256_blend_epi32(mbuf_init,
1124 _mm256_slli_si256(mbuf_flags, 8),
1126 rearm4 = _mm256_blend_epi32(mbuf_init,
1127 _mm256_slli_si256(mbuf_flags, 4),
1129 rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04);
1130 rearm0 = _mm256_blend_epi32(mbuf_init,
1131 _mm256_srli_si256(mbuf_flags, 4),
1133 /* permute to add in the rx_descriptor e.g. rss fields */
1134 rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
1135 rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
1136 rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
1137 rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
1139 _mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data,
1141 _mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data,
1143 _mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data,
1145 _mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data,
1148 /* repeat for the odd mbufs */
1149 const __m256i odd_flags =
1150 _mm256_castsi128_si256
1151 (_mm256_extracti128_si256(mbuf_flags, 1));
1152 rearm7 = _mm256_blend_epi32(mbuf_init,
1153 _mm256_slli_si256(odd_flags, 8),
1155 rearm5 = _mm256_blend_epi32(mbuf_init,
1156 _mm256_slli_si256(odd_flags, 4),
1158 rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04);
1159 rearm1 = _mm256_blend_epi32(mbuf_init,
1160 _mm256_srli_si256(odd_flags, 4),
1162 /* since odd mbufs are already in hi 128-bits use blend */
1163 rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
1164 rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
1165 rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
1166 rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
1167 /* again write to mbufs */
1168 _mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data,
1170 _mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data,
1172 _mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data,
1174 _mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data,
1177 /* extract and record EOP bit */
1179 const __m128i eop_mask =
1181 IAVF_RX_FLEX_DESC_STATUS0_EOF_S);
1182 const __m256i eop_bits256 = _mm256_and_si256(status0_7,
1184 /* pack status bits into a single 128-bit register */
1185 const __m128i eop_bits =
1187 (_mm256_castsi256_si128(eop_bits256),
1188 _mm256_extractf128_si256(eop_bits256,
1191 * flip bits, and mask out the EOP bit, which is now
1192 * a split-packet bit i.e. !EOP, rather than EOP one.
1194 __m128i split_bits = _mm_andnot_si128(eop_bits,
1197 * eop bits are out of order, so we need to shuffle them
1198 * back into order again. In doing so, only use low 8
1199 * bits, which acts like another pack instruction
1200 * The original order is (hi->lo): 1,3,5,7,0,2,4,6
1201 * [Since we use epi8, the 16-bit positions are
1202 * multiplied by 2 in the eop_shuffle value.]
1204 __m128i eop_shuffle =
1205 _mm_set_epi8(/* zero hi 64b */
1206 0xFF, 0xFF, 0xFF, 0xFF,
1207 0xFF, 0xFF, 0xFF, 0xFF,
1208 /* move values to lo 64b */
1211 split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
1212 *(uint64_t *)split_packet =
1213 _mm_cvtsi128_si64(split_bits);
1214 split_packet += IAVF_DESCS_PER_LOOP_AVX;
1217 /* perform dd_check */
1218 status0_7 = _mm256_and_si256(status0_7, dd_check);
1219 status0_7 = _mm256_packs_epi32(status0_7,
1220 _mm256_setzero_si256());
1222 uint64_t burst = __builtin_popcountll
1224 (_mm256_extracti128_si256
1226 burst += __builtin_popcountll
1228 (_mm256_castsi256_si128(status0_7)));
1230 if (burst != IAVF_DESCS_PER_LOOP_AVX)
1234 /* update tail pointers */
1235 rxq->rx_tail += received;
1236 rxq->rx_tail &= (rxq->nb_rx_desc - 1);
1237 if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep aligned */
1241 rxq->rxrearm_nb += received;
1247 * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
1250 iavf_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
1253 return _iavf_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL);
1258 * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
1261 iavf_recv_pkts_vec_avx512_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
1264 return _iavf_recv_raw_pkts_vec_avx512_flex_rxd(rx_queue, rx_pkts,
1269 * vPMD receive routine that reassembles single burst of 32 scattered packets
1271 * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
1274 iavf_recv_scattered_burst_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
1277 struct iavf_rx_queue *rxq = rx_queue;
1278 uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0};
1280 /* get some new buffers */
1281 uint16_t nb_bufs = _iavf_recv_raw_pkts_vec_avx512(rxq, rx_pkts, nb_pkts,
1286 /* happy day case, full burst + no packets to be joined */
1287 const uint64_t *split_fl64 = (uint64_t *)split_flags;
1289 if (!rxq->pkt_first_seg &&
1290 split_fl64[0] == 0 && split_fl64[1] == 0 &&
1291 split_fl64[2] == 0 && split_fl64[3] == 0)
1294 /* reassemble any packets that need reassembly */
1297 if (!rxq->pkt_first_seg) {
1298 /* find the first split flag, and only reassemble then */
1299 while (i < nb_bufs && !split_flags[i])
1303 rxq->pkt_first_seg = rx_pkts[i];
1305 return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
1310 * vPMD receive routine that reassembles scattered packets.
1311 * Main receive routine that can handle arbitrary burst sizes
1313 * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
1316 iavf_recv_scattered_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
1319 uint16_t retval = 0;
1321 while (nb_pkts > IAVF_VPMD_RX_MAX_BURST) {
1322 uint16_t burst = iavf_recv_scattered_burst_vec_avx512(rx_queue,
1323 rx_pkts + retval, IAVF_VPMD_RX_MAX_BURST);
1326 if (burst < IAVF_VPMD_RX_MAX_BURST)
1329 return retval + iavf_recv_scattered_burst_vec_avx512(rx_queue,
1330 rx_pkts + retval, nb_pkts);
1334 * vPMD receive routine that reassembles single burst of
1335 * 32 scattered packets for flex RxD
1337 * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
1340 iavf_recv_scattered_burst_vec_avx512_flex_rxd(void *rx_queue,
1341 struct rte_mbuf **rx_pkts,
1344 struct iavf_rx_queue *rxq = rx_queue;
1345 uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0};
1347 /* get some new buffers */
1348 uint16_t nb_bufs = _iavf_recv_raw_pkts_vec_avx512_flex_rxd(rxq,
1349 rx_pkts, nb_pkts, split_flags);
1353 /* happy day case, full burst + no packets to be joined */
1354 const uint64_t *split_fl64 = (uint64_t *)split_flags;
1356 if (!rxq->pkt_first_seg &&
1357 split_fl64[0] == 0 && split_fl64[1] == 0 &&
1358 split_fl64[2] == 0 && split_fl64[3] == 0)
1361 /* reassemble any packets that need reassembly */
1364 if (!rxq->pkt_first_seg) {
1365 /* find the first split flag, and only reassemble then */
1366 while (i < nb_bufs && !split_flags[i])
1370 rxq->pkt_first_seg = rx_pkts[i];
1372 return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
1377 * vPMD receive routine that reassembles scattered packets for flex RxD.
1378 * Main receive routine that can handle arbitrary burst sizes
1380 * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
1383 iavf_recv_scattered_pkts_vec_avx512_flex_rxd(void *rx_queue,
1384 struct rte_mbuf **rx_pkts,
1387 uint16_t retval = 0;
1389 while (nb_pkts > IAVF_VPMD_RX_MAX_BURST) {
1391 iavf_recv_scattered_burst_vec_avx512_flex_rxd
1392 (rx_queue, rx_pkts + retval,
1393 IAVF_VPMD_RX_MAX_BURST);
1396 if (burst < IAVF_VPMD_RX_MAX_BURST)
1399 return retval + iavf_recv_scattered_burst_vec_avx512_flex_rxd(rx_queue,
1400 rx_pkts + retval, nb_pkts);
1403 static __rte_always_inline int
1404 iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq)
1406 struct iavf_tx_vec_entry *txep;
1410 struct rte_mbuf *m, *free[IAVF_VPMD_TX_MAX_FREE_BUF];
1412 /* check DD bits on threshold descriptor */
1413 if ((txq->tx_ring[txq->next_dd].cmd_type_offset_bsz &
1414 rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
1415 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE))
1420 /* first buffer to free from S/W ring is at index
1421 * tx_next_dd - (tx_rs_thresh-1)
1423 txep = (void *)txq->sw_ring;
1424 txep += txq->next_dd - (n - 1);
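/* With DEV_TX_OFFLOAD_MBUF_FAST_FREE the application guarantees that all
 * mbufs share one mempool and carry no extra references, so they can be
 * returned to the mempool cache in bulk without the per-mbuf prefree
 * checks used in the generic path further below.
 */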
1426 if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
1427 struct rte_mempool *mp = txep[0].mbuf->pool;
1428 struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
1432 if (!cache || cache->len == 0)
1435 cache_objs = &cache->objs[cache->len];
1437 if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
1438 rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n);
1442 /* The cache is maintained as follows:
1443 * 1. Add the objects to the cache.
1444 * 2. Anything greater than the cache min value (if it crosses the
1445 * cache flush threshold) is flushed to the ring.
1447 /* Add elements back into the cache */
1448 uint32_t copied = 0;
1449 /* n is multiple of 32 */
1450 while (copied < n) {
1451 const __m512i a = _mm512_loadu_si512(&txep[copied]);
1452 const __m512i b = _mm512_loadu_si512(&txep[copied + 8]);
1453 const __m512i c = _mm512_loadu_si512(&txep[copied + 16]);
1454 const __m512i d = _mm512_loadu_si512(&txep[copied + 24]);
1456 _mm512_storeu_si512(&cache_objs[copied], a);
1457 _mm512_storeu_si512(&cache_objs[copied + 8], b);
1458 _mm512_storeu_si512(&cache_objs[copied + 16], c);
1459 _mm512_storeu_si512(&cache_objs[copied + 24], d);
1464 if (cache->len >= cache->flushthresh) {
1465 rte_mempool_ops_enqueue_bulk(mp,
1466 &cache->objs[cache->size],
1467 cache->len - cache->size);
1468 cache->len = cache->size;
1474 m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
1478 for (i = 1; i < n; i++) {
1479 m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
1481 if (likely(m->pool == free[0]->pool)) {
1482 free[nb_free++] = m;
1484 rte_mempool_put_bulk(free[0]->pool,
1492 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
1494 for (i = 1; i < n; i++) {
1495 m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
1497 rte_mempool_put(m->pool, m);
1502 /* buffers were freed, update counters */
1503 txq->nb_free = (uint16_t)(txq->nb_free + txq->rs_thresh);
1504 txq->next_dd = (uint16_t)(txq->next_dd + txq->rs_thresh);
1505 if (txq->next_dd >= txq->nb_tx_desc)
1506 txq->next_dd = (uint16_t)(txq->rs_thresh - 1);
1508 return txq->rs_thresh;
1511 static __rte_always_inline void
1512 tx_backlog_entry_avx512(struct iavf_tx_vec_entry *txep,
1513 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1517 for (i = 0; i < (int)nb_pkts; ++i)
1518 txep[i].mbuf = tx_pkts[i];
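/* write a single data descriptor: buffer DMA address in the low qword,
 * DTYPE_DATA plus command flags and the buffer size packed into the high
 * qword
 */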
1522 iavf_vtx1(volatile struct iavf_tx_desc *txdp,
1523 struct rte_mbuf *pkt, uint64_t flags)
1526 (IAVF_TX_DESC_DTYPE_DATA |
1527 ((uint64_t)flags << IAVF_TXD_QW1_CMD_SHIFT) |
1528 ((uint64_t)pkt->data_len << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT));
1530 __m128i descriptor = _mm_set_epi64x(high_qw,
1531 pkt->buf_iova + pkt->data_off);
1532 _mm_storeu_si128((__m128i *)txdp, descriptor);
1535 #define IAVF_TX_LEN_MASK 0xAA
1536 #define IAVF_TX_OFF_MASK 0x55
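/* within a 512-bit block of four descriptors, IAVF_TX_LEN_MASK (0xAA)
 * selects the odd 64-bit lanes holding cmd_type_offset_bsz and
 * IAVF_TX_OFF_MASK (0x55) the even lanes holding the buffer addresses
 */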
1538 iavf_vtx(volatile struct iavf_tx_desc *txdp,
1539 struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
1541 const uint64_t hi_qw_tmpl = (IAVF_TX_DESC_DTYPE_DATA |
1542 ((uint64_t)flags << IAVF_TXD_QW1_CMD_SHIFT));
1544 /* if unaligned on a 32-byte boundary, do one descriptor to align */
1545 if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) {
1546 iavf_vtx1(txdp, *pkt, flags);
1547 nb_pkts--, txdp++, pkt++;
1550 /* do 4 at a time while possible, in bursts */
1551 for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) {
1554 ((uint64_t)pkt[3]->data_len,
1556 (uint64_t)pkt[2]->data_len,
1558 (uint64_t)pkt[1]->data_len,
1560 (uint64_t)pkt[0]->data_len,
1562 __m512i hi_qw_tmpl_4 = _mm512_set1_epi64(hi_qw_tmpl);
1563 __m512i data_off_4 =
1574 desc4 = _mm512_mask_slli_epi64(desc4, IAVF_TX_LEN_MASK, desc4,
1575 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT);
1576 desc4 = _mm512_mask_or_epi64(desc4, IAVF_TX_LEN_MASK, desc4,
1578 desc4 = _mm512_mask_add_epi64(desc4, IAVF_TX_OFF_MASK, desc4,
1580 _mm512_storeu_si512((void *)txdp, desc4);
1583 /* do any last ones */
1585 iavf_vtx1(txdp, *pkt, flags);
1586 txdp++, pkt++, nb_pkts--;
1590 static inline uint16_t
1591 iavf_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
1594 struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
1595 volatile struct iavf_tx_desc *txdp;
1596 struct iavf_tx_vec_entry *txep;
1597 uint16_t n, nb_commit, tx_id;
1598 /* bit2 is reserved and must be set to 1 according to Spec */
1599 uint64_t flags = IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_ICRC;
1600 uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;
1602 /* crossing the rs_thresh boundary is not allowed */
1603 nb_pkts = RTE_MIN(nb_pkts, txq->rs_thresh);
1605 if (txq->nb_free < txq->free_thresh)
1606 iavf_tx_free_bufs_avx512(txq);
1608 nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts);
1609 if (unlikely(nb_pkts == 0))
1612 tx_id = txq->tx_tail;
1613 txdp = &txq->tx_ring[tx_id];
1614 txep = (void *)txq->sw_ring;
1617 txq->nb_free = (uint16_t)(txq->nb_free - nb_pkts);
1619 n = (uint16_t)(txq->nb_tx_desc - tx_id);
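/* n is the number of descriptor slots left before the end of the ring; if
 * the commit wraps, fill up to the ring end first (the last descriptor
 * gets the RS bit) and then continue from slot 0
 */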
1620 if (nb_commit >= n) {
1621 tx_backlog_entry_avx512(txep, tx_pkts, n);
1623 iavf_vtx(txdp, tx_pkts, n - 1, flags);
1627 iavf_vtx1(txdp, *tx_pkts++, rs);
1629 nb_commit = (uint16_t)(nb_commit - n);
1632 txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
1634 /* avoid reaching the end of the ring */
1635 txdp = &txq->tx_ring[tx_id];
1636 txep = (void *)txq->sw_ring;
1640 tx_backlog_entry_avx512(txep, tx_pkts, nb_commit);
1642 iavf_vtx(txdp, tx_pkts, nb_commit, flags);
1644 tx_id = (uint16_t)(tx_id + nb_commit);
1645 if (tx_id > txq->next_rs) {
1646 txq->tx_ring[txq->next_rs].cmd_type_offset_bsz |=
1647 rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) <<
1648 IAVF_TXD_QW1_CMD_SHIFT);
1650 (uint16_t)(txq->next_rs + txq->rs_thresh);
1653 txq->tx_tail = tx_id;
1655 IAVF_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
1661 iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
1665 struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
1670 num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
1671 ret = iavf_xmit_fixed_burst_vec_avx512(tx_queue, &tx_pkts[nb_tx],
1683 iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
1686 const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
1687 struct iavf_tx_vec_entry *swr = (void *)txq->sw_ring;
1689 if (!txq->sw_ring || txq->nb_free == max_desc)
1692 i = txq->next_dd - txq->rs_thresh + 1;
1693 if (txq->tx_tail < i) {
1694 for (; i < txq->nb_tx_desc; i++) {
1695 rte_pktmbuf_free_seg(swr[i].mbuf);
1702 static const struct iavf_txq_ops avx512_vec_txq_ops = {
1703 .release_mbufs = iavf_tx_queue_release_mbufs_avx512,
1707 iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq)
1709 txq->ops = &avx512_vec_txq_ops;