/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include "iavf_rxtx_vec_common.h"

#include <rte_vect.h>

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

#define IAVF_DESCS_PER_LOOP_AVX 8
#define PKTLEN_SHIFT 10

/******************************************************************************
 * If the user knows a specific offload is not enabled by the application,
 * the corresponding macro can be commented out to save effort in the fast
 * path. Currently the features below are supported in the RX path:
 * 1, checksum offload
 * 2, VLAN/QINQ stripping
 * 3, RSS hash
 * 4, packet type analysis
 * 5, flow director ID report
 ******************************************************************************/
#define IAVF_RX_CSUM_OFFLOAD
#define IAVF_RX_VLAN_OFFLOAD
#define IAVF_RX_RSS_OFFLOAD
#define IAVF_RX_PTYPE_OFFLOAD
#define IAVF_RX_FDIR_OFFLOAD
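
/*
 * Note: each IAVF_RX_*_OFFLOAD macro above only gates the corresponding
 * flag-extraction blocks further down at compile time; descriptor parsing and
 * the mbuf rearm writes always run. With IAVF_RX_CSUM_OFFLOAD commented out,
 * for example, the l3_l4 shuffle tables and the shuffle/shift/mask steps that
 * feed them are compiled out entirely and ol_flags simply never receives
 * checksum bits.
 */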

static __rte_always_inline void
iavf_rxq_rearm(struct iavf_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union iavf_rx_desc *rxdp;
	struct rte_mempool_cache *cache =
		rte_mempool_default_cache(rxq->mp, rte_lcore_id());
	struct rte_mbuf **rxp = &rxq->sw_ring[rxq->rxrearm_start];

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	if (unlikely(!cache))
		return iavf_rxq_rearm_common(rxq, true);

	/* We need to pull 'n' more MBUFs into the software ring from mempool.
	 * We inline the mempool function here, so we can vectorize the copy
	 * from the cache into the shadow ring.
	 */

	/* Can this be satisfied from the cache? */
	if (cache->len < IAVF_RXQ_REARM_THRESH) {
		/* No. Backfill the cache first, and then fill from it */
		uint32_t req = IAVF_RXQ_REARM_THRESH + (cache->size -
				cache->len);

		/* How many do we require i.e. number to fill the cache + the request */
		int ret = rte_mempool_ops_dequeue_bulk
				(rxq->mp, &cache->objs[cache->len], req);
		if (ret == 0) {
			cache->len += req;
		} else {
			if (rxq->rxrearm_nb + IAVF_RXQ_REARM_THRESH >=
			    rxq->nb_rx_desc) {
				__m128i dma_addr0;

				dma_addr0 = _mm_setzero_si128();
				for (i = 0; i < IAVF_VPMD_DESCS_PER_LOOP; i++) {
					rxp[i] = &rxq->fake_mbuf;
					_mm_storeu_si128((__m128i *)&rxdp[i].read,
							 dma_addr0);
				}
			}
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
					IAVF_RXQ_REARM_THRESH;
			return;
		}
	}

	const __m512i iova_offsets = _mm512_set1_epi64(offsetof
						       (struct rte_mbuf, buf_iova));
	const __m512i headroom = _mm512_set1_epi64(RTE_PKTMBUF_HEADROOM);

#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
	/* to shuffle the addresses to correct slots. Values 4-7 will contain
	 * zeros, so use 7 for a zero-value.
	 */
	const __m512i permute_idx = _mm512_set_epi64(7, 7, 3, 1, 7, 7, 2, 0);
#else
	const __m512i permute_idx = _mm512_set_epi64(7, 3, 6, 2, 5, 1, 4, 0);
#endif
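
	/*
	 * Layout note: a 32B descriptor carries a pkt_addr and a hdr_addr
	 * slot, so one zmm register only spans two ring entries and the
	 * gathered addresses have to be spread out. The permute places
	 * addresses 0/1 in the pkt_addr slots of descriptors 0-1 and parks
	 * addresses 2/3 where the byte shift below (_mm512_bsrli_epi128) can
	 * slide them into the pkt_addr slots of descriptors 2-3.
	 */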

	/* Initialize the mbufs in vector, process 8 mbufs in one loop, taking
	 * from mempool cache and populating both shadow and HW rings
	 */
	for (i = 0; i < IAVF_RXQ_REARM_THRESH / IAVF_DESCS_PER_LOOP_AVX; i++) {
		const __m512i mbuf_ptrs = _mm512_loadu_si512
			(&cache->objs[cache->len - IAVF_DESCS_PER_LOOP_AVX]);
		_mm512_storeu_si512(rxp, mbuf_ptrs);
		/* gather iova of mbuf0-7 into one zmm reg */
		const __m512i iova_base_addrs = _mm512_i64gather_epi64
				(_mm512_add_epi64(mbuf_ptrs, iova_offsets),
				 0, /* base */
				 1  /* scale */);
		const __m512i iova_addrs = _mm512_add_epi64(iova_base_addrs,
				headroom);
#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
		const __m512i iovas0 = _mm512_castsi256_si512
				(_mm512_extracti64x4_epi64(iova_addrs, 0));
		const __m512i iovas1 = _mm512_castsi256_si512
				(_mm512_extracti64x4_epi64(iova_addrs, 1));

		/* permute leaves desc 2-3 addresses in header address slots 0-1
		 * but these are ignored by driver since header split not
		 * enabled. Similarly for desc 6 & 7.
		 */
		const __m512i desc0_1 = _mm512_permutexvar_epi64
				(permute_idx, iovas0);
		const __m512i desc2_3 = _mm512_bsrli_epi128(desc0_1, 8);

		const __m512i desc4_5 = _mm512_permutexvar_epi64
				(permute_idx, iovas1);
		const __m512i desc6_7 = _mm512_bsrli_epi128(desc4_5, 8);

		_mm512_storeu_si512((void *)rxdp, desc0_1);
		_mm512_storeu_si512((void *)(rxdp + 2), desc2_3);
		_mm512_storeu_si512((void *)(rxdp + 4), desc4_5);
		_mm512_storeu_si512((void *)(rxdp + 6), desc6_7);
#else
		/* permute leaves desc 4-7 addresses in header address slots 0-3
		 * but these are ignored by driver since header split not
		 * enabled.
		 */
		const __m512i desc0_3 = _mm512_permutexvar_epi64(permute_idx,
				iova_addrs);
		const __m512i desc4_7 = _mm512_bsrli_epi128(desc0_3, 8);

		_mm512_storeu_si512((void *)rxdp, desc0_3);
		_mm512_storeu_si512((void *)(rxdp + 4), desc4_7);
#endif
		rxp += IAVF_DESCS_PER_LOOP_AVX;
		rxdp += IAVF_DESCS_PER_LOOP_AVX;
		cache->len -= IAVF_DESCS_PER_LOOP_AVX;
	}

	rxq->rxrearm_start += IAVF_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= IAVF_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			   (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	IAVF_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
}
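
/*
 * IAVF_RX_LEN_MASK is a 32-lane epi16 blend mask (one bit per 16-bit lane of
 * a zmm register): bits 7/15/23/31 select the top word of each 128-bit
 * descriptor. In the legacy descriptor, qword1 keeps the packet length in
 * bits 63:38, so shifting a copy left by PKTLEN_SHIFT (10) and blending only
 * that top word back in yields a byte-aligned 16-bit length for the shuffle
 * below to pick up.
 */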
#define IAVF_RX_LEN_MASK 0x80808080
static __rte_always_inline uint16_t
_iavf_recv_raw_pkts_vec_avx512(struct iavf_rx_queue *rxq,
			       struct rte_mbuf **rx_pkts,
			       uint16_t nb_pkts, uint8_t *split_packet,
			       bool offload)
{
#ifdef IAVF_RX_PTYPE_OFFLOAD
	const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl;
#endif

	const __m256i mbuf_init = _mm256_set_epi64x(0, 0, 0,
						    rxq->mbuf_initializer);
	struct rte_mbuf **sw_ring = &rxq->sw_ring[rxq->rx_tail];
	volatile union iavf_rx_desc *rxdp = rxq->rx_ring + rxq->rx_tail;

	rte_prefetch0(rxdp);

	/* nb_pkts has to be floor-aligned to IAVF_DESCS_PER_LOOP_AVX */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_DESCS_PER_LOOP_AVX);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > IAVF_RXQ_REARM_THRESH)
		iavf_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.qword1.status_error_len &
	      rte_cpu_to_le_32(1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
		return 0;

	/* constants used in processing loop */
	const __m512i crc_adjust =
		_mm512_set_epi32
			(/* 1st descriptor */
			 0,             /* ignore non-length fields */
			 -rxq->crc_len, /* sub crc on data_len */
			 -rxq->crc_len, /* sub crc on pkt_len */
			 0,             /* ignore pkt_type field */
			 /* 2nd descriptor */
			 0,             /* ignore non-length fields */
			 -rxq->crc_len, /* sub crc on data_len */
			 -rxq->crc_len, /* sub crc on pkt_len */
			 0,             /* ignore pkt_type field */
			 /* 3rd descriptor */
			 0,             /* ignore non-length fields */
			 -rxq->crc_len, /* sub crc on data_len */
			 -rxq->crc_len, /* sub crc on pkt_len */
			 0,             /* ignore pkt_type field */
			 /* 4th descriptor */
			 0,             /* ignore non-length fields */
			 -rxq->crc_len, /* sub crc on data_len */
			 -rxq->crc_len, /* sub crc on pkt_len */
			 0              /* ignore pkt_type field */
			);

	/* 8 packets DD mask, LSB in each 32-bit value */
	const __m256i dd_check = _mm256_set1_epi32(1);

	/* 8 packets EOP mask, second-LSB in each 32-bit value */
	const __m256i eop_check = _mm256_slli_epi32(dd_check,
			IAVF_RX_DESC_STATUS_EOF_SHIFT);

	/* mask to shuffle from desc. to mbuf (4 descriptors) */
	const __m512i shuf_msk =
		_mm512_set_epi32
			(/* 1st descriptor */
			 0x07060504,    /* octet 4~7, 32bits rss */
			 0x03020F0E,    /* octet 2~3, low 16 bits vlan_macip */
					/* octet 15~14, 16 bits data_len */
			 0xFFFF0F0E,    /* skip high 16 bits pkt_len, zero out */
					/* octet 15~14, low 16 bits pkt_len */
			 0xFFFFFFFF,    /* pkt_type set as unknown */
			 /* 2nd descriptor */
			 0x07060504,    /* octet 4~7, 32bits rss */
			 0x03020F0E,    /* octet 2~3, low 16 bits vlan_macip */
					/* octet 15~14, 16 bits data_len */
			 0xFFFF0F0E,    /* skip high 16 bits pkt_len, zero out */
					/* octet 15~14, low 16 bits pkt_len */
			 0xFFFFFFFF,    /* pkt_type set as unknown */
			 /* 3rd descriptor */
			 0x07060504,    /* octet 4~7, 32bits rss */
			 0x03020F0E,    /* octet 2~3, low 16 bits vlan_macip */
					/* octet 15~14, 16 bits data_len */
			 0xFFFF0F0E,    /* skip high 16 bits pkt_len, zero out */
					/* octet 15~14, low 16 bits pkt_len */
			 0xFFFFFFFF,    /* pkt_type set as unknown */
			 /* 4th descriptor */
			 0x07060504,    /* octet 4~7, 32bits rss */
			 0x03020F0E,    /* octet 2~3, low 16 bits vlan_macip */
					/* octet 15~14, 16 bits data_len */
			 0xFFFF0F0E,    /* skip high 16 bits pkt_len, zero out */
					/* octet 15~14, low 16 bits pkt_len */
			 0xFFFFFFFF     /* pkt_type set as unknown */
			);
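
	/*
	 * Each shuf_msk dword fills one dword of rx_descriptor_fields1:
	 * 0xFFFFFFFF zeroes a lane (pkt_type is blended in separately from
	 * the ptype table), 0xFFFF0F0E takes the 16-bit pkt_len from
	 * descriptor bytes 14-15, 0x03020F0E packs data_len plus vlan_tci,
	 * and 0x07060504 copies the 32-bit RSS hash.
	 */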

	/**
	 * compile-time check the above crc and shuffle layout is correct.
	 * NOTE: the first field (lowest address) is given last in set_epi
	 * calls above.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

	uint16_t i, received;

	for (i = 0, received = 0; i < nb_pkts;
	     i += IAVF_DESCS_PER_LOOP_AVX,
	     rxdp += IAVF_DESCS_PER_LOOP_AVX) {
		/* step 1, copy over 8 mbuf pointers to rx_pkts array */
		_mm256_storeu_si256((void *)&rx_pkts[i],
				    _mm256_loadu_si256((void *)&sw_ring[i]));
#ifdef RTE_ARCH_X86_64
		_mm256_storeu_si256
			((void *)&rx_pkts[i + 4],
			 _mm256_loadu_si256((void *)&sw_ring[i + 4]));
#endif

		__m512i raw_desc0_3, raw_desc4_7;
		const __m128i raw_desc7 =
			_mm_load_si128((void *)(rxdp + 7));
		rte_compiler_barrier();
		const __m128i raw_desc6 =
			_mm_load_si128((void *)(rxdp + 6));
		rte_compiler_barrier();
		const __m128i raw_desc5 =
			_mm_load_si128((void *)(rxdp + 5));
		rte_compiler_barrier();
		const __m128i raw_desc4 =
			_mm_load_si128((void *)(rxdp + 4));
		rte_compiler_barrier();
		const __m128i raw_desc3 =
			_mm_load_si128((void *)(rxdp + 3));
		rte_compiler_barrier();
		const __m128i raw_desc2 =
			_mm_load_si128((void *)(rxdp + 2));
		rte_compiler_barrier();
		const __m128i raw_desc1 =
			_mm_load_si128((void *)(rxdp + 1));
		rte_compiler_barrier();
		const __m128i raw_desc0 =
			_mm_load_si128((void *)(rxdp + 0));

		raw_desc4_7 = _mm512_broadcast_i32x4(raw_desc4);
		raw_desc4_7 = _mm512_inserti32x4(raw_desc4_7, raw_desc5, 1);
		raw_desc4_7 = _mm512_inserti32x4(raw_desc4_7, raw_desc6, 2);
		raw_desc4_7 = _mm512_inserti32x4(raw_desc4_7, raw_desc7, 3);
		raw_desc0_3 = _mm512_broadcast_i32x4(raw_desc0);
		raw_desc0_3 = _mm512_inserti32x4(raw_desc0_3, raw_desc1, 1);
		raw_desc0_3 = _mm512_inserti32x4(raw_desc0_3, raw_desc2, 2);
		raw_desc0_3 = _mm512_inserti32x4(raw_desc0_3, raw_desc3, 3);

		if (split_packet) {
			int j;

			for (j = 0; j < IAVF_DESCS_PER_LOOP_AVX; j++)
				rte_mbuf_prefetch_part2(rx_pkts[i + j]);
		}
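
		/*
		 * The descriptor loads above run from 7 down to 0 with a
		 * compiler barrier between each: the NIC writes descriptors in
		 * order, so if a later descriptor is observed with DD set, the
		 * earlier ones (read after it) are guaranteed to be seen in
		 * their completed state as well.
		 */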

		/**
		 * convert descriptors 4-7 into mbufs, adjusting length and
		 * re-arranging fields. Then write into the mbuf.
		 */
		const __m512i len4_7 = _mm512_slli_epi32(raw_desc4_7,
							 PKTLEN_SHIFT);
		const __m512i desc4_7 = _mm512_mask_blend_epi16(IAVF_RX_LEN_MASK,
								raw_desc4_7,
								len4_7);
		__m512i mb4_7 = _mm512_shuffle_epi8(desc4_7, shuf_msk);

		mb4_7 = _mm512_add_epi32(mb4_7, crc_adjust);
#ifdef IAVF_RX_PTYPE_OFFLOAD
		/**
		 * to get packet types, shift 64-bit values down 30 bits
		 * and so ptype is in lower 8-bits in each
		 */
		const __m512i ptypes4_7 = _mm512_srli_epi64(desc4_7, 30);
		const __m256i ptypes6_7 = _mm512_extracti64x4_epi64(ptypes4_7, 1);
		const __m256i ptypes4_5 = _mm512_extracti64x4_epi64(ptypes4_7, 0);
		const uint8_t ptype7 = _mm256_extract_epi8(ptypes6_7, 24);
		const uint8_t ptype6 = _mm256_extract_epi8(ptypes6_7, 8);
		const uint8_t ptype5 = _mm256_extract_epi8(ptypes4_5, 24);
		const uint8_t ptype4 = _mm256_extract_epi8(ptypes4_5, 8);

		const __m512i ptype4_7 = _mm512_set_epi32
			(0, 0, 0, type_table[ptype7],
			 0, 0, 0, type_table[ptype6],
			 0, 0, 0, type_table[ptype5],
			 0, 0, 0, type_table[ptype4]);
		mb4_7 = _mm512_mask_blend_epi32(0x1111, mb4_7, ptype4_7);
#endif
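
		/*
		 * 0x1111 is an epi32 blend mask: it overwrites only dword 0 of
		 * each 128-bit lane, i.e. the packet_type field of each mbuf's
		 * rx_descriptor_fields1, leaving the length/vlan/hash dwords
		 * produced by the shuffle untouched.
		 */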

		/**
		 * convert descriptors 0-3 into mbufs, adjusting length and
		 * re-arranging fields. Then write into the mbuf.
		 */
		const __m512i len0_3 = _mm512_slli_epi32(raw_desc0_3,
							 PKTLEN_SHIFT);
		const __m512i desc0_3 = _mm512_mask_blend_epi16(IAVF_RX_LEN_MASK,
								raw_desc0_3,
								len0_3);
		__m512i mb0_3 = _mm512_shuffle_epi8(desc0_3, shuf_msk);

		mb0_3 = _mm512_add_epi32(mb0_3, crc_adjust);
#ifdef IAVF_RX_PTYPE_OFFLOAD
		/* get the packet types */
		const __m512i ptypes0_3 = _mm512_srli_epi64(desc0_3, 30);
		const __m256i ptypes2_3 = _mm512_extracti64x4_epi64(ptypes0_3, 1);
		const __m256i ptypes0_1 = _mm512_extracti64x4_epi64(ptypes0_3, 0);
		const uint8_t ptype3 = _mm256_extract_epi8(ptypes2_3, 24);
		const uint8_t ptype2 = _mm256_extract_epi8(ptypes2_3, 8);
		const uint8_t ptype1 = _mm256_extract_epi8(ptypes0_1, 24);
		const uint8_t ptype0 = _mm256_extract_epi8(ptypes0_1, 8);

		const __m512i ptype0_3 = _mm512_set_epi32
			(0, 0, 0, type_table[ptype3],
			 0, 0, 0, type_table[ptype2],
			 0, 0, 0, type_table[ptype1],
			 0, 0, 0, type_table[ptype0]);
		mb0_3 = _mm512_mask_blend_epi32(0x1111, mb0_3, ptype0_3);
#endif

		/**
		 * use permute/extract to get status content
		 * After the operations, the packets status flags are in the
		 * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
		 */
		/* merge the status bits into one register */
		const __m512i status_permute_msk = _mm512_set_epi32
			(0, 0, 0, 0,
			 0, 0, 0, 0,
			 22, 30, 6, 14,
			 18, 26, 2, 10);
		const __m512i raw_status0_7 = _mm512_permutex2var_epi32
			(raw_desc4_7, status_permute_msk, raw_desc0_3);
		__m256i status0_7 = _mm512_extracti64x4_epi64
			(raw_status0_7, 0);
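
		/*
		 * The permute indexes pick the low 32 bits of each status
		 * qword: indexes 0-15 select dwords from raw_desc4_7 and 16-31
		 * from raw_desc0_3. Dword 2 of every 16-byte descriptor is the
		 * status word, hence 18/26 (descriptors 0/2), 2/10 (4/6),
		 * 22/30 (1/3) and 6/14 (5/7) - which is where the interleaved
		 * order documented above comes from.
		 */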

		/* now do flag manipulation */

		/* merge flags */
		__m256i mbuf_flags = _mm256_set1_epi32(0);

		if (offload) {
#if defined(IAVF_RX_CSUM_OFFLOAD) || defined(IAVF_RX_VLAN_OFFLOAD) || defined(IAVF_RX_RSS_OFFLOAD)
			/* Status/Error flag masks */
			/**
			 * mask everything except RSS, flow director and VLAN flags
			 * bit2 is for VLAN tag, bit11 for flow director indication
			 * bit13:12 for RSS indication. Bits 3-5 of error
			 * field (bits 22-24) are for IP/L4 checksum errors
			 */
			const __m256i flags_mask =
				_mm256_set1_epi32((1 << 2) | (1 << 11) |
						  (3 << 12) | (7 << 22));
#endif
#ifdef IAVF_RX_VLAN_OFFLOAD
			/**
			 * data to be shuffled by result of flag mask. If VLAN bit is set
			 * (bit 2), then position 4 in this array will be used in the
			 * destination
			 */
			const __m256i vlan_flags_shuf =
				_mm256_set_epi32(0, 0,
						 RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
						 0,
						 /* end up 128-bits */
						 0, 0,
						 RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
						 0);
#endif
#ifdef IAVF_RX_RSS_OFFLOAD
			/**
			 * data to be shuffled by result of flag mask, shifted down 11.
			 * If RSS/FDIR bits are set, shuffle moves appropriate flags in
			 * place.
			 */
			const __m256i rss_flags_shuf =
				_mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
						RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR,
						RTE_MBUF_F_RX_RSS_HASH,
						0, 0, 0, 0,
						RTE_MBUF_F_RX_FDIR, 0,
						/* end up 128-bits */
						0, 0, 0, 0, 0, 0, 0, 0,
						RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR,
						RTE_MBUF_F_RX_RSS_HASH,
						0, 0, 0, 0,
						RTE_MBUF_F_RX_FDIR, 0);
#endif
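
			/*
			 * After the shift by 11, bit 0 of each status dword is
			 * the FD match bit and bits 2:1 are the FLTSTAT field
			 * (11b = RSS hash valid, 01b = FD ID recorded), so the
			 * byte shuffle lands on entry 1 (FDIR), entry 6 (RSS)
			 * or entry 7 (RSS + FDIR) of the table above.
			 */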
#ifdef IAVF_RX_CSUM_OFFLOAD
			/**
			 * data to be shuffled by the result of the flags mask shifted by 22
			 * bits. This gives us the l3_l4 flags.
			 */
			const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
					/* shift right 1 bit to make sure it does not exceed 255 */
					(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
					 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
					(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
					 RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
					(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
					(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
					(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
					(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
					RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
					(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1,
					/* second 128-bits */
					0, 0, 0, 0, 0, 0, 0, 0,
					(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
					 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
					(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
					 RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
					(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
					(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
					(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
					(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
					RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
					(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1);

			const __m256i cksum_mask =
				_mm256_set1_epi32(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
						  RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
						  RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);
#endif
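
			/*
			 * The table entries are pre-shifted right by one
			 * because flags such as RTE_MBUF_F_RX_L4_CKSUM_GOOD do
			 * not fit in a byte; the matching
			 * _mm256_slli_epi32(l3_l4_flags, 1) after the shuffle
			 * restores the real flag values, and cksum_mask then
			 * trims anything outside the checksum bits.
			 */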

#if defined(IAVF_RX_CSUM_OFFLOAD) || defined(IAVF_RX_VLAN_OFFLOAD) || defined(IAVF_RX_RSS_OFFLOAD)
			/* get only flag/error bits we want */
			const __m256i flag_bits =
				_mm256_and_si256(status0_7, flags_mask);
#endif
			/* set vlan and rss flags */
#ifdef IAVF_RX_VLAN_OFFLOAD
			const __m256i vlan_flags =
				_mm256_shuffle_epi8(vlan_flags_shuf, flag_bits);
#endif
#ifdef IAVF_RX_RSS_OFFLOAD
			const __m256i rss_flags =
				_mm256_shuffle_epi8(rss_flags_shuf,
						    _mm256_srli_epi32(flag_bits, 11));
#endif
#ifdef IAVF_RX_CSUM_OFFLOAD
			/**
			 * l3_l4_error flags, shuffle, then shift to correct adjustment
			 * of flags in flags_shuf, and finally mask out extra bits
			 */
			__m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
					_mm256_srli_epi32(flag_bits, 22));
			l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
			l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
#endif

#ifdef IAVF_RX_CSUM_OFFLOAD
			mbuf_flags = _mm256_or_si256(mbuf_flags, l3_l4_flags);
#endif
#ifdef IAVF_RX_RSS_OFFLOAD
			mbuf_flags = _mm256_or_si256(mbuf_flags, rss_flags);
#endif
#ifdef IAVF_RX_VLAN_OFFLOAD
			mbuf_flags = _mm256_or_si256(mbuf_flags, vlan_flags);
#endif
		}

		/**
		 * At this point, we have the 8 sets of flags in the low 16-bits
		 * of each 32-bit value in mbuf_flags.
		 * We want to extract these, and merge them with the mbuf init
		 * data so we can do a single write to the mbuf to set the flags
		 * and all the other initialization fields. Extracting the
		 * appropriate flags means that we have to do a shift and blend
		 * for each mbuf before we do the write. However, we can also
		 * add in the previously computed rx_descriptor fields to
		 * make a single 256-bit write per mbuf.
		 */
		/* check the structure matches expectations */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
				 offsetof(struct rte_mbuf, rearm_data) + 8);
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
				 RTE_ALIGN(offsetof(struct rte_mbuf,
						    rearm_data),
					   16));

		/* build up data and do writes */
		__m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
			rearm6, rearm7;
		const __m256i mb4_5 = _mm512_extracti64x4_epi64(mb4_7, 0);
		const __m256i mb6_7 = _mm512_extracti64x4_epi64(mb4_7, 1);
		const __m256i mb0_1 = _mm512_extracti64x4_epi64(mb0_3, 0);
		const __m256i mb2_3 = _mm512_extracti64x4_epi64(mb0_3, 1);

#if defined(IAVF_RX_CSUM_OFFLOAD) || defined(IAVF_RX_VLAN_OFFLOAD) || defined(IAVF_RX_RSS_OFFLOAD)
		rearm6 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(mbuf_flags, 8),
					    0x04);
		rearm4 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(mbuf_flags, 4),
					    0x04);
		rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04);
		rearm0 = _mm256_blend_epi32(mbuf_init,
					    _mm256_srli_si256(mbuf_flags, 4),
					    0x04);
		/* permute to add in the rx_descriptor e.g. rss fields */
		rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
		rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
		rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
		rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
#else
		rearm6 = _mm256_permute2f128_si256(mbuf_init, mb6_7, 0x20);
		rearm4 = _mm256_permute2f128_si256(mbuf_init, mb4_5, 0x20);
		rearm2 = _mm256_permute2f128_si256(mbuf_init, mb2_3, 0x20);
		rearm0 = _mm256_permute2f128_si256(mbuf_init, mb0_1, 0x20);
#endif

		/* write to mbuf */
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data,
				    rearm6);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data,
				    rearm4);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data,
				    rearm2);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data,
				    rearm0);
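
		/*
		 * Each rearm write covers a 32-byte window: the low 16 bytes
		 * come from mbuf_init and the high 16 bytes are the
		 * rx_descriptor_fields1 values computed above (that is what
		 * the 0x20 permute selects). Blend mask 0x04 patches dword 2
		 * only - the low half of ol_flags at rearm_data + 8 - while
		 * the upper half of ol_flags stays zero from mbuf_init.
		 */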

		/* repeat for the odd mbufs */
#if defined(IAVF_RX_CSUM_OFFLOAD) || defined(IAVF_RX_VLAN_OFFLOAD) || defined(IAVF_RX_RSS_OFFLOAD)
		const __m256i odd_flags =
			_mm256_castsi128_si256
				(_mm256_extracti128_si256(mbuf_flags, 1));
		rearm7 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(odd_flags, 8),
					    0x04);
		rearm5 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(odd_flags, 4),
					    0x04);
		rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04);
		rearm1 = _mm256_blend_epi32(mbuf_init,
					    _mm256_srli_si256(odd_flags, 4),
					    0x04);
		/* since odd mbufs are already in hi 128-bits use blend */
		rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
		rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
		rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
		rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
#else
		rearm7 = _mm256_blend_epi32(mbuf_init, mb6_7, 0xF0);
		rearm5 = _mm256_blend_epi32(mbuf_init, mb4_5, 0xF0);
		rearm3 = _mm256_blend_epi32(mbuf_init, mb2_3, 0xF0);
		rearm1 = _mm256_blend_epi32(mbuf_init, mb0_1, 0xF0);
#endif

		/* again write to mbufs */
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data,
				    rearm7);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data,
				    rearm5);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data,
				    rearm3);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data,
				    rearm1);

		/* extract and record EOP bit */
		if (split_packet) {
			const __m128i eop_mask =
				_mm_set1_epi16(1 << IAVF_RX_DESC_STATUS_EOF_SHIFT);
			const __m256i eop_bits256 = _mm256_and_si256(status0_7,
								     eop_check);
			/* pack status bits into a single 128-bit register */
			const __m128i eop_bits =
				_mm_packus_epi32
					(_mm256_castsi256_si128(eop_bits256),
					 _mm256_extractf128_si256(eop_bits256,
								  1));
			/**
			 * flip bits, and mask out the EOP bit, which is now
			 * a split-packet bit i.e. !EOP, rather than EOP one.
			 */
			__m128i split_bits = _mm_andnot_si128(eop_bits,
							      eop_mask);
			/**
			 * eop bits are out of order, so we need to shuffle them
			 * back into order again. In doing so, only use low 8
			 * bits, which acts like another pack instruction
			 * The original order is (hi->lo): 1,3,5,7,0,2,4,6
			 * [Since we use epi8, the 16-bit positions are
			 * multiplied by 2 in the eop_shuffle value.]
			 */
			__m128i eop_shuffle =
				_mm_set_epi8(/* zero hi 64b */
					     0xFF, 0xFF, 0xFF, 0xFF,
					     0xFF, 0xFF, 0xFF, 0xFF,
					     /* move values to lo 64b */
					     8, 0, 10, 2,
					     12, 4, 14, 6);
			split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
			*(uint64_t *)split_packet =
				_mm_cvtsi128_si64(split_bits);
			split_packet += IAVF_DESCS_PER_LOOP_AVX;
		}

		/* perform dd_check */
		status0_7 = _mm256_and_si256(status0_7, dd_check);
		status0_7 = _mm256_packs_epi32(status0_7,
					       _mm256_setzero_si256());

		uint64_t burst = __builtin_popcountll
					(_mm_cvtsi128_si64
						(_mm256_extracti128_si256
							(status0_7, 1)));
		burst += __builtin_popcountll
				(_mm_cvtsi128_si64
					(_mm256_castsi256_si128(status0_7)));
		received += burst;
		if (burst != IAVF_DESCS_PER_LOOP_AVX)
			break;
	}

	/* update tail pointers */
	rxq->rx_tail += received;
	rxq->rx_tail &= (rxq->nb_rx_desc - 1);
	if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep aligned */
		rxq->rx_tail--;
		received--;
	}
	rxq->rxrearm_nb += received;
	return received;
}
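
/*
 * Note the tail above is kept on an even descriptor boundary: when a burst
 * ends on an odd index, one descriptor is handed back (rx_tail--, received--),
 * presumably so the next burst's paired descriptor and sw_ring accesses stay
 * naturally aligned.
 */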

static __rte_always_inline __m256i
flex_rxd_to_fdir_flags_vec_avx512(const __m256i fdir_id0_7)
{
#define FDID_MIS_MAGIC 0xFFFFFFFF
	RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR != (1 << 2));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
	const __m256i pkt_fdir_bit = _mm256_set1_epi32(RTE_MBUF_F_RX_FDIR |
						       RTE_MBUF_F_RX_FDIR_ID);
	/* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
	const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
	__m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
					       fdir_mis_mask);
	/* XOR with the all-ones constant inverts the mask (bit-wise NOT) */
	fdir_mask = _mm256_xor_si256(fdir_mask, fdir_mis_mask);
	const __m256i fdir_flags = _mm256_and_si256(fdir_mask, pkt_fdir_bit);

	return fdir_flags;
}

static __rte_always_inline uint16_t
_iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
					struct rte_mbuf **rx_pkts,
					uint16_t nb_pkts,
					uint8_t *split_packet,
					bool offload)
{
#ifdef IAVF_RX_PTYPE_OFFLOAD
	const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl;
#endif

	const __m256i mbuf_init = _mm256_set_epi64x(0, 0, 0,
						    rxq->mbuf_initializer);
	struct rte_mbuf **sw_ring = &rxq->sw_ring[rxq->rx_tail];
	volatile union iavf_rx_flex_desc *rxdp =
		(union iavf_rx_flex_desc *)rxq->rx_ring + rxq->rx_tail;

	rte_prefetch0(rxdp);

	/* nb_pkts has to be floor-aligned to IAVF_DESCS_PER_LOOP_AVX */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_DESCS_PER_LOOP_AVX);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > IAVF_RXQ_REARM_THRESH)
		iavf_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.status_error0 &
	      rte_cpu_to_le_32(1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
		return 0;

	/* constants used in processing loop */
	const __m512i crc_adjust =
		_mm512_set_epi32
			(/* 1st descriptor */
			 0,             /* ignore non-length fields */
			 -rxq->crc_len, /* sub crc on data_len */
			 -rxq->crc_len, /* sub crc on pkt_len */
			 0,             /* ignore pkt_type field */
			 /* 2nd descriptor */
			 0,             /* ignore non-length fields */
			 -rxq->crc_len, /* sub crc on data_len */
			 -rxq->crc_len, /* sub crc on pkt_len */
			 0,             /* ignore pkt_type field */
			 /* 3rd descriptor */
			 0,             /* ignore non-length fields */
			 -rxq->crc_len, /* sub crc on data_len */
			 -rxq->crc_len, /* sub crc on pkt_len */
			 0,             /* ignore pkt_type field */
			 /* 4th descriptor */
			 0,             /* ignore non-length fields */
			 -rxq->crc_len, /* sub crc on data_len */
			 -rxq->crc_len, /* sub crc on pkt_len */
			 0              /* ignore pkt_type field */
			);

	/* 8 packets DD mask, LSB in each 32-bit value */
	const __m256i dd_check = _mm256_set1_epi32(1);

	/* 8 packets EOP mask, second-LSB in each 32-bit value */
	const __m256i eop_check = _mm256_slli_epi32(dd_check,
			IAVF_RX_FLEX_DESC_STATUS0_EOF_S);

	/* mask to shuffle from desc. to mbuf (4 descriptors) */
	const __m512i shuf_msk =
		_mm512_set_epi32
			(/* 1st descriptor */
			 0xFFFFFFFF,    /* rss hash parsed separately */
			 0x0B0A0504,    /* octet 10~11, 16 bits vlan_macip */
					/* octet 4~5, 16 bits data_len */
			 0xFFFF0504,    /* skip hi 16 bits pkt_len, zero out */
					/* octet 4~5, 16 bits pkt_len */
			 0xFFFFFFFF,    /* pkt_type set as unknown */
			 /* 2nd descriptor */
			 0xFFFFFFFF,    /* rss hash parsed separately */
			 0x0B0A0504,    /* octet 10~11, 16 bits vlan_macip */
					/* octet 4~5, 16 bits data_len */
			 0xFFFF0504,    /* skip hi 16 bits pkt_len, zero out */
					/* octet 4~5, 16 bits pkt_len */
			 0xFFFFFFFF,    /* pkt_type set as unknown */
			 /* 3rd descriptor */
			 0xFFFFFFFF,    /* rss hash parsed separately */
			 0x0B0A0504,    /* octet 10~11, 16 bits vlan_macip */
					/* octet 4~5, 16 bits data_len */
			 0xFFFF0504,    /* skip hi 16 bits pkt_len, zero out */
					/* octet 4~5, 16 bits pkt_len */
			 0xFFFFFFFF,    /* pkt_type set as unknown */
			 /* 4th descriptor */
			 0xFFFFFFFF,    /* rss hash parsed separately */
			 0x0B0A0504,    /* octet 10~11, 16 bits vlan_macip */
					/* octet 4~5, 16 bits data_len */
			 0xFFFF0504,    /* skip hi 16 bits pkt_len, zero out */
					/* octet 4~5, 16 bits pkt_len */
			 0xFFFFFFFF     /* pkt_type set as unknown */
			);

	/**
	 * compile-time check the above crc and shuffle layout is correct.
	 * NOTE: the first field (lowest address) is given last in set_epi
	 * calls above.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

	uint16_t i, received;

	for (i = 0, received = 0; i < nb_pkts;
	     i += IAVF_DESCS_PER_LOOP_AVX,
	     rxdp += IAVF_DESCS_PER_LOOP_AVX) {
		/* step 1, copy over 8 mbuf pointers to rx_pkts array */
		_mm256_storeu_si256((void *)&rx_pkts[i],
				    _mm256_loadu_si256((void *)&sw_ring[i]));
#ifdef RTE_ARCH_X86_64
		_mm256_storeu_si256
			((void *)&rx_pkts[i + 4],
			 _mm256_loadu_si256((void *)&sw_ring[i + 4]));
#endif

		__m512i raw_desc0_3, raw_desc4_7;

		const __m128i raw_desc7 =
			_mm_load_si128((void *)(rxdp + 7));
		rte_compiler_barrier();
		const __m128i raw_desc6 =
			_mm_load_si128((void *)(rxdp + 6));
		rte_compiler_barrier();
		const __m128i raw_desc5 =
			_mm_load_si128((void *)(rxdp + 5));
		rte_compiler_barrier();
		const __m128i raw_desc4 =
			_mm_load_si128((void *)(rxdp + 4));
		rte_compiler_barrier();
		const __m128i raw_desc3 =
			_mm_load_si128((void *)(rxdp + 3));
		rte_compiler_barrier();
		const __m128i raw_desc2 =
			_mm_load_si128((void *)(rxdp + 2));
		rte_compiler_barrier();
		const __m128i raw_desc1 =
			_mm_load_si128((void *)(rxdp + 1));
		rte_compiler_barrier();
		const __m128i raw_desc0 =
			_mm_load_si128((void *)(rxdp + 0));

		raw_desc4_7 = _mm512_broadcast_i32x4(raw_desc4);
		raw_desc4_7 = _mm512_inserti32x4(raw_desc4_7, raw_desc5, 1);
		raw_desc4_7 = _mm512_inserti32x4(raw_desc4_7, raw_desc6, 2);
		raw_desc4_7 = _mm512_inserti32x4(raw_desc4_7, raw_desc7, 3);
		raw_desc0_3 = _mm512_broadcast_i32x4(raw_desc0);
		raw_desc0_3 = _mm512_inserti32x4(raw_desc0_3, raw_desc1, 1);
		raw_desc0_3 = _mm512_inserti32x4(raw_desc0_3, raw_desc2, 2);
		raw_desc0_3 = _mm512_inserti32x4(raw_desc0_3, raw_desc3, 3);

		if (split_packet) {
			int j;

			for (j = 0; j < IAVF_DESCS_PER_LOOP_AVX; j++)
				rte_mbuf_prefetch_part2(rx_pkts[i + j]);
		}

		/**
		 * convert descriptors 4-7 into mbufs, re-arrange fields.
		 * Then write into the mbuf.
		 */
		__m512i mb4_7 = _mm512_shuffle_epi8(raw_desc4_7, shuf_msk);

		mb4_7 = _mm512_add_epi32(mb4_7, crc_adjust);
#ifdef IAVF_RX_PTYPE_OFFLOAD
		/**
		 * to get packet types, ptype is located in bit16-25
		 * of each 128bits
		 */
		const __m512i ptype_mask =
			_mm512_set1_epi16(IAVF_RX_FLEX_DESC_PTYPE_M);
		const __m512i ptypes4_7 =
			_mm512_and_si512(raw_desc4_7, ptype_mask);
		const __m256i ptypes6_7 = _mm512_extracti64x4_epi64(ptypes4_7, 1);
		const __m256i ptypes4_5 = _mm512_extracti64x4_epi64(ptypes4_7, 0);
		const uint16_t ptype7 = _mm256_extract_epi16(ptypes6_7, 9);
		const uint16_t ptype6 = _mm256_extract_epi16(ptypes6_7, 1);
		const uint16_t ptype5 = _mm256_extract_epi16(ptypes4_5, 9);
		const uint16_t ptype4 = _mm256_extract_epi16(ptypes4_5, 1);

		const __m512i ptype4_7 = _mm512_set_epi32
			(0, 0, 0, type_table[ptype7],
			 0, 0, 0, type_table[ptype6],
			 0, 0, 0, type_table[ptype5],
			 0, 0, 0, type_table[ptype4]);
		mb4_7 = _mm512_mask_blend_epi32(0x1111, mb4_7, ptype4_7);
#endif

		/**
		 * convert descriptors 0-3 into mbufs, re-arrange fields.
		 * Then write into the mbuf.
		 */
		__m512i mb0_3 = _mm512_shuffle_epi8(raw_desc0_3, shuf_msk);

		mb0_3 = _mm512_add_epi32(mb0_3, crc_adjust);
#ifdef IAVF_RX_PTYPE_OFFLOAD
		/**
		 * to get packet types, ptype is located in bit16-25
		 * of each 128bits
		 */
		const __m512i ptypes0_3 =
			_mm512_and_si512(raw_desc0_3, ptype_mask);
		const __m256i ptypes2_3 = _mm512_extracti64x4_epi64(ptypes0_3, 1);
		const __m256i ptypes0_1 = _mm512_extracti64x4_epi64(ptypes0_3, 0);
		const uint16_t ptype3 = _mm256_extract_epi16(ptypes2_3, 9);
		const uint16_t ptype2 = _mm256_extract_epi16(ptypes2_3, 1);
		const uint16_t ptype1 = _mm256_extract_epi16(ptypes0_1, 9);
		const uint16_t ptype0 = _mm256_extract_epi16(ptypes0_1, 1);

		const __m512i ptype0_3 = _mm512_set_epi32
			(0, 0, 0, type_table[ptype3],
			 0, 0, 0, type_table[ptype2],
			 0, 0, 0, type_table[ptype1],
			 0, 0, 0, type_table[ptype0]);
		mb0_3 = _mm512_mask_blend_epi32(0x1111, mb0_3, ptype0_3);
#endif

		/**
		 * use permute/extract to get status content
		 * After the operations, the packets status flags are in the
		 * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
		 */
		/* merge the status bits into one register */
		const __m512i status_permute_msk = _mm512_set_epi32
			(0, 0, 0, 0,
			 0, 0, 0, 0,
			 22, 30, 6, 14,
			 18, 26, 2, 10);
		const __m512i raw_status0_7 = _mm512_permutex2var_epi32
			(raw_desc4_7, status_permute_msk, raw_desc0_3);
		__m256i status0_7 = _mm512_extracti64x4_epi64
			(raw_status0_7, 0);

		/* now do flag manipulation */

		/* merge flags */
		__m256i mbuf_flags = _mm256_set1_epi32(0);
		__m256i vlan_flags = _mm256_setzero_si256();

		if (offload) {
#if defined(IAVF_RX_CSUM_OFFLOAD) || defined(IAVF_RX_VLAN_OFFLOAD) || defined(IAVF_RX_RSS_OFFLOAD)
			/* Status/Error flag masks */
			/**
			 * mask everything except Checksum Reports, RSS indication
			 * and VLAN indication.
			 * bit6:4 for IP/L4 checksum errors.
			 * bit12 is for RSS indication.
			 * bit13 is for VLAN indication.
			 */
			const __m256i flags_mask =
				_mm256_set1_epi32((7 << 4) | (1 << 12) | (1 << 13));
#endif
#ifdef IAVF_RX_CSUM_OFFLOAD
			/**
			 * data to be shuffled by the result of the flags mask shifted by 4
			 * bits. This gives us the l3_l4 flags.
			 */
			const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
					/* shift right 1 bit to make sure it does not exceed 255 */
					(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
					 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
					(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
					 RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
					(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
					 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
					(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
					 RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
					(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
					(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
					(RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
					(RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
					/* second 128-bits */
					0, 0, 0, 0, 0, 0, 0, 0,
					(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
					 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
					(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
					 RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
					(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
					 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
					(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
					 RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
					(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
					(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
					(RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
					(RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1);
			const __m256i cksum_mask =
				_mm256_set1_epi32(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
						  RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
						  RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);
#endif
#if defined(IAVF_RX_VLAN_OFFLOAD) || defined(IAVF_RX_RSS_OFFLOAD)
			/**
			 * data to be shuffled by result of flag mask, shifted down 12.
			 * If RSS(bit12)/VLAN(bit13) are set,
			 * shuffle moves appropriate flags in place.
			 */
			const __m256i rss_flags_shuf = _mm256_set_epi8
					(0, 0, 0, 0,
					 0, 0, 0, 0,
					 0, 0, 0, 0,
					 RTE_MBUF_F_RX_RSS_HASH, 0,
					 RTE_MBUF_F_RX_RSS_HASH, 0,
					 /* end up 128-bits */
					 0, 0, 0, 0,
					 0, 0, 0, 0,
					 0, 0, 0, 0,
					 RTE_MBUF_F_RX_RSS_HASH, 0,
					 RTE_MBUF_F_RX_RSS_HASH, 0);

			const __m256i vlan_flags_shuf = _mm256_set_epi8
					(0, 0, 0, 0,
					 0, 0, 0, 0,
					 0, 0, 0, 0,
					 RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
					 RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
					 0, 0,
					 /* end up 128-bits */
					 0, 0, 0, 0,
					 0, 0, 0, 0,
					 0, 0, 0, 0,
					 RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
					 RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
					 0, 0);
#endif

#if defined(IAVF_RX_CSUM_OFFLOAD) || defined(IAVF_RX_VLAN_OFFLOAD) || defined(IAVF_RX_RSS_OFFLOAD)
			/* get only flag/error bits we want */
			const __m256i flag_bits =
				_mm256_and_si256(status0_7, flags_mask);
#endif
#ifdef IAVF_RX_CSUM_OFFLOAD
			/**
			 * l3_l4_error flags, shuffle, then shift to correct adjustment
			 * of flags in flags_shuf, and finally mask out extra bits
			 */
			__m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
					_mm256_srli_epi32(flag_bits, 4));
			l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
			l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
#endif
#if defined(IAVF_RX_VLAN_OFFLOAD) || defined(IAVF_RX_RSS_OFFLOAD)
			/* set rss and vlan flags */
			const __m256i rss_vlan_flag_bits =
				_mm256_srli_epi32(flag_bits, 12);
			const __m256i rss_flags =
				_mm256_shuffle_epi8(rss_flags_shuf,
						    rss_vlan_flag_bits);

			if (rxq->rx_flags == IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1)
				vlan_flags =
					_mm256_shuffle_epi8(vlan_flags_shuf,
							    rss_vlan_flag_bits);

			const __m256i rss_vlan_flags =
				_mm256_or_si256(rss_flags, vlan_flags);
#endif

#ifdef IAVF_RX_CSUM_OFFLOAD
			mbuf_flags = _mm256_or_si256(mbuf_flags, l3_l4_flags);
#endif
#if defined(IAVF_RX_VLAN_OFFLOAD) || defined(IAVF_RX_RSS_OFFLOAD)
			mbuf_flags = _mm256_or_si256(mbuf_flags, rss_vlan_flags);
#endif
		}

#ifdef IAVF_RX_FDIR_OFFLOAD
		if (rxq->fdir_enabled) {
			const __m512i fdir_permute_mask = _mm512_set_epi32
				(0, 0, 0, 0,
				 0, 0, 0, 0,
				 7, 15, 23, 31,
				 3, 11, 19, 27);
			__m512i fdir_tmp = _mm512_permutex2var_epi32
				(raw_desc0_3, fdir_permute_mask, raw_desc4_7);
			const __m256i fdir_id0_7 = _mm512_extracti64x4_epi64
				(fdir_tmp, 0);
			const __m256i fdir_flags =
				flex_rxd_to_fdir_flags_vec_avx512(fdir_id0_7);

			/* merge with fdir_flags */
			mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_flags);

			/* write to mbuf: have to use scalar store here */
			rx_pkts[i + 0]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 3);

			rx_pkts[i + 1]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 7);

			rx_pkts[i + 2]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 2);

			rx_pkts[i + 3]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 6);

			rx_pkts[i + 4]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 1);

			rx_pkts[i + 5]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 5);

			rx_pkts[i + 6]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 0);

			rx_pkts[i + 7]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 4);
		} /* if() on fdir_enabled */
#endif
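
		/*
		 * The scalar extracts above use indexes (3, 7, 2, 6, 1, 5, 0,
		 * 4) because fdir_id0_7 holds the flow IDs in the same
		 * interleaved [1, 3, 5, 7, 0, 2, 4, 6] order as status0_7.
		 */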

		__m256i mb4_5 = _mm512_extracti64x4_epi64(mb4_7, 0);
		__m256i mb6_7 = _mm512_extracti64x4_epi64(mb4_7, 1);
		__m256i mb0_1 = _mm512_extracti64x4_epi64(mb0_3, 0);
		__m256i mb2_3 = _mm512_extracti64x4_epi64(mb0_3, 1);

#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
		if (offload) {
#ifdef IAVF_RX_RSS_OFFLOAD
			/**
			 * needs to load 2nd 16B of each desc for RSS hash parsing,
			 * will cause performance drop to get into this context.
			 */
			if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
			    DEV_RX_OFFLOAD_RSS_HASH ||
			    rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
				/* load bottom half of every 32B desc */
				const __m128i raw_desc_bh7 =
					_mm_load_si128
						((void *)(&rxdp[7].wb.status_error1));
				rte_compiler_barrier();
				const __m128i raw_desc_bh6 =
					_mm_load_si128
						((void *)(&rxdp[6].wb.status_error1));
				rte_compiler_barrier();
				const __m128i raw_desc_bh5 =
					_mm_load_si128
						((void *)(&rxdp[5].wb.status_error1));
				rte_compiler_barrier();
				const __m128i raw_desc_bh4 =
					_mm_load_si128
						((void *)(&rxdp[4].wb.status_error1));
				rte_compiler_barrier();
				const __m128i raw_desc_bh3 =
					_mm_load_si128
						((void *)(&rxdp[3].wb.status_error1));
				rte_compiler_barrier();
				const __m128i raw_desc_bh2 =
					_mm_load_si128
						((void *)(&rxdp[2].wb.status_error1));
				rte_compiler_barrier();
				const __m128i raw_desc_bh1 =
					_mm_load_si128
						((void *)(&rxdp[1].wb.status_error1));
				rte_compiler_barrier();
				const __m128i raw_desc_bh0 =
					_mm_load_si128
						((void *)(&rxdp[0].wb.status_error1));

				__m256i raw_desc_bh6_7 =
					_mm256_inserti128_si256
						(_mm256_castsi128_si256(raw_desc_bh6),
						 raw_desc_bh7, 1);
				__m256i raw_desc_bh4_5 =
					_mm256_inserti128_si256
						(_mm256_castsi128_si256(raw_desc_bh4),
						 raw_desc_bh5, 1);
				__m256i raw_desc_bh2_3 =
					_mm256_inserti128_si256
						(_mm256_castsi128_si256(raw_desc_bh2),
						 raw_desc_bh3, 1);
				__m256i raw_desc_bh0_1 =
					_mm256_inserti128_si256
						(_mm256_castsi128_si256(raw_desc_bh0),
						 raw_desc_bh1, 1);

				if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
				    DEV_RX_OFFLOAD_RSS_HASH) {
					/**
					 * to shift the 32b RSS hash value to the
					 * highest 32b of each 128b before mask
					 */
					__m256i rss_hash6_7 =
						_mm256_slli_epi64
						(raw_desc_bh6_7, 32);
					__m256i rss_hash4_5 =
						_mm256_slli_epi64
						(raw_desc_bh4_5, 32);
					__m256i rss_hash2_3 =
						_mm256_slli_epi64
						(raw_desc_bh2_3, 32);
					__m256i rss_hash0_1 =
						_mm256_slli_epi64
						(raw_desc_bh0_1, 32);

					const __m256i rss_hash_msk =
						_mm256_set_epi32
						(0xFFFFFFFF, 0, 0, 0,
						 0xFFFFFFFF, 0, 0, 0);

					rss_hash6_7 = _mm256_and_si256
							(rss_hash6_7, rss_hash_msk);
					rss_hash4_5 = _mm256_and_si256
							(rss_hash4_5, rss_hash_msk);
					rss_hash2_3 = _mm256_and_si256
							(rss_hash2_3, rss_hash_msk);
					rss_hash0_1 = _mm256_and_si256
							(rss_hash0_1, rss_hash_msk);

					mb6_7 = _mm256_or_si256
						(mb6_7, rss_hash6_7);
					mb4_5 = _mm256_or_si256
						(mb4_5, rss_hash4_5);
					mb2_3 = _mm256_or_si256
						(mb2_3, rss_hash2_3);
					mb0_1 = _mm256_or_si256
						(mb0_1, rss_hash0_1);
				}
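
				/*
				 * The hash lands in the top dword of each
				 * 128-bit half because mbuf->hash.rss sits at
				 * rx_descriptor_fields1 + 12; shuf_msk
				 * deliberately left that dword zero so a plain
				 * OR can merge it in.
				 */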

				if (rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
					/* merge the status/error-1 bits into one register */
					const __m256i status1_4_7 =
						_mm256_unpacklo_epi32
						(raw_desc_bh6_7,
						 raw_desc_bh4_5);
					const __m256i status1_0_3 =
						_mm256_unpacklo_epi32
						(raw_desc_bh2_3,
						 raw_desc_bh0_1);

					const __m256i status1_0_7 =
						_mm256_unpacklo_epi64
						(status1_4_7, status1_0_3);

					const __m256i l2tag2p_flag_mask =
						_mm256_set1_epi32
						(1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S);

					__m256i l2tag2p_flag_bits =
						_mm256_and_si256
						(status1_0_7, l2tag2p_flag_mask);

					l2tag2p_flag_bits =
						_mm256_srli_epi32
						(l2tag2p_flag_bits,
						 IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S);

					const __m256i l2tag2_flags_shuf =
						_mm256_set_epi8
						(0, 0, 0, 0,
						 0, 0, 0, 0,
						 0, 0, 0, 0,
						 0, 0,
						 RTE_MBUF_F_RX_VLAN |
						 RTE_MBUF_F_RX_VLAN_STRIPPED,
						 0,
						 /* end up 128-bits */
						 0, 0, 0, 0,
						 0, 0, 0, 0,
						 0, 0, 0, 0,
						 0, 0,
						 RTE_MBUF_F_RX_VLAN |
						 RTE_MBUF_F_RX_VLAN_STRIPPED,
						 0);

					vlan_flags =
						_mm256_shuffle_epi8
						(l2tag2_flags_shuf,
						 l2tag2p_flag_bits);

					/* merge with vlan_flags */
					mbuf_flags = _mm256_or_si256
							(mbuf_flags, vlan_flags);

					/* L2TAG2_2 */
					__m256i vlan_tci6_7 =
						_mm256_slli_si256
						(raw_desc_bh6_7, 4);
					__m256i vlan_tci4_5 =
						_mm256_slli_si256
						(raw_desc_bh4_5, 4);
					__m256i vlan_tci2_3 =
						_mm256_slli_si256
						(raw_desc_bh2_3, 4);
					__m256i vlan_tci0_1 =
						_mm256_slli_si256
						(raw_desc_bh0_1, 4);

					const __m256i vlan_tci_msk =
						_mm256_set_epi32
						(0, 0xFFFF0000, 0, 0,
						 0, 0xFFFF0000, 0, 0);

					vlan_tci6_7 = _mm256_and_si256
							(vlan_tci6_7, vlan_tci_msk);
					vlan_tci4_5 = _mm256_and_si256
							(vlan_tci4_5, vlan_tci_msk);
					vlan_tci2_3 = _mm256_and_si256
							(vlan_tci2_3, vlan_tci_msk);
					vlan_tci0_1 = _mm256_and_si256
							(vlan_tci0_1, vlan_tci_msk);

					mb6_7 = _mm256_or_si256
						(mb6_7, vlan_tci6_7);
					mb4_5 = _mm256_or_si256
						(mb4_5, vlan_tci4_5);
					mb2_3 = _mm256_or_si256
						(mb2_3, vlan_tci2_3);
					mb0_1 = _mm256_or_si256
						(mb0_1, vlan_tci0_1);
				}
			} /* if() on RSS hash parsing */
#endif
		}
#endif

		/**
		 * At this point, we have the 8 sets of flags in the low 16-bits
		 * of each 32-bit value in mbuf_flags.
		 * We want to extract these, and merge them with the mbuf init
		 * data so we can do a single write to the mbuf to set the flags
		 * and all the other initialization fields. Extracting the
		 * appropriate flags means that we have to do a shift and blend
		 * for each mbuf before we do the write. However, we can also
		 * add in the previously computed rx_descriptor fields to
		 * make a single 256-bit write per mbuf.
		 */
		/* check the structure matches expectations */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
				 offsetof(struct rte_mbuf, rearm_data) + 8);
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
				 RTE_ALIGN(offsetof(struct rte_mbuf,
						    rearm_data),
					   16));

		/* build up data and do writes */
		__m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
			rearm6, rearm7;
		rearm6 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(mbuf_flags, 8),
					    0x04);
		rearm4 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(mbuf_flags, 4),
					    0x04);
		rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04);
		rearm0 = _mm256_blend_epi32(mbuf_init,
					    _mm256_srli_si256(mbuf_flags, 4),
					    0x04);
		/* permute to add in the rx_descriptor e.g. rss fields */
		rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
		rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
		rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
		rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);

		/* write to mbuf */
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data,
				    rearm6);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data,
				    rearm4);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data,
				    rearm2);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data,
				    rearm0);

		/* repeat for the odd mbufs */
		const __m256i odd_flags =
			_mm256_castsi128_si256
				(_mm256_extracti128_si256(mbuf_flags, 1));
		rearm7 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(odd_flags, 8),
					    0x04);
		rearm5 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(odd_flags, 4),
					    0x04);
		rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04);
		rearm1 = _mm256_blend_epi32(mbuf_init,
					    _mm256_srli_si256(odd_flags, 4),
					    0x04);
		/* since odd mbufs are already in hi 128-bits use blend */
		rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
		rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
		rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
		rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
		/* again write to mbufs */
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data,
				    rearm7);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data,
				    rearm5);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data,
				    rearm3);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data,
				    rearm1);

		/* extract and record EOP bit */
		if (split_packet) {
			const __m128i eop_mask =
				_mm_set1_epi16(1 <<
					       IAVF_RX_FLEX_DESC_STATUS0_EOF_S);
			const __m256i eop_bits256 = _mm256_and_si256(status0_7,
								     eop_check);
			/* pack status bits into a single 128-bit register */
			const __m128i eop_bits =
				_mm_packus_epi32
					(_mm256_castsi256_si128(eop_bits256),
					 _mm256_extractf128_si256(eop_bits256,
								  1));
			/**
			 * flip bits, and mask out the EOP bit, which is now
			 * a split-packet bit i.e. !EOP, rather than EOP one.
			 */
			__m128i split_bits = _mm_andnot_si128(eop_bits,
							      eop_mask);
			/**
			 * eop bits are out of order, so we need to shuffle them
			 * back into order again. In doing so, only use low 8
			 * bits, which acts like another pack instruction
			 * The original order is (hi->lo): 1,3,5,7,0,2,4,6
			 * [Since we use epi8, the 16-bit positions are
			 * multiplied by 2 in the eop_shuffle value.]
			 */
			__m128i eop_shuffle =
				_mm_set_epi8(/* zero hi 64b */
					     0xFF, 0xFF, 0xFF, 0xFF,
					     0xFF, 0xFF, 0xFF, 0xFF,
					     /* move values to lo 64b */
					     8, 0, 10, 2,
					     12, 4, 14, 6);
			split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
			*(uint64_t *)split_packet =
				_mm_cvtsi128_si64(split_bits);
			split_packet += IAVF_DESCS_PER_LOOP_AVX;
		}

		/* perform dd_check */
		status0_7 = _mm256_and_si256(status0_7, dd_check);
		status0_7 = _mm256_packs_epi32(status0_7,
					       _mm256_setzero_si256());

		uint64_t burst = __builtin_popcountll
					(_mm_cvtsi128_si64
						(_mm256_extracti128_si256
							(status0_7, 1)));
		burst += __builtin_popcountll
				(_mm_cvtsi128_si64
					(_mm256_castsi256_si128(status0_7)));
		received += burst;
		if (burst != IAVF_DESCS_PER_LOOP_AVX)
			break;
	}

	/* update tail pointers */
	rxq->rx_tail += received;
	rxq->rx_tail &= (rxq->nb_rx_desc - 1);
	if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep aligned */
		rxq->rx_tail--;
		received--;
	}
	rxq->rxrearm_nb += received;
	return received;
}

/**
 * Notice:
 * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
 */
uint16_t
iavf_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
			  uint16_t nb_pkts)
{
	return _iavf_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts,
					      NULL, false);
}

/**
 * Notice:
 * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
 */
uint16_t
iavf_recv_pkts_vec_avx512_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
				   uint16_t nb_pkts)
{
	return _iavf_recv_raw_pkts_vec_avx512_flex_rxd(rx_queue, rx_pkts,
						       nb_pkts, NULL, false);
}

/**
 * vPMD receive routine that reassembles single burst of 32 scattered packets
 * Notice:
 * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
 */
static __rte_always_inline uint16_t
iavf_recv_scattered_burst_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
				     uint16_t nb_pkts, bool offload)
{
	struct iavf_rx_queue *rxq = rx_queue;
	uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _iavf_recv_raw_pkts_vec_avx512(rxq, rx_pkts, nb_pkts,
							  split_flags, offload);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;

	if (!rxq->pkt_first_seg &&
	    split_fl64[0] == 0 && split_fl64[1] == 0 &&
	    split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	unsigned int i = 0;

	if (!rxq->pkt_first_seg) {
		/* find the first split flag, and only reassemble then */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
		rxq->pkt_first_seg = rx_pkts[i];
	}
	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
				      &split_flags[i]);
}

/**
 * vPMD receive routine that reassembles scattered packets.
 * Main receive routine that can handle arbitrary burst sizes
 * Notice:
 * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
 */
static __rte_always_inline uint16_t
iavf_recv_scattered_pkts_vec_avx512_cmn(void *rx_queue, struct rte_mbuf **rx_pkts,
					uint16_t nb_pkts, bool offload)
{
	uint16_t retval = 0;

	while (nb_pkts > IAVF_VPMD_RX_MAX_BURST) {
		uint16_t burst = iavf_recv_scattered_burst_vec_avx512(rx_queue,
				rx_pkts + retval, IAVF_VPMD_RX_MAX_BURST, offload);
		retval += burst;
		nb_pkts -= burst;
		if (burst < IAVF_VPMD_RX_MAX_BURST)
			return retval;
	}
	return retval + iavf_recv_scattered_burst_vec_avx512(rx_queue,
				rx_pkts + retval, nb_pkts, offload);
}

uint16_t
iavf_recv_scattered_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
				    uint16_t nb_pkts)
{
	return iavf_recv_scattered_pkts_vec_avx512_cmn(rx_queue, rx_pkts,
						       nb_pkts, false);
}

/**
 * vPMD receive routine that reassembles single burst of
 * 32 scattered packets for flex RxD
 * Notice:
 * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
 */
static __rte_always_inline uint16_t
iavf_recv_scattered_burst_vec_avx512_flex_rxd(void *rx_queue,
					      struct rte_mbuf **rx_pkts,
					      uint16_t nb_pkts,
					      bool offload)
{
	struct iavf_rx_queue *rxq = rx_queue;
	uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _iavf_recv_raw_pkts_vec_avx512_flex_rxd(rxq,
					rx_pkts, nb_pkts, split_flags, offload);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;

	if (!rxq->pkt_first_seg &&
	    split_fl64[0] == 0 && split_fl64[1] == 0 &&
	    split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	unsigned int i = 0;

	if (!rxq->pkt_first_seg) {
		/* find the first split flag, and only reassemble then */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
		rxq->pkt_first_seg = rx_pkts[i];
	}
	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
				      &split_flags[i]);
}

/**
 * vPMD receive routine that reassembles scattered packets for flex RxD.
 * Main receive routine that can handle arbitrary burst sizes
 * Notice:
 * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
 */
static __rte_always_inline uint16_t
iavf_recv_scattered_pkts_vec_avx512_flex_rxd_cmn(void *rx_queue,
						 struct rte_mbuf **rx_pkts,
						 uint16_t nb_pkts,
						 bool offload)
{
	uint16_t retval = 0;

	while (nb_pkts > IAVF_VPMD_RX_MAX_BURST) {
		uint16_t burst =
			iavf_recv_scattered_burst_vec_avx512_flex_rxd
				(rx_queue, rx_pkts + retval,
				 IAVF_VPMD_RX_MAX_BURST, offload);
		retval += burst;
		nb_pkts -= burst;
		if (burst < IAVF_VPMD_RX_MAX_BURST)
			return retval;
	}
	return retval + iavf_recv_scattered_burst_vec_avx512_flex_rxd(rx_queue,
				rx_pkts + retval, nb_pkts, offload);
}

uint16_t
iavf_recv_scattered_pkts_vec_avx512_flex_rxd(void *rx_queue,
					     struct rte_mbuf **rx_pkts,
					     uint16_t nb_pkts)
{
	return iavf_recv_scattered_pkts_vec_avx512_flex_rxd_cmn(rx_queue,
								rx_pkts,
								nb_pkts,
								false);
}

uint16_t
iavf_recv_pkts_vec_avx512_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts)
{
	return _iavf_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts,
					      nb_pkts, NULL, true);
}

uint16_t
iavf_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
					    struct rte_mbuf **rx_pkts,
					    uint16_t nb_pkts)
{
	return iavf_recv_scattered_pkts_vec_avx512_cmn(rx_queue, rx_pkts,
						       nb_pkts, true);
}

uint16_t
iavf_recv_pkts_vec_avx512_flex_rxd_offload(void *rx_queue,
					   struct rte_mbuf **rx_pkts,
					   uint16_t nb_pkts)
{
	return _iavf_recv_raw_pkts_vec_avx512_flex_rxd(rx_queue, rx_pkts,
						       nb_pkts, NULL, true);
}

uint16_t
iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload(void *rx_queue,
						     struct rte_mbuf **rx_pkts,
						     uint16_t nb_pkts)
{
	return iavf_recv_scattered_pkts_vec_avx512_flex_rxd_cmn(rx_queue,
								rx_pkts,
								nb_pkts,
								true);
}

static __rte_always_inline int
iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq)
{
	struct iavf_tx_vec_entry *txep;
	uint32_t n;
	uint32_t i;
	int nb_free = 0;
	struct rte_mbuf *m, *free[IAVF_VPMD_TX_MAX_FREE_BUF];

	/* check DD bits on threshold descriptor */
	if ((txq->tx_ring[txq->next_dd].cmd_type_offset_bsz &
	     rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
	    rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE))
		return 0;

	n = txq->rs_thresh;

	 /* first buffer to free from S/W ring is at index
	  * tx_next_dd - (tx_rs_thresh-1)
	  */
	txep = (void *)txq->sw_ring;
	txep += txq->next_dd - (n - 1);

	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
		struct rte_mempool *mp = txep[0].mbuf->pool;
		struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
								rte_lcore_id());
		void **cache_objs;

		if (!cache || cache->len == 0)
			goto normal;

		cache_objs = &cache->objs[cache->len];

		if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
			rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n);
			goto done;
		}

		/* The cache works as follows:
		 *   1. Add the objects to the cache.
		 *   2. Anything greater than the cache min value (if it
		 *   crosses the cache flush threshold) is flushed to the ring.
		 */
		/* Add elements back into the cache */
		uint32_t copied = 0;
		/* n is multiple of 32 */
		while (copied < n) {
			const __m512i a = _mm512_loadu_si512(&txep[copied]);
			const __m512i b = _mm512_loadu_si512(&txep[copied + 8]);
			const __m512i c = _mm512_loadu_si512(&txep[copied + 16]);
			const __m512i d = _mm512_loadu_si512(&txep[copied + 24]);

			_mm512_storeu_si512(&cache_objs[copied], a);
			_mm512_storeu_si512(&cache_objs[copied + 8], b);
			_mm512_storeu_si512(&cache_objs[copied + 16], c);
			_mm512_storeu_si512(&cache_objs[copied + 24], d);
			copied += 32;
		}
		cache->len += n;

		if (cache->len >= cache->flushthresh) {
			rte_mempool_ops_enqueue_bulk(mp,
						     &cache->objs[cache->size],
						     cache->len - cache->size);
			cache->len = cache->size;
		}
		goto done;
	}

normal:
	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
	if (likely(m)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (likely(m)) {
				if (likely(m->pool == free[0]->pool)) {
					free[nb_free++] = m;
				} else {
					rte_mempool_put_bulk(free[0]->pool,
							     (void *)free,
							     nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (m)
				rte_mempool_put(m->pool, m);
		}
	}

done:
	/* buffers were freed, update counters */
	txq->nb_free = (uint16_t)(txq->nb_free + txq->rs_thresh);
	txq->next_dd = (uint16_t)(txq->next_dd + txq->rs_thresh);
	if (txq->next_dd >= txq->nb_tx_desc)
		txq->next_dd = (uint16_t)(txq->rs_thresh - 1);

	return txq->rs_thresh;
}
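
/*
 * The vectorized branch above relies on the guarantees behind
 * DEV_TX_OFFLOAD_MBUF_FAST_FREE: every transmitted mbuf is non-segmented, has
 * refcnt == 1 and comes from the same mempool, so the sw_ring entries can be
 * copied straight into the mempool cache without calling
 * rte_pktmbuf_prefree_seg() on each one. The (n & 31) == 0 check keeps the
 * 4 x 64-byte copy loop free of a remainder pass.
 */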

static __rte_always_inline void
tx_backlog_entry_avx512(struct iavf_tx_vec_entry *txep,
			struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	int i;

	for (i = 0; i < (int)nb_pkts; ++i)
		txep[i].mbuf = tx_pkts[i];
}

static __rte_always_inline void
iavf_vtx1(volatile struct iavf_tx_desc *txdp,
	  struct rte_mbuf *pkt, uint64_t flags, bool offload)
{
	uint64_t high_qw =
		(IAVF_TX_DESC_DTYPE_DATA |
		 ((uint64_t)flags << IAVF_TXD_QW1_CMD_SHIFT) |
		 ((uint64_t)pkt->data_len << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT));
	if (offload)
		iavf_txd_enable_offload(pkt, &high_qw);

	__m128i descriptor = _mm_set_epi64x(high_qw,
					    pkt->buf_iova + pkt->data_off);
	_mm_storeu_si128((__m128i *)txdp, descriptor);
}

#define IAVF_TX_LEN_MASK 0xAA
#define IAVF_TX_OFF_MASK 0x55
static __rte_always_inline void
iavf_vtx(volatile struct iavf_tx_desc *txdp,
	 struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags,
	 bool offload)
{
	const uint64_t hi_qw_tmpl = (IAVF_TX_DESC_DTYPE_DATA |
			((uint64_t)flags << IAVF_TXD_QW1_CMD_SHIFT));

	/* if unaligned on 32-bit boundary, do one to align */
	if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) {
		iavf_vtx1(txdp, *pkt, flags, offload);
		nb_pkts--, txdp++, pkt++;
	}

	/* do 4 at a time while possible, in bursts */
	for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) {
		uint64_t hi_qw3 =
			hi_qw_tmpl |
			((uint64_t)pkt[3]->data_len <<
			 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT);
		if (offload)
			iavf_txd_enable_offload(pkt[3], &hi_qw3);
		uint64_t hi_qw2 =
			hi_qw_tmpl |
			((uint64_t)pkt[2]->data_len <<
			 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT);
		if (offload)
			iavf_txd_enable_offload(pkt[2], &hi_qw2);
		uint64_t hi_qw1 =
			hi_qw_tmpl |
			((uint64_t)pkt[1]->data_len <<
			 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT);
		if (offload)
			iavf_txd_enable_offload(pkt[1], &hi_qw1);
		uint64_t hi_qw0 =
			hi_qw_tmpl |
			((uint64_t)pkt[0]->data_len <<
			 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT);
		if (offload)
			iavf_txd_enable_offload(pkt[0], &hi_qw0);

		__m512i desc0_3 =
			_mm512_set_epi64
				(hi_qw3,
				 pkt[3]->buf_iova + pkt[3]->data_off,
				 hi_qw2,
				 pkt[2]->buf_iova + pkt[2]->data_off,
				 hi_qw1,
				 pkt[1]->buf_iova + pkt[1]->data_off,
				 hi_qw0,
				 pkt[0]->buf_iova + pkt[0]->data_off);
		_mm512_storeu_si512((void *)txdp, desc0_3);
	}

	/* do any last ones */
	while (nb_pkts) {
		iavf_vtx1(txdp, *pkt, flags, offload);
		txdp++, pkt++, nb_pkts--;
	}
}
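
/*
 * Rough scalar equivalent of the 4-at-a-time loop above, for reference: each
 * 16-byte TX descriptor is a (buffer_addr, cmd_type_offset_bsz) qword pair,
 * and _mm512_set_epi64 lists its arguments high-to-low, so pkt[0] lands in
 * the lowest descriptor:
 *
 *	for (j = 0; j < 4; j++) {
 *		txdp[j].buffer_addr = pkt[j]->buf_iova + pkt[j]->data_off;
 *		txdp[j].cmd_type_offset_bsz = hi_qw[j];
 *	}
 */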

static __rte_always_inline uint16_t
iavf_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
				 uint16_t nb_pkts, bool offload)
{
	struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
	volatile struct iavf_tx_desc *txdp;
	struct iavf_tx_vec_entry *txep;
	uint16_t n, nb_commit, tx_id;
	/* bit2 is reserved and must be set to 1 according to Spec */
	uint64_t flags = IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_ICRC;
	uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;

	/* crossing the rs_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->rs_thresh);

	if (txq->nb_free < txq->free_thresh)
		iavf_tx_free_bufs_avx512(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = (void *)txq->sw_ring;
	txep += tx_id;

	txq->nb_free = (uint16_t)(txq->nb_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {
		tx_backlog_entry_avx512(txep, tx_pkts, n);

		iavf_vtx(txdp, tx_pkts, n - 1, flags, offload);
		tx_pkts += (n - 1);
		txdp += (n - 1);

		iavf_vtx1(txdp, *tx_pkts++, rs, offload);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->next_rs = (uint16_t)(txq->rs_thresh - 1);

		/* avoid reaching the end of the ring */
		txdp = &txq->tx_ring[tx_id];
		txep = (void *)txq->sw_ring;
		txep += tx_id;
	}

	tx_backlog_entry_avx512(txep, tx_pkts, nb_commit);

	iavf_vtx(txdp, tx_pkts, nb_commit, flags, offload);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->next_rs) {
		txq->tx_ring[txq->next_rs].cmd_type_offset_bsz |=
			rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) <<
					 IAVF_TXD_QW1_CMD_SHIFT);
		txq->next_rs =
			(uint16_t)(txq->next_rs + txq->rs_thresh);
	}

	txq->tx_tail = tx_id;

	IAVF_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);

	return nb_pkts;
}

static __rte_always_inline uint16_t
iavf_xmit_pkts_vec_avx512_cmn(void *tx_queue, struct rte_mbuf **tx_pkts,
			      uint16_t nb_pkts, bool offload)
{
	uint16_t nb_tx = 0;
	struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;

	while (nb_pkts) {
		uint16_t ret, num;

		num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
		ret = iavf_xmit_fixed_burst_vec_avx512(tx_queue, &tx_pkts[nb_tx],
						       num, offload);
		nb_tx += ret;
		nb_pkts -= ret;
		if (ret < num)
			break;
	}

	return nb_tx;
}

uint16_t
iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
	return iavf_xmit_pkts_vec_avx512_cmn(tx_queue, tx_pkts, nb_pkts, false);
}

static void __rte_cold
iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
{
	unsigned int i;
	const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
	struct iavf_tx_vec_entry *swr = (void *)txq->sw_ring;

	if (!txq->sw_ring || txq->nb_free == max_desc)
		return;

	i = txq->next_dd - txq->rs_thresh + 1;
	if (txq->tx_tail < i) {
		for (; i < txq->nb_tx_desc; i++) {
			rte_pktmbuf_free_seg(swr[i].mbuf);
			swr[i].mbuf = NULL;
		}
		i = 0;
	}
}

static const struct iavf_txq_ops avx512_vec_txq_ops = {
	.release_mbufs = iavf_tx_queue_release_mbufs_avx512,
};

int __rte_cold
iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq)
{
	txq->ops = &avx512_vec_txq_ops;
	return 0;
}

uint16_t
iavf_xmit_pkts_vec_avx512_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts)
{
	return iavf_xmit_pkts_vec_avx512_cmn(tx_queue, tx_pkts, nb_pkts, true);
}