/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include "ice_rxtx_vec_common.h"

#include <x86intrin.h>

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

#define ICE_DESCS_PER_LOOP_AVX 8
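/*
 * Illustrative note: eight descriptors are handled per loop iteration.
 * Each 16B descriptor read is loaded as an __m128i, pairs are combined
 * into __m256i, and quads into __m512i, so one iteration consumes
 * 8 * 16B = 128B of descriptor data, i.e. two zmm registers' worth.
 */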
static __rte_always_inline void
ice_rxq_rearm(struct ice_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union ice_rx_flex_desc *rxdp;
	struct ice_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
	struct rte_mempool_cache *cache = rte_mempool_default_cache(rxq->mp,
			rte_lcore_id());

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* We need to pull 'n' more MBUFs into the software ring */
	if (cache->len < ICE_RXQ_REARM_THRESH) {
		uint32_t req = ICE_RXQ_REARM_THRESH + (cache->size -
				cache->len);

		int ret = rte_mempool_ops_dequeue_bulk(rxq->mp,
				&cache->objs[cache->len], req);
		if (ret == 0) {
			cache->len += req;
		} else {
			if (rxq->rxrearm_nb + ICE_RXQ_REARM_THRESH >=
			    rxq->nb_rx_desc) {
				__m128i dma_addr0;

				dma_addr0 = _mm_setzero_si128();
				for (i = 0; i < ICE_DESCS_PER_LOOP; i++) {
					rxep[i].mbuf = &rxq->fake_mbuf;
					_mm_store_si128
						((__m128i *)&rxdp[i].read,
						 dma_addr0);
				}
			}
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
					ICE_RXQ_REARM_THRESH;
			return;
		}
	}
	const __m512i iova_offsets = _mm512_set1_epi64
		(offsetof(struct rte_mbuf, buf_iova));
	const __m512i headroom = _mm512_set1_epi64(RTE_PKTMBUF_HEADROOM);

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
	/* shuffle the iova into correct slots. Values 4-7 will contain
	 * zeros, so use 7 for a zero-value.
	 */
	const __m512i permute_idx = _mm512_set_epi64(7, 7, 3, 1, 7, 7, 2, 0);
#else
	const __m512i permute_idx = _mm512_set_epi64(7, 3, 6, 2, 5, 1, 4, 0);
#endif
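	/*
	 * Worked example for the 32B-descriptor permute (an illustration,
	 * not extra logic): with iovas0 = [A0 A1 A2 A3 0 0 0 0] (qwords,
	 * low to high), permute_idx (7, 7, 3, 1, 7, 7, 2, 0) yields
	 * desc0_1 = [A0 A2 0 0 A1 A3 0 0]: A0/A1 land in the pkt_addr
	 * qwords of descriptors 0 and 1, A2/A3 in their unused hdr_addr
	 * qwords, and the 8-byte in-lane shift below then derives
	 * desc2_3 = [A2 0 0 0 A3 0 0 0].
	 */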
	/* fill up the rxd in vector, process 8 mbufs in one loop */
	for (i = 0; i < ICE_RXQ_REARM_THRESH / 8; i++) {
		const __m512i mbuf_ptrs = _mm512_loadu_si512
			(&cache->objs[cache->len - 8]);
		_mm512_store_si512(rxep, mbuf_ptrs);

		/* gather iova of mbuf0-7 into one zmm reg */
		const __m512i iova_base_addrs = _mm512_i64gather_epi64
			(_mm512_add_epi64(mbuf_ptrs, iova_offsets),
			 0, /* base */
			 1  /* scale */);
		const __m512i iova_addrs = _mm512_add_epi64(iova_base_addrs,
				headroom);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
		const __m512i iovas0 = _mm512_castsi256_si512
			(_mm512_extracti64x4_epi64(iova_addrs, 0));
		const __m512i iovas1 = _mm512_castsi256_si512
			(_mm512_extracti64x4_epi64(iova_addrs, 1));

		/* permute leaves iova 2-3 in hdr_addr of desc 0-1,
		 * but these are ignored by the driver since header split is
		 * not enabled. Similarly for desc 4 & 5.
		 */
		const __m512i desc0_1 = _mm512_permutexvar_epi64
			(permute_idx, iovas0);
		const __m512i desc2_3 = _mm512_bsrli_epi128(desc0_1, 8);

		const __m512i desc4_5 = _mm512_permutexvar_epi64
			(permute_idx, iovas1);
		const __m512i desc6_7 = _mm512_bsrli_epi128(desc4_5, 8);

		_mm512_store_si512((void *)rxdp, desc0_1);
		_mm512_store_si512((void *)(rxdp + 2), desc2_3);
		_mm512_store_si512((void *)(rxdp + 4), desc4_5);
		_mm512_store_si512((void *)(rxdp + 6), desc6_7);
#else
		/* permute leaves iova 4-7 in hdr_addr of desc 0-3,
		 * but these are ignored by the driver since header split is
		 * not enabled.
		 */
		const __m512i desc0_3 = _mm512_permutexvar_epi64
			(permute_idx, iova_addrs);
		const __m512i desc4_7 = _mm512_bsrli_epi128(desc0_3, 8);

		_mm512_store_si512((void *)rxdp, desc0_3);
		_mm512_store_si512((void *)(rxdp + 4), desc4_7);
#endif
		rxep += 8, rxdp += 8, cache->len -= 8;
	}
	rxq->rxrearm_start += ICE_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= ICE_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			   (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
}
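/*
 * Worked example of the tail computation above, assuming nb_rx_desc = 1024
 * and a rearm threshold of 64: after rearming slots 960..1023,
 * rxrearm_start wraps to 0 and the tail register is written with 1023, the
 * last freshly rearmed descriptor (the tail must always trail the next
 * slot to be rearmed by one).
 */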
static inline uint16_t
_ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
			      struct rte_mbuf **rx_pkts,
			      uint16_t nb_pkts, uint8_t *split_packet)
{
	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
	const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
			0, rxq->mbuf_initializer);
	struct ice_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail];
	volatile union ice_rx_flex_desc *rxdp = rxq->rx_ring + rxq->rx_tail;

	rte_prefetch0(rxdp);

	/* nb_pkts has to be floor-aligned to ICE_DESCS_PER_LOOP_AVX */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, ICE_DESCS_PER_LOOP_AVX);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > ICE_RXQ_REARM_THRESH)
		ice_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.status_error0 &
	      rte_cpu_to_le_32(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
		return 0;
	/* constants used in processing loop */
	const __m512i crc_adjust =
		_mm512_set4_epi32
			(0,             /* ignore non-length fields */
			 -rxq->crc_len, /* sub crc on data_len */
			 -rxq->crc_len, /* sub crc on pkt_len */
			 0              /* ignore non-length fields */
			);

	/* 8 packets DD mask, LSB in each 32-bit value */
	const __m256i dd_check = _mm256_set1_epi32(1);

	/* 8 packets EOP mask, second-LSB in each 32-bit value */
	const __m256i eop_check = _mm256_slli_epi32(dd_check,
			ICE_RX_DESC_STATUS_EOF_S);
	/* mask to shuffle from desc. to mbuf (4 descriptors) */
	const __m512i shuf_msk =
		_mm512_set4_epi32
			(/* rss hash parsed separately */
			 0xFFFFFFFF,
			 /* octet 10~11, 16 bits vlan_macip */
			 /* octet 4~5, 16 bits data_len */
			 11 << 24 | 10 << 16 | 5 << 8 | 4,
			 /* skip hi 16 bits of pkt_len, zero out */
			 /* octet 4~5, 16 bits pkt_len */
			 0xFFFF << 16 | 5 << 8 | 4,
			 /* pkt_type set as unknown */
			 0xFFFFFFFF
			);
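	/*
	 * Reading of the shuffle mask, per 128-bit descriptor lane (0xFF
	 * indices produce zero bytes):
	 *   dword0 = zeroed                  pkt_type, filled from ptype_tbl
	 *   dword1 = desc bytes 4-5          pkt_len, high 16 bits zeroed
	 *   dword2 = desc bytes 4-5, 10-11   data_len and vlan_tci
	 *   dword3 = zeroed                  rss hash, parsed separately
	 */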
	/**
	 * compile-time check that the above crc and shuffle layout is
	 * correct.
	 * NOTE: the first field (lowest address) is given last in the
	 * set_epi calls above.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
	/* Status/Error flag masks */
	/**
	 * mask everything except Checksum Reports, RSS indication
	 * and VLAN indication.
	 * bit6:4 for IP/L4 checksum errors.
	 * bit12 is for RSS indication.
	 * bit13 is for VLAN indication.
	 */
	const __m256i flags_mask =
		_mm256_set1_epi32((7 << 4) | (1 << 12) | (1 << 13));
	/**
	 * data to be shuffled by the result of the flags mask shifted by 4
	 * bits. This gives us the l3_l4 flags.
	 */
	const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
			/* shift right 1 bit to make sure it does not exceed 255 */
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
			 PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
			 PKT_RX_IP_CKSUM_GOOD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
			 PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
			 PKT_RX_IP_CKSUM_GOOD) >> 1,
			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
			(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
			/* second 128-bits */
			0, 0, 0, 0, 0, 0, 0, 0,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
			 PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
			 PKT_RX_IP_CKSUM_GOOD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
			 PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
			 PKT_RX_IP_CKSUM_GOOD) >> 1,
			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
			(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1);
	const __m256i cksum_mask =
		_mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
				  PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
				  PKT_RX_EIP_CKSUM_BAD);
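	/*
	 * Worked example of the checksum-flag lookup: for a clean packet
	 * the descriptor reports IPE = L4E = EIPE = 0 in status bits 4..6,
	 * so (flag_bits >> 4) & 0x7 == 0 selects the last set_epi8 entry,
	 * (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1. The shift
	 * left by 1 applied after the shuffle restores the real flag
	 * values, and cksum_mask then clears everything outside the
	 * checksum-flag bits.
	 */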
	/**
	 * data to be shuffled by result of flag mask, shifted down 12.
	 * If RSS(bit12)/VLAN(bit13) are set,
	 * shuffle moves appropriate flags in place.
	 */
	const __m256i rss_vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
			0, 0, 0, 0,
			0, 0, 0, 0,
			PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			PKT_RX_RSS_HASH, 0,
			/* end up 128-bits */
			0, 0, 0, 0,
			0, 0, 0, 0,
			0, 0, 0, 0,
			PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			PKT_RX_RSS_HASH, 0);
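	/*
	 * Worked example of the RSS/VLAN lookup: (flag_bits >> 12) & 0x3 is
	 * the shuffle index, so index 3 (RSS valid + VLAN stripped) selects
	 * PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, index 1
	 * (RSS only) selects PKT_RX_RSS_HASH, index 2 (VLAN only) selects
	 * PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, and index 0 selects no flags.
	 */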
	uint16_t i, received;

	for (i = 0, received = 0; i < nb_pkts;
	     i += ICE_DESCS_PER_LOOP_AVX,
	     rxdp += ICE_DESCS_PER_LOOP_AVX) {
		/* step 1, copy over 8 mbuf pointers to rx_pkts array */
		_mm256_storeu_si256((void *)&rx_pkts[i],
				    _mm256_loadu_si256((void *)&sw_ring[i]));
#ifdef RTE_ARCH_X86_64
		_mm256_storeu_si256
			((void *)&rx_pkts[i + 4],
			 _mm256_loadu_si256((void *)&sw_ring[i + 4]));
#endif
		__m512i raw_desc0_3, raw_desc4_7;
		__m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7;

		/* load in descriptors, in reverse order */
		const __m128i raw_desc7 =
			_mm_load_si128((void *)(rxdp + 7));
		rte_compiler_barrier();
		const __m128i raw_desc6 =
			_mm_load_si128((void *)(rxdp + 6));
		rte_compiler_barrier();
		const __m128i raw_desc5 =
			_mm_load_si128((void *)(rxdp + 5));
		rte_compiler_barrier();
		const __m128i raw_desc4 =
			_mm_load_si128((void *)(rxdp + 4));
		rte_compiler_barrier();
		const __m128i raw_desc3 =
			_mm_load_si128((void *)(rxdp + 3));
		rte_compiler_barrier();
		const __m128i raw_desc2 =
			_mm_load_si128((void *)(rxdp + 2));
		rte_compiler_barrier();
		const __m128i raw_desc1 =
			_mm_load_si128((void *)(rxdp + 1));
		rte_compiler_barrier();
		const __m128i raw_desc0 =
			_mm_load_si128((void *)(rxdp + 0));

		raw_desc6_7 =
			_mm256_inserti128_si256
				(_mm256_castsi128_si256(raw_desc6),
				 raw_desc7, 1);
		raw_desc4_5 =
			_mm256_inserti128_si256
				(_mm256_castsi128_si256(raw_desc4),
				 raw_desc5, 1);
		raw_desc2_3 =
			_mm256_inserti128_si256
				(_mm256_castsi128_si256(raw_desc2),
				 raw_desc3, 1);
		raw_desc0_1 =
			_mm256_inserti128_si256
				(_mm256_castsi128_si256(raw_desc0),
				 raw_desc1, 1);

		raw_desc4_7 =
			_mm512_inserti64x4
				(_mm512_castsi256_si512(raw_desc4_5),
				 raw_desc6_7, 1);
		raw_desc0_3 =
			_mm512_inserti64x4
				(_mm512_castsi256_si512(raw_desc0_1),
				 raw_desc2_3, 1);

		if (split_packet) {
			int j;

			for (j = 0; j < ICE_DESCS_PER_LOOP_AVX; j++)
				rte_mbuf_prefetch_part2(rx_pkts[i + j]);
		}
		/**
		 * convert descriptors 0-7 into mbufs, re-arrange fields.
		 * Then write into the mbuf.
		 */
		__m512i mb4_7 = _mm512_shuffle_epi8(raw_desc4_7, shuf_msk);
		__m512i mb0_3 = _mm512_shuffle_epi8(raw_desc0_3, shuf_msk);

		mb4_7 = _mm512_add_epi32(mb4_7, crc_adjust);
		mb0_3 = _mm512_add_epi32(mb0_3, crc_adjust);
		/**
		 * to get packet types, ptype is located in bit16-25
		 * of each 128bits
		 */
		const __m512i ptype_mask =
			_mm512_set1_epi16(ICE_RX_FLEX_DESC_PTYPE_M);
		const __m512i ptypes4_7 =
			_mm512_and_si512(raw_desc4_7, ptype_mask);
		const __m512i ptypes0_3 =
			_mm512_and_si512(raw_desc0_3, ptype_mask);

		const __m256i ptypes6_7 =
			_mm512_extracti64x4_epi64(ptypes4_7, 1);
		const __m256i ptypes4_5 =
			_mm512_extracti64x4_epi64(ptypes4_7, 0);
		const __m256i ptypes2_3 =
			_mm512_extracti64x4_epi64(ptypes0_3, 1);
		const __m256i ptypes0_1 =
			_mm512_extracti64x4_epi64(ptypes0_3, 0);
		const uint16_t ptype7 = _mm256_extract_epi16(ptypes6_7, 9);
		const uint16_t ptype6 = _mm256_extract_epi16(ptypes6_7, 1);
		const uint16_t ptype5 = _mm256_extract_epi16(ptypes4_5, 9);
		const uint16_t ptype4 = _mm256_extract_epi16(ptypes4_5, 1);
		const uint16_t ptype3 = _mm256_extract_epi16(ptypes2_3, 9);
		const uint16_t ptype2 = _mm256_extract_epi16(ptypes2_3, 1);
		const uint16_t ptype1 = _mm256_extract_epi16(ptypes0_1, 9);
		const uint16_t ptype0 = _mm256_extract_epi16(ptypes0_1, 1);

		const __m512i ptype4_7 = _mm512_set_epi32
			(0, 0, 0, ptype_tbl[ptype7],
			 0, 0, 0, ptype_tbl[ptype6],
			 0, 0, 0, ptype_tbl[ptype5],
			 0, 0, 0, ptype_tbl[ptype4]);
		const __m512i ptype0_3 = _mm512_set_epi32
			(0, 0, 0, ptype_tbl[ptype3],
			 0, 0, 0, ptype_tbl[ptype2],
			 0, 0, 0, ptype_tbl[ptype1],
			 0, 0, 0, ptype_tbl[ptype0]);

		mb4_7 = _mm512_mask_blend_epi32(0x1111, mb4_7, ptype4_7);
		mb0_3 = _mm512_mask_blend_epi32(0x1111, mb0_3, ptype0_3);
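		/*
		 * Note on the 0x1111 blend mask: it selects dword 0 of each
		 * 128-bit lane, i.e. exactly the packet_type slot that the
		 * shuffle mask zeroed earlier, so the ptype_tbl lookups are
		 * merged in without disturbing the pkt_len/data_len/vlan
		 * dwords.
		 */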
		__m256i mb4_5 = _mm512_extracti64x4_epi64(mb4_7, 0);
		__m256i mb6_7 = _mm512_extracti64x4_epi64(mb4_7, 1);
		__m256i mb0_1 = _mm512_extracti64x4_epi64(mb0_3, 0);
		__m256i mb2_3 = _mm512_extracti64x4_epi64(mb0_3, 1);

		/**
		 * use permute/extract to get status content
		 * After the operations, the packets status flags are in the
		 * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
		 */
		/* merge the status bits into one register */
		const __m512i status_permute_msk = _mm512_set_epi32
			(0, 0, 0, 0,
			 0, 0, 0, 0,
			 22, 30, 6, 14,
			 18, 26, 2, 10);
		const __m512i raw_status0_7 = _mm512_permutex2var_epi32
			(raw_desc4_7, status_permute_msk, raw_desc0_3);
		__m256i status0_7 = _mm512_extracti64x4_epi64
			(raw_status0_7, 0);
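		/*
		 * Worked example of the permute: index 10 in the lowest lane
		 * selects 32-bit word 10 of raw_desc4_7, the status_error0
		 * word of descriptor 6 (indices >= 16 select from
		 * raw_desc0_3 instead). status0_7 therefore holds, lane 0 to
		 * lane 7, the status words of descriptors 6, 4, 2, 0,
		 * 7, 5, 3, 1.
		 */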
		/* now do flag manipulation */

		/* get only flag/error bits we want */
		const __m256i flag_bits =
			_mm256_and_si256(status0_7, flags_mask);
		/**
		 * l3_l4_error flags: shuffle, then shift to correct the
		 * adjustment made in l3_l4_flags_shuf, and finally mask out
		 * extra bits
		 */
		__m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
				_mm256_srli_epi32(flag_bits, 4));
		l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
		l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
		/* set rss and vlan flags */
		const __m256i rss_vlan_flag_bits =
			_mm256_srli_epi32(flag_bits, 12);
		const __m256i rss_vlan_flags =
			_mm256_shuffle_epi8(rss_vlan_flags_shuf,
					    rss_vlan_flag_bits);

		/* merge flags */
		const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
				rss_vlan_flags);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
		/**
		 * needs to load 2nd 16B of each desc for RSS hash parsing,
		 * which causes a performance drop when this path is taken.
		 */
		if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_RSS_HASH) {
			/* load bottom half of every 32B desc */
			const __m128i raw_desc_bh7 =
				_mm_load_si128
					((void *)(&rxdp[7].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh6 =
				_mm_load_si128
					((void *)(&rxdp[6].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh5 =
				_mm_load_si128
					((void *)(&rxdp[5].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh4 =
				_mm_load_si128
					((void *)(&rxdp[4].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh3 =
				_mm_load_si128
					((void *)(&rxdp[3].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh2 =
				_mm_load_si128
					((void *)(&rxdp[2].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh1 =
				_mm_load_si128
					((void *)(&rxdp[1].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh0 =
				_mm_load_si128
					((void *)(&rxdp[0].wb.status_error1));

			__m256i raw_desc_bh6_7 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc_bh6),
					 raw_desc_bh7, 1);
			__m256i raw_desc_bh4_5 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc_bh4),
					 raw_desc_bh5, 1);
			__m256i raw_desc_bh2_3 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc_bh2),
					 raw_desc_bh3, 1);
			__m256i raw_desc_bh0_1 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc_bh0),
					 raw_desc_bh1, 1);

			/**
			 * to shift the 32b RSS hash value to the
			 * highest 32b of each 128b before mask
			 */
			__m256i rss_hash6_7 =
				_mm256_slli_epi64(raw_desc_bh6_7, 32);
			__m256i rss_hash4_5 =
				_mm256_slli_epi64(raw_desc_bh4_5, 32);
			__m256i rss_hash2_3 =
				_mm256_slli_epi64(raw_desc_bh2_3, 32);
			__m256i rss_hash0_1 =
				_mm256_slli_epi64(raw_desc_bh0_1, 32);

			__m256i rss_hash_msk =
				_mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
						 0xFFFFFFFF, 0, 0, 0);

			rss_hash6_7 = _mm256_and_si256
					(rss_hash6_7, rss_hash_msk);
			rss_hash4_5 = _mm256_and_si256
					(rss_hash4_5, rss_hash_msk);
			rss_hash2_3 = _mm256_and_si256
					(rss_hash2_3, rss_hash_msk);
			rss_hash0_1 = _mm256_and_si256
					(rss_hash0_1, rss_hash_msk);

			mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
			mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
			mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
			mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
		} /* if() on RSS hash parsing */
#endif
		/**
		 * At this point, we have the 8 sets of flags in the low
		 * 16-bits of each 32-bit value in mbuf_flags.
		 * We want to extract these, and merge them with the mbuf init
		 * data so we can do a single write to the mbuf to set the flags
		 * and all the other initialization fields. Extracting the
		 * appropriate flags means that we have to do a shift and blend
		 * for each mbuf before we do the write. However, we can also
		 * add in the previously computed rx_descriptor fields to
		 * make a single 256-bit write per mbuf.
		 */
		/* check the structure matches expectations */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
				 offsetof(struct rte_mbuf, rearm_data) + 8);
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
				 RTE_ALIGN(offsetof(struct rte_mbuf,
						    rearm_data),
					   16));

		/* build up data and do writes */
		__m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
			rearm6, rearm7;

		rearm6 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(mbuf_flags, 8),
					    0x04);
		rearm4 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(mbuf_flags, 4),
					    0x04);
		rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04);
		rearm0 = _mm256_blend_epi32(mbuf_init,
					    _mm256_srli_si256(mbuf_flags, 4),
					    0x04);

		/* permute to add in the rx_descriptor e.g. rss fields */
		rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
		rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
		rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
		rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);

		/* write to mbuf */
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data,
				    rearm6);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data,
				    rearm4);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data,
				    rearm2);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data,
				    rearm0);

		/* repeat for the odd mbufs */
		const __m256i odd_flags =
			_mm256_castsi128_si256
				(_mm256_extracti128_si256(mbuf_flags, 1));
		rearm7 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(odd_flags, 8),
					    0x04);
		rearm5 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(odd_flags, 4),
					    0x04);
		rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04);
		rearm1 = _mm256_blend_epi32(mbuf_init,
					    _mm256_srli_si256(odd_flags, 4),
					    0x04);

		/* since odd mbufs are already in hi 128-bits use blend */
		rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
		rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
		rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
		rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
		/* again write to mbufs */
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data,
				    rearm7);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data,
				    rearm5);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data,
				    rearm3);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data,
				    rearm1);
		/* extract and record EOP bit */
		if (split_packet) {
			const __m128i eop_mask =
				_mm_set1_epi16(1 << ICE_RX_DESC_STATUS_EOF_S);
			const __m256i eop_bits256 = _mm256_and_si256(status0_7,
					eop_check);
			/* pack status bits into a single 128-bit register */
			const __m128i eop_bits =
				_mm_packus_epi32
					(_mm256_castsi256_si128(eop_bits256),
					 _mm256_extractf128_si256(eop_bits256,
								  1));
			/**
			 * flip bits, and mask out the EOP bit, which is now
			 * a split-packet bit, i.e. !EOP, rather than EOP.
			 */
			__m128i split_bits = _mm_andnot_si128(eop_bits,
					eop_mask);
			/**
			 * eop bits are out of order, so we need to shuffle them
			 * back into order again. In doing so, only use low 8
			 * bits, which acts like another pack instruction.
			 * The original order is (hi->lo): 1,3,5,7,0,2,4,6
			 * [Since we use epi8, the 16-bit positions are
			 * multiplied by 2 in the eop_shuffle value.]
			 */
			__m128i eop_shuffle =
				_mm_set_epi8(/* zero hi 64b */
					     0xFF, 0xFF, 0xFF, 0xFF,
					     0xFF, 0xFF, 0xFF, 0xFF,
					     /* move values to lo 64b */
					     8, 0, 10, 2,
					     12, 4, 14, 6);
			split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
			*(uint64_t *)split_packet =
				_mm_cvtsi128_si64(split_bits);
			split_packet += ICE_DESCS_PER_LOOP_AVX;
		}
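		/*
		 * After this store, split_packet holds one byte per
		 * descriptor, in descriptor order, that is non-zero exactly
		 * when the buffer is *not* the end of a packet; the
		 * scattered-receive wrappers below scan these bytes to find
		 * where reassembly has to start.
		 */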
		/* perform dd_check */
		status0_7 = _mm256_and_si256(status0_7, dd_check);
		status0_7 = _mm256_packs_epi32(status0_7,
					       _mm256_setzero_si256());

		uint64_t burst = __builtin_popcountll
					(_mm_cvtsi128_si64
						(_mm256_extracti128_si256
							(status0_7, 1)));
		burst += __builtin_popcountll
				(_mm_cvtsi128_si64
					(_mm256_castsi256_si128(status0_7)));
		received += burst;
		if (burst != ICE_DESCS_PER_LOOP_AVX)
			break;
	}

	/* update tail pointers */
	rxq->rx_tail += received;
	rxq->rx_tail &= (rxq->nb_rx_desc - 1);
	if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */
		rxq->rx_tail--;
		received--;
	}
	rxq->rxrearm_nb += received;
	return received;
}
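/*
 * Worked example of the alignment trim above: if 5 packets were received
 * and the updated rx_tail is odd, one packet is withheld (rx_tail and
 * received both drop by 1, to 4) so that the tail stays 2-descriptor
 * aligned for any following AVX2 processing of the same ring.
 */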
/**
 * Notice:
 * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
 */
uint16_t
ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts)
{
	return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL);
}
/**
 * vPMD receive routine that reassembles a single burst of 32 scattered
 * packets.
 * Notice:
 * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
 */
static uint16_t
ice_recv_scattered_burst_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
				    uint16_t nb_pkts)
{
	struct ice_rx_queue *rxq = rx_queue;
	uint8_t split_flags[ICE_VPMD_RX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx512(rxq, rx_pkts, nb_pkts,
							 split_flags);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;

	if (!rxq->pkt_first_seg &&
	    split_fl64[0] == 0 && split_fl64[1] == 0 &&
	    split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	unsigned int i = 0;

	if (!rxq->pkt_first_seg) {
		/* find the first split flag, and only reassemble then */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
		rxq->pkt_first_seg = rx_pkts[i];
	}
	return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
					     &split_flags[i]);
}
/**
 * vPMD receive routine that reassembles scattered packets.
 * Main receive routine that can handle arbitrary burst sizes.
 * Notice:
 * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
 */
uint16_t
ice_recv_scattered_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
				   uint16_t nb_pkts)
{
	uint16_t retval = 0;

	while (nb_pkts > ICE_VPMD_RX_BURST) {
		uint16_t burst = ice_recv_scattered_burst_vec_avx512(rx_queue,
				rx_pkts + retval, ICE_VPMD_RX_BURST);
		retval += burst;
		nb_pkts -= burst;
		if (burst < ICE_VPMD_RX_BURST)
			return retval;
	}
	return retval + ice_recv_scattered_burst_vec_avx512(rx_queue,
				rx_pkts + retval, nb_pkts);
}
static __rte_always_inline int
ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
{
	struct ice_vec_tx_entry *txep;
	uint32_t n;
	uint32_t i;
	int nb_free = 0;
	struct rte_mbuf *m, *free[ICE_TX_MAX_FREE_BUF_SZ];

	/* check DD bits on threshold descriptor */
	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
	     rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
	    rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
		return 0;

	n = txq->tx_rs_thresh;

	/* first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_rs_thresh - 1)
	 */
	txep = (void *)txq->sw_ring;
	txep += txq->tx_next_dd - (n - 1);

	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
		struct rte_mempool *mp = txep[0].mbuf->pool;
		struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
				rte_lcore_id());
		void **cache_objs = &cache->objs[cache->len];

		if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
			rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n);
			goto done;
		}

		/* The cache follows the following algorithm:
		 * 1. Add the objects to the cache.
		 * 2. Anything greater than the cache min value (if it
		 *    crosses the cache flush threshold) is flushed to the
		 *    mempool ring.
		 */
		/* Add elements back into the cache */
		uint32_t copied = 0;
		/* n is multiple of 32 */
		while (copied < n) {
			const __m512i a = _mm512_loadu_si512(&txep[copied]);
			const __m512i b = _mm512_loadu_si512(&txep[copied + 8]);
			const __m512i c = _mm512_loadu_si512(&txep[copied + 16]);
			const __m512i d = _mm512_loadu_si512(&txep[copied + 24]);

			_mm512_storeu_si512(&cache_objs[copied], a);
			_mm512_storeu_si512(&cache_objs[copied + 8], b);
			_mm512_storeu_si512(&cache_objs[copied + 16], c);
			_mm512_storeu_si512(&cache_objs[copied + 24], d);
			copied += 32;
		}
		cache->len += n;

		if (cache->len >= cache->flushthresh) {
			rte_mempool_ops_enqueue_bulk
				(mp, &cache->objs[cache->size],
				 cache->len - cache->size);
			cache->len = cache->size;
		}
		goto done;
	}

	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
	if (likely(m)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (likely(m)) {
				if (likely(m->pool == free[0]->pool)) {
					free[nb_free++] = m;
				} else {
					rte_mempool_put_bulk(free[0]->pool,
							     (void *)free,
							     nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (m)
				rte_mempool_put(m->pool, m);
		}
	}

done:
	/* buffers were freed, update counters */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
	if (txq->tx_next_dd >= txq->nb_tx_desc)
		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

	return txq->tx_rs_thresh;
}
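/*
 * Worked example for the fast-free path above, assuming tx_rs_thresh = 32:
 * the (n & 31) == 0 guard admits the batch, and the 32 sw-ring entries are
 * copied into the per-lcore cache with four 512-bit load/store pairs
 * (8 pointers each); the mempool ring is only touched if cache->len then
 * crosses the cache flush threshold, in which case the excess
 * (cache->len - cache->size) objects are enqueued in one bulk call.
 */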
static __rte_always_inline void
ice_vtx1(volatile struct ice_tx_desc *txdp,
	 struct rte_mbuf *pkt, uint64_t flags)
{
	uint64_t high_qw =
		(ICE_TX_DESC_DTYPE_DATA |
		 ((uint64_t)flags << ICE_TXD_QW1_CMD_S) |
		 ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S));

	__m128i descriptor = _mm_set_epi64x(high_qw,
			pkt->buf_iova + pkt->data_off);
	_mm_store_si128((__m128i *)txdp, descriptor);
}
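/*
 * Example of the quadword packing in ice_vtx1: for a 60-byte
 * single-segment frame, high_qw = ICE_TX_DESC_DTYPE_DATA |
 * (flags << ICE_TXD_QW1_CMD_S) | (60 << ICE_TXD_QW1_TX_BUF_SZ_S), and the
 * low qword is the data start address (buf_iova + data_off), matching the
 * 16B data descriptor layout (buf_addr, cmd_type_offset_bsz).
 */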
static __rte_always_inline void
ice_vtx(volatile struct ice_tx_desc *txdp,
	struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
	const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA |
			((uint64_t)flags << ICE_TXD_QW1_CMD_S));

	for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) {
		uint64_t hi_qw3 =
			hi_qw_tmpl |
			((uint64_t)pkt[3]->data_len <<
			 ICE_TXD_QW1_TX_BUF_SZ_S);
		uint64_t hi_qw2 =
			hi_qw_tmpl |
			((uint64_t)pkt[2]->data_len <<
			 ICE_TXD_QW1_TX_BUF_SZ_S);
		uint64_t hi_qw1 =
			hi_qw_tmpl |
			((uint64_t)pkt[1]->data_len <<
			 ICE_TXD_QW1_TX_BUF_SZ_S);
		uint64_t hi_qw0 =
			hi_qw_tmpl |
			((uint64_t)pkt[0]->data_len <<
			 ICE_TXD_QW1_TX_BUF_SZ_S);

		__m512i desc0_3 =
			_mm512_set_epi64
				(hi_qw3,
				 pkt[3]->buf_iova + pkt[3]->data_off,
				 hi_qw2,
				 pkt[2]->buf_iova + pkt[2]->data_off,
				 hi_qw1,
				 pkt[1]->buf_iova + pkt[1]->data_off,
				 hi_qw0,
				 pkt[0]->buf_iova + pkt[0]->data_off);
		_mm512_storeu_si512((void *)txdp, desc0_3);
	}

	/* do any last ones */
	while (nb_pkts) {
		ice_vtx1(txdp, *pkt, flags);
		txdp++, pkt++, nb_pkts--;
	}
}
static __rte_always_inline void
ice_tx_backlog_entry_avx512(struct ice_vec_tx_entry *txep,
			    struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	int i;

	for (i = 0; i < (int)nb_pkts; ++i)
		txep[i].mbuf = tx_pkts[i];
}
static inline uint16_t
ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
				uint16_t nb_pkts)
{
	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
	volatile struct ice_tx_desc *txdp;
	struct ice_vec_tx_entry *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = ICE_TD_CMD;
	uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD;

	/* crossing the tx_rs_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

	if (txq->nb_tx_free < txq->tx_free_thresh)
		ice_tx_free_bufs_avx512(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = (void *)txq->sw_ring;
	txep += tx_id;

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {
		ice_tx_backlog_entry_avx512(txep, tx_pkts, n);

		ice_vtx(txdp, tx_pkts, n - 1, flags);
		tx_pkts += (n - 1);
		txdp += (n - 1);

		ice_vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* avoid reaching the end of the ring */
		txdp = txq->tx_ring;
		txep = (void *)txq->sw_ring;
	}

	ice_tx_backlog_entry_avx512(txep, tx_pkts, nb_commit);

	ice_vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
			rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
					 ICE_TXD_QW1_CMD_S);
		txq->tx_next_rs =
			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
	}

	txq->tx_tail = tx_id;

	ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);

	return nb_pkts;
}
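/*
 * Worked example of the ring-wrap split above: with nb_tx_desc = 512,
 * tx_tail = 510 and nb_commit = 8, the first chunk covers slots 510..511
 * (n = 2); ice_vtx writes n - 1 = 1 plain descriptor and ice_vtx1 writes
 * the second with the RS bit set, then txdp/txep rewind to slot 0 and the
 * remaining 6 packets are written from the start of the ring.
 */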
uint16_t
ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
			 uint16_t nb_pkts)
{
	uint16_t nb_tx = 0;
	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;

	while (nb_pkts) {
		uint16_t ret, num;

		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
		ret = ice_xmit_fixed_burst_vec_avx512(tx_queue,
						      &tx_pkts[nb_tx], num);