/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include "ice_rxtx_vec_common.h"
#include "ice_rxtx_common_avx.h"

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

#define ICE_DESCS_PER_LOOP_AVX 8
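
/*
 * The AVX-512 Rx path below works on ICE_DESCS_PER_LOOP_AVX (8) descriptors
 * per loop iteration: two 512-bit registers each hold four 16-byte descriptor
 * writebacks, so one pass converts eight descriptors into eight mbufs.
 */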
static __rte_always_inline void
ice_rxq_rearm(struct ice_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union ice_rx_flex_desc *rxdp;
	struct ice_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
	struct rte_mempool_cache *cache = rte_mempool_default_cache(rxq->mp,
			rte_lcore_id());

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	if (unlikely(!cache))
		return ice_rxq_rearm_common(rxq, true);

	/* We need to pull 'n' more MBUFs into the software ring */
	if (cache->len < ICE_RXQ_REARM_THRESH) {
		uint32_t req = ICE_RXQ_REARM_THRESH + (cache->size -
				cache->len);

		int ret = rte_mempool_ops_dequeue_bulk(rxq->mp,
				&cache->objs[cache->len], req);
		if (ret == 0) {
			cache->len += req;
		} else {
			if (rxq->rxrearm_nb + ICE_RXQ_REARM_THRESH >=
			    rxq->nb_rx_desc) {
				__m128i dma_addr0;

				dma_addr0 = _mm_setzero_si128();
				for (i = 0; i < ICE_DESCS_PER_LOOP; i++) {
					rxep[i].mbuf = &rxq->fake_mbuf;
					_mm_store_si128
						((__m128i *)&rxdp[i].read,
						 dma_addr0);
				}
			}
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
				ICE_RXQ_REARM_THRESH;
			return;
		}
	}

	const __m512i iova_offsets = _mm512_set1_epi64
		(offsetof(struct rte_mbuf, buf_iova));
	const __m512i headroom = _mm512_set1_epi64(RTE_PKTMBUF_HEADROOM);
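
	/*
	 * Each descriptor's packet buffer address is the mbuf's buf_iova
	 * plus the fixed headroom.  The loop below gathers buf_iova for
	 * eight mbufs at once (using the mbuf pointers themselves as gather
	 * base addresses), adds the headroom, then permutes the addresses
	 * into the pkt_addr slots of the descriptors.
	 */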
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
	/* shuffle the iova into correct slots. Values 4-7 will contain
	 * zeros, so use 7 for a zero-value.
	 */
	const __m512i permute_idx = _mm512_set_epi64(7, 7, 3, 1, 7, 7, 2, 0);
#else
	const __m512i permute_idx = _mm512_set_epi64(7, 3, 6, 2, 5, 1, 4, 0);
#endif

	/* fill up the rxd in vector, process 8 mbufs in one loop */
	for (i = 0; i < ICE_RXQ_REARM_THRESH / 8; i++) {
		const __m512i mbuf_ptrs = _mm512_loadu_si512
			(&cache->objs[cache->len - 8]);
		_mm512_store_si512(rxep, mbuf_ptrs);

		/* gather iova of mbuf0-7 into one zmm reg */
		const __m512i iova_base_addrs = _mm512_i64gather_epi64
			(_mm512_add_epi64(mbuf_ptrs, iova_offsets),
			 0 /* base */, 1 /* scale */);
		const __m512i iova_addrs = _mm512_add_epi64(iova_base_addrs,
				headroom);

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
		const __m512i iovas0 = _mm512_castsi256_si512
			(_mm512_extracti64x4_epi64(iova_addrs, 0));
		const __m512i iovas1 = _mm512_castsi256_si512
			(_mm512_extracti64x4_epi64(iova_addrs, 1));

		/* permute leaves iova 2-3 in hdr_addr of desc 0-1
		 * but these are ignored by driver since header split not
		 * enabled. Similarly for desc 4 & 5.
		 */
		const __m512i desc0_1 = _mm512_permutexvar_epi64
			(permute_idx, iovas0);
		const __m512i desc2_3 = _mm512_bsrli_epi128(desc0_1, 8);

		const __m512i desc4_5 = _mm512_permutexvar_epi64
			(permute_idx, iovas1);
		const __m512i desc6_7 = _mm512_bsrli_epi128(desc4_5, 8);

		_mm512_store_si512((void *)rxdp, desc0_1);
		_mm512_store_si512((void *)(rxdp + 2), desc2_3);
		_mm512_store_si512((void *)(rxdp + 4), desc4_5);
		_mm512_store_si512((void *)(rxdp + 6), desc6_7);
#else
		/* permute leaves iova 4-7 in hdr_addr of desc 0-3
		 * but these are ignored by driver since header split not
		 * enabled.
		 */
		const __m512i desc0_3 = _mm512_permutexvar_epi64
			(permute_idx, iova_addrs);
		const __m512i desc4_7 = _mm512_bsrli_epi128(desc0_3, 8);

		_mm512_store_si512((void *)rxdp, desc0_3);
		_mm512_store_si512((void *)(rxdp + 4), desc4_7);
#endif
		rxep += 8, rxdp += 8, cache->len -= 8;
	}

	rxq->rxrearm_start += ICE_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= ICE_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			   (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
}
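
/*
 * For reference, a scalar sketch of what the vector fill above achieves
 * (illustrative only; header split is unused, so hdr_addr is ignored by
 * hardware):
 *
 *	for (i = 0; i < ICE_RXQ_REARM_THRESH; i++) {
 *		struct rte_mbuf *mb = rxep[i].mbuf;
 *		rxdp[i].read.pkt_addr = mb->buf_iova + RTE_PKTMBUF_HEADROOM;
 *	}
 */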
static inline __m256i
ice_flex_rxd_to_fdir_flags_vec_avx512(const __m256i fdir_id0_7)
{
#define FDID_MIS_MAGIC 0xFFFFFFFF
	RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR != (1 << 2));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
	const __m256i pkt_fdir_bit = _mm256_set1_epi32(RTE_MBUF_F_RX_FDIR |
			RTE_MBUF_F_RX_FDIR_ID);
	/* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
	const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
	__m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
			fdir_mis_mask);
	/* this XOR op inverts the fdir_mask */
	fdir_mask = _mm256_xor_si256(fdir_mask, fdir_mis_mask);
	const __m256i fdir_flags = _mm256_and_si256(fdir_mask, pkt_fdir_bit);

	return fdir_flags;
}
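
/*
 * Scalar equivalent of the mask trick above (sketch): for each packet,
 *
 *	flags = (flow_id != FDID_MIS_MAGIC) ?
 *		(RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID) : 0;
 *
 * The compare yields all-ones for a mismatch; the XOR inverts it so only
 * descriptors carrying a valid filter ID keep the two FDIR flags.
 */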
static __rte_always_inline uint16_t
_ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
			      struct rte_mbuf **rx_pkts,
			      uint16_t nb_pkts,
			      uint8_t *split_packet,
			      bool do_offload)
{
	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
	const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
			0, rxq->mbuf_initializer);
	struct ice_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail];
	volatile union ice_rx_flex_desc *rxdp = rxq->rx_ring + rxq->rx_tail;

	/* nb_pkts has to be floor-aligned to ICE_DESCS_PER_LOOP_AVX */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, ICE_DESCS_PER_LOOP_AVX);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > ICE_RXQ_REARM_THRESH)
		ice_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.status_error0 &
			rte_cpu_to_le_32(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
		return 0;

	/* constants used in processing loop */
	const __m512i crc_adjust =
		_mm512_set4_epi32
			(0,             /* ignore non-length fields */
			 -rxq->crc_len, /* sub crc on data_len */
			 -rxq->crc_len, /* sub crc on pkt_len */
			 0              /* ignore non-length fields */
			);

	/* 8 packets DD mask, LSB in each 32-bit value */
	const __m256i dd_check = _mm256_set1_epi32(1);

	/* 8 packets EOP mask, second-LSB in each 32-bit value */
	const __m256i eop_check = _mm256_slli_epi32(dd_check,
			ICE_RX_DESC_STATUS_EOF_S);

	/* mask to shuffle from desc. to mbuf (4 descriptors) */
	const __m512i shuf_msk =
		_mm512_set4_epi32
			(/* rss hash parsed separately */
			 0xFFFFFFFF,
			 /* octet 10~11, 16 bits vlan_macip */
			 /* octet 4~5, 16 bits data_len */
			 11 << 24 | 10 << 16 | 5 << 8 | 4,
			 /* skip hi 16 bits pkt_len, zero out */
			 /* octet 4~5, 16 bits pkt_len */
			 0xFFFF << 16 | 5 << 8 | 4,
			 /* pkt_type set as unknown */
			 0xFFFFFFFF
			);
	/**
	 * compile-time check the above crc and shuffle layout is correct.
	 * NOTE: the first field (lowest address) is given last in set_epi
	 * calls above.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
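
	/*
	 * In other words, per 16-byte descriptor lane the shuffle above
	 * copies descriptor bytes 4-5 into mbuf pkt_len (high 16 bits
	 * zeroed), bytes 4-5 again into data_len, and bytes 10-11 into
	 * vlan_tci, i.e. straight into rx_descriptor_fields1 at the offsets
	 * checked just above; crc_adjust then subtracts crc_len from both
	 * length fields.
	 */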
	/* following code block is for Rx Checksum Offload */
	/* Status/Error flag masks */
	/**
	 * mask everything except Checksum Reports, RSS indication
	 * and VLAN indication.
	 * bit6:4 for IP/L4 checksum errors.
	 * bit12 is for RSS indication.
	 * bit13 is for VLAN indication.
	 */
	const __m256i flags_mask =
		_mm256_set1_epi32((0xF << 4) | (1 << 12) | (1 << 13));
	/**
	 * data to be shuffled by the result of the flags mask shifted by 4
	 * bits. This gives us the l3_l4 flags.
	 */
	const __m256i l3_l4_flags_shuf =
		_mm256_set_epi8((RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 |
		 RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
		 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
		 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
		 RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
		 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
		 RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
		 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
		 RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
		 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
		 RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		/**
		 * second 128-bits
		 * shift right 20 bits to use the low two bits to indicate
		 * outer checksum status
		 * shift right 1 bit to make sure it does not exceed 255
		 */
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
		 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
		 RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
		 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
		 RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
		 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
		 RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
		 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
		 RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1);
	const __m256i cksum_mask =
		_mm256_set1_epi32(RTE_MBUF_F_RX_IP_CKSUM_MASK |
				  RTE_MBUF_F_RX_L4_CKSUM_MASK |
				  RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
				  RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK);
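
	/*
	 * Worked example of the lookup: status_error0 bits 7:4 (after the
	 * ">> 4" in the loop below) form the LUT index.  A descriptor with
	 * only the IP checksum error bit (bit 4) set gives index 1, whose
	 * LUT byte encodes (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 |
	 * RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1;
	 * the later "<< 1" and "<< 20" steps expand that byte back into full
	 * ol_flags values before cksum_mask trims the result.
	 */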
	/**
	 * data to be shuffled by result of flag mask, shifted down 12.
	 * If RSS(bit12)/VLAN(bit13) are set,
	 * shuffle moves appropriate flags in place.
	 */
	const __m256i rss_vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
			0, 0, 0, 0,
			0, 0, 0, 0,
			RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
			RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
			RTE_MBUF_F_RX_RSS_HASH, 0,
			0, 0, 0, 0,
			0, 0, 0, 0,
			0, 0, 0, 0,
			RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
			RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
			RTE_MBUF_F_RX_RSS_HASH, 0);
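
	/*
	 * The shuffle index here is (status >> 12) & 0x3: RSS-valid alone
	 * (bit 12) selects RTE_MBUF_F_RX_RSS_HASH, L2TAG1 present alone
	 * (bit 13) selects RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
	 * and both bits together select all three flags.
	 */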

	uint16_t i, received;

	for (i = 0, received = 0; i < nb_pkts;
	     i += ICE_DESCS_PER_LOOP_AVX,
	     rxdp += ICE_DESCS_PER_LOOP_AVX) {
		/* step 1, copy over 8 mbuf pointers to rx_pkts array */
		_mm256_storeu_si256((void *)&rx_pkts[i],
				_mm256_loadu_si256((void *)&sw_ring[i]));
#ifdef RTE_ARCH_X86_64
		_mm256_storeu_si256
				((void *)&rx_pkts[i + 4],
				_mm256_loadu_si256((void *)&sw_ring[i + 4]));
#endif

		__m512i raw_desc0_3, raw_desc4_7;
		__m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7;

		/* load in descriptors, in reverse order */
		const __m128i raw_desc7 =
			_mm_load_si128((void *)(rxdp + 7));
		rte_compiler_barrier();
		const __m128i raw_desc6 =
			_mm_load_si128((void *)(rxdp + 6));
		rte_compiler_barrier();
		const __m128i raw_desc5 =
			_mm_load_si128((void *)(rxdp + 5));
		rte_compiler_barrier();
		const __m128i raw_desc4 =
			_mm_load_si128((void *)(rxdp + 4));
		rte_compiler_barrier();
		const __m128i raw_desc3 =
			_mm_load_si128((void *)(rxdp + 3));
		rte_compiler_barrier();
		const __m128i raw_desc2 =
			_mm_load_si128((void *)(rxdp + 2));
		rte_compiler_barrier();
		const __m128i raw_desc1 =
			_mm_load_si128((void *)(rxdp + 1));
		rte_compiler_barrier();
		const __m128i raw_desc0 =
			_mm_load_si128((void *)(rxdp + 0));

		raw_desc6_7 =
			_mm256_inserti128_si256
				(_mm256_castsi128_si256(raw_desc6),
				 raw_desc7, 1);
		raw_desc4_5 =
			_mm256_inserti128_si256
				(_mm256_castsi128_si256(raw_desc4),
				 raw_desc5, 1);
		raw_desc2_3 =
			_mm256_inserti128_si256
				(_mm256_castsi128_si256(raw_desc2),
				 raw_desc3, 1);
		raw_desc0_1 =
			_mm256_inserti128_si256
				(_mm256_castsi128_si256(raw_desc0),
				 raw_desc1, 1);

		raw_desc4_7 =
			_mm512_inserti64x4
				(_mm512_castsi256_si512(raw_desc4_5),
				 raw_desc6_7, 1);
		raw_desc0_3 =
			_mm512_inserti64x4
				(_mm512_castsi256_si512(raw_desc0_1),
				 raw_desc2_3, 1);

		if (split_packet) {
			int j;

			for (j = 0; j < ICE_DESCS_PER_LOOP_AVX; j++)
				rte_mbuf_prefetch_part2(rx_pkts[i + j]);
		}

		/**
		 * convert descriptors 0-7 into mbufs, re-arrange fields.
		 * Then write into the mbuf.
		 */
		__m512i mb4_7 = _mm512_shuffle_epi8(raw_desc4_7, shuf_msk);
		__m512i mb0_3 = _mm512_shuffle_epi8(raw_desc0_3, shuf_msk);

		mb4_7 = _mm512_add_epi32(mb4_7, crc_adjust);
		mb0_3 = _mm512_add_epi32(mb0_3, crc_adjust);

		/**
		 * to get packet types, ptype is located in bit16-25
		 * of each 128bits
		 */
		const __m512i ptype_mask =
			_mm512_set1_epi16(ICE_RX_FLEX_DESC_PTYPE_M);

		/**
		 * to get packet types, ptype is located in bit16-25
		 * of each 128bits
		 */
		const __m512i ptypes4_7 =
			_mm512_and_si512(raw_desc4_7, ptype_mask);
		const __m512i ptypes0_3 =
			_mm512_and_si512(raw_desc0_3, ptype_mask);

		const __m256i ptypes6_7 =
			_mm512_extracti64x4_epi64(ptypes4_7, 1);
		const __m256i ptypes4_5 =
			_mm512_extracti64x4_epi64(ptypes4_7, 0);
		const __m256i ptypes2_3 =
			_mm512_extracti64x4_epi64(ptypes0_3, 1);
		const __m256i ptypes0_1 =
			_mm512_extracti64x4_epi64(ptypes0_3, 0);
		const uint16_t ptype7 = _mm256_extract_epi16(ptypes6_7, 9);
		const uint16_t ptype6 = _mm256_extract_epi16(ptypes6_7, 1);
		const uint16_t ptype5 = _mm256_extract_epi16(ptypes4_5, 9);
		const uint16_t ptype4 = _mm256_extract_epi16(ptypes4_5, 1);
		const uint16_t ptype3 = _mm256_extract_epi16(ptypes2_3, 9);
		const uint16_t ptype2 = _mm256_extract_epi16(ptypes2_3, 1);
		const uint16_t ptype1 = _mm256_extract_epi16(ptypes0_1, 9);
		const uint16_t ptype0 = _mm256_extract_epi16(ptypes0_1, 1);

		const __m512i ptype4_7 = _mm512_set_epi32
			(0, 0, 0, ptype_tbl[ptype7],
			 0, 0, 0, ptype_tbl[ptype6],
			 0, 0, 0, ptype_tbl[ptype5],
			 0, 0, 0, ptype_tbl[ptype4]);
		const __m512i ptype0_3 = _mm512_set_epi32
			(0, 0, 0, ptype_tbl[ptype3],
			 0, 0, 0, ptype_tbl[ptype2],
			 0, 0, 0, ptype_tbl[ptype1],
			 0, 0, 0, ptype_tbl[ptype0]);

		mb4_7 = _mm512_mask_blend_epi32(0x1111, mb4_7, ptype4_7);
		mb0_3 = _mm512_mask_blend_epi32(0x1111, mb0_3, ptype0_3);
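
		/*
		 * The 0x1111 blend mask above selects the lowest 32-bit lane
		 * of each 128-bit descriptor slot, i.e. the packet_type
		 * dword of each mbuf's rx_descriptor_fields1, leaving the
		 * length/vlan fields produced by the shuffle untouched.
		 */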

		__m256i mb4_5 = _mm512_extracti64x4_epi64(mb4_7, 0);
		__m256i mb6_7 = _mm512_extracti64x4_epi64(mb4_7, 1);
		__m256i mb0_1 = _mm512_extracti64x4_epi64(mb0_3, 0);
		__m256i mb2_3 = _mm512_extracti64x4_epi64(mb0_3, 1);

		/**
		 * use permute/extract to get status content
		 * After the operations, the packets status flags are in the
		 * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
		 */
		/* merge the status bits into one register */
		const __m512i status_permute_msk = _mm512_set_epi32
			(0, 0, 0, 0,
			 0, 0, 0, 0,
			 22, 30, 6, 14,
			 18, 26, 2, 10);
		const __m512i raw_status0_7 = _mm512_permutex2var_epi32
			(raw_desc4_7, status_permute_msk, raw_desc0_3);
		__m256i status0_7 = _mm512_extracti64x4_epi64
			(raw_status0_7, 0);

		__m256i mbuf_flags = _mm256_set1_epi32(0);

		/* now do flag manipulation */

		/* get only flag/error bits we want */
		const __m256i flag_bits =
			_mm256_and_si256(status0_7, flags_mask);
		/**
		 * l3_l4_error flags, shuffle, then shift to correct adjustment
		 * of flags in flags_shuf, and finally mask out extra bits
		 */
		__m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
				_mm256_srli_epi32(flag_bits, 4));
		l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
		__m256i l4_outer_mask = _mm256_set1_epi32(0x6);
		__m256i l4_outer_flags =
				_mm256_and_si256(l3_l4_flags, l4_outer_mask);
		l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20);

		__m256i l3_l4_mask = _mm256_set1_epi32(~0x6);

		l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask);
		l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags);
		l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
		/* set rss and vlan flags */
		const __m256i rss_vlan_flag_bits =
			_mm256_srli_epi32(flag_bits, 12);
		const __m256i rss_vlan_flags =
			_mm256_shuffle_epi8(rss_vlan_flags_shuf,
					    rss_vlan_flag_bits);

		mbuf_flags = _mm256_or_si256(l3_l4_flags,
					     rss_vlan_flags);

		if (rxq->fdir_enabled) {
			const __m256i fdir_id4_7 =
				_mm256_unpackhi_epi32(raw_desc6_7, raw_desc4_5);

			const __m256i fdir_id0_3 =
				_mm256_unpackhi_epi32(raw_desc2_3, raw_desc0_1);

			const __m256i fdir_id0_7 =
				_mm256_unpackhi_epi64(fdir_id4_7, fdir_id0_3);
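
			/*
			 * After the two unpack steps the 32-bit FDIR IDs sit
			 * in fdir_id0_7 in the order (hi->lo)
			 * [1, 3, 5, 7, 0, 2, 4, 6], which is why the scalar
			 * extracts below use lane indices 3, 7, 2, 6, 1, 5,
			 * 0, 4 for packets 0-7.
			 */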

			if (do_offload) {
				const __m256i fdir_flags =
					ice_flex_rxd_to_fdir_flags_vec_avx512
						(fdir_id0_7);

				/* merge with fdir_flags */
				mbuf_flags = _mm256_or_si256
						(mbuf_flags, fdir_flags);
			} else {
				mbuf_flags =
					ice_flex_rxd_to_fdir_flags_vec_avx512
						(fdir_id0_7);
			}

			/* write to mbuf: have to use scalar store here */
			rx_pkts[i + 0]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 3);

			rx_pkts[i + 1]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 7);

			rx_pkts[i + 2]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 2);

			rx_pkts[i + 3]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 6);

			rx_pkts[i + 4]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 1);

			rx_pkts[i + 5]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 5);

			rx_pkts[i + 6]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 0);

			rx_pkts[i + 7]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 4);
		} /* if() on fdir_enabled */

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
		/**
		 * needs to load 2nd 16B of each desc for RSS hash parsing,
		 * will cause performance drop to get into this context.
		 */
		if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
				RTE_ETH_RX_OFFLOAD_RSS_HASH) {
			/* load bottom half of every 32B desc */
			const __m128i raw_desc_bh7 =
				_mm_load_si128
					((void *)(&rxdp[7].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh6 =
				_mm_load_si128
					((void *)(&rxdp[6].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh5 =
				_mm_load_si128
					((void *)(&rxdp[5].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh4 =
				_mm_load_si128
					((void *)(&rxdp[4].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh3 =
				_mm_load_si128
					((void *)(&rxdp[3].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh2 =
				_mm_load_si128
					((void *)(&rxdp[2].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh1 =
				_mm_load_si128
					((void *)(&rxdp[1].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh0 =
				_mm_load_si128
					((void *)(&rxdp[0].wb.status_error1));

			__m256i raw_desc_bh6_7 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc_bh6),
					 raw_desc_bh7, 1);
			__m256i raw_desc_bh4_5 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc_bh4),
					 raw_desc_bh5, 1);
			__m256i raw_desc_bh2_3 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc_bh2),
					 raw_desc_bh3, 1);
			__m256i raw_desc_bh0_1 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc_bh0),
					 raw_desc_bh1, 1);

			/**
			 * to shift the 32b RSS hash value to the
			 * highest 32b of each 128b before mask
			 */
			__m256i rss_hash6_7 =
				_mm256_slli_epi64(raw_desc_bh6_7, 32);
			__m256i rss_hash4_5 =
				_mm256_slli_epi64(raw_desc_bh4_5, 32);
			__m256i rss_hash2_3 =
				_mm256_slli_epi64(raw_desc_bh2_3, 32);
			__m256i rss_hash0_1 =
				_mm256_slli_epi64(raw_desc_bh0_1, 32);

			__m256i rss_hash_msk =
				_mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
						 0xFFFFFFFF, 0, 0, 0);

			rss_hash6_7 = _mm256_and_si256
					(rss_hash6_7, rss_hash_msk);
			rss_hash4_5 = _mm256_and_si256
					(rss_hash4_5, rss_hash_msk);
			rss_hash2_3 = _mm256_and_si256
					(rss_hash2_3, rss_hash_msk);
			rss_hash0_1 = _mm256_and_si256
					(rss_hash0_1, rss_hash_msk);

			mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
			mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
			mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
			mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
		} /* if() on RSS hash parsing */
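
		/*
		 * The RSS hash is OR-ed into the top dword of each 128-bit
		 * mbuf image because mbuf->hash.rss lives at
		 * rx_descriptor_fields1 + 12 (checked above), i.e. the last
		 * dword written by the 256-bit rearm stores below.
		 */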
#endif

		/**
		 * At this point, we have the 8 sets of flags in the low 16-bits
		 * of each 32-bit value in vlan0.
		 * We want to extract these, and merge them with the mbuf init
		 * data so we can do a single write to the mbuf to set the flags
		 * and all the other initialization fields. Extracting the
		 * appropriate flags means that we have to do a shift and blend
		 * for each mbuf before we do the write. However, we can also
		 * add in the previously computed rx_descriptor fields to
		 * make a single 256-bit write per mbuf
		 */
		/* check the structure matches expectations */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
				offsetof(struct rte_mbuf, rearm_data) + 8);
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
				RTE_ALIGN(offsetof(struct rte_mbuf,
						   rearm_data), 16));

		/* build up data and do writes */
		__m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
			rearm6, rearm7;

		rearm6 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(mbuf_flags, 8),
					    0x04);
		rearm4 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(mbuf_flags, 4),
					    0x04);
		rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04);
		rearm0 = _mm256_blend_epi32(mbuf_init,
					    _mm256_srli_si256(mbuf_flags, 4),
					    0x04);

		/* permute to add in the rx_descriptor e.g. rss fields */
		rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
		rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
		rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
		rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);

		/* write to mbuf */
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data,
				    rearm6);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data,
				    rearm4);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data,
				    rearm2);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data,
				    rearm0);

		/* repeat for the odd mbufs */
		const __m256i odd_flags =
			_mm256_castsi128_si256
				(_mm256_extracti128_si256(mbuf_flags, 1));
		rearm7 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(odd_flags, 8),
					    0x04);
		rearm5 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(odd_flags, 4),
					    0x04);
		rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04);
		rearm1 = _mm256_blend_epi32(mbuf_init,
					    _mm256_srli_si256(odd_flags, 4),
					    0x04);

		/* since odd mbufs are already in hi 128-bits use blend */
		rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
		rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
		rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
		rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
		/* again write to mbufs */
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data,
				    rearm7);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data,
				    rearm5);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data,
				    rearm3);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data,
				    rearm1);

		/* extract and record EOP bit */
		if (split_packet) {
			const __m128i eop_mask =
				_mm_set1_epi16(1 << ICE_RX_DESC_STATUS_EOF_S);
			const __m256i eop_bits256 = _mm256_and_si256(status0_7,
					eop_check);
			/* pack status bits into a single 128-bit register */
			const __m128i eop_bits =
				_mm_packus_epi32
					(_mm256_castsi256_si128(eop_bits256),
					 _mm256_extractf128_si256(eop_bits256,
								  1));
			/**
			 * flip bits, and mask out the EOP bit, which is now
			 * a split-packet bit i.e. !EOP, rather than EOP one.
			 */
			__m128i split_bits = _mm_andnot_si128(eop_bits,
					eop_mask);
			/**
			 * eop bits are out of order, so we need to shuffle them
			 * back into order again. In doing so, only use low 8
			 * bits, which acts like another pack instruction
			 * The original order is (hi->lo): 1,3,5,7,0,2,4,6
			 * [Since we use epi8, the 16-bit positions are
			 * multiplied by 2 in the eop_shuffle value.]
			 */
			__m128i eop_shuffle =
				_mm_set_epi8(/* zero hi 64b */
					     0xFF, 0xFF, 0xFF, 0xFF,
					     0xFF, 0xFF, 0xFF, 0xFF,
					     /* move values to lo 64b */
					     8, 0, 10, 2,
					     12, 4, 14, 6);
			split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
			*(uint64_t *)split_packet =
				_mm_cvtsi128_si64(split_bits);
			split_packet += ICE_DESCS_PER_LOOP_AVX;
		}

		/* perform dd_check */
		status0_7 = _mm256_and_si256(status0_7, dd_check);
		status0_7 = _mm256_packs_epi32(status0_7,
					       _mm256_setzero_si256());

		uint64_t burst = __builtin_popcountll
					(_mm_cvtsi128_si64
						(_mm256_extracti128_si256
							(status0_7, 1)));
		burst += __builtin_popcountll
				(_mm_cvtsi128_si64
					(_mm256_castsi256_si128(status0_7)));
		received += burst;
		if (burst != ICE_DESCS_PER_LOOP_AVX)
			break;
	}

	/* update tail pointers */
	rxq->rx_tail += received;
	rxq->rx_tail &= (rxq->nb_rx_desc - 1);
	if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */
		rxq->rx_tail--;
		received--;
	}
	rxq->rxrearm_nb += received;
	return received;
}

/**
 * Notice:
 * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
 */
uint16_t
ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts)
{
	return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL, false);
}

/**
 * Notice:
 * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
 */
uint16_t
ice_recv_pkts_vec_avx512_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
				 uint16_t nb_pkts)
{
	return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts,
					     nb_pkts, NULL, true);
}

/**
 * vPMD receive routine that reassembles a single burst of 32 scattered packets
 * Notice:
 * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
 */
static uint16_t
ice_recv_scattered_burst_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
				    uint16_t nb_pkts)
{
	struct ice_rx_queue *rxq = rx_queue;
	uint8_t split_flags[ICE_VPMD_RX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx512(rxq, rx_pkts, nb_pkts,
							 split_flags, false);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;

	if (!rxq->pkt_first_seg &&
	    split_fl64[0] == 0 && split_fl64[1] == 0 &&
	    split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	unsigned int i = 0;

	if (!rxq->pkt_first_seg) {
		/* find the first split flag, and only reassemble then */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
		rxq->pkt_first_seg = rx_pkts[i];
	}
	return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
					     &split_flags[i]);
}

/**
 * vPMD receive routine that reassembles a single burst of 32 scattered packets
 * Notice:
 * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
 */
static uint16_t
ice_recv_scattered_burst_vec_avx512_offload(void *rx_queue,
					    struct rte_mbuf **rx_pkts,
					    uint16_t nb_pkts)
{
	struct ice_rx_queue *rxq = rx_queue;
	uint8_t split_flags[ICE_VPMD_RX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx512(rxq,
				rx_pkts, nb_pkts, split_flags, true);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;

	if (!rxq->pkt_first_seg &&
	    split_fl64[0] == 0 && split_fl64[1] == 0 &&
	    split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	unsigned int i = 0;

	if (!rxq->pkt_first_seg) {
		/* find the first split flag, and only reassemble then */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
		rxq->pkt_first_seg = rx_pkts[i];
	}
	return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
					     &split_flags[i]);
}

/**
 * vPMD receive routine that reassembles scattered packets.
 * Main receive routine that can handle arbitrary burst sizes
 * Notice:
 * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
 */
uint16_t
ice_recv_scattered_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
				   uint16_t nb_pkts)
{
	uint16_t retval = 0;

	while (nb_pkts > ICE_VPMD_RX_BURST) {
		uint16_t burst = ice_recv_scattered_burst_vec_avx512(rx_queue,
				rx_pkts + retval, ICE_VPMD_RX_BURST);
		retval += burst;
		nb_pkts -= burst;
		if (burst < ICE_VPMD_RX_BURST)
			return retval;
	}
	return retval + ice_recv_scattered_burst_vec_avx512(rx_queue,
				rx_pkts + retval, nb_pkts);
}

/**
 * vPMD receive routine that reassembles scattered packets.
 * Main receive routine that can handle arbitrary burst sizes
 * Notice:
 * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
 */
uint16_t
ice_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
					   struct rte_mbuf **rx_pkts,
					   uint16_t nb_pkts)
{
	uint16_t retval = 0;

	while (nb_pkts > ICE_VPMD_RX_BURST) {
		uint16_t burst =
			ice_recv_scattered_burst_vec_avx512_offload(rx_queue,
				rx_pkts + retval, ICE_VPMD_RX_BURST);
		retval += burst;
		nb_pkts -= burst;
		if (burst < ICE_VPMD_RX_BURST)
			return retval;
	}
	return retval + ice_recv_scattered_burst_vec_avx512_offload(rx_queue,
				rx_pkts + retval, nb_pkts);
}

static __rte_always_inline int
ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
{
	struct ice_vec_tx_entry *txep;
	uint32_t n;
	uint32_t i;
	int nb_free = 0;
	struct rte_mbuf *m, *free[ICE_TX_MAX_FREE_BUF_SZ];

	/* check DD bits on threshold descriptor */
	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
			rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
			rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
		return 0;

	n = txq->tx_rs_thresh;

	/* first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_rs_thresh - 1)
	 */
	txep = (void *)txq->sw_ring;
	txep += txq->tx_next_dd - (n - 1);

	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
		struct rte_mempool *mp = txep[0].mbuf->pool;
		void **cache_objs;
		struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
				rte_lcore_id());

		if (!cache || cache->len == 0)
			goto normal;

		cache_objs = &cache->objs[cache->len];

		if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
			rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n);
			goto done;
		}

		/* The cache follows the following algorithm
		 *   1. Add the objects to the cache
		 *   2. Anything greater than the cache min value (if it
		 *   crosses the cache flush threshold) is flushed to the ring.
		 */
		/* Add elements back into the cache */
		uint32_t copied = 0;
		/* n is multiple of 32 */
		while (copied < n) {
			const __m512i a = _mm512_loadu_si512(&txep[copied]);
			const __m512i b = _mm512_loadu_si512(&txep[copied + 8]);
			const __m512i c = _mm512_loadu_si512(&txep[copied + 16]);
			const __m512i d = _mm512_loadu_si512(&txep[copied + 24]);

			_mm512_storeu_si512(&cache_objs[copied], a);
			_mm512_storeu_si512(&cache_objs[copied + 8], b);
			_mm512_storeu_si512(&cache_objs[copied + 16], c);
			_mm512_storeu_si512(&cache_objs[copied + 24], d);
			copied += 32;
		}
		cache->len += n;

		if (cache->len >= cache->flushthresh) {
			rte_mempool_ops_enqueue_bulk
				(mp, &cache->objs[cache->size],
				 cache->len - cache->size);
			cache->len = cache->size;
		}
		goto done;
	}

normal:
	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
	if (likely(m)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (likely(m)) {
				if (likely(m->pool == free[0]->pool)) {
					free[nb_free++] = m;
				} else {
					rte_mempool_put_bulk(free[0]->pool,
							     (void *)free,
							     nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (m)
				rte_mempool_put(m->pool, m);
		}
	}

done:
	/* buffers were freed, update counters */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
	if (txq->tx_next_dd >= txq->nb_tx_desc)
		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

	return txq->tx_rs_thresh;
}
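
/*
 * Note on the fast-free branch above: because struct ice_vec_tx_entry holds
 * only the mbuf pointer, the 64-byte loads/stores copy 32 mbuf pointers per
 * iteration straight into the per-lcore mempool cache, and the cache is then
 * trimmed back to its nominal size if it crossed the flush threshold.  A
 * rough scalar sketch of the same idea (illustrative only, not the code
 * used here):
 *
 *	void *objs[ICE_TX_MAX_FREE_BUF_SZ];
 *	for (i = 0; i < n; i++)
 *		objs[i] = txep[i].mbuf;
 *	rte_mempool_generic_put(mp, objs, n, cache);
 */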

static __rte_always_inline void
ice_vtx1(volatile struct ice_tx_desc *txdp,
	 struct rte_mbuf *pkt, uint64_t flags, bool do_offload)
{
	uint64_t high_qw =
		(ICE_TX_DESC_DTYPE_DATA |
		 ((uint64_t)flags << ICE_TXD_QW1_CMD_S) |
		 ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S));

	if (do_offload)
		ice_txd_enable_offload(pkt, &high_qw);

	__m128i descriptor = _mm_set_epi64x(high_qw,
				pkt->buf_iova + pkt->data_off);
	_mm_store_si128((__m128i *)txdp, descriptor);
}
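
/*
 * Layout reminder: each 16-byte Tx data descriptor is two quadwords.
 * QW0 carries the buffer DMA address (buf_iova + data_off) and QW1 packs
 * DTYPE, command flags and the buffer length, e.g. for a 60-byte segment:
 *
 *	qw1 = ICE_TX_DESC_DTYPE_DATA |
 *	      ((uint64_t)flags << ICE_TXD_QW1_CMD_S) |
 *	      (60ULL << ICE_TXD_QW1_TX_BUF_SZ_S);
 */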

static __rte_always_inline void
ice_vtx(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkt,
	uint16_t nb_pkts, uint64_t flags, bool do_offload)
{
	const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA |
			((uint64_t)flags << ICE_TXD_QW1_CMD_S));

	for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) {
		uint64_t hi_qw3 =
			hi_qw_tmpl |
			((uint64_t)pkt[3]->data_len <<
			 ICE_TXD_QW1_TX_BUF_SZ_S);
		if (do_offload)
			ice_txd_enable_offload(pkt[3], &hi_qw3);
		uint64_t hi_qw2 =
			hi_qw_tmpl |
			((uint64_t)pkt[2]->data_len <<
			 ICE_TXD_QW1_TX_BUF_SZ_S);
		if (do_offload)
			ice_txd_enable_offload(pkt[2], &hi_qw2);
		uint64_t hi_qw1 =
			hi_qw_tmpl |
			((uint64_t)pkt[1]->data_len <<
			 ICE_TXD_QW1_TX_BUF_SZ_S);
		if (do_offload)
			ice_txd_enable_offload(pkt[1], &hi_qw1);
		uint64_t hi_qw0 =
			hi_qw_tmpl |
			((uint64_t)pkt[0]->data_len <<
			 ICE_TXD_QW1_TX_BUF_SZ_S);
		if (do_offload)
			ice_txd_enable_offload(pkt[0], &hi_qw0);

		__m512i desc0_3 =
			_mm512_set_epi64
				(hi_qw3,
				 pkt[3]->buf_iova + pkt[3]->data_off,
				 hi_qw2,
				 pkt[2]->buf_iova + pkt[2]->data_off,
				 hi_qw1,
				 pkt[1]->buf_iova + pkt[1]->data_off,
				 hi_qw0,
				 pkt[0]->buf_iova + pkt[0]->data_off);
		_mm512_storeu_si512((void *)txdp, desc0_3);
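
		/*
		 * _mm512_set_epi64 lists arguments from the highest quadword
		 * down, so the single 64-byte store above writes descriptor 0
		 * (address, then QW1) at the lowest address and descriptor 3
		 * at the highest, i.e. four 16-byte descriptors per store.
		 */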
	}

	/* do any last ones */
	while (nb_pkts) {
		ice_vtx1(txdp, *pkt, flags, do_offload);
		txdp++, pkt++, nb_pkts--;
	}
}

static __rte_always_inline void
ice_tx_backlog_entry_avx512(struct ice_vec_tx_entry *txep,
			    struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	int i;

	for (i = 0; i < (int)nb_pkts; ++i)
		txep[i].mbuf = tx_pkts[i];
}

static __rte_always_inline uint16_t
ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
				uint16_t nb_pkts, bool do_offload)
{
	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
	volatile struct ice_tx_desc *txdp;
	struct ice_vec_tx_entry *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = ICE_TD_CMD;
	uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD;

	/* cross tx_rs_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

	if (txq->nb_tx_free < txq->tx_free_thresh)
		ice_tx_free_bufs_avx512(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = (void *)txq->sw_ring;
	txep += tx_id;

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {
		ice_tx_backlog_entry_avx512(txep, tx_pkts, n);

		ice_vtx(txdp, tx_pkts, n - 1, flags, do_offload);
		tx_pkts += (n - 1);
		txdp += (n - 1);

		ice_vtx1(txdp, *tx_pkts++, rs, do_offload);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* avoid reaching the end of the ring */
		txdp = txq->tx_ring;
		txep = (void *)txq->sw_ring;
	}

	ice_tx_backlog_entry_avx512(txep, tx_pkts, nb_commit);

	ice_vtx(txdp, tx_pkts, nb_commit, flags, do_offload);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
			rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
					 ICE_TXD_QW1_CMD_S);
		txq->tx_next_rs =
			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
	}

	txq->tx_tail = tx_id;

	ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);

	return nb_pkts;
}

uint16_t
ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
			 uint16_t nb_pkts)
{
	uint16_t nb_tx = 0;
	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;

	while (nb_pkts) {
		uint16_t ret, num;

		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
		ret = ice_xmit_fixed_burst_vec_avx512(tx_queue,
						      &tx_pkts[nb_tx], num, false);
		nb_tx += ret;
		nb_pkts -= ret;
		if (ret < num)
			break;
	}

	return nb_tx;
}

uint16_t
ice_xmit_pkts_vec_avx512_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
				 uint16_t nb_pkts)
{
	uint16_t nb_tx = 0;
	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;

	while (nb_pkts) {
		uint16_t ret, num;

		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
		ret = ice_xmit_fixed_burst_vec_avx512(tx_queue,
						      &tx_pkts[nb_tx], num, true);