/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include "ice_rxtx_vec_common.h"

#include <x86intrin.h>

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

static inline void
ice_rxq_rearm(struct ice_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union ice_rx_flex_desc *rxdp;
	struct ice_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];

	rxdp = rxq->rx_ring + rxq->rxrearm_start;
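
	/*
	 * Rearm is done in bursts of ICE_RXQ_REARM_THRESH descriptors so a
	 * single rte_mempool_get_bulk() amortizes the mempool cost. On
	 * allocation failure the fallback below parks a few descriptors on
	 * the queue's fake_mbuf with a zeroed DMA address, so the NIC is
	 * never left with a descriptor pointing at a freed buffer.
	 */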
	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mempool_get_bulk(rxq->mp,
				 (void *)rxep,
				 ICE_RXQ_REARM_THRESH) < 0) {
		if (rxq->rxrearm_nb + ICE_RXQ_REARM_THRESH >=
		    rxq->nb_rx_desc) {
			__m128i dma_addr0;

			dma_addr0 = _mm_setzero_si128();
			for (i = 0; i < ICE_DESCS_PER_LOOP; i++) {
				rxep[i].mbuf = &rxq->fake_mbuf;
				_mm_store_si128((__m128i *)&rxdp[i].read,
						dma_addr0);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			ICE_RXQ_REARM_THRESH;
		return;
	}

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
	struct rte_mbuf *mb0, *mb1;
	__m128i dma_addr0, dma_addr1;
	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
					  RTE_PKTMBUF_HEADROOM);
	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < ICE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		__m128i vaddr0, vaddr1;

		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;

		/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
				offsetof(struct rte_mbuf, buf_addr) + 8);
		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
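
		/*
		 * Each 128-bit load above grabbed buf_addr and buf_iova as
		 * one pair; the unpackhi below duplicates the high qword
		 * (buf_iova) into both halves, so both descriptor address
		 * fields receive the bus address rather than the virtual one.
		 */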
		/* convert pa to dma_addr hdr/data */
		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);

		/* add headroom to pa values */
		dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
		dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);

		/* flush desc with pa dma_addr */
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
	}
#else
	struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
	__m256i dma_addr0_1, dma_addr2_3;
	__m256i hdr_room = _mm256_set1_epi64x(RTE_PKTMBUF_HEADROOM);
	/* Initialize the mbufs in vector, process 4 mbufs in one loop */
	for (i = 0; i < ICE_RXQ_REARM_THRESH;
			i += 4, rxep += 4, rxdp += 4) {
		__m128i vaddr0, vaddr1, vaddr2, vaddr3;
		__m256i vaddr0_1, vaddr2_3;

		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;
		mb2 = rxep[2].mbuf;
		mb3 = rxep[3].mbuf;

		/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
				offsetof(struct rte_mbuf, buf_addr) + 8);
		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
		vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr);
		vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr);

		/**
		 * merge 0 & 1, by casting 0 to 256-bit and inserting 1
		 * into the high lanes. Similarly for 2 & 3.
		 */
		vaddr0_1 =
			_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr0),
						vaddr1, 1);
		vaddr2_3 =
			_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr2),
						vaddr3, 1);

		/* convert pa to dma_addr hdr/data */
		dma_addr0_1 = _mm256_unpackhi_epi64(vaddr0_1, vaddr0_1);
		dma_addr2_3 = _mm256_unpackhi_epi64(vaddr2_3, vaddr2_3);

		/* add headroom to pa values */
		dma_addr0_1 = _mm256_add_epi64(dma_addr0_1, hdr_room);
		dma_addr2_3 = _mm256_add_epi64(dma_addr2_3, hdr_room);

		/* flush desc with pa dma_addr */
		_mm256_store_si256((__m256i *)&rxdp->read, dma_addr0_1);
		_mm256_store_si256((__m256i *)&(rxdp + 2)->read, dma_addr2_3);
	}
#endif

	rxq->rxrearm_start += ICE_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= ICE_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			   (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
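
	/*
	 * The tail register is written one descriptor behind rxrearm_start
	 * (wrapping to the end of the ring when rearm restarted at zero), so
	 * the hardware never owns the slot that will be rearmed next.
	 */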
	/* Update the tail pointer on the NIC */
	ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
}

static inline __m256i
ice_flex_rxd_to_fdir_flags_vec_avx2(const __m256i fdir_id0_7)
{
#define FDID_MIS_MAGIC 0xFFFFFFFF
	RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
	RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
	const __m256i pkt_fdir_bit = _mm256_set1_epi32(PKT_RX_FDIR |
			PKT_RX_FDIR_ID);
	/* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
	const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
	__m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
			fdir_mis_mask);
	/* XOR with all-ones inverts fdir_mask (a bitwise NOT) */
	fdir_mask = _mm256_xor_si256(fdir_mask, fdir_mis_mask);
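
	/*
	 * fdir_mask now holds all-ones in every lane whose flow_id is a real
	 * filter match, so the AND below raises PKT_RX_FDIR | PKT_RX_FDIR_ID
	 * only for matched packets and leaves mismatch lanes at zero.
	 */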
	const __m256i fdir_flags = _mm256_and_si256(fdir_mask, pkt_fdir_bit);

	return fdir_flags;
}

static inline uint16_t
_ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
			    uint16_t nb_pkts, uint8_t *split_packet)
{
#define ICE_DESCS_PER_LOOP_AVX 8

	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
	const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
			0, rxq->mbuf_initializer);
	struct ice_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail];
	volatile union ice_rx_flex_desc *rxdp = rxq->rx_ring + rxq->rx_tail;
	const int avx_aligned = ((rxq->rx_tail & 1) == 0);
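
	/*
	 * Note: avx_aligned above matters only when 16B descriptors are in
	 * use; an even rx_tail means each 32B aligned load in the main loop
	 * covers exactly two descriptors, which keeps those loads atomic.
	 */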

	rte_prefetch0(rxdp);

	/* nb_pkts has to be floor-aligned to ICE_DESCS_PER_LOOP_AVX */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, ICE_DESCS_PER_LOOP_AVX);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > ICE_RXQ_REARM_THRESH)
		ice_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.status_error0 &
			rte_cpu_to_le_32(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
		return 0;

	/* constants used in processing loop */
	const __m256i crc_adjust =
		_mm256_set_epi16
			(/* first descriptor */
			 0, 0, 0,	/* ignore non-length fields */
			 -rxq->crc_len,	/* sub crc on data_len */
			 0,		/* ignore high-16bits of pkt_len */
			 -rxq->crc_len,	/* sub crc on pkt_len */
			 0, 0,		/* ignore pkt_type field */
			 /* second descriptor */
			 0, 0, 0,	/* ignore non-length fields */
			 -rxq->crc_len,	/* sub crc on data_len */
			 0,		/* ignore high-16bits of pkt_len */
			 -rxq->crc_len,	/* sub crc on pkt_len */
			 0, 0		/* ignore pkt_type field */
			);

	/* 8 packets DD mask, LSB in each 32-bit value */
	const __m256i dd_check = _mm256_set1_epi32(1);

	/* 8 packets EOP mask, second-LSB in each 32-bit value */
	const __m256i eop_check = _mm256_slli_epi32(dd_check,
			ICE_RX_DESC_STATUS_EOF_S);

	/* mask to shuffle from desc. to mbuf (2 descriptors) */
	const __m256i shuf_msk =
		_mm256_set_epi8
			(/* first descriptor */
			 0xFF, 0xFF,
			 0xFF, 0xFF,	/* rss hash parsed separately */
			 11, 10,	/* octet 10~11, 16 bits vlan_macip */
			 5, 4,		/* octet 4~5, 16 bits data_len */
			 0xFF, 0xFF,	/* skip hi 16 bits pkt_len, zero out */
			 5, 4,		/* octet 4~5, 16 bits pkt_len */
			 0xFF, 0xFF,	/* pkt_type set as unknown */
			 0xFF, 0xFF,	/* pkt_type set as unknown */
			 /* second descriptor */
			 0xFF, 0xFF,
			 0xFF, 0xFF,	/* rss hash parsed separately */
			 11, 10,	/* octet 10~11, 16 bits vlan_macip */
			 5, 4,		/* octet 4~5, 16 bits data_len */
			 0xFF, 0xFF,	/* skip hi 16 bits pkt_len, zero out */
			 5, 4,		/* octet 4~5, 16 bits pkt_len */
			 0xFF, 0xFF,	/* pkt_type set as unknown */
			 0xFF, 0xFF	/* pkt_type set as unknown */
			);
	/**
	 * compile-time check the above crc and shuffle layout is correct.
	 * NOTE: the first field (lowest address) is given last in set_epi
	 * calls above.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

	/* Status/Error flag masks */
	/**
	 * mask everything except Checksum Reports, RSS indication
	 * and VLAN indication.
	 * bit6:4 for IP/L4 checksum errors.
	 * bit12 is for RSS indication.
	 * bit13 is for VLAN indication.
	 */
	const __m256i flags_mask =
		_mm256_set1_epi32((0xF << 4) | (1 << 12) | (1 << 13));
	/**
	 * data to be shuffled by the result of the flags mask shifted by 4
	 * bits. This gives us the l3_l4 flags.
	 */
	const __m256i l3_l4_flags_shuf =
		_mm256_set_epi8((PKT_RX_OUTER_L4_CKSUM_BAD >> 20 |
		 PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
		 PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
		 PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
		 PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
		 PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
		 PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
		 PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
		 PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
		 PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
		 PKT_RX_IP_CKSUM_GOOD) >> 1,
		/**
		 * second 128-bits
		 * shift right 20 bits to use the low two bits to indicate
		 * outer checksum status
		 * shift right 1 bit to make sure it does not exceed 255
		 */
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
		 PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
		 PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
		 PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
		 PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
		 PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
		 PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
		 PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
		 PKT_RX_IP_CKSUM_GOOD) >> 1);
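
	/*
	 * Worked example of the lookup: descriptor status bits 4..7 hold the
	 * IP/L4/outer-IP error flags, so (flag_bits >> 4) in the loop below
	 * leaves a 4-bit index in the low byte of each 32-bit element, which
	 * _mm256_shuffle_epi8 uses to select one of the 16 entries per
	 * 128-bit half of this table; index 0 (no error bits) selects the
	 * "all good" entry. Entries are pre-shifted right by 1 to fit in a
	 * byte, and the loop shifts the result left by 1 to restore them.
	 */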
	const __m256i cksum_mask =
		_mm256_set1_epi32(PKT_RX_IP_CKSUM_MASK |
				  PKT_RX_L4_CKSUM_MASK |
				  PKT_RX_OUTER_IP_CKSUM_BAD |
				  PKT_RX_OUTER_L4_CKSUM_MASK);
	/**
	 * data to be shuffled by result of flag mask, shifted down 12.
	 * If RSS(bit12)/VLAN(bit13) are set,
	 * shuffle moves appropriate flags in place.
	 */
	const __m256i rss_vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
			0, 0, 0, 0,
			0, 0, 0, 0,
			PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			PKT_RX_RSS_HASH, 0,
			/* end up 128-bits */
			0, 0, 0, 0,
			0, 0, 0, 0,
			0, 0, 0, 0,
			PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			PKT_RX_RSS_HASH, 0);
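
	/*
	 * Index layout for the table above: after shifting the status down
	 * by 12, bit0 = RSS valid and bit1 = VLAN stripped, so entry 1 is
	 * RSS only, entry 2 is VLAN only and entry 3 is both.
	 */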

	RTE_SET_USED(avx_aligned); /* for 32B descriptors we don't use this */

	uint16_t i, received;

	for (i = 0, received = 0; i < nb_pkts;
	     i += ICE_DESCS_PER_LOOP_AVX,
	     rxdp += ICE_DESCS_PER_LOOP_AVX) {
		/* step 1, copy over 8 mbuf pointers to rx_pkts array */
		_mm256_storeu_si256((void *)&rx_pkts[i],
				_mm256_loadu_si256((void *)&sw_ring[i]));
#ifdef RTE_ARCH_X86_64
		_mm256_storeu_si256
			((void *)&rx_pkts[i + 4],
			 _mm256_loadu_si256((void *)&sw_ring[i + 4]));
#endif
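
		/*
		 * Step 2 below reads the descriptors highest-index first,
		 * with compiler barriers between the loads, so a DD bit seen
		 * set for one descriptor can never pair with stale contents
		 * read earlier for a lower-index descriptor.
		 */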

		__m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7;
#ifdef RTE_LIBRTE_ICE_16BYTE_RX_DESC
		/* for AVX we need alignment otherwise loads are not atomic */
		if (avx_aligned) {
			/* load in descriptors, 2 at a time, in reverse order */
			raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6));
			rte_compiler_barrier();
			raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4));
			rte_compiler_barrier();
			raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2));
			rte_compiler_barrier();
			raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0));
		} else
#endif
		{
			const __m128i raw_desc7 =
				_mm_load_si128((void *)(rxdp + 7));
			rte_compiler_barrier();
			const __m128i raw_desc6 =
				_mm_load_si128((void *)(rxdp + 6));
			rte_compiler_barrier();
			const __m128i raw_desc5 =
				_mm_load_si128((void *)(rxdp + 5));
			rte_compiler_barrier();
			const __m128i raw_desc4 =
				_mm_load_si128((void *)(rxdp + 4));
			rte_compiler_barrier();
			const __m128i raw_desc3 =
				_mm_load_si128((void *)(rxdp + 3));
			rte_compiler_barrier();
			const __m128i raw_desc2 =
				_mm_load_si128((void *)(rxdp + 2));
			rte_compiler_barrier();
			const __m128i raw_desc1 =
				_mm_load_si128((void *)(rxdp + 1));
			rte_compiler_barrier();
			const __m128i raw_desc0 =
				_mm_load_si128((void *)(rxdp + 0));

			raw_desc6_7 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc6),
					 raw_desc7, 1);
			raw_desc4_5 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc4),
					 raw_desc5, 1);
			raw_desc2_3 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc2),
					 raw_desc3, 1);
			raw_desc0_1 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc0),
					 raw_desc1, 1);
		}

		if (split_packet) {
			int j;

			for (j = 0; j < ICE_DESCS_PER_LOOP_AVX; j++)
				rte_mbuf_prefetch_part2(rx_pkts[i + j]);
		}

		/**
		 * convert descriptors 4-7 into mbufs, re-arrange fields.
		 * Then write into the mbuf.
		 */
		__m256i mb6_7 = _mm256_shuffle_epi8(raw_desc6_7, shuf_msk);
		__m256i mb4_5 = _mm256_shuffle_epi8(raw_desc4_5, shuf_msk);

		mb6_7 = _mm256_add_epi16(mb6_7, crc_adjust);
		mb4_5 = _mm256_add_epi16(mb4_5, crc_adjust);
		/**
		 * to get packet types, ptype is located in bit16-25
		 * of each 128bits
		 */
		const __m256i ptype_mask =
			_mm256_set1_epi16(ICE_RX_FLEX_DESC_PTYPE_M);
		const __m256i ptypes6_7 =
			_mm256_and_si256(raw_desc6_7, ptype_mask);
		const __m256i ptypes4_5 =
			_mm256_and_si256(raw_desc4_5, ptype_mask);
		const uint16_t ptype7 = _mm256_extract_epi16(ptypes6_7, 9);
		const uint16_t ptype6 = _mm256_extract_epi16(ptypes6_7, 1);
		const uint16_t ptype5 = _mm256_extract_epi16(ptypes4_5, 9);
		const uint16_t ptype4 = _mm256_extract_epi16(ptypes4_5, 1);

		mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype7], 4);
		mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype6], 0);
		mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype5], 4);
		mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype4], 0);
		/* merge the status bits into one register */
		const __m256i status4_7 = _mm256_unpackhi_epi32(raw_desc6_7,
				raw_desc4_5);

		/**
		 * convert descriptors 0-3 into mbufs, re-arrange fields.
		 * Then write into the mbuf.
		 */
		__m256i mb2_3 = _mm256_shuffle_epi8(raw_desc2_3, shuf_msk);
		__m256i mb0_1 = _mm256_shuffle_epi8(raw_desc0_1, shuf_msk);

		mb2_3 = _mm256_add_epi16(mb2_3, crc_adjust);
		mb0_1 = _mm256_add_epi16(mb0_1, crc_adjust);
		/**
		 * to get packet types, ptype is located in bit16-25
		 * of each 128bits
		 */
		const __m256i ptypes2_3 =
			_mm256_and_si256(raw_desc2_3, ptype_mask);
		const __m256i ptypes0_1 =
			_mm256_and_si256(raw_desc0_1, ptype_mask);
		const uint16_t ptype3 = _mm256_extract_epi16(ptypes2_3, 9);
		const uint16_t ptype2 = _mm256_extract_epi16(ptypes2_3, 1);
		const uint16_t ptype1 = _mm256_extract_epi16(ptypes0_1, 9);
		const uint16_t ptype0 = _mm256_extract_epi16(ptypes0_1, 1);

		mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype3], 4);
		mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype2], 0);
		mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype1], 4);
		mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype0], 0);
		/* merge the status bits into one register */
		const __m256i status0_3 = _mm256_unpackhi_epi32(raw_desc2_3,
				raw_desc0_1);

		/**
		 * take the two sets of status bits and merge to one
		 * After merge, the packets status flags are in the
		 * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
		 */
		__m256i status0_7 = _mm256_unpacklo_epi64(status4_7,
				status0_3);

		/* now do flag manipulation */

		/* get only flag/error bits we want */
		const __m256i flag_bits =
			_mm256_and_si256(status0_7, flags_mask);
		/**
		 * l3_l4_error flags: shuffle, then shift to correct adjustment
		 * of flags in flags_shuf, and finally mask out extra bits
		 */
		__m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
				_mm256_srli_epi32(flag_bits, 4));
		l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
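
		/*
		 * The outer-L4 bits were stored in the table shifted down by
		 * 20 so they fit in a byte; isolate them (mask 0x6) and shift
		 * them back up by 20 into their real PKT_RX_OUTER_L4_CKSUM
		 * position before re-merging with the rest of the flags.
		 */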
		__m256i l4_outer_mask = _mm256_set1_epi32(0x6);
		__m256i l4_outer_flags =
			_mm256_and_si256(l3_l4_flags, l4_outer_mask);
		l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20);

		__m256i l3_l4_mask = _mm256_set1_epi32(~0x6);
		l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask);
		l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags);
		l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
		/* set rss and vlan flags */
		const __m256i rss_vlan_flag_bits =
			_mm256_srli_epi32(flag_bits, 12);
		const __m256i rss_vlan_flags =
			_mm256_shuffle_epi8(rss_vlan_flags_shuf,
					    rss_vlan_flag_bits);

		/* merge flags */
		__m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
				rss_vlan_flags);

		if (rxq->fdir_enabled) {
			const __m256i fdir_id4_7 =
				_mm256_unpackhi_epi32(raw_desc6_7, raw_desc4_5);

			const __m256i fdir_id0_3 =
				_mm256_unpackhi_epi32(raw_desc2_3, raw_desc0_1);

			const __m256i fdir_id0_7 =
				_mm256_unpackhi_epi64(fdir_id4_7, fdir_id0_3);

			const __m256i fdir_flags =
				ice_flex_rxd_to_fdir_flags_vec_avx2(fdir_id0_7);

			/* merge with fdir_flags */
			mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_flags);
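
			/*
			 * The extract indices below look scrambled because
			 * the lanes of fdir_id0_7 follow the merged status
			 * order (hi->lo) [1, 3, 5, 7, 0, 2, 4, 6]; each
			 * store simply picks the lane holding that packet.
			 */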

			/* write to mbuf: have to use scalar store here */
			rx_pkts[i + 0]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 3);

			rx_pkts[i + 1]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 7);

			rx_pkts[i + 2]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 2);

			rx_pkts[i + 3]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 6);

			rx_pkts[i + 4]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 1);

			rx_pkts[i + 5]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 5);

			rx_pkts[i + 6]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 0);

			rx_pkts[i + 7]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 4);
		} /* if() on fdir_enabled */

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
		/**
		 * needs to load 2nd 16B of each desc for RSS hash parsing,
		 * will cause performance drop to get into this context.
		 */
		if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_RSS_HASH) {
			/* load bottom half of every 32B desc */
			const __m128i raw_desc_bh7 =
				_mm_load_si128
					((void *)(&rxdp[7].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh6 =
				_mm_load_si128
					((void *)(&rxdp[6].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh5 =
				_mm_load_si128
					((void *)(&rxdp[5].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh4 =
				_mm_load_si128
					((void *)(&rxdp[4].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh3 =
				_mm_load_si128
					((void *)(&rxdp[3].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh2 =
				_mm_load_si128
					((void *)(&rxdp[2].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh1 =
				_mm_load_si128
					((void *)(&rxdp[1].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh0 =
				_mm_load_si128
					((void *)(&rxdp[0].wb.status_error1));

			__m256i raw_desc_bh6_7 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc_bh6),
					 raw_desc_bh7, 1);
			__m256i raw_desc_bh4_5 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc_bh4),
					 raw_desc_bh5, 1);
			__m256i raw_desc_bh2_3 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc_bh2),
					 raw_desc_bh3, 1);
			__m256i raw_desc_bh0_1 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc_bh0),
					 raw_desc_bh1, 1);

			/**
			 * to shift the 32b RSS hash value to the
			 * highest 32b of each 128b before mask
			 */
			__m256i rss_hash6_7 =
				_mm256_slli_epi64(raw_desc_bh6_7, 32);
			__m256i rss_hash4_5 =
				_mm256_slli_epi64(raw_desc_bh4_5, 32);
			__m256i rss_hash2_3 =
				_mm256_slli_epi64(raw_desc_bh2_3, 32);
			__m256i rss_hash0_1 =
				_mm256_slli_epi64(raw_desc_bh0_1, 32);

			__m256i rss_hash_msk =
				_mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
						 0xFFFFFFFF, 0, 0, 0);

			rss_hash6_7 = _mm256_and_si256
					(rss_hash6_7, rss_hash_msk);
			rss_hash4_5 = _mm256_and_si256
					(rss_hash4_5, rss_hash_msk);
			rss_hash2_3 = _mm256_and_si256
					(rss_hash2_3, rss_hash_msk);
			rss_hash0_1 = _mm256_and_si256
					(rss_hash0_1, rss_hash_msk);

			mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
			mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
			mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
			mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
		} /* if() on RSS hash parsing */
#endif

		/**
		 * At this point, we have the 8 sets of flags in the low
		 * 16-bits of each 32-bit value in mbuf_flags.
		 * We want to extract these, and merge them with the mbuf init
		 * data so we can do a single write to the mbuf to set the flags
		 * and all the other initialization fields. Extracting the
		 * appropriate flags means that we have to do a shift and blend
		 * for each mbuf before we do the write. However, we can also
		 * add in the previously computed rx_descriptor fields to
		 * make a single 256-bit write per mbuf.
		 */
		/* check the structure matches expectations */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
				offsetof(struct rte_mbuf, rearm_data) + 8);
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
				RTE_ALIGN(offsetof(struct rte_mbuf,
						   rearm_data),
					  16));

		/* build up data and do writes */
		__m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
			rearm6, rearm7;
		rearm6 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(mbuf_flags, 8),
					    0x04);
		rearm4 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(mbuf_flags, 4),
					    0x04);
		rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04);
		rearm0 = _mm256_blend_epi32(mbuf_init,
					    _mm256_srli_si256(mbuf_flags, 4),
					    0x04);
		/* permute to add in the rx_descriptor e.g. rss fields */
		rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
		rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
		rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
		rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
		/* write to mbuf */
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data,
				    rearm6);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data,
				    rearm4);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data,
				    rearm2);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data,
				    rearm0);
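
		/*
		 * mbuf_flags still follows the merged order, so its low 128
		 * bits hold packets 0/2/4/6 and its high 128 bits packets
		 * 1/3/5/7; the even mbufs were served above and the odd ones
		 * take the extracted high half below.
		 */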

		/* repeat for the odd mbufs */
		const __m256i odd_flags =
			_mm256_castsi128_si256
				(_mm256_extracti128_si256(mbuf_flags, 1));
		rearm7 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(odd_flags, 8),
					    0x04);
		rearm5 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(odd_flags, 4),
					    0x04);
		rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04);
		rearm1 = _mm256_blend_epi32(mbuf_init,
					    _mm256_srli_si256(odd_flags, 4),
					    0x04);
		/* since odd mbufs are already in hi 128-bits use blend */
		rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
		rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
		rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
		rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
		/* again write to mbufs */
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data,
				    rearm7);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data,
				    rearm5);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data,
				    rearm3);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data,
				    rearm1);

		/* extract and record EOP bit */
		if (split_packet) {
			const __m128i eop_mask =
				_mm_set1_epi16(1 << ICE_RX_DESC_STATUS_EOF_S);
			const __m256i eop_bits256 = _mm256_and_si256(status0_7,
					eop_check);
			/* pack status bits into a single 128-bit register */
			const __m128i eop_bits =
				_mm_packus_epi32
					(_mm256_castsi256_si128(eop_bits256),
					 _mm256_extractf128_si256(eop_bits256,
								  1));
			/**
			 * flip bits, and mask out the EOP bit, which is now
			 * a split-packet bit, i.e. !EOP rather than EOP.
			 */
			__m128i split_bits = _mm_andnot_si128(eop_bits,
					eop_mask);
			/**
			 * eop bits are out of order, so we need to shuffle them
			 * back into order again. In doing so, only use low 8
			 * bits, which acts like another pack instruction.
			 * The original order is (hi->lo): 1,3,5,7,0,2,4,6
			 * [Since we use epi8, the 16-bit positions are
			 * multiplied by 2 in the eop_shuffle value.]
			 */
			__m128i eop_shuffle =
				_mm_set_epi8(/* zero hi 64b */
					     0xFF, 0xFF, 0xFF, 0xFF,
					     0xFF, 0xFF, 0xFF, 0xFF,
					     /* move values to lo 64b */
					     8, 0, 10, 2,
					     12, 4, 14, 6);
			split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
			*(uint64_t *)split_packet =
				_mm_cvtsi128_si64(split_bits);
			split_packet += ICE_DESCS_PER_LOOP_AVX;
		}

		/* perform dd_check */
		status0_7 = _mm256_and_si256(status0_7, dd_check);
		status0_7 = _mm256_packs_epi32(status0_7,
					       _mm256_setzero_si256());

		uint64_t burst = __builtin_popcountll
					(_mm_cvtsi128_si64
						(_mm256_extracti128_si256
							(status0_7, 1)));
		burst += __builtin_popcountll
				(_mm_cvtsi128_si64
					(_mm256_castsi256_si128(status0_7)));
		received += burst;
		if (burst != ICE_DESCS_PER_LOOP_AVX)
			break;
	}

	/* update tail pointers */
	rxq->rx_tail += received;
	rxq->rx_tail &= (rxq->nb_rx_desc - 1);
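	/* note: the & wrap above only works when nb_rx_desc is a power of two */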
	if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */
		rxq->rx_tail--;
		received--;
	}
	rxq->rxrearm_nb += received;
	return received;
}

/**
 * Notice:
 * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
 */
uint16_t
ice_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
		       uint16_t nb_pkts)
{
	return _ice_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts, NULL);
}

/**
 * vPMD receive routine that reassembles single burst of 32 scattered packets
 * Notice:
 * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
 */
static uint16_t
ice_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts)
{
	struct ice_rx_queue *rxq = rx_queue;
	uint8_t split_flags[ICE_VPMD_RX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx2(rxq, rx_pkts, nb_pkts,
						       split_flags);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;

	if (!rxq->pkt_first_seg &&
	    split_fl64[0] == 0 && split_fl64[1] == 0 &&
	    split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	unsigned int i = 0;

	if (!rxq->pkt_first_seg) {
		/* find the first split flag, and only reassemble then */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
		rxq->pkt_first_seg = rx_pkts[i];
	}
	return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
					     &split_flags[i]);
}

/**
 * vPMD receive routine that reassembles scattered packets.
 * Main receive routine that can handle arbitrary burst sizes
 * Notice:
 * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
 */
uint16_t
ice_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
				 uint16_t nb_pkts)
{
	uint16_t retval = 0;

	while (nb_pkts > ICE_VPMD_RX_BURST) {
		uint16_t burst = ice_recv_scattered_burst_vec_avx2(rx_queue,
				rx_pkts + retval, ICE_VPMD_RX_BURST);
		retval += burst;
		nb_pkts -= burst;
		if (burst < ICE_VPMD_RX_BURST)
			return retval;
	}
	return retval + ice_recv_scattered_burst_vec_avx2(rx_queue,
				rx_pkts + retval, nb_pkts);
}

static __rte_always_inline void
ice_vtx1(volatile struct ice_tx_desc *txdp,
	 struct rte_mbuf *pkt, uint64_t flags)
{
	uint64_t high_qw =
		(ICE_TX_DESC_DTYPE_DATA |
		 ((uint64_t)flags << ICE_TXD_QW1_CMD_S) |
		 ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S));

	__m128i descriptor = _mm_set_epi64x(high_qw,
				pkt->buf_iova + pkt->data_off);
	_mm_store_si128((__m128i *)txdp, descriptor);
}

static __rte_always_inline void
ice_vtx(volatile struct ice_tx_desc *txdp,
	struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
	const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA |
			((uint64_t)flags << ICE_TXD_QW1_CMD_S));

	/* if unaligned on a 32-byte boundary, do one to align */
	if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) {
		ice_vtx1(txdp, *pkt, flags);
		nb_pkts--, txdp++, pkt++;
	}

	/* do four at a time while possible, two descriptors per store */
	for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) {
		uint64_t hi_qw3 =
			hi_qw_tmpl |
			((uint64_t)pkt[3]->data_len <<
			 ICE_TXD_QW1_TX_BUF_SZ_S);
		uint64_t hi_qw2 =
			hi_qw_tmpl |
			((uint64_t)pkt[2]->data_len <<
			 ICE_TXD_QW1_TX_BUF_SZ_S);
		uint64_t hi_qw1 =
			hi_qw_tmpl |
			((uint64_t)pkt[1]->data_len <<
			 ICE_TXD_QW1_TX_BUF_SZ_S);
		uint64_t hi_qw0 =
			hi_qw_tmpl |
			((uint64_t)pkt[0]->data_len <<
			 ICE_TXD_QW1_TX_BUF_SZ_S);
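
		/*
		 * _mm256_set_epi64x lists arguments high-to-low, so each
		 * 256-bit store below writes two complete descriptors: the
		 * (addr, hi_qw) pair of the lower-numbered packet lands in
		 * the low 128 bits and the next packet in the high 128 bits.
		 */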
		__m256i desc2_3 =
			_mm256_set_epi64x
				(hi_qw3,
				 pkt[3]->buf_iova + pkt[3]->data_off,
				 hi_qw2,
				 pkt[2]->buf_iova + pkt[2]->data_off);
		__m256i desc0_1 =
			_mm256_set_epi64x
				(hi_qw1,
				 pkt[1]->buf_iova + pkt[1]->data_off,
				 hi_qw0,
				 pkt[0]->buf_iova + pkt[0]->data_off);
		_mm256_store_si256((void *)(txdp + 2), desc2_3);
		_mm256_store_si256((void *)txdp, desc0_1);
	}

	/* do any last ones */
	while (nb_pkts) {
		ice_vtx1(txdp, *pkt, flags);
		txdp++, pkt++, nb_pkts--;
	}
}

static inline uint16_t
ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
			      uint16_t nb_pkts)
{
	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
	volatile struct ice_tx_desc *txdp;
	struct ice_tx_entry *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = ICE_TD_CMD;
	uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD;

	/* crossing the tx_rs_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

	if (txq->nb_tx_free < txq->tx_free_thresh)
		ice_tx_free_bufs(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &txq->sw_ring[tx_id];

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
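
	/*
	 * If the burst crosses the end of the ring, fill up to the wrap
	 * point first; the final descriptor before the wrap is written via
	 * ice_vtx1() with the RS bit set so completion write-back is
	 * requested before restarting at slot 0.
	 */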
	if (nb_commit >= n) {
		ice_tx_backlog_entry(txep, tx_pkts, n);

		ice_vtx(txdp, tx_pkts, n - 1, flags);
		tx_pkts += (n - 1);
		txdp += (n - 1);

		ice_vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* avoid reaching the end of the ring */
		txdp = &txq->tx_ring[tx_id];
		txep = &txq->sw_ring[tx_id];
	}

	ice_tx_backlog_entry(txep, tx_pkts, nb_commit);

	ice_vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
			rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
					 ICE_TXD_QW1_CMD_S);
		txq->tx_next_rs =
			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
	}

	txq->tx_tail = tx_id;

	ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);

	return nb_pkts;
}

uint16_t
ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
		       uint16_t nb_pkts)
{
	uint16_t nb_tx = 0;
	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;

	while (nb_pkts) {
		uint16_t ret, num;

		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
		ret = ice_xmit_fixed_burst_vec_avx2(tx_queue, &tx_pkts[nb_tx],