/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include "ice_rxtx_vec_common.h"
#include "ice_rxtx_common_avx.h"

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

static __rte_always_inline void
ice_rxq_rearm(struct ice_rx_queue *rxq)
{
	return ice_rxq_rearm_common(rxq, false);
}
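
/**
 * Build per-packet FDIR flag words from 8 descriptor flow IDs: lanes whose
 * flow ID is not the "miss" magic get RTE_MBUF_F_RX_FDIR |
 * RTE_MBUF_F_RX_FDIR_ID, lanes reporting a miss get 0.
 */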
static __rte_always_inline __m256i
ice_flex_rxd_to_fdir_flags_vec_avx2(const __m256i fdir_id0_7)
{
#define FDID_MIS_MAGIC 0xFFFFFFFF
	RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR != (1 << 2));
	RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
	const __m256i pkt_fdir_bit = _mm256_set1_epi32(RTE_MBUF_F_RX_FDIR |
			RTE_MBUF_F_RX_FDIR_ID);
	/* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
	const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
	__m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
			fdir_mis_mask);
	/* this XOR op inverts the fdir_mask (match lanes become all-ones) */
	fdir_mask = _mm256_xor_si256(fdir_mask, fdir_mis_mask);
	const __m256i fdir_flags = _mm256_and_si256(fdir_mask, pkt_fdir_bit);

	return fdir_flags;
}

static __rte_always_inline uint16_t
_ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
			    uint16_t nb_pkts, uint8_t *split_packet,
			    bool offload)
{
#define ICE_DESCS_PER_LOOP_AVX 8

	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
	const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
			0, rxq->mbuf_initializer);
	struct ice_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail];
	volatile union ice_rx_flex_desc *rxdp = rxq->rx_ring + rxq->rx_tail;
	const int avx_aligned = ((rxq->rx_tail & 1) == 0);

	rte_prefetch0(rxdp);

	/* nb_pkts has to be floor-aligned to ICE_DESCS_PER_LOOP_AVX */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, ICE_DESCS_PER_LOOP_AVX);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > ICE_RXQ_REARM_THRESH)
		ice_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.status_error0 &
			rte_cpu_to_le_32(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
		return 0;

	/* constants used in processing loop */
	const __m256i crc_adjust =
		_mm256_set_epi16
			(/* first descriptor */
			 0, 0, 0,       /* ignore non-length fields */
			 -rxq->crc_len, /* sub crc on data_len */
			 0,             /* ignore high-16bits of pkt_len */
			 -rxq->crc_len, /* sub crc on pkt_len */
			 0, 0,          /* ignore pkt_type field */
			 /* second descriptor */
			 0, 0, 0,       /* ignore non-length fields */
			 -rxq->crc_len, /* sub crc on data_len */
			 0,             /* ignore high-16bits of pkt_len */
			 -rxq->crc_len, /* sub crc on pkt_len */
			 0, 0           /* ignore pkt_type field */
			);

	/* 8 packets DD mask, LSB in each 32-bit value */
	const __m256i dd_check = _mm256_set1_epi32(1);

	/* 8 packets EOP mask, second-LSB in each 32-bit value */
	const __m256i eop_check = _mm256_slli_epi32(dd_check,
			ICE_RX_DESC_STATUS_EOF_S);

	/* mask to shuffle from desc. to mbuf (2 descriptors) */
	const __m256i shuf_msk =
		_mm256_set_epi8
			(/* first descriptor */
			 0xFF, 0xFF,
			 0xFF, 0xFF,    /* rss hash parsed separately */
			 11, 10,        /* octet 10~11, 16 bits vlan_macip */
			 5, 4,          /* octet 4~5, 16 bits data_len */
			 0xFF, 0xFF,    /* skip hi 16 bits pkt_len, zero out */
			 5, 4,          /* octet 4~5, 16 bits pkt_len */
			 0xFF, 0xFF,    /* pkt_type set as unknown */
			 0xFF, 0xFF,    /* pkt_type set as unknown */
			 /* second descriptor */
			 0xFF, 0xFF,
			 0xFF, 0xFF,    /* rss hash parsed separately */
			 11, 10,        /* octet 10~11, 16 bits vlan_macip */
			 5, 4,          /* octet 4~5, 16 bits data_len */
			 0xFF, 0xFF,    /* skip hi 16 bits pkt_len, zero out */
			 5, 4,          /* octet 4~5, 16 bits pkt_len */
			 0xFF, 0xFF,    /* pkt_type set as unknown */
			 0xFF, 0xFF     /* pkt_type set as unknown */
			);
	/**
	 * compile-time check the above crc and shuffle layout is correct.
	 * NOTE: the first field (lowest address) is given last in set_epi
	 * calls above.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

	/* Status/Error flag masks */
	/**
	 * mask everything except Checksum Reports, RSS indication
	 * and VLAN indication.
	 * bit7:4 for IP/L4/outer checksum errors.
	 * bit12 is for RSS indication.
	 * bit13 is for VLAN indication.
	 */
	const __m256i flags_mask =
		_mm256_set1_epi32((0xF << 4) | (1 << 12) | (1 << 13));
	/**
	 * data to be shuffled by the result of the flags mask shifted by 4
	 * bits. This gives us the l3_l4 flags.
	 */
	const __m256i l3_l4_flags_shuf =
		_mm256_set_epi8((RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 |
		 RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
		 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
		 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
		 RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
		 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
		 RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
		 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
		 RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
		 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
		 RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		/**
		 * second 128-bits
		 * shift right 20 bits to use the low two bits to indicate
		 * outer checksum status
		 * shift right 1 bit to make sure it does not exceed 255
		 */
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
		 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
		 RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
		 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
		 RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
		 RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
		 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
		 RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
		 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
		(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
		 RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1);
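	/**
	 * Each half of the table above is indexed by the four checksum status
	 * bits (outer-L4, outer-IP, L4, IP error bits) of status_error0 >> 4;
	 * every entry packs the matching ol_flags into one byte (flags >> 1,
	 * outer-L4 flags >> 20), and the decode step in the loop below undoes
	 * those shifts after the byte shuffle.
	 */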
	const __m256i cksum_mask =
		_mm256_set1_epi32(RTE_MBUF_F_RX_IP_CKSUM_MASK |
				  RTE_MBUF_F_RX_L4_CKSUM_MASK |
				  RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
				  RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK);
	/**
	 * data to be shuffled by result of flag mask, shifted down 12.
	 * If RSS(bit12)/VLAN(bit13) are set,
	 * shuffle moves appropriate flags in place.
	 */
	const __m256i rss_vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
			0, 0, 0, 0,
			0, 0, 0, 0,
			RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
			RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
			RTE_MBUF_F_RX_RSS_HASH, 0,
			/* end up 128-bits */
			0, 0, 0, 0,
			0, 0, 0, 0,
			0, 0, 0, 0,
			RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
			RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
			RTE_MBUF_F_RX_RSS_HASH, 0);

	RTE_SET_USED(avx_aligned); /* for 32B descriptors we don't use this */

	uint16_t i, received;

	for (i = 0, received = 0; i < nb_pkts;
	     i += ICE_DESCS_PER_LOOP_AVX,
	     rxdp += ICE_DESCS_PER_LOOP_AVX) {
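		/**
		 * Each iteration handles 8 packets: copy 8 mbuf pointers from
		 * the sw ring, load 8 descriptors, shuffle length/vlan/ptype
		 * fields into mbuf layout, decode ol_flags, then write each
		 * mbuf's rearm_data and rx_descriptor_fields1 with a single
		 * 256-bit store.
		 */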
		/* step 1, copy over 8 mbuf pointers to rx_pkts array */
		_mm256_storeu_si256((void *)&rx_pkts[i],
				_mm256_loadu_si256((void *)&sw_ring[i]));
#ifdef RTE_ARCH_X86_64
		_mm256_storeu_si256
			((void *)&rx_pkts[i + 4],
			 _mm256_loadu_si256((void *)&sw_ring[i + 4]));
#endif
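
		/**
		 * Descriptors are loaded in reverse order (7 down to 0) with
		 * compiler barriers between the loads, so if a later
		 * descriptor is seen as written back, the earlier ones loaded
		 * afterwards are guaranteed to be complete as well.
		 */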
		__m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7;
#ifdef RTE_LIBRTE_ICE_16BYTE_RX_DESC
		/* for AVX we need alignment otherwise loads are not atomic */
		if (avx_aligned) {
			/* load in descriptors, 2 at a time, in reverse order */
			raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6));
			rte_compiler_barrier();
			raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4));
			rte_compiler_barrier();
			raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2));
			rte_compiler_barrier();
			raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0));
		} else
#endif
		{
			const __m128i raw_desc7 =
				_mm_load_si128((void *)(rxdp + 7));
			rte_compiler_barrier();
			const __m128i raw_desc6 =
				_mm_load_si128((void *)(rxdp + 6));
			rte_compiler_barrier();
			const __m128i raw_desc5 =
				_mm_load_si128((void *)(rxdp + 5));
			rte_compiler_barrier();
			const __m128i raw_desc4 =
				_mm_load_si128((void *)(rxdp + 4));
			rte_compiler_barrier();
			const __m128i raw_desc3 =
				_mm_load_si128((void *)(rxdp + 3));
			rte_compiler_barrier();
			const __m128i raw_desc2 =
				_mm_load_si128((void *)(rxdp + 2));
			rte_compiler_barrier();
			const __m128i raw_desc1 =
				_mm_load_si128((void *)(rxdp + 1));
			rte_compiler_barrier();
			const __m128i raw_desc0 =
				_mm_load_si128((void *)(rxdp + 0));

			raw_desc6_7 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc6),
					 raw_desc7, 1);
			raw_desc4_5 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc4),
					 raw_desc5, 1);
			raw_desc2_3 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc2),
					 raw_desc3, 1);
			raw_desc0_1 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc0),
					 raw_desc1, 1);
		}

		if (split_packet) {
			int j;

			for (j = 0; j < ICE_DESCS_PER_LOOP_AVX; j++)
				rte_mbuf_prefetch_part2(rx_pkts[i + j]);
		}

		/**
		 * convert descriptors 4-7 into mbufs, re-arrange fields.
		 * Then write into the mbuf.
		 */
		__m256i mb6_7 = _mm256_shuffle_epi8(raw_desc6_7, shuf_msk);
		__m256i mb4_5 = _mm256_shuffle_epi8(raw_desc4_5, shuf_msk);

		mb6_7 = _mm256_add_epi16(mb6_7, crc_adjust);
		mb4_5 = _mm256_add_epi16(mb4_5, crc_adjust);
		/**
		 * to get packet types, ptype is located in bit16-25
		 * of each 128 bits
		 */
		const __m256i ptype_mask =
			_mm256_set1_epi16(ICE_RX_FLEX_DESC_PTYPE_M);
		const __m256i ptypes6_7 =
			_mm256_and_si256(raw_desc6_7, ptype_mask);
		const __m256i ptypes4_5 =
			_mm256_and_si256(raw_desc4_5, ptype_mask);
		const uint16_t ptype7 = _mm256_extract_epi16(ptypes6_7, 9);
		const uint16_t ptype6 = _mm256_extract_epi16(ptypes6_7, 1);
		const uint16_t ptype5 = _mm256_extract_epi16(ptypes4_5, 9);
		const uint16_t ptype4 = _mm256_extract_epi16(ptypes4_5, 1);

		mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype7], 4);
		mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype6], 0);
		mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype5], 4);
		mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype4], 0);
		/* merge the status bits into one register */
		const __m256i status4_7 = _mm256_unpackhi_epi32(raw_desc6_7,
				raw_desc4_5);

		/**
		 * convert descriptors 0-3 into mbufs, re-arrange fields.
		 * Then write into the mbuf.
		 */
		__m256i mb2_3 = _mm256_shuffle_epi8(raw_desc2_3, shuf_msk);
		__m256i mb0_1 = _mm256_shuffle_epi8(raw_desc0_1, shuf_msk);

		mb2_3 = _mm256_add_epi16(mb2_3, crc_adjust);
		mb0_1 = _mm256_add_epi16(mb0_1, crc_adjust);
		/**
		 * to get packet types, ptype is located in bit16-25
		 * of each 128 bits
		 */
		const __m256i ptypes2_3 =
			_mm256_and_si256(raw_desc2_3, ptype_mask);
		const __m256i ptypes0_1 =
			_mm256_and_si256(raw_desc0_1, ptype_mask);
		const uint16_t ptype3 = _mm256_extract_epi16(ptypes2_3, 9);
		const uint16_t ptype2 = _mm256_extract_epi16(ptypes2_3, 1);
		const uint16_t ptype1 = _mm256_extract_epi16(ptypes0_1, 9);
		const uint16_t ptype0 = _mm256_extract_epi16(ptypes0_1, 1);

		mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype3], 4);
		mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype2], 0);
		mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype1], 4);
		mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype0], 0);
		/* merge the status bits into one register */
		const __m256i status0_3 = _mm256_unpackhi_epi32(raw_desc2_3,
				raw_desc0_1);

		/**
		 * take the two sets of status bits and merge to one
		 * After merge, the packets status flags are in the
		 * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
		 */
		__m256i status0_7 = _mm256_unpacklo_epi64(status4_7,
				status0_3);
		__m256i mbuf_flags = _mm256_set1_epi32(0);

		/* now do flag manipulation */

		/* get only flag/error bits we want */
		const __m256i flag_bits =
			_mm256_and_si256(status0_7, flags_mask);
		/**
		 * l3_l4_error flags, shuffle, then shift to correct adjustment
		 * of flags in flags_shuf, and finally mask out extra bits
		 */
		__m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
				_mm256_srli_epi32(flag_bits, 4));
		l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);

		__m256i l4_outer_mask = _mm256_set1_epi32(0x6);
		__m256i l4_outer_flags =
			_mm256_and_si256(l3_l4_flags, l4_outer_mask);
		l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20);

		__m256i l3_l4_mask = _mm256_set1_epi32(~0x6);

		l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask);
		l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags);
		l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
		/* set rss and vlan flags */
		const __m256i rss_vlan_flag_bits =
			_mm256_srli_epi32(flag_bits, 12);
		const __m256i rss_vlan_flags =
			_mm256_shuffle_epi8(rss_vlan_flags_shuf,
					rss_vlan_flag_bits);

		/* merge flags */
		mbuf_flags = _mm256_or_si256(l3_l4_flags,
				rss_vlan_flags);

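		/**
		 * The FDIR/flow ID is the top dword of each 16-byte descriptor
		 * read above, so the two unpack-high steps below gather all 8
		 * IDs into one register in the same scrambled order as the
		 * status bits; the scalar extracts use the matching lane index
		 * for each mbuf's hash.fdir.hi.
		 */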
		if (rxq->fdir_enabled) {
			const __m256i fdir_id4_7 =
				_mm256_unpackhi_epi32(raw_desc6_7, raw_desc4_5);

			const __m256i fdir_id0_3 =
				_mm256_unpackhi_epi32(raw_desc2_3, raw_desc0_1);

			const __m256i fdir_id0_7 =
				_mm256_unpackhi_epi64(fdir_id4_7, fdir_id0_3);

			const __m256i fdir_flags =
				ice_flex_rxd_to_fdir_flags_vec_avx2(fdir_id0_7);

			/* merge with fdir_flags */
			mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_flags);

			/* write to mbuf: have to use scalar store here */
			rx_pkts[i + 0]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 3);

			rx_pkts[i + 1]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 7);

			rx_pkts[i + 2]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 2);

			rx_pkts[i + 3]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 6);

			rx_pkts[i + 4]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 1);

			rx_pkts[i + 5]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 5);

			rx_pkts[i + 6]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 0);

			rx_pkts[i + 7]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 4);
		} /* if() on fdir_enabled */

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
		/**
		 * needs to load 2nd 16B of each desc for RSS hash parsing,
		 * which causes a performance drop when this path is taken.
		 */
		if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
				RTE_ETH_RX_OFFLOAD_RSS_HASH) {
			/* load bottom half of every 32B desc */
			const __m128i raw_desc_bh7 =
				_mm_load_si128
					((void *)(&rxdp[7].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh6 =
				_mm_load_si128
					((void *)(&rxdp[6].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh5 =
				_mm_load_si128
					((void *)(&rxdp[5].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh4 =
				_mm_load_si128
					((void *)(&rxdp[4].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh3 =
				_mm_load_si128
					((void *)(&rxdp[3].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh2 =
				_mm_load_si128
					((void *)(&rxdp[2].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh1 =
				_mm_load_si128
					((void *)(&rxdp[1].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh0 =
				_mm_load_si128
					((void *)(&rxdp[0].wb.status_error1));

			__m256i raw_desc_bh6_7 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc_bh6),
					 raw_desc_bh7, 1);
			__m256i raw_desc_bh4_5 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc_bh4),
					 raw_desc_bh5, 1);
			__m256i raw_desc_bh2_3 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc_bh2),
					 raw_desc_bh3, 1);
			__m256i raw_desc_bh0_1 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc_bh0),
					 raw_desc_bh1, 1);

			/**
			 * to shift the 32b RSS hash value to the
			 * highest 32b of each 128b before mask
			 */
			__m256i rss_hash6_7 =
				_mm256_slli_epi64(raw_desc_bh6_7, 32);
			__m256i rss_hash4_5 =
				_mm256_slli_epi64(raw_desc_bh4_5, 32);
			__m256i rss_hash2_3 =
				_mm256_slli_epi64(raw_desc_bh2_3, 32);
			__m256i rss_hash0_1 =
				_mm256_slli_epi64(raw_desc_bh0_1, 32);

			__m256i rss_hash_msk =
				_mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
						 0xFFFFFFFF, 0, 0, 0);

			rss_hash6_7 = _mm256_and_si256
					(rss_hash6_7, rss_hash_msk);
			rss_hash4_5 = _mm256_and_si256
					(rss_hash4_5, rss_hash_msk);
			rss_hash2_3 = _mm256_and_si256
					(rss_hash2_3, rss_hash_msk);
			rss_hash0_1 = _mm256_and_si256
					(rss_hash0_1, rss_hash_msk);

			mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
			mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
			mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
			mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
		} /* if() on RSS hash parsing */
#endif

		/**
		 * At this point, we have the 8 sets of flags in the low 16-bits
		 * of each 32-bit value in mbuf_flags.
		 * We want to extract these, and merge them with the mbuf init
		 * data so we can do a single write to the mbuf to set the flags
		 * and all the other initialization fields. Extracting the
		 * appropriate flags means that we have to do a shift and blend
		 * for each mbuf before we do the write. However, we can also
		 * add in the previously computed rx_descriptor fields to
		 * make a single 256-bit write per mbuf.
		 */
		/* check the structure matches expectations */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
				offsetof(struct rte_mbuf, rearm_data) + 8);
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
				RTE_ALIGN(offsetof(struct rte_mbuf,
						   rearm_data),
					  16));

		/* build up data and do writes */
		__m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
			rearm6, rearm7;
		rearm6 = _mm256_blend_epi32(mbuf_init,
				_mm256_slli_si256(mbuf_flags, 8),
				0x04);
		rearm4 = _mm256_blend_epi32(mbuf_init,
				_mm256_slli_si256(mbuf_flags, 4),
				0x04);
		rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04);
		rearm0 = _mm256_blend_epi32(mbuf_init,
				_mm256_srli_si256(mbuf_flags, 4),
				0x04);
		/* permute to add in the rx_descriptor e.g. rss fields */
		rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
		rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
		rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
		rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);

		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data,
				rearm6);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data,
				rearm4);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data,
				rearm2);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data,
				rearm0);

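		/**
		 * mbuf_flags holds the flags in (lo->hi) packet order
		 * 6,4,2,0,7,5,3,1, so the even mbufs' flags sit in the low
		 * 128 bits and their descriptor fields in the low halves of
		 * mb0_1..mb6_7 (hence the permute2f128 above); the odd mbufs'
		 * data are already in the high halves, so a blend is enough
		 * below.
		 */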
		/* repeat for the odd mbufs */
		const __m256i odd_flags =
			_mm256_castsi128_si256
				(_mm256_extracti128_si256(mbuf_flags, 1));
		rearm7 = _mm256_blend_epi32(mbuf_init,
				_mm256_slli_si256(odd_flags, 8),
				0x04);
		rearm5 = _mm256_blend_epi32(mbuf_init,
				_mm256_slli_si256(odd_flags, 4),
				0x04);
		rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04);
		rearm1 = _mm256_blend_epi32(mbuf_init,
				_mm256_srli_si256(odd_flags, 4),
				0x04);
		/* since odd mbufs are already in hi 128-bits use blend */
		rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
		rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
		rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
		rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
		/* again write to mbufs */
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data,
				rearm7);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data,
				rearm5);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data,
				rearm3);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data,
				rearm1);

		/* extract and record EOP bit */
		if (split_packet) {
			const __m128i eop_mask =
				_mm_set1_epi16(1 << ICE_RX_DESC_STATUS_EOF_S);
			const __m256i eop_bits256 = _mm256_and_si256(status0_7,
					eop_check);
			/* pack status bits into a single 128-bit register */
			const __m128i eop_bits =
				_mm_packus_epi32
					(_mm256_castsi256_si128(eop_bits256),
					 _mm256_extractf128_si256(eop_bits256,
								  1));
			/**
			 * flip bits, and mask out the EOP bit, which is now
			 * a split-packet bit i.e. !EOP, rather than EOP one.
			 */
			__m128i split_bits = _mm_andnot_si128(eop_bits,
					eop_mask);
			/**
			 * eop bits are out of order, so we need to shuffle them
			 * back into order again. In doing so, only use low 8
			 * bits, which acts like another pack instruction
			 * The original order is (hi->lo): 1,3,5,7,0,2,4,6
			 * [Since we use epi8, the 16-bit positions are
			 * multiplied by 2 in the eop_shuffle value.]
			 */
			__m128i eop_shuffle =
				_mm_set_epi8(/* zero hi 64b */
					     0xFF, 0xFF, 0xFF, 0xFF,
					     0xFF, 0xFF, 0xFF, 0xFF,
					     /* move values to lo 64b */
					     8, 0, 10, 2,
					     12, 4, 14, 6);
			split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
			*(uint64_t *)split_packet =
				_mm_cvtsi128_si64(split_bits);
			split_packet += ICE_DESCS_PER_LOOP_AVX;
		}

		/* perform dd_check */
		status0_7 = _mm256_and_si256(status0_7, dd_check);
		status0_7 = _mm256_packs_epi32(status0_7,
				_mm256_setzero_si256());

		uint64_t burst = __builtin_popcountll
					(_mm_cvtsi128_si64
						(_mm256_extracti128_si256
							(status0_7, 1)));
		burst += __builtin_popcountll
				(_mm_cvtsi128_si64
					(_mm256_castsi256_si128(status0_7)));
		received += burst;
		if (burst != ICE_DESCS_PER_LOOP_AVX)
			break;
	}

	/* update tail pointers */
	rxq->rx_tail += received;
	rxq->rx_tail &= (rxq->nb_rx_desc - 1);
	if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */
		rxq->rx_tail--;
		received--;
	}
	rxq->rxrearm_nb += received;
	return received;
}

/**
 * Notice:
 * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
 */
uint16_t
ice_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
		       uint16_t nb_pkts)
{
	return _ice_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts,
					   nb_pkts, NULL, false);
}

uint16_t
ice_recv_pkts_vec_avx2_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
			       uint16_t nb_pkts)
{
	return _ice_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts,
					   nb_pkts, NULL, true);
}

/**
 * vPMD receive routine that reassembles a single burst of 32 scattered packets
 * Notice:
 * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
 */
static __rte_always_inline uint16_t
ice_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts, bool offload)
{
	struct ice_rx_queue *rxq = rx_queue;
	uint8_t split_flags[ICE_VPMD_RX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx2(rxq, rx_pkts, nb_pkts,
						       split_flags, offload);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;

	if (!rxq->pkt_first_seg &&
	    split_fl64[0] == 0 && split_fl64[1] == 0 &&
	    split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	unsigned int i = 0;

	if (!rxq->pkt_first_seg) {
		/* find the first split flag, and only reassemble from there */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
		rxq->pkt_first_seg = rx_pkts[i];
	}
	return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
					     &split_flags[i]);
}

/**
 * vPMD receive routine that reassembles scattered packets.
 * Main receive routine that can handle arbitrary burst sizes
 * Notice:
 * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
 */
static __rte_always_inline uint16_t
ice_recv_scattered_pkts_vec_avx2_common(void *rx_queue,
					struct rte_mbuf **rx_pkts,
					uint16_t nb_pkts,
					bool offload)
{
	uint16_t retval = 0;

	while (nb_pkts > ICE_VPMD_RX_BURST) {
		uint16_t burst = ice_recv_scattered_burst_vec_avx2(rx_queue,
				rx_pkts + retval, ICE_VPMD_RX_BURST, offload);
		retval += burst;
		nb_pkts -= burst;
		if (burst < ICE_VPMD_RX_BURST)
			return retval;
	}
	return retval + ice_recv_scattered_burst_vec_avx2(rx_queue,
				rx_pkts + retval, nb_pkts, offload);
}

uint16_t
ice_recv_scattered_pkts_vec_avx2(void *rx_queue,
				 struct rte_mbuf **rx_pkts,
				 uint16_t nb_pkts)
{
	return ice_recv_scattered_pkts_vec_avx2_common(rx_queue,
						       rx_pkts,
						       nb_pkts,
						       false);
}

uint16_t
ice_recv_scattered_pkts_vec_avx2_offload(void *rx_queue,
					 struct rte_mbuf **rx_pkts,
					 uint16_t nb_pkts)
{
	return ice_recv_scattered_pkts_vec_avx2_common(rx_queue,
						       rx_pkts,
						       nb_pkts,
						       true);
}

static __rte_always_inline void
ice_vtx1(volatile struct ice_tx_desc *txdp,
	 struct rte_mbuf *pkt, uint64_t flags, bool offload)
{
	uint64_t high_qw =
		(ICE_TX_DESC_DTYPE_DATA |
		 ((uint64_t)flags << ICE_TXD_QW1_CMD_S) |
		 ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S));
	if (offload)
		ice_txd_enable_offload(pkt, &high_qw);

	__m128i descriptor = _mm_set_epi64x(high_qw,
				pkt->buf_iova + pkt->data_off);
	_mm_store_si128((__m128i *)txdp, descriptor);
}
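
/**
 * ice_vtx() fills nb_pkts data descriptors: it emits single descriptors until
 * txdp is 32-byte aligned, then builds four descriptors per iteration as two
 * 256-bit stores, and finishes any remainder one at a time.
 */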
static __rte_always_inline void
ice_vtx(volatile struct ice_tx_desc *txdp,
	struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags, bool offload)
{
	const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA |
			((uint64_t)flags << ICE_TXD_QW1_CMD_S));

	/* if unaligned on 32-bit boundary, do one to align */
	if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) {
		ice_vtx1(txdp, *pkt, flags, offload);
		nb_pkts--, txdp++, pkt++;
	}

	/* do four at a time while possible, in bursts */
	for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) {
		uint64_t hi_qw3 =
			hi_qw_tmpl |
			((uint64_t)pkt[3]->data_len <<
			 ICE_TXD_QW1_TX_BUF_SZ_S);
		if (offload)
			ice_txd_enable_offload(pkt[3], &hi_qw3);
		uint64_t hi_qw2 =
			hi_qw_tmpl |
			((uint64_t)pkt[2]->data_len <<
			 ICE_TXD_QW1_TX_BUF_SZ_S);
		if (offload)
			ice_txd_enable_offload(pkt[2], &hi_qw2);
		uint64_t hi_qw1 =
			hi_qw_tmpl |
			((uint64_t)pkt[1]->data_len <<
			 ICE_TXD_QW1_TX_BUF_SZ_S);
		if (offload)
			ice_txd_enable_offload(pkt[1], &hi_qw1);
		uint64_t hi_qw0 =
			hi_qw_tmpl |
			((uint64_t)pkt[0]->data_len <<
			 ICE_TXD_QW1_TX_BUF_SZ_S);
		if (offload)
			ice_txd_enable_offload(pkt[0], &hi_qw0);

		__m256i desc2_3 =
			_mm256_set_epi64x
				(hi_qw3,
				 pkt[3]->buf_iova + pkt[3]->data_off,
				 hi_qw2,
				 pkt[2]->buf_iova + pkt[2]->data_off);
		__m256i desc0_1 =
			_mm256_set_epi64x
				(hi_qw1,
				 pkt[1]->buf_iova + pkt[1]->data_off,
				 hi_qw0,
				 pkt[0]->buf_iova + pkt[0]->data_off);
		_mm256_store_si256((void *)(txdp + 2), desc2_3);
		_mm256_store_si256((void *)txdp, desc0_1);
	}

	/* do any last ones */
	while (nb_pkts) {
		ice_vtx1(txdp, *pkt, flags, offload);
		txdp++, pkt++, nb_pkts--;
	}
}

static __rte_always_inline uint16_t
ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
			      uint16_t nb_pkts, bool offload)
{
	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
	volatile struct ice_tx_desc *txdp;
	struct ice_tx_entry *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = ICE_TD_CMD;
	uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD;

	/* crossing the tx_rs_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

	if (txq->nb_tx_free < txq->tx_free_thresh)
		ice_tx_free_bufs_vec(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &txq->sw_ring[tx_id];

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {
		ice_tx_backlog_entry(txep, tx_pkts, n);

		ice_vtx(txdp, tx_pkts, n - 1, flags, offload);
		tx_pkts += (n - 1);
		txdp += (n - 1);

		ice_vtx1(txdp, *tx_pkts++, rs, offload);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* avoid reaching the end of the ring */
		txdp = &txq->tx_ring[tx_id];
		txep = &txq->sw_ring[tx_id];
	}

	ice_tx_backlog_entry(txep, tx_pkts, nb_commit);

	ice_vtx(txdp, tx_pkts, nb_commit, flags, offload);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
			rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
					 ICE_TXD_QW1_CMD_S);
		txq->tx_next_rs =
			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
	}

	txq->tx_tail = tx_id;

	ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);

	return nb_pkts;
}
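
/**
 * The common transmit wrapper below simply splits an arbitrary burst into
 * chunks of at most tx_rs_thresh packets and hands each chunk to
 * ice_xmit_fixed_burst_vec_avx2(), stopping early if a chunk is not fully
 * accepted.
 */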
static __rte_always_inline uint16_t
ice_xmit_pkts_vec_avx2_common(void *tx_queue, struct rte_mbuf **tx_pkts,
			      uint16_t nb_pkts, bool offload)
{
	uint16_t nb_tx = 0;
	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;

	while (nb_pkts) {
		uint16_t ret, num;

		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
		ret = ice_xmit_fixed_burst_vec_avx2(tx_queue, &tx_pkts[nb_tx],
						    num, offload);
		nb_tx += ret;
		nb_pkts -= ret;
		if (ret < num)
			break;
	}

	return nb_tx;
}

uint16_t
ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
		       uint16_t nb_pkts)
{
	return ice_xmit_pkts_vec_avx2_common(tx_queue, tx_pkts, nb_pkts, false);
}

uint16_t
ice_xmit_pkts_vec_avx2_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
			       uint16_t nb_pkts)
{
	return ice_xmit_pkts_vec_avx2_common(tx_queue, tx_pkts, nb_pkts, true);
}