/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include "ice_rxtx_vec_common.h"

#include <rte_vect.h>

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

static __rte_always_inline void
ice_rxq_rearm(struct ice_rx_queue *rxq)
{
	return ice_rxq_rearm_common(rxq, false);
}
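
/**
 * Convert the flow director IDs of 8 descriptors into per-packet
 * PKT_RX_FDIR/PKT_RX_FDIR_ID flag words: lanes whose flow_id equals the
 * 0xFFFFFFFF "mismatch" magic end up with no flags set, all other lanes
 * get both flags.
 */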
static __rte_always_inline __m256i
ice_flex_rxd_to_fdir_flags_vec_avx2(const __m256i fdir_id0_7)
{
#define FDID_MIS_MAGIC 0xFFFFFFFF
	RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
	RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
	const __m256i pkt_fdir_bit = _mm256_set1_epi32(PKT_RX_FDIR |
			PKT_RX_FDIR_ID);
	/* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
	const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
	__m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
			fdir_mis_mask);
	/* XOR with all-ones inverts the mask: filter-match lanes become all-ones */
	fdir_mask = _mm256_xor_si256(fdir_mask, fdir_mis_mask);
	const __m256i fdir_flags = _mm256_and_si256(fdir_mask, pkt_fdir_bit);

	return fdir_flags;
}

static inline uint16_t
_ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
			    uint16_t nb_pkts, uint8_t *split_packet)
{
#define ICE_DESCS_PER_LOOP_AVX 8

	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
	const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
			0, rxq->mbuf_initializer);
	struct ice_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail];
	volatile union ice_rx_flex_desc *rxdp = rxq->rx_ring + rxq->rx_tail;
	const int avx_aligned = ((rxq->rx_tail & 1) == 0);

	rte_prefetch0(rxdp);

	/* nb_pkts has to be floor-aligned to ICE_DESCS_PER_LOOP_AVX */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, ICE_DESCS_PER_LOOP_AVX);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > ICE_RXQ_REARM_THRESH)
		ice_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.status_error0 &
			rte_cpu_to_le_32(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
		return 0;

	/* constants used in processing loop */
	const __m256i crc_adjust =
		_mm256_set_epi16
			(/* first descriptor */
			 0, 0, 0,	/* ignore non-length fields */
			 -rxq->crc_len,	/* sub crc on data_len */
			 0,		/* ignore high-16bits of pkt_len */
			 -rxq->crc_len,	/* sub crc on pkt_len */
			 0, 0,		/* ignore pkt_type field */
			 /* second descriptor */
			 0, 0, 0,	/* ignore non-length fields */
			 -rxq->crc_len,	/* sub crc on data_len */
			 0,		/* ignore high-16bits of pkt_len */
			 -rxq->crc_len,	/* sub crc on pkt_len */
			 0, 0		/* ignore pkt_type field */
			);

	/* 8 packets DD mask, LSB in each 32-bit value */
	const __m256i dd_check = _mm256_set1_epi32(1);

	/* 8 packets EOP mask, second-LSB in each 32-bit value */
	const __m256i eop_check = _mm256_slli_epi32(dd_check,
			ICE_RX_DESC_STATUS_EOF_S);

	/* mask to shuffle from desc. to mbuf (2 descriptors) */
	const __m256i shuf_msk =
		_mm256_set_epi8
			(/* first descriptor */
			 0xFF, 0xFF,
			 0xFF, 0xFF,	/* rss hash parsed separately */
			 11, 10,	/* octet 10~11, 16 bits vlan_macip */
			 5, 4,		/* octet 4~5, 16 bits data_len */
			 0xFF, 0xFF,	/* skip hi 16 bits pkt_len, zero out */
			 5, 4,		/* octet 4~5, 16 bits pkt_len */
			 0xFF, 0xFF,	/* pkt_type set as unknown */
			 0xFF, 0xFF,	/* pkt_type set as unknown */
			 /* second descriptor */
			 0xFF, 0xFF,
			 0xFF, 0xFF,	/* rss hash parsed separately */
			 11, 10,	/* octet 10~11, 16 bits vlan_macip */
			 5, 4,		/* octet 4~5, 16 bits data_len */
			 0xFF, 0xFF,	/* skip hi 16 bits pkt_len, zero out */
			 5, 4,		/* octet 4~5, 16 bits pkt_len */
			 0xFF, 0xFF,	/* pkt_type set as unknown */
			 0xFF, 0xFF	/* pkt_type set as unknown */
			);
	/**
	 * compile-time check the above crc and shuffle layout is correct.
	 * NOTE: the first field (lowest address) is given last in set_epi
	 * calls above.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

	/* Status/Error flag masks */
	/**
	 * mask everything except Checksum Reports, RSS indication
	 * and VLAN indication.
	 * bits 7:4 are the IP/L4/outer checksum error reports.
	 * bit12 is for RSS indication.
	 * bit13 is for VLAN indication.
	 */
	const __m256i flags_mask =
		_mm256_set1_epi32((0xF << 4) | (1 << 12) | (1 << 13));
	/**
	 * data to be shuffled by the result of the flags mask shifted by 4
	 * bits. This gives us the l3_l4 flags.
	 */
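	/**
	 * Each 128-bit half of the table below has 16 entries, indexed by
	 * the four checksum-error bits of status_error0 (inner IP, inner L4,
	 * outer IP, outer L4) once the flag bits have been shifted right by 4.
	 */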
	const __m256i l3_l4_flags_shuf =
		_mm256_set_epi8((PKT_RX_OUTER_L4_CKSUM_BAD >> 20 |
		 PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
		 PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
		 PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
		 PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
		 PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
		 PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
		 PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
		 PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
		 PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
		 PKT_RX_IP_CKSUM_GOOD) >> 1,
		/**
		 * second 128-bits
		 * shift right 20 bits to use the low two bits to indicate
		 * outer checksum status
		 * shift right 1 bit to make sure it does not exceed 255
		 */
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
		 PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
		 PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
		 PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
		 PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
		 PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
		 PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
		 PKT_RX_IP_CKSUM_GOOD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
		 PKT_RX_IP_CKSUM_BAD) >> 1,
		(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
		 PKT_RX_IP_CKSUM_GOOD) >> 1);
	const __m256i cksum_mask =
		_mm256_set1_epi32(PKT_RX_IP_CKSUM_MASK |
				  PKT_RX_L4_CKSUM_MASK |
				  PKT_RX_OUTER_IP_CKSUM_BAD |
				  PKT_RX_OUTER_L4_CKSUM_MASK);
	/**
	 * data to be shuffled by result of flag mask, shifted down 12.
	 * If RSS(bit12)/VLAN(bit13) are set,
	 * shuffle moves appropriate flags in place.
	 */
	const __m256i rss_vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
			0, 0, 0, 0,
			0, 0, 0, 0,
			PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			PKT_RX_RSS_HASH, 0,
			/* end up 128-bits */
			0, 0, 0, 0,
			0, 0, 0, 0,
			0, 0, 0, 0,
			PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			PKT_RX_RSS_HASH, 0);

	RTE_SET_USED(avx_aligned); /* for 32B descriptors we don't use this */

	uint16_t i, received;

	for (i = 0, received = 0; i < nb_pkts;
	     i += ICE_DESCS_PER_LOOP_AVX,
	     rxdp += ICE_DESCS_PER_LOOP_AVX) {
		/* step 1, copy over 8 mbuf pointers to rx_pkts array */
		_mm256_storeu_si256((void *)&rx_pkts[i],
				    _mm256_loadu_si256((void *)&sw_ring[i]));
#ifdef RTE_ARCH_X86_64
		_mm256_storeu_si256
			((void *)&rx_pkts[i + 4],
			 _mm256_loadu_si256((void *)&sw_ring[i + 4]));
#endif

		__m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7;
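		/**
		 * The descriptors are loaded in reverse order, with compiler
		 * barriers between the loads so the compiler cannot reorder
		 * or merge the reads of the DMA-visible ring.
		 */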
#ifdef RTE_LIBRTE_ICE_16BYTE_RX_DESC
		/* for AVX we need alignment otherwise loads are not atomic */
		if (avx_aligned) {
			/* load in descriptors, 2 at a time, in reverse order */
			raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6));
			rte_compiler_barrier();
			raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4));
			rte_compiler_barrier();
			raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2));
			rte_compiler_barrier();
			raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0));
		} else
#endif
		{
			const __m128i raw_desc7 =
				_mm_load_si128((void *)(rxdp + 7));
			rte_compiler_barrier();
			const __m128i raw_desc6 =
				_mm_load_si128((void *)(rxdp + 6));
			rte_compiler_barrier();
			const __m128i raw_desc5 =
				_mm_load_si128((void *)(rxdp + 5));
			rte_compiler_barrier();
			const __m128i raw_desc4 =
				_mm_load_si128((void *)(rxdp + 4));
			rte_compiler_barrier();
			const __m128i raw_desc3 =
				_mm_load_si128((void *)(rxdp + 3));
			rte_compiler_barrier();
			const __m128i raw_desc2 =
				_mm_load_si128((void *)(rxdp + 2));
			rte_compiler_barrier();
			const __m128i raw_desc1 =
				_mm_load_si128((void *)(rxdp + 1));
			rte_compiler_barrier();
			const __m128i raw_desc0 =
				_mm_load_si128((void *)(rxdp + 0));

			raw_desc6_7 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc6),
					 raw_desc7, 1);
			raw_desc4_5 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc4),
					 raw_desc5, 1);
			raw_desc2_3 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc2),
					 raw_desc3, 1);
			raw_desc0_1 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc0),
					 raw_desc1, 1);
		}

		if (split_packet) {
			int j;

			for (j = 0; j < ICE_DESCS_PER_LOOP_AVX; j++)
				rte_mbuf_prefetch_part2(rx_pkts[i + j]);
		}

		/**
		 * convert descriptors 4-7 into mbufs, re-arrange fields.
		 * Then write into the mbuf.
		 */
		__m256i mb6_7 = _mm256_shuffle_epi8(raw_desc6_7, shuf_msk);
		__m256i mb4_5 = _mm256_shuffle_epi8(raw_desc4_5, shuf_msk);

		mb6_7 = _mm256_add_epi16(mb6_7, crc_adjust);
		mb4_5 = _mm256_add_epi16(mb4_5, crc_adjust);
		/**
		 * to get packet types, ptype is located in bit16-25
		 * of each 128bits
		 */
		const __m256i ptype_mask =
			_mm256_set1_epi16(ICE_RX_FLEX_DESC_PTYPE_M);
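		/* the ptype table expands the 10-bit hardware ptype into the
		 * full packet_type value that is written into each mbuf
		 */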
		const __m256i ptypes6_7 =
			_mm256_and_si256(raw_desc6_7, ptype_mask);
		const __m256i ptypes4_5 =
			_mm256_and_si256(raw_desc4_5, ptype_mask);
		const uint16_t ptype7 = _mm256_extract_epi16(ptypes6_7, 9);
		const uint16_t ptype6 = _mm256_extract_epi16(ptypes6_7, 1);
		const uint16_t ptype5 = _mm256_extract_epi16(ptypes4_5, 9);
		const uint16_t ptype4 = _mm256_extract_epi16(ptypes4_5, 1);

		mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype7], 4);
		mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype6], 0);
		mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype5], 4);
		mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype4], 0);
		/* merge the status bits into one register */
		const __m256i status4_7 = _mm256_unpackhi_epi32(raw_desc6_7,
				raw_desc4_5);

		/**
		 * convert descriptors 0-3 into mbufs, re-arrange fields.
		 * Then write into the mbuf.
		 */
		__m256i mb2_3 = _mm256_shuffle_epi8(raw_desc2_3, shuf_msk);
		__m256i mb0_1 = _mm256_shuffle_epi8(raw_desc0_1, shuf_msk);

		mb2_3 = _mm256_add_epi16(mb2_3, crc_adjust);
		mb0_1 = _mm256_add_epi16(mb0_1, crc_adjust);
		/**
		 * to get packet types, ptype is located in bit16-25
		 * of each 128bits
		 */
		const __m256i ptypes2_3 =
			_mm256_and_si256(raw_desc2_3, ptype_mask);
		const __m256i ptypes0_1 =
			_mm256_and_si256(raw_desc0_1, ptype_mask);
		const uint16_t ptype3 = _mm256_extract_epi16(ptypes2_3, 9);
		const uint16_t ptype2 = _mm256_extract_epi16(ptypes2_3, 1);
		const uint16_t ptype1 = _mm256_extract_epi16(ptypes0_1, 9);
		const uint16_t ptype0 = _mm256_extract_epi16(ptypes0_1, 1);

		mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype3], 4);
		mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype2], 0);
		mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype1], 4);
		mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype0], 0);
		/* merge the status bits into one register */
		const __m256i status0_3 = _mm256_unpackhi_epi32(raw_desc2_3,
				raw_desc0_1);

		/**
		 * take the two sets of status bits and merge to one
		 * After merge, the packets status flags are in the
		 * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
		 */
		__m256i status0_7 = _mm256_unpacklo_epi64(status4_7,
				status0_3);

		/* now do flag manipulation */

		/* get only flag/error bits we want */
		const __m256i flag_bits =
			_mm256_and_si256(status0_7, flags_mask);
		/**
		 * l3_l4_error flags, shuffle, then shift to correct adjustment
		 * of flags in flags_shuf, and finally mask out extra bits
		 */
		__m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
				_mm256_srli_epi32(flag_bits, 4));
		l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);

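		/**
		 * After the "<< 1" above the outer-L4 status sits in bits 2:1
		 * of each lane (hence the 0x6 mask below). Move it up by 20
		 * bits to its final ol_flags position, clear those bits from
		 * the inner flags, then merge the two and keep only the
		 * checksum-related flag bits.
		 */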
		__m256i l4_outer_mask = _mm256_set1_epi32(0x6);
		__m256i l4_outer_flags =
			_mm256_and_si256(l3_l4_flags, l4_outer_mask);
		l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20);

		__m256i l3_l4_mask = _mm256_set1_epi32(~0x6);
		l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask);
		l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags);
		l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
		/* set rss and vlan flags */
		const __m256i rss_vlan_flag_bits =
			_mm256_srli_epi32(flag_bits, 12);
		const __m256i rss_vlan_flags =
			_mm256_shuffle_epi8(rss_vlan_flags_shuf,
					    rss_vlan_flag_bits);

		/* merge flags */
		__m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
				rss_vlan_flags);

		if (rxq->fdir_enabled) {
			const __m256i fdir_id4_7 =
				_mm256_unpackhi_epi32(raw_desc6_7, raw_desc4_5);

			const __m256i fdir_id0_3 =
				_mm256_unpackhi_epi32(raw_desc2_3, raw_desc0_1);

			const __m256i fdir_id0_7 =
				_mm256_unpackhi_epi64(fdir_id4_7, fdir_id0_3);

			const __m256i fdir_flags =
				ice_flex_rxd_to_fdir_flags_vec_avx2(fdir_id0_7);

			/* merge with fdir_flags */
			mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_flags);

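			/**
			 * The unpack operations above interleave descriptors,
			 * so the flow IDs are not in packet order; the extract
			 * lane indices below account for that permutation.
			 */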
			/* write to mbuf: have to use scalar store here */
			rx_pkts[i + 0]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 3);

			rx_pkts[i + 1]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 7);

			rx_pkts[i + 2]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 2);

			rx_pkts[i + 3]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 6);

			rx_pkts[i + 4]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 1);

			rx_pkts[i + 5]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 5);

			rx_pkts[i + 6]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 0);

			rx_pkts[i + 7]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 4);
		} /* if() on fdir_enabled */

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
		/**
		 * needs to load 2nd 16B of each desc for RSS hash parsing,
		 * will cause performance drop to get into this context.
		 */
		if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_RSS_HASH) {
			/* load bottom half of every 32B desc */
			const __m128i raw_desc_bh7 =
				_mm_load_si128
					((void *)(&rxdp[7].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh6 =
				_mm_load_si128
					((void *)(&rxdp[6].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh5 =
				_mm_load_si128
					((void *)(&rxdp[5].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh4 =
				_mm_load_si128
					((void *)(&rxdp[4].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh3 =
				_mm_load_si128
					((void *)(&rxdp[3].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh2 =
				_mm_load_si128
					((void *)(&rxdp[2].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh1 =
				_mm_load_si128
					((void *)(&rxdp[1].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh0 =
				_mm_load_si128
					((void *)(&rxdp[0].wb.status_error1));

			__m256i raw_desc_bh6_7 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc_bh6),
					 raw_desc_bh7, 1);
			__m256i raw_desc_bh4_5 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc_bh4),
					 raw_desc_bh5, 1);
			__m256i raw_desc_bh2_3 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc_bh2),
					 raw_desc_bh3, 1);
			__m256i raw_desc_bh0_1 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc_bh0),
					 raw_desc_bh1, 1);

			/**
			 * to shift the 32b RSS hash value to the
			 * highest 32b of each 128b before mask
			 */
			__m256i rss_hash6_7 =
				_mm256_slli_epi64(raw_desc_bh6_7, 32);
			__m256i rss_hash4_5 =
				_mm256_slli_epi64(raw_desc_bh4_5, 32);
			__m256i rss_hash2_3 =
				_mm256_slli_epi64(raw_desc_bh2_3, 32);
			__m256i rss_hash0_1 =
				_mm256_slli_epi64(raw_desc_bh0_1, 32);

			__m256i rss_hash_msk =
				_mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
						 0xFFFFFFFF, 0, 0, 0);

			rss_hash6_7 = _mm256_and_si256
					(rss_hash6_7, rss_hash_msk);
			rss_hash4_5 = _mm256_and_si256
					(rss_hash4_5, rss_hash_msk);
			rss_hash2_3 = _mm256_and_si256
					(rss_hash2_3, rss_hash_msk);
			rss_hash0_1 = _mm256_and_si256
					(rss_hash0_1, rss_hash_msk);

			mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
			mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
			mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
			mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
		} /* if() on RSS hash parsing */
#endif

		/**
		 * At this point, we have the 8 sets of flags in each 32-bit
		 * lane of mbuf_flags.
		 * We want to extract these, and merge them with the mbuf init
		 * data so we can do a single write to the mbuf to set the flags
		 * and all the other initialization fields. Extracting the
		 * appropriate flags means that we have to do a shift and blend
		 * for each mbuf before we do the write. However, we can also
		 * add in the previously computed rx_descriptor fields to
		 * make a single 256-bit write per mbuf
		 */
		/* check the structure matches expectations */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
				 offsetof(struct rte_mbuf, rearm_data) + 8);
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
				 RTE_ALIGN(offsetof(struct rte_mbuf,
						    rearm_data),
					   16));

		/* build up data and do writes */
		__m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
			rearm6, rearm7;
		rearm6 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(mbuf_flags, 8),
					    0x04);
		rearm4 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(mbuf_flags, 4),
					    0x04);
		rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04);
		rearm0 = _mm256_blend_epi32(mbuf_init,
					    _mm256_srli_si256(mbuf_flags, 4),
					    0x04);
		/* permute to add in the rx_descriptor e.g. rss fields */
		rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
		rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
		rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
		rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
		/* write to mbuf */
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data,
				    rearm6);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data,
				    rearm4);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data,
				    rearm2);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data,
				    rearm0);

		/* repeat for the odd mbufs */
		const __m256i odd_flags =
			_mm256_castsi128_si256
				(_mm256_extracti128_si256(mbuf_flags, 1));
		rearm7 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(odd_flags, 8),
					    0x04);
		rearm5 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(odd_flags, 4),
					    0x04);
		rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04);
		rearm1 = _mm256_blend_epi32(mbuf_init,
					    _mm256_srli_si256(odd_flags, 4),
					    0x04);
		/* since odd mbufs are already in hi 128-bits use blend */
		rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
		rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
		rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
		rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
		/* again write to mbufs */
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data,
				    rearm7);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data,
				    rearm5);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data,
				    rearm3);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data,
				    rearm1);

		/* extract and record EOP bit */
		if (split_packet) {
			const __m128i eop_mask =
				_mm_set1_epi16(1 << ICE_RX_DESC_STATUS_EOF_S);
			const __m256i eop_bits256 = _mm256_and_si256(status0_7,
								     eop_check);
			/* pack status bits into a single 128-bit register */
			const __m128i eop_bits =
				_mm_packus_epi32
					(_mm256_castsi256_si128(eop_bits256),
					 _mm256_extractf128_si256(eop_bits256,
								  1));
			/**
			 * flip bits, and mask out the EOP bit, which is now
			 * a split-packet bit i.e. !EOP, rather than EOP one.
			 */
			__m128i split_bits = _mm_andnot_si128(eop_bits,
					eop_mask);
			/**
			 * eop bits are out of order, so we need to shuffle them
			 * back into order again. In doing so, only use low 8
			 * bits, which acts like another pack instruction
			 * The original order is (hi->lo): 1,3,5,7,0,2,4,6
			 * [Since we use epi8, the 16-bit positions are
			 * multiplied by 2 in the eop_shuffle value.]
			 */
			__m128i eop_shuffle =
				_mm_set_epi8(/* zero hi 64b */
					     0xFF, 0xFF, 0xFF, 0xFF,
					     0xFF, 0xFF, 0xFF, 0xFF,
					     /* move values to lo 64b */
					     8, 0, 10, 2,
					     12, 4, 14, 6);
			split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
			*(uint64_t *)split_packet =
				_mm_cvtsi128_si64(split_bits);
			split_packet += ICE_DESCS_PER_LOOP_AVX;
		}

		/* perform dd_check */
		status0_7 = _mm256_and_si256(status0_7, dd_check);
		status0_7 = _mm256_packs_epi32(status0_7,
					       _mm256_setzero_si256());
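
		/* count the DD bits that are set to see how many of the 8
		 * descriptors in this batch have actually been written back
		 */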
		uint64_t burst = __builtin_popcountll
					(_mm_cvtsi128_si64
						(_mm256_extracti128_si256
							(status0_7, 1)));
		burst += __builtin_popcountll
				(_mm_cvtsi128_si64
					(_mm256_castsi256_si128(status0_7)));
		received += burst;
		if (burst != ICE_DESCS_PER_LOOP_AVX)
			break;
	}

	/* update tail pointers */
	rxq->rx_tail += received;
	rxq->rx_tail &= (rxq->nb_rx_desc - 1);
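	/**
	 * If an odd number of descriptors was consumed, roll back one so
	 * rx_tail stays 2-descriptor aligned; the 16B-descriptor path relies
	 * on aligned 32B loads.
	 */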
	if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */
		rxq->rx_tail--;
		received--;
	}
	rxq->rxrearm_nb += received;
	return received;
}

/**
 * Notice:
 * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
 */
uint16_t
ice_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
		       uint16_t nb_pkts)
{
	return _ice_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts, NULL);
}

/**
 * vPMD receive routine that reassembles single burst of 32 scattered packets
 * Notice:
 * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
 */
static uint16_t
ice_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts)
{
	struct ice_rx_queue *rxq = rx_queue;
	uint8_t split_flags[ICE_VPMD_RX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx2(rxq, rx_pkts, nb_pkts,
						       split_flags);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;

	if (!rxq->pkt_first_seg &&
	    split_fl64[0] == 0 && split_fl64[1] == 0 &&
	    split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	unsigned int i = 0;

	if (!rxq->pkt_first_seg) {
		/* find the first split flag, and only reassemble then */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
		rxq->pkt_first_seg = rx_pkts[i];
	}
	return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
					     &split_flags[i]);
}

/**
 * vPMD receive routine that reassembles scattered packets.
 * Main receive routine that can handle arbitrary burst sizes
 * Notice:
 * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
 */
uint16_t
ice_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
				 uint16_t nb_pkts)
{
	uint16_t retval = 0;

	while (nb_pkts > ICE_VPMD_RX_BURST) {
		uint16_t burst = ice_recv_scattered_burst_vec_avx2(rx_queue,
				rx_pkts + retval, ICE_VPMD_RX_BURST);
		retval += burst;
		nb_pkts -= burst;
		if (burst < ICE_VPMD_RX_BURST)
			return retval;
	}
	return retval + ice_recv_scattered_burst_vec_avx2(rx_queue,
				rx_pkts + retval, nb_pkts);
}
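
/**
 * Write a single Tx data descriptor: QW0 carries the buffer DMA address,
 * QW1 carries the descriptor type, command flags and data length.
 */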
static __rte_always_inline void
ice_vtx1(volatile struct ice_tx_desc *txdp,
	 struct rte_mbuf *pkt, uint64_t flags)
{
	uint64_t high_qw =
		(ICE_TX_DESC_DTYPE_DATA |
		 ((uint64_t)flags << ICE_TXD_QW1_CMD_S) |
		 ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S));

	__m128i descriptor = _mm_set_epi64x(high_qw,
				pkt->buf_iova + pkt->data_off);
	_mm_store_si128((__m128i *)txdp, descriptor);
}

static __rte_always_inline void
ice_vtx(volatile struct ice_tx_desc *txdp,
	struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
	const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA |
			((uint64_t)flags << ICE_TXD_QW1_CMD_S));

	/* if unaligned on a 32-byte boundary, do one descriptor to align */
	if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) {
		ice_vtx1(txdp, *pkt, flags);
		nb_pkts--, txdp++, pkt++;
	}

	/* do 4 at a time while possible, writing 2 descriptors per store */
	for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) {
		uint64_t hi_qw3 =
			hi_qw_tmpl |
			((uint64_t)pkt[3]->data_len <<
			 ICE_TXD_QW1_TX_BUF_SZ_S);
		uint64_t hi_qw2 =
			hi_qw_tmpl |
			((uint64_t)pkt[2]->data_len <<
			 ICE_TXD_QW1_TX_BUF_SZ_S);
		uint64_t hi_qw1 =
			hi_qw_tmpl |
			((uint64_t)pkt[1]->data_len <<
			 ICE_TXD_QW1_TX_BUF_SZ_S);
		uint64_t hi_qw0 =
			hi_qw_tmpl |
			((uint64_t)pkt[0]->data_len <<
			 ICE_TXD_QW1_TX_BUF_SZ_S);

		__m256i desc2_3 =
			_mm256_set_epi64x
				(hi_qw3,
				 pkt[3]->buf_iova + pkt[3]->data_off,
				 hi_qw2,
				 pkt[2]->buf_iova + pkt[2]->data_off);
		__m256i desc0_1 =
			_mm256_set_epi64x
				(hi_qw1,
				 pkt[1]->buf_iova + pkt[1]->data_off,
				 hi_qw0,
				 pkt[0]->buf_iova + pkt[0]->data_off);
		_mm256_store_si256((void *)(txdp + 2), desc2_3);
		_mm256_store_si256((void *)txdp, desc0_1);
	}

	/* do any last ones */
	while (nb_pkts) {
		ice_vtx1(txdp, *pkt, flags);
		txdp++, pkt++, nb_pkts--;
	}
}

static inline uint16_t
ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
			      uint16_t nb_pkts)
{
	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
	volatile struct ice_tx_desc *txdp;
	struct ice_tx_entry *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = ICE_TD_CMD;
	uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD;
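
	/* RS (report status) is requested only on selected descriptors so the
	 * hardware does not have to write back a completion for every packet
	 */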

	/* crossing the tx_rs_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

	if (txq->nb_tx_free < txq->tx_free_thresh)
		ice_tx_free_bufs_vec(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &txq->sw_ring[tx_id];

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {
		ice_tx_backlog_entry(txep, tx_pkts, n);

		ice_vtx(txdp, tx_pkts, n - 1, flags);
		tx_pkts += (n - 1);
		txdp += (n - 1);

		ice_vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* avoid reaching the end of the ring */
		txdp = &txq->tx_ring[tx_id];
		txep = &txq->sw_ring[tx_id];
	}

	ice_tx_backlog_entry(txep, tx_pkts, nb_commit);

	ice_vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
			rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
					 ICE_TXD_QW1_CMD_S);
		txq->tx_next_rs =
			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
	}

	txq->tx_tail = tx_id;
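
	/* ring the doorbell: update the hardware tail register with a
	 * write-combining store
	 */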
	ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);

	return nb_pkts;
}

uint16_t
ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
		       uint16_t nb_pkts)
{
	uint16_t nb_tx = 0;
	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;

	while (nb_pkts) {
		uint16_t ret, num;

		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
		ret = ice_xmit_fixed_burst_vec_avx2(tx_queue, &tx_pkts[nb_tx],