/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_vect.h>

#include "base/i40e_prototype.h"
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_rxtx_vec_common.h"
#include "i40e_rxtx_common_avx.h"

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

static __rte_always_inline void
i40e_rxq_rearm(struct i40e_rx_queue *rxq)
{
	return i40e_rxq_rearm_common(rxq, false);
}

#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
/* Handles 32B descriptor FDIR ID processing:
 * rxdp: receive descriptor ring, required to load 2nd 16B half of each desc
 * rx_pkts: required to store metadata back to mbufs
 * pkt_idx: offset into the burst, increments in vector widths
 * desc_idx: required to select the correct shift at compile time
 */
static inline __m256i
desc_fdir_processing_32b(volatile union i40e_rx_desc *rxdp,
			 struct rte_mbuf **rx_pkts,
			 const uint32_t pkt_idx,
			 const uint32_t desc_idx)
{
	/* 32B desc path: load rxdp.wb.qword2 for EXT_STATUS and FLEXBH_STAT */
	__m128i *rxdp_desc_0 = (void *)(&rxdp[desc_idx + 0].wb.qword2);
	__m128i *rxdp_desc_1 = (void *)(&rxdp[desc_idx + 1].wb.qword2);
	const __m128i desc_qw2_0 = _mm_load_si128(rxdp_desc_0);
	const __m128i desc_qw2_1 = _mm_load_si128(rxdp_desc_1);
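
	/* Each 16B load above covers qword2/qword3 (the second half) of one
	 * 32B descriptor, so together they hold the extended status and the
	 * FDIR ID fields for two consecutive descriptors.
	 */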

	/* Mask for FLEXBH_STAT, and the FDIR_ID value to compare against. The
	 * remaining data is set to all 1's to pass through data.
	 */
	const __m256i flexbh_mask = _mm256_set_epi32(-1, -1, -1, 3 << 4,
						     -1, -1, -1, 3 << 4);
	const __m256i flexbh_id   = _mm256_set_epi32(-1, -1, -1, 1 << 4,
						     -1, -1, -1, 1 << 4);

	/* Load descriptor, check for FLEXBH bits, generate a mask for both
	 * packets in the register.
	 */
	__m256i desc_qw2_0_1 =
		_mm256_inserti128_si256(_mm256_castsi128_si256(desc_qw2_0),
					desc_qw2_1, 1);
	__m256i desc_tmp_msk = _mm256_and_si256(flexbh_mask, desc_qw2_0_1);
	__m256i fdir_mask = _mm256_cmpeq_epi32(flexbh_id, desc_tmp_msk);
	__m256i fdir_data = _mm256_alignr_epi8(desc_qw2_0_1, desc_qw2_0_1, 12);
	__m256i desc_fdir_data = _mm256_and_si256(fdir_mask, fdir_data);
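
	/* The alignr above rotates each 128-bit lane right by 12 bytes so the
	 * FDIR ID dword (the top dword of the 16B half) lines up with the
	 * dword lane in which the FLEXBH compare mask was built.
	 */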

	/* Write data out to the mbuf. There is no store to this area of the
	 * mbuf today, so we cannot combine it with another store.
	 */
	const uint32_t idx_0 = pkt_idx + desc_idx;
	const uint32_t idx_1 = pkt_idx + desc_idx + 1;
	rx_pkts[idx_0]->hash.fdir.hi = _mm256_extract_epi32(desc_fdir_data, 0);
	rx_pkts[idx_1]->hash.fdir.hi = _mm256_extract_epi32(desc_fdir_data, 4);

	/* Create mbuf flags as required for mbuf_flags layout
	 * (That's high lane [1,3,5,7, 0,2,4,6] as u32 lanes).
	 * - Mask away bits not required from the fdir_mask
	 * - Leave the PKT_FDIR_ID bit (1 << 13)
	 * - Position that bit correctly based on packet number
	 * - OR in the resulting bit to mbuf_flags
	 */
	RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
	__m256i mbuf_flag_mask = _mm256_set_epi32(0, 0, 0, 1 << 13,
						  0, 0, 0, 1 << 13);
	__m256i desc_flag_bit = _mm256_and_si256(mbuf_flag_mask, fdir_mask);

	/* For a static-inline function, the switch below is stripped out
	 * at compile time, as desc_idx is a hard-coded constant.
	 */
	switch (desc_idx) {
	case 0:
		return _mm256_alignr_epi8(desc_flag_bit, desc_flag_bit, 4);
	case 2:
		return _mm256_alignr_epi8(desc_flag_bit, desc_flag_bit, 8);
	case 4:
		return _mm256_alignr_epi8(desc_flag_bit, desc_flag_bit, 12);
	case 6:
		return desc_flag_bit;
	default:
		break;
	}

	/* NOT REACHED, see above switch returns */
	return _mm256_setzero_si256();
}
#endif /* RTE_LIBRTE_I40E_16BYTE_RX_DESC */

#define PKTLEN_SHIFT 10

/* Force inline as some compilers will not inline by default. */
static __rte_always_inline uint16_t
_recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts, uint8_t *split_packet)
{
#define RTE_I40E_DESCS_PER_LOOP_AVX 8

	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
	const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
			0, rxq->mbuf_initializer);
	struct i40e_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail];
	volatile union i40e_rx_desc *rxdp = rxq->rx_ring + rxq->rx_tail;
	const int avx_aligned = ((rxq->rx_tail & 1) == 0);

	rte_prefetch0(rxdp);

	/* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP_AVX */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP_AVX);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
		i40e_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.qword1.status_error_len &
			rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
		return 0;

	/* constants used in processing loop */
	const __m256i crc_adjust = _mm256_set_epi16(
			/* first descriptor */
			0, 0, 0,       /* ignore non-length fields */
			-rxq->crc_len, /* sub crc on data_len */
			0,             /* ignore high-16bits of pkt_len */
			-rxq->crc_len, /* sub crc on pkt_len */
			0, 0,          /* ignore pkt_type field */
			/* second descriptor */
			0, 0, 0,       /* ignore non-length fields */
			-rxq->crc_len, /* sub crc on data_len */
			0,             /* ignore high-16bits of pkt_len */
			-rxq->crc_len, /* sub crc on pkt_len */
			0, 0           /* ignore pkt_type field */
	);

	/* 8 packets DD mask, LSB in each 32-bit value */
	const __m256i dd_check = _mm256_set1_epi32(1);

	/* 8 packets EOP mask, second-LSB in each 32-bit value */
	const __m256i eop_check = _mm256_slli_epi32(dd_check,
			I40E_RX_DESC_STATUS_EOF_SHIFT);

	/* mask to shuffle from desc. to mbuf (2 descriptors) */
	const __m256i shuf_msk = _mm256_set_epi8(
			/* first descriptor */
			7, 6, 5, 4,  /* octet 4~7, 32bits rss */
			3, 2,        /* octet 2~3, low 16 bits vlan_macip */
			15, 14,      /* octet 15~14, 16 bits data_len */
			0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
			15, 14,      /* octet 15~14, low 16 bits pkt_len */
			0xFF, 0xFF,  /* pkt_type set as unknown */
			0xFF, 0xFF,  /* pkt_type set as unknown */
			/* second descriptor */
			7, 6, 5, 4,  /* octet 4~7, 32bits rss */
			3, 2,        /* octet 2~3, low 16 bits vlan_macip */
			15, 14,      /* octet 15~14, 16 bits data_len */
			0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
			15, 14,      /* octet 15~14, low 16 bits pkt_len */
			0xFF, 0xFF,  /* pkt_type set as unknown */
			0xFF, 0xFF   /* pkt_type set as unknown */
	);

	/* compile-time check the above crc and shuffle layout is correct.
	 * NOTE: the first field (lowest address) is given last in set_epi
	 * calls above.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

	/* Status/Error flag masks */
	/* mask everything except RSS, flow director and VLAN flags
	 * bit 2 is for VLAN tag, bit 11 for flow director indication,
	 * bits 13:12 for RSS indication. Bits 3-5 of the error
	 * field (bits 22-24) are for IP/L4 checksum errors.
	 */
	const __m256i flags_mask = _mm256_set1_epi32(
			(1 << 2) | (1 << 11) | (3 << 12) | (7 << 22));

	/* data to be shuffled by result of flag mask. If VLAN bit is set,
	 * (bit 2), then position 4 in this array will be used in the
	 * shuffle
	 */
	const __m256i vlan_flags_shuf = _mm256_set_epi32(
			0, 0, RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0,
			0, 0, RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0);

	/* data to be shuffled by result of flag mask, shifted down 11.
	 * If RSS/FDIR bits are set, shuffle moves appropriate flags in
	 * place.
	 */
	const __m256i rss_flags_shuf = _mm256_set_epi8(
			0, 0, 0, 0, 0, 0, 0, 0,
			RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_RSS_HASH, 0, 0,
			0, 0, RTE_MBUF_F_RX_FDIR, 0, /* end up 128-bits */
			0, 0, 0, 0, 0, 0, 0, 0,
			RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_RSS_HASH, 0, 0,
			0, 0, RTE_MBUF_F_RX_FDIR, 0);
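
	/* Note on indexing: after the >>11 shift applied in the loop below,
	 * the low byte of each 32-bit status word holds the FDIR bit (bit 0)
	 * and the 2-bit RSS/FLTSTAT field (bits 1-2), so vpshufb picks one of
	 * entries 0-7 of the table above for each packet.
	 */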

	/* data to be shuffled by the result of the flags mask shifted by 22
	 * bits. This gives us the l3_l4 flags.
	 */
	const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
			/* shift right 1 bit to make sure it does not exceed 255 */
			(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
			 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
			(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
			 RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
			(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
			 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
			(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
			 RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
			(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
			(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
			(RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
			(RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
			/* second 128-bits */
			0, 0, 0, 0, 0, 0, 0, 0,
			(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
			 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
			(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
			 RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
			(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
			 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
			(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
			 RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
			(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
			(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
			(RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
			(RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1);
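
	/* The table index is the 3-bit checksum error field (descriptor bits
	 * 22-24 after the >>22 shift in the loop): IP error, L4 error,
	 * outer-IP error. Entries are pre-shifted right by 1 so each fits in
	 * a byte; the loop shifts the shuffled result left by 1 to restore
	 * the real ol_flags values.
	 */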

	const __m256i cksum_mask = _mm256_set1_epi32(
			RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
			RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
			RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);

	RTE_SET_USED(avx_aligned); /* for 32B descriptors we don't use this */

	uint16_t i, received;
	for (i = 0, received = 0; i < nb_pkts;
			i += RTE_I40E_DESCS_PER_LOOP_AVX,
			rxdp += RTE_I40E_DESCS_PER_LOOP_AVX) {
		/* step 1, copy over 8 mbuf pointers to rx_pkts array */
		_mm256_storeu_si256((void *)&rx_pkts[i],
				_mm256_loadu_si256((void *)&sw_ring[i]));
#ifdef RTE_ARCH_X86_64
		_mm256_storeu_si256((void *)&rx_pkts[i + 4],
				_mm256_loadu_si256((void *)&sw_ring[i + 4]));
#endif

		__m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7;
#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC
		/* for AVX we need alignment otherwise loads are not atomic */
		if (avx_aligned) {
			/* load in descriptors, 2 at a time, in reverse order */
			raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6));
			rte_compiler_barrier();
			raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4));
			rte_compiler_barrier();
			raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2));
			rte_compiler_barrier();
			raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0));
		} else
#endif
		do {
			const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7));
			rte_compiler_barrier();
			const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6));
			rte_compiler_barrier();
			const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5));
			rte_compiler_barrier();
			const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4));
			rte_compiler_barrier();
			const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3));
			rte_compiler_barrier();
			const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2));
			rte_compiler_barrier();
			const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1));
			rte_compiler_barrier();
			const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0));

			raw_desc6_7 = _mm256_inserti128_si256(
					_mm256_castsi128_si256(raw_desc6), raw_desc7, 1);
			raw_desc4_5 = _mm256_inserti128_si256(
					_mm256_castsi128_si256(raw_desc4), raw_desc5, 1);
			raw_desc2_3 = _mm256_inserti128_si256(
					_mm256_castsi128_si256(raw_desc2), raw_desc3, 1);
			raw_desc0_1 = _mm256_inserti128_si256(
					_mm256_castsi128_si256(raw_desc0), raw_desc1, 1);
		} while (0);
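
		/* The 128-bit loads above read the descriptors newest-first
		 * with compiler barriers in between: the NIC writes them back
		 * in ascending order, so any descriptor whose DD bit is seen
		 * as set has had its lower-numbered neighbours completed
		 * before we load them afterwards.
		 */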

		if (split_packet) {
			int j;

			for (j = 0; j < RTE_I40E_DESCS_PER_LOOP_AVX; j++)
				rte_mbuf_prefetch_part2(rx_pkts[i + j]);
		}

		/* convert descriptors 4-7 into mbufs, adjusting length and
		 * re-arranging fields. Then write into the mbuf
		 */
		const __m256i len6_7 = _mm256_slli_epi32(raw_desc6_7, PKTLEN_SHIFT);
		const __m256i len4_5 = _mm256_slli_epi32(raw_desc4_5, PKTLEN_SHIFT);
		const __m256i desc6_7 = _mm256_blend_epi16(raw_desc6_7, len6_7, 0x80);
		const __m256i desc4_5 = _mm256_blend_epi16(raw_desc4_5, len4_5, 0x80);
		__m256i mb6_7 = _mm256_shuffle_epi8(desc6_7, shuf_msk);
		__m256i mb4_5 = _mm256_shuffle_epi8(desc4_5, shuf_msk);
		mb6_7 = _mm256_add_epi16(mb6_7, crc_adjust);
		mb4_5 = _mm256_add_epi16(mb4_5, crc_adjust);

		/* to get packet types, shift 64-bit values down 30 bits
		 * and so ptype is in lower 8-bits in each
		 */
		const __m256i ptypes6_7 = _mm256_srli_epi64(desc6_7, 30);
		const __m256i ptypes4_5 = _mm256_srli_epi64(desc4_5, 30);
		const uint8_t ptype7 = _mm256_extract_epi8(ptypes6_7, 24);
		const uint8_t ptype6 = _mm256_extract_epi8(ptypes6_7, 8);
		const uint8_t ptype5 = _mm256_extract_epi8(ptypes4_5, 24);
		const uint8_t ptype4 = _mm256_extract_epi8(ptypes4_5, 8);
		mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype7], 4);
		mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype6], 0);
		mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype5], 4);
		mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype4], 0);
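
		/* ptype_tbl converts the 8-bit hardware packet type (qword1
		 * bits 30-37, hence the 30-bit shift above) into the
		 * RTE_PTYPE_* value inserted into each mbuf's packet_type
		 * slot of the shuffled register.
		 */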

		/* merge the status bits into one register */
		const __m256i status4_7 = _mm256_unpackhi_epi32(desc6_7,
				desc4_5);

		/* convert descriptors 0-3 into mbufs, adjusting length and
		 * re-arranging fields. Then write into the mbuf
		 */
		const __m256i len2_3 = _mm256_slli_epi32(raw_desc2_3, PKTLEN_SHIFT);
		const __m256i len0_1 = _mm256_slli_epi32(raw_desc0_1, PKTLEN_SHIFT);
		const __m256i desc2_3 = _mm256_blend_epi16(raw_desc2_3, len2_3, 0x80);
		const __m256i desc0_1 = _mm256_blend_epi16(raw_desc0_1, len0_1, 0x80);
		__m256i mb2_3 = _mm256_shuffle_epi8(desc2_3, shuf_msk);
		__m256i mb0_1 = _mm256_shuffle_epi8(desc0_1, shuf_msk);
		mb2_3 = _mm256_add_epi16(mb2_3, crc_adjust);
		mb0_1 = _mm256_add_epi16(mb0_1, crc_adjust);

		/* get the packet types */
		const __m256i ptypes2_3 = _mm256_srli_epi64(desc2_3, 30);
		const __m256i ptypes0_1 = _mm256_srli_epi64(desc0_1, 30);
		const uint8_t ptype3 = _mm256_extract_epi8(ptypes2_3, 24);
		const uint8_t ptype2 = _mm256_extract_epi8(ptypes2_3, 8);
		const uint8_t ptype1 = _mm256_extract_epi8(ptypes0_1, 24);
		const uint8_t ptype0 = _mm256_extract_epi8(ptypes0_1, 8);
		mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype3], 4);
		mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype2], 0);
		mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype1], 4);
		mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype0], 0);

		/* merge the status bits into one register */
		const __m256i status0_3 = _mm256_unpackhi_epi32(desc2_3,
				desc0_1);

		/* take the two sets of status bits and merge to one
		 * After merge, the packets status flags are in the
		 * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
		 */
		__m256i status0_7 = _mm256_unpacklo_epi64(status4_7,
				status0_3);

		/* now do flag manipulation */

		/* get only flag/error bits we want */
		const __m256i flag_bits = _mm256_and_si256(
				status0_7, flags_mask);
		/* set vlan and rss flags */
		const __m256i vlan_flags = _mm256_shuffle_epi8(
				vlan_flags_shuf, flag_bits);
		const __m256i rss_fdir_bits = _mm256_srli_epi32(flag_bits, 11);
		const __m256i rss_flags = _mm256_shuffle_epi8(rss_flags_shuf,
				rss_fdir_bits);

		/* l3_l4_error flags, shuffle, then shift to correct adjustment
		 * of flags in flags_shuf, and finally mask out extra bits
		 */
		__m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
				_mm256_srli_epi32(flag_bits, 22));
		l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
		l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
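
		/* The left shift undoes the >>1 pre-shift baked into
		 * l3_l4_flags_shuf, and cksum_mask drops any stray bits so
		 * only the checksum-related ol_flags survive.
		 */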

		/* merge flags */
		__m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
				_mm256_or_si256(rss_flags, vlan_flags));

		/* If the rxq has FDIR enabled, read and process the FDIR info
		 * from the descriptor. This can cause more loads/stores, so is
		 * not always performed. Branch over the code when not enabled.
		 */
		if (rxq->fdir_enabled) {
#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC
			/* 16B descriptor code path:
			 * RSS and FDIR ID use the same offset in the desc, so
			 * only one can be present at a time. The code below
			 * identifies an FDIR ID match, and zeros the RSS value
			 * in the mbuf on FDIR match to keep mbuf data clean.
			 */
#define FDIR_BLEND_MASK ((1 << 3) | (1 << 7))

			/* Flags:
			 * - Take flags, shift bits to null out
			 * - CMPEQ with known FDIR ID, to get 0xFFFF or 0 mask
			 * - Strip bits from mask, leaving 0 or 1 for FDIR ID
			 * - Merge with mbuf_flags
			 */
			/* FLM = 1, FLTSTAT = 0b01, (FLM | FLTSTAT) == 3.
			 * Shift left by 28 to avoid having to mask.
			 */
			const __m256i fdir = _mm256_slli_epi32(rss_fdir_bits, 28);
			const __m256i fdir_id = _mm256_set1_epi32(3 << 28);

			/* As above, the fdir_mask to packet mapping is this:
			 * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
			 * Then OR FDIR flags to mbuf_flags on FDIR ID hit.
			 */
			RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
			const __m256i pkt_fdir_bit = _mm256_set1_epi32(1 << 13);
			const __m256i fdir_mask = _mm256_cmpeq_epi32(fdir, fdir_id);
			__m256i fdir_bits = _mm256_and_si256(fdir_mask, pkt_fdir_bit);
			mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_bits);

			/* Based on FDIR_MASK, clear the RSS or FDIR value.
			 * The FDIR ID value is masked to zero if not a hit,
			 * otherwise the mb0_1 register RSS field is zeroed.
			 */
			const __m256i fdir_zero_mask = _mm256_setzero_si256();
			__m256i tmp0_1 = _mm256_blend_epi32(fdir_zero_mask,
					fdir_mask, FDIR_BLEND_MASK);
			__m256i fdir_mb0_1 = _mm256_and_si256(mb0_1, fdir_mask);
			mb0_1 = _mm256_andnot_si256(tmp0_1, mb0_1);

			/* Write to mbuf: no stores to combine with, so just a
			 * scalar store to push data here.
			 */
			rx_pkts[i + 0]->hash.fdir.hi = _mm256_extract_epi32(fdir_mb0_1, 3);
			rx_pkts[i + 1]->hash.fdir.hi = _mm256_extract_epi32(fdir_mb0_1, 7);

			/* Same as above, only shift the fdir_mask to align
			 * the packet FDIR mask with the FDIR_ID desc lane.
			 */
			__m256i tmp2_3 = _mm256_alignr_epi8(fdir_mask, fdir_mask, 12);
			__m256i fdir_mb2_3 = _mm256_and_si256(mb2_3, tmp2_3);
			tmp2_3 = _mm256_blend_epi32(fdir_zero_mask, tmp2_3,
					FDIR_BLEND_MASK);
			mb2_3 = _mm256_andnot_si256(tmp2_3, mb2_3);
			rx_pkts[i + 2]->hash.fdir.hi = _mm256_extract_epi32(fdir_mb2_3, 3);
			rx_pkts[i + 3]->hash.fdir.hi = _mm256_extract_epi32(fdir_mb2_3, 7);

			__m256i tmp4_5 = _mm256_alignr_epi8(fdir_mask, fdir_mask, 8);
			__m256i fdir_mb4_5 = _mm256_and_si256(mb4_5, tmp4_5);
			tmp4_5 = _mm256_blend_epi32(fdir_zero_mask, tmp4_5,
					FDIR_BLEND_MASK);
			mb4_5 = _mm256_andnot_si256(tmp4_5, mb4_5);
			rx_pkts[i + 4]->hash.fdir.hi = _mm256_extract_epi32(fdir_mb4_5, 3);
			rx_pkts[i + 5]->hash.fdir.hi = _mm256_extract_epi32(fdir_mb4_5, 7);

			__m256i tmp6_7 = _mm256_alignr_epi8(fdir_mask, fdir_mask, 4);
			__m256i fdir_mb6_7 = _mm256_and_si256(mb6_7, tmp6_7);
			tmp6_7 = _mm256_blend_epi32(fdir_zero_mask, tmp6_7,
					FDIR_BLEND_MASK);
			mb6_7 = _mm256_andnot_si256(tmp6_7, mb6_7);
			rx_pkts[i + 6]->hash.fdir.hi = _mm256_extract_epi32(fdir_mb6_7, 3);
			rx_pkts[i + 7]->hash.fdir.hi = _mm256_extract_epi32(fdir_mb6_7, 7);
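
			/* For each descriptor pair above: the AND keeps the
			 * FDIR ID only on a match (writing zero otherwise),
			 * while the blend + ANDNOT zeroes the RSS hash dwords
			 * in the mbuf data on a match, since RSS and FDIR ID
			 * share that field in the 16B descriptor.
			 */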

			/* End of 16B descriptor handling */
#else
			/* 32B descriptor FDIR ID mark handling. Returns bits
			 * to be OR-ed into the mbuf olflags.
			 */
			__m256i fdir_add_flags;
			fdir_add_flags = desc_fdir_processing_32b(rxdp, rx_pkts, i, 0);
			mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_add_flags);

			fdir_add_flags = desc_fdir_processing_32b(rxdp, rx_pkts, i, 2);
			mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_add_flags);

			fdir_add_flags = desc_fdir_processing_32b(rxdp, rx_pkts, i, 4);
			mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_add_flags);

			fdir_add_flags = desc_fdir_processing_32b(rxdp, rx_pkts, i, 6);
			mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_add_flags);
			/* End 32B desc handling */
#endif /* RTE_LIBRTE_I40E_16BYTE_RX_DESC */

		} /* if() on FDIR enabled */

		/* At this point, we have the 8 sets of flags in the low 16-bits
		 * of each 32-bit value in mbuf_flags.
		 * We want to extract these, and merge them with the mbuf init
		 * data so we can do a single write to the mbuf to set the flags
		 * and all the other initialization fields. Extracting the
		 * appropriate flags means that we have to do a shift and blend
		 * for each mbuf before we do the write. However, we can also
		 * add in the previously computed rx_descriptor fields to
		 * make a single 256-bit write per mbuf.
		 */
		/* check the structure matches expectations */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
				offsetof(struct rte_mbuf, rearm_data) + 8);
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
				RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
		/* build up data and do writes */
		__m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
				rearm6, rearm7;
		rearm6 = _mm256_blend_epi32(mbuf_init, _mm256_slli_si256(mbuf_flags, 8), 0x04);
		rearm4 = _mm256_blend_epi32(mbuf_init, _mm256_slli_si256(mbuf_flags, 4), 0x04);
		rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04);
		rearm0 = _mm256_blend_epi32(mbuf_init, _mm256_srli_si256(mbuf_flags, 4), 0x04);
		/* permute to add in the rx_descriptor e.g. rss fields */
		rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
		rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
		rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
		rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);

		/* write to mbuf */
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data, rearm6);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data, rearm4);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data, rearm2);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data, rearm0);
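
		/* Each 256-bit store covers rearm_data and ol_flags in its low
		 * 128 bits and rx_descriptor_fields1 (packet_type, lengths,
		 * vlan, rss hash) in its high 128 bits, which is what the
		 * offsetof() build-time checks above guarantee.
		 */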

		/* repeat for the odd mbufs */
		const __m256i odd_flags = _mm256_castsi128_si256(
				_mm256_extracti128_si256(mbuf_flags, 1));
		rearm7 = _mm256_blend_epi32(mbuf_init, _mm256_slli_si256(odd_flags, 8), 0x04);
		rearm5 = _mm256_blend_epi32(mbuf_init, _mm256_slli_si256(odd_flags, 4), 0x04);
		rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04);
		rearm1 = _mm256_blend_epi32(mbuf_init, _mm256_srli_si256(odd_flags, 4), 0x04);
		/* since odd mbufs are already in hi 128-bits use blend */
		rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
		rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
		rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
		rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
		/* again write to mbufs */
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data, rearm7);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data, rearm5);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data, rearm3);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data, rearm1);

		/* extract and record EOP bit */
		if (split_packet) {
			const __m128i eop_mask = _mm_set1_epi16(
					1 << I40E_RX_DESC_STATUS_EOF_SHIFT);
			const __m256i eop_bits256 = _mm256_and_si256(status0_7,
					eop_check);
			/* pack status bits into a single 128-bit register */
			const __m128i eop_bits = _mm_packus_epi32(
					_mm256_castsi256_si128(eop_bits256),
					_mm256_extractf128_si256(eop_bits256, 1));
			/* flip bits, and mask out the EOP bit, which is now
			 * a split-packet bit i.e. !EOP, rather than EOP one.
			 */
			__m128i split_bits = _mm_andnot_si128(eop_bits,
					eop_mask);
			/* eop bits are out of order, so we need to shuffle them
			 * back into order again. In doing so, only use low 8
			 * bits, which acts like another pack instruction
			 * The original order is (hi->lo): 1,3,5,7,0,2,4,6
			 * [Since we use epi8, the 16-bit positions are
			 * multiplied by 2 in the eop_shuffle value.]
			 */
			__m128i eop_shuffle = _mm_set_epi8(
					0xFF, 0xFF, 0xFF, 0xFF, /* zero hi 64b */
					0xFF, 0xFF, 0xFF, 0xFF,
					8, 0, 10, 2, /* move values to lo 64b */
					12, 4, 14, 6);
			split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
			*(uint64_t *)split_packet = _mm_cvtsi128_si64(split_bits);
			split_packet += RTE_I40E_DESCS_PER_LOOP_AVX;
		}

		/* perform dd_check */
		status0_7 = _mm256_and_si256(status0_7, dd_check);
		status0_7 = _mm256_packs_epi32(status0_7,
				_mm256_setzero_si256());

		uint64_t burst = __builtin_popcountll(_mm_cvtsi128_si64(
				_mm256_extracti128_si256(status0_7, 1)));
		burst += __builtin_popcountll(_mm_cvtsi128_si64(
				_mm256_castsi256_si128(status0_7)));
		received += burst;
		if (burst != RTE_I40E_DESCS_PER_LOOP_AVX)
			break;
	}

	/* update tail pointers */
	rxq->rx_tail += received;
	rxq->rx_tail &= (rxq->nb_rx_desc - 1);
	if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */
		rxq->rx_tail--;
		received--;
	}
	rxq->rxrearm_nb += received;
	return received;
}

/*
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 */
uint16_t
i40e_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	return _recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts, NULL);
}

/*
 * vPMD receive routine that reassembles a single burst of 32 scattered packets
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 */
static uint16_t
i40e_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct i40e_rx_queue *rxq = rx_queue;
	uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _recv_raw_pkts_vec_avx2(rxq, rx_pkts, nb_pkts,
			split_flags);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;

	if (rxq->pkt_first_seg == NULL &&
			split_fl64[0] == 0 && split_fl64[1] == 0 &&
			split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	unsigned int i = 0;

	if (rxq->pkt_first_seg == NULL) {
		/* find the first split flag, and only reassemble from there */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
		rxq->pkt_first_seg = rx_pkts[i];
	}
	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
			&split_flags[i]);
}

/*
 * vPMD receive routine that reassembles scattered packets.
 * Main receive routine that can handle arbitrary burst sizes
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 */
uint16_t
i40e_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	uint16_t retval = 0;

	while (nb_pkts > RTE_I40E_VPMD_RX_BURST) {
		uint16_t burst = i40e_recv_scattered_burst_vec_avx2(rx_queue,
				rx_pkts + retval, RTE_I40E_VPMD_RX_BURST);
		retval += burst;
		nb_pkts -= burst;
		if (burst < RTE_I40E_VPMD_RX_BURST)
			return retval;
	}
	return retval + i40e_recv_scattered_burst_vec_avx2(rx_queue,
			rx_pkts + retval, nb_pkts);
}

static __rte_always_inline void
vtx1(volatile struct i40e_tx_desc *txdp,
		struct rte_mbuf *pkt, uint64_t flags)
{
	uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
			((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
			((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
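
	/* A data descriptor is two quadwords: the high quadword built above
	 * carries the descriptor type, command flags and buffer size; the low
	 * quadword set below is the packet buffer's IOVA plus its data offset.
	 */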
	__m128i descriptor = _mm_set_epi64x(high_qw,
			pkt->buf_iova + pkt->data_off);
	_mm_store_si128((__m128i *)txdp, descriptor);
}

static __rte_always_inline void
vtx(volatile struct i40e_tx_desc *txdp,
		struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
	const uint64_t hi_qw_tmpl = (I40E_TX_DESC_DTYPE_DATA |
			((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT));

	/* if unaligned on a 32-byte boundary, do one descriptor to align */
	if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) {
		vtx1(txdp, *pkt, flags);
		nb_pkts--, txdp++, pkt++;
	}

	/* do four at a time while possible, as two 256-bit stores per loop */
	for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) {
		uint64_t hi_qw3 = hi_qw_tmpl |
				((uint64_t)pkt[3]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT);
		uint64_t hi_qw2 = hi_qw_tmpl |
				((uint64_t)pkt[2]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT);
		uint64_t hi_qw1 = hi_qw_tmpl |
				((uint64_t)pkt[1]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT);
		uint64_t hi_qw0 = hi_qw_tmpl |
				((uint64_t)pkt[0]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT);

		__m256i desc2_3 = _mm256_set_epi64x(
				hi_qw3, pkt[3]->buf_iova + pkt[3]->data_off,
				hi_qw2, pkt[2]->buf_iova + pkt[2]->data_off);
		__m256i desc0_1 = _mm256_set_epi64x(
				hi_qw1, pkt[1]->buf_iova + pkt[1]->data_off,
				hi_qw0, pkt[0]->buf_iova + pkt[0]->data_off);
		_mm256_store_si256((void *)(txdp + 2), desc2_3);
		_mm256_store_si256((void *)txdp, desc0_1);
	}

	/* do any remaining ones one at a time */
	while (nb_pkts) {
		vtx1(txdp, *pkt, flags);
		txdp++, pkt++, nb_pkts--;
	}
}

static inline uint16_t
i40e_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
	volatile struct i40e_tx_desc *txdp;
	struct i40e_tx_entry *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = I40E_TD_CMD;
	uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;

	/* crossing the tx_rs_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

	if (txq->nb_tx_free < txq->tx_free_thresh)
		i40e_tx_free_bufs(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &txq->sw_ring[tx_id];

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
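
	/* n is the number of descriptor slots left before the ring wraps; if
	 * the burst crosses the end of the ring, fill up to the last slot
	 * (setting RS on it), then continue the remainder from slot 0.
	 */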
	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {
		tx_backlog_entry(txep, tx_pkts, n);

		vtx(txdp, tx_pkts, n - 1, flags);
		tx_pkts += (n - 1);
		txdp += (n - 1);

		vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* avoid reaching the end of the ring */
		txdp = &txq->tx_ring[tx_id];
		txep = &txq->sw_ring[tx_id];
	}

	tx_backlog_entry(txep, tx_pkts, nb_commit);

	vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
			rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
					I40E_TXD_QW1_CMD_SHIFT);
		txq->tx_next_rs =
			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
	}

	txq->tx_tail = tx_id;

	I40E_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);

	return nb_pkts;
}

uint16_t
i40e_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	uint16_t nb_tx = 0;
	struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;

	while (nb_pkts) {
		uint16_t ret, num;

		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
		ret = i40e_xmit_fixed_burst_vec_avx2(tx_queue, &tx_pkts[nb_tx],