/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>

#include "base/i40e_prototype.h"
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_rxtx_vec_common.h"

#include <x86intrin.h>

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

static inline void
i40e_rxq_rearm(struct i40e_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union i40e_rx_desc *rxdp;
	struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mempool_get_bulk(rxq->mp,
				 (void *)rxep,
				 RTE_I40E_RXQ_REARM_THRESH) < 0) {
		if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
		    rxq->nb_rx_desc) {
			__m128i dma_addr0;

			dma_addr0 = _mm_setzero_si128();
			for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
				rxep[i].mbuf = &rxq->fake_mbuf;
				_mm_store_si128((__m128i *)&rxdp[i].read,
						dma_addr0);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			RTE_I40E_RXQ_REARM_THRESH;
		return;
	}

#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	struct rte_mbuf *mb0, *mb1;
	__m128i dma_addr0, dma_addr1;
	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
			RTE_PKTMBUF_HEADROOM);
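	/* note: descriptors are programmed with buf_physaddr plus
	 * RTE_PKTMBUF_HEADROOM, so received data lands at the mbuf's
	 * default data offset
	 */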
	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		__m128i vaddr0, vaddr1;

		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;

		/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
				offsetof(struct rte_mbuf, buf_addr) + 8);
		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);

		/* convert pa to dma_addr hdr/data */
		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
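		/* note: unpackhi duplicates the high qword (buf_physaddr) of
		 * each mbuf into both 64-bit lanes, so one 128-bit store fills
		 * the descriptor's packet and header address fields alike
		 */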

		/* add headroom to pa values */
		dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
		dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);

		/* flush desc with pa dma_addr */
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
	}
#else
	struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
	__m256i dma_addr0_1, dma_addr2_3;
	__m256i hdr_room = _mm256_set1_epi64x(RTE_PKTMBUF_HEADROOM);
	/* Initialize the mbufs in vector, process 4 mbufs in one loop */
	for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH;
			i += 4, rxep += 4, rxdp += 4) {
		__m128i vaddr0, vaddr1, vaddr2, vaddr3;
		__m256i vaddr0_1, vaddr2_3;

		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;
		mb2 = rxep[2].mbuf;
		mb3 = rxep[3].mbuf;
		/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
				offsetof(struct rte_mbuf, buf_addr) + 8);
		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
		vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr);
		vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr);

		/*
		 * merge 0 & 1, by casting 0 to 256-bit and inserting 1
		 * into the high lanes. Similarly for 2 & 3
		 */
		vaddr0_1 = _mm256_inserti128_si256(
				_mm256_castsi128_si256(vaddr0), vaddr1, 1);
		vaddr2_3 = _mm256_inserti128_si256(
				_mm256_castsi128_si256(vaddr2), vaddr3, 1);

		/* convert pa to dma_addr hdr/data */
		dma_addr0_1 = _mm256_unpackhi_epi64(vaddr0_1, vaddr0_1);
		dma_addr2_3 = _mm256_unpackhi_epi64(vaddr2_3, vaddr2_3);

		/* add headroom to pa values */
		dma_addr0_1 = _mm256_add_epi64(dma_addr0_1, hdr_room);
		dma_addr2_3 = _mm256_add_epi64(dma_addr2_3, hdr_room);

		/* flush desc with pa dma_addr */
		_mm256_store_si256((__m256i *)&rxdp->read, dma_addr0_1);
		_mm256_store_si256((__m256i *)&(rxdp + 2)->read, dma_addr2_3);
	}
#endif

	rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			(rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
}

#define PKTLEN_SHIFT 10
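/* note: the 14-bit packet length lives at bits 38-51 of the descriptor's
 * qword1; shifting each 32-bit word left by 10 pushes it into the top
 * 16-bit word of the qword, where a single 16-bit blend can pick it up
 */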

static inline uint16_t
_recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts, uint8_t *split_packet)
{
#define RTE_I40E_DESCS_PER_LOOP_AVX 8

	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
	const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
			0, rxq->mbuf_initializer);
	struct i40e_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail];
	volatile union i40e_rx_desc *rxdp = rxq->rx_ring + rxq->rx_tail;
	const int avx_aligned = ((rxq->rx_tail & 1) == 0);

	rte_prefetch0(rxdp);

	/* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP_AVX */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP_AVX);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
		i40e_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.qword1.status_error_len &
			rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
		return 0;

	/* constants used in processing loop */
	const __m256i crc_adjust = _mm256_set_epi16(
			/* first descriptor */
			0, 0, 0, /* ignore non-length fields */
			-rxq->crc_len, /* sub crc on data_len */
			0, /* ignore high-16bits of pkt_len */
			-rxq->crc_len, /* sub crc on pkt_len */
			0, 0, /* ignore pkt_type field */
			/* second descriptor */
			0, 0, 0, /* ignore non-length fields */
			-rxq->crc_len, /* sub crc on data_len */
			0, /* ignore high-16bits of pkt_len */
			-rxq->crc_len, /* sub crc on pkt_len */
			0, 0 /* ignore pkt_type field */
	);

	/* 8 packets DD mask, LSB in each 32-bit value */
	const __m256i dd_check = _mm256_set1_epi32(1);

	/* 8 packets EOP mask, second-LSB in each 32-bit value */
	const __m256i eop_check = _mm256_slli_epi32(dd_check,
			I40E_RX_DESC_STATUS_EOF_SHIFT);

	/* mask to shuffle from desc. to mbuf (2 descriptors) */
	const __m256i shuf_msk = _mm256_set_epi8(
			/* first descriptor */
			7, 6, 5, 4, /* octet 4~7, 32bits rss */
			3, 2, /* octet 2~3, low 16 bits vlan_macip */
			15, 14, /* octet 15~14, 16 bits data_len */
			0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
			15, 14, /* octet 15~14, low 16 bits pkt_len */
			0xFF, 0xFF, /* pkt_type set as unknown */
			0xFF, 0xFF, /* pkt_type set as unknown */
			/* second descriptor */
			7, 6, 5, 4, /* octet 4~7, 32bits rss */
			3, 2, /* octet 2~3, low 16 bits vlan_macip */
			15, 14, /* octet 15~14, 16 bits data_len */
			0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
			15, 14, /* octet 15~14, low 16 bits pkt_len */
			0xFF, 0xFF, /* pkt_type set as unknown */
			0xFF, 0xFF /* pkt_type set as unknown */
	);
	/*
	 * compile-time check the above crc and shuffle layout is correct.
	 * NOTE: the first field (lowest address) is given last in set_epi
	 * calls above.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

	/* Status/Error flag masks */
	/*
	 * mask everything except RSS, flow director and VLAN flags
	 * bit2 is for VLAN tag, bit11 for flow director indication
	 * bit13:12 for RSS indication. Bits 3-5 of error
	 * field (bits 22-24) are for IP/L4 checksum errors
	 */
	const __m256i flags_mask = _mm256_set1_epi32(
			(1 << 2) | (1 << 11) | (3 << 12) | (7 << 22));
	/*
	 * data to be shuffled by result of flag mask. If VLAN bit is set,
	 * (bit 2), then position 4 in this array will be used in the
	 * shuffle
	 */
	const __m256i vlan_flags_shuf = _mm256_set_epi32(
			0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0,
			0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0);
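	/* note: these tables exploit _mm256_shuffle_epi8 as a per-128-bit-lane
	 * byte lookup: each masked status byte selects the pre-computed mbuf
	 * flag byte stored at that index
	 */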
	/*
	 * data to be shuffled by result of flag mask, shifted down 11.
	 * If RSS/FDIR bits are set, shuffle moves appropriate flags in
	 * place.
	 */
	const __m256i rss_flags_shuf = _mm256_set_epi8(
			0, 0, 0, 0, 0, 0, 0, 0,
			PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
			0, 0, PKT_RX_FDIR, 0, /* end up 128-bits */
			0, 0, 0, 0, 0, 0, 0, 0,
			PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
			0, 0, PKT_RX_FDIR, 0);

	/*
	 * data to be shuffled by the result of the flags mask shifted by 22
	 * bits. This gives us the l3_l4 flags.
	 */
	const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
			/* shift right 1 bit to make sure it does not exceed 255 */
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
			PKT_RX_IP_CKSUM_BAD >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1,
			/* second 128-bits */
			0, 0, 0, 0, 0, 0, 0, 0,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
			PKT_RX_IP_CKSUM_BAD >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);

	const __m256i cksum_mask = _mm256_set1_epi32(
			PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
			PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
			PKT_RX_EIP_CKSUM_BAD);

	RTE_SET_USED(avx_aligned); /* for 32B descriptors we don't use this */

	uint16_t i, received;
	for (i = 0, received = 0; i < nb_pkts;
			i += RTE_I40E_DESCS_PER_LOOP_AVX,
			rxdp += RTE_I40E_DESCS_PER_LOOP_AVX) {
		/* step 1, copy over 8 mbuf pointers to rx_pkts array */
		_mm256_storeu_si256((void *)&rx_pkts[i],
				_mm256_loadu_si256((void *)&sw_ring[i]));
#ifdef RTE_ARCH_X86_64
		_mm256_storeu_si256((void *)&rx_pkts[i + 4],
				_mm256_loadu_si256((void *)&sw_ring[i + 4]));
#endif

		__m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7;
#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC
		/* for AVX we need alignment otherwise loads are not atomic */
		if (avx_aligned) {
			/* load in descriptors, 2 at a time, in reverse order */
			raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6));
			rte_compiler_barrier();
			raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4));
			rte_compiler_barrier();
			raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2));
			rte_compiler_barrier();
			raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0));
		} else
#endif
		{
			const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7));
			rte_compiler_barrier();
			const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6));
			rte_compiler_barrier();
			const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5));
			rte_compiler_barrier();
			const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4));
			rte_compiler_barrier();
			const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3));
			rte_compiler_barrier();
			const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2));
			rte_compiler_barrier();
			const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1));
			rte_compiler_barrier();
			const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0));

			raw_desc6_7 = _mm256_inserti128_si256(
					_mm256_castsi128_si256(raw_desc6), raw_desc7, 1);
			raw_desc4_5 = _mm256_inserti128_si256(
					_mm256_castsi128_si256(raw_desc4), raw_desc5, 1);
			raw_desc2_3 = _mm256_inserti128_si256(
					_mm256_castsi128_si256(raw_desc2), raw_desc3, 1);
			raw_desc0_1 = _mm256_inserti128_si256(
					_mm256_castsi128_si256(raw_desc0), raw_desc1, 1);
		}
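
		/* note: descriptors are read highest-first with a compiler
		 * barrier after each load; if a DD bit is seen set, the
		 * lower-numbered descriptors were read after it and so are
		 * guaranteed to be at least as complete
		 */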

		if (split_packet) {
			int j;
			for (j = 0; j < RTE_I40E_DESCS_PER_LOOP_AVX; j++)
				rte_mbuf_prefetch_part2(rx_pkts[i + j]);
		}

		/*
		 * convert descriptors 4-7 into mbufs, adjusting length and
		 * re-arranging fields. Then write into the mbuf
		 */
		const __m256i len6_7 = _mm256_slli_epi32(raw_desc6_7, PKTLEN_SHIFT);
		const __m256i len4_5 = _mm256_slli_epi32(raw_desc4_5, PKTLEN_SHIFT);
		const __m256i desc6_7 = _mm256_blend_epi16(raw_desc6_7, len6_7, 0x80);
		const __m256i desc4_5 = _mm256_blend_epi16(raw_desc4_5, len4_5, 0x80);
		__m256i mb6_7 = _mm256_shuffle_epi8(desc6_7, shuf_msk);
		__m256i mb4_5 = _mm256_shuffle_epi8(desc4_5, shuf_msk);
		mb6_7 = _mm256_add_epi16(mb6_7, crc_adjust);
		mb4_5 = _mm256_add_epi16(mb4_5, crc_adjust);
		/*
		 * to get packet types, shift 64-bit values down 30 bits
		 * and so ptype is in lower 8-bits in each
		 */
		const __m256i ptypes6_7 = _mm256_srli_epi64(desc6_7, 30);
		const __m256i ptypes4_5 = _mm256_srli_epi64(desc4_5, 30);
		const uint8_t ptype7 = _mm256_extract_epi8(ptypes6_7, 24);
		const uint8_t ptype6 = _mm256_extract_epi8(ptypes6_7, 8);
		const uint8_t ptype5 = _mm256_extract_epi8(ptypes4_5, 24);
		const uint8_t ptype4 = _mm256_extract_epi8(ptypes4_5, 8);
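		/* note: the hardware ptype field occupies bits 30-37 of
		 * qword1, so after the 30-bit shift it sits in the low byte
		 * of each odd qword, i.e. bytes 8 and 24 of the 256-bit
		 * value; ptype_tbl then maps it to an RTE_PTYPE_* value
		 */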
		mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype7], 4);
		mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype6], 0);
		mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype5], 4);
		mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype4], 0);
		/* merge the status bits into one register */
		const __m256i status4_7 = _mm256_unpackhi_epi32(desc6_7,
				desc4_5);

		/*
		 * convert descriptors 0-3 into mbufs, adjusting length and
		 * re-arranging fields. Then write into the mbuf
		 */
		const __m256i len2_3 = _mm256_slli_epi32(raw_desc2_3, PKTLEN_SHIFT);
		const __m256i len0_1 = _mm256_slli_epi32(raw_desc0_1, PKTLEN_SHIFT);
		const __m256i desc2_3 = _mm256_blend_epi16(raw_desc2_3, len2_3, 0x80);
		const __m256i desc0_1 = _mm256_blend_epi16(raw_desc0_1, len0_1, 0x80);
		__m256i mb2_3 = _mm256_shuffle_epi8(desc2_3, shuf_msk);
		__m256i mb0_1 = _mm256_shuffle_epi8(desc0_1, shuf_msk);
		mb2_3 = _mm256_add_epi16(mb2_3, crc_adjust);
		mb0_1 = _mm256_add_epi16(mb0_1, crc_adjust);
		/* get the packet types */
		const __m256i ptypes2_3 = _mm256_srli_epi64(desc2_3, 30);
		const __m256i ptypes0_1 = _mm256_srli_epi64(desc0_1, 30);
		const uint8_t ptype3 = _mm256_extract_epi8(ptypes2_3, 24);
		const uint8_t ptype2 = _mm256_extract_epi8(ptypes2_3, 8);
		const uint8_t ptype1 = _mm256_extract_epi8(ptypes0_1, 24);
		const uint8_t ptype0 = _mm256_extract_epi8(ptypes0_1, 8);
		mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype3], 4);
		mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype2], 0);
		mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype1], 4);
		mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype0], 0);
		/* merge the status bits into one register */
		const __m256i status0_3 = _mm256_unpackhi_epi32(desc2_3,
				desc0_1);

		/*
		 * take the two sets of status bits and merge to one
		 * After merge, the packets status flags are in the
		 * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
		 */
		__m256i status0_7 = _mm256_unpacklo_epi64(status4_7,
				status0_3);

		/* now do flag manipulation */

		/* get only flag/error bits we want */
		const __m256i flag_bits = _mm256_and_si256(
				status0_7, flags_mask);
		/* set vlan and rss flags */
		const __m256i vlan_flags = _mm256_shuffle_epi8(
				vlan_flags_shuf, flag_bits);
		const __m256i rss_flags = _mm256_shuffle_epi8(
				rss_flags_shuf, _mm256_srli_epi32(flag_bits, 11));
		/*
		 * l3_l4_error flags, shuffle, then shift to correct adjustment
		 * of flags in flags_shuf, and finally mask out extra bits
		 */
		__m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
				_mm256_srli_epi32(flag_bits, 22));
		l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
		l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);

		/* merge flags */
		const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
				_mm256_or_si256(rss_flags, vlan_flags));
		/*
		 * At this point, we have the 8 sets of flags in the low 16-bits
		 * of each 32-bit value in mbuf_flags.
		 * We want to extract these, and merge them with the mbuf init data
		 * so we can do a single write to the mbuf to set the flags
		 * and all the other initialization fields. Extracting the
		 * appropriate flags means that we have to do a shift and blend for
		 * each mbuf before we do the write. However, we can also
		 * add in the previously computed rx_descriptor fields to
		 * make a single 256-bit write per mbuf
		 */
		/* check the structure matches expectations */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
				offsetof(struct rte_mbuf, rearm_data) + 8);
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
				RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
		/* build up data and do writes */
		__m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
				rearm6, rearm7;
		rearm6 = _mm256_blend_epi32(mbuf_init, _mm256_slli_si256(mbuf_flags, 8), 0x04);
		rearm4 = _mm256_blend_epi32(mbuf_init, _mm256_slli_si256(mbuf_flags, 4), 0x04);
		rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04);
		rearm0 = _mm256_blend_epi32(mbuf_init, _mm256_srli_si256(mbuf_flags, 4), 0x04);
		/* permute to add in the rx_descriptor e.g. rss fields */
		rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
		rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
		rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
		rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
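		/* note: blend mask 0x04 drops one packet's 32-bit flags into
		 * the ol_flags slot of mbuf_init; the 0x20 permute then pairs
		 * that low half with the even packet's descriptor fields from
		 * the low half of mbX_Y, forming one 32-byte rearm_data write
		 */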
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data, rearm6);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data, rearm4);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data, rearm2);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data, rearm0);

		/* repeat for the odd mbufs */
		const __m256i odd_flags = _mm256_castsi128_si256(
				_mm256_extracti128_si256(mbuf_flags, 1));
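		/* note: the odd packets' flags live in the high 128 bits of
		 * mbuf_flags; moving them down lets the same shift/blend
		 * pattern as above be reused
		 */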
		rearm7 = _mm256_blend_epi32(mbuf_init, _mm256_slli_si256(odd_flags, 8), 0x04);
		rearm5 = _mm256_blend_epi32(mbuf_init, _mm256_slli_si256(odd_flags, 4), 0x04);
		rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04);
		rearm1 = _mm256_blend_epi32(mbuf_init, _mm256_srli_si256(odd_flags, 4), 0x04);
		/* since odd mbufs are already in hi 128-bits use blend */
		rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
		rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
		rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
		rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
		/* again write to mbufs */
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data, rearm7);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data, rearm5);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data, rearm3);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data, rearm1);

		/* extract and record EOP bit */
		if (split_packet) {
			const __m128i eop_mask = _mm_set1_epi16(
					1 << I40E_RX_DESC_STATUS_EOF_SHIFT);
			const __m256i eop_bits256 = _mm256_and_si256(status0_7,
					eop_check);
			/* pack status bits into a single 128-bit register */
			const __m128i eop_bits = _mm_packus_epi32(
					_mm256_castsi256_si128(eop_bits256),
					_mm256_extractf128_si256(eop_bits256, 1));
			/*
			 * flip bits, and mask out the EOP bit, which is now
			 * a split-packet bit i.e. !EOP, rather than EOP one.
			 */
			__m128i split_bits = _mm_andnot_si128(eop_bits,
					eop_mask);
			/*
			 * eop bits are out of order, so we need to shuffle them
			 * back into order again. In doing so, only use low 8
			 * bits, which acts like another pack instruction
			 * The original order is (hi->lo): 1,3,5,7,0,2,4,6
			 * [Since we use epi8, the 16-bit positions are
			 * multiplied by 2 in the eop_shuffle value.]
			 */
			__m128i eop_shuffle = _mm_set_epi8(
					0xFF, 0xFF, 0xFF, 0xFF, /* zero hi 64b */
					0xFF, 0xFF, 0xFF, 0xFF,
					8, 0, 10, 2, /* move values to lo 64b */
					12, 4, 14, 6);
			split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
			*(uint64_t *)split_packet = _mm_cvtsi128_si64(split_bits);
			split_packet += RTE_I40E_DESCS_PER_LOOP_AVX;
		}

		/* perform dd_check */
		status0_7 = _mm256_and_si256(status0_7, dd_check);
		status0_7 = _mm256_packs_epi32(status0_7,
				_mm256_setzero_si256());
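
		/* note: after the pack, every completed descriptor contributes
		 * one set bit in the low 64 bits of each 128-bit lane, so the
		 * two popcounts below count the packets received this step
		 */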
		uint64_t burst = __builtin_popcountll(_mm_cvtsi128_si64(
				_mm256_extracti128_si256(status0_7, 1)));
		burst += __builtin_popcountll(_mm_cvtsi128_si64(
				_mm256_castsi256_si128(status0_7)));
		received += burst;
		if (burst != RTE_I40E_DESCS_PER_LOOP_AVX)
			break;
	}

	/* update tail pointers */
	rxq->rx_tail += received;
	rxq->rx_tail &= (rxq->nb_rx_desc - 1);
	if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */
		rxq->rx_tail--;
		received--;
	}
	rxq->rxrearm_nb += received;
	return received;
}

/*
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 */
uint16_t
i40e_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts)
{
	return _recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts, NULL);
}

/*
 * vPMD receive routine that reassembles single burst of 32 scattered packets
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 */
static uint16_t
i40e_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
			     uint16_t nb_pkts)
{
	struct i40e_rx_queue *rxq = rx_queue;
	uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
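
	/* note: _recv_raw_pkts_vec_avx2 sets split_flags[i] non-zero when
	 * packet i's EOP bit was clear, i.e. the packet continues in the
	 * next descriptor
	 */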
	/* get some new buffers */
	uint16_t nb_bufs = _recv_raw_pkts_vec_avx2(rxq, rx_pkts, nb_pkts,
			split_flags);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;

	if (rxq->pkt_first_seg == NULL &&
			split_fl64[0] == 0 && split_fl64[1] == 0 &&
			split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	unsigned int i = 0;

	if (rxq->pkt_first_seg == NULL) {
		/* find the first split flag, and only reassemble then */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
		rxq->pkt_first_seg = rx_pkts[i];
	}
	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
		&split_flags[i]);
}

/*
 * vPMD receive routine that reassembles scattered packets.
 * Main receive routine that can handle arbitrary burst sizes
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 */
uint16_t
i40e_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
			    uint16_t nb_pkts)
{
	uint16_t retval = 0;
	while (nb_pkts > RTE_I40E_VPMD_RX_BURST) {
		uint16_t burst = i40e_recv_scattered_burst_vec_avx2(rx_queue,
				rx_pkts + retval, RTE_I40E_VPMD_RX_BURST);
		retval += burst;
		nb_pkts -= burst;
		if (burst < RTE_I40E_VPMD_RX_BURST)
			return retval;
	}
	return retval + i40e_recv_scattered_burst_vec_avx2(rx_queue,
				rx_pkts + retval, nb_pkts);
}

static inline void
vtx1(volatile struct i40e_tx_desc *txdp,
		struct rte_mbuf *pkt, uint64_t flags)
{
	uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
			((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
			((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
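
	/* note: qword1 of the TX descriptor packs the descriptor type,
	 * command flags and buffer size; qword0, set below, carries the
	 * buffer's DMA address
	 */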
	__m128i descriptor = _mm_set_epi64x(high_qw,
			pkt->buf_physaddr + pkt->data_off);
	_mm_store_si128((__m128i *)txdp, descriptor);
}

static inline void
vtx(volatile struct i40e_tx_desc *txdp,
		struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
	const uint64_t hi_qw_tmpl = (I40E_TX_DESC_DTYPE_DATA |
			((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT));

	/* if unaligned on 32-byte boundary, do one to align */
	if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) {
		vtx1(txdp, *pkt, flags);
		nb_pkts--, txdp++, pkt++;
	}

	/* do four packets at a time while possible, via two 256-bit stores */
	for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) {
		uint64_t hi_qw3 = hi_qw_tmpl |
				((uint64_t)pkt[3]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT);
		uint64_t hi_qw2 = hi_qw_tmpl |
				((uint64_t)pkt[2]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT);
		uint64_t hi_qw1 = hi_qw_tmpl |
				((uint64_t)pkt[1]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT);
		uint64_t hi_qw0 = hi_qw_tmpl |
				((uint64_t)pkt[0]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT);

		__m256i desc2_3 = _mm256_set_epi64x(
				hi_qw3, pkt[3]->buf_physaddr + pkt[3]->data_off,
				hi_qw2, pkt[2]->buf_physaddr + pkt[2]->data_off);
		__m256i desc0_1 = _mm256_set_epi64x(
				hi_qw1, pkt[1]->buf_physaddr + pkt[1]->data_off,
				hi_qw0, pkt[0]->buf_physaddr + pkt[0]->data_off);
		_mm256_store_si256((void *)(txdp + 2), desc2_3);
		_mm256_store_si256((void *)txdp, desc0_1);
	}

	/* do any last ones */
	while (nb_pkts) {
		vtx1(txdp, *pkt, flags);
		txdp++, pkt++, nb_pkts--;
	}
}

static inline uint16_t
i40e_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
	struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
	volatile struct i40e_tx_desc *txdp;
	struct i40e_tx_entry *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = I40E_TD_CMD;
	uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;

	/* cross tx_rs_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

	if (txq->nb_tx_free < txq->tx_free_thresh)
		i40e_tx_free_bufs(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &txq->sw_ring[tx_id];

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
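	/* note: n is the number of slots before the ring wraps; when the
	 * burst crosses the wrap point, the final pre-wrap descriptor is
	 * written with RS set and tx_id restarts from 0
	 */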
	if (nb_commit >= n) {
		tx_backlog_entry(txep, tx_pkts, n);

		vtx(txdp, tx_pkts, n - 1, flags);
		tx_pkts += (n - 1);
		txdp += (n - 1);

		vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* avoid reaching the end of the ring */
		txdp = &txq->tx_ring[tx_id];
		txep = &txq->sw_ring[tx_id];
	}

	tx_backlog_entry(txep, tx_pkts, nb_commit);

	vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
			rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
					 I40E_TXD_QW1_CMD_SHIFT);
		txq->tx_next_rs =
			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
	}

	txq->tx_tail = tx_id;

	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);

	return nb_pkts;
}

uint16_t
i40e_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
		   uint16_t nb_pkts)
{
	uint16_t nb_tx = 0;
	struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;

	while (nb_pkts) {
		uint16_t ret, num;

		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
		ret = i40e_xmit_fixed_burst_vec_avx2(tx_queue, &tx_pkts[nb_tx],
						     num);
		nb_tx += ret;
		nb_pkts -= ret;
		if (ret < num)
			break;
	}

	return nb_tx;
}