/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include "iavf_rxtx_vec_common.h"

#include <x86intrin.h>

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
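
/*
 * Refill the RX descriptor ring from the queue's mempool in bursts of
 * IAVF_RXQ_REARM_THRESH buffers, starting at rxq->rxrearm_start.
 * If the bulk allocation fails, the alloc-failure counter is bumped and,
 * when the ring is about to run dry, the next few software-ring entries
 * are pointed at the queue's fake_mbuf and the matching descriptors are
 * zeroed so hardware never DMAs to a stale address. On success the queue
 * tail register is moved to hand the rearmed descriptors back to hardware.
 */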
static inline void
iavf_rxq_rearm(struct iavf_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union iavf_rx_desc *rxdp;
	struct rte_mbuf **rxp = &rxq->sw_ring[rxq->rxrearm_start];

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mempool_get_bulk(rxq->mp,
				 (void *)rxp,
				 IAVF_RXQ_REARM_THRESH) < 0) {
		if (rxq->rxrearm_nb + IAVF_RXQ_REARM_THRESH >=
		    rxq->nb_rx_desc) {
			__m128i dma_addr0;

			dma_addr0 = _mm_setzero_si128();
			for (i = 0; i < IAVF_VPMD_DESCS_PER_LOOP; i++) {
				rxp[i] = &rxq->fake_mbuf;
				_mm_store_si128((__m128i *)&rxdp[i].read,
						dma_addr0);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			IAVF_RXQ_REARM_THRESH;
		return;
	}
#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
	struct rte_mbuf *mb0, *mb1;
	__m128i dma_addr0, dma_addr1;
	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
			RTE_PKTMBUF_HEADROOM);
	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < IAVF_RXQ_REARM_THRESH; i += 2, rxp += 2) {
		__m128i vaddr0, vaddr1;

		mb0 = rxp[0];
		mb1 = rxp[1];

		/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
				offsetof(struct rte_mbuf, buf_addr) + 8);
		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);

		/* convert pa to dma_addr hdr/data */
		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);

		/* add headroom to pa values */
		dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
		dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);

		/* flush desc with pa dma_addr */
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
	}
#else
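	/*
	 * With 16-byte descriptors, two descriptors fit in one 32-byte
	 * register, so each 256-bit store below rearms a pair of
	 * descriptors and the loop covers four mbufs per iteration.
	 */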
	struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
	__m256i dma_addr0_1, dma_addr2_3;
	__m256i hdr_room = _mm256_set1_epi64x(RTE_PKTMBUF_HEADROOM);
	/* Initialize the mbufs in vector, process 4 mbufs in one loop */
	for (i = 0; i < IAVF_RXQ_REARM_THRESH;
			i += 4, rxp += 4, rxdp += 4) {
		__m128i vaddr0, vaddr1, vaddr2, vaddr3;
		__m256i vaddr0_1, vaddr2_3;

		mb0 = rxp[0];
		mb1 = rxp[1];
		mb2 = rxp[2];
		mb3 = rxp[3];

		/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
				offsetof(struct rte_mbuf, buf_addr) + 8);
		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
		vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr);
		vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr);

		/**
		 * merge 0 & 1, by casting 0 to 256-bit and inserting 1
		 * into the high lanes. Similarly for 2 & 3
		 */
		vaddr0_1 =
			_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr0),
						vaddr1, 1);
		vaddr2_3 =
			_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr2),
						vaddr3, 1);

		/* convert pa to dma_addr hdr/data */
		dma_addr0_1 = _mm256_unpackhi_epi64(vaddr0_1, vaddr0_1);
		dma_addr2_3 = _mm256_unpackhi_epi64(vaddr2_3, vaddr2_3);

		/* add headroom to pa values */
		dma_addr0_1 = _mm256_add_epi64(dma_addr0_1, hdr_room);
		dma_addr2_3 = _mm256_add_epi64(dma_addr2_3, hdr_room);

		/* flush desc with pa dma_addr */
		_mm256_store_si256((__m256i *)&rxdp->read, dma_addr0_1);
		_mm256_store_si256((__m256i *)&(rxdp + 2)->read, dma_addr2_3);
	}

#endif
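	/*
	 * The rearmed descriptors now belong to hardware again: advance
	 * the rearm window (wrapping at the end of the ring) and shrink
	 * the count of descriptors still waiting to be refilled.
	 */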
	rxq->rxrearm_start += IAVF_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= IAVF_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			   (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
}
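
/*
 * The packet length sits in the upper bits of the descriptor's qword1.
 * Shifting each 32-bit lane left by PKTLEN_SHIFT moves it into the top
 * 16-bit word of that lane, so it can be blended back over the raw
 * descriptor before the byte shuffle that builds the mbuf length fields.
 */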
#define PKTLEN_SHIFT 10

static inline uint16_t
_iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq,
			     struct rte_mbuf **rx_pkts,
			     uint16_t nb_pkts, uint8_t *split_packet)
{
#define IAVF_DESCS_PER_LOOP_AVX 8

	/* const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; */
	static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = {
		[1] = RTE_PTYPE_L2_ETHER,
		/* [2] - [21] reserved */
		[22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_FRAG,
		[23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_NONFRAG,
		[24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		/* [25] reserved */
		[26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_TCP,
		[27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_SCTP,
		[28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_ICMP,
		/* All others reserved */
	};
	const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
			0, rxq->mbuf_initializer);
	/* struct iavf_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail]; */
	struct rte_mbuf **sw_ring = &rxq->sw_ring[rxq->rx_tail];
	volatile union iavf_rx_desc *rxdp = rxq->rx_ring + rxq->rx_tail;
	const int avx_aligned = ((rxq->rx_tail & 1) == 0);

	rte_prefetch0(rxdp);

	/* nb_pkts has to be floor-aligned to IAVF_DESCS_PER_LOOP_AVX */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_DESCS_PER_LOOP_AVX);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > IAVF_RXQ_REARM_THRESH)
		iavf_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.qword1.status_error_len &
	      rte_cpu_to_le_32(1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
		return 0;

	/* constants used in processing loop */
	const __m256i crc_adjust =
		_mm256_set_epi16
			(/* first descriptor */
			 0, 0, 0,       /* ignore non-length fields */
			 -rxq->crc_len, /* sub crc on data_len */
			 0,             /* ignore high-16bits of pkt_len */
			 -rxq->crc_len, /* sub crc on pkt_len */
			 0, 0,          /* ignore pkt_type field */
			 /* second descriptor */
			 0, 0, 0,       /* ignore non-length fields */
			 -rxq->crc_len, /* sub crc on data_len */
			 0,             /* ignore high-16bits of pkt_len */
			 -rxq->crc_len, /* sub crc on pkt_len */
			 0, 0           /* ignore pkt_type field */
			);

	/* 8 packets DD mask, LSB in each 32-bit value */
	const __m256i dd_check = _mm256_set1_epi32(1);

	/* 8 packets EOP mask, second-LSB in each 32-bit value */
	const __m256i eop_check = _mm256_slli_epi32(dd_check,
			IAVF_RX_DESC_STATUS_EOF_SHIFT);

	/* mask to shuffle from desc. to mbuf (2 descriptors) */
	const __m256i shuf_msk =
		_mm256_set_epi8
			(/* first descriptor */
			 7, 6, 5, 4,  /* octet 4~7, 32bits rss */
			 3, 2,        /* octet 2~3, low 16 bits vlan_macip */
			 15, 14,      /* octet 15~14, 16 bits data_len */
			 0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
			 15, 14,      /* octet 15~14, low 16 bits pkt_len */
			 0xFF, 0xFF,  /* pkt_type set as unknown */
			 0xFF, 0xFF,  /* pkt_type set as unknown */
			 /* second descriptor */
			 7, 6, 5, 4,  /* octet 4~7, 32bits rss */
			 3, 2,        /* octet 2~3, low 16 bits vlan_macip */
			 15, 14,      /* octet 15~14, 16 bits data_len */
			 0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
			 15, 14,      /* octet 15~14, low 16 bits pkt_len */
			 0xFF, 0xFF,  /* pkt_type set as unknown */
			 0xFF, 0xFF   /* pkt_type set as unknown */
			);
	/**
	 * compile-time check the above crc and shuffle layout is correct.
	 * NOTE: the first field (lowest address) is given last in set_epi
	 * calls above.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

	/* Status/Error flag masks */
	/**
	 * mask everything except RSS, flow director and VLAN flags
	 * bit2 is for VLAN tag, bit11 for flow director indication
	 * bit13:12 for RSS indication. Bits 3-5 of error
	 * field (bits 22-24) are for IP/L4 checksum errors
	 */
	const __m256i flags_mask =
		_mm256_set1_epi32((1 << 2) | (1 << 11) |
				  (3 << 12) | (7 << 22));
	/**
	 * data to be shuffled by result of flag mask. If VLAN bit is set,
	 * (bit 2), then position 4 in this array will be used in the
	 * shuffle
	 */
	const __m256i vlan_flags_shuf =
		_mm256_set_epi32(0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0,
				 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0);
	/**
	 * data to be shuffled by result of flag mask, shifted down 11.
	 * If RSS/FDIR bits are set, shuffle moves appropriate flags in
	 * place.
	 */
	const __m256i rss_flags_shuf =
		_mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
				PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH,
				0, 0, 0, 0, PKT_RX_FDIR, 0,/* end up 128-bits */
				0, 0, 0, 0, 0, 0, 0, 0,
				PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH,
				0, 0, 0, 0, PKT_RX_FDIR, 0);

	/**
	 * data to be shuffled by the result of the flags mask shifted by 22
	 * bits. This gives us the l3_l4 flags.
	 */
	const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
			/* shift right 1 bit to make sure it not exceed 255 */
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
			 PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
			 PKT_RX_L4_CKSUM_BAD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
			PKT_RX_IP_CKSUM_BAD >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1,
			/* second 128-bits */
			0, 0, 0, 0, 0, 0, 0, 0,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
			 PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
			 PKT_RX_L4_CKSUM_BAD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
			PKT_RX_IP_CKSUM_BAD >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);

	const __m256i cksum_mask =
		_mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
				  PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
				  PKT_RX_EIP_CKSUM_BAD);

	RTE_SET_USED(avx_aligned); /* for 32B descriptors we don't use this */

	uint16_t i, received;

	for (i = 0, received = 0; i < nb_pkts;
	     i += IAVF_DESCS_PER_LOOP_AVX,
	     rxdp += IAVF_DESCS_PER_LOOP_AVX) {
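		/*
		 * Each iteration handles eight descriptors: copy the mbuf
		 * pointers out of the software ring, load the descriptors,
		 * shuffle length/hash/vlan fields into mbuf layout, build
		 * ol_flags from the status bits, write the rearm data of
		 * all eight mbufs, then count the DD bits to see how many
		 * packets hardware actually completed.
		 */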
		/* step 1, copy over 8 mbuf pointers to rx_pkts array */
		_mm256_storeu_si256((void *)&rx_pkts[i],
				    _mm256_loadu_si256((void *)&sw_ring[i]));
#ifdef RTE_ARCH_X86_64
		_mm256_storeu_si256
			((void *)&rx_pkts[i + 4],
			 _mm256_loadu_si256((void *)&sw_ring[i + 4]));
#endif

		__m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7;
#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
		/* for AVX we need alignment otherwise loads are not atomic */
		if (avx_aligned) {
			/* load in descriptors, 2 at a time, in reverse order */
			raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6));
			rte_compiler_barrier();
			raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4));
			rte_compiler_barrier();
			raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2));
			rte_compiler_barrier();
			raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0));
		} else
#endif
		{
			const __m128i raw_desc7 =
				_mm_load_si128((void *)(rxdp + 7));
			rte_compiler_barrier();
			const __m128i raw_desc6 =
				_mm_load_si128((void *)(rxdp + 6));
			rte_compiler_barrier();
			const __m128i raw_desc5 =
				_mm_load_si128((void *)(rxdp + 5));
			rte_compiler_barrier();
			const __m128i raw_desc4 =
				_mm_load_si128((void *)(rxdp + 4));
			rte_compiler_barrier();
			const __m128i raw_desc3 =
				_mm_load_si128((void *)(rxdp + 3));
			rte_compiler_barrier();
			const __m128i raw_desc2 =
				_mm_load_si128((void *)(rxdp + 2));
			rte_compiler_barrier();
			const __m128i raw_desc1 =
				_mm_load_si128((void *)(rxdp + 1));
			rte_compiler_barrier();
			const __m128i raw_desc0 =
				_mm_load_si128((void *)(rxdp + 0));

			raw_desc6_7 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc6),
					 raw_desc7, 1);
			raw_desc4_5 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc4),
					 raw_desc5, 1);
			raw_desc2_3 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc2),
					 raw_desc3, 1);
			raw_desc0_1 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc0),
					 raw_desc1, 1);
		}

		if (split_packet) {
			int j;

			for (j = 0; j < IAVF_DESCS_PER_LOOP_AVX; j++)
				rte_mbuf_prefetch_part2(rx_pkts[i + j]);
		}

		/**
		 * convert descriptors 4-7 into mbufs, adjusting length and
		 * re-arranging fields. Then write into the mbuf
		 */
		const __m256i len6_7 = _mm256_slli_epi32(raw_desc6_7,
							 PKTLEN_SHIFT);
		const __m256i len4_5 = _mm256_slli_epi32(raw_desc4_5,
							 PKTLEN_SHIFT);
		const __m256i desc6_7 = _mm256_blend_epi16(raw_desc6_7,
							   len6_7, 0x80);
		const __m256i desc4_5 = _mm256_blend_epi16(raw_desc4_5,
							   len4_5, 0x80);
		__m256i mb6_7 = _mm256_shuffle_epi8(desc6_7, shuf_msk);
		__m256i mb4_5 = _mm256_shuffle_epi8(desc4_5, shuf_msk);

		mb6_7 = _mm256_add_epi16(mb6_7, crc_adjust);
		mb4_5 = _mm256_add_epi16(mb4_5, crc_adjust);
		/**
		 * to get packet types, shift 64-bit values down 30 bits
		 * and so ptype is in lower 8-bits in each
		 */
		const __m256i ptypes6_7 = _mm256_srli_epi64(desc6_7, 30);
		const __m256i ptypes4_5 = _mm256_srli_epi64(desc4_5, 30);
		const uint8_t ptype7 = _mm256_extract_epi8(ptypes6_7, 24);
		const uint8_t ptype6 = _mm256_extract_epi8(ptypes6_7, 8);
		const uint8_t ptype5 = _mm256_extract_epi8(ptypes4_5, 24);
		const uint8_t ptype4 = _mm256_extract_epi8(ptypes4_5, 8);

		mb6_7 = _mm256_insert_epi32(mb6_7, type_table[ptype7], 4);
		mb6_7 = _mm256_insert_epi32(mb6_7, type_table[ptype6], 0);
		mb4_5 = _mm256_insert_epi32(mb4_5, type_table[ptype5], 4);
		mb4_5 = _mm256_insert_epi32(mb4_5, type_table[ptype4], 0);
		/* merge the status bits into one register */
		const __m256i status4_7 = _mm256_unpackhi_epi32(desc6_7,
								desc4_5);

		/**
		 * convert descriptors 0-3 into mbufs, adjusting length and
		 * re-arranging fields. Then write into the mbuf
		 */
		const __m256i len2_3 = _mm256_slli_epi32(raw_desc2_3,
							 PKTLEN_SHIFT);
		const __m256i len0_1 = _mm256_slli_epi32(raw_desc0_1,
							 PKTLEN_SHIFT);
		const __m256i desc2_3 = _mm256_blend_epi16(raw_desc2_3,
							   len2_3, 0x80);
		const __m256i desc0_1 = _mm256_blend_epi16(raw_desc0_1,
							   len0_1, 0x80);
		__m256i mb2_3 = _mm256_shuffle_epi8(desc2_3, shuf_msk);
		__m256i mb0_1 = _mm256_shuffle_epi8(desc0_1, shuf_msk);

		mb2_3 = _mm256_add_epi16(mb2_3, crc_adjust);
		mb0_1 = _mm256_add_epi16(mb0_1, crc_adjust);
		/* get the packet types */
		const __m256i ptypes2_3 = _mm256_srli_epi64(desc2_3, 30);
		const __m256i ptypes0_1 = _mm256_srli_epi64(desc0_1, 30);
		const uint8_t ptype3 = _mm256_extract_epi8(ptypes2_3, 24);
		const uint8_t ptype2 = _mm256_extract_epi8(ptypes2_3, 8);
		const uint8_t ptype1 = _mm256_extract_epi8(ptypes0_1, 24);
		const uint8_t ptype0 = _mm256_extract_epi8(ptypes0_1, 8);

		mb2_3 = _mm256_insert_epi32(mb2_3, type_table[ptype3], 4);
		mb2_3 = _mm256_insert_epi32(mb2_3, type_table[ptype2], 0);
		mb0_1 = _mm256_insert_epi32(mb0_1, type_table[ptype1], 4);
		mb0_1 = _mm256_insert_epi32(mb0_1, type_table[ptype0], 0);
		/* merge the status bits into one register */
		const __m256i status0_3 = _mm256_unpackhi_epi32(desc2_3,
								desc0_1);

		/**
		 * take the two sets of status bits and merge to one
		 * After merge, the packets status flags are in the
		 * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
		 */
		__m256i status0_7 = _mm256_unpacklo_epi64(status4_7,
							  status0_3);

		/* now do flag manipulation */

		/* get only flag/error bits we want */
		const __m256i flag_bits =
			_mm256_and_si256(status0_7, flags_mask);
		/* set vlan and rss flags */
		const __m256i vlan_flags =
			_mm256_shuffle_epi8(vlan_flags_shuf, flag_bits);
		const __m256i rss_flags =
			_mm256_shuffle_epi8(rss_flags_shuf,
					    _mm256_srli_epi32(flag_bits, 11));
		/**
		 * l3_l4_error flags, shuffle, then shift to correct adjustment
		 * of flags in flags_shuf, and finally mask out extra bits
		 */
		__m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
				_mm256_srli_epi32(flag_bits, 22));
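		/*
		 * The l3_l4 table entries above were pre-shifted right by one
		 * bit so they fit in a byte for the shuffle; shift the result
		 * back and drop anything outside the checksum flag set.
		 */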
		l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
		l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);

		/* merge flags */
		const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
				_mm256_or_si256(rss_flags, vlan_flags));
		/**
		 * At this point, we have the 8 sets of flags in the low 16-bits
		 * of each 32-bit value in mbuf_flags.
		 * We want to extract these, and merge them with the mbuf init
		 * data so we can do a single write to the mbuf to set the flags
		 * and all the other initialization fields. Extracting the
		 * appropriate flags means that we have to do a shift and blend
		 * for each mbuf before we do the write. However, we can also
		 * add in the previously computed rx_descriptor fields to
		 * make a single 256-bit write per mbuf
		 */
		/* check the structure matches expectations */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
				 offsetof(struct rte_mbuf, rearm_data) + 8);
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
				 RTE_ALIGN(offsetof(struct rte_mbuf,
						    rearm_data),
					    16));
		/* build up data and do writes */
		__m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
			rearm6, rearm7;
		rearm6 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(mbuf_flags, 8),
					    0x04);
		rearm4 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(mbuf_flags, 4),
					    0x04);
		rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04);
		rearm0 = _mm256_blend_epi32(mbuf_init,
					    _mm256_srli_si256(mbuf_flags, 4),
					    0x04);
		/* permute to add in the rx_descriptor e.g. rss fields */
		rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
		rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
		rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
		rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
		/* write to mbuf */
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data,
				    rearm6);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data,
				    rearm4);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data,
				    rearm2);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data,
				    rearm0);

		/* repeat for the odd mbufs */
		const __m256i odd_flags =
			_mm256_castsi128_si256
				(_mm256_extracti128_si256(mbuf_flags, 1));
		rearm7 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(odd_flags, 8),
					    0x04);
		rearm5 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(odd_flags, 4),
					    0x04);
		rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04);
		rearm1 = _mm256_blend_epi32(mbuf_init,
					    _mm256_srli_si256(odd_flags, 4),
					    0x04);
		/* since odd mbufs are already in hi 128-bits use blend */
		rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
		rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
		rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
		rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
		/* again write to mbufs */
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data,
				    rearm7);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data,
				    rearm5);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data,
				    rearm3);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data,
				    rearm1);

		/* extract and record EOP bit */
		if (split_packet) {
			const __m128i eop_mask =
				_mm_set1_epi16(1 << IAVF_RX_DESC_STATUS_EOF_SHIFT);
			const __m256i eop_bits256 = _mm256_and_si256(status0_7,
								     eop_check);
			/* pack status bits into a single 128-bit register */
			const __m128i eop_bits =
				_mm_packus_epi32
					(_mm256_castsi256_si128(eop_bits256),
					 _mm256_extractf128_si256(eop_bits256,
								  1));
			/**
			 * flip bits, and mask out the EOP bit, which is now
			 * a split-packet bit i.e. !EOP, rather than EOP one.
			 */
			__m128i split_bits = _mm_andnot_si128(eop_bits,
					eop_mask);
			/**
			 * eop bits are out of order, so we need to shuffle them
			 * back into order again. In doing so, only use low 8
			 * bits, which acts like another pack instruction
			 * The original order is (hi->lo): 1,3,5,7,0,2,4,6
			 * [Since we use epi8, the 16-bit positions are
			 * multiplied by 2 in the eop_shuffle value.]
			 */
			__m128i eop_shuffle =
				_mm_set_epi8(/* zero hi 64b */
					     0xFF, 0xFF, 0xFF, 0xFF,
					     0xFF, 0xFF, 0xFF, 0xFF,
					     /* move values to lo 64b */
					     8, 0, 10, 2,
					     12, 4, 14, 6);
			split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
			*(uint64_t *)split_packet =
				_mm_cvtsi128_si64(split_bits);
			split_packet += IAVF_DESCS_PER_LOOP_AVX;
		}

		/* perform dd_check */
		status0_7 = _mm256_and_si256(status0_7, dd_check);
		status0_7 = _mm256_packs_epi32(status0_7,
					       _mm256_setzero_si256());

		uint64_t burst = __builtin_popcountll
					(_mm_cvtsi128_si64
						(_mm256_extracti128_si256
							(status0_7, 1)));
		burst += __builtin_popcountll
				(_mm_cvtsi128_si64
					(_mm256_castsi256_si128(status0_7)));
		received += burst;
		if (burst != IAVF_DESCS_PER_LOOP_AVX)
			break;
	}

	/* update tail pointers */
	rxq->rx_tail += received;
	rxq->rx_tail &= (rxq->nb_rx_desc - 1);
	if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */
		rxq->rx_tail--;
		received--;
	}
	rxq->rxrearm_nb += received;
	return received;
}

/**
 * Notice:
 * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
 */
uint16_t
iavf_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	return _iavf_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts, NULL);
}

/**
 * vPMD receive routine that reassembles single burst of 32 scattered packets
 * Notice:
 * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
 */
static uint16_t
iavf_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
				   uint16_t nb_pkts)
{
	struct iavf_rx_queue *rxq = rx_queue;
	uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _iavf_recv_raw_pkts_vec_avx2(rxq, rx_pkts, nb_pkts,
							split_flags);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;

	if (!rxq->pkt_first_seg &&
	    split_fl64[0] == 0 && split_fl64[1] == 0 &&
	    split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	unsigned int i = 0;

	if (!rxq->pkt_first_seg) {
		/* find the first split flag, and only reassemble then */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
		rxq->pkt_first_seg = rx_pkts[i];
	}
	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
				      &split_flags[i]);
}

/**
 * vPMD receive routine that reassembles scattered packets.
 * Main receive routine that can handle arbitrary burst sizes
 * Notice:
 * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
 */
uint16_t
iavf_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts)
{
	uint16_t retval = 0;

	while (nb_pkts > IAVF_VPMD_RX_MAX_BURST) {
		uint16_t burst = iavf_recv_scattered_burst_vec_avx2(rx_queue,
				rx_pkts + retval, IAVF_VPMD_RX_MAX_BURST);
		retval += burst;
		nb_pkts -= burst;
		if (burst < IAVF_VPMD_RX_MAX_BURST)
			return retval;
	}
	return retval + iavf_recv_scattered_burst_vec_avx2(rx_queue,
				rx_pkts + retval, nb_pkts);
}
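
/*
 * Write a single TX data descriptor: the command flags and buffer size
 * go into the high quadword, the buffer DMA address (buf_physaddr plus
 * data_off) into the low quadword, flushed with one 128-bit store.
 */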
static inline void
iavf_vtx1(volatile struct iavf_tx_desc *txdp,
	  struct rte_mbuf *pkt, uint64_t flags)
{
	uint64_t high_qw =
		(IAVF_TX_DESC_DTYPE_DATA |
		 ((uint64_t)flags << IAVF_TXD_QW1_CMD_SHIFT) |
		 ((uint64_t)pkt->data_len << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT));

	__m128i descriptor = _mm_set_epi64x(high_qw,
				pkt->buf_physaddr + pkt->data_off);
	_mm_store_si128((__m128i *)txdp, descriptor);
}
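
/*
 * Fill a run of TX descriptors from an mbuf array. One descriptor is
 * written on its own if needed to reach a 32-byte aligned position,
 * then descriptors are built four mbufs at a time so each pair can be
 * flushed with an aligned 256-bit store; any remainder falls back to
 * iavf_vtx1().
 */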
static inline void
iavf_vtx(volatile struct iavf_tx_desc *txdp,
	 struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
	const uint64_t hi_qw_tmpl = (IAVF_TX_DESC_DTYPE_DATA |
			((uint64_t)flags << IAVF_TXD_QW1_CMD_SHIFT));

	/* if unaligned on 32-byte boundary, do one to align */
	if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) {
		iavf_vtx1(txdp, *pkt, flags);
		nb_pkts--, txdp++, pkt++;
	}

	/* do two at a time while possible, in bursts */
	for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) {
		uint64_t hi_qw3 =
			hi_qw_tmpl |
			((uint64_t)pkt[3]->data_len <<
			 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT);
		uint64_t hi_qw2 =
			hi_qw_tmpl |
			((uint64_t)pkt[2]->data_len <<
			 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT);
		uint64_t hi_qw1 =
			hi_qw_tmpl |
			((uint64_t)pkt[1]->data_len <<
			 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT);
		uint64_t hi_qw0 =
			hi_qw_tmpl |
			((uint64_t)pkt[0]->data_len <<
			 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT);

		__m256i desc2_3 =
			_mm256_set_epi64x
				(hi_qw3,
				 pkt[3]->buf_physaddr + pkt[3]->data_off,
				 hi_qw2,
				 pkt[2]->buf_physaddr + pkt[2]->data_off);
		__m256i desc0_1 =
			_mm256_set_epi64x
				(hi_qw1,
				 pkt[1]->buf_physaddr + pkt[1]->data_off,
				 hi_qw0,
				 pkt[0]->buf_physaddr + pkt[0]->data_off);
		_mm256_store_si256((void *)(txdp + 2), desc2_3);
		_mm256_store_si256((void *)txdp, desc0_1);
	}

	/* do any last ones */
	while (nb_pkts) {
		iavf_vtx1(txdp, *pkt, flags);
		txdp++, pkt++, nb_pkts--;
	}
}
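
/*
 * Send a burst of at most rs_thresh packets using the simple descriptor
 * format: free completed descriptors if needed, record the mbufs in the
 * software ring, write the data descriptors (wrapping at the end of the
 * ring), set the report-status bit on the RS-threshold descriptor and
 * finally bump the queue tail register.
 */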
static inline uint16_t
iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
			       uint16_t nb_pkts)
{
	struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
	volatile struct iavf_tx_desc *txdp;
	struct iavf_tx_entry *txep;
	uint16_t n, nb_commit, tx_id;
	/* bit2 is reserved and must be set to 1 according to Spec */
	uint64_t flags = IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_ICRC;
	uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;

	/* cross rs_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->rs_thresh);

	if (txq->nb_free < txq->free_thresh)
		iavf_tx_free_bufs(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &txq->sw_ring[tx_id];

	txq->nb_free = (uint16_t)(txq->nb_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {
		tx_backlog_entry(txep, tx_pkts, n);

		iavf_vtx(txdp, tx_pkts, n - 1, flags);
		tx_pkts += (n - 1);
		txdp += (n - 1);

		iavf_vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->next_rs = (uint16_t)(txq->rs_thresh - 1);

		/* avoid reach the end of ring */
		txdp = &txq->tx_ring[tx_id];
		txep = &txq->sw_ring[tx_id];
	}

	tx_backlog_entry(txep, tx_pkts, nb_commit);

	iavf_vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->next_rs) {
		txq->tx_ring[txq->next_rs].cmd_type_offset_bsz |=
			rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) <<
					 IAVF_TXD_QW1_CMD_SHIFT);
		txq->next_rs =
			(uint16_t)(txq->next_rs + txq->rs_thresh);
	}

	txq->tx_tail = tx_id;

	IAVF_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);

	return nb_pkts;
}
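
/*
 * Transmit entry point: packets are handed to
 * iavf_xmit_fixed_burst_vec_avx2() in chunks of at most rs_thresh at a
 * time.
 */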
uint16_t
iavf_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	uint16_t nb_tx = 0;
	struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;

	while (nb_pkts) {
		uint16_t ret, num;

		num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
		ret = iavf_xmit_fixed_burst_vec_avx2(tx_queue, &tx_pkts[nb_tx],