/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation

#include <ethdev_driver.h>
#include <rte_malloc.h>

#include "ixgbe_ethdev.h"
#include "ixgbe_rxtx.h"
#include "ixgbe_rxtx_vec_common.h"

#include <tmmintrin.h>

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
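
/*
 * Rearm descriptors starting at rxq->rxrearm_start: take
 * RTE_IXGBE_RXQ_REARM_THRESH fresh mbufs from the mempool, write each
 * buffer's DMA address plus headroom into the packet-buffer field and
 * zero the header-buffer field. If the mempool is exhausted and the ring
 * is about to run dry, point the next descriptors at rxq->fake_mbuf so the
 * NIC never sees a stale address and account the failure in
 * rx_mbuf_alloc_failed. Finally publish the new tail via the RDT register.
 */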
ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct ixgbe_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
	struct rte_mbuf *mb0, *mb1;
	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
			RTE_PKTMBUF_HEADROOM);
	__m128i dma_addr0, dma_addr1;

	const __m128i hba_msk = _mm_set_epi64x(0, UINT64_MAX);

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mempool_get_bulk(rxq->mb_pool,
				 RTE_IXGBE_RXQ_REARM_THRESH) < 0) {
		if (rxq->rxrearm_nb + RTE_IXGBE_RXQ_REARM_THRESH >=
			dma_addr0 = _mm_setzero_si128();
			for (i = 0; i < RTE_IXGBE_DESCS_PER_LOOP; i++) {
				rxep[i].mbuf = &rxq->fake_mbuf;
				_mm_store_si128((__m128i *)&rxdp[i].read,
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			RTE_IXGBE_RXQ_REARM_THRESH;
	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		__m128i vaddr0, vaddr1;

		/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
				offsetof(struct rte_mbuf, buf_addr) + 8);
		vaddr0 = _mm_loadu_si128((__m128i *)&(mb0->buf_addr));
		vaddr1 = _mm_loadu_si128((__m128i *)&(mb1->buf_addr));

		/* convert pa to dma_addr hdr/data */
		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);

		/* add headroom to pa values */
		dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
		dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);

		/* set Header Buffer Address to zero */
		dma_addr0 = _mm_and_si128(dma_addr0, hba_msk);
		dma_addr1 = _mm_and_si128(dma_addr1, hba_msk);

		/* flush desc with pa dma_addr */
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
	rxq->rxrearm_start += RTE_IXGBE_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= RTE_IXGBE_RXQ_REARM_THRESH;

	rx_id = (uint16_t) ((rxq->rxrearm_start == 0) ?
			(rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	IXGBE_PCI_REG_WC_WRITE(rxq->rdt_reg_addr, rx_id);
#ifdef RTE_LIB_SECURITY
desc_to_olflags_v_ipsec(__m128i descs[4], struct rte_mbuf **rx_pkts)
	__m128i sterr, rearm, tmp_e, tmp_p;
	uint32_t *rearm0 = (uint32_t *)rx_pkts[0]->rearm_data + 2;
	uint32_t *rearm1 = (uint32_t *)rx_pkts[1]->rearm_data + 2;
	uint32_t *rearm2 = (uint32_t *)rx_pkts[2]->rearm_data + 2;
	uint32_t *rearm3 = (uint32_t *)rx_pkts[3]->rearm_data + 2;
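	/*
	 * rearm_data + 8 bytes is where ol_flags starts (see the
	 * RTE_BUILD_BUG_ON in desc_to_olflags_v below), so each rearm
	 * pointer addresses the low 32 bits of that packet's ol_flags.
	 */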
	const __m128i ipsec_sterr_msk =
			_mm_set1_epi32(IXGBE_RXDADV_IPSEC_STATUS_SECP |
				IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED);
	const __m128i ipsec_proc_msk =
			_mm_set1_epi32(IXGBE_RXDADV_IPSEC_STATUS_SECP);
	const __m128i ipsec_err_flag =
			_mm_set1_epi32(PKT_RX_SEC_OFFLOAD_FAILED |
	const __m128i ipsec_proc_flag = _mm_set1_epi32(PKT_RX_SEC_OFFLOAD);

	rearm = _mm_set_epi32(*rearm3, *rearm2, *rearm1, *rearm0);
	sterr = _mm_set_epi32(_mm_extract_epi32(descs[3], 2),
			      _mm_extract_epi32(descs[2], 2),
			      _mm_extract_epi32(descs[1], 2),
			      _mm_extract_epi32(descs[0], 2));
	sterr = _mm_and_si128(sterr, ipsec_sterr_msk);
	tmp_e = _mm_cmpeq_epi32(sterr, ipsec_sterr_msk);
	tmp_p = _mm_cmpeq_epi32(sterr, ipsec_proc_msk);
	sterr = _mm_or_si128(_mm_and_si128(tmp_e, ipsec_err_flag),
				_mm_and_si128(tmp_p, ipsec_proc_flag));
	rearm = _mm_or_si128(rearm, sterr);
	*rearm0 = _mm_extract_epi32(rearm, 0);
	*rearm1 = _mm_extract_epi32(rearm, 1);
	*rearm2 = _mm_extract_epi32(rearm, 2);
	*rearm3 = _mm_extract_epi32(rearm, 3);
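
/*
 * Build ol_flags for 4 packets from the descriptor words: RSS/FDIR hash
 * flags from the RSS type, VLAN-present and IP/L4 checksum good/bad from
 * the status/error bits, optionally suppressing false L4 errors for UDP
 * packets carrying a zero checksum. The result is merged with mbuf_init
 * and written to each mbuf's rearm_data area.
 */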
desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
	uint16_t udp_p_flag, struct rte_mbuf **rx_pkts)
	__m128i ptype0, ptype1, vtag0, vtag1, csum, udp_csum_skip;
	__m128i rearm0, rearm1, rearm2, rearm3;

	/* mask everything except rss type */
	const __m128i rsstype_msk = _mm_set_epi16(
			0x0000, 0x0000, 0x0000, 0x0000,
			0x000F, 0x000F, 0x000F, 0x000F);

	/* mask the lower byte of ol_flags */
	const __m128i ol_flags_msk = _mm_set_epi16(
			0x0000, 0x0000, 0x0000, 0x0000,
			0x00FF, 0x00FF, 0x00FF, 0x00FF);

	/* map rss type to rss hash flag */
	const __m128i rss_flags = _mm_set_epi8(PKT_RX_FDIR, 0, 0, 0,
			0, 0, 0, PKT_RX_RSS_HASH,
			PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0,
			PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0);
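	/*
	 * rss_flags is used with _mm_shuffle_epi8 as a 16-entry lookup
	 * table: the masked RSS-type nibble of each packet selects one byte,
	 * yielding PKT_RX_RSS_HASH, PKT_RX_FDIR or 0.
	 */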
	/* mask everything except vlan present and l4/ip csum error */
	const __m128i vlan_csum_msk = _mm_set_epi16(
		(IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16,
		(IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16,
		(IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16,
		(IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16,
		IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP,
		IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP);

	/* map vlan present (0x8), IPE (0x2), L4E (0x1) to ol_flags */
	const __m128i vlan_csum_map_lo = _mm_set_epi8(
		vlan_flags | PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
		vlan_flags | PKT_RX_IP_CKSUM_BAD,
		vlan_flags | PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
		vlan_flags | PKT_RX_IP_CKSUM_GOOD,
		PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
		PKT_RX_IP_CKSUM_GOOD);

	const __m128i vlan_csum_map_hi = _mm_set_epi8(
		0, PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
		PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t),
		0, PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
		PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t));

	/* mask everything except UDP header present if specified */
	const __m128i udp_hdr_p_msk = _mm_set_epi16
		udp_p_flag, udp_p_flag, udp_p_flag, udp_p_flag);

	const __m128i udp_csum_bad_shuf = _mm_set_epi8
		(0, 0, 0, 0, 0, 0, 0, 0,
		 0, 0, 0, 0, 0, 0, ~(uint8_t)PKT_RX_L4_CKSUM_BAD, 0xFF);
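	/*
	 * udp_csum_bad_shuf is another shuffle lookup table: index 0 keeps
	 * all flag bits (0xFF), index 1 clears PKT_RX_L4_CKSUM_BAD. It is
	 * indexed below by the UDP-header-present bit so the false L4 error
	 * can be masked out for UDP packets when rx_udp_csum_zero_err is set.
	 */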
	ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
	ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
	vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
	vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]);

	ptype0 = _mm_unpacklo_epi32(ptype0, ptype1);
	/* save the UDP header present information */
	udp_csum_skip = _mm_and_si128(ptype0, udp_hdr_p_msk);
	ptype0 = _mm_and_si128(ptype0, rsstype_msk);
	ptype0 = _mm_shuffle_epi8(rss_flags, ptype0);

	vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);
	vtag1 = _mm_and_si128(vtag1, vlan_csum_msk);

	/* csum bits are in the most significant bits; to use shuffle we need
	 * to shift them. Change the mask from 0xc000 to 0x0003.
	 */
	csum = _mm_srli_epi16(vtag1, 14);

	/* now OR the most significant 64 bits containing the checksum
	 * flags with the vlan present flags.
	 */
	csum = _mm_srli_si128(csum, 8);
	vtag1 = _mm_or_si128(csum, vtag1);

	/* convert VP, IPE, L4E to ol_flags */
	vtag0 = _mm_shuffle_epi8(vlan_csum_map_hi, vtag1);
	vtag0 = _mm_slli_epi16(vtag0, sizeof(uint8_t));

	vtag1 = _mm_shuffle_epi8(vlan_csum_map_lo, vtag1);
	vtag1 = _mm_and_si128(vtag1, ol_flags_msk);
	vtag1 = _mm_or_si128(vtag0, vtag1);

	vtag1 = _mm_or_si128(ptype0, vtag1);

	/* convert the UDP header present flag (0x200) to 0x1 so it aligns
	 * with the PKT_RX_L4_CKSUM_BAD value in the low byte of each 16-bit
	 * ol_flags word in vtag1 (4x16). Then mask out the bad-checksum flag
	 * by shuffle and bit-mask.
	 */
	udp_csum_skip = _mm_srli_epi16(udp_csum_skip, 9);
	udp_csum_skip = _mm_shuffle_epi8(udp_csum_bad_shuf, udp_csum_skip);
	vtag1 = _mm_and_si128(vtag1, udp_csum_skip);
	 * At this point, we have the 4 sets of flags in the low 64-bits
	 * We want to extract these, and merge them with the mbuf init data
	 * so we can do a single 16-byte write to the mbuf to set the flags
	 * and all the other initialization fields. Extracting the
	 * appropriate flags means that we have to do a shift and blend for
	 * each mbuf before we do the write.
	rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 8), 0x10);
	rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 6), 0x10);
	rearm2 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 4), 0x10);
	rearm3 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 2), 0x10);
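	/*
	 * Blend mask 0x10 selects 16-bit lane 4, i.e. bytes 8-9 of
	 * rearm_data, which is the low 16 bits of ol_flags; the byte shifts
	 * above move each packet's 16-bit flag word into that lane.
	 */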
	/* write the rearm data and the olflags in one write */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
			offsetof(struct rte_mbuf, rearm_data) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
			RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
	_mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
	_mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
	_mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
	_mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
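
/*
 * etqf_check and tunnel_check below come from _mm_movemask_epi8 over the
 * four 32-bit ptype lanes, so each packet owns 4 bits of the result and
 * bit 1 of its group (0x02 << (index * 4), RTE_IXGBE_DESCS_PER_LOOP being
 * 4) carries the ETQF match and the tunnel indication respectively.
 */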
static inline uint32_t get_packet_type(int index,
		uint32_t tunnel_check)
	if (etqf_check & (0x02 << (index * RTE_IXGBE_DESCS_PER_LOOP)))
		return RTE_PTYPE_UNKNOWN;

	if (tunnel_check & (0x02 << (index * RTE_IXGBE_DESCS_PER_LOOP))) {
		pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL;
		return ptype_table_tn[pkt_info];

	pkt_info &= IXGBE_PACKET_TYPE_MASK_82599;
	return ptype_table[pkt_info];
desc_to_ptype_v(__m128i descs[4], uint16_t pkt_type_mask,
		struct rte_mbuf **rx_pkts)
	__m128i etqf_mask = _mm_set_epi64x(0x800000008000LL, 0x800000008000LL);
	__m128i ptype_mask = _mm_set_epi32(
		pkt_type_mask, pkt_type_mask, pkt_type_mask, pkt_type_mask);
	__m128i tunnel_mask =
		_mm_set_epi64x(0x100000001000LL, 0x100000001000LL);

	uint32_t etqf_check, tunnel_check, pkt_info;

	__m128i ptype0 = _mm_unpacklo_epi32(descs[0], descs[2]);
	__m128i ptype1 = _mm_unpacklo_epi32(descs[1], descs[3]);

	/* interleave low 32 bits,
	 * now we have 4 ptypes in an XMM register
	 */
	ptype0 = _mm_unpacklo_epi32(ptype0, ptype1);

	/* create an etqf bitmask based on the etqf bit. */
	etqf_check = _mm_movemask_epi8(_mm_and_si128(ptype0, etqf_mask));

	/* shift right by IXGBE_PACKET_TYPE_SHIFT, and apply ptype mask */
	ptype0 = _mm_and_si128(_mm_srli_epi32(ptype0, IXGBE_PACKET_TYPE_SHIFT),

	/* create a tunnel bitmask based on the tunnel bit */
	tunnel_check = _mm_movemask_epi8(
		_mm_slli_epi32(_mm_and_si128(ptype0, tunnel_mask), 0x3));

	pkt_info = _mm_extract_epi32(ptype0, 0);
	rx_pkts[0]->packet_type =
		get_packet_type(0, pkt_info, etqf_check, tunnel_check);
	pkt_info = _mm_extract_epi32(ptype0, 1);
	rx_pkts[1]->packet_type =
		get_packet_type(1, pkt_info, etqf_check, tunnel_check);
	pkt_info = _mm_extract_epi32(ptype0, 2);
	rx_pkts[2]->packet_type =
		get_packet_type(2, pkt_info, etqf_check, tunnel_check);
	pkt_info = _mm_extract_epi32(ptype0, 3);
	rx_pkts[3]->packet_type =
		get_packet_type(3, pkt_info, etqf_check, tunnel_check);
 * vPMD raw receive routine; only accepts nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP
 * - if nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
 * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP
static inline uint16_t
_recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts, uint8_t *split_packet)
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct ixgbe_rx_entry *sw_ring;
	uint16_t nb_pkts_recd;
#ifdef RTE_LIB_SECURITY
	uint8_t use_ipsec = rxq->using_ipsec;
	__m128i crc_adjust = _mm_set_epi16(
		0, 0, 0,       /* ignore non-length fields */
		-rxq->crc_len, /* sub crc on data_len */
		0,             /* ignore high-16bits of pkt_len */
		-rxq->crc_len, /* sub crc on pkt_len */
		0, 0           /* ignore pkt_type field */
	 * compile-time check the above crc_adjust layout is correct.
	 * NOTE: the first field (lowest address) is given last in set_epi16
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	__m128i dd_check, eop_check;
	uint16_t udp_p_flag = 0; /* Rx Descriptor UDP header present */

	/* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);

	/* Just the act of getting into the function from the application is
	 * going to cost about 7 cycles
	rxdp = rxq->rx_ring + rxq->rx_tail;

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act.
	 */
	if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
		ixgbe_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	if (!(rxdp->wb.upper.status_error &
			rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))

	if (rxq->rx_udp_csum_zero_err)
		udp_p_flag = IXGBE_RXDADV_PKTTYPE_UDP;

	/* 4 packets DD mask */
	dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);

	/* 4 packets EOP mask */
	eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);

	/* mask to shuffle from desc. to mbuf */
	shuf_msk = _mm_set_epi8(
		7, 6, 5, 4,  /* octet 4~7, 32bits rss */
		15, 14,      /* octet 14~15, low 16 bits vlan_macip */
		13, 12,      /* octet 12~13, 16 bits data_len */
		0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
		13, 12,      /* octet 12~13, low 16 bits pkt_len */
		0xFF, 0xFF,  /* skip 32 bit pkt_type */

	 * Compile-time verify the shuffle mask
	 * NOTE: some field positions already verified above, but duplicated
	 * here for completeness in case of future modifications.
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

	mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);

	/* Cache is empty -> need to scan the buffer rings, but first move
	 * the next 'n' mbufs into the cache
	sw_ring = &rxq->sw_ring[rxq->rx_tail];

	/* ensure these 2 flags are in the lower 8 bits */
	RTE_BUILD_BUG_ON((PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED) > UINT8_MAX);
	vlan_flags = rxq->vlan_flags & UINT8_MAX;
	/* A. load 4 packets in one loop
	 * [A*. mask out 4 unused dirty fields in desc]
	 * B. copy 4 mbuf pointers from sw_ring to rx_pkts
	 * C. calc the number of DD bits among the 4 packets
	 * [C*. extract the end-of-packet bit, if requested]
	 * D. fill info. from desc to mbuf
	for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
			pos += RTE_IXGBE_DESCS_PER_LOOP,
			rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
		__m128i descs[RTE_IXGBE_DESCS_PER_LOOP];
		__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
		__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
		/* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
#if defined(RTE_ARCH_X86_64)

		/* B.1 load 2 (64 bit) or 4 (32 bit) mbuf pointers */
		mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);

		/* Read desc statuses backwards to avoid race condition */
		/* A.1 load desc[3] */
		descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
		rte_compiler_barrier();

		/* B.2 copy 2 64 bit or 4 32 bit mbuf pointers into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);

#if defined(RTE_ARCH_X86_64)
		/* B.1 load 2 64 bit mbuf pointers */
		mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);

		/* A.1 load desc[2-0] */
		descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
		rte_compiler_barrier();
		descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
		rte_compiler_barrier();
		descs[0] = _mm_loadu_si128((__m128i *)(rxdp));

#if defined(RTE_ARCH_X86_64)
		/* B.2 copy 2 mbuf pointers into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);

		rte_mbuf_prefetch_part2(rx_pkts[pos]);
		rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
		rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
		rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
		/* avoid compiler reorder optimization */
		rte_compiler_barrier();

		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
		pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
		pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);

		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
		pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
		pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);

		/* C.1 4=>2 filter staterr info only */
		sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
		/* C.1 4=>2 filter staterr info only */
		sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);

		/* set ol_flags with vlan packet type */
		desc_to_olflags_v(descs, mbuf_init, vlan_flags, udp_p_flag,

#ifdef RTE_LIB_SECURITY
		if (unlikely(use_ipsec))
			desc_to_olflags_v_ipsec(descs, &rx_pkts[pos]);

		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
		pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
		pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);

		/* C.2 get 4 pkts staterr value */
		zero = _mm_xor_si128(dd_check, dd_check);
		staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);

		/* D.3 copy final 3,4 data to rx_pkts */
		_mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
		_mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,

		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
		pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
		pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
		/* C* extract and record EOP bit */
			__m128i eop_shuf_mask = _mm_set_epi8(
				0xFF, 0xFF, 0xFF, 0xFF,
				0xFF, 0xFF, 0xFF, 0xFF,
				0xFF, 0xFF, 0xFF, 0xFF,
				0x04, 0x0C, 0x00, 0x08

			/* and with mask to extract bits, flipping 1-0 */
			__m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
			/* the staterr values are not in packet order; for
			 * counting DD bits the order doesn't matter, but for
			 * end-of-packet tracking it does, so shuffle them
			 * back into order. This also compresses the 32-bit
			 * values to 8-bit
			eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
			/* store the resulting 32-bit value */
			*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
			split_packet += RTE_IXGBE_DESCS_PER_LOOP;

		/* C.3 calc available number of desc */
		staterr = _mm_and_si128(staterr, dd_check);
		staterr = _mm_packs_epi32(staterr, zero);
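		/*
		 * After the pack, each packet's DD bit sits in one 16-bit
		 * lane of the low 64 bits, so the popcount below gives the
		 * number of completed descriptors in this group of four.
		 */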
		/* D.3 copy final 1,2 data to rx_pkts */
		_mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
		_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,

		desc_to_ptype_v(descs, rxq->pkt_type_mask, &rx_pkts[pos]);

		/* C.4 calc available number of desc */
		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
		if (likely(var != RTE_IXGBE_DESCS_PER_LOOP))

	/* Update our internal tail pointer */
	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
 * vPMD receive routine; only accepts nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP
 * - if nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
 * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP
ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);

 * vPMD receive routine that reassembles scattered packets
 * - if nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
 * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP
ixgbe_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
	struct ixgbe_rx_queue *rxq = rx_queue;
	uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;
	if (rxq->pkt_first_seg == NULL &&
			split_fl64[0] == 0 && split_fl64[1] == 0 &&
			split_fl64[2] == 0 && split_fl64[3] == 0)

	/* reassemble any packets that need reassembly */
	if (rxq->pkt_first_seg == NULL) {
		/* find the first split flag, and only reassemble from that point */
		while (i < nb_bufs && !split_flags[i])
		rxq->pkt_first_seg = rx_pkts[i];
	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,

 * vPMD receive routine that reassembles scattered packets.
ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
	while (nb_pkts > RTE_IXGBE_MAX_RX_BURST) {

		burst = ixgbe_recv_scattered_burst_vec(rx_queue,
						       RTE_IXGBE_MAX_RX_BURST);
		if (burst < RTE_IXGBE_MAX_RX_BURST)

	return retval + ixgbe_recv_scattered_burst_vec(rx_queue,
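
/*
 * Write one 16-byte advanced TX descriptor: the low quadword carries the
 * buffer DMA address, the high quadword packs cmd_type_len (descriptor
 * type/command flags plus data_len) in its lower 32 bits and
 * olinfo_status in its upper 32 bits, with pkt_len shifted into the
 * PAYLEN field (hence the shift by 46 = 32 + IXGBE_ADVTXD_PAYLEN_SHIFT).
 */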
vtx1(volatile union ixgbe_adv_tx_desc *txdp,
		struct rte_mbuf *pkt, uint64_t flags)
	__m128i descriptor = _mm_set_epi64x((uint64_t)pkt->pkt_len << 46 |
			flags | pkt->data_len,
			pkt->buf_iova + pkt->data_off);
	_mm_store_si128((__m128i *)&txdp->read, descriptor);

vtx(volatile union ixgbe_adv_tx_desc *txdp,
		struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
		vtx1(txdp, *pkt, flags);
ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
	volatile union ixgbe_adv_tx_desc *txdp;
	struct ixgbe_tx_entry_v *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = DCMD_DTYP_FLAGS;
	uint64_t rs = IXGBE_ADVTXD_DCMD_RS | DCMD_DTYP_FLAGS;

	/* crossing the tx_rs_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

	if (txq->nb_tx_free < txq->tx_free_thresh)
		ixgbe_tx_free_bufs(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &txq->sw_ring_v[tx_id];

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {
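		/*
		 * The burst wraps past the end of the ring: fill the n
		 * remaining descriptors first, requesting report-status (RS)
		 * on the last one, then continue from the ring base with the
		 * leftover packets.
		 */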
		tx_backlog_entry(txep, tx_pkts, n);

		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
			vtx1(txdp, *tx_pkts, flags);

		vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* avoid reaching the end of the ring */
		txdp = &(txq->tx_ring[tx_id]);
		txep = &txq->sw_ring_v[tx_id];

	tx_backlog_entry(txep, tx_pkts, nb_commit);

	vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=
			rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
		txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +

	txq->tx_tail = tx_id;

	IXGBE_PCI_REG_WC_WRITE(txq->tdt_reg_addr, txq->tx_tail);
static void __rte_cold
ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
	_ixgbe_tx_queue_release_mbufs_vec(txq);

ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
	_ixgbe_rx_queue_release_mbufs_vec(rxq);

static void __rte_cold
ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
	_ixgbe_tx_free_swring_vec(txq);

static void __rte_cold
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
	_ixgbe_reset_tx_queue_vec(txq);

static const struct ixgbe_txq_ops vec_txq_ops = {
	.release_mbufs = ixgbe_tx_queue_release_mbufs_vec,
	.free_swring = ixgbe_tx_free_swring,
	.reset = ixgbe_reset_tx_queue,

ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
	return ixgbe_rxq_vec_setup_default(rxq);

ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
	return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops);

ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
	return ixgbe_rx_vec_dev_conf_condition_check_default(dev);