1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
6 #include <ethdev_driver.h>
7 #include <rte_malloc.h>
10 #include "iavf_rxtx.h"
11 #include "iavf_rxtx_vec_common.h"
13 #include <tmmintrin.h>
15 #ifndef __INTEL_COMPILER
16 #pragma GCC diagnostic ignored "-Wcast-qual"
20 iavf_rxq_rearm(struct iavf_rx_queue *rxq)
25 volatile union iavf_rx_desc *rxdp;
26 struct rte_mbuf **rxp = &rxq->sw_ring[rxq->rxrearm_start];
27 struct rte_mbuf *mb0, *mb1;
28 __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
29 RTE_PKTMBUF_HEADROOM);
30 __m128i dma_addr0, dma_addr1;
32 rxdp = rxq->rx_ring + rxq->rxrearm_start;
34 /* Pull 'n' more MBUFs into the software ring */
35 if (rte_mempool_get_bulk(rxq->mp, (void *)rxp,
36 rxq->rx_free_thresh) < 0) {
37 if (rxq->rxrearm_nb + rxq->rx_free_thresh >= rxq->nb_rx_desc) {
38 dma_addr0 = _mm_setzero_si128();
39 for (i = 0; i < IAVF_VPMD_DESCS_PER_LOOP; i++) {
40 rxp[i] = &rxq->fake_mbuf;
41 _mm_store_si128((__m128i *)&rxdp[i].read,
45 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
50 /* Initialize the mbufs in vector, process 2 mbufs in one loop */
51 for (i = 0; i < rxq->rx_free_thresh; i += 2, rxp += 2) {
52 __m128i vaddr0, vaddr1;
57 /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
58 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
59 offsetof(struct rte_mbuf, buf_addr) + 8);
60 vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
61 vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
63 /* convert pa to dma_addr hdr/data */
64 dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
65 dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
67 /* add headroom to pa values */
68 dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
69 dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);
71 /* flush desc with pa dma_addr */
72 _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
73 _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
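/* Note on the 16B sequence above: the unaligned load pulls buf_addr (low
 * 8B) and buf_iova (high 8B) from each mbuf in one go, _mm_unpackhi_epi64()
 * duplicates the IOVA into both 64-bit halves, and the headroom add plus
 * store fill both address fields of the read descriptor. Roughly, per mbuf
 * (illustrative scalar form, field names per the 16B read layout):
 *   rxdp->read.pkt_addr = rxdp->read.hdr_addr =
 *       mb0->buf_iova + RTE_PKTMBUF_HEADROOM;
 */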
76 rxq->rxrearm_start += rxq->rx_free_thresh;
77 if (rxq->rxrearm_start >= rxq->nb_rx_desc)
78 rxq->rxrearm_start = 0;
80 rxq->rxrearm_nb -= rxq->rx_free_thresh;
82 rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
83 (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
85 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
86 "rearm_start=%u rearm_nb=%u",
87 rxq->port_id, rxq->queue_id,
88 rx_id, rxq->rxrearm_start, rxq->rxrearm_nb);
90 /* Update the tail pointer on the NIC */
91 IAVF_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
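/* The tail register is left pointing at the last descriptor that was given
 * a fresh buffer (rxrearm_start - 1, wrapping to nb_rx_desc - 1), never at
 * a slot that has not been rearmed yet.
 */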
95 desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4],
96 struct rte_mbuf **rx_pkts)
98 const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
99 __m128i rearm0, rearm1, rearm2, rearm3;
101 __m128i vlan0, vlan1, rss, l3_l4e;
103 /* mask everything except the checksum, RSS, flow director and VLAN flags:
104 * bit2 is for VLAN tag, bit11 for flow director indication,
105 * bit13:12 for RSS indication, bit24:22 for the L3/L4 checksum errors.
107 const __m128i rss_vlan_msk = _mm_set_epi32(
108 0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804);
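/* Sanity check on the constant: 0x1c03804 sets exactly bits 2, 11, 13:12
 * and 24:22, i.e. the VLAN, flow director, RSS and L3/L4 checksum status
 * bits named in the comment above (assuming the usual legacy qword1
 * status/error layout).
 */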
110 const __m128i cksum_mask = _mm_set_epi32(
111 PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
112 PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
113 PKT_RX_OUTER_IP_CKSUM_BAD,
114 PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
115 PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
116 PKT_RX_OUTER_IP_CKSUM_BAD,
117 PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
118 PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
119 PKT_RX_OUTER_IP_CKSUM_BAD,
120 PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
121 PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
122 PKT_RX_OUTER_IP_CKSUM_BAD);
124 /* map rss and vlan type to rss hash and vlan flag */
125 const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
127 0, 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
130 const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
132 PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
133 0, 0, PKT_RX_FDIR, 0);
135 const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
136 /* shift right 1 bit to make sure it does not exceed 255 */
137 (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
138 PKT_RX_IP_CKSUM_BAD) >> 1,
139 (PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD |
140 PKT_RX_L4_CKSUM_BAD) >> 1,
141 (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
142 (PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD) >> 1,
143 (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
144 (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
145 PKT_RX_IP_CKSUM_BAD >> 1,
146 (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);
148 vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]);
149 vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]);
150 vlan0 = _mm_unpacklo_epi64(vlan0, vlan1);
152 vlan1 = _mm_and_si128(vlan0, rss_vlan_msk);
153 vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1);
155 rss = _mm_srli_epi32(vlan1, 11);
156 rss = _mm_shuffle_epi8(rss_flags, rss);
158 l3_l4e = _mm_srli_epi32(vlan1, 22);
159 l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e);
160 /* then we shift left 1 bit */
161 l3_l4e = _mm_slli_epi32(l3_l4e, 1);
162 /* we need to mask out the redundant bits */
163 l3_l4e = _mm_and_si128(l3_l4e, cksum_mask);
165 vlan0 = _mm_or_si128(vlan0, rss);
166 vlan0 = _mm_or_si128(vlan0, l3_l4e);
168 /* At this point, we have the 4 sets of flags in the low 16-bits
169 * of each 32-bit value in vlan0.
170 * We want to extract these, and merge them with the mbuf init data
171 * so we can do a single 16-byte write to the mbuf to set the flags
172 * and all the other initialization fields. Extracting the
173 * appropriate flags means that we have to do a shift and blend for
174 * each mbuf before we do the write.
176 rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 8), 0x10);
177 rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 4), 0x10);
178 rearm2 = _mm_blend_epi16(mbuf_init, vlan0, 0x10);
179 rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(vlan0, 4), 0x10);
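/* Blend mask 0x10 selects 16-bit lane 4 (bytes 8-9) of the destination,
 * which is exactly the low 16 bits of ol_flags in the rearm image; the
 * byte shifts line each packet's flag word up with that lane (pkt 0 needs
 * an 8-byte left shift, pkt 2 none, pkt 3 a 4-byte right shift).
 */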
181 /* write the rearm data and the olflags in one write */
182 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
183 offsetof(struct rte_mbuf, rearm_data) + 8);
184 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
185 RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
186 _mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
187 _mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
188 _mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
189 _mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
192 static inline __m128i
193 flex_rxd_to_fdir_flags_vec(const __m128i fdir_id0_3)
195 #define FDID_MIS_MAGIC 0xFFFFFFFF
196 RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
197 RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
198 const __m128i pkt_fdir_bit = _mm_set1_epi32(PKT_RX_FDIR |
200 /* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
201 const __m128i fdir_mis_mask = _mm_set1_epi32(FDID_MIS_MAGIC);
202 __m128i fdir_mask = _mm_cmpeq_epi32(fdir_id0_3,
204 /* this XOR op inverts (bit-flips) the fdir_mask */
205 fdir_mask = _mm_xor_si128(fdir_mask, fdir_mis_mask);
206 const __m128i fdir_flags = _mm_and_si128(fdir_mask, pkt_fdir_bit);
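/* Worked example: a flow_id of 0xFFFFFFFF (no FDIR match) compares equal
 * to fdir_mis_mask, the XOR then clears that lane and the AND yields no
 * flags; any other flow_id leaves the lane all-ones, so the packet gets
 * PKT_RX_FDIR | PKT_RX_FDIR_ID.
 */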
212 flex_desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4],
213 struct rte_mbuf **rx_pkts)
215 const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
216 __m128i rearm0, rearm1, rearm2, rearm3;
218 __m128i tmp_desc, flags, rss_vlan;
220 /* mask everything except checksum, RSS and VLAN flags.
221 * bit6:4 for checksum.
222 * bit12 for RSS indication.
223 * bit13 for VLAN indication.
225 const __m128i desc_mask = _mm_set_epi32(0x3070, 0x3070,
228 const __m128i cksum_mask = _mm_set_epi32(PKT_RX_IP_CKSUM_MASK |
229 PKT_RX_L4_CKSUM_MASK |
230 PKT_RX_OUTER_IP_CKSUM_BAD,
231 PKT_RX_IP_CKSUM_MASK |
232 PKT_RX_L4_CKSUM_MASK |
233 PKT_RX_OUTER_IP_CKSUM_BAD,
234 PKT_RX_IP_CKSUM_MASK |
235 PKT_RX_L4_CKSUM_MASK |
236 PKT_RX_OUTER_IP_CKSUM_BAD,
237 PKT_RX_IP_CKSUM_MASK |
238 PKT_RX_L4_CKSUM_MASK |
239 PKT_RX_OUTER_IP_CKSUM_BAD);
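/* Sanity check on the constant: 0x3070 sets exactly bits 6:4, 12 and 13,
 * i.e. the checksum, RSS and VLAN status bits listed in the comment above;
 * everything else is cleared so the shifted values below can index the
 * lookup tables directly.
 */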
241 /* map the checksum, rss and vlan fields to the checksum, rss
244 const __m128i cksum_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
245 /* shift right 1 bit to make sure it does not exceed 255 */
246 (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
247 PKT_RX_IP_CKSUM_BAD) >> 1,
248 (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
249 PKT_RX_IP_CKSUM_GOOD) >> 1,
250 (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
251 PKT_RX_IP_CKSUM_BAD) >> 1,
252 (PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
253 PKT_RX_IP_CKSUM_GOOD) >> 1,
254 (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
255 (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
256 (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
257 (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1);
259 const __m128i rss_vlan_flags = _mm_set_epi8(0, 0, 0, 0,
262 PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
263 PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
266 /* merge 4 descriptors */
267 flags = _mm_unpackhi_epi32(descs[0], descs[1]);
268 tmp_desc = _mm_unpackhi_epi32(descs[2], descs[3]);
269 tmp_desc = _mm_unpacklo_epi64(flags, tmp_desc);
270 tmp_desc = _mm_and_si128(tmp_desc, desc_mask);
273 tmp_desc = _mm_srli_epi32(tmp_desc, 4);
274 flags = _mm_shuffle_epi8(cksum_flags, tmp_desc);
275 /* then we shift left 1 bit */
276 flags = _mm_slli_epi32(flags, 1);
277 /* we need to mask out the redundant bits introduced by RSS or
280 flags = _mm_and_si128(flags, cksum_mask);
283 tmp_desc = _mm_srli_epi32(tmp_desc, 8);
284 rss_vlan = _mm_shuffle_epi8(rss_vlan_flags, tmp_desc);
286 /* merge the flags */
287 flags = _mm_or_si128(flags, rss_vlan);
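/* Index math used above: after the first >> 4 the checksum bits sit at
 * 2:0 and index cksum_flags; after the additional >> 8 the RSS/VLAN bits
 * (originally 13:12) sit at 1:0 and index rss_vlan_flags. Both lookups
 * rely on _mm_shuffle_epi8() using the low nibble of each byte as the
 * table index.
 */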
289 if (rxq->fdir_enabled) {
290 const __m128i fdir_id0_1 =
291 _mm_unpackhi_epi32(descs[0], descs[1]);
293 const __m128i fdir_id2_3 =
294 _mm_unpackhi_epi32(descs[2], descs[3]);
296 const __m128i fdir_id0_3 =
297 _mm_unpackhi_epi64(fdir_id0_1, fdir_id2_3);
299 const __m128i fdir_flags =
300 flex_rxd_to_fdir_flags_vec(fdir_id0_3);
302 /* merge with fdir_flags */
303 flags = _mm_or_si128(flags, fdir_flags);
305 /* write fdir_id to mbuf */
306 rx_pkts[0]->hash.fdir.hi =
307 _mm_extract_epi32(fdir_id0_3, 0);
309 rx_pkts[1]->hash.fdir.hi =
310 _mm_extract_epi32(fdir_id0_3, 1);
312 rx_pkts[2]->hash.fdir.hi =
313 _mm_extract_epi32(fdir_id0_3, 2);
315 rx_pkts[3]->hash.fdir.hi =
316 _mm_extract_epi32(fdir_id0_3, 3);
317 } /* if() on fdir_enabled */
320 * At this point, we have the 4 sets of flags in the low 16-bits
321 * of each 32-bit value in flags.
322 * We want to extract these, and merge them with the mbuf init data
323 * so we can do a single 16-byte write to the mbuf to set the flags
324 * and all the other initialization fields. Extracting the
325 * appropriate flags means that we have to do a shift and blend for
326 * each mbuf before we do the write.
328 rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 8), 0x10);
329 rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 4), 0x10);
330 rearm2 = _mm_blend_epi16(mbuf_init, flags, 0x10);
331 rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(flags, 4), 0x10);
333 /* write the rearm data and the olflags in one write */
334 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
335 offsetof(struct rte_mbuf, rearm_data) + 8);
336 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
337 RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
338 _mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
339 _mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
340 _mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
341 _mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
344 #define PKTLEN_SHIFT 10
347 desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
348 const uint32_t *type_table)
350 __m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]);
351 __m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]);
353 ptype0 = _mm_srli_epi64(ptype0, 30);
354 ptype1 = _mm_srli_epi64(ptype1, 30);
356 rx_pkts[0]->packet_type = type_table[_mm_extract_epi8(ptype0, 0)];
357 rx_pkts[1]->packet_type = type_table[_mm_extract_epi8(ptype0, 8)];
358 rx_pkts[2]->packet_type = type_table[_mm_extract_epi8(ptype1, 0)];
359 rx_pkts[3]->packet_type = type_table[_mm_extract_epi8(ptype1, 8)];
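/* The legacy descriptor carries an 8-bit ptype inside qword1 (assumed at
 * bits 37:30); shifting each 64-bit word right by 30 drops it into the
 * lowest byte of its lane, so bytes 0 and 8 of ptype0/ptype1 index
 * straight into the adapter's ptype table.
 */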
363 flex_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
364 const uint32_t *type_table)
366 const __m128i ptype_mask = _mm_set_epi16(IAVF_RX_FLEX_DESC_PTYPE_M, 0,
367 IAVF_RX_FLEX_DESC_PTYPE_M, 0,
368 IAVF_RX_FLEX_DESC_PTYPE_M, 0,
369 IAVF_RX_FLEX_DESC_PTYPE_M, 0);
370 __m128i ptype_01 = _mm_unpacklo_epi32(descs[0], descs[1]);
371 __m128i ptype_23 = _mm_unpacklo_epi32(descs[2], descs[3]);
372 __m128i ptype_all = _mm_unpacklo_epi64(ptype_01, ptype_23);
374 ptype_all = _mm_and_si128(ptype_all, ptype_mask);
376 rx_pkts[0]->packet_type = type_table[_mm_extract_epi16(ptype_all, 1)];
377 rx_pkts[1]->packet_type = type_table[_mm_extract_epi16(ptype_all, 3)];
378 rx_pkts[2]->packet_type = type_table[_mm_extract_epi16(ptype_all, 5)];
379 rx_pkts[3]->packet_type = type_table[_mm_extract_epi16(ptype_all, 7)];
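/* For flex descriptors the ptype lives in the upper 16 bits of the first
 * 32-bit word of each descriptor; the unpacks gather that word from all
 * four descriptors, leaving the ptypes in 16-bit lanes 1, 3, 5 and 7, and
 * the mask keeps only the IAVF_RX_FLEX_DESC_PTYPE_M bits before the table
 * lookup.
 */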
383 * vPMD raw receive routine, only accepts (nb_pkts >= IAVF_VPMD_DESCS_PER_LOOP)
386 * - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, just return no packet
387 * - nb_pkts is floor-aligned to a multiple of IAVF_VPMD_DESCS_PER_LOOP
389 static inline uint16_t
390 _recv_raw_pkts_vec(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_pkts,
391 uint16_t nb_pkts, uint8_t *split_packet)
393 volatile union iavf_rx_desc *rxdp;
394 struct rte_mbuf **sw_ring;
395 uint16_t nb_pkts_recd;
399 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
401 __m128i crc_adjust = _mm_set_epi16(
402 0, 0, 0, /* ignore non-length fields */
403 -rxq->crc_len, /* sub crc on data_len */
404 0, /* ignore high-16bits of pkt_len */
405 -rxq->crc_len, /* sub crc on pkt_len */
406 0, 0 /* ignore pkt_type field */
408 /* compile-time check the above crc_adjust layout is correct.
409 * NOTE: the first field (lowest address) is given last in set_epi16
412 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
413 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
414 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
415 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
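/* crc_adjust lines up with rx_descriptor_fields1 after the shuffle: lane 2
 * (low 16 bits of pkt_len) and lane 4 (data_len) each get -crc_len added,
 * so e.g. with crc_len = 4 a descriptor length of 64 becomes 60 in both
 * mbuf length fields; all other lanes are untouched.
 */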
416 __m128i dd_check, eop_check;
418 /* nb_pkts has to be floor-aligned to IAVF_VPMD_DESCS_PER_LOOP */
419 nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_VPMD_DESCS_PER_LOOP);
421 /* Just the act of getting into the function from the application is
422 * going to cost about 7 cycles
424 rxdp = rxq->rx_ring + rxq->rx_tail;
428 /* See if we need to rearm the RX queue - gives the prefetch a bit
431 if (rxq->rxrearm_nb > rxq->rx_free_thresh)
434 /* Before we start moving massive data around, check to see if
435 * there is actually a packet available
437 if (!(rxdp->wb.qword1.status_error_len &
438 rte_cpu_to_le_32(1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
441 /* 4 packets DD mask */
442 dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);
444 /* 4 packets EOP mask */
445 eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);
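/* dd_check/eop_check mirror the staterr vector built inside the loop: one
 * 32-bit status word per packet with DD in bit 0 and EOP in bit 1, so a
 * single AND tests all four descriptors at once.
 */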
447 /* mask to shuffle from desc. to mbuf */
448 shuf_msk = _mm_set_epi8(
449 7, 6, 5, 4, /* octet 4~7, 32bits rss */
450 3, 2, /* octet 2~3, low 16 bits vlan_macip */
451 15, 14, /* octet 15~14, 16 bits data_len */
452 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
453 15, 14, /* octet 15~14, low 16 bits pkt_len */
454 0xFF, 0xFF, 0xFF, 0xFF /* pkt_type set as unknown */
456 /* Compile-time verify the shuffle mask
457 * NOTE: some field positions already verified above, but duplicated
458 * here for completeness in case of future modifications.
460 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
461 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
462 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
463 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
464 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
465 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
466 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
467 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
469 /* Cache is empty -> need to scan the buffer rings, but first move
470 * the next 'n' mbufs into the cache
472 sw_ring = &rxq->sw_ring[rxq->rx_tail];
474 /* A. load 4 packets in one loop
475 * [A*. mask out 4 unused dirty fields in desc]
476 * B. copy 4 mbuf pointers from sw_ring to rx_pkts
477 * C. calc the number of DD bits among the 4 packets
478 * [C*. extract the end-of-packet bit, if requested]
479 * D. fill info. from desc to mbuf
482 for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
483 pos += IAVF_VPMD_DESCS_PER_LOOP,
484 rxdp += IAVF_VPMD_DESCS_PER_LOOP) {
485 __m128i descs[IAVF_VPMD_DESCS_PER_LOOP];
486 __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
487 __m128i zero, staterr, sterr_tmp1, sterr_tmp2;
488 /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
490 #if defined(RTE_ARCH_X86_64)
494 /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
495 mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
496 /* Read desc statuses backwards to avoid race condition */
497 /* A.1 load desc[3] */
498 descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
499 rte_compiler_barrier();
501 /* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */
502 _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
504 #if defined(RTE_ARCH_X86_64)
505 /* B.1 load 2 64 bit mbuf points */
506 mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos + 2]);
509 /* A.1 load desc[2-0] */
510 descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
511 rte_compiler_barrier();
512 descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
513 rte_compiler_barrier();
514 descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
516 #if defined(RTE_ARCH_X86_64)
517 /* B.2 copy 2 mbuf point into rx_pkts */
518 _mm_storeu_si128((__m128i *)&rx_pkts[pos + 2], mbp2);
522 rte_mbuf_prefetch_part2(rx_pkts[pos]);
523 rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
524 rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
525 rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
528 /* avoid compiler reorder optimization */
529 rte_compiler_barrier();
531 /* pkt 3,4 shift the pktlen field to be 16-bit aligned*/
532 const __m128i len3 = _mm_slli_epi32(descs[3], PKTLEN_SHIFT);
533 const __m128i len2 = _mm_slli_epi32(descs[2], PKTLEN_SHIFT);
535 /* merge the now-aligned packet length fields back in */
536 descs[3] = _mm_blend_epi16(descs[3], len3, 0x80);
537 descs[2] = _mm_blend_epi16(descs[2], len2, 0x80);
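/* PKTLEN_SHIFT (10) moves the packet length (assumed at bits 51:38 of
 * qword1) up so it fills the topmost 16-bit lane; blend mask 0x80 merges
 * only that lane of the shifted copy back, leaving the status/error bits
 * of the original descriptor image intact for the checks below.
 */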
539 /* D.1 pkt 3,4 convert format from desc to pktmbuf */
540 pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
541 pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
543 /* C.1 4=>2 status err info only */
544 sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
545 sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
547 desc_to_olflags_v(rxq, descs, &rx_pkts[pos]);
549 /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
550 pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
551 pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
553 /* pkt 1,2 shift the pktlen field to be 16-bit aligned*/
554 const __m128i len1 = _mm_slli_epi32(descs[1], PKTLEN_SHIFT);
555 const __m128i len0 = _mm_slli_epi32(descs[0], PKTLEN_SHIFT);
557 /* merge the now-aligned packet length fields back in */
558 descs[1] = _mm_blend_epi16(descs[1], len1, 0x80);
559 descs[0] = _mm_blend_epi16(descs[0], len0, 0x80);
561 /* D.1 pkt 1,2 convert format from desc to pktmbuf */
562 pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
563 pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
565 /* C.2 get 4 pkts status err value */
566 zero = _mm_xor_si128(dd_check, dd_check);
567 staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
569 /* D.3 copy final 3,4 data to rx_pkts */
571 (void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
574 (void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
577 /* D.2 pkt 1,2 remove crc */
578 pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
579 pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
581 /* C* extract and record EOP bit */
583 __m128i eop_shuf_mask = _mm_set_epi8(
584 0xFF, 0xFF, 0xFF, 0xFF,
585 0xFF, 0xFF, 0xFF, 0xFF,
586 0xFF, 0xFF, 0xFF, 0xFF,
587 0x04, 0x0C, 0x00, 0x08
590 /* and with mask to extract bits, flipping 1-0 */
591 __m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
592 /* the staterr values are not in packet order; ordering does not
593 * matter for counting DD bits, but it does for end-of-packet
594 * tracking, so shuffle. This also compresses the 32-bit values
595 * to 8-bit
597 eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
598 /* store the resulting 32-bit value */
599 *(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
600 split_packet += IAVF_VPMD_DESCS_PER_LOOP;
603 /* C.3 calc available number of desc */
604 staterr = _mm_and_si128(staterr, dd_check);
605 staterr = _mm_packs_epi32(staterr, zero);
607 /* D.3 copy final 1,2 data to rx_pkts */
609 (void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
611 _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
613 desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
614 /* C.4 calc available number of desc */
615 var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
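/* After the AND with dd_check and the pack, staterr holds each packet's DD
 * bit in its own 16-bit lane of the low 64 bits, so the popcount above is
 * exactly the number of completed descriptors; anything short of a full
 * IAVF_VPMD_DESCS_PER_LOOP group ends the burst.
 */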
617 if (likely(var != IAVF_VPMD_DESCS_PER_LOOP))
621 /* Update our internal tail pointer */
622 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
623 rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
624 rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
630 * vPMD raw receive routine for flex RxD,
631 * only accepts (nb_pkts >= IAVF_VPMD_DESCS_PER_LOOP)
634 * - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, just return no packet
635 * - nb_pkts is floor-aligned to a multiple of IAVF_VPMD_DESCS_PER_LOOP
637 static inline uint16_t
638 _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
639 struct rte_mbuf **rx_pkts,
640 uint16_t nb_pkts, uint8_t *split_packet)
642 volatile union iavf_rx_flex_desc *rxdp;
643 struct rte_mbuf **sw_ring;
644 uint16_t nb_pkts_recd;
647 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
648 __m128i crc_adjust = _mm_set_epi16
649 (0, 0, 0, /* ignore non-length fields */
650 -rxq->crc_len, /* sub crc on data_len */
651 0, /* ignore high-16bits of pkt_len */
652 -rxq->crc_len, /* sub crc on pkt_len */
653 0, 0 /* ignore pkt_type field */
655 const __m128i zero = _mm_setzero_si128();
656 /* mask to shuffle from desc. to mbuf */
657 const __m128i shuf_msk = _mm_set_epi8
659 0xFF, 0xFF, /* rss hash parsed separately */
660 11, 10, /* octet 10~11, 16 bits vlan_macip */
661 5, 4, /* octet 4~5, 16 bits data_len */
662 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
663 5, 4, /* octet 4~5, low 16 bits pkt_len */
664 0xFF, 0xFF, /* pkt_type set as unknown */
665 0xFF, 0xFF /* pkt_type set as unknown */
667 const __m128i eop_shuf_mask = _mm_set_epi8(0xFF, 0xFF,
677 * compile-time check the above crc_adjust layout is correct.
678 * NOTE: the first field (lowest address) is given last in set_epi16
681 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
682 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
683 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
684 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
686 /* 4 packets DD mask */
687 const __m128i dd_check = _mm_set_epi64x(0x0000000100000001LL,
688 0x0000000100000001LL);
689 /* 4 packets EOP mask */
690 const __m128i eop_check = _mm_set_epi64x(0x0000000200000002LL,
691 0x0000000200000002LL);
693 /* nb_pkts has to be floor-aligned to IAVF_VPMD_DESCS_PER_LOOP */
694 nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_VPMD_DESCS_PER_LOOP);
696 /* Just the act of getting into the function from the application is
697 * going to cost about 7 cycles
699 rxdp = (union iavf_rx_flex_desc *)rxq->rx_ring + rxq->rx_tail;
703 /* See if we need to rearm the RX queue - gives the prefetch a bit
706 if (rxq->rxrearm_nb > rxq->rx_free_thresh)
709 /* Before we start moving massive data around, check to see if
710 * there is actually a packet available
712 if (!(rxdp->wb.status_error0 &
713 rte_cpu_to_le_32(1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
717 * Compile-time verify the shuffle mask
718 * NOTE: some field positions already verified above, but duplicated
719 * here for completeness in case of future modifications.
721 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
722 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
723 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
724 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
725 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
726 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
727 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
728 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
730 /* Cache is empty -> need to scan the buffer rings, but first move
731 * the next 'n' mbufs into the cache
733 sw_ring = &rxq->sw_ring[rxq->rx_tail];
735 /* A. load 4 packets in one loop
736 * [A*. mask out 4 unused dirty fields in desc]
737 * B. copy 4 mbuf pointers from sw_ring to rx_pkts
738 * C. calc the number of DD bits among the 4 packets
739 * [C*. extract the end-of-packet bit, if requested]
740 * D. fill info. from desc to mbuf
743 for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
744 pos += IAVF_VPMD_DESCS_PER_LOOP,
745 rxdp += IAVF_VPMD_DESCS_PER_LOOP) {
746 __m128i descs[IAVF_VPMD_DESCS_PER_LOOP];
747 __m128i pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
748 __m128i staterr, sterr_tmp1, sterr_tmp2;
749 /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
751 #if defined(RTE_ARCH_X86_64)
755 /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
756 mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
757 /* Read desc statuses backwards to avoid race condition */
758 /* A.1 load desc[3] */
759 descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
760 rte_compiler_barrier();
762 /* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */
763 _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
765 #if defined(RTE_ARCH_X86_64)
766 /* B.1 load 2 64 bit mbuf points */
767 mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos + 2]);
770 /* A.1 load desc[2-0] */
771 descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
772 rte_compiler_barrier();
773 descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
774 rte_compiler_barrier();
775 descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
777 #if defined(RTE_ARCH_X86_64)
778 /* B.2 copy 2 mbuf point into rx_pkts */
779 _mm_storeu_si128((__m128i *)&rx_pkts[pos + 2], mbp2);
783 rte_mbuf_prefetch_part2(rx_pkts[pos]);
784 rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
785 rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
786 rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
789 /* avoid compiler reorder optimization */
790 rte_compiler_barrier();
792 /* D.1 pkt 3,4 convert format from desc to pktmbuf */
793 pkt_mb3 = _mm_shuffle_epi8(descs[3], shuf_msk);
794 pkt_mb2 = _mm_shuffle_epi8(descs[2], shuf_msk);
796 /* D.1 pkt 1,2 convert format from desc to pktmbuf */
797 pkt_mb1 = _mm_shuffle_epi8(descs[1], shuf_msk);
798 pkt_mb0 = _mm_shuffle_epi8(descs[0], shuf_msk);
800 /* C.1 4=>2 filter staterr info only */
801 sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
802 /* C.1 4=>2 filter staterr info only */
803 sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
805 flex_desc_to_olflags_v(rxq, descs, &rx_pkts[pos]);
807 /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
808 pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
809 pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
811 /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
812 pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
813 pkt_mb0 = _mm_add_epi16(pkt_mb0, crc_adjust);
815 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
817 * need to load the 2nd 16B of each desc for RSS hash parsing;
818 * taking this branch costs extra loads and hurts performance.
820 if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
821 DEV_RX_OFFLOAD_RSS_HASH) {
822 /* load bottom half of every 32B desc */
823 const __m128i raw_desc_bh3 =
825 ((void *)(&rxdp[3].wb.status_error1));
826 rte_compiler_barrier();
827 const __m128i raw_desc_bh2 =
829 ((void *)(&rxdp[2].wb.status_error1));
830 rte_compiler_barrier();
831 const __m128i raw_desc_bh1 =
833 ((void *)(&rxdp[1].wb.status_error1));
834 rte_compiler_barrier();
835 const __m128i raw_desc_bh0 =
837 ((void *)(&rxdp[0].wb.status_error1));
840 * to shift the 32b RSS hash value to the
841 * highest 32b of each 128b before mask
844 _mm_slli_epi64(raw_desc_bh3, 32);
846 _mm_slli_epi64(raw_desc_bh2, 32);
848 _mm_slli_epi64(raw_desc_bh1, 32);
850 _mm_slli_epi64(raw_desc_bh0, 32);
852 __m128i rss_hash_msk =
853 _mm_set_epi32(0xFFFFFFFF, 0, 0, 0);
855 rss_hash3 = _mm_and_si128
856 (rss_hash3, rss_hash_msk);
857 rss_hash2 = _mm_and_si128
858 (rss_hash2, rss_hash_msk);
859 rss_hash1 = _mm_and_si128
860 (rss_hash1, rss_hash_msk);
861 rss_hash0 = _mm_and_si128
862 (rss_hash0, rss_hash_msk);
864 pkt_mb3 = _mm_or_si128(pkt_mb3, rss_hash3);
865 pkt_mb2 = _mm_or_si128(pkt_mb2, rss_hash2);
866 pkt_mb1 = _mm_or_si128(pkt_mb1, rss_hash1);
867 pkt_mb0 = _mm_or_si128(pkt_mb0, rss_hash0);
868 } /* if() on RSS hash parsing */
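/* RSS hash handling above, in short: the 32-bit hash sits in the second
 * 16 bytes of each 32-byte flex descriptor, so one extra load per packet
 * is unavoidable; the 32-bit left shift and mask move it to the highest
 * dword, i.e. the mbuf hash field of rx_descriptor_fields1, which is why
 * this path is only taken when DEV_RX_OFFLOAD_RSS_HASH is enabled.
 */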
871 /* C.2 get 4 pkts staterr value */
872 staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
874 /* D.3 copy final 3,4 data to rx_pkts */
876 ((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
879 ((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
882 /* C* extract and record EOP bit */
884 /* and with mask to extract bits, flipping 1-0 */
885 __m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
886 /* the staterr values are not in packet order; ordering does not
887 * matter for counting DD bits, but it does for end-of-packet
888 * tracking, so shuffle. This also compresses the 32-bit values
889 * to 8-bit
891 eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
892 /* store the resulting 32-bit value */
893 *(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
894 split_packet += IAVF_VPMD_DESCS_PER_LOOP;
897 /* C.3 calc available number of desc */
898 staterr = _mm_and_si128(staterr, dd_check);
899 staterr = _mm_packs_epi32(staterr, zero);
901 /* D.3 copy final 1,2 data to rx_pkts */
903 ((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
905 _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
907 flex_desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
908 /* C.4 calc available number of desc */
909 var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
911 if (likely(var != IAVF_VPMD_DESCS_PER_LOOP))
915 /* Update our internal tail pointer */
916 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
917 rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
918 rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
924 * - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, just return no packet
925 * - nb_pkts > IAVF_VPMD_RX_MAX_BURST, only scan IAVF_VPMD_RX_MAX_BURST
929 iavf_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
932 return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
936 * - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, just return no packet
937 * - nb_pkts > IAVF_VPMD_RX_MAX_BURST, only scan IAVF_VPMD_RX_MAX_BURST
941 iavf_recv_pkts_vec_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
944 return _recv_raw_pkts_vec_flex_rxd(rx_queue, rx_pkts, nb_pkts, NULL);
948 * vPMD receive routine that reassembles a single burst of 32 scattered packets
951 * - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, just return no packet
954 iavf_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
957 struct iavf_rx_queue *rxq = rx_queue;
958 uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0};
961 /* get some new buffers */
962 uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
967 /* happy day case, full burst + no packets to be joined */
968 const uint64_t *split_fl64 = (uint64_t *)split_flags;
970 if (!rxq->pkt_first_seg &&
971 split_fl64[0] == 0 && split_fl64[1] == 0 &&
972 split_fl64[2] == 0 && split_fl64[3] == 0)
975 /* reassemble any packets that need reassembly */
976 if (!rxq->pkt_first_seg) {
977 /* find the first split flag, and only reassemble from there */
978 while (i < nb_bufs && !split_flags[i])
982 rxq->pkt_first_seg = rx_pkts[i];
984 return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
989 * vPMD receive routine that reassembles scattered packets.
992 iavf_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
997 while (nb_pkts > IAVF_VPMD_RX_MAX_BURST) {
1000 burst = iavf_recv_scattered_burst_vec(rx_queue,
1002 IAVF_VPMD_RX_MAX_BURST);
1005 if (burst < IAVF_VPMD_RX_MAX_BURST)
1009 return retval + iavf_recv_scattered_burst_vec(rx_queue,
1015 * vPMD receive routine that reassembles a single burst of 32 scattered packets
1019 * - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, just return no packet
1022 iavf_recv_scattered_burst_vec_flex_rxd(void *rx_queue,
1023 struct rte_mbuf **rx_pkts,
1026 struct iavf_rx_queue *rxq = rx_queue;
1027 uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0};
1030 /* get some new buffers */
1031 uint16_t nb_bufs = _recv_raw_pkts_vec_flex_rxd(rxq, rx_pkts, nb_pkts,
1036 /* happy day case, full burst + no packets to be joined */
1037 const uint64_t *split_fl64 = (uint64_t *)split_flags;
1039 if (!rxq->pkt_first_seg &&
1040 split_fl64[0] == 0 && split_fl64[1] == 0 &&
1041 split_fl64[2] == 0 && split_fl64[3] == 0)
1044 /* reassemble any packets that need reassembly */
1045 if (!rxq->pkt_first_seg) {
1046 /* find the first split flag, and only reassemble from there */
1047 while (i < nb_bufs && !split_flags[i])
1051 rxq->pkt_first_seg = rx_pkts[i];
1053 return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
1058 * vPMD receive routine that reassembles scattered packets for flex RxD
1061 iavf_recv_scattered_pkts_vec_flex_rxd(void *rx_queue,
1062 struct rte_mbuf **rx_pkts,
1065 uint16_t retval = 0;
1067 while (nb_pkts > IAVF_VPMD_RX_MAX_BURST) {
1070 burst = iavf_recv_scattered_burst_vec_flex_rxd(rx_queue,
1072 IAVF_VPMD_RX_MAX_BURST);
1075 if (burst < IAVF_VPMD_RX_MAX_BURST)
1079 return retval + iavf_recv_scattered_burst_vec_flex_rxd(rx_queue,
1085 vtx1(volatile struct iavf_tx_desc *txdp, struct rte_mbuf *pkt, uint64_t flags)
1088 (IAVF_TX_DESC_DTYPE_DATA |
1089 ((uint64_t)flags << IAVF_TXD_QW1_CMD_SHIFT) |
1090 ((uint64_t)pkt->data_len <<
1091 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT));
1093 __m128i descriptor = _mm_set_epi64x(high_qw,
1094 pkt->buf_iova + pkt->data_off);
1095 _mm_store_si128((__m128i *)txdp, descriptor);
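/* One 128-bit store builds the whole data descriptor: the low quadword is
 * the buffer DMA address (buf_iova + data_off), the high quadword packs
 * the descriptor type, the command flags (<< IAVF_TXD_QW1_CMD_SHIFT) and
 * the buffer length (<< IAVF_TXD_QW1_TX_BUF_SZ_SHIFT), exactly as composed
 * in high_qw above.
 */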
1099 iavf_vtx(volatile struct iavf_tx_desc *txdp, struct rte_mbuf **pkt,
1100 uint16_t nb_pkts, uint64_t flags)
1104 for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
1105 vtx1(txdp, *pkt, flags);
1109 iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
1112 struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
1113 volatile struct iavf_tx_desc *txdp;
1114 struct iavf_tx_entry *txep;
1115 uint16_t n, nb_commit, tx_id;
1116 uint64_t flags = IAVF_TX_DESC_CMD_EOP | 0x04; /* bit 2 (insert CRC) must always be set */
1117 uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;
1120 /* crossing the rs_thresh boundary is not allowed */
1121 nb_pkts = RTE_MIN(nb_pkts, txq->rs_thresh);
1123 if (txq->nb_free < txq->free_thresh)
1124 iavf_tx_free_bufs(txq);
1126 nb_pkts = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts);
1127 if (unlikely(nb_pkts == 0))
1129 nb_commit = nb_pkts;
1131 tx_id = txq->tx_tail;
1132 txdp = &txq->tx_ring[tx_id];
1133 txep = &txq->sw_ring[tx_id];
1135 txq->nb_free = (uint16_t)(txq->nb_free - nb_pkts);
1137 n = (uint16_t)(txq->nb_tx_desc - tx_id);
1138 if (nb_commit >= n) {
1139 tx_backlog_entry(txep, tx_pkts, n);
1141 for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
1142 vtx1(txdp, *tx_pkts, flags);
1144 vtx1(txdp, *tx_pkts++, rs);
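/* Wrap-around handling: when the commit would run past the ring end, the
 * first n - 1 descriptors get the plain EOP flags and the last slot in the
 * ring gets RS as well, requesting a completion writeback before the
 * remaining packets continue from index 0 below.
 */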
1146 nb_commit = (uint16_t)(nb_commit - n);
1149 txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
1151 /* wrapped past the end of the ring: continue from the start */
1152 txdp = &txq->tx_ring[tx_id];
1153 txep = &txq->sw_ring[tx_id];
1156 tx_backlog_entry(txep, tx_pkts, nb_commit);
1158 iavf_vtx(txdp, tx_pkts, nb_commit, flags);
1160 tx_id = (uint16_t)(tx_id + nb_commit);
1161 if (tx_id > txq->next_rs) {
1162 txq->tx_ring[txq->next_rs].cmd_type_offset_bsz |=
1163 rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) <<
1164 IAVF_TXD_QW1_CMD_SHIFT);
1166 (uint16_t)(txq->next_rs + txq->rs_thresh);
1169 txq->tx_tail = tx_id;
1171 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_pkts=%u",
1172 txq->port_id, txq->queue_id, tx_id, nb_pkts);
1174 IAVF_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
1180 iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
1184 struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
1189 num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
1190 ret = iavf_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx], num);
1200 static void __rte_cold
1201 iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
1203 _iavf_rx_queue_release_mbufs_vec(rxq);
1206 static void __rte_cold
1207 iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
1209 _iavf_tx_queue_release_mbufs_vec(txq);
1212 static const struct iavf_rxq_ops sse_vec_rxq_ops = {
1213 .release_mbufs = iavf_rx_queue_release_mbufs_sse,
1216 static const struct iavf_txq_ops sse_vec_txq_ops = {
1217 .release_mbufs = iavf_tx_queue_release_mbufs_sse,
1221 iavf_txq_vec_setup(struct iavf_tx_queue *txq)
1223 txq->ops = &sse_vec_txq_ops;
1228 iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
1230 rxq->ops = &sse_vec_rxq_ops;
1231 return iavf_rxq_vec_setup_default(rxq);
1235 iavf_rx_vec_dev_check(struct rte_eth_dev *dev)
1237 return iavf_rx_vec_dev_check_default(dev);
1241 iavf_tx_vec_dev_check(struct rte_eth_dev *dev)
1243 return iavf_tx_vec_dev_check_default(dev);