/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include "ice_rxtx_vec_common.h"

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

ice_flex_rxd_to_fdir_flags_vec(const __m128i fdir_id0_3)
#define FDID_MIS_MAGIC 0xFFFFFFFF
	RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
	RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
	const __m128i pkt_fdir_bit = _mm_set1_epi32(PKT_RX_FDIR |
	/* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
	const __m128i fdir_mis_mask = _mm_set1_epi32(FDID_MIS_MAGIC);
	__m128i fdir_mask = _mm_cmpeq_epi32(fdir_id0_3,
	/* XOR with the all-ones mask inverts fdir_mask bit-wise */
	fdir_mask = _mm_xor_si128(fdir_mask, fdir_mis_mask);
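	/* fdir_mask is now all-ones only for descriptors whose flow_id is
	 * valid (different from the mismatch magic), so the AND below raises
	 * PKT_RX_FDIR/PKT_RX_FDIR_ID only for those packets.
	 */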
	const __m128i fdir_flags = _mm_and_si128(fdir_mask, pkt_fdir_bit);

ice_rxq_rearm(struct ice_rx_queue *rxq)
	volatile union ice_rx_flex_desc *rxdp;
	struct ice_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
	struct rte_mbuf *mb0, *mb1;
	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
			RTE_PKTMBUF_HEADROOM);
	__m128i dma_addr0, dma_addr1;

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mempool_get_bulk(rxq->mp,
				 ICE_RXQ_REARM_THRESH) < 0) {
		if (rxq->rxrearm_nb + ICE_RXQ_REARM_THRESH >=
			dma_addr0 = _mm_setzero_si128();
			for (i = 0; i < ICE_DESCS_PER_LOOP; i++) {
				rxep[i].mbuf = &rxq->fake_mbuf;
				_mm_store_si128((__m128i *)&rxdp[i].read,
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=

	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < ICE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		__m128i vaddr0, vaddr1;

		/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
				offsetof(struct rte_mbuf, buf_addr) + 8);
		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);

		/* convert pa to dma_addr hdr/data */
		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
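		/* unpackhi copies buf_iova (the upper 64 bits of the load
		 * above) into both 64-bit lanes, matching the pkt_addr and
		 * hdr_addr layout of the read descriptor.
		 */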
		/* add headroom to pa values */
		dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
		dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);

		/* flush desc with pa dma_addr */
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);

	rxq->rxrearm_start += ICE_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= ICE_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			   (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
ice_rx_desc_to_olflags_v(struct ice_rx_queue *rxq, __m128i descs[4],
			 struct rte_mbuf **rx_pkts)
	const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
	__m128i rearm0, rearm1, rearm2, rearm3;
	__m128i tmp_desc, flags, rss_vlan;

	/* mask everything except checksum, RSS and VLAN flags.
	 * bit6:4 for checksum.
	 * bit12 for RSS indication.
	 * bit13 for VLAN indication.
	const __m128i desc_mask = _mm_set_epi32(0x3070, 0x3070,

	const __m128i cksum_mask = _mm_set_epi32(PKT_RX_IP_CKSUM_MASK |
						 PKT_RX_L4_CKSUM_MASK |
						 PKT_RX_EIP_CKSUM_BAD,
						 PKT_RX_IP_CKSUM_MASK |
						 PKT_RX_L4_CKSUM_MASK |
						 PKT_RX_EIP_CKSUM_BAD,
						 PKT_RX_IP_CKSUM_MASK |
						 PKT_RX_L4_CKSUM_MASK |
						 PKT_RX_EIP_CKSUM_BAD,
						 PKT_RX_IP_CKSUM_MASK |
						 PKT_RX_L4_CKSUM_MASK |
						 PKT_RX_EIP_CKSUM_BAD);

	/* map the checksum, rss and vlan fields to the checksum, rss
	const __m128i cksum_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
			/* shift right 1 bit to make sure it does not exceed 255 */
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
			 PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
			 PKT_RX_IP_CKSUM_GOOD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
			 PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
			 PKT_RX_IP_CKSUM_GOOD) >> 1,
			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
			(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1);
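	/* cksum_flags is a byte lookup table indexed (via the shuffle below)
	 * by the three checksum status bits; the entries are stored shifted
	 * right by one so they fit in a byte, and shifted back after the
	 * lookup.
	 */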
	const __m128i rss_vlan_flags = _mm_set_epi8(0, 0, 0, 0,
			PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,

	/* merge 4 descriptors */
	flags = _mm_unpackhi_epi32(descs[0], descs[1]);
	tmp_desc = _mm_unpackhi_epi32(descs[2], descs[3]);
	tmp_desc = _mm_unpacklo_epi64(flags, tmp_desc);
	tmp_desc = _mm_and_si128(tmp_desc, desc_mask);

	tmp_desc = _mm_srli_epi32(tmp_desc, 4);
	flags = _mm_shuffle_epi8(cksum_flags, tmp_desc);
	/* shift left 1 bit to undo the >>1 applied when building the table */
	flags = _mm_slli_epi32(flags, 1);
	/* we need to mask out the redundant bits introduced by RSS or
	flags = _mm_and_si128(flags, cksum_mask);

	tmp_desc = _mm_srli_epi32(tmp_desc, 8);
	rss_vlan = _mm_shuffle_epi8(rss_vlan_flags, tmp_desc);

	/* merge the flags */
	flags = _mm_or_si128(flags, rss_vlan);
	if (rxq->fdir_enabled) {
		const __m128i fdir_id0_1 =
			_mm_unpackhi_epi32(descs[0], descs[1]);

		const __m128i fdir_id2_3 =
			_mm_unpackhi_epi32(descs[2], descs[3]);

		const __m128i fdir_id0_3 =
			_mm_unpackhi_epi64(fdir_id0_1, fdir_id2_3);

		const __m128i fdir_flags =
			ice_flex_rxd_to_fdir_flags_vec(fdir_id0_3);

		/* merge with fdir_flags */
		flags = _mm_or_si128(flags, fdir_flags);

		/* write fdir_id to mbuf */
		rx_pkts[0]->hash.fdir.hi =
			_mm_extract_epi32(fdir_id0_3, 0);

		rx_pkts[1]->hash.fdir.hi =
			_mm_extract_epi32(fdir_id0_3, 1);

		rx_pkts[2]->hash.fdir.hi =
			_mm_extract_epi32(fdir_id0_3, 2);

		rx_pkts[3]->hash.fdir.hi =
			_mm_extract_epi32(fdir_id0_3, 3);
	} /* if() on fdir_enabled */

	 * At this point, we have the 4 sets of flags in the low 16-bits
	 * of each 32-bit value in flags.
	 * We want to extract these, and merge them with the mbuf init data
	 * so we can do a single 16-byte write to the mbuf to set the flags
	 * and all the other initialization fields. Extracting the
	 * appropriate flags means that we have to do a shift and blend for
	 * each mbuf before we do the write.
	rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 8), 0x10);
	rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 4), 0x10);
	rearm2 = _mm_blend_epi16(mbuf_init, flags, 0x10);
	rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(flags, 4), 0x10);
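	/* blend mask 0x10 selects 16-bit word 4, i.e. the low word of
	 * ol_flags; the byte shifts move each packet's flag dword so its low
	 * word lands in that position, while the remaining ol_flags words
	 * stay zero from mbuf_init.
	 */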
	/* write the rearm data and the olflags in one write */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
			 offsetof(struct rte_mbuf, rearm_data) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
			 RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
	_mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
	_mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
	_mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
	_mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);

ice_rx_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
	const __m128i ptype_mask = _mm_set_epi16(ICE_RX_FLEX_DESC_PTYPE_M, 0,
						 ICE_RX_FLEX_DESC_PTYPE_M, 0,
						 ICE_RX_FLEX_DESC_PTYPE_M, 0,
						 ICE_RX_FLEX_DESC_PTYPE_M, 0);
	__m128i ptype_01 = _mm_unpacklo_epi32(descs[0], descs[1]);
	__m128i ptype_23 = _mm_unpacklo_epi32(descs[2], descs[3]);
	__m128i ptype_all = _mm_unpacklo_epi64(ptype_01, ptype_23);

	ptype_all = _mm_and_si128(ptype_all, ptype_mask);
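	/* the 10-bit ptype sits in the upper 16 bits of each descriptor's
	 * first dword, so extract the odd 16-bit words for the table lookup.
	 */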
	rx_pkts[0]->packet_type = ptype_tbl[_mm_extract_epi16(ptype_all, 1)];
	rx_pkts[1]->packet_type = ptype_tbl[_mm_extract_epi16(ptype_all, 3)];
	rx_pkts[2]->packet_type = ptype_tbl[_mm_extract_epi16(ptype_all, 5)];
	rx_pkts[3]->packet_type = ptype_tbl[_mm_extract_epi16(ptype_all, 7)];

 * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > ICE_VPMD_RX_BURST, only scan ICE_VPMD_RX_BURST
static inline uint16_t
_ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
		       uint16_t nb_pkts, uint8_t *split_packet)
	volatile union ice_rx_flex_desc *rxdp;
	struct ice_rx_entry *sw_ring;
	uint16_t nb_pkts_recd;
	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
	__m128i crc_adjust = _mm_set_epi16
				(0, 0, 0,       /* ignore non-length fields */
				 -rxq->crc_len, /* sub crc on data_len */
				 0,             /* ignore high-16bits of pkt_len */
				 -rxq->crc_len, /* sub crc on pkt_len */
				 0, 0           /* ignore pkt_type field */
	const __m128i zero = _mm_setzero_si128();
	/* mask to shuffle from desc. to mbuf */
	const __m128i shuf_msk = _mm_set_epi8
			 0xFF, 0xFF,  /* rss hash parsed separately */
			 11, 10,      /* octet 10~11, 16 bits vlan_macip */
			 5, 4,        /* octet 4~5, 16 bits data_len */
			 0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
			 5, 4,        /* octet 4~5, low 16 bits pkt_len */
			 0xFF, 0xFF,  /* pkt_type set as unknown */
			 0xFF, 0xFF   /* pkt_type set as unknown */
	const __m128i eop_shuf_mask = _mm_set_epi8(0xFF, 0xFF,

	 * compile-time check the above crc_adjust layout is correct.
	 * NOTE: the first field (lowest address) is given last in set_epi16
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);

	/* 4 packets DD mask */
	const __m128i dd_check = _mm_set_epi64x(0x0000000100000001LL,
						0x0000000100000001LL);
	/* 4 packets EOP mask */
	const __m128i eop_check = _mm_set_epi64x(0x0000000200000002LL,
						 0x0000000200000002LL);
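	/* DD is bit 0 and EOP bit 1 of each descriptor's status word, hence
	 * the 0x1/0x2 pattern repeated in every 32-bit lane above.
	 */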
	/* nb_pkts has to be less than or equal to ICE_MAX_RX_BURST */
	nb_pkts = RTE_MIN(nb_pkts, ICE_MAX_RX_BURST);

	/* nb_pkts has to be floor-aligned to ICE_DESCS_PER_LOOP */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, ICE_DESCS_PER_LOOP);
	/* Just the act of getting into the function from the application is
	 * going to cost about 7 cycles
	rxdp = rxq->rx_ring + rxq->rx_tail;

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	if (rxq->rxrearm_nb > ICE_RXQ_REARM_THRESH)

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	if (!(rxdp->wb.status_error0 &
	      rte_cpu_to_le_32(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))

	 * Compile-time verify the shuffle mask
	 * NOTE: some field positions already verified above, but duplicated
	 * here for completeness in case of future modifications.
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

	/* Cache is empty -> need to scan the buffer rings, but first move
	 * the next 'n' mbufs into the cache
	sw_ring = &rxq->sw_ring[rxq->rx_tail];

	/* A. load 4 packets in one loop
	 * [A*. mask out 4 unused dirty fields in desc]
	 * B. copy 4 mbuf pointers from swring to rx_pkts
	 * C. calc the number of DD bits among the 4 packets
	 * [C*. extract the end-of-packet bit, if requested]
	 * D. fill info. from desc to mbuf
	for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
	     pos += ICE_DESCS_PER_LOOP,
	     rxdp += ICE_DESCS_PER_LOOP) {
		__m128i descs[ICE_DESCS_PER_LOOP];
		__m128i pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
		__m128i staterr, sterr_tmp1, sterr_tmp2;
		/* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
#if defined(RTE_ARCH_X86_64)

		/* B.1 load 2 (64 bit) or 4 (32 bit) mbuf pointers */
		mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
		/* Read desc statuses backwards to avoid race condition */
		/* A.1 load 4 pkts desc */
		descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
		rte_compiler_barrier();

		/* B.2 copy 2 64 bit or 4 32 bit mbuf pointers into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);

#if defined(RTE_ARCH_X86_64)
		/* B.1 load 2 64 bit mbuf pointers */
		mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos + 2]);

		descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
		rte_compiler_barrier();
		/* A.1 load the remaining descriptors */
		descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
		rte_compiler_barrier();
		descs[0] = _mm_loadu_si128((__m128i *)(rxdp));

#if defined(RTE_ARCH_X86_64)
		/* B.2 copy 2 mbuf pointers into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos + 2], mbp2);

		rte_mbuf_prefetch_part2(rx_pkts[pos]);
		rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
		rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
		rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);

		/* avoid compiler reorder optimization */
		rte_compiler_barrier();
		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
		pkt_mb3 = _mm_shuffle_epi8(descs[3], shuf_msk);
		pkt_mb2 = _mm_shuffle_epi8(descs[2], shuf_msk);

		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
		pkt_mb1 = _mm_shuffle_epi8(descs[1], shuf_msk);
		pkt_mb0 = _mm_shuffle_epi8(descs[0], shuf_msk);

		/* C.1 4=>2 filter staterr info only */
		sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
		/* C.1 4=>2 filter staterr info only */
		sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);

		ice_rx_desc_to_olflags_v(rxq, descs, &rx_pkts[pos]);

		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
		pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
		pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);

		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
		pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
		pkt_mb0 = _mm_add_epi16(pkt_mb0, crc_adjust);

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
		 * Loading the 2nd 16B of each descriptor for RSS hash parsing
		 * causes a performance drop on this path.
		if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_RSS_HASH) {
			/* load bottom half of every 32B desc */
			const __m128i raw_desc_bh3 =
					((void *)(&rxdp[3].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh2 =
					((void *)(&rxdp[2].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh1 =
					((void *)(&rxdp[1].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh0 =
					((void *)(&rxdp[0].wb.status_error1));

			 * to shift the 32b RSS hash value to the
			 * highest 32b of each 128b before mask
				_mm_slli_epi64(raw_desc_bh3, 32);
				_mm_slli_epi64(raw_desc_bh2, 32);
				_mm_slli_epi64(raw_desc_bh1, 32);
				_mm_slli_epi64(raw_desc_bh0, 32);

			__m128i rss_hash_msk =
				_mm_set_epi32(0xFFFFFFFF, 0, 0, 0);
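			/* hash.rss occupies the top dword of
			 * rx_descriptor_fields1, so keep only that lane after
			 * the shift before OR-ing into pkt_mb0..pkt_mb3.
			 */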
			rss_hash3 = _mm_and_si128
					(rss_hash3, rss_hash_msk);
			rss_hash2 = _mm_and_si128
					(rss_hash2, rss_hash_msk);
			rss_hash1 = _mm_and_si128
					(rss_hash1, rss_hash_msk);
			rss_hash0 = _mm_and_si128
					(rss_hash0, rss_hash_msk);

			pkt_mb3 = _mm_or_si128(pkt_mb3, rss_hash3);
			pkt_mb2 = _mm_or_si128(pkt_mb2, rss_hash2);
			pkt_mb1 = _mm_or_si128(pkt_mb1, rss_hash1);
			pkt_mb0 = _mm_or_si128(pkt_mb0, rss_hash0);
		} /* if() on RSS hash parsing */

		/* C.2 get 4 pkts staterr value */
		staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);

		/* D.3 copy final 3,4 data to rx_pkts */
			((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
			((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,

		/* C* extract and record EOP bit */
			/* and with mask to extract bits, flipping 1-0 */
			__m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
			/* the staterr values are not in order, as the count
			 * of dd bits doesn't care. However, for end of
			 * packet tracking, we do care, so shuffle. This also
			 * compresses the 32-bit values to 8-bit
			eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
			/* store the resulting 32-bit value */
			*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
			split_packet += ICE_DESCS_PER_LOOP;

		/* C.3 calc available number of desc */
		staterr = _mm_and_si128(staterr, dd_check);
		staterr = _mm_packs_epi32(staterr, zero);
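		/* pack the four 32-bit DD flags into 16-bit words so the
		 * popcount of the low 64 bits below counts the completed
		 * descriptors.
		 */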
		/* D.3 copy final 1,2 data to rx_pkts */
			((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
		_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
		ice_rx_desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
		/* C.4 calc available number of desc */
		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
		if (likely(var != ICE_DESCS_PER_LOOP))

	/* Update our internal tail pointer */
	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
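	/* the vector Rx path requires a power-of-two ring size, so the AND
	 * above wraps the tail index around the ring.
	 */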
	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

 * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > ICE_VPMD_RX_BURST, only scan ICE_VPMD_RX_BURST
ice_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
	return _ice_recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);

/* vPMD receive routine that reassembles scattered packets
 * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > ICE_VPMD_RX_BURST, only scan ICE_VPMD_RX_BURST
ice_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
	struct ice_rx_queue *rxq = rx_queue;
	uint8_t split_flags[ICE_VPMD_RX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _ice_recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;

	if (!rxq->pkt_first_seg &&
	    split_fl64[0] == 0 && split_fl64[1] == 0 &&
	    split_fl64[2] == 0 && split_fl64[3] == 0)

	/* reassemble any packets that need reassembly */
	if (!rxq->pkt_first_seg) {
		/* find the first split flag, and only reassemble from there */
		while (i < nb_bufs && !split_flags[i])
		rxq->pkt_first_seg = rx_pkts[i];

	return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,

ice_vtx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf *pkt,
			(ICE_TX_DESC_DTYPE_DATA |
			 ((uint64_t)flags << ICE_TXD_QW1_CMD_S) |
			 ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S));
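	/* qword1 carries the descriptor type, command flags and buffer size;
	 * qword0 (set below) carries the buffer DMA address.
	 */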
	__m128i descriptor = _mm_set_epi64x(high_qw,
					    pkt->buf_iova + pkt->data_off);
	_mm_store_si128((__m128i *)txdp, descriptor);

ice_vtx(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkt,
	uint16_t nb_pkts, uint64_t flags)
	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
		ice_vtx1(txdp, *pkt, flags);

ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
	volatile struct ice_tx_desc *txdp;
	struct ice_tx_entry *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = ICE_TD_CMD;
	uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD;

	/* crossing the tx_rs_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

	if (txq->nb_tx_free < txq->tx_free_thresh)
		ice_tx_free_bufs(txq);

	nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &txq->sw_ring[tx_id];

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {
		ice_tx_backlog_entry(txep, tx_pkts, n);

		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
			ice_vtx1(txdp, *tx_pkts, flags);

		ice_vtx1(txdp, *tx_pkts++, rs);
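		/* the descriptor that fills the ring also carries the RS bit
		 * so the HW reports completion of this whole chunk.
		 */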
		nb_commit = (uint16_t)(nb_commit - n);

		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* avoid reaching the end of the ring */
		txdp = &txq->tx_ring[tx_id];
		txep = &txq->sw_ring[tx_id];

	ice_tx_backlog_entry(txep, tx_pkts, nb_commit);

	ice_vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
			rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);

	txq->tx_tail = tx_id;

	ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);

ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;

		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
		ret = ice_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx], num);

ice_rxq_vec_setup(struct ice_rx_queue *rxq)
	rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs_vec;
	return ice_rxq_vec_setup_default(rxq);

ice_txq_vec_setup(struct ice_tx_queue __rte_unused *txq)
	txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs_vec;

ice_rx_vec_dev_check(struct rte_eth_dev *dev)
	return ice_rx_vec_dev_check_default(dev);

ice_tx_vec_dev_check(struct rte_eth_dev *dev)
	return ice_tx_vec_dev_check_default(dev);