/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "base/i40e_prototype.h"
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_rxtx_vec_common.h"

#include <tmmintrin.h>

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
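
/*
 * Refill the RX descriptor ring: take RTE_I40E_RXQ_REARM_THRESH mbufs from
 * the mempool, program their DMA addresses into the 'read' half of the
 * descriptors, and hand the refilled slots back to the NIC via the queue
 * tail register.
 */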
static inline void
i40e_rxq_rearm(struct i40e_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union i40e_rx_desc *rxdp;
	struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
	struct rte_mbuf *mb0, *mb1;
	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
			RTE_PKTMBUF_HEADROOM);
	__m128i dma_addr0, dma_addr1;

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mempool_get_bulk(rxq->mp,
				 (void *)rxep,
				 RTE_I40E_RXQ_REARM_THRESH) < 0) {
		if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
		    rxq->nb_rx_desc) {
			/* Allocation failed and the ring is nearly empty:
			 * point the next few descriptors at the queue's
			 * dummy mbuf and zero their DMA addresses so the
			 * NIC never DMAs into a stale buffer.
			 */
			dma_addr0 = _mm_setzero_si128();
			for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
				rxep[i].mbuf = &rxq->fake_mbuf;
				_mm_store_si128((__m128i *)&rxdp[i].read,
						dma_addr0);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			RTE_I40E_RXQ_REARM_THRESH;
		return;
	}

	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		__m128i vaddr0, vaddr1;

		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;

		/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
				offsetof(struct rte_mbuf, buf_addr) + 8);
		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);

		/* convert pa to dma_addr hdr/data */
		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);

		/* add headroom to pa values */
		dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
		dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);

		/* flush desc with pa dma_addr */
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
	}

	rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			   (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
}
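
/*
 * Translate the status/error bits of four descriptors into mbuf ol_flags
 * and write each mbuf's rearm_data plus ol_flags with a single 16-byte
 * store. The VLAN, RSS/FDIR and checksum flags are each derived with a
 * masked shift followed by a _mm_shuffle_epi8 table lookup.
 */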
static inline void
desc_to_olflags_v(struct i40e_rx_queue *rxq, __m128i descs[4],
		  struct rte_mbuf **rx_pkts)
{
	const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
	__m128i rearm0, rearm1, rearm2, rearm3;
	__m128i vlan0, vlan1, rss, l3_l4e;

	/* mask everything except RSS, flow director and VLAN flags
	 * bit2 is for VLAN tag, bit11 for flow director indication
	 * bit13:12 for RSS indication.
	 */
	const __m128i rss_vlan_msk = _mm_set_epi32(
			0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804);

	const __m128i cksum_mask = _mm_set_epi32(
			PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
			PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
			PKT_RX_EIP_CKSUM_BAD,
			PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
			PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
			PKT_RX_EIP_CKSUM_BAD,
			PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
			PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
			PKT_RX_EIP_CKSUM_BAD,
			PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
			PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
			PKT_RX_EIP_CKSUM_BAD);

	/* map rss and vlan type to rss hash and vlan flag */
	const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
			0, 0, 0, 0,
			0, 0, 0, PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
			0, 0, 0, 0);

	const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
			0, 0, 0, 0,
			PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
			0, 0, PKT_RX_FDIR, 0);

	const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
			/* shift right 1 bit to make sure it does not exceed 255 */
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
			 PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
			 PKT_RX_L4_CKSUM_BAD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
			PKT_RX_IP_CKSUM_BAD >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);
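	/* The lookup index into this table is the descriptor's 3-bit L3/L4
	 * error pattern (see the >>22 shift below); e.g. index 0 (no errors)
	 * selects PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD. Entries are
	 * pre-shifted right by 1 so each combination fits in a byte, and the
	 * result is shifted back left by 1 after the lookup.
	 */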

	vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]);
	vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]);
	vlan0 = _mm_unpacklo_epi64(vlan0, vlan1);

	vlan1 = _mm_and_si128(vlan0, rss_vlan_msk);
	vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1);

	rss = _mm_srli_epi32(vlan1, 11);
	rss = _mm_shuffle_epi8(rss_flags, rss);

	l3_l4e = _mm_srli_epi32(vlan1, 22);
	l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e);
	/* then we shift left 1 bit */
	l3_l4e = _mm_slli_epi32(l3_l4e, 1);
	/* we need to mask out the redundant bits */
	l3_l4e = _mm_and_si128(l3_l4e, cksum_mask);

	vlan0 = _mm_or_si128(vlan0, rss);
	vlan0 = _mm_or_si128(vlan0, l3_l4e);

	/*
	 * At this point, we have the 4 sets of flags in the low 16-bits
	 * of each 32-bit value in vlan0.
	 * We want to extract these, and merge them with the mbuf init data
	 * so we can do a single 16-byte write to the mbuf to set the flags
	 * and all the other initialization fields. Extracting the
	 * appropriate flags means that we have to do a shift and blend for
	 * each mbuf before we do the write.
	 */
	rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 8), 0x10);
	rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 4), 0x10);
	rearm2 = _mm_blend_epi16(mbuf_init, vlan0, 0x10);
	rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(vlan0, 4), 0x10);

	/* write the rearm data and the olflags in one write */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
			offsetof(struct rte_mbuf, rearm_data) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
			RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
	_mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
	_mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
	_mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
	_mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
}

#define PKTLEN_SHIFT     10
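/* The packet length field sits at bits 38..51 of the descriptor's second
 * qword; shifting the containing 32-bit lane left by PKTLEN_SHIFT lands
 * the field on a 16-bit boundary (octets 14..15), where shuf_msk below
 * can pick it up as plain pkt_len/data_len values.
 */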

static inline void
desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
		uint32_t *ptype_tbl)
{
	__m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]);
	__m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]);

	/* PTYPE occupies bits 30..37 of the descriptor's second qword */
	ptype0 = _mm_srli_epi64(ptype0, 30);
	ptype1 = _mm_srli_epi64(ptype1, 30);

	rx_pkts[0]->packet_type = ptype_tbl[_mm_extract_epi8(ptype0, 0)];
	rx_pkts[1]->packet_type = ptype_tbl[_mm_extract_epi8(ptype0, 8)];
	rx_pkts[2]->packet_type = ptype_tbl[_mm_extract_epi8(ptype1, 0)];
	rx_pkts[3]->packet_type = ptype_tbl[_mm_extract_epi8(ptype1, 8)];
}

/* vPMD receive routine, only accept (nb_pkts >= RTE_I40E_DESCS_PER_LOOP)
 *
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
 *   numbers of DD bits
 */
static inline uint16_t
_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts, uint8_t *split_packet)
{
	volatile union i40e_rx_desc *rxdp;
	struct i40e_rx_entry *sw_ring;
	uint16_t nb_pkts_recd;
	int pos;
	uint64_t var;
	__m128i shuf_msk;
	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;

	__m128i crc_adjust = _mm_set_epi16(
				0, 0, 0,       /* ignore non-length fields */
				-rxq->crc_len, /* sub crc on data_len */
				0,             /* ignore high-16bits of pkt_len */
				-rxq->crc_len, /* sub crc on pkt_len */
				0, 0           /* ignore pkt_type field */
			);
	/*
	 * compile-time check the above crc_adjust layout is correct.
	 * NOTE: the first field (lowest address) is given last in the
	 * set_epi16 call above.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	__m128i dd_check, eop_check;

	/* nb_pkts has to be less than or equal to RTE_I40E_MAX_RX_BURST */
	nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);

	/* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);

	/* Just the act of getting into the function from the application is
	 * going to cost about 7 cycles
	 */
	rxdp = rxq->rx_ring + rxq->rx_tail;

	rte_prefetch0(rxdp);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
		i40e_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.qword1.status_error_len &
			rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
		return 0;

	/* 4 packets DD mask */
	dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);

	/* 4 packets EOP mask */
	eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);
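	/* DD is status bit 0 and end-of-packet is status bit 1 of each
	 * descriptor's qword1, so each mask replicates that bit across the
	 * four 32-bit staterr lanes assembled later in the loop.
	 */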

	/* mask to shuffle from desc. to mbuf */
	shuf_msk = _mm_set_epi8(
		7, 6, 5, 4,  /* octet 4~7, 32bits rss */
		3, 2,        /* octet 2~3, low 16 bits vlan_macip */
		15, 14,      /* octet 15~14, 16 bits data_len */
		0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
		15, 14,      /* octet 15~14, low 16 bits pkt_len */
		0xFF, 0xFF,  /* pkt_type set as unknown */
		0xFF, 0xFF   /* pkt_type set as unknown */
		);
	/*
	 * Compile-time verify the shuffle mask
	 * NOTE: some field positions already verified above, but duplicated
	 * here for completeness in case of future modifications.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

	/* Cache is empty -> need to scan the buffer rings, but first move
	 * the next 'n' mbufs into the cache
	 */
	sw_ring = &rxq->sw_ring[rxq->rx_tail];

	/* A. load 4 packets in one loop
	 * [A*. mask out 4 unused dirty fields in desc]
	 * B. copy 4 mbuf pointers from sw_ring to rx_pkts
	 * C. calc the number of DD bits among the 4 packets
	 * [C*. extract the end-of-packet bit, if requested]
	 * D. fill info. from desc to mbuf
	 */
	for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
			pos += RTE_I40E_DESCS_PER_LOOP,
			rxdp += RTE_I40E_DESCS_PER_LOOP) {
		__m128i descs[RTE_I40E_DESCS_PER_LOOP];
		__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
		__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
		/* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
		__m128i mbp1;
#if defined(RTE_ARCH_X86_64)
		/* 4 64 bit mbuf pointers in one XMM reg. */
		__m128i mbp2;
#endif

		/* B.1 load 2 (64 bit) or 4 (32 bit) mbuf pointers */
		mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
		/* Read desc statuses backwards to avoid race condition */
		/* A.1 load 4 pkts desc */
		descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
		rte_compiler_barrier();

		/* B.2 copy 2 64 bit or 4 32 bit mbuf pointers into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);

#if defined(RTE_ARCH_X86_64)
		/* B.1 load 2 64 bit mbuf pointers */
		mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
#endif

		descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
		rte_compiler_barrier();
		/* A.1 load remaining descs */
		descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
		rte_compiler_barrier();
		descs[0] = _mm_loadu_si128((__m128i *)(rxdp));

#if defined(RTE_ARCH_X86_64)
		/* B.2 copy 2 mbuf pointers into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
#endif

		if (split_packet) {
			rte_mbuf_prefetch_part2(rx_pkts[pos]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
		}

		/* avoid compiler reorder optimization */
		rte_compiler_barrier();

		/* pkt 3,4 shift the pktlen field to be 16-bit aligned */
		const __m128i len3 = _mm_slli_epi32(descs[3], PKTLEN_SHIFT);
		const __m128i len2 = _mm_slli_epi32(descs[2], PKTLEN_SHIFT);

		/* merge the now-aligned packet length fields back in */
		descs[3] = _mm_blend_epi16(descs[3], len3, 0x80);
		descs[2] = _mm_blend_epi16(descs[2], len2, 0x80);

		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
		pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
		pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);

		/* C.1 4=>2 filter staterr info only */
		sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
		/* C.1 4=>2 filter staterr info only */
		sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);

		desc_to_olflags_v(rxq, descs, &rx_pkts[pos]);

		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
		pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
		pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);

		/* pkt 1,2 shift the pktlen field to be 16-bit aligned */
		const __m128i len1 = _mm_slli_epi32(descs[1], PKTLEN_SHIFT);
		const __m128i len0 = _mm_slli_epi32(descs[0], PKTLEN_SHIFT);

		/* merge the now-aligned packet length fields back in */
		descs[1] = _mm_blend_epi16(descs[1], len1, 0x80);
		descs[0] = _mm_blend_epi16(descs[0], len0, 0x80);

		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
		pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
		pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);

		/* C.2 get 4 pkts staterr value */
		zero = _mm_xor_si128(dd_check, dd_check);
		staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);

		/* D.3 copy final 3,4 data to rx_pkts */
		_mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
				pkt_mb4);
		_mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,
				pkt_mb3);

		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
		pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
		pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);

		/* C* extract and record EOP bit */
		if (split_packet) {
			__m128i eop_shuf_mask = _mm_set_epi8(
					0xFF, 0xFF, 0xFF, 0xFF,
					0xFF, 0xFF, 0xFF, 0xFF,
					0xFF, 0xFF, 0xFF, 0xFF,
					0x04, 0x0C, 0x00, 0x08
					);

			/* and with mask to extract bits, flipping 1-0 */
			__m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
			/* the staterr values are not in order, which the
			 * count of DD bits doesn't care about. However, for
			 * end of packet tracking, we do care, so shuffle.
			 * This also compresses the 32-bit values to 8-bit.
			 */
			eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
			/* store the resulting 32-bit value */
			*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
			split_packet += RTE_I40E_DESCS_PER_LOOP;
		}

		/* C.3 calc available number of desc */
		staterr = _mm_and_si128(staterr, dd_check);
		staterr = _mm_packs_epi32(staterr, zero);

		/* D.3 copy final 1,2 data to rx_pkts */
		_mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
				pkt_mb2);
		_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
				pkt_mb1);
		desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
		/* C.4 calc available number of desc */
		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
		nb_pkts_recd += var;
		if (likely(var != RTE_I40E_DESCS_PER_LOOP))
			break;
	}

	/* Update our internal tail pointer; the mask wrap assumes the ring
	 * size is a power of two.
	 */
	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

	return nb_pkts_recd;
}

/* vPMD receive routine, only accept (nb_pkts >= RTE_I40E_DESCS_PER_LOOP)
 *
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
 *   numbers of DD bits
 */
uint16_t
i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts)
{
	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}

/* vPMD receive routine that reassembles scattered packets
 *
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
 *   numbers of DD bits
 */
uint16_t
i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			     uint16_t nb_pkts)
{
	struct i40e_rx_queue *rxq = rx_queue;
	uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
			split_flags);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;

	if (rxq->pkt_first_seg == NULL &&
			split_fl64[0] == 0 && split_fl64[1] == 0 &&
			split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	unsigned i = 0;

	if (rxq->pkt_first_seg == NULL) {
		/* find the first split flag, and only reassemble from there */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
	}
	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
		&split_flags[i]);
}
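
/*
 * Build one 16-byte TX data descriptor: the low qword carries the buffer
 * DMA address, the high qword packs the descriptor type, command flags
 * and buffer size into their I40E_TXD_QW1_* bit positions.
 */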
static inline void
vtx1(volatile struct i40e_tx_desc *txdp,
     struct rte_mbuf *pkt, uint64_t flags)
{
	uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
			((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
			((uint64_t)pkt->data_len <<
			 I40E_TXD_QW1_TX_BUF_SZ_SHIFT));

	__m128i descriptor = _mm_set_epi64x(high_qw,
			pkt->buf_physaddr + pkt->data_off);
	_mm_store_si128((__m128i *)txdp, descriptor);
}

static inline void
vtx(volatile struct i40e_tx_desc *txdp,
    struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
	int i;

	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
		vtx1(txdp, *pkt, flags);
}
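
/*
 * Vector TX for single-segment packets. Bursts are capped at tx_rs_thresh
 * so that the RS (report status) bit is requested at most once per
 * threshold's worth of descriptors, bounding how often the NIC writes
 * back completion status.
 */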
uint16_t
i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
	struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
	volatile struct i40e_tx_desc *txdp;
	struct i40e_tx_entry *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = I40E_TD_CMD;
	uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
	int i;

	/* crossing the tx_rs_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

	if (txq->nb_tx_free < txq->tx_free_thresh)
		i40e_tx_free_bufs(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &txq->sw_ring[tx_id];

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {
		tx_backlog_entry(txep, tx_pkts, n);

		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
			vtx1(txdp, *tx_pkts, flags);

		vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* wrap back to the start of the ring */
		txdp = &txq->tx_ring[tx_id];
		txep = &txq->sw_ring[tx_id];
	}

	tx_backlog_entry(txep, tx_pkts, nb_commit);

	vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
			rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
						I40E_TXD_QW1_CMD_SHIFT);
		txq->tx_next_rs =
			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
	}

	txq->tx_tail = tx_id;

	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);

	return nb_pkts;
}

void __attribute__((cold))
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
	_i40e_rx_queue_release_mbufs_vec(rxq);
}

int __attribute__((cold))
i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
{
	return i40e_rxq_vec_setup_default(rxq);
}

int __attribute__((cold))
i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
{
	return 0;
}

int __attribute__((cold))
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
	return i40e_rx_vec_dev_conf_condition_check_default(dev);
}