/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <inttypes.h>

#include <rte_ethdev.h>
#include <rte_common.h>
#include "fm10k.h"
#include "base/fm10k_type.h"

#include <tmmintrin.h>

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

/* Handling the offload flags (olflags) field takes computation
 * time when receiving packets. Therefore we provide a flag to disable
 * the processing of the olflags field when it is not needed. This
 * gives improved performance, at the cost of losing the offload info
 * in the received packet.
 */
#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE

/* Vlan present flag shift */
#define VP_SHIFT	(2)
/* L3 type shift */
#define L3TYPE_SHIFT	(4)
/* L4 type shift */
#define L4TYPE_SHIFT	(7)

static inline void
fm10k_desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
{
	__m128i ptype0, ptype1, vtag0, vtag1;
	union {
		uint16_t e[4];
		uint64_t dword;
	} vol;

	const __m128i pkttype_msk = _mm_set_epi16(
			0x0000, 0x0000, 0x0000, 0x0000,
			PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT,
			PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT);

	/* mask everything except rss type */
	const __m128i rsstype_msk = _mm_set_epi16(
			0x0000, 0x0000, 0x0000, 0x0000,
			0x000F, 0x000F, 0x000F, 0x000F);

	/* map rss type to rss hash flag */
	const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
			0, 0, 0, PKT_RX_RSS_HASH,
			PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0,
			PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0);
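
	/* rss_flags serves below as a 16-entry byte lookup table, indexed by
	 * the 4-bit RSS type via _mm_shuffle_epi8: entries holding
	 * PKT_RX_RSS_HASH mark the RSS types for which the hardware computed
	 * a hash.
	 */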
	ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
	ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
	vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
	vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]);
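
	/* The 16-bit unpacks above interleave descriptor pairs so that, after
	 * the 32-bit unpacks below, the RSS-type word (ptype) and the
	 * VLAN/status word (vtag) of each of the four packets land in their
	 * own 16-bit lanes.
	 */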
	ptype0 = _mm_unpacklo_epi32(ptype0, ptype1);
	ptype0 = _mm_and_si128(ptype0, rsstype_msk);
	ptype0 = _mm_shuffle_epi8(rss_flags, ptype0);

	vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);
	vtag1 = _mm_srli_epi16(vtag1, VP_SHIFT);
	vtag1 = _mm_and_si128(vtag1, pkttype_msk);
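
	/* The shift and mask above moved the VLAN-present bit down to bit 0,
	 * turning it directly into PKT_RX_VLAN_PKT (0x0001 in this DPDK
	 * release).
	 */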
	vtag1 = _mm_or_si128(ptype0, vtag1);
	vol.dword = _mm_cvtsi128_si64(vtag1);

	rx_pkts[0]->ol_flags = vol.e[0];
	rx_pkts[1]->ol_flags = vol.e[1];
	rx_pkts[2]->ol_flags = vol.e[2];
	rx_pkts[3]->ol_flags = vol.e[3];
}

static inline void
fm10k_desc_to_pktype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
{
	__m128i l3l4type0, l3l4type1, l3type, l4type;
	union {
		uint16_t e[4];
		uint64_t dword;
	} vol;

	/* L3 pkt type mask, Bit4 to Bit6 */
	const __m128i l3type_msk = _mm_set_epi16(
			0x0000, 0x0000, 0x0000, 0x0000,
			0x0070, 0x0070, 0x0070, 0x0070);

	/* L4 pkt type mask, Bit7 to Bit9 */
	const __m128i l4type_msk = _mm_set_epi16(
			0x0000, 0x0000, 0x0000, 0x0000,
			0x0380, 0x0380, 0x0380, 0x0380);

	/* convert RRC l3 type to mbuf format */
	const __m128i l3type_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
			0, 0, 0, RTE_PTYPE_L3_IPV6_EXT,
			RTE_PTYPE_L3_IPV6, RTE_PTYPE_L3_IPV4_EXT,
			RTE_PTYPE_L3_IPV4, 0);

	/* Convert RRC l4 type to mbuf format. The l4type_flags entries are
	 * stored right-shifted by 8 bits so that each fits into 8 bits.
	 */
	const __m128i l4type_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0,
			RTE_PTYPE_TUNNEL_GENEVE >> 8,
			RTE_PTYPE_TUNNEL_NVGRE >> 8,
			RTE_PTYPE_TUNNEL_VXLAN >> 8,
			RTE_PTYPE_TUNNEL_GRE >> 8,
			RTE_PTYPE_L4_UDP >> 8,
			RTE_PTYPE_L4_TCP >> 8,
			0);
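
	/* The RTE_PTYPE_L4_* and RTE_PTYPE_TUNNEL_* values sit above the low
	 * byte (their masks are 0x0f00 and 0xf000), so they cannot be stored
	 * as-is in a byte-shuffle table; the entries above are pre-shifted
	 * right by 8 and restored with _mm_slli_epi16() after the lookup.
	 */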

	l3l4type0 = _mm_unpacklo_epi16(descs[0], descs[1]);
	l3l4type1 = _mm_unpacklo_epi16(descs[2], descs[3]);
	l3l4type0 = _mm_unpacklo_epi32(l3l4type0, l3l4type1);

	l3type = _mm_and_si128(l3l4type0, l3type_msk);
	l4type = _mm_and_si128(l3l4type0, l4type_msk);

	l3type = _mm_srli_epi16(l3type, L3TYPE_SHIFT);
	l4type = _mm_srli_epi16(l4type, L4TYPE_SHIFT);
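
	/* After the mask and shift, each 16-bit lane holds the descriptor's
	 * 3-bit L3/L4 type as a small index, ready for the byte lookup
	 * tables above.
	 */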
	l3type = _mm_shuffle_epi8(l3type_flags, l3type);
	/* the l4 table entries were stored right-shifted by 8 bits,
	 * so shift the looked-up values back left by 8
	 */
	l4type = _mm_shuffle_epi8(l4type_flags, l4type);
	l4type = _mm_slli_epi16(l4type, 8);
	l3l4type0 = _mm_or_si128(l3type, l4type);
	vol.dword = _mm_cvtsi128_si64(l3l4type0);

	rx_pkts[0]->packet_type = vol.e[0];
	rx_pkts[1]->packet_type = vol.e[1];
	rx_pkts[2]->packet_type = vol.e[2];
	rx_pkts[3]->packet_type = vol.e[3];
}
#else
#define fm10k_desc_to_olflags_v(desc, rx_pkts) do {} while (0)
#define fm10k_desc_to_pktype_v(desc, rx_pkts) do {} while (0)
#endif

int __attribute__((cold))
fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;

#ifndef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
	/* without rx ol_flags, no VP flag report */
	if (rxmode->hw_vlan_extend != 0)
		return -1;
#endif

	/* no fdir support */
	if (fconf->mode != RTE_FDIR_MODE_NONE)
		return -1;

	/* - no csum error report support
	 * - no header split support
	 */
	if (rxmode->hw_ip_checksum == 1 ||
			rxmode->header_split == 1)
		return -1;

	return 0;
#else
	RTE_SET_USED(dev);
	return -1;
#endif
}

int __attribute__((cold))
fm10k_rxq_vec_setup(struct fm10k_rx_queue *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	/* data_off will be adjusted after a new mbuf is allocated, for
	 * 512-byte alignment.
	 */
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);
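
	/* Cache the 8 bytes starting at rearm_data so that the rearm path
	 * can re-initialize each new mbuf with a single 64-bit store; per
	 * the rearm code below, that covers the 6 bytes of rearm data plus
	 * the first 2 bytes of ol_flags.
	 */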
	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
	return 0;
}

static inline void
fm10k_rxq_rearm(struct fm10k_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union fm10k_rx_desc *rxdp;
	struct rte_mbuf **mb_alloc = &rxq->sw_ring[rxq->rxrearm_start];
	struct rte_mbuf *mb0, *mb1;
	__m128i head_off = _mm_set_epi64x(
			RTE_PKTMBUF_HEADROOM + FM10K_RX_DATABUF_ALIGN - 1,
			RTE_PKTMBUF_HEADROOM + FM10K_RX_DATABUF_ALIGN - 1);
	__m128i dma_addr0, dma_addr1;
	/* Rx buffers need to be aligned on a 512-byte boundary */
	const __m128i hba_msk = _mm_set_epi64x(0,
			UINT64_MAX - FM10K_RX_DATABUF_ALIGN + 1);
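
	/* UINT64_MAX - FM10K_RX_DATABUF_ALIGN + 1 equals
	 * ~(FM10K_RX_DATABUF_ALIGN - 1): adding head_off and masking rounds
	 * each buffer address up to the next 512-byte boundary, while the
	 * zero in the upper lane clears the header buffer address.
	 */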

	rxdp = rxq->hw_ring + rxq->rxrearm_start;

	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mempool_get_bulk(rxq->mp,
				 (void *)mb_alloc,
				 RTE_FM10K_RXQ_REARM_THRESH) < 0) {
		dma_addr0 = _mm_setzero_si128();
		/* Clean up all the HW/SW ring content */
		for (i = 0; i < RTE_FM10K_RXQ_REARM_THRESH; i++) {
			mb_alloc[i] = &rxq->fake_mbuf;
			_mm_store_si128((__m128i *)&rxdp[i].q,
					dma_addr0);
		}

		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			RTE_FM10K_RXQ_REARM_THRESH;
		return;
	}

	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < RTE_FM10K_RXQ_REARM_THRESH; i += 2, mb_alloc += 2) {
		__m128i vaddr0, vaddr1;
		uintptr_t p0, p1;

		mb0 = mb_alloc[0];
		mb1 = mb_alloc[1];

		/* Flush mbuf with pkt template.
		 * Data to be rearmed is 6 bytes long.
		 * Though, RX will overwrite ol_flags that are coming next
		 * anyway. So overwrite whole 8 bytes with one load:
		 * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
		 */
		p0 = (uintptr_t)&mb0->rearm_data;
		*(uint64_t *)p0 = rxq->mbuf_initializer;
		p1 = (uintptr_t)&mb1->rearm_data;
		*(uint64_t *)p1 = rxq->mbuf_initializer;

		/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);

		/* convert pa to dma_addr hdr/data */
		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
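
		/* unpackhi duplicates buf_physaddr into both 64-bit lanes, so
		 * one 128-bit store below fills the descriptor's packet and
		 * header address fields at once (the mask later zeroes the
		 * header address lane).
		 */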

		/* add headroom to pa values */
		dma_addr0 = _mm_add_epi64(dma_addr0, head_off);
		dma_addr1 = _mm_add_epi64(dma_addr1, head_off);

		/* Do 512 byte alignment to satisfy HW requirement, in the
		 * meanwhile, set Header Buffer Address to zero.
		 */
		dma_addr0 = _mm_and_si128(dma_addr0, hba_msk);
		dma_addr1 = _mm_and_si128(dma_addr1, hba_msk);

		/* flush desc with pa dma_addr */
		_mm_store_si128((__m128i *)&rxdp++->q, dma_addr0);
		_mm_store_si128((__m128i *)&rxdp++->q, dma_addr1);

		/* enforce 512B alignment on default Rx virtual addresses */
		mb0->data_off = (uint16_t)(RTE_PTR_ALIGN((char *)mb0->buf_addr
				+ RTE_PKTMBUF_HEADROOM, FM10K_RX_DATABUF_ALIGN)
				- (char *)mb0->buf_addr);
		mb1->data_off = (uint16_t)(RTE_PTR_ALIGN((char *)mb1->buf_addr
				+ RTE_PKTMBUF_HEADROOM, FM10K_RX_DATABUF_ALIGN)
				- (char *)mb1->buf_addr);
	}

	rxq->rxrearm_start += RTE_FM10K_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= RTE_FM10K_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			(rxq->nb_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	FM10K_PCI_REG_WRITE(rxq->tail_ptr, rx_id);
}

static inline uint16_t
fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts, uint8_t *split_packet)
{
	volatile union fm10k_rx_desc *rxdp;
	struct rte_mbuf **mbufp;
	uint16_t nb_pkts_recd;
	int pos;
	struct fm10k_rx_queue *rxq = rx_queue;
	uint64_t var;
	__m128i shuf_msk;
	__m128i dd_check, eop_check;
	uint16_t next_dd;

	next_dd = rxq->next_dd;

	/* Just the act of getting into the function from the application is
	 * going to cost about 7 cycles
	 */
	rxdp = rxq->hw_ring + next_dd;

	_mm_prefetch((const void *)rxdp, _MM_HINT_T0);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > RTE_FM10K_RXQ_REARM_THRESH)
		fm10k_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->d.staterr & FM10K_RXD_STATUS_DD))
		return 0;

	/* Vector RX will process 4 packets at a time, strip the unaligned
	 * tails in case it's not a multiple of 4.
	 */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_FM10K_DESCS_PER_LOOP);

	/* 4 packets DD mask */
	dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);

	/* 4 packets EOP mask */
	eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);
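
	/* dd_check/eop_check replicate DD (bit 0) and EOP (bit 1) of each
	 * descriptor's 32-bit staterr lane, so a single AND tests the status
	 * of all four packets at once.
	 */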

	/* mask to shuffle from desc. to mbuf */
	shuf_msk = _mm_set_epi8(
		7, 6, 5, 4,  /* octet 4~7, 32bits rss */
		15, 14,      /* octet 14~15, low 16 bits vlan_macip */
		13, 12,      /* octet 12~13, 16 bits data_len */
		0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
		13, 12,      /* octet 12~13, low 16 bits pkt_len */
		0xFF, 0xFF,  /* skip high 16 bits pkt_type */
		0xFF, 0xFF   /* Skip pkt_type field in shuffle operation */
		);
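
	/* The shuffled bytes line up with the mbuf's rx_descriptor_fields1
	 * area (packet_type, pkt_len, data_len, vlan_tci, rss hash in this
	 * mbuf layout), so each descriptor is converted with one shuffle
	 * plus one 16-byte store at step D.3 below.
	 */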

	/* Cache is empty -> need to scan the buffer rings, but first move
	 * the next 'n' mbufs into the cache
	 */
	mbufp = &rxq->sw_ring[next_dd];

	/* A. load 4 packets' descs in one loop
	 * [A*. mask out 4 unused dirty fields in desc]
	 * B. copy 4 mbuf pointers from swring to rx_pkts
	 * C. calc the number of DD bits among the 4 packets
	 * [C*. extract the end-of-packet bit, if requested]
	 * D. fill info. from desc to mbuf
	 */
	for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
			pos += RTE_FM10K_DESCS_PER_LOOP,
			rxdp += RTE_FM10K_DESCS_PER_LOOP) {
		__m128i descs0[RTE_FM10K_DESCS_PER_LOOP];
		__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
		__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
		__m128i mbp1, mbp2; /* two mbuf pointers in one XMM reg. */

		/* B.1 load 1 mbuf point */
		mbp1 = _mm_loadu_si128((__m128i *)&mbufp[pos]);

		/* Read desc statuses backwards to avoid race condition */
		/* A.1 load 4 pkts desc */
		descs0[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
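
		/* If the newest descriptor (descs0[3]) already shows DD, the
		 * hardware wrote back the older ones before it, so reading in
		 * reverse guarantees the DD bits gathered below form one
		 * contiguous run.
		 */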

		/* B.2 copy 2 mbuf point into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);

		/* B.1 load 1 mbuf point */
		mbp2 = _mm_loadu_si128((__m128i *)&mbufp[pos + 2]);

		/* A.1 load remaining 3 pkts desc */
		descs0[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
		descs0[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
		descs0[0] = _mm_loadu_si128((__m128i *)(rxdp));

		/* B.2 copy 2 mbuf point into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos + 2], mbp2);

		/* avoid compiler reorder optimization */
		rte_compiler_barrier();

		if (split_packet) {
			rte_prefetch0(&rx_pkts[pos]->cacheline1);
			rte_prefetch0(&rx_pkts[pos + 1]->cacheline1);
			rte_prefetch0(&rx_pkts[pos + 2]->cacheline1);
			rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
		}

		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
		pkt_mb4 = _mm_shuffle_epi8(descs0[3], shuf_msk);
		pkt_mb3 = _mm_shuffle_epi8(descs0[2], shuf_msk);

		/* C.1 4=>2 filter staterr info only */
		sterr_tmp2 = _mm_unpackhi_epi32(descs0[3], descs0[2]);
		/* C.1 4=>2 filter staterr info only */
		sterr_tmp1 = _mm_unpackhi_epi32(descs0[1], descs0[0]);

		/* set ol_flags with vlan packet type */
		fm10k_desc_to_olflags_v(descs0, &rx_pkts[pos]);

		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
		pkt_mb2 = _mm_shuffle_epi8(descs0[1], shuf_msk);
		pkt_mb1 = _mm_shuffle_epi8(descs0[0], shuf_msk);

		/* C.2 get 4 pkts staterr value */
		zero = _mm_xor_si128(dd_check, dd_check);
		staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);

		/* D.3 copy final 3,4 data to rx_pkts */
		_mm_storeu_si128((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
				pkt_mb4);
		_mm_storeu_si128((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
				pkt_mb3);

		/* C* extract and record EOP bit */
		if (split_packet) {
			__m128i eop_shuf_mask = _mm_set_epi8(
					0xFF, 0xFF, 0xFF, 0xFF,
					0xFF, 0xFF, 0xFF, 0xFF,
					0xFF, 0xFF, 0xFF, 0xFF,
					0x04, 0x0C, 0x00, 0x08
					);

			/* and with mask to extract bits, flipping 1-0 */
			__m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
			/* the staterr values are not in order, as the count
			 * of dd bits doesn't care. However, for end of
			 * packet tracking, we do care, so shuffle. This also
			 * compresses the 32-bit values to 8-bit
			 */
			eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
			/* store the resulting 32-bit value */
			*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
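
			/* After the andnot flip, each stored byte is nonzero
			 * exactly when that packet's EOP bit was clear, i.e.
			 * more segments follow; this is the encoding that
			 * fm10k_reassemble_packets() expects in split_flags.
			 */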
			split_packet += RTE_FM10K_DESCS_PER_LOOP;

			/* zero-out next pointers */
			rx_pkts[pos]->next = NULL;
			rx_pkts[pos + 1]->next = NULL;
			rx_pkts[pos + 2]->next = NULL;
			rx_pkts[pos + 3]->next = NULL;
		}

		/* C.3 calc available number of desc */
		staterr = _mm_and_si128(staterr, dd_check);
		staterr = _mm_packs_epi32(staterr, zero);

		/* D.3 copy final 1,2 data to rx_pkts */
		_mm_storeu_si128((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
				pkt_mb2);
		_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
				pkt_mb1);

		fm10k_desc_to_pktype_v(descs0, &rx_pkts[pos]);

		/* C.4 calc available number of desc */
		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
		nb_pkts_recd += var;
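
		/* Each completed descriptor contributes one set bit after the
		 * pack, and DD bits are contiguous from lane 0, so popcount
		 * gives the number of packets received in this group; fewer
		 * than 4 means the ring's live edge was reached.
		 */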
		if (likely(var != RTE_FM10K_DESCS_PER_LOOP))
			break;
	}

	/* Update our internal tail pointer */
	rxq->next_dd = (uint16_t)(rxq->next_dd + nb_pkts_recd);
	rxq->next_dd = (uint16_t)(rxq->next_dd & (rxq->nb_desc - 1));
	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

	return nb_pkts_recd;
}

/* vPMD receive routine
 *
 * Notice:
 * - does not support ol_flags for rss and csum err
 */
uint16_t
fm10k_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	return fm10k_recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}

static inline uint16_t
fm10k_reassemble_packets(struct fm10k_rx_queue *rxq,
		struct rte_mbuf **rx_bufs,
		uint16_t nb_bufs, uint8_t *split_flags)
{
	struct rte_mbuf *pkts[RTE_FM10K_MAX_RX_BURST]; /* finished pkts */
	struct rte_mbuf *start = rxq->pkt_first_seg;
	struct rte_mbuf *end = rxq->pkt_last_seg;
	unsigned pkt_idx, buf_idx;
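
	/* start/end carry the in-progress chain across calls: start points
	 * at the first segment of the packet being reassembled and end at
	 * its current last segment; both are NULL when no packet is in
	 * flight.
	 */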
	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
		if (end != NULL) {
			/* processing a split packet */
			end->next = rx_bufs[buf_idx];
			start->nb_segs++;
			start->pkt_len += rx_bufs[buf_idx]->data_len;
			end = end->next;

			if (!split_flags[buf_idx]) {
				/* it's the last packet of the set */
				start->hash = end->hash;
				start->ol_flags = end->ol_flags;
				pkts[pkt_idx++] = start;
				start = end = NULL;
			}
		} else {
			/* not processing a split packet */
			if (!split_flags[buf_idx]) {
				/* not a split packet, save and skip */
				pkts[pkt_idx++] = rx_bufs[buf_idx];
				continue;
			}
			end = start = rx_bufs[buf_idx];
		}
	}

	/* save the partial packet for next time */
	rxq->pkt_first_seg = start;
	rxq->pkt_last_seg = end;
	memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
	return pkt_idx;
}

/*
 * vPMD receive routine that reassembles scattered packets
 *
 * Notice:
 * - does not support ol_flags for rss and csum err
 * - if nb_pkts > RTE_FM10K_MAX_RX_BURST, only RTE_FM10K_MAX_RX_BURST
 *   DD bits are scanned
 */
uint16_t
fm10k_recv_scattered_pkts_vec(void *rx_queue,
				struct rte_mbuf **rx_pkts,
				uint16_t nb_pkts)
{
	struct fm10k_rx_queue *rxq = rx_queue;
	uint8_t split_flags[RTE_FM10K_MAX_RX_BURST] = {0};
	unsigned i = 0;

	/* Split_flags only can support max of RTE_FM10K_MAX_RX_BURST */
	nb_pkts = RTE_MIN(nb_pkts, RTE_FM10K_MAX_RX_BURST);
	/* get some new buffers */
	uint16_t nb_bufs = fm10k_recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
			split_flags);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;
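
	/* Reading split_flags as four 64-bit words checks all
	 * RTE_FM10K_MAX_RX_BURST flags (32, assuming the default burst size)
	 * with just four compares.
	 */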
	if (rxq->pkt_first_seg == NULL &&
			split_fl64[0] == 0 && split_fl64[1] == 0 &&
			split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	if (rxq->pkt_first_seg == NULL) {
		/* find the first split flag, and only reassemble from there */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
	}
	return i + fm10k_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
		&split_flags[i]);
}