* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <rte_ethdev.h>
#include <rte_common.h>
#include "base/fm10k_type.h"
#include <tmmintrin.h>
#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
fm10k_tx_queue_release_mbufs_vec(struct fm10k_tx_queue *txq);
fm10k_reset_tx_queue(struct fm10k_tx_queue *txq);
/* Handling the offload flags (olflags) field takes computation
* time when receiving packets. Therefore, we provide a flag to disable
* the processing of the olflags field when it is not needed. This
* gives improved performance, at the cost of losing the offload info
* in the received packet.
#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
/* VLAN present flag shift */
#define L3TYPE_SHIFT (4)
#define L4TYPE_SHIFT (7)
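/* Extract the VLAN-present and RSS-type bits from four Rx descriptors
* at once and translate them into mbuf ol_flags.
*/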
fm10k_desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
__m128i ptype0, ptype1, vtag0, vtag1;
const __m128i pkttype_msk = _mm_set_epi16(
0x0000, 0x0000, 0x0000, 0x0000,
PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT,
PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT);
/* mask everything except rss type */
const __m128i rsstype_msk = _mm_set_epi16(
0x0000, 0x0000, 0x0000, 0x0000,
0x000F, 0x000F, 0x000F, 0x000F);
/* map rss type to rss hash flag */
const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, PKT_RX_RSS_HASH,
PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0,
PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0);
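/* The unpacks below interleave fields from the four descriptors so
* that, after the 32-bit unpack, the RSS-type words of all four
* packets land in ptype0 and the VLAN-tag words in vtag1, ready for
* masking and the rss_flags shuffle lookup.
*/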
ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]);
ptype0 = _mm_unpacklo_epi32(ptype0, ptype1);
ptype0 = _mm_and_si128(ptype0, rsstype_msk);
ptype0 = _mm_shuffle_epi8(rss_flags, ptype0);
vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);
vtag1 = _mm_srli_epi16(vtag1, VP_SHIFT);
vtag1 = _mm_and_si128(vtag1, pkttype_msk);
vtag1 = _mm_or_si128(ptype0, vtag1);
vol.dword = _mm_cvtsi128_si64(vtag1);
rx_pkts[0]->ol_flags = vol.e[0];
rx_pkts[1]->ol_flags = vol.e[1];
rx_pkts[2]->ol_flags = vol.e[2];
rx_pkts[3]->ol_flags = vol.e[3];
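/* Translate the RRC L3/L4 type fields of four Rx descriptors into
* RTE_PTYPE_* values and store them in mbuf->packet_type.
*/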
fm10k_desc_to_pktype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
__m128i l3l4type0, l3l4type1, l3type, l4type;
/* L3 pkt type mask Bit4 to Bit6 */
const __m128i l3type_msk = _mm_set_epi16(
0x0000, 0x0000, 0x0000, 0x0000,
0x0070, 0x0070, 0x0070, 0x0070);
/* L4 pkt type mask Bit7 to Bit9 */
const __m128i l4type_msk = _mm_set_epi16(
0x0000, 0x0000, 0x0000, 0x0000,
0x0380, 0x0380, 0x0380, 0x0380);
/* convert RRC l3 type to mbuf format */
const __m128i l3type_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, RTE_PTYPE_L3_IPV6_EXT,
RTE_PTYPE_L3_IPV6, RTE_PTYPE_L3_IPV4_EXT,
RTE_PTYPE_L3_IPV4, 0);
/* Convert RRC l4 type to mbuf format. The RTE_PTYPE_* values are
* pre-shifted right by 8 bits so they fit into the 8-bit shuffle
* table entries; the result is shifted back left by 8 bits below.
const __m128i l4type_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0,
RTE_PTYPE_TUNNEL_GENEVE >> 8,
RTE_PTYPE_TUNNEL_NVGRE >> 8,
RTE_PTYPE_TUNNEL_VXLAN >> 8,
RTE_PTYPE_TUNNEL_GRE >> 8,
RTE_PTYPE_L4_UDP >> 8,
RTE_PTYPE_L4_TCP >> 8,
l3l4type0 = _mm_unpacklo_epi16(descs[0], descs[1]);
l3l4type1 = _mm_unpacklo_epi16(descs[2], descs[3]);
l3l4type0 = _mm_unpacklo_epi32(l3l4type0, l3l4type1);
l3type = _mm_and_si128(l3l4type0, l3type_msk);
l4type = _mm_and_si128(l3l4type0, l4type_msk);
l3type = _mm_srli_epi16(l3type, L3TYPE_SHIFT);
l4type = _mm_srli_epi16(l4type, L4TYPE_SHIFT);
l3type = _mm_shuffle_epi8(l3type_flags, l3type);
/* l4type table entries were pre-shifted right by 8 bits; shift left to restore */
l4type = _mm_shuffle_epi8(l4type_flags, l4type);
l4type = _mm_slli_epi16(l4type, 8);
l3l4type0 = _mm_or_si128(l3type, l4type);
vol.dword = _mm_cvtsi128_si64(l3l4type0);
rx_pkts[0]->packet_type = vol.e[0];
rx_pkts[1]->packet_type = vol.e[1];
rx_pkts[2]->packet_type = vol.e[2];
rx_pkts[3]->packet_type = vol.e[3];
#define fm10k_desc_to_olflags_v(desc, rx_pkts) do {} while (0)
#define fm10k_desc_to_pktype_v(desc, rx_pkts) do {} while (0)
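/* Check whether the device configuration allows the vector Rx path
* to be used.
*/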
int __attribute__((cold))
fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
#ifndef RTE_LIBRTE_IEEE1588
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
#ifndef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
/* without Rx ol_flags, no VP flag report */
if (rxmode->hw_vlan_extend != 0)
/* no fdir support */
if (fconf->mode != RTE_FDIR_MODE_NONE)
/* - no csum error report support
* - no header split support
if (rxmode->hw_ip_checksum == 1 ||
rxmode->header_split == 1)
int __attribute__((cold))
fm10k_rxq_vec_setup(struct fm10k_rx_queue *rxq)
struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
/* data_off will be adjusted after a new mbuf is allocated, for
* 512-byte data alignment.
mb_def.data_off = RTE_PKTMBUF_HEADROOM;
mb_def.port = rxq->port_id;
rte_mbuf_refcnt_set(&mb_def, 1);
/* prevent compiler reordering: rearm_data covers previous fields */
rte_compiler_barrier();
p = (uintptr_t)&mb_def.rearm_data;
rxq->mbuf_initializer = *(uint64_t *)p;
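/* Refill RTE_FM10K_RXQ_REARM_THRESH descriptors with fresh mbufs from
* the mempool and program their DMA addresses into the HW ring.
*/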
fm10k_rxq_rearm(struct fm10k_rx_queue *rxq)
volatile union fm10k_rx_desc *rxdp;
struct rte_mbuf **mb_alloc = &rxq->sw_ring[rxq->rxrearm_start];
struct rte_mbuf *mb0, *mb1;
__m128i head_off = _mm_set_epi64x(
RTE_PKTMBUF_HEADROOM + FM10K_RX_DATABUF_ALIGN - 1,
RTE_PKTMBUF_HEADROOM + FM10K_RX_DATABUF_ALIGN - 1);
__m128i dma_addr0, dma_addr1;
/* Rx buffers need to be aligned to 512 bytes */
const __m128i hba_msk = _mm_set_epi64x(0,
UINT64_MAX - FM10K_RX_DATABUF_ALIGN + 1);
rxdp = rxq->hw_ring + rxq->rxrearm_start;
/* Pull 'n' more mbufs into the software ring */
if (rte_mempool_get_bulk(rxq->mp,
RTE_FM10K_RXQ_REARM_THRESH) < 0) {
dma_addr0 = _mm_setzero_si128();
/* Clean up all the HW/SW ring content */
for (i = 0; i < RTE_FM10K_RXQ_REARM_THRESH; i++) {
mb_alloc[i] = &rxq->fake_mbuf;
_mm_store_si128((__m128i *)&rxdp[i].q,
rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
RTE_FM10K_RXQ_REARM_THRESH;
/* Initialize the mbufs in vector, process 2 mbufs in one loop */
for (i = 0; i < RTE_FM10K_RXQ_REARM_THRESH; i += 2, mb_alloc += 2) {
__m128i vaddr0, vaddr1;
/* Flush mbuf with pkt template.
* Data to be rearmed is 6 bytes long.
* However, Rx will overwrite ol_flags that are coming next
* anyway, so overwrite the whole 8 bytes in one go:
* 6 bytes of rearm_data plus the first 2 bytes of ol_flags.
p0 = (uintptr_t)&mb0->rearm_data;
*(uint64_t *)p0 = rxq->mbuf_initializer;
p1 = (uintptr_t)&mb1->rearm_data;
*(uint64_t *)p1 = rxq->mbuf_initializer;
/* load buf_addr (lo 64 bits) and buf_physaddr (hi 64 bits) */
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
/* convert pa to dma_addr hdr/data */
dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
/* add headroom to pa values */
dma_addr0 = _mm_add_epi64(dma_addr0, head_off);
dma_addr1 = _mm_add_epi64(dma_addr1, head_off);
/* Do 512-byte alignment to satisfy the HW requirement and, at the
* same time, set the Header Buffer Address to zero.
dma_addr0 = _mm_and_si128(dma_addr0, hba_msk);
dma_addr1 = _mm_and_si128(dma_addr1, hba_msk);
/* flush desc with pa dma_addr */
_mm_store_si128((__m128i *)&rxdp++->q, dma_addr0);
_mm_store_si128((__m128i *)&rxdp++->q, dma_addr1);
/* enforce 512B alignment on default Rx virtual addresses */
mb0->data_off = (uint16_t)(RTE_PTR_ALIGN((char *)mb0->buf_addr
+ RTE_PKTMBUF_HEADROOM, FM10K_RX_DATABUF_ALIGN)
- (char *)mb0->buf_addr);
mb1->data_off = (uint16_t)(RTE_PTR_ALIGN((char *)mb1->buf_addr
+ RTE_PKTMBUF_HEADROOM, FM10K_RX_DATABUF_ALIGN)
- (char *)mb1->buf_addr);
rxq->rxrearm_start += RTE_FM10K_RXQ_REARM_THRESH;
if (rxq->rxrearm_start >= rxq->nb_desc)
rxq->rxrearm_start = 0;
rxq->rxrearm_nb -= RTE_FM10K_RXQ_REARM_THRESH;
rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
(rxq->nb_desc - 1) : (rxq->rxrearm_start - 1));
/* Update the tail pointer on the NIC */
FM10K_PCI_REG_WRITE(rxq->tail_ptr, rx_id);
void __attribute__((cold))
fm10k_rx_queue_release_mbufs_vec(struct fm10k_rx_queue *rxq)
const unsigned mask = rxq->nb_desc - 1;
if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_desc)
/* free all mbufs that are valid in the ring */
for (i = rxq->next_dd; i != rxq->rxrearm_start; i = (i + 1) & mask)
rte_pktmbuf_free_seg(rxq->sw_ring[i]);
rxq->rxrearm_nb = rxq->nb_desc;
/* set all entries to NULL */
memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_desc);
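/* Vector Rx burst: handles descriptors RTE_FM10K_DESCS_PER_LOOP (4) at
* a time. When split_packet is non-NULL, the EOP bit of each packet is
* recorded there so the scattered-Rx path can reassemble segments.
*/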
static inline uint16_t
fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts, uint8_t *split_packet)
volatile union fm10k_rx_desc *rxdp;
struct rte_mbuf **mbufp;
uint16_t nb_pkts_recd;
struct fm10k_rx_queue *rxq = rx_queue;
__m128i dd_check, eop_check;
next_dd = rxq->next_dd;
/* Just the act of getting into the function from the application is
* going to cost about 7 cycles
rxdp = rxq->hw_ring + next_dd;
_mm_prefetch((const void *)rxdp, _MM_HINT_T0);
/* See if we need to rearm the Rx queue - gives the prefetch a bit
* of time to act
if (rxq->rxrearm_nb > RTE_FM10K_RXQ_REARM_THRESH)
fm10k_rxq_rearm(rxq);
/* Before we start moving massive data around, check to see if
* there is actually a packet available
if (!(rxdp->d.staterr & FM10K_RXD_STATUS_DD))
/* Vector Rx processes 4 packets at a time; strip the unaligned
* tail in case nb_pkts is not a multiple of 4.
nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_FM10K_DESCS_PER_LOOP);
/* 4 packets DD mask */
dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);
/* 4 packets EOP mask */
eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);
/* mask to shuffle from desc. to mbuf */
shuf_msk = _mm_set_epi8(
7, 6, 5, 4, /* octet 4~7, 32 bits rss */
15, 14, /* octet 14~15, low 16 bits vlan_macip */
13, 12, /* octet 12~13, 16 bits data_len */
0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
13, 12, /* octet 12~13, low 16 bits pkt_len */
0xFF, 0xFF, /* skip high 16 bits pkt_type */
0xFF, 0xFF /* Skip pkt_type field in shuffle operation */
/* Cache is empty -> need to scan the buffer rings, but first move
* the next 'n' mbufs into the cache
mbufp = &rxq->sw_ring[next_dd];
/* A. load 4 packets in one loop
* [A*. mask out 4 unused dirty fields in desc]
* B. copy 4 mbuf pointers from sw_ring to rx_pkts
* C. calc the number of DD bits among the 4 packets
* [C*. extract the end-of-packet bit, if requested]
* D. fill info. from desc to mbuf
for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
pos += RTE_FM10K_DESCS_PER_LOOP,
rxdp += RTE_FM10K_DESCS_PER_LOOP) {
__m128i descs0[RTE_FM10K_DESCS_PER_LOOP];
__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
__m128i mbp1, mbp2; /* two mbuf pointers in one XMM reg. */
/* B.1 load 2 mbuf pointers */
mbp1 = _mm_loadu_si128((__m128i *)&mbufp[pos]);
/* Read desc statuses backwards to avoid race condition */
/* A.1 load 4 pkts desc */
descs0[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
/* B.2 copy 2 mbuf pointers into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
/* B.1 load 2 mbuf pointers */
mbp2 = _mm_loadu_si128((__m128i *)&mbufp[pos+2]);
descs0[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
/* A.1 load 2 pkts desc */
descs0[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
descs0[0] = _mm_loadu_si128((__m128i *)(rxdp));
/* B.2 copy 2 mbuf pointers into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
/* avoid compiler reorder optimization */
rte_compiler_barrier();
rte_prefetch0(&rx_pkts[pos]->cacheline1);
rte_prefetch0(&rx_pkts[pos + 1]->cacheline1);
rte_prefetch0(&rx_pkts[pos + 2]->cacheline1);
rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
/* D.1 pkt 3,4 convert format from desc to pktmbuf */
pkt_mb4 = _mm_shuffle_epi8(descs0[3], shuf_msk);
pkt_mb3 = _mm_shuffle_epi8(descs0[2], shuf_msk);
/* C.1 4=>2 filter staterr info only */
sterr_tmp2 = _mm_unpackhi_epi32(descs0[3], descs0[2]);
/* C.1 4=>2 filter staterr info only */
sterr_tmp1 = _mm_unpackhi_epi32(descs0[1], descs0[0]);
/* set ol_flags with vlan packet type */
fm10k_desc_to_olflags_v(descs0, &rx_pkts[pos]);
/* D.1 pkt 1,2 convert format from desc to pktmbuf */
pkt_mb2 = _mm_shuffle_epi8(descs0[1], shuf_msk);
pkt_mb1 = _mm_shuffle_epi8(descs0[0], shuf_msk);
/* C.2 get 4 pkts staterr value */
zero = _mm_xor_si128(dd_check, dd_check);
staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
/* D.3 copy final 3,4 data to rx_pkts */
_mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
_mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,
/* C* extract and record EOP bit */
__m128i eop_shuf_mask = _mm_set_epi8(
0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF,
0x04, 0x0C, 0x00, 0x08
/* and with mask to extract bits, flipping 1-0 */
__m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
/* the staterr values are not in order: for counting the DD bits
* the order doesn't matter, but for end-of-packet tracking it
* does, so shuffle. This also compresses the 32-bit values
* to 8-bit.
eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
/* store the resulting 32-bit value */
*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
split_packet += RTE_FM10K_DESCS_PER_LOOP;
/* zero-out next pointers */
rx_pkts[pos]->next = NULL;
rx_pkts[pos + 1]->next = NULL;
rx_pkts[pos + 2]->next = NULL;
rx_pkts[pos + 3]->next = NULL;
/* C.3 calc available number of desc */
staterr = _mm_and_si128(staterr, dd_check);
staterr = _mm_packs_epi32(staterr, zero);
/* D.3 copy final 1,2 data to rx_pkts */
_mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
fm10k_desc_to_pktype_v(descs0, &rx_pkts[pos]);
/* C.4 calc available number of desc */
var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
if (likely(var != RTE_FM10K_DESCS_PER_LOOP))
/* Update our internal tail pointer */
rxq->next_dd = (uint16_t)(rxq->next_dd + nb_pkts_recd);
rxq->next_dd = (uint16_t)(rxq->next_dd & (rxq->nb_desc - 1));
rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
/* vPMD receive routine
* - does not support ol_flags for RSS and csum errors
fm10k_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
return fm10k_recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
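/* Chain the segments of split packets via their next pointers and
* compact the completed packets back into rx_bufs; an unfinished
* packet is carried over in rxq->pkt_first_seg/pkt_last_seg for the
* next burst.
*/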
static inline uint16_t
fm10k_reassemble_packets(struct fm10k_rx_queue *rxq,
struct rte_mbuf **rx_bufs,
uint16_t nb_bufs, uint8_t *split_flags)
struct rte_mbuf *pkts[RTE_FM10K_MAX_RX_BURST]; /* finished pkts */
struct rte_mbuf *start = rxq->pkt_first_seg;
struct rte_mbuf *end = rxq->pkt_last_seg;
unsigned pkt_idx, buf_idx;
for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
/* processing a split packet */
end->next = rx_bufs[buf_idx];
start->pkt_len += rx_bufs[buf_idx]->data_len;
if (!split_flags[buf_idx]) {
/* it's the last packet of the set */
start->hash = end->hash;
start->ol_flags = end->ol_flags;
pkts[pkt_idx++] = start;
/* not processing a split packet */
if (!split_flags[buf_idx]) {
/* not a split packet, save and skip */
pkts[pkt_idx++] = rx_bufs[buf_idx];
end = start = rx_bufs[buf_idx];
/* save the partial packet for next time */
rxq->pkt_first_seg = start;
rxq->pkt_last_seg = end;
memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
* vPMD receive routine that reassembles scattered packets
* - does not support ol_flags for RSS and csum errors
* - if nb_pkts > RTE_FM10K_MAX_RX_BURST, only RTE_FM10K_MAX_RX_BURST
*   packets are scanned
fm10k_recv_scattered_pkts_vec(void *rx_queue,
struct rte_mbuf **rx_pkts,
struct fm10k_rx_queue *rxq = rx_queue;
uint8_t split_flags[RTE_FM10K_MAX_RX_BURST] = {0};
/* split_flags can only hold a max of RTE_FM10K_MAX_RX_BURST entries */
nb_pkts = RTE_MIN(nb_pkts, RTE_FM10K_MAX_RX_BURST);
/* get some new buffers */
uint16_t nb_bufs = fm10k_recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
/* happy day case, full burst + no packets to be joined */
const uint64_t *split_fl64 = (uint64_t *)split_flags;
if (rxq->pkt_first_seg == NULL &&
split_fl64[0] == 0 && split_fl64[1] == 0 &&
split_fl64[2] == 0 && split_fl64[3] == 0)
/* reassemble any packets that need reassembly */
if (rxq->pkt_first_seg == NULL) {
/* find the first split flag, and only reassemble from there */
while (i < nb_bufs && !split_flags[i])
return i + fm10k_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
static const struct fm10k_txq_ops vec_txq_ops = {
.release_mbufs = fm10k_tx_queue_release_mbufs_vec,
.reset = fm10k_reset_tx_queue,
void __attribute__((cold))
fm10k_txq_vec_setup(struct fm10k_tx_queue *txq)
txq->ops = &vec_txq_ops;
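/* Build one 16-byte Tx descriptor: flags, VLAN tag and data length are
* packed into the upper quadword and the whole descriptor is written
* to the ring with a single 128-bit store.
*/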
vtx1(volatile struct fm10k_tx_desc *txdp,
struct rte_mbuf *pkt, uint64_t flags)
__m128i descriptor = _mm_set_epi64x(flags << 56 |
pkt->vlan_tci << 16 | pkt->data_len,
_mm_store_si128((__m128i *)txdp, descriptor);
vtx(volatile struct fm10k_tx_desc *txdp,
struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
vtx1(txdp, *pkt, flags);
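/* Once the descriptor at next_dd reports DONE, return rs_thresh
* transmitted mbufs from the Tx software ring to their mempool,
* using a bulk put when they all come from the same pool.
*/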
static inline int __attribute__((always_inline))
fm10k_tx_free_bufs(struct fm10k_tx_queue *txq)
struct rte_mbuf **txep;
struct rte_mbuf *m, *free[RTE_FM10K_TX_MAX_FREE_BUF_SZ];
/* check DD bit on threshold descriptor */
flags = txq->hw_ring[txq->next_dd].flags;
if (!(flags & FM10K_TXD_FLAG_DONE))
/* First buffer to free from S/W ring is at index
* next_dd - (rs_thresh-1)
txep = &txq->sw_ring[txq->next_dd - (n - 1)];
m = __rte_pktmbuf_prefree_seg(txep[0]);
if (likely(m != NULL)) {
for (i = 1; i < n; i++) {
m = __rte_pktmbuf_prefree_seg(txep[i]);
if (likely(m != NULL)) {
if (likely(m->pool == free[0]->pool))
rte_mempool_put_bulk(free[0]->pool,
(void *)free, nb_free);
rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
for (i = 1; i < n; i++) {
m = __rte_pktmbuf_prefree_seg(txep[i]);
rte_mempool_put(m->pool, m);
/* buffers were freed, update counters */
txq->nb_free = (uint16_t)(txq->nb_free + txq->rs_thresh);
txq->next_dd = (uint16_t)(txq->next_dd + txq->rs_thresh);
if (txq->next_dd >= txq->nb_desc)
txq->next_dd = (uint16_t)(txq->rs_thresh - 1);
return txq->rs_thresh;
static inline void __attribute__((always_inline))
tx_backlog_entry(struct rte_mbuf **txep,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
for (i = 0; i < (int)nb_pkts; ++i)
txep[i] = tx_pkts[i];
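/* Vector Tx burst: back up the mbuf pointers in sw_ring, fill the HW
* descriptors with vtx1()/vtx(), wrap at the end of the ring, set the
* RS bit every rs_thresh descriptors and finally bump the tail
* register.
*/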
fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;
volatile struct fm10k_tx_desc *txdp;
struct rte_mbuf **txep;
uint16_t n, nb_commit, tx_id;
uint64_t flags = FM10K_TXD_FLAG_LAST;
uint64_t rs = FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_LAST;
/* crossing the rs_thresh boundary is not allowed */
nb_pkts = RTE_MIN(nb_pkts, txq->rs_thresh);
if (txq->nb_free < txq->free_thresh)
fm10k_tx_free_bufs(txq);
nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts);
if (unlikely(nb_pkts == 0))
tx_id = txq->next_free;
txdp = &txq->hw_ring[tx_id];
txep = &txq->sw_ring[tx_id];
txq->nb_free = (uint16_t)(txq->nb_free - nb_pkts);
n = (uint16_t)(txq->nb_desc - tx_id);
if (nb_commit >= n) {
tx_backlog_entry(txep, tx_pkts, n);
for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
vtx1(txdp, *tx_pkts, flags);
vtx1(txdp, *tx_pkts++, rs);
nb_commit = (uint16_t)(nb_commit - n);
txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
/* avoid reaching the end of the ring */
txdp = &(txq->hw_ring[tx_id]);
txep = &txq->sw_ring[tx_id];
tx_backlog_entry(txep, tx_pkts, nb_commit);
vtx(txdp, tx_pkts, nb_commit, flags);
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->next_rs) {
txq->hw_ring[txq->next_rs].flags |= FM10K_TXD_FLAG_RS;
txq->next_rs = (uint16_t)(txq->next_rs + txq->rs_thresh);
txq->next_free = tx_id;
FM10K_PCI_REG_WRITE(txq->tail_ptr, txq->next_free);
static void __attribute__((cold))
fm10k_tx_queue_release_mbufs_vec(struct fm10k_tx_queue *txq)
const uint16_t max_desc = (uint16_t)(txq->nb_desc - 1);
if (txq->sw_ring == NULL || txq->nb_free == max_desc)
/* release the used mbufs in sw_ring */
for (i = txq->next_dd - (txq->rs_thresh - 1);
i = (i + 1) & max_desc)
rte_pktmbuf_free_seg(txq->sw_ring[i]);
txq->nb_free = max_desc;
for (i = 0; i < txq->nb_desc; i++)
txq->sw_ring[i] = NULL;
rte_free(txq->sw_ring);
static void __attribute__((cold))
fm10k_reset_tx_queue(struct fm10k_tx_queue *txq)
static const struct fm10k_tx_desc zeroed_desc = {0};
struct rte_mbuf **txe = txq->sw_ring;
/* Zero out HW ring memory */
for (i = 0; i < txq->nb_desc; i++)
txq->hw_ring[i] = zeroed_desc;
/* Initialize SW ring entries */
for (i = 0; i < txq->nb_desc; i++)
txq->next_dd = (uint16_t)(txq->rs_thresh - 1);
txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
/* Always allow 1 descriptor to be unallocated to avoid
* a H/W race condition
txq->nb_free = (uint16_t)(txq->nb_desc - 1);
FM10K_PCI_REG_WRITE(txq->tail_ptr, 0);