/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdint.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include <mlx5_glue.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"

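/* Pull in the architecture-specific SIMD implementation of the RX burst routines. */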
#if defined RTE_ARCH_X86_64
#include "mlx5_rxtx_vec_sse.h"
#elif defined RTE_ARCH_ARM64
#include "mlx5_rxtx_vec_neon.h"
#elif defined RTE_ARCH_PPC_64
#include "mlx5_rxtx_vec_altivec.h"
#else
#error "This should not be compiled if SIMD instructions are not supported."
#endif

/**
 * Skip error packets.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
static uint16_t
rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
			 uint16_t pkts_n)
{
	uint16_t n = 0;
	unsigned int i;
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t err_bytes = 0;
#endif

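	/* Repack good packets to the front of the array and free the bad ones. */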
	for (i = 0; i < pkts_n; ++i) {
		struct rte_mbuf *pkt = pkts[i];

		if (pkt->packet_type == RTE_PTYPE_ALL_MASK || rxq->err_state) {
#ifdef MLX5_PMD_SOFT_COUNTERS
			err_bytes += PKT_LEN(pkt);
#endif
			rte_pktmbuf_free_seg(pkt);
		} else {
			pkts[n++] = pkt;
		}
	}
	rxq->stats.idropped += (pkts_n - n);
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Correct counters of errored completions. */
	rxq->stats.ipackets -= (pkts_n - n);
	rxq->stats.ibytes -= err_bytes;
#endif
	mlx5_rx_err_handle(rxq, 1);
	return n;
}

/**
 * DPDK callback for vectorized RX.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct mlx5_rxq_data *rxq = dpdk_rxq;
	uint16_t nb_rx = 0;
	uint16_t tn = 0;
	uint64_t err = 0;
	bool no_cq = false;

	do {
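		/* Fill pkts[] from the CQ; err flags errored CQEs, no_cq an empty CQ. */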
		nb_rx = rxq_burst_v(rxq, pkts + tn, pkts_n - tn, &err, &no_cq);
		if (unlikely(err | rxq->err_state))
			nb_rx = rxq_handle_pending_error(rxq, pkts + tn, nb_rx);
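		/* Account for what was received, then poll again until the burst is full. */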
		tn += nb_rx;
		if (unlikely(no_cq))
			break;
	} while (tn != pkts_n);
	return tn;
}

/**
 * Check whether an RX queue can support vectorized RX.
 *
 * @param rxq
 *   Pointer to RX queue.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
int
mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq)
{
	struct mlx5_rxq_ctrl *ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);

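	/* MPRQ, scattered (multi-segment) RX and LRO are handled only by the non-vectorized datapath. */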
	if (mlx5_mprq_enabled(ETH_DEV(ctrl->priv)))
		return -ENOTSUP;
	if (!ctrl->priv->config.rx_vec_en || rxq->sges_n != 0)
		return -ENOTSUP;
	if (rxq->lro)
		return -ENOTSUP;
	return 1;
}

/**
 * Check whether a device can support vectorized RX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
int
mlx5_check_vec_rx_support(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t i;

	if (!priv->config.rx_vec_en)
		return -ENOTSUP;
	if (mlx5_mprq_enabled(dev))
		return -ENOTSUP;
	/* All the configured queues should support vectorized RX. */
	for (i = 0; i < priv->rxqs_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];

		if (!rxq)
			continue;
		if (mlx5_rxq_check_vec_support(rxq) < 0)
			break;
	}
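	/* An early break above means at least one queue cannot use the vectorized path. */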
	if (i != priv->rxqs_n)
		return -ENOTSUP;
	return 1;
}