/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdint.h>
#include <string.h>
#include <stdlib.h>

/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#pragma GCC diagnostic ignored "-Wpedantic"
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#pragma GCC diagnostic error "-Wpedantic"

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"

#if defined RTE_ARCH_X86_64
#include "mlx5_rxtx_vec_sse.h"
#elif defined RTE_ARCH_ARM64
#include "mlx5_rxtx_vec_neon.h"
#elif defined RTE_ARCH_PPC_64
#include "mlx5_rxtx_vec_altivec.h"
#else
#error "This should not be compiled if SIMD instructions are not supported."
#endif
48 * Pointer to RX queue structure.
50 * Array to store received packets.
52 * Maximum number of packets in array.
55 * Number of packets successfully received (<= pkts_n).
58 rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
63 #ifdef MLX5_PMD_SOFT_COUNTERS
64 uint32_t err_bytes = 0;
67 for (i = 0; i < pkts_n; ++i) {
68 struct rte_mbuf *pkt = pkts[i];
70 if (pkt->packet_type == RTE_PTYPE_ALL_MASK || rxq->err_state) {
71 #ifdef MLX5_PMD_SOFT_COUNTERS
72 err_bytes += PKT_LEN(pkt);
74 rte_pktmbuf_free_seg(pkt);
79 rxq->stats.idropped += (pkts_n - n);
80 #ifdef MLX5_PMD_SOFT_COUNTERS
81 /* Correct counters of errored completions. */
82 rxq->stats.ipackets -= (pkts_n - n);
83 rxq->stats.ibytes -= err_bytes;
85 mlx5_rx_err_handle(rxq, 1);
90 * DPDK callback for vectorized RX.
93 * Generic pointer to RX queue structure.
95 * Array to store received packets.
97 * Maximum number of packets in array.
100 * Number of packets successfully received (<= pkts_n).
103 mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
105 struct mlx5_rxq_data *rxq = dpdk_rxq;
112 nb_rx = rxq_burst_v(rxq, pkts + tn, pkts_n - tn, &err, &no_cq);
113 if (unlikely(err | rxq->err_state))
114 nb_rx = rxq_handle_pending_error(rxq, pkts + tn, nb_rx);
118 } while (tn != pkts_n);
123 * Check a RX queue can support vectorized RX.
126 * Pointer to RX queue.
129 * 1 if supported, negative errno value if not.
132 mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq)
134 struct mlx5_rxq_ctrl *ctrl =
135 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
137 if (mlx5_mprq_enabled(ETH_DEV(ctrl->priv)))
139 if (!ctrl->priv->config.rx_vec_en || rxq->sges_n != 0)
147 * Check a device can support vectorized RX.
150 * Pointer to Ethernet device.
153 * 1 if supported, negative errno value if not.
156 mlx5_check_vec_rx_support(struct rte_eth_dev *dev)
158 struct mlx5_priv *priv = dev->data->dev_private;
161 if (!priv->config.rx_vec_en)
163 if (mlx5_mprq_enabled(dev))
165 /* All the configured queues should support. */
166 for (i = 0; i < priv->rxqs_n; ++i) {
167 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
171 if (mlx5_rxq_check_vec_support(rxq) < 0)
174 if (i != priv->rxqs_n)