/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdint.h>
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
27 #include "mlx5_defs.h"
29 #include "mlx5_utils.h"
30 #include "mlx5_rxtx.h"
31 #include "mlx5_rxtx_vec.h"
32 #include "mlx5_autoconf.h"

#if defined RTE_ARCH_X86_64
#include "mlx5_rxtx_vec_sse.h"
#elif defined RTE_ARCH_ARM64
#include "mlx5_rxtx_vec_neon.h"
#elif defined RTE_ARCH_PPC_64
#include "mlx5_rxtx_vec_altivec.h"
#else
#error "This should not be compiled if SIMD instructions are not supported."
#endif

/**
 * Skip error packets.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
static uint16_t
rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
			 uint16_t pkts_n)
{
	uint16_t n = 0;
	unsigned int i;
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t err_bytes = 0;
#endif

	for (i = 0; i < pkts_n; ++i) {
		struct rte_mbuf *pkt = pkts[i];
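
		/*
		 * The per-arch rxq_burst_v() implementations flag completions
		 * that carried an error by setting packet_type to
		 * RTE_PTYPE_ALL_MASK; drop those packets, and everything
		 * received while the queue is in an error state.
		 */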
		if (pkt->packet_type == RTE_PTYPE_ALL_MASK || rxq->err_state) {
#ifdef MLX5_PMD_SOFT_COUNTERS
			err_bytes += PKT_LEN(pkt);
#endif
			rte_pktmbuf_free_seg(pkt);
		} else {
			pkts[n++] = pkt;
		}
	}
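	/*
	 * rxq_burst_v() already accounted the errored packets in the soft
	 * counters, so take them back out below and report them as dropped
	 * instead.
	 */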
	rxq->stats.idropped += (pkts_n - n);
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Correct counters of errored completions. */
	rxq->stats.ipackets -= (pkts_n - n);
	rxq->stats.ibytes -= err_bytes;
#endif
	mlx5_rx_err_handle(rxq, 1);
	return n;
}

/**
 * DPDK callback for vectorized RX.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct mlx5_rxq_data *rxq = dpdk_rxq;
	uint16_t nb_rx;
	uint64_t err = 0;

	nb_rx = rxq_burst_v(rxq, pkts, pkts_n, &err);
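	/*
	 * rxq_burst_v() reports completion errors through err; take the slow
	 * error path if any were seen or if the queue is already in an error
	 * state.
	 */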
	if (unlikely(err | rxq->err_state))
		nb_rx = rxq_handle_pending_error(rxq, pkts, nb_rx);
	return nb_rx;
}

/**
 * Check whether an RX queue can support vectorized RX.
 *
 * @param rxq
 *   Pointer to RX queue.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
int __attribute__((cold))
mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq)
{
	struct mlx5_rxq_ctrl *ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
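
	/*
	 * Neither Multi-Packet RQ, scattered RX (sges_n != 0) nor LRO is
	 * handled by the vectorized datapath, and the datapath can be
	 * disabled explicitly through the rx_vec_en devarg.
	 */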
	if (mlx5_mprq_enabled(ETH_DEV(ctrl->priv)))
		return -ENOTSUP;
	if (!ctrl->priv->config.rx_vec_en || rxq->sges_n != 0)
		return -ENOTSUP;
	if (rxq->lro)
		return -ENOTSUP;
	return 1;
}

/**
 * Check whether a device can support vectorized RX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
int __attribute__((cold))
mlx5_check_vec_rx_support(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t i;

	if (!priv->config.rx_vec_en)
		return -ENOTSUP;
	if (mlx5_mprq_enabled(dev))
		return -ENOTSUP;
	/* All the configured queues must support vectorized RX. */
	for (i = 0; i < priv->rxqs_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];

		if (!rxq)
			continue;
		if (mlx5_rxq_check_vec_support(rxq) < 0)
			break;
	}
	if (i != priv->rxqs_n)
		return -ENOTSUP;
	return 1;
}
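
/*
 * Usage sketch (not compiled into the driver): how the checks above are
 * typically combined when selecting the RX burst callback. The real
 * selection logic lives elsewhere in the PMD; the guard macro and the
 * function name below are illustrative assumptions, not PMD API.
 */
#ifdef MLX5_RXTX_VEC_USAGE_SKETCH
static eth_rx_burst_t
mlx5_select_rx_burst_sketch(struct rte_eth_dev *dev)
{
	/* Default to the scalar burst routine. */
	eth_rx_burst_t rx_burst = mlx5_rx_burst;

	/* Use the vectorized burst only if every RX queue supports it. */
	if (mlx5_check_vec_rx_support(dev) > 0)
		rx_burst = mlx5_rx_burst_vec;
	return rx_burst;
}
#endif /* MLX5_RXTX_VEC_USAGE_SKETCH */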