1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2017 6WIND S.A.
3 * Copyright 2017 Mellanox.
12 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
14 #pragma GCC diagnostic ignored "-Wpedantic"
16 #include <infiniband/verbs.h>
17 #include <infiniband/mlx5dv.h>
19 #pragma GCC diagnostic error "-Wpedantic"
23 #include <rte_mempool.h>
24 #include <rte_prefetch.h>
27 #include "mlx5_utils.h"
28 #include "mlx5_rxtx.h"
29 #include "mlx5_rxtx_vec.h"
30 #include "mlx5_autoconf.h"
31 #include "mlx5_defs.h"
34 #if defined RTE_ARCH_X86_64
35 #include "mlx5_rxtx_vec_sse.h"
36 #elif defined RTE_ARCH_ARM64
37 #include "mlx5_rxtx_vec_neon.h"
39 #error "This should not be compiled if SIMD instructions are not supported."
43 * Count the number of packets having same ol_flags and calculate cs_flags.
46 * Pointer to TX queue structure.
48 * Pointer to array of packets.
52 * Pointer of flags to be returned.
55 * Number of packets having same ol_flags.
/* NOTE(review): the embedded original line numbers show gaps here — the
 * function's opening brace, the declaration of "pos", the loop "break" and
 * the trailing "return pos;" are not visible in this extraction.  Code
 * lines below are left byte-identical; restore from upstream before edits. */
57 static inline unsigned int
58 txq_calc_offload(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
59 uint16_t pkts_n, uint8_t *cs_flags)
/* Offload bits that must be identical across the burst for the
 * vectorized path to use a single set of checksum flags. */
62 const uint64_t ol_mask =
63 PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM |
64 PKT_TX_UDP_CKSUM | PKT_TX_TUNNEL_GRE |
65 PKT_TX_TUNNEL_VXLAN | PKT_TX_OUTER_IP_CKSUM;
69 /* Count the number of packets having same ol_flags. */
70 for (pos = 1; pos < pkts_n; ++pos)
/* XOR isolates differing bits; masking keeps only offload-relevant ones. */
71 if ((pkts[pos]->ol_flags ^ pkts[0]->ol_flags) & ol_mask)
/* Presumably translates pkts[0]->ol_flags into device cs_flags —
 * helper is defined elsewhere; verify against mlx5_rxtx.h. */
73 *cs_flags = txq_ol_cksum_to_cs(txq, pkts[0]);
78 * DPDK callback for vectorized TX.
81 * Generic pointer to TX queue structure.
83 * Packets to transmit.
85 * Number of packets in array.
88 * Number of packets successfully transmitted (<= pkts_n).
/* NOTE(review): the return-type line, local declarations (nb_tx, n, ret)
 * and the loop tail (accumulating nb_tx, early break, final return) fall
 * in the numbering gaps and are not visible here.  Code kept byte-identical. */
91 mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
94 struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
97 while (pkts_n > nb_tx) {
/* Clamp each inner burst to the vectorized-path maximum. */
101 n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
/* "Raw" path: no checksum offload, hence cs_flags argument is 0. */
102 ret = txq_burst_v(txq, &pkts[nb_tx], n, 0);
111 * DPDK callback for vectorized TX with multi-seg packets and offload.
114 * Generic pointer to TX queue structure.
116 * Packets to transmit.
118 * Number of packets in array.
121 * Number of packets successfully transmitted (<= pkts_n).
/* NOTE(review): numbering gaps hide the return-type line, local
 * declarations and the loop tail/return of this function; code lines
 * are left byte-identical. */
124 mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
126 struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
129 while (pkts_n > nb_tx) {
130 uint8_t cs_flags = 0;
134 /* Transmit multi-seg packets in the head of pkts list. */
135 if ((txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
136 NB_SEGS(pkts[nb_tx]) > 1)
/* Scatter path for multi-segment mbufs (call arguments continue on
 * lines missing from this extraction). */
137 nb_tx += txq_scatter_v(txq,
140 n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
/* Trim the burst to the leading run of single-segment packets. */
141 if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
142 n = txq_count_contig_single_seg(&pkts[nb_tx], n);
/* Trim further to packets sharing the same checksum-offload flags. */
143 if (txq->offloads & MLX5_VEC_TX_CKSUM_OFFLOAD_CAP)
144 n = txq_calc_offload(txq, &pkts[nb_tx], n, &cs_flags);
145 ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
154 * Skip error packets.
157 * Pointer to RX queue structure.
159 * Array to store received packets.
161 * Maximum number of packets in array.
164 * Number of packets successfully received (<= pkts_n).
/* NOTE(review): the static/return-type line, the declarations of "n" and
 * "i", the compaction of good packets into pkts[] and the final return
 * are hidden by numbering gaps; code kept byte-identical. */
167 rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
172 #ifdef MLX5_PMD_SOFT_COUNTERS
173 uint32_t err_bytes = 0;
176 for (i = 0; i < pkts_n; ++i) {
177 struct rte_mbuf *pkt = pkts[i];
/* RTE_PTYPE_ALL_MASK appears to be used as an in-band "errored packet"
 * marker set by the burst routine — confirm against rxq_burst_v. */
179 if (pkt->packet_type == RTE_PTYPE_ALL_MASK) {
180 #ifdef MLX5_PMD_SOFT_COUNTERS
181 err_bytes += PKT_LEN(pkt);
/* Drop the errored mbuf. */
183 rte_pktmbuf_free_seg(pkt);
188 rxq->stats.idropped += (pkts_n - n);
189 #ifdef MLX5_PMD_SOFT_COUNTERS
190 /* Correct counters of errored completions. */
191 rxq->stats.ipackets -= (pkts_n - n);
192 rxq->stats.ibytes -= err_bytes;
198 * DPDK callback for vectorized RX.
201 * Generic pointer to RX queue structure.
203 * Array to store received packets.
205 * Maximum number of packets in array.
208 * Number of packets successfully received (<= pkts_n).
/* NOTE(review): the return-type line, the declarations of "nb_rx"/"err",
 * the error-check guard around the pending-error call and the final
 * return are hidden by numbering gaps; code kept byte-identical. */
211 mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
213 struct mlx5_rxq_data *rxq = dpdk_rxq;
/* err is an out-parameter reporting errored completions. */
217 nb_rx = rxq_burst_v(rxq, pkts, pkts_n, &err);
/* Compact out errored packets (presumably only reached when err != 0 —
 * the guarding "if" line is not visible in this extraction). */
219 nb_rx = rxq_handle_pending_error(rxq, pkts, nb_rx);
224 * Check Tx queue flags are set for raw vectorized Tx.
227 * Pointer to private structure.
229 * Pointer to rte_eth_dev structure.
232 * 1 if supported, negative errno value if not.
234 int __attribute__((cold))
235 priv_check_raw_vec_tx_support(__rte_unused struct priv *priv,
236 struct rte_eth_dev *dev)
238 uint64_t offloads = dev->data->dev_conf.txmode.offloads;
/* The raw vector path requires no TX offloads to be configured; the
 * conditional and return statements are hidden by numbering gaps. */
240 /* Doesn't support any offload. */
247 * Check a device can support vectorized TX.
250 * Pointer to private structure.
252 * Pointer to rte_eth_dev structure.
255 * 1 if supported, negative errno value if not.
257 int __attribute__((cold))
258 priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
260 uint64_t offloads = dev->data->dev_conf.txmode.offloads;
/* Vector TX requires: explicit enablement, a small number of TX queues,
 * enhanced multi-packet-write mode, and only offloads covered by
 * MLX5_VEC_TX_OFFLOAD_CAP.  The success/failure returns are hidden by
 * numbering gaps; code kept byte-identical. */
262 if (!priv->config.tx_vec_en ||
263 priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
264 priv->config.mps != MLX5_MPW_ENHANCED ||
265 offloads & ~MLX5_VEC_TX_OFFLOAD_CAP)
271 * Check a RX queue can support vectorized RX.
274 * Pointer to RX queue.
277 * 1 if supported, negative errno value if not.
279 int __attribute__((cold))
280 rxq_check_vec_support(struct mlx5_rxq_data *rxq)
/* Recover the control structure embedding this data plane struct. */
282 struct mlx5_rxq_ctrl *ctrl =
283 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
/* Vector RX needs explicit enablement and single-segment buffers
 * (sges_n == 0); the return statements are hidden by numbering gaps. */
285 if (!ctrl->priv->config.rx_vec_en || rxq->sges_n != 0)
291 * Check a device can support vectorized RX.
294 * Pointer to private structure.
297 * 1 if supported, negative errno value if not.
299 int __attribute__((cold))
300 priv_check_vec_rx_support(struct priv *priv)
/* NOTE(review): declaration of "i", the loop's closing brace, the
 * NULL-queue skip and the return statements are hidden by numbering
 * gaps; code kept byte-identical. */
304 if (!priv->config.rx_vec_en)
306 /* All the configured queues should support. */
307 for (i = 0; i < priv->rxqs_n; ++i) {
308 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
/* A single unsupported queue presumably breaks out of the loop,
 * leaving i < rxqs_n for the check below — confirm with upstream. */
312 if (rxq_check_vec_support(rxq) < 0)
315 if (i != priv->rxqs_n)