/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"

#if defined RTE_ARCH_X86_64
#include "mlx5_rxtx_vec_sse.h"
#elif defined RTE_ARCH_ARM64
#include "mlx5_rxtx_vec_neon.h"
#else
#error "This should not be compiled if SIMD instructions are not supported."
#endif

/**
 * Count the number of packets having the same ol_flags and the same metadata
 * (if PKT_TX_METADATA is set in ol_flags), and calculate cs_flags.
 *
 * @param pkts
 *   Pointer to array of packets.
 * @param pkts_n
 *   Number of packets.
 * @param cs_flags
 *   Pointer to returned checksum flags.
 * @param metadata
 *   Pointer to returned metadata.
 * @param txq_offloads
 *   Offloads enabled on Tx queue.
 *
 * @return
 *   Number of leading packets sharing ol_flags and, if relevant, metadata.
 */
static inline unsigned int
txq_calc_offload(struct rte_mbuf **pkts, uint16_t pkts_n, uint8_t *cs_flags,
		 rte_be32_t *metadata, const uint64_t txq_offloads)
{
	unsigned int pos;
	const uint64_t cksum_ol_mask =
		PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM |
		PKT_TX_UDP_CKSUM | PKT_TX_TUNNEL_GRE |
		PKT_TX_TUNNEL_VXLAN | PKT_TX_OUTER_IP_CKSUM;
	rte_be32_t p0_metadata, pn_metadata;

	if (!pkts_n)
		return 0;
	p0_metadata = pkts[0]->ol_flags & PKT_TX_METADATA ?
			pkts[0]->tx_metadata : 0;
	/* Count the number of packets having the same offload parameters. */
	for (pos = 1; pos < pkts_n; ++pos) {
		/* Check if the packet has the same checksum flags. */
		if ((txq_offloads & MLX5_VEC_TX_CKSUM_OFFLOAD_CAP) &&
		    ((pkts[pos]->ol_flags ^ pkts[0]->ol_flags) & cksum_ol_mask))
			break;
		/* Check if the packet has the same metadata. */
		if (txq_offloads & DEV_TX_OFFLOAD_MATCH_METADATA) {
			pn_metadata = pkts[pos]->ol_flags & PKT_TX_METADATA ?
					pkts[pos]->tx_metadata : 0;
			if (pn_metadata != p0_metadata)
				break;
		}
	}
	*cs_flags = txq_ol_cksum_to_cs(pkts[0]);
	*metadata = p0_metadata;
	return pos;
}
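
/*
 * Illustrative caller sketch (hypothetical, not part of this driver): the
 * helper above lets a sender split a burst into sub-bursts whose packets can
 * share a single checksum/metadata descriptor setting:
 *
 *	uint8_t cs_flags = 0;
 *	rte_be32_t metadata = 0;
 *	uint16_t n = txq_calc_offload(pkts, pkts_n, &cs_flags, &metadata,
 *				      txq->offloads);
 *
 * After the call, pkts[0..n-1] can be sent with one cs_flags/metadata pair;
 * mlx5_tx_burst_vec() below uses exactly this pattern.
 */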

/**
 * DPDK callback for vectorized TX.
 *
 * @param dpdk_txq
 *   Generic pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
		      uint16_t pkts_n)
{
	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
	uint16_t nb_tx = 0;

	while (pkts_n > nb_tx) {
		uint16_t n;
		uint16_t ret;

		n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
		ret = txq_burst_v(txq, &pkts[nb_tx], n, 0, 0);
		nb_tx += ret;
		if (!ret)
			break;
	}
	return nb_tx;
}
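
/*
 * Usage note (hypothetical application code, not part of this file): the
 * callback above is never called directly; once installed as
 * dev->tx_pkt_burst it is reached through the generic burst API, e.g.:
 *
 *	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, pkts_n);
 *
 * The loop above works in chunks of at most MLX5_VPMD_TX_MAX_BURST packets
 * and stops as soon as txq_burst_v() makes no progress.
 */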

/**
 * DPDK callback for vectorized TX with multi-seg packets and offload.
 *
 * @param dpdk_txq
 *   Generic pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
	uint16_t nb_tx = 0;

	while (pkts_n > nb_tx) {
		uint8_t cs_flags = 0;
		uint16_t n;
		uint16_t ret;
		rte_be32_t metadata = 0;

		/* Transmit multi-seg packets at the head of the pkts list. */
		if ((txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
		    NB_SEGS(pkts[nb_tx]) > 1)
			nb_tx += txq_scatter_v(txq,
					       &pkts[nb_tx],
					       pkts_n - nb_tx);
		n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
		if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
			n = txq_count_contig_single_seg(&pkts[nb_tx], n);
		if (txq->offloads & (MLX5_VEC_TX_CKSUM_OFFLOAD_CAP |
				     DEV_TX_OFFLOAD_MATCH_METADATA))
			n = txq_calc_offload(&pkts[nb_tx], n,
					     &cs_flags, &metadata,
					     txq->offloads);
		ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags, metadata);
		nb_tx += ret;
		if (!ret)
			break;
	}
	return nb_tx;
}
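
/*
 * Worked example (illustrative only): for a burst made of one 2-segment
 * packet followed by three single-segment packets with identical checksum
 * flags, the first loop iteration sends the multi-seg packet through
 * txq_scatter_v(), and the next iteration sends the remaining three through
 * txq_burst_v() with the shared cs_flags/metadata pair computed by
 * txq_calc_offload().
 */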

/**
 * Skip error packets.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
static uint16_t
rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
			 uint16_t pkts_n)
{
	uint16_t n = 0;
	unsigned int i;
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t err_bytes = 0;
#endif

	for (i = 0; i < pkts_n; ++i) {
		struct rte_mbuf *pkt = pkts[i];

		if (pkt->packet_type == RTE_PTYPE_ALL_MASK) {
#ifdef MLX5_PMD_SOFT_COUNTERS
			err_bytes += PKT_LEN(pkt);
#endif
			rte_pktmbuf_free_seg(pkt);
		} else {
			pkts[n++] = pkt;
		}
	}
	rxq->stats.idropped += (pkts_n - n);
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Correct the counters of errored completions. */
	rxq->stats.ipackets -= (pkts_n - n);
	rxq->stats.ibytes -= err_bytes;
#endif
	return n;
}
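
/*
 * Behaviour sketch (assuming, as the check above implies, that rxq_burst_v()
 * flags errored completions by setting packet_type to RTE_PTYPE_ALL_MASK):
 * an input array [ok0, err, ok1] comes out compacted as [ok0, ok1] with a
 * return value of 2; "err" is freed and rxq->stats.idropped grows by 1.
 */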

/**
 * DPDK callback for vectorized RX.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct mlx5_rxq_data *rxq = dpdk_rxq;
	uint16_t nb_rx;
	uint64_t err = 0;

	nb_rx = rxq_burst_v(rxq, pkts, pkts_n, &err);
	if (unlikely(err))
		nb_rx = rxq_handle_pending_error(rxq, pkts, nb_rx);
	return nb_rx;
}
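
/*
 * Usage note (hypothetical application code, not part of this file): once
 * installed as dev->rx_pkt_burst, this callback is reached through the
 * generic burst API, e.g.:
 *
 *	struct rte_mbuf *bufs[32];
 *	uint16_t nb = rte_eth_rx_burst(port_id, queue_id, bufs, 32);
 */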

/**
 * Check whether the Tx queue flags allow raw vectorized Tx.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
int __attribute__((cold))
mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev)
{
	uint64_t offloads = dev->data->dev_conf.txmode.offloads;

	/* The raw vectorized Tx path supports no offloads at all. */
	if (offloads)
		return -ENOTSUP;
	return 1;
}

/**
 * Check whether a device can support vectorized TX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
int __attribute__((cold))
mlx5_check_vec_tx_support(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint64_t offloads = dev->data->dev_conf.txmode.offloads;

	if (!priv->config.tx_vec_en ||
	    priv->txqs_n > (unsigned int)priv->config.txqs_vec ||
	    priv->config.mps != MLX5_MPW_ENHANCED ||
	    offloads & ~MLX5_VEC_TX_OFFLOAD_CAP)
		return -ENOTSUP;
	return 1;
}
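
/*
 * Selection sketch (hypothetical, loosely modeled on how the PMD picks a Tx
 * burst function; the real logic lives elsewhere in the driver):
 *
 *	if (mlx5_check_vec_tx_support(dev) > 0) {
 *		if (mlx5_check_raw_vec_tx_support(dev) > 0)
 *			dev->tx_pkt_burst = mlx5_tx_burst_raw_vec;
 *		else
 *			dev->tx_pkt_burst = mlx5_tx_burst_vec;
 *	}
 */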

/**
 * Check whether an RX queue can support vectorized RX.
 *
 * @param rxq
 *   Pointer to RX queue.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
int __attribute__((cold))
mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq)
{
	struct mlx5_rxq_ctrl *ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);

	if (mlx5_mprq_enabled(ETH_DEV(ctrl->priv)))
		return -ENOTSUP;
	if (!ctrl->priv->config.rx_vec_en || rxq->sges_n != 0)
		return -ENOTSUP;
	return 1;
}

/**
 * Check whether a device can support vectorized RX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
int __attribute__((cold))
mlx5_check_vec_rx_support(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint16_t i;

	if (!priv->config.rx_vec_en)
		return -ENOTSUP;
	if (mlx5_mprq_enabled(dev))
		return -ENOTSUP;
	/* All the configured queues should support vectorized RX. */
	for (i = 0; i < priv->rxqs_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];

		if (!rxq)
			continue;
		if (mlx5_rxq_check_vec_support(rxq) < 0)
			break;
	}
	if (i != priv->rxqs_n)
		return -ENOTSUP;
	return 1;
}
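
/*
 * Selection sketch (hypothetical, loosely modeled on how the PMD picks an Rx
 * burst function): the device-level check gates the vectorized path for all
 * configured queues at once.
 *
 *	if (mlx5_check_vec_rx_support(dev) > 0)
 *		dev->rx_pkt_burst = mlx5_rx_burst_vec;
 *	else
 *		dev->rx_pkt_burst = mlx5_rx_burst;
 */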