/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of 6WIND S.A. nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>

/* Verbs headers. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
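
/*
 * Illustration (hypothetical type, not taken from the verbs headers): the
 * construct that trips -Wpedantic is an unnamed struct/union member, which
 * only became standard in C11:
 *
 * @code
 * struct example {                  // hypothetical
 *         union {                   // unnamed member: rejected by -pedantic
 *                 uint32_t dword;
 *                 uint8_t bytes[4];
 *         };
 * };
 * @endcode
 */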

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"

#ifdef RTE_ARCH_X86_64
#include "mlx5_rxtx_vec_sse.h"
#else
#error "This should not be compiled if SIMD instructions are not supported."
#endif

/**
 * Count the number of continuous single segment packets.
 *
 * @param pkts
 *   Pointer to array of packets.
 * @param pkts_n
 *   Number of packets.
 *
 * @return
 *   Number of continuous single segment packets.
 */
static inline unsigned int
txq_check_multiseg(struct rte_mbuf **pkts, uint16_t pkts_n)
{
        unsigned int pos;

        if (!pkts_n)
                return 0;
        /* Count the number of continuous single segment packets. */
        for (pos = 0; pos < pkts_n; ++pos)
                if (NB_SEGS(pkts[pos]) > 1)
                        break;
        return pos;
}
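
/*
 * Worked example (a sketch, not part of the driver): for a burst whose
 * mbufs report NB_SEGS() of { 1, 1, 3, 1 }, the loop above stops at index 2,
 * so only the leading single-segment packets get vectorized and the scatter
 * path handles the rest:
 *
 * @code
 * unsigned int n = txq_check_multiseg(pkts, 4); // n == 2 in this example
 * @endcode
 */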

/**
 * Count the number of packets having same ol_flags and calculate cs_flags.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param pkts
 *   Pointer to array of packets.
 * @param pkts_n
 *   Number of packets.
 * @param cs_flags
 *   Pointer to the checksum flags to be returned.
 *
 * @return
 *   Number of packets having same ol_flags.
 */
static inline unsigned int
txq_calc_offload(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
                 uint16_t pkts_n, uint8_t *cs_flags)
{
        unsigned int pos;
        const uint64_t ol_mask =
                PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM |
                PKT_TX_UDP_CKSUM | PKT_TX_TUNNEL_GRE |
                PKT_TX_TUNNEL_VXLAN | PKT_TX_OUTER_IP_CKSUM;

        if (!pkts_n)
                return 0;
        /* Count the number of packets having same ol_flags. */
        for (pos = 1; pos < pkts_n; ++pos)
                if ((pkts[pos]->ol_flags ^ pkts[0]->ol_flags) & ol_mask)
                        break;
        /* Should open another MPW session for the rest. */
        if (pkts[0]->ol_flags &
            (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
                const uint64_t is_tunneled = pkts[0]->ol_flags &
                                             (PKT_TX_TUNNEL_GRE |
                                              PKT_TX_TUNNEL_VXLAN);

                if (is_tunneled && txq->tunnel_en) {
                        *cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
                                    MLX5_ETH_WQE_L4_INNER_CSUM;
                        if (pkts[0]->ol_flags & PKT_TX_OUTER_IP_CKSUM)
                                *cs_flags |= MLX5_ETH_WQE_L3_CSUM;
                } else {
                        *cs_flags = MLX5_ETH_WQE_L3_CSUM |
                                    MLX5_ETH_WQE_L4_CSUM;
                }
        }
        return pos;
}
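
/*
 * Worked example (illustrative only): if every packet in the burst requests
 * PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM on a non-tunnel queue, the XOR test
 * never breaks, so one MPW session with a single checksum setting can cover
 * the whole burst:
 *
 * @code
 * uint8_t cs = 0;
 * unsigned int n = txq_calc_offload(txq, pkts, pkts_n, &cs);
 * // n == pkts_n, cs == (MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM)
 * @endcode
 */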

/**
 * DPDK callback for vectorized TX.
 *
 * @param dpdk_txq
 *   Generic pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
                      uint16_t pkts_n)
{
        struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
        uint16_t nb_tx = 0;

        while (pkts_n > nb_tx) {
                uint16_t n;
                uint16_t ret;

                n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
                ret = txq_burst_v(txq, &pkts[nb_tx], n, 0);
                nb_tx += ret;
                if (!ret)
                        break;
        }
        return nb_tx;
}
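
/*
 * Usage sketch (assuming the usual port/queue setup done elsewhere):
 * applications do not call this function directly; it is installed as the
 * device's tx_pkt_burst callback and reached through the generic API:
 *
 * @code
 * uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);
 * @endcode
 */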

/**
 * DPDK callback for vectorized TX with multi-seg packets and offload.
 *
 * @param dpdk_txq
 *   Generic pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
        struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
        uint16_t nb_tx = 0;

        while (pkts_n > nb_tx) {
                uint8_t cs_flags = 0;
                uint16_t n;
                uint16_t ret;

                /* Transmit multi-seg packets in the head of pkts list. */
                if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) &&
                    NB_SEGS(pkts[nb_tx]) > 1)
                        nb_tx += txq_scatter_v(txq,
                                               &pkts[nb_tx],
                                               pkts_n - nb_tx);
                n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
                if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS))
                        n = txq_check_multiseg(&pkts[nb_tx], n);
                if (!(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
                        n = txq_calc_offload(txq, &pkts[nb_tx], n, &cs_flags);
                ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
                nb_tx += ret;
                if (!ret)
                        break;
        }
        return nb_tx;
}
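
/*
 * Example of the splitting logic above (illustrative): with both queue
 * flags cleared, a burst of { multi-seg, csum, csum, plain } is sent as
 * txq_scatter_v() for packet 0, txq_burst_v() for packets 1-2 with cs_flags
 * set, and a further iteration's txq_burst_v() for packet 3, because
 * txq_calc_offload() stops at the first ol_flags change.
 */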

/**
 * Skip error packets.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
static uint16_t
rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
                         uint16_t pkts_n)
{
        uint16_t n = 0;
        unsigned int i;
#ifdef MLX5_PMD_SOFT_COUNTERS
        uint32_t err_bytes = 0;
#endif

        for (i = 0; i < pkts_n; ++i) {
                struct rte_mbuf *pkt = pkts[i];

                if (pkt->packet_type == RTE_PTYPE_ALL_MASK) {
#ifdef MLX5_PMD_SOFT_COUNTERS
                        err_bytes += PKT_LEN(pkt);
#endif
                        rte_pktmbuf_free_seg(pkt);
                } else {
                        pkts[n++] = pkt;
                }
        }
        rxq->stats.idropped += (pkts_n - n);
#ifdef MLX5_PMD_SOFT_COUNTERS
        /* Correct counters of errored completions. */
        rxq->stats.ipackets -= (pkts_n - n);
        rxq->stats.ibytes -= err_bytes;
#endif
        rxq->pending_err = 0;
        return n;
}
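
/*
 * Note: packets from errored completions are expected to arrive here with
 * pkt->packet_type set to RTE_PTYPE_ALL_MASK by the vectorized burst
 * routine; that value is the drop marker tested above. E.g. if 32 packets
 * were returned and 3 carry the marker, those 3 are freed, the array is
 * compacted in place, and 29 is returned.
 */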

/**
 * DPDK callback for vectorized RX.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
        struct mlx5_rxq_data *rxq = dpdk_rxq;
        uint16_t nb_rx;

        nb_rx = rxq_burst_v(rxq, pkts, pkts_n);
        if (unlikely(rxq->pending_err))
                nb_rx = rxq_handle_pending_error(rxq, pkts, nb_rx);
        return nb_rx;
}
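
/*
 * Usage sketch (mirroring the TX side): once installed as the device's
 * rx_pkt_burst callback, this is reached through the generic API:
 *
 * @code
 * struct rte_mbuf *bufs[32];
 * uint16_t nb = rte_eth_rx_burst(port_id, queue_id, bufs, 32);
 * @endcode
 */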

/**
 * Check that Tx queue flags are set for raw vectorized Tx.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
int __attribute__((cold))
priv_check_raw_vec_tx_support(struct priv *priv)
{
        uint16_t i;

        /* All the configured queues should support. */
        for (i = 0; i < priv->txqs_n; ++i) {
                struct mlx5_txq_data *txq = (*priv->txqs)[i];

                if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) ||
                    !(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
                        break;
        }
        if (i != priv->txqs_n)
                return -ENOTSUP;
        return 1;
}

/**
 * Check whether a device can support vectorized TX.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
int __attribute__((cold))
priv_check_vec_tx_support(struct priv *priv)
{
        if (!priv->tx_vec_en ||
            priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
            priv->mps != MLX5_MPW_ENHANCED ||
            priv->tso)
                return -ENOTSUP;
        return 1;
}

/**
 * Check whether a RX queue can support vectorized RX.
 *
 * @param rxq
 *   Pointer to RX queue.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
int __attribute__((cold))
rxq_check_vec_support(struct mlx5_rxq_data *rxq)
{
        struct mlx5_rxq_ctrl *ctrl =
                container_of(rxq, struct mlx5_rxq_ctrl, rxq);

        if (!ctrl->priv->rx_vec_en || rxq->sges_n != 0)
                return -ENOTSUP;
        return 1;
}

/**
 * Check whether a device can support vectorized RX.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
int __attribute__((cold))
priv_check_vec_rx_support(struct priv *priv)
{
        uint16_t i;

        if (!priv->rx_vec_en)
                return -ENOTSUP;
        /* All the configured queues should support. */
        for (i = 0; i < priv->rxqs_n; ++i) {
                struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];

                if (!rxq)
                        continue;
                if (rxq_check_vec_support(rxq) < 0)
                        break;
        }
        if (i != priv->rxqs_n)
                return -ENOTSUP;
        return 1;
}
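
/*
 * Selection sketch (hedged; the actual wiring lives in the ethdev code):
 * the predicates above return 1 or -ENOTSUP, so choosing burst callbacks
 * can look like the following, dev being the rte_eth_dev:
 *
 * @code
 * if (priv_check_vec_tx_support(priv) > 0) {
 *         if (priv_check_raw_vec_tx_support(priv) > 0)
 *                 dev->tx_pkt_burst = mlx5_tx_burst_raw_vec;
 *         else
 *                 dev->tx_pkt_burst = mlx5_tx_burst_vec;
 * }
 * if (priv_check_vec_rx_support(priv) > 0)
 *         dev->rx_pkt_burst = mlx5_rx_burst_vec;
 * @endcode
 */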