diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index 8b132396de..7fae2010f9 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -3,7 +3,6 @@
  * Copyright 2017 Mellanox Technologies, Ltd
  */
 
-#include <assert.h>
 #include <stdint.h>
 #include <string.h>
 #include <stdlib.h>
@@ -23,133 +22,25 @@
 #include <rte_mempool.h>
 #include <rte_prefetch.h>
 
+#include <mlx5_prm.h>
+
+#include "mlx5_defs.h"
 #include "mlx5.h"
 #include "mlx5_utils.h"
 #include "mlx5_rxtx.h"
 #include "mlx5_rxtx_vec.h"
 #include "mlx5_autoconf.h"
-#include "mlx5_defs.h"
-#include "mlx5_prm.h"
 
 #if defined RTE_ARCH_X86_64
 #include "mlx5_rxtx_vec_sse.h"
 #elif defined RTE_ARCH_ARM64
 #include "mlx5_rxtx_vec_neon.h"
+#elif defined RTE_ARCH_PPC_64
+#include "mlx5_rxtx_vec_altivec.h"
 #else
 #error "This should not be compiled if SIMD instructions are not supported."
 #endif
 
-/**
- * Count the number of packets having same ol_flags and calculate cs_flags.
- *
- * @param txq
- *   Pointer to TX queue structure.
- * @param pkts
- *   Pointer to array of packets.
- * @param pkts_n
- *   Number of packets.
- * @param cs_flags
- *   Pointer of flags to be returned.
- *
- * @return
- *   Number of packets having same ol_flags.
- */
-static inline unsigned int
-txq_calc_offload(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
-		 uint16_t pkts_n, uint8_t *cs_flags)
-{
-	unsigned int pos;
-	const uint64_t ol_mask =
-		PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM |
-		PKT_TX_UDP_CKSUM | PKT_TX_TUNNEL_GRE |
-		PKT_TX_TUNNEL_VXLAN | PKT_TX_OUTER_IP_CKSUM;
-
-	if (!pkts_n)
-		return 0;
-	/* Count the number of packets having same ol_flags. */
-	for (pos = 1; pos < pkts_n; ++pos)
-		if ((pkts[pos]->ol_flags ^ pkts[0]->ol_flags) & ol_mask)
-			break;
-	*cs_flags = txq_ol_cksum_to_cs(txq, pkts[0]);
-	return pos;
-}
-
-/**
- * DPDK callback for vectorized TX.
- *
- * @param dpdk_txq
- *   Generic pointer to TX queue structure.
- * @param[in] pkts
- *   Packets to transmit.
- * @param pkts_n
- *   Number of packets in array.
- *
- * @return
- *   Number of packets successfully transmitted (<= pkts_n).
- */
-uint16_t
-mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
-		      uint16_t pkts_n)
-{
-	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
-	uint16_t nb_tx = 0;
-
-	while (pkts_n > nb_tx) {
-		uint16_t n;
-		uint16_t ret;
-
-		n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
-		ret = txq_burst_v(txq, &pkts[nb_tx], n, 0);
-		nb_tx += ret;
-		if (!ret)
-			break;
-	}
-	return nb_tx;
-}
-
-/**
- * DPDK callback for vectorized TX with multi-seg packets and offload.
- *
- * @param dpdk_txq
- *   Generic pointer to TX queue structure.
- * @param[in] pkts
- *   Packets to transmit.
- * @param pkts_n
- *   Number of packets in array.
- *
- * @return
- *   Number of packets successfully transmitted (<= pkts_n).
- */
-uint16_t
-mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
-{
-	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
-	uint16_t nb_tx = 0;
-
-	while (pkts_n > nb_tx) {
-		uint8_t cs_flags = 0;
-		uint16_t n;
-		uint16_t ret;
-
-		/* Transmit multi-seg packets in the head of pkts list. */
-		if ((txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
-		    NB_SEGS(pkts[nb_tx]) > 1)
-			nb_tx += txq_scatter_v(txq,
-					       &pkts[nb_tx],
-					       pkts_n - nb_tx);
-		n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
-		if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
-			n = txq_count_contig_single_seg(&pkts[nb_tx], n);
-		if (txq->offloads & MLX5_VEC_TX_CKSUM_OFFLOAD_CAP)
-			n = txq_calc_offload(txq, &pkts[nb_tx], n, &cs_flags);
-		ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
-		nb_tx += ret;
-		if (!ret)
-			break;
-	}
-	return nb_tx;
-}
-
 /**
  * Skip error packets.
  *
@@ -176,7 +67,7 @@ rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
 	for (i = 0; i < pkts_n; ++i) {
 		struct rte_mbuf *pkt = pkts[i];
 
-		if (pkt->packet_type == RTE_PTYPE_ALL_MASK) {
+		if (pkt->packet_type == RTE_PTYPE_ALL_MASK || rxq->err_state) {
 #ifdef MLX5_PMD_SOFT_COUNTERS
 			err_bytes += PKT_LEN(pkt);
 #endif
@@ -191,6 +82,7 @@ rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
 	rxq->stats.ipackets -= (pkts_n - n);
 	rxq->stats.ibytes -= err_bytes;
 #endif
+	mlx5_rx_err_handle(rxq, 1);
 	return n;
 }
 
@@ -211,56 +103,20 @@ uint16_t
 mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
 	struct mlx5_rxq_data *rxq = dpdk_rxq;
-	uint16_t nb_rx;
+	uint16_t nb_rx = 0;
+	uint16_t tn = 0;
 	uint64_t err = 0;
-
-	nb_rx = rxq_burst_v(rxq, pkts, pkts_n, &err);
-	if (unlikely(err))
-		nb_rx = rxq_handle_pending_error(rxq, pkts, nb_rx);
-	return nb_rx;
-}
-
-/**
- * Check Tx queue flags are set for raw vectorized Tx.
- *
- * @param dev
- *   Pointer to Ethernet device.
- *
- * @return
- *   1 if supported, negative errno value if not.
- */
-int __attribute__((cold))
-mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev)
-{
-	uint64_t offloads = dev->data->dev_conf.txmode.offloads;
-
-	/* Doesn't support any offload. */
-	if (offloads)
-		return -ENOTSUP;
-	return 1;
-}
-
-/**
- * Check a device can support vectorized TX.
- *
- * @param dev
- *   Pointer to Ethernet device.
- *
- * @return
- *   1 if supported, negative errno value if not.
- */
-int __attribute__((cold))
-mlx5_check_vec_tx_support(struct rte_eth_dev *dev)
-{
-	struct priv *priv = dev->data->dev_private;
-	uint64_t offloads = dev->data->dev_conf.txmode.offloads;
-
-	if (!priv->config.tx_vec_en ||
-	    priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
-	    priv->config.mps != MLX5_MPW_ENHANCED ||
-	    offloads & ~MLX5_VEC_TX_OFFLOAD_CAP)
-		return -ENOTSUP;
-	return 1;
+	bool no_cq = false;
+
+	do {
+		nb_rx = rxq_burst_v(rxq, pkts + tn, pkts_n - tn, &err, &no_cq);
+		if (unlikely(err | rxq->err_state))
+			nb_rx = rxq_handle_pending_error(rxq, pkts + tn, nb_rx);
+		tn += nb_rx;
+		if (unlikely(no_cq))
+			break;
+	} while (tn != pkts_n);
+	return tn;
 }
 
 /**
@@ -272,14 +128,18 @@ mlx5_check_vec_tx_support(struct rte_eth_dev *dev)
  * @return
  *   1 if supported, negative errno value if not.
  */
-int __attribute__((cold))
+int __rte_cold
 mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq)
 {
 	struct mlx5_rxq_ctrl *ctrl =
 		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 
+	if (mlx5_mprq_enabled(ETH_DEV(ctrl->priv)))
+		return -ENOTSUP;
 	if (!ctrl->priv->config.rx_vec_en || rxq->sges_n != 0)
 		return -ENOTSUP;
+	if (rxq->lro)
+		return -ENOTSUP;
 	return 1;
 }
 
@@ -292,14 +152,16 @@ mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq)
  * @return
  *   1 if supported, negative errno value if not.
  */
-int __attribute__((cold))
+int __rte_cold
 mlx5_check_vec_rx_support(struct rte_eth_dev *dev)
 {
-	struct priv *priv = dev->data->dev_private;
-	uint16_t i;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	uint32_t i;
 
 	if (!priv->config.rx_vec_en)
 		return -ENOTSUP;
+	if (mlx5_mprq_enabled(dev))
+		return -ENOTSUP;
 	/* All the configured queues should support. */
 	for (i = 0; i < priv->rxqs_n; ++i) {
 		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
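
The error path above is easier to follow outside the diff: rxq_handle_pending_error() walks the completed burst, drops every mbuf whose packet_type the Rx path marked as RTE_PTYPE_ALL_MASK (or everything, once the queue is in an error state), compacts the survivors to the front of the array, and rewinds the soft counters by the dropped packets and bytes before calling mlx5_rx_err_handle(). The stand-alone sketch below models only the compaction step with toy types; compact_good_pkts() and PKT_ERR_SENTINEL are illustrative names, not part of the mlx5 PMD or the DPDK API.

/* Toy model of the error-compaction pattern used above; not driver code. */
#include <stdint.h>
#include <stdio.h>

#define PKT_ERR_SENTINEL UINT32_MAX	/* stands in for RTE_PTYPE_ALL_MASK */

struct toy_pkt {
	uint32_t packet_type;
	uint32_t pkt_len;
};

/* Drop marked packets, compact survivors, return how many were kept. */
static uint16_t
compact_good_pkts(struct toy_pkt **pkts, uint16_t pkts_n, uint64_t *err_bytes)
{
	uint16_t n = 0;
	uint16_t i;

	for (i = 0; i < pkts_n; ++i) {
		if (pkts[i]->packet_type == PKT_ERR_SENTINEL) {
			/* The driver frees the mbuf segment here. */
			*err_bytes += pkts[i]->pkt_len;
		} else {
			pkts[n++] = pkts[i];
		}
	}
	return n;
}

int
main(void)
{
	struct toy_pkt a = { 0, 64 };
	struct toy_pkt b = { PKT_ERR_SENTINEL, 128 };
	struct toy_pkt c = { 0, 256 };
	struct toy_pkt *burst[] = { &a, &b, &c };
	uint64_t err_bytes = 0;
	uint16_t n = compact_good_pkts(burst, 3, &err_bytes);

	/* Prints: kept 2 packets, dropped 128 error bytes. */
	printf("kept %u packets, dropped %llu error bytes\n",
	       (unsigned int)n, (unsigned long long)err_bytes);
	return 0;
}

The in-place compaction is why the diff adjusts rxq->stats.ipackets and rxq->stats.ibytes by the dropped counts: the burst has already been counted as received, so the statistics are corrected after the bad packets are filtered out.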