diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 9139429be6..6fe8673503 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
  */
 
 #include <stddef.h>
@@ -48,7 +48,7 @@ txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
 	for (i = 0; (i != elts_n); ++i)
 		(*txq_ctrl->txq.elts)[i] = NULL;
 	DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
-		txq_ctrl->priv->dev->data->port_id, txq_ctrl->idx, elts_n);
+		PORT_ID(txq_ctrl->priv), txq_ctrl->idx, elts_n);
 	txq_ctrl->txq.elts_head = 0;
 	txq_ctrl->txq.elts_tail = 0;
 	txq_ctrl->txq.elts_comp = 0;
@@ -70,7 +70,7 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
 	struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;
 
 	DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
-		txq_ctrl->priv->dev->data->port_id, txq_ctrl->idx);
+		PORT_ID(txq_ctrl->priv), txq_ctrl->idx);
 	txq_ctrl->txq.elts_head = 0;
 	txq_ctrl->txq.elts_tail = 0;
 	txq_ctrl->txq.elts_comp = 0;
@@ -119,35 +119,13 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
 		if (config->tso)
 			offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
 				     DEV_TX_OFFLOAD_GRE_TNL_TSO);
+		if (config->swp)
+			offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
+				     DEV_TX_OFFLOAD_UDP_TNL_TSO);
 	}
 	return offloads;
 }
 
-/**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @param offloads
- *   Per-queue offloads configuration.
- *
- * @return
- *   1 if the configuration is valid, 0 otherwise.
- */
-static int
-mlx5_is_tx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t port_supp_offloads = mlx5_get_tx_port_offloads(dev);
-
-	/* There are no Tx offloads which are per queue. */
-	if ((offloads & port_supp_offloads) != offloads)
-		return 0;
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return 0;
-	return 1;
-}
-
 /**
  * DPDK callback to configure a TX queue.
  *
@@ -174,22 +152,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	struct mlx5_txq_ctrl *txq_ctrl =
 		container_of(txq, struct mlx5_txq_ctrl, txq);
 
-	/*
-	 * Don't verify port offloads for application which
-	 * use the old API.
-	 */
-	if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
-	    !mlx5_is_tx_queue_offloads_allowed(dev, conf->offloads)) {
-		rte_errno = ENOTSUP;
-		DRV_LOG(ERR,
-			"port %u Tx queue offloads 0x%" PRIx64 " don't match"
-			" port offloads 0x%" PRIx64 " or supported offloads 0x%"
-			PRIx64,
-			dev->data->port_id, conf->offloads,
-			dev->data->dev_conf.txmode.offloads,
-			mlx5_get_tx_port_offloads(dev));
-		return -rte_errno;
-	}
 	if (desc <= MLX5_TX_COMP_THRESH) {
 		DRV_LOG(WARNING,
 			"port %u number of descriptors requested for Tx queue"
@@ -252,9 +214,9 @@ mlx5_tx_queue_release(void *dpdk_txq)
 	priv = txq_ctrl->priv;
 	for (i = 0; (i != priv->txqs_n); ++i)
 		if ((*priv->txqs)[i] == txq) {
-			mlx5_txq_release(priv->dev, i);
+			mlx5_txq_release(ETH_DEV(priv), i);
 			DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
-				priv->dev->data->port_id, txq_ctrl->idx);
+				PORT_ID(priv), txq_ctrl->idx);
 			break;
 		}
 }
@@ -615,7 +577,7 @@ mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
 {
 	assert(txq_ibv);
 	DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
-		txq_ibv->txq_ctrl->priv->dev->data->port_id,
+		PORT_ID(txq_ibv->txq_ctrl->priv),
 		txq_ibv->txq_ctrl->idx, rte_atomic32_read(&txq_ibv->refcnt));
 	if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
 		claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp));
@@ -682,11 +644,13 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 	unsigned int txqs_inline;
 	unsigned int inline_max_packet_sz;
 	eth_tx_burst_t tx_pkt_burst =
-		mlx5_select_tx_function(txq_ctrl->priv->dev);
+		mlx5_select_tx_function(ETH_DEV(priv));
 	int is_empw_func = is_empw_burst_func(tx_pkt_burst);
 	int tso = !!(txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
 					       DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-					       DEV_TX_OFFLOAD_GRE_TNL_TSO));
+					       DEV_TX_OFFLOAD_GRE_TNL_TSO |
+					       DEV_TX_OFFLOAD_IP_TNL_TSO |
+					       DEV_TX_OFFLOAD_UDP_TNL_TSO));
 
 	txq_inline = (config->txq_inline == MLX5_ARG_UNSET) ? 0 :
 		     config->txq_inline;
@@ -720,18 +684,6 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 				  inline_max_packet_sz) +
 				  (RTE_CACHE_LINE_SIZE - 1)) /
 				 RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
-	} else if (tso) {
-		int inline_diff = txq_ctrl->txq.max_inline -
-				  max_tso_inline;
-
-		/*
-		 * Adjust inline value as Verbs aggregates
-		 * tso_inline and txq_inline fields.
-		 */
-		txq_ctrl->max_inline_data = inline_diff > 0 ?
-					    inline_diff *
-					    RTE_CACHE_LINE_SIZE :
-					    0;
 	} else {
 		txq_ctrl->max_inline_data =
 			txq_ctrl->txq.max_inline * RTE_CACHE_LINE_SIZE;
@@ -754,8 +706,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 		DRV_LOG(WARNING,
 			"port %u txq inline is too large (%d) setting"
 			" it to the maximum possible: %d\n",
-			priv->dev->data->port_id, txq_inline,
-			max_inline);
+			PORT_ID(priv), txq_inline, max_inline);
 		txq_ctrl->txq.max_inline = max_inline /
 					   RTE_CACHE_LINE_SIZE;
 	}
@@ -767,6 +718,10 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 		txq_ctrl->txq.tso_en = 1;
 	}
 	txq_ctrl->txq.tunnel_en = config->tunnel_en;
+	txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
+				 DEV_TX_OFFLOAD_UDP_TNL_TSO |
+				 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
+				txq_ctrl->txq.offloads) && config->swp;
 }
 
 /**
@@ -802,7 +757,8 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		return NULL;
 	}
 	assert(desc > MLX5_TX_COMP_THRESH);
-	tmpl->txq.offloads = conf->offloads;
+	tmpl->txq.offloads = conf->offloads |
+			     dev->data->dev_conf.txmode.offloads;
 	tmpl->priv = priv;
 	tmpl->socket = socket;
 	tmpl->txq.elts_n = log2above(desc);
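
With mlx5_is_tx_queue_offloads_allowed() removed, per-queue Tx offload validation
is left to the ethdev layer, and mlx5_txq_new() now ORs the port-wide
txmode.offloads into each queue's offload set. Below is a minimal sketch of the
application-side usage this model assumes; nothing in it is part of the patch:
the helper name, port/queue numbers, and descriptor count are illustrative, and
on DPDK releases of this era the deprecated txq_flags field still has to carry
ETH_TXQ_FLAGS_IGNORE for the per-queue offloads field to be honored.

#include <string.h>
#include <rte_ethdev.h>

/* Hypothetical helper (illustration only): request TCP and tunnel TSO at the
 * port level and mirror the same set on the queue. Error handling trimmed. */
static int
setup_tx_with_tunnel_tso(uint16_t port_id, uint16_t queue_id,
			 uint16_t nb_desc, unsigned int socket_id)
{
	struct rte_eth_conf conf;
	struct rte_eth_dev_info info;
	struct rte_eth_txconf txconf;

	memset(&conf, 0, sizeof(conf));
	rte_eth_dev_info_get(port_id, &info);
	/* Request only what the device reports in tx_offload_capa. */
	conf.txmode.offloads = info.tx_offload_capa &
			       (DEV_TX_OFFLOAD_TCP_TSO |
				DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
				DEV_TX_OFFLOAD_IP_TNL_TSO |
				DEV_TX_OFFLOAD_UDP_TNL_TSO);
	if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
		return -1;
	txconf = info.default_txconf;
	/* Use the offloads field, not the legacy txq_flags bits. */
	txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
	/* Per-queue offloads; mlx5_txq_new() ORs txmode.offloads in as well. */
	txconf.offloads = conf.txmode.offloads;
	return rte_eth_tx_queue_setup(port_id, queue_id, nb_desc,
				      socket_id, &txconf);
}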