diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 9d0c00f6da..e9b837d10b 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -266,6 +266,7 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
 		struct ibv_exp_cq_attr cq_attr;
 	} attr;
 	enum ibv_exp_query_intf_status status;
+	unsigned int cqe_n;
 	int ret = 0;
 
 	if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
@@ -276,6 +277,8 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
 	(void)conf; /* Thresholds configuration (ignored). */
 	assert(desc > MLX5_TX_COMP_THRESH);
 	tmpl.txq.elts_n = log2above(desc);
+	if (priv->mps == MLX5_MPW_ENHANCED)
+		tmpl.txq.mpw_hdr_dseg = priv->mpw_hdr_dseg;
 	/* MRs will be registered in mp2mr[] later. */
 	attr.rd = (struct ibv_exp_res_domain_init_attr){
 		.comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
@@ -294,9 +297,12 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
 		.comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
 		.res_domain = tmpl.rd,
 	};
+	cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
+		((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
+	if (priv->mps == MLX5_MPW_ENHANCED)
+		cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
 	tmpl.cq = ibv_exp_create_cq(priv->ctx,
-				    (((desc / MLX5_TX_COMP_THRESH) - 1) ?
-				     ((desc / MLX5_TX_COMP_THRESH) - 1) : 1),
+				    cqe_n,
 				    NULL, NULL, 0, &attr.cq);
 	if (tmpl.cq == NULL) {
 		ret = ENOMEM;
@@ -340,9 +346,24 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
 		tmpl.txq.max_inline = ((priv->txq_inline +
 					(RTE_CACHE_LINE_SIZE - 1)) /
 				       RTE_CACHE_LINE_SIZE);
-		attr.init.cap.max_inline_data =
-			tmpl.txq.max_inline * RTE_CACHE_LINE_SIZE;
 		tmpl.txq.inline_en = 1;
+		/* TSO and MPS can't be enabled concurrently. */
+		assert(!priv->tso || !priv->mps);
+		if (priv->mps == MLX5_MPW_ENHANCED) {
+			tmpl.txq.inline_max_packet_sz =
+				priv->inline_max_packet_sz;
+			/* To minimize the size of data set, avoid requesting
+			 * too large WQ.
+			 */
+			attr.init.cap.max_inline_data =
+				((RTE_MIN(priv->txq_inline,
+					  priv->inline_max_packet_sz) +
+				  (RTE_CACHE_LINE_SIZE - 1)) /
+				 RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
+		} else {
+			attr.init.cap.max_inline_data =
+				tmpl.txq.max_inline * RTE_CACHE_LINE_SIZE;
+		}
 	}
 	if (priv->tso) {
 		uint16_t max_tso_inline = ((MLX5_MAX_TSO_HEADER +
@@ -511,6 +532,19 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		}
 		(*priv->txqs)[idx] = NULL;
 		txq_cleanup(txq_ctrl);
+		/* Resize if txq size is changed. */
+		if (txq_ctrl->txq.elts_n != log2above(desc)) {
+			txq_ctrl = rte_realloc(txq_ctrl,
+					       sizeof(*txq_ctrl) +
+					       desc * sizeof(struct rte_mbuf *),
+					       RTE_CACHE_LINE_SIZE);
+			if (!txq_ctrl) {
+				ERROR("%p: unable to reallocate queue index %u",
+				      (void *)dev, idx);
+				priv_unlock(priv);
+				return -ENOMEM;
+			}
+		}
 	} else {
 		txq_ctrl = rte_calloc_socket("TXQ", 1,
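
For reference, the CQ sizing introduced above budgets one completion entry per MLX5_TX_COMP_THRESH descriptors (with a floor of one) and reserves MLX5_TX_COMP_THRESH_INLINE_DIV extra entries when enhanced multi-packet send (eMPW) is enabled. Below is a minimal standalone sketch of that arithmetic, not part of the patch; the constant values are illustrative assumptions, and the authoritative definitions live in mlx5_defs.h.

/* Sketch of the Tx CQE sizing logic from the patch above.
 * Constants are assumed values for illustration only.
 */
#include <stdio.h>

#define MLX5_TX_COMP_THRESH 32           /* assumed completion threshold */
#define MLX5_TX_COMP_THRESH_INLINE_DIV 8 /* assumed extra CQEs for eMPW */

/* Number of CQEs requested for a Tx queue of 'desc' descriptors.
 * The driver asserts desc > MLX5_TX_COMP_THRESH before this point.
 */
static unsigned int
txq_cqe_n(unsigned int desc, int enhanced_mpw)
{
	unsigned int n = desc / MLX5_TX_COMP_THRESH;
	/* One CQE per completion burst, never fewer than one. */
	unsigned int cqe_n = (n - 1) ? (n - 1) : 1;

	/* Enhanced MPW flushes completions more often, so add headroom. */
	if (enhanced_mpw)
		cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
	return cqe_n;
}

int
main(void)
{
	printf("desc=512, plain: %u CQEs\n", txq_cqe_n(512, 0));
	printf("desc=512, eMPW:  %u CQEs\n", txq_cqe_n(512, 1));
	return 0;
}

With the assumed threshold of 32, a 512-descriptor ring requests 15 CQEs on the plain path and 23 with enhanced MPW, matching the old inline ternary plus the new MLX5_TX_COMP_THRESH_INLINE_DIV headroom.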