X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_txq.c;h=cd13eb95eeb066ebd29a0970c495fdad258076a1;hb=a96102c8698aa61dcb72d392a6c844323b8286a5;hp=cd7b42a0b26bfa6a6706add618ef2d7017172706;hpb=89f170c0da1b9debbc962f6b68afdda09e4465bb;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index cd7b42a0b2..cd13eb95ee 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -12,7 +12,7 @@
 
 #include <rte_mbuf.h>
 #include <rte_malloc.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_common.h>
 #include <rte_eal_paging.h>
 
@@ -123,6 +123,8 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
 			     DEV_TX_OFFLOAD_GRE_TNL_TSO |
 			     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
 	}
+	if (!config->mprq.enabled)
+		offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 	return offloads;
 }
 
@@ -154,6 +156,7 @@ txq_sync_cq(struct mlx5_txq_data *txq)
 	/* Resync CQE and WQE (WQ in reset state). */
 	rte_io_wmb();
 	*txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
+	txq->cq_pi = txq->cq_ci;
 	rte_io_wmb();
 }
 
@@ -388,7 +391,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
 		dev->data->port_id, idx);
 	(*priv->txqs)[idx] = &txq_ctrl->txq;
-	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
 	return 0;
 }
 
@@ -634,18 +636,23 @@ txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
 void
 mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_txq_data *txq;
-	struct mlx5_txq_ctrl *txq_ctrl;
+	struct mlx5_proc_priv *ppriv = (struct mlx5_proc_priv *)
+					dev->process_private;
+	const size_t page_size = rte_mem_page_size();
+	void *addr;
 	unsigned int i;
 
+	if (page_size == (size_t)-1) {
+		DRV_LOG(ERR, "Failed to get mem page size");
+		return;
+	}
 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
-	for (i = 0; i != priv->txqs_n; ++i) {
-		if (!(*priv->txqs)[i])
+	for (i = 0; i != ppriv->uar_table_sz; ++i) {
+		if (!ppriv->uar_table[i])
 			continue;
-		txq = (*priv->txqs)[i];
-		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
-		txq_uar_uninit_secondary(txq_ctrl);
-	}
+		addr = ppriv->uar_table[i];
+		rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size),
+			      page_size);
+	}
 }
 
@@ -800,6 +807,10 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 	bool vlan_inline;
 	unsigned int temp;
 
+	txq_ctrl->txq.fast_free =
+		!!((txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+		   !(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
+		   !config->mprq.enabled);
 	if (config->txqs_inline == MLX5_ARG_UNSET)
 		txqs_inline =
 #if defined(RTE_ARCH_ARM64)
@@ -1141,11 +1152,12 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		rte_errno = ENOMEM;
 		goto error;
 	}
-	__atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
 	tmpl->type = MLX5_TXQ_TYPE_STANDARD;
 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
 	return tmpl;
 error:
+	mlx5_mr_btree_free(&tmpl->txq.mr_ctrl.cache_bh);
 	mlx5_free(tmpl);
 	return NULL;
 }
@@ -1185,7 +1197,7 @@ mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	tmpl->txq.idx = idx;
 	tmpl->hairpin_conf = *hairpin_conf;
 	tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
-	__atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
 	return tmpl;
 }
@@ -1210,7 +1222,7 @@ mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
 
 	if (txq_data) {
 		ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
-		__atomic_add_fetch(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
+		__atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
 	}
 	return ctrl;
 }
 
@@ -1249,8 +1261,8 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
 			txq_ctrl->txq.fcqs = NULL;
 		}
 		txq_free_elts(txq_ctrl);
+		dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
-	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
 	if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
 		if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
 			mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);