net/mlx5: fix Tx queue stop state
author Matan Azrad <matan@nvidia.com>
Tue, 3 Nov 2020 06:48:32 +0000 (06:48 +0000)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Fri, 13 Nov 2020 15:26:54 +0000 (16:26 +0100)
The Tx queue stop API doesn't call the PMD callback when the queue
state is already stopped.
Drivers should update the state to stopped when the queue stop
callback completes successfully or when the port is stopped, and to
started when the queue start callback completes successfully or when
the port is started.
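
For reference, the ethdev layer guards the stop entry point on this
state. A minimal sketch of that check (simplified from the ethdev
implementation of rte_eth_dev_tx_queue_stop(); not verbatim code):

    /* Simplified sketch: the PMD callback is skipped when the
     * recorded queue state is already STOPPED.
     */
    int
    rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
    {
            struct rte_eth_dev *dev = &rte_eth_devices[port_id];

            if (dev->data->tx_queue_state[tx_queue_id] ==
                RTE_ETH_QUEUE_STATE_STOPPED)
                    return 0; /* the call never reaches the PMD */
            return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
    }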

The driver wrongly didn't update the state to started when the port
start callback completed, which left the state as stopped.
A following call to the queue stop API was then not completed by the
ethdev layer because the state was already stopped.
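
A plausible trigger sequence (hypothetical application flow; the port
ID, queue ID and descriptor count are illustrative):

    rte_eth_tx_queue_setup(port_id, 0, 512, SOCKET_ID_ANY, NULL);
    rte_eth_dev_start(port_id);
    rte_eth_dev_stop(port_id);    /* queue release marks the state
                                   * STOPPED */
    rte_eth_dev_start(port_id);   /* bug: state not set back to
                                   * STARTED */
    rte_eth_dev_tx_queue_stop(port_id, 0); /* ethdev returns early,
                                            * PMD callback skipped */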

Move the state update from the Tx queue setup to the port start
callback.

Fixes: 161d103b231c ("net/mlx5: add queue start and stop")
Cc: stable@dpdk.org
Signed-off-by: Matan Azrad <matan@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
drivers/net/mlx5/linux/mlx5_verbs.c
drivers/net/mlx5/mlx5_devx.c
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_trigger.c
drivers/net/mlx5/mlx5_txq.c

diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index 494ddba..540ce32 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -1038,6 +1038,7 @@ mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
                goto error;
        }
        txq_uar_init(txq_ctrl);
+       dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
        priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
        return 0;
 error:
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index 924ee51..e9ceda5 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -1435,6 +1435,7 @@ mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
        txq_ctrl->uar_mmap_offset =
                                mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar);
        txq_uar_init(txq_ctrl);
+       dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
        return 0;
 error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index a61a2da..d95a573 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1784,8 +1784,10 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
                mlx5_free(rxq_ctrl->obj);
                rxq_ctrl->obj = NULL;
        }
-       if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
+       if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
                rxq_free_elts(rxq_ctrl);
+               dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+       }
        if (!__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED)) {
                if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
                        mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index f5fab0f..46e4191 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -77,6 +77,7 @@ mlx5_txq_start(struct rte_eth_dev *dev)
                }
                if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
                        size_t size = txq_data->cqe_s * sizeof(*txq_data->fcqs);
+
                        txq_data->fcqs = mlx5_malloc(flags, size,
                                                     RTE_CACHE_LINE_SIZE,
                                                     txq_ctrl->socket);
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index fcbc84b..faf4e45 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -388,7 +388,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
                dev->data->port_id, idx);
        (*priv->txqs)[idx] = &txq_ctrl->txq;
-       dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
        return 0;
 }
 
@@ -1249,8 +1248,8 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
                        txq_ctrl->txq.fcqs = NULL;
                }
                txq_free_elts(txq_ctrl);
+               dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
        }
-       dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
        if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
                if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
                        mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);