From: Michael Baum
Date: Thu, 3 Sep 2020 10:13:46 +0000 (+0000)
Subject: net/mlx5: share Rx queue indirection table code
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=25ae7f1a5d9d127a46f8d62d1d689f77a78138fd;p=dpdk.git

net/mlx5: share Rx queue indirection table code

Move the Rx indirection table object's common resource allocation from
the DevX and Verbs modules to a shared location.

Signed-off-by: Michael Baum
Acked-by: Matan Azrad
---

diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index 56570b7f6e..45691849c2 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -440,67 +440,49 @@ exit:
 }
 
 /**
- * Create an indirection table.
+ * Creates a receive work queue as a field of the indirection table.
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param queues
- *   Queues entering in the indirection table.
- * @param queues_n
- *   Number of queues in the array.
+ * @param log_n
+ *   Log of number of queues in the array.
+ * @param ind_tbl
+ *   Verbs indirection table object.
  *
  * @return
- *   The Verbs object initialized, NULL otherwise and rte_errno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static struct mlx5_ind_table_obj *
-mlx5_ibv_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
-			   uint32_t queues_n)
+static int
+mlx5_ibv_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
+		       struct mlx5_ind_table_obj *ind_tbl)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ind_table_obj *ind_tbl;
-	const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
-				  log2above(queues_n) :
-				  log2above(priv->config.ind_table_max_size);
-	struct ibv_wq *wq[1 << wq_n];
-	unsigned int i = 0, j = 0, k = 0;
+	struct ibv_wq *wq[1 << log_n];
+	unsigned int i, j;
 
-	ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
-			      queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
-	if (!ind_tbl) {
-		rte_errno = ENOMEM;
-		return NULL;
-	}
-	for (i = 0; i != queues_n; ++i) {
-		struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
-		if (!rxq)
-			goto error;
-		wq[i] = rxq->obj->wq;
-		ind_tbl->queues[i] = queues[i];
+	MLX5_ASSERT(ind_tbl);
+	for (i = 0; i != ind_tbl->queues_n; ++i) {
+		struct mlx5_rxq_data *rxq = (*priv->rxqs)[ind_tbl->queues[i]];
+		struct mlx5_rxq_ctrl *rxq_ctrl =
+				container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+
+		wq[i] = rxq_ctrl->obj->wq;
 	}
-	ind_tbl->queues_n = queues_n;
+	MLX5_ASSERT(i > 0);
 	/* Finalise indirection table. */
-	k = i; /* Retain value of i for use in error case. */
-	for (j = 0; k != (unsigned int)(1 << wq_n); ++k, ++j)
-		wq[k] = wq[j];
+	for (j = 0; i != (unsigned int)(1 << log_n); ++j, ++i)
+		wq[i] = wq[j];
 	ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table(priv->sh->ctx,
 					&(struct ibv_rwq_ind_table_init_attr){
-						.log_ind_tbl_size = wq_n,
+						.log_ind_tbl_size = log_n,
 						.ind_tbl = wq,
 						.comp_mask = 0,
 					});
 	if (!ind_tbl->ind_table) {
 		rte_errno = errno;
-		goto error;
+		return -rte_errno;
 	}
-	rte_atomic32_inc(&ind_tbl->refcnt);
-	LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
-	return ind_tbl;
-error:
-	for (j = 0; j < i; j++)
-		mlx5_rxq_release(dev, ind_tbl->queues[j]);
-	mlx5_free(ind_tbl);
-	DEBUG("Port %u cannot create indirection table.", dev->data->port_id);
-	return NULL;
+	return 0;
 }
 
 /**
@@ -510,7 +492,7 @@ error:
  *   Indirection table to release.
  */
 static void
-mlx5_ibv_ind_table_obj_destroy(struct mlx5_ind_table_obj *ind_tbl)
+mlx5_ibv_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
 {
 	claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
 }
@@ -554,8 +536,7 @@ mlx5_ibv_hrxq_new(struct rte_eth_dev *dev,
 	queues_n = hash_fields ? queues_n : 1;
 	ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
 	if (!ind_tbl)
-		ind_tbl = priv->obj_ops->ind_table_obj_new(dev, queues,
-							   queues_n);
+		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
 	if (!ind_tbl) {
 		rte_errno = ENOMEM;
 		return 0;
@@ -671,8 +652,8 @@ struct mlx5_obj_ops ibv_obj_ops = {
 	.rxq_event_get = mlx5_rx_ibv_get_event,
 	.rxq_obj_modify = mlx5_ibv_modify_wq,
 	.rxq_obj_release = mlx5_rxq_ibv_obj_release,
-	.ind_table_obj_new = mlx5_ibv_ind_table_obj_new,
-	.ind_table_obj_destroy = mlx5_ibv_ind_table_obj_destroy,
+	.ind_table_new = mlx5_ibv_ind_table_new,
+	.ind_table_destroy = mlx5_ibv_ind_table_destroy,
 	.hrxq_new = mlx5_ibv_hrxq_new,
 	.hrxq_destroy = mlx5_ibv_qp_destroy,
 };
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 9594856674..12017e8fb5 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -742,10 +742,9 @@ struct mlx5_obj_ops {
 	int (*rxq_event_get)(struct mlx5_rxq_obj *rxq_obj);
 	int (*rxq_obj_modify)(struct mlx5_rxq_obj *rxq_obj, bool is_start);
 	void (*rxq_obj_release)(struct mlx5_rxq_obj *rxq_obj);
-	struct mlx5_ind_table_obj *(*ind_table_obj_new)(struct rte_eth_dev *dev,
-							 const uint16_t *queues,
-							 uint32_t queues_n);
-	void (*ind_table_obj_destroy)(struct mlx5_ind_table_obj *ind_tbl);
+	int (*ind_table_new)(struct rte_eth_dev *dev, const unsigned int log_n,
+			     struct mlx5_ind_table_obj *ind_tbl);
+	void (*ind_table_destroy)(struct mlx5_ind_table_obj *ind_tbl);
 	uint32_t (*hrxq_new)(struct rte_eth_dev *dev, const uint8_t *rss_key,
 			     uint32_t rss_key_len, uint64_t hash_fields,
 			     const uint16_t *queues, uint32_t queues_n,
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index 5fa41f18c0..ebc3929704 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -609,76 +609,57 @@ error:
 }
 
 /**
- * Create an indirection table.
+ * Create RQT using DevX API as a field of the indirection table.
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param queues
- *   Queues entering in the indirection table.
- * @param queues_n
- *   Number of queues in the array.
+ * @param log_n
+ *   Log of number of queues in the array.
+ * @param ind_tbl
+ *   DevX indirection table object.
  *
  * @return
- *   The DevX object initialized, NULL otherwise and rte_errno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static struct mlx5_ind_table_obj *
-mlx5_devx_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
-			    uint32_t queues_n)
+static int
+mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
+			struct mlx5_ind_table_obj *ind_tbl)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ind_table_obj *ind_tbl;
 	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
-	const unsigned int rqt_n = 1 << (rte_is_power_of_2(queues_n) ?
-					 log2above(queues_n) :
-					 log2above(priv->config.ind_table_max_size));
-	unsigned int i = 0, j = 0, k = 0;
+	const unsigned int rqt_n = 1 << log_n;
+	unsigned int i, j;
 
-	ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
-			      queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
-	if (!ind_tbl) {
-		rte_errno = ENOMEM;
-		return NULL;
-	}
+	MLX5_ASSERT(ind_tbl);
 	rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
 			       rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
 	if (!rqt_attr) {
 		DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
 			dev->data->port_id);
 		rte_errno = ENOMEM;
-		goto error;
+		return -rte_errno;
 	}
 	rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
 	rqt_attr->rqt_actual_size = rqt_n;
-	for (i = 0; i != queues_n; ++i) {
-		struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
-		if (!rxq) {
-			mlx5_free(rqt_attr);
-			goto error;
-		}
-		rqt_attr->rq_list[i] = rxq->obj->rq->id;
-		ind_tbl->queues[i] = queues[i];
+	for (i = 0; i != ind_tbl->queues_n; ++i) {
+		struct mlx5_rxq_data *rxq = (*priv->rxqs)[ind_tbl->queues[i]];
+		struct mlx5_rxq_ctrl *rxq_ctrl =
+				container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+
+		rqt_attr->rq_list[i] = rxq_ctrl->obj->rq->id;
 	}
-	k = i; /* Retain value of i for use in error case. */
-	for (j = 0; k != rqt_n; ++k, ++j)
-		rqt_attr->rq_list[k] = rqt_attr->rq_list[j];
+	MLX5_ASSERT(i > 0);
+	for (j = 0; i != rqt_n; ++j, ++i)
+		rqt_attr->rq_list[i] = rqt_attr->rq_list[j];
 	ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr);
 	mlx5_free(rqt_attr);
 	if (!ind_tbl->rqt) {
 		DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
 			dev->data->port_id);
 		rte_errno = errno;
-		goto error;
+		return -rte_errno;
 	}
-	ind_tbl->queues_n = queues_n;
-	rte_atomic32_inc(&ind_tbl->refcnt);
-	LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
-	return ind_tbl;
-error:
-	for (j = 0; j < i; j++)
-		mlx5_rxq_release(dev, ind_tbl->queues[j]);
-	mlx5_free(ind_tbl);
-	DEBUG("Port %u cannot create indirection table.", dev->data->port_id);
-	return NULL;
+	return 0;
 }
 
 /**
@@ -688,7 +669,7 @@ error:
  *   Indirection table to release.
  */
 static void
-mlx5_devx_ind_table_obj_destroy(struct mlx5_ind_table_obj *ind_tbl)
+mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
 {
 	claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
 }
@@ -738,8 +719,7 @@ mlx5_devx_hrxq_new(struct rte_eth_dev *dev,
 	queues_n = hash_fields ? queues_n : 1;
 	ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
 	if (!ind_tbl)
-		ind_tbl = priv->obj_ops->ind_table_obj_new(dev, queues,
-							   queues_n);
+		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
 	if (!ind_tbl) {
 		rte_errno = ENOMEM;
 		return 0;
@@ -852,8 +832,8 @@ struct mlx5_obj_ops devx_obj_ops = {
 	.rxq_event_get = mlx5_rx_devx_get_event,
 	.rxq_obj_modify = mlx5_devx_modify_rq,
 	.rxq_obj_release = mlx5_rxq_devx_obj_release,
-	.ind_table_obj_new = mlx5_devx_ind_table_obj_new,
-	.ind_table_obj_destroy = mlx5_devx_ind_table_obj_destroy,
+	.ind_table_new = mlx5_devx_ind_table_new,
+	.ind_table_destroy = mlx5_devx_ind_table_destroy,
 	.hrxq_new = mlx5_devx_hrxq_new,
 	.hrxq_destroy = mlx5_devx_tir_destroy,
 };
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index d84dfe1102..c353139c13 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1762,7 +1762,7 @@ mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
 	unsigned int i;
 
 	if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
-		priv->obj_ops->ind_table_obj_destroy(ind_tbl);
+		priv->obj_ops->ind_table_destroy(ind_tbl);
 	for (i = 0; i != ind_tbl->queues_n; ++i)
 		claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
 	if (!rte_atomic32_read(&ind_tbl->refcnt)) {
@@ -1798,6 +1798,60 @@ mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
 	return ret;
 }
 
+/**
+ * Create an indirection table.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param queues
+ *   Queues entering in the indirection table.
+ * @param queues_n
+ *   Number of queues in the array.
+ *
+ * @return
+ *   The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_ind_table_obj *
+mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
+		       uint32_t queues_n)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_ind_table_obj *ind_tbl;
+	const unsigned int n = rte_is_power_of_2(queues_n) ?
+			       log2above(queues_n) :
+			       log2above(priv->config.ind_table_max_size);
+	unsigned int i, j;
+	int ret;
+
+	ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
+			      queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
+	if (!ind_tbl) {
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	ind_tbl->queues_n = queues_n;
+	for (i = 0; i != queues_n; ++i) {
+		struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
+		if (!rxq)
+			goto error;
+		ind_tbl->queues[i] = queues[i];
+	}
+	ret = priv->obj_ops->ind_table_new(dev, n, ind_tbl);
+	if (ret < 0)
+		goto error;
+	rte_atomic32_inc(&ind_tbl->refcnt);
+	LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
+	return ind_tbl;
+error:
+	ret = rte_errno;
+	for (j = 0; j < i; j++)
+		mlx5_rxq_release(dev, ind_tbl->queues[j]);
+	rte_errno = ret;
+	mlx5_free(ind_tbl);
+	DEBUG("Port %u cannot create indirection table.", dev->data->port_id);
+	return NULL;
+}
+
 /**
  * Get an Rx Hash queue.
  *
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 14a353584b..237344fd40 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -365,6 +365,9 @@ int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
 int mlx5_rxq_verify(struct rte_eth_dev *dev);
 int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
 int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
+struct mlx5_ind_table_obj *mlx5_ind_table_obj_new(struct rte_eth_dev *dev,
+						  const uint16_t *queues,
+						  uint32_t queues_n);
 struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,
 						  const uint16_t *queues,
 						  uint32_t queues_n);
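
Note on the shared sizing/padding behaviour the refactor relies on: mlx5_ind_table_obj_new() passes log_n to the backend callback, where the table covers exactly queues_n entries when queues_n is a power of two and is stretched to ind_table_max_size otherwise, and the backend then replicates the queue list until all 2^log_n slots are filled. The standalone C sketch below illustrates only that padding scheme; it is not part of the patch, the queue numbers and table size are made-up values, and log2above() here is a simplified stand-in for the driver helper.

/*
 * Illustrative only -- not from the patch.  Shows how the indirection
 * table size is chosen and how the queue list is replicated to fill it,
 * mirroring mlx5_ibv_ind_table_new()/mlx5_devx_ind_table_new().
 */
#include <stdio.h>

/* Simplified stand-in for the driver's log2above(): log2 rounded up. */
static unsigned int
log2above(unsigned int v)
{
	unsigned int l;
	unsigned int r;

	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
		r |= (v & 1);
	return l + r;
}

static int
is_power_of_2(unsigned int n)
{
	return n && !(n & (n - 1));
}

int
main(void)
{
	/* Made-up configuration: 6 Rx queues, maximal table size of 16. */
	const unsigned int queues[] = { 0, 1, 2, 3, 4, 5 };
	const unsigned int queues_n = 6;
	const unsigned int ind_table_max_size = 16;
	const unsigned int log_n = is_power_of_2(queues_n) ?
				   log2above(queues_n) :
				   log2above(ind_table_max_size);
	const unsigned int rqt_n = 1 << log_n;
	unsigned int rq_list[16]; /* Sized for this demo only. */
	unsigned int i, j;

	/* Fill in the real queues first ... */
	for (i = 0; i != queues_n; ++i)
		rq_list[i] = queues[i];
	/* ... then replicate them until the power-of-two size is reached. */
	for (j = 0; i != rqt_n; ++j, ++i)
		rq_list[i] = rq_list[j];
	for (i = 0; i != rqt_n; ++i)
		printf("entry %2u -> queue %u\n", i, rq_list[i]);
	return 0;
}

Compiled and run, the sketch prints 16 entries cycling through queues 0-5, which matches the layout the RWQ/RQT indirection table ends up with on the device.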