diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index e9ceda5caf..84a5c55ee0 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -154,14 +154,14 @@ mlx5_rxq_release_devx_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
 	struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->rq_dbrec_page;
 
+	if (rxq_ctrl->wq_umem) {
+		mlx5_os_umem_dereg(rxq_ctrl->wq_umem);
+		rxq_ctrl->wq_umem = NULL;
+	}
 	if (rxq_ctrl->rxq.wqes) {
 		mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
 		rxq_ctrl->rxq.wqes = NULL;
 	}
-	if (rxq_ctrl->wq_umem) {
-		mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
-		rxq_ctrl->wq_umem = NULL;
-	}
 	if (dbr_page) {
 		claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
 					    mlx5_os_get_umem_id(dbr_page->umem),
@@ -181,14 +181,14 @@ mlx5_rxq_release_devx_cq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
 	struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->cq_dbrec_page;
 
+	if (rxq_ctrl->cq_umem) {
+		mlx5_os_umem_dereg(rxq_ctrl->cq_umem);
+		rxq_ctrl->cq_umem = NULL;
+	}
 	if (rxq_ctrl->rxq.cqes) {
 		rte_free((void *)(uintptr_t)rxq_ctrl->rxq.cqes);
 		rxq_ctrl->rxq.cqes = NULL;
 	}
-	if (rxq_ctrl->cq_umem) {
-		mlx5_glue->devx_umem_dereg(rxq_ctrl->cq_umem);
-		rxq_ctrl->cq_umem = NULL;
-	}
 	if (dbr_page) {
 		claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
 					    mlx5_os_get_umem_id(dbr_page->umem),
@@ -216,7 +216,7 @@ mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
 	claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
 	claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq));
 	if (rxq_obj->devx_channel)
-		mlx5_glue->devx_destroy_event_channel
+		mlx5_os_devx_destroy_event_channel
 						(rxq_obj->devx_channel);
 	mlx5_rxq_release_devx_rq_resources(rxq_obj->rxq_ctrl);
 	mlx5_rxq_release_devx_cq_resources(rxq_obj->rxq_ctrl);
@@ -294,7 +294,7 @@ static void
 mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
 		       struct mlx5_devx_wq_attr *wq_attr)
 {
-	wq_attr->end_padding_mode = priv->config.cqe_pad ?
+	wq_attr->end_padding_mode = priv->config.hw_padding ?
 					MLX5_WQ_END_PAD_MODE_ALIGN :
 					MLX5_WQ_END_PAD_MODE_NONE;
 	wq_attr->pd = priv->sh->pdn;
@@ -375,7 +375,7 @@ mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
 	if (!buf)
 		return NULL;
 	rxq_data->wqes = buf;
-	rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
+	rxq_ctrl->wq_umem = mlx5_os_umem_reg(priv->sh->ctx,
 						     buf, wq_size, 0);
 	if (!rxq_ctrl->wq_umem)
 		goto error;
@@ -497,7 +497,7 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
 		goto error;
 	}
 	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf;
-	rxq_ctrl->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, buf,
+	rxq_ctrl->cq_umem = mlx5_os_umem_reg(priv->sh->ctx, buf,
 						     cq_size,
 						     IBV_ACCESS_LOCAL_WRITE);
 	if (!rxq_ctrl->cq_umem) {
@@ -533,7 +533,7 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
 	rxq_data->cqe_n = log_cqe_n;
 	rxq_data->cqn = cq_obj->id;
 	if (rxq_ctrl->obj->devx_channel) {
-		ret = mlx5_glue->devx_subscribe_devx_event
+		ret = mlx5_os_devx_subscribe_devx_event
 						(rxq_ctrl->obj->devx_channel,
 						 cq_obj->obj,
 						 sizeof(event_nums),
@@ -644,7 +644,7 @@ mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 		int devx_ev_flag =
 			  MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;
 
-		tmpl->devx_channel = mlx5_glue->devx_create_event_channel
+		tmpl->devx_channel = mlx5_os_devx_create_event_channel
 							(priv->sh->ctx,
 							 devx_ev_flag);
 		if (!tmpl->devx_channel) {
@@ -686,7 +686,7 @@ error:
 	if (tmpl->devx_cq)
 		claim_zero(mlx5_devx_cmd_destroy(tmpl->devx_cq));
 	if (tmpl->devx_channel)
-		mlx5_glue->devx_destroy_event_channel(tmpl->devx_channel);
+		mlx5_os_devx_destroy_event_channel(tmpl->devx_channel);
 	mlx5_rxq_release_devx_rq_resources(rxq_ctrl);
 	mlx5_rxq_release_devx_cq_resources(rxq_ctrl);
 	rte_errno = ret; /* Restore rte_errno. */
@@ -694,7 +694,7 @@ error:
 }
 
 /**
- * Create RQT using DevX API as a filed of indirection table.
+ * Prepare RQT attribute structure for DevX RQT API.
  *
  * @param dev
  *   Pointer to Ethernet device.
@@ -704,30 +704,31 @@ error:
  *   DevX indirection table object.
  *
  * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
+ *   The RQT attr object initialized, NULL otherwise and rte_errno is set.
  */
-static int
-mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
-			struct mlx5_ind_table_obj *ind_tbl)
+static struct mlx5_devx_rqt_attr *
+mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
+				    const unsigned int log_n,
+				    const uint16_t *queues,
+				    const uint32_t queues_n)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
 	const unsigned int rqt_n = 1 << log_n;
 	unsigned int i, j;
 
-	MLX5_ASSERT(ind_tbl);
 	rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
 			      rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
 	if (!rqt_attr) {
 		DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
 			dev->data->port_id);
 		rte_errno = ENOMEM;
-		return -rte_errno;
+		return NULL;
 	}
 	rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
 	rqt_attr->rqt_actual_size = rqt_n;
-	for (i = 0; i != ind_tbl->queues_n; ++i) {
-		struct mlx5_rxq_data *rxq = (*priv->rxqs)[ind_tbl->queues[i]];
+	for (i = 0; i != queues_n; ++i) {
+		struct mlx5_rxq_data *rxq = (*priv->rxqs)[queues[i]];
 		struct mlx5_rxq_ctrl *rxq_ctrl =
 				container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 
@@ -736,6 +737,35 @@ mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
 	MLX5_ASSERT(i > 0);
 	for (j = 0; i != rqt_n; ++j, ++i)
 		rqt_attr->rq_list[i] = rqt_attr->rq_list[j];
+	return rqt_attr;
+}
+
+/**
+ * Create RQT using DevX API as a field of indirection table.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param log_n
+ *   Log of number of queues in the array.
+ * @param ind_tbl
+ *   DevX indirection table object.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
+			struct mlx5_ind_table_obj *ind_tbl)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
+
+	MLX5_ASSERT(ind_tbl);
+	rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
+						       ind_tbl->queues,
+						       ind_tbl->queues_n);
+	if (!rqt_attr)
+		return -rte_errno;
 	ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr);
 	mlx5_free(rqt_attr);
 	if (!ind_tbl->rqt) {
@@ -747,6 +777,41 @@ mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
 	return 0;
 }
 
+/**
+ * Modify RQT using DevX API as a field of indirection table.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param log_n
+ *   Log of number of queues in the array.
+ * @param ind_tbl
+ *   DevX indirection table object.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_devx_ind_table_modify(struct rte_eth_dev *dev, const unsigned int log_n,
+			   const uint16_t *queues, const uint32_t queues_n,
+			   struct mlx5_ind_table_obj *ind_tbl)
+{
+	int ret = 0;
+	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
+
+	MLX5_ASSERT(ind_tbl);
+	rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
+						       queues,
+						       queues_n);
+	if (!rqt_attr)
+		return -rte_errno;
+	ret = mlx5_devx_cmd_modify_rqt(ind_tbl->rqt, rqt_attr);
+	mlx5_free(rqt_attr);
+	if (ret)
+		DRV_LOG(ERR, "Port %u cannot modify DevX RQT.",
+			dev->data->port_id);
+	return ret;
+}
+
 /**
  * Destroy the DevX RQT object.
  *
@@ -1062,7 +1127,7 @@ mlx5_txq_release_devx_sq_resources(struct mlx5_txq_obj *txq_obj)
 		txq_obj->sq_devx = NULL;
 	}
 	if (txq_obj->sq_umem) {
-		claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->sq_umem));
+		claim_zero(mlx5_os_umem_dereg(txq_obj->sq_umem));
 		txq_obj->sq_umem = NULL;
 	}
 	if (txq_obj->sq_buf) {
@@ -1090,7 +1155,7 @@ mlx5_txq_release_devx_cq_resources(struct mlx5_txq_obj *txq_obj)
 	if (txq_obj->cq_devx)
 		claim_zero(mlx5_devx_cmd_destroy(txq_obj->cq_devx));
 	if (txq_obj->cq_umem)
-		claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->cq_umem));
+		claim_zero(mlx5_os_umem_dereg(txq_obj->cq_umem));
 	if (txq_obj->cq_buf)
 		mlx5_free(txq_obj->cq_buf);
 	if (txq_obj->cq_dbrec_page)
@@ -1109,8 +1174,8 @@
 static void
 mlx5_txq_release_devx_resources(struct mlx5_txq_obj *txq_obj)
 {
-	mlx5_txq_release_devx_cq_resources(txq_obj);
 	mlx5_txq_release_devx_sq_resources(txq_obj);
+	mlx5_txq_release_devx_cq_resources(txq_obj);
 }
 
 /**
@@ -1178,7 +1243,7 @@ mlx5_txq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
 		return 0;
 	}
 	/* Register allocated buffer in user space with DevX. */
-	txq_obj->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
+	txq_obj->cq_umem = mlx5_os_umem_reg(priv->sh->ctx,
 						(void *)txq_obj->cq_buf,
 						cqe_n * sizeof(struct mlx5_cqe),
 						IBV_ACCESS_LOCAL_WRITE);
@@ -1277,7 +1342,7 @@ mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx)
 		goto error;
 	}
 	/* Register allocated buffer in user space with DevX. */
-	txq_obj->sq_umem = mlx5_glue->devx_umem_reg
+	txq_obj->sq_umem = mlx5_os_umem_reg
 					(priv->sh->ctx,
 					 (void *)txq_obj->sq_buf,
 					 wqe_n * sizeof(struct mlx5_wqe),
@@ -1472,6 +1537,7 @@ struct mlx5_obj_ops devx_obj_ops = {
 	.rxq_obj_modify = mlx5_devx_modify_rq,
 	.rxq_obj_release = mlx5_rxq_devx_obj_release,
 	.ind_table_new = mlx5_devx_ind_table_new,
+	.ind_table_modify = mlx5_devx_ind_table_modify,
 	.ind_table_destroy = mlx5_devx_ind_table_destroy,
 	.hrxq_new = mlx5_devx_hrxq_new,
 	.hrxq_destroy = mlx5_devx_tir_destroy,
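
Note on the extracted helper: the subtlest part of the new
mlx5_devx_ind_table_create_rqt_attr() (see the @@ -736,6 +737,35 @@ hunk)
is the tail loop "for (j = 0; i != rqt_n; ++j, ++i)". The RQT size is a
power of two (rqt_n = 1 << log_n), so after copying queues_n entries the
remainder of rq_list is padded by replicating earlier entries, which is
why the MLX5_ASSERT(i > 0) is needed. The standalone C sketch below
reproduces only that padding step with plain arrays; it assumes nothing
from the driver, and names such as fill_rq_list are hypothetical:

	#include <stdio.h>
	#include <stdint.h>

	/*
	 * Pad a power-of-two table by wrapping around the queue list,
	 * mirroring the rq_list loop in the diff. The second loop may
	 * read entries it wrote itself, so at least one entry must
	 * already be present (the driver asserts i > 0 for this).
	 */
	static void
	fill_rq_list(uint32_t *rq_list, unsigned int rqt_n,
		     const uint16_t *queues, uint32_t queues_n)
	{
		unsigned int i, j;

		for (i = 0; i != queues_n; ++i)
			rq_list[i] = queues[i]; /* driver stores RQ IDs */
		for (j = 0; i != rqt_n; ++j, ++i)
			rq_list[i] = rq_list[j];
	}

	int
	main(void)
	{
		const uint16_t queues[] = { 7, 8, 9 }; /* 3 queues */
		uint32_t rq_list[8];                   /* rqt_n = 1 << 3 */
		unsigned int i;

		fill_rq_list(rq_list, 8, queues, 3);
		for (i = 0; i != 8; ++i)
			printf("%u ", rq_list[i]);     /* 7 8 9 7 8 9 7 8 */
		printf("\n");
		return 0;
	}

Because both mlx5_devx_ind_table_new() and mlx5_devx_ind_table_modify()
now build their attributes through this one helper, create and modify are
guaranteed to fill the RQT identically.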