X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fregex%2Fmlx5%2Fmlx5_regex_control.c;h=545bbbcf89adbbbff521145cc1e225e7b2761174;hb=fe46b20c96593ff9644097978b347286c6a4b71a;hp=ec57e24e90fac470b841d8fa2280dcd092b480d2;hpb=5f41b66d12cda394c919777ae9b80257e97b5018;p=dpdk.git

diff --git a/drivers/regex/mlx5/mlx5_regex_control.c b/drivers/regex/mlx5/mlx5_regex_control.c
index ec57e24e90..545bbbcf89 100644
--- a/drivers/regex/mlx5/mlx5_regex_control.c
+++ b/drivers/regex/mlx5/mlx5_regex_control.c
@@ -6,16 +6,19 @@
 #include 
 #include 
+#include 
 
 #include 
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
 #include 
 #include 
 #include 
+#include 
 
 #include "mlx5_regex.h"
 #include "mlx5_regex_utils.h"
@@ -24,6 +27,9 @@
 
 #define MLX5_REGEX_NUM_WQE_PER_PAGE (4096/64)
 
+#define MLX5_REGEX_WQE_LOG_NUM(has_umr, log_desc) \
+	((has_umr) ? ((log_desc) + 2) : (log_desc))
+
 /**
  * Returns the number of qp obj to be created.
  *
@@ -43,8 +49,6 @@ regex_ctrl_get_nb_obj(uint16_t nb_desc)
 /**
  * destroy CQ.
  *
- * @param priv
- *   Pointer to the priv object.
  * @param cp
  *   Pointer to the CQ to be destroyed.
  *
@@ -52,24 +56,10 @@ regex_ctrl_get_nb_obj(uint16_t nb_desc)
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
 static int
-regex_ctrl_destroy_cq(struct mlx5_regex_priv *priv, struct mlx5_regex_cq *cq)
+regex_ctrl_destroy_cq(struct mlx5_regex_cq *cq)
 {
-	if (cq->cqe_umem) {
-		mlx5_glue->devx_umem_dereg(cq->cqe_umem);
-		cq->cqe_umem = NULL;
-	}
-	if (cq->cqe) {
-		rte_free((void *)(uintptr_t)cq->cqe);
-		cq->cqe = NULL;
-	}
-	if (cq->dbr_offset) {
-		mlx5_release_dbr(&priv->dbrpgs, cq->dbr_umem, cq->dbr_offset);
-		cq->dbr_offset = -1;
-	}
-	if (cq->obj) {
-		mlx5_devx_cmd_destroy(cq->obj);
-		cq->obj = NULL;
-	}
+	mlx5_devx_cq_destroy(&cq->cq_obj);
+	memset(cq, 0, sizeof(*cq));
 	return 0;
 }
 
@@ -88,85 +78,42 @@
 static int
 regex_ctrl_create_cq(struct mlx5_regex_priv *priv, struct mlx5_regex_cq *cq)
 {
 	struct mlx5_devx_cq_attr attr = {
-		.q_umem_valid = 1,
-		.db_umem_valid = 1,
-		.eqn = priv->eqn,
+		.uar_page_id = priv->uar->page_id,
 	};
-	struct mlx5_devx_dbr_page *dbr_page = NULL;
-	void *buf = NULL;
-	size_t pgsize = sysconf(_SC_PAGESIZE);
-	uint32_t cq_size = 1 << cq->log_nb_desc;
-	uint32_t i;
-
-	cq->dbr_offset = mlx5_get_dbr(priv->ctx, &priv->dbrpgs, &dbr_page);
-	if (cq->dbr_offset < 0) {
-		DRV_LOG(ERR, "Can't allocate cq door bell record.");
-		rte_errno = ENOMEM;
-		goto error;
-	}
-	cq->dbr_umem = mlx5_os_get_umem_id(dbr_page->umem);
-	cq->dbr = (uint32_t *)((uintptr_t)dbr_page->dbrs +
-			(uintptr_t)cq->dbr_offset);
+	int ret;
 
-	buf = rte_calloc(NULL, 1, sizeof(struct mlx5_cqe) * cq_size, 4096);
-	if (!buf) {
-		DRV_LOG(ERR, "Can't allocate cqe buffer.");
-		rte_errno = ENOMEM;
-		goto error;
-	}
-	cq->cqe = buf;
-	for (i = 0; i < cq_size; i++)
-		cq->cqe[i].op_own = 0xff;
-	cq->cqe_umem = mlx5_glue->devx_umem_reg(priv->ctx, buf,
-						sizeof(struct mlx5_cqe) *
-						cq_size, 7);
-	if (!cq->cqe_umem) {
-		DRV_LOG(ERR, "Can't register cqe mem.");
-		rte_errno = ENOMEM;
-		goto error;
-	}
-	attr.db_umem_offset = cq->dbr_offset;
-	attr.db_umem_id = cq->dbr_umem;
-	attr.q_umem_id = mlx5_os_get_umem_id(cq->cqe_umem);
-	attr.log_cq_size = cq->log_nb_desc;
-	attr.uar_page_id = priv->uar->page_id;
-	attr.log_page_size = rte_log2_u32(pgsize);
-	cq->obj = mlx5_devx_cmd_create_cq(priv->ctx, &attr);
-	if (!cq->obj) {
-		DRV_LOG(ERR, "Can't create cq object.");
-		rte_errno = ENOMEM;
-		goto error;
+	cq->ci = 0;
+	ret = mlx5_devx_cq_create(priv->cdev->ctx, &cq->cq_obj, cq->log_nb_desc,
+				  &attr, SOCKET_ID_ANY);
+	if (ret) {
+		DRV_LOG(ERR, "Can't create CQ object.");
+		memset(cq, 0, sizeof(*cq));
+		rte_errno = ENOMEM;
+		return -rte_errno;
 	}
 	return 0;
-error:
-	if (cq->cqe_umem)
-		mlx5_glue->devx_umem_dereg(cq->cqe_umem);
-	if (buf)
-		rte_free(buf);
-	if (cq->dbr_offset)
-		mlx5_release_dbr(&priv->dbrpgs, cq->dbr_umem, cq->dbr_offset);
-	return -rte_errno;
 }
 
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+/**
+ * Destroy the SQ object.
+ *
+ * @param qp
+ *   Pointer to the QP element
+ * @param q_ind
+ *   The index of the queue.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
 static int
-regex_get_pdn(void *pd, uint32_t *pdn)
+regex_ctrl_destroy_hw_qp(struct mlx5_regex_qp *qp, uint16_t q_ind)
 {
-	struct mlx5dv_obj obj;
-	struct mlx5dv_pd pd_info;
-	int ret = 0;
+	struct mlx5_regex_hw_qp *qp_obj = &qp->qps[q_ind];
 
-	obj.pd.in = pd;
-	obj.pd.out = &pd_info;
-	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
-	if (ret) {
-		DRV_LOG(DEBUG, "Fail to get PD object info");
-		return ret;
-	}
-	*pdn = pd_info.pdn;
+	mlx5_devx_qp_destroy(&qp_obj->qp_obj);
+	memset(qp, 0, sizeof(*qp));
 	return 0;
 }
-#endif
 
 /**
  * create the SQ object.
@@ -184,86 +131,45 @@ regex_get_pdn(void *pd, uint32_t *pdn)
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
 static int
-regex_ctrl_create_sq(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
+regex_ctrl_create_hw_qp(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
 		uint16_t q_ind, uint16_t log_nb_desc)
 {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	struct mlx5_devx_create_sq_attr attr = { 0 };
-	struct mlx5_devx_modify_sq_attr modify_attr = { 0 };
-	struct mlx5_devx_wq_attr *wq_attr = &attr.wq_attr;
-	struct mlx5_devx_dbr_page *dbr_page = NULL;
-	struct mlx5_regex_sq *sq = &qp->sqs[q_ind];
-	void *buf = NULL;
-	uint32_t sq_size;
-	uint32_t pd_num = 0;
+	struct mlx5_devx_qp_attr attr = {
+		.cqn = qp->cq.cq_obj.cq->id,
+		.uar_index = priv->uar->page_id,
+		.pd = priv->cdev->pdn,
+		.ts_format = mlx5_ts_format_conv
+			(priv->cdev->config.hca_attr.qp_ts_format),
+		.user_index = q_ind,
+	};
+	struct mlx5_regex_hw_qp *qp_obj = &qp->qps[q_ind];
 	int ret;
 
-	sq->log_nb_desc = log_nb_desc;
-	sq_size = 1 << sq->log_nb_desc;
-	sq->dbr_offset = mlx5_get_dbr(priv->ctx, &priv->dbrpgs, &dbr_page);
-	if (sq->dbr_offset < 0) {
-		DRV_LOG(ERR, "Can't allocate sq door bell record.");
-		rte_errno = ENOMEM;
-		goto error;
-	}
-	sq->dbr_umem = mlx5_os_get_umem_id(dbr_page->umem);
-	sq->dbr = (uint32_t *)((uintptr_t)dbr_page->dbrs +
-			(uintptr_t)sq->dbr_offset);
-
-	buf = rte_calloc(NULL, 1, 64 * sq_size, 4096);
-	if (!buf) {
-		DRV_LOG(ERR, "Can't allocate wqe buffer.");
-		rte_errno = ENOMEM;
-		goto error;
-	}
-	sq->wqe = buf;
-	sq->wqe_umem = mlx5_glue->devx_umem_reg(priv->ctx, buf, 64 * sq_size,
-						7);
-	if (!sq->wqe_umem) {
-		DRV_LOG(ERR, "Can't register wqe mem.");
-		rte_errno = ENOMEM;
-		goto error;
-	}
-	attr.state = MLX5_SQC_STATE_RST;
-	attr.tis_lst_sz = 0;
-	attr.tis_num = 0;
-	attr.user_index = q_ind;
-	attr.cqn = qp->cq.obj->id;
-	wq_attr->uar_page = priv->uar->page_id;
-	regex_get_pdn(priv->pd, &pd_num);
-	wq_attr->pd = pd_num;
-	wq_attr->wq_type = MLX5_WQ_TYPE_CYCLIC;
-	wq_attr->dbr_umem_id = sq->dbr_umem;
-	wq_attr->dbr_addr = sq->dbr_offset;
-	wq_attr->dbr_umem_valid = 1;
-	wq_attr->wq_umem_id = mlx5_os_get_umem_id(sq->wqe_umem);
-	wq_attr->wq_umem_offset = 0;
-	wq_attr->wq_umem_valid = 1;
-	wq_attr->log_wq_stride = 6;
-	wq_attr->log_wq_sz = sq->log_nb_desc;
-	sq->obj = mlx5_devx_cmd_create_sq(priv->ctx, &attr);
-	if (!sq->obj) {
-		DRV_LOG(ERR, "Can't create sq object.");
-		rte_errno = ENOMEM;
-		goto error;
+	qp_obj->log_nb_desc = log_nb_desc;
+	qp_obj->qpn = q_ind;
+	qp_obj->ci = 0;
+	qp_obj->pi = 0;
+	attr.rq_size = 0;
+	attr.sq_size = RTE_BIT32(MLX5_REGEX_WQE_LOG_NUM(priv->has_umr,
+			log_nb_desc));
+	attr.mmo = priv->mmo_regex_qp_cap;
+	ret = mlx5_devx_qp_create(priv->cdev->ctx, &qp_obj->qp_obj,
+			MLX5_REGEX_WQE_LOG_NUM(priv->has_umr, log_nb_desc),
+			&attr, SOCKET_ID_ANY);
+	if (ret) {
+		DRV_LOG(ERR, "Can't create QP object.");
+		rte_errno = ENOMEM;
+		return -rte_errno;
 	}
-	modify_attr.state = MLX5_SQC_STATE_RDY;
-	ret = mlx5_devx_cmd_modify_sq(sq->obj, &modify_attr);
+	ret = mlx5_devx_qp2rts(&qp_obj->qp_obj, 0);
 	if (ret) {
-		DRV_LOG(ERR, "Can't change sq state to ready.");
-		rte_errno = ENOMEM;
-		goto error;
+		DRV_LOG(ERR, "Can't change QP state to RTS.");
+		regex_ctrl_destroy_hw_qp(qp, q_ind);
+		rte_errno = ENOMEM;
+		return -rte_errno;
 	}
 	return 0;
-error:
-	if (sq->wqe_umem)
-		mlx5_glue->devx_umem_dereg(sq->wqe_umem);
-	if (buf)
-		rte_free(buf);
-	if (sq->dbr_offset)
-		mlx5_release_dbr(&priv->dbrpgs, sq->dbr_umem, sq->dbr_offset);
-	return -rte_errno;
 #else
 	(void)priv;
 	(void)qp;
@@ -274,44 +180,6 @@ error:
 #endif
 }
 
-/**
- * Destroy the SQ object.
- *
- * @param priv
- *   Pointer to the priv object.
- * @param qp
- *   Pointer to the QP element
- * @param q_ind
- *   The index of the queue.
- *
- * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-regex_ctrl_destroy_sq(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
-		uint16_t q_ind)
-{
-	struct mlx5_regex_sq *sq = &qp->sqs[q_ind];
-
-	if (sq->wqe_umem) {
-		mlx5_glue->devx_umem_dereg(sq->wqe_umem);
-		sq->wqe_umem = NULL;
-	}
-	if (sq->wqe) {
-		rte_free((void *)(uintptr_t)sq->wqe);
-		sq->wqe = NULL;
-	}
-	if (sq->dbr_offset) {
-		mlx5_release_dbr(&priv->dbrpgs, sq->dbr_umem, sq->dbr_offset);
-		sq->dbr_offset = -1;
-	}
-	if (sq->obj) {
-		mlx5_devx_cmd_destroy(sq->obj);
-		sq->obj = NULL;
-	}
-	return 0;
-}
-
 /**
  * Setup the qp.
  *
@@ -332,45 +200,71 @@ mlx5_regex_qp_setup(struct rte_regexdev *dev, uint16_t qp_ind,
 	struct mlx5_regex_priv *priv = dev->data->dev_private;
 	struct mlx5_regex_qp *qp;
 	int i;
+	int nb_sq_config = 0;
 	int ret;
 	uint16_t log_desc;
 
 	qp = &priv->qps[qp_ind];
 	qp->flags = cfg->qp_conf_flags;
-	qp->cq.log_nb_desc = rte_log2_u32(cfg->nb_desc);
-	qp->nb_desc = 1 << qp->cq.log_nb_desc;
+	log_desc = rte_log2_u32(cfg->nb_desc);
+	/*
+	 * UMR mode requires two WQEs(UMR and RegEx WQE) for one descriptor.
+	 * For CQ, expand the CQE number multiple with 2.
+	 * For SQ, the UMR and RegEx WQE for one descriptor consumes 4 WQEBBS,
+	 * expand the WQE number multiple with 4.
+ */ + qp->cq.log_nb_desc = log_desc + (!!priv->has_umr); + qp->nb_desc = 1 << log_desc; if (qp->flags & RTE_REGEX_QUEUE_PAIR_CFG_OOS_F) - qp->nb_obj = regex_ctrl_get_nb_obj(qp->nb_desc); + qp->nb_obj = regex_ctrl_get_nb_obj + (1 << MLX5_REGEX_WQE_LOG_NUM(priv->has_umr, log_desc)); else qp->nb_obj = 1; - qp->sqs = rte_malloc(NULL, - qp->nb_obj * sizeof(struct mlx5_regex_sq), 64); - if (!qp->sqs) { - DRV_LOG(ERR, "Can't allocate sq array memory."); - rte_errno = ENOMEM; + qp->qps = rte_malloc(NULL, + qp->nb_obj * sizeof(struct mlx5_regex_hw_qp), 64); + if (!qp->qps) { + DRV_LOG(ERR, "Can't allocate qp array memory."); + rte_errno = ENOMEM; return -rte_errno; } log_desc = rte_log2_u32(qp->nb_desc / qp->nb_obj); ret = regex_ctrl_create_cq(priv, &qp->cq); if (ret) { DRV_LOG(ERR, "Can't create cq."); - goto error; + goto err_cq; } for (i = 0; i < qp->nb_obj; i++) { - ret = regex_ctrl_create_sq(priv, qp, i, log_desc); + ret = regex_ctrl_create_hw_qp(priv, qp, i, log_desc); if (ret) { - DRV_LOG(ERR, "Can't create sq."); - goto error; + DRV_LOG(ERR, "Can't create qp object."); + goto err_btree; } + nb_sq_config++; } - mlx5_regexdev_setup_fastpath(priv, qp_ind); - return 0; + /* Save pointer of global generation number to check memory event. */ + qp->mr_ctrl.dev_gen_ptr = &priv->mr_scache.dev_gen; + ret = mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N, + rte_socket_id()); + if (ret) { + DRV_LOG(ERR, "Error setting up mr btree"); + goto err_btree; + } -error: - regex_ctrl_destroy_cq(priv, &qp->cq); - for (i = 0; i < qp->nb_obj; i++) - ret = regex_ctrl_destroy_sq(priv, qp, i); - return -rte_errno; + ret = mlx5_regexdev_setup_fastpath(priv, qp_ind); + if (ret) { + DRV_LOG(ERR, "Error setting up fastpath"); + goto err_fp; + } + return 0; +err_fp: + mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh); +err_btree: + for (i = 0; i < nb_sq_config; i++) + regex_ctrl_destroy_hw_qp(qp, i); + regex_ctrl_destroy_cq(&qp->cq); +err_cq: + rte_free(qp->qps); + return ret; }
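
Note on the queue sizing above: with UMR mode enabled (priv->has_umr), each descriptor needs a UMR WQE plus a RegEx WQE, so the patch sizes the CQ at twice the descriptor count (log_desc + 1) and the SQ at four WQEBBs per descriptor (log_desc + 2, via MLX5_REGEX_WQE_LOG_NUM). The standalone C sketch below only mirrors that arithmetic; the macro body is copied from the patch, while log2u32(), main() and the sample values are illustrative stand-ins, not driver code.

/*
 * Standalone sketch of the descriptor-sizing arithmetic used above.
 * Only REGEX_WQE_LOG_NUM() mirrors MLX5_REGEX_WQE_LOG_NUM(); the rest
 * (log2u32(), main(), the sample values) is illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

/* UMR mode consumes 4 WQEBBs per descriptor, hence log + 2. */
#define REGEX_WQE_LOG_NUM(has_umr, log_desc) \
	((has_umr) ? ((log_desc) + 2) : (log_desc))

static uint16_t
log2u32(uint32_t v) /* stand-in for rte_log2_u32() */
{
	uint16_t l = 0;

	while ((1u << l) < v)
		l++;
	return l;
}

int
main(void)
{
	uint32_t nb_desc = 256;	/* requested descriptors (cfg->nb_desc) */
	int has_umr = 1;	/* UMR mode enabled */
	uint16_t log_desc = log2u32(nb_desc);
	/* CQE count is doubled in UMR mode (two WQEs per descriptor). */
	uint32_t nb_cqe = 1u << (log_desc + (!!has_umr));
	/* SQ holds UMR + RegEx WQEs: 4 WQEBBs per descriptor. */
	uint32_t nb_wqebb = 1u << REGEX_WQE_LOG_NUM(has_umr, log_desc);

	printf("nb_desc=%u -> cqe=%u, wqebb=%u\n", nb_desc, nb_cqe, nb_wqebb);
	return 0;
}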
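Similarly, the error unwinding added to mlx5_regex_qp_setup() only tears down what was already built: a fastpath failure frees the MR btree, a QP or btree failure destroys the nb_sq_config hardware QPs created so far plus the CQ, and the qps array is freed last. The compact standalone sketch below reproduces only that goto ordering; every type and helper in it is a hypothetical stub, not the driver's API.

/*
 * Standalone sketch of the goto-based unwind order used in
 * mlx5_regex_qp_setup() above. All helpers here are illustrative stubs.
 */
#include <stdio.h>
#include <stdlib.h>

static int create_cq(void)       { return 0; }
static void destroy_cq(void)     { puts("destroy cq"); }
static int create_hw_qp(int i)   { return i < 2 ? 0 : -1; } /* fail on 3rd */
static void destroy_hw_qp(int i) { printf("destroy hw qp %d\n", i); }
static int btree_init(void)      { return 0; }
static void btree_free(void)     { puts("free mr btree"); }
static int setup_fastpath(void)  { return 0; }

static int
qp_setup(int nb_obj)
{
	int i, ret, nb_sq_config = 0;
	void *qps = calloc(nb_obj, sizeof(int));

	if (!qps)
		return -1;
	ret = create_cq();
	if (ret)
		goto err_cq;
	for (i = 0; i < nb_obj; i++) {
		ret = create_hw_qp(i);
		if (ret)
			goto err_btree;	/* destroy what exists, then the CQ */
		nb_sq_config++;
	}
	ret = btree_init();
	if (ret)
		goto err_btree;
	ret = setup_fastpath();
	if (ret)
		goto err_fp;
	free(qps); /* sketch only: the driver keeps qp->qps for the datapath */
	return 0;
err_fp:
	btree_free();
err_btree:
	for (i = 0; i < nb_sq_config; i++)
		destroy_hw_qp(i);
	destroy_cq();
err_cq:
	free(qps);
	return ret;
}

int
main(void)
{
	printf("qp_setup: %d\n", qp_setup(4));
	return 0;
}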