+	tmpl->type = type;
+	tmpl->rxq_ctrl = rxq_ctrl;
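+	/* Create a completion event channel only if Rx interrupts are enabled. */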
+	if (rxq_ctrl->irq) {
+		tmpl->channel = mlx5_glue->create_comp_channel(priv->sh->ctx);
+		if (!tmpl->channel) {
+			DRV_LOG(ERR, "port %u: comp channel creation failure",
+				dev->data->port_id);
+			rte_errno = ENOMEM;
+			goto error;
+		}
+	}
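+	/* In MPRQ mode each WQE holds 2^strd_num_n strides, each may need a CQE. */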
+	if (mlx5_rxq_mprq_enabled(rxq_data))
+		cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
+	else
+		cqe_n = wqe_n - 1;
+	tmpl->cq = mlx5_ibv_cq_new(dev, priv, rxq_data, cqe_n, tmpl);
+	if (!tmpl->cq) {
+		DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
+			dev->data->port_id, idx);
+		rte_errno = ENOMEM;
+		goto error;
+	}
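+	/* Query CQ layout (buffer, doorbell record, CQN) through Direct Verbs. */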
+	obj.cq.in = tmpl->cq;
+	obj.cq.out = &cq_info;
+	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
+	if (ret) {
+		rte_errno = ret;
+		goto error;
+	}
+	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
+		DRV_LOG(ERR,
+			"port %u wrong MLX5_CQE_SIZE environment variable"
+			" value: it should be set to %u",
+			dev->data->port_id, RTE_CACHE_LINE_SIZE);
+		rte_errno = EINVAL;
+		goto error;
+	}
+	DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
+		dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);
+	DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
+		dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);
+	/* Allocate door-bell for types created with DevX. */
+	if (tmpl->type != MLX5_RXQ_OBJ_TYPE_IBV) {
+		struct mlx5_devx_dbr_page *dbr_page;
+		int64_t dbr_offset;
+
+		dbr_offset = mlx5_get_dbr(dev, &dbr_page);
+		if (dbr_offset < 0)
+			goto error;
+		rxq_ctrl->dbr_offset = dbr_offset;
+		rxq_ctrl->dbr_umem_id = dbr_page->umem->umem_id;
+		rxq_ctrl->dbr_umem_id_valid = 1;
+		rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
+				  (uintptr_t)rxq_ctrl->dbr_offset);
+	}
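+	/* Verbs flavor: create the WQ, move it to ready and query its layout. */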
+	if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV) {
+		tmpl->wq = mlx5_ibv_wq_new(dev, priv, rxq_data, idx, wqe_n,
+					   tmpl);
+		if (!tmpl->wq) {
+			DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
+				dev->data->port_id, idx);
+			rte_errno = ENOMEM;
+			goto error;
+		}
+		/* Change queue state to ready. */
+		mod = (struct ibv_wq_attr){
+			.attr_mask = IBV_WQ_ATTR_STATE,
+			.wq_state = IBV_WQS_RDY,
+		};
+		ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
+		if (ret) {
+			DRV_LOG(ERR,
+				"port %u Rx queue %u WQ state to IBV_WQS_RDY"
+				" failed", dev->data->port_id, idx);
+			rte_errno = ret;
+			goto error;
+		}
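+		/* Retrieve WQ buffer and doorbell record through Direct Verbs. */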
+		obj.rwq.in = tmpl->wq;
+		obj.rwq.out = &rwq;
+		ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ);
+		if (ret) {
+			rte_errno = ret;
+			goto error;
+		}
+		rxq_data->wqes = rwq.buf;
+		rxq_data->rq_db = rwq.dbrec;
+	} else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
+		struct mlx5_devx_modify_rq_attr rq_attr;
+
+		memset(&rq_attr, 0, sizeof(rq_attr));
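+		/* DevX flavor: create the RQ attached to the CQ created above. */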
+		tmpl->rq = mlx5_devx_rq_new(dev, idx, cq_info.cqn);
+		if (!tmpl->rq) {
+			DRV_LOG(ERR, "port %u Rx queue %u RQ creation failure",
+				dev->data->port_id, idx);
+			rte_errno = ENOMEM;
+			goto error;
+		}
+		/* Change queue state to ready. */
+		rq_attr.rq_state = MLX5_RQC_STATE_RST;
+		rq_attr.state = MLX5_RQC_STATE_RDY;
+		ret = mlx5_devx_cmd_modify_rq(tmpl->rq, &rq_attr);
+		if (ret)
+			goto error;
+	}
+	/* Fill the rings. */
+	rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
+	rxq_data->cq_db = cq_info.dbrec;
+	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
+	rxq_data->cq_uar = cq_info.cq_uar;
+	rxq_data->cqn = cq_info.cqn;
+	rxq_data->cq_arm_sn = 0;
+	mlx5_rxq_initialize(rxq_data);
+	rxq_data->cq_ci = 0;
+	DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
+		idx, (void *)&tmpl);
+	rte_atomic32_inc(&tmpl->refcnt);
+	LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
+	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
+	return tmpl;
+error:
+	if (tmpl) {
+		ret = rte_errno; /* Save rte_errno before cleanup. */
+		if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV && tmpl->wq)
+			claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
+		else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ && tmpl->rq)
+			claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
+		if (tmpl->cq)
+			claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
+		if (tmpl->channel)