diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index 0476d94846..9d29954261 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -4,7 +4,6 @@
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -15,69 +14,28 @@
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
 #include 
-#include 
 #include 
+#include 
+#include 
 #include 
 #include 
 
-/**
- * Register mr. Given protection domain pointer, pointer to addr and length
- * register the memory region.
- *
- * @param[in] pd
- *   Pointer to protection domain context.
- * @param[in] addr
- *   Pointer to memory start address.
- * @param[in] length
- *   Length of the memory to register.
- * @param[out] pmd_mr
- *   pmd_mr struct set with lkey, address, length and pointer to mr object
- *
- * @return
- *   0 on successful registration, -1 otherwise
- */
-static int
-mlx5_reg_mr(void *pd, void *addr, size_t length,
-	    struct mlx5_pmd_mr *pmd_mr)
-{
-	return mlx5_common_verbs_reg_mr(pd, addr, length, pmd_mr);
-}
-
-/**
- * Deregister mr. Given the mlx5 pmd MR - deregister the MR
- *
- * @param[in] pmd_mr
- *   pmd_mr struct set with lkey, address, length and pointer to mr object
- *
- */
-static void
-mlx5_dereg_mr(struct mlx5_pmd_mr *pmd_mr)
-{
-	mlx5_common_verbs_dereg_mr(pmd_mr);
-}
-
-/* verbs operations. */
-const struct mlx5_verbs_ops mlx5_verbs_ops = {
-	.reg_mr = mlx5_reg_mr,
-	.dereg_mr = mlx5_dereg_mr,
-};
-
 /**
  * Modify Rx WQ vlan stripping offload
  *
- * @param rxq_obj
- *   Rx queue object.
+ * @param rxq
+ *   Rx queue.
  *
  * @return 0 on success, non-0 otherwise
  */
 static int
-mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
+mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_priv *rxq, int on)
 {
 	uint16_t vlan_offloads =
 		(on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) |
@@ -89,47 +47,110 @@ mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
 		.flags = vlan_offloads,
 	};
 
-	return mlx5_glue->modify_wq(rxq_obj->wq, &mod);
+	return mlx5_glue->modify_wq(rxq->ctrl->obj->wq, &mod);
 }
 
 /**
  * Modifies the attributes for the specified WQ.
  *
- * @param rxq_obj
- *   Verbs Rx queue object.
+ * @param rxq
+ *   Verbs Rx queue.
+ * @param type
+ *   Type of change queue state.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-mlx5_ibv_modify_wq(struct mlx5_rxq_obj *rxq_obj, bool is_start)
+mlx5_ibv_modify_wq(struct mlx5_rxq_priv *rxq, uint8_t type)
 {
 	struct ibv_wq_attr mod = {
 		.attr_mask = IBV_WQ_ATTR_STATE,
-		.wq_state = is_start ? IBV_WQS_RDY : IBV_WQS_RESET,
+		.wq_state = (enum ibv_wq_state)type,
+	};
+
+	return mlx5_glue->modify_wq(rxq->ctrl->obj->wq, &mod);
+}
+
+/**
+ * Modify QP using Verbs API.
+ *
+ * @param txq_obj
+ *   Verbs Tx queue object.
+ * @param type
+ *   Type of change queue state.
+ * @param dev_port
+ *   IB device port number.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_ibv_modify_qp(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
+		   uint8_t dev_port)
+{
+	struct ibv_qp_attr mod = {
+		.qp_state = IBV_QPS_RESET,
+		.port_num = dev_port,
 	};
+	int attr_mask = (IBV_QP_STATE | IBV_QP_PORT);
+	int ret;
 
-	return mlx5_glue->modify_wq(rxq_obj->wq, &mod);
+	if (type != MLX5_TXQ_MOD_RST2RDY) {
+		ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE);
+		if (ret) {
+			DRV_LOG(ERR, "Cannot change Tx QP state to RESET %s",
+				strerror(errno));
+			rte_errno = errno;
+			return ret;
+		}
+		if (type == MLX5_TXQ_MOD_RDY2RST)
+			return 0;
+	}
+	if (type == MLX5_TXQ_MOD_ERR2RDY)
+		attr_mask = IBV_QP_STATE;
+	mod.qp_state = IBV_QPS_INIT;
+	ret = mlx5_glue->modify_qp(obj->qp, &mod, attr_mask);
+	if (ret) {
+		DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s",
+			strerror(errno));
+		rte_errno = errno;
+		return ret;
+	}
+	mod.qp_state = IBV_QPS_RTR;
+	ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE);
+	if (ret) {
+		DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s",
+			strerror(errno));
+		rte_errno = errno;
+		return ret;
+	}
+	mod.qp_state = IBV_QPS_RTS;
+	ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE);
+	if (ret) {
+		DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s",
+			strerror(errno));
+		rte_errno = errno;
+		return ret;
+	}
+	return 0;
 }
 
 /**
  * Create a CQ Verbs object.
  *
- * @param dev
- *   Pointer to Ethernet device.
- * @param idx
- *   Queue index in DPDK Rx queue array.
+ * @param rxq
+ *   Pointer to Rx queue.
  *
 * @return
  *   The Verbs CQ object initialized, NULL otherwise and rte_errno is set.
  */
 static struct ibv_cq *
-mlx5_rxq_ibv_cq_create(struct rte_eth_dev *dev, uint16_t idx)
+mlx5_rxq_ibv_cq_create(struct mlx5_rxq_priv *rxq)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
-	struct mlx5_rxq_ctrl *rxq_ctrl =
-		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+	struct mlx5_priv *priv = rxq->priv;
+	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
+	struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
 	struct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;
 	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
 	struct {
@@ -148,13 +169,22 @@ mlx5_rxq_ibv_cq_create(struct rte_eth_dev *dev, uint16_t idx)
 	if (priv->config.cqe_comp && !rxq_data->hw_timestamp) {
 		cq_attr.mlx5.comp_mask |=
 				MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
+		rxq_data->byte_mask = UINT32_MAX;
 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
-		cq_attr.mlx5.cqe_comp_res_format =
-			mlx5_rxq_mprq_enabled(rxq_data) ?
-			MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
-			MLX5DV_CQE_RES_FORMAT_HASH;
+		if (mlx5_rxq_mprq_enabled(rxq_data)) {
+			cq_attr.mlx5.cqe_comp_res_format =
+				MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX;
+			rxq_data->mcqe_format =
+				MLX5_CQE_RESP_FORMAT_CSUM_STRIDX;
+		} else {
+			cq_attr.mlx5.cqe_comp_res_format =
+				MLX5DV_CQE_RES_FORMAT_HASH;
+			rxq_data->mcqe_format =
+				MLX5_CQE_RESP_FORMAT_HASH;
+		}
 #else
 		cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
+		rxq_data->mcqe_format = MLX5_CQE_RESP_FORMAT_HASH;
 #endif
 		/*
 		 * For vectorized Rx, it must not be doubled in order to
@@ -166,37 +196,35 @@ mlx5_rxq_ibv_cq_create(struct rte_eth_dev *dev, uint16_t idx)
 		DRV_LOG(DEBUG,
 			"Port %u Rx CQE compression is disabled for HW"
 			" timestamp.",
-			dev->data->port_id);
+			priv->dev_data->port_id);
 	}
 #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
-	if (priv->config.cqe_pad) {
+	if (RTE_CACHE_LINE_SIZE == 128) {
 		cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
 		cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
 	}
 #endif
-	return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx,
-							      &cq_attr.ibv,
-							      &cq_attr.mlx5));
+	return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq
+							(priv->sh->cdev->ctx,
+							 &cq_attr.ibv,
+							 &cq_attr.mlx5));
 }
 
 /**
  * Create a WQ Verbs object.
  *
- * @param dev
- *   Pointer to Ethernet device.
- * @param idx
- *   Queue index in DPDK Rx queue array.
+ * @param rxq
+ *   Pointer to Rx queue.
 *
 * @return
 *   The Verbs WQ object initialized, NULL otherwise and rte_errno is set.
 */
 static struct ibv_wq *
-mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)
+mlx5_rxq_ibv_wq_create(struct mlx5_rxq_priv *rxq)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
-	struct mlx5_rxq_ctrl *rxq_ctrl =
-		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+	struct mlx5_priv *priv = rxq->priv;
+	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
+	struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
 	struct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;
 	unsigned int wqe_n = 1 << rxq_data->elts_n;
 	struct {
@@ -213,7 +241,7 @@ mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)
 		.max_wr = wqe_n >> rxq_data->sges_n,
 		/* Max number of scatter/gather elements in a WR. */
 		.max_sge = 1 << rxq_data->sges_n,
-		.pd = priv->sh->pd,
+		.pd = priv->sh->cdev->pd,
 		.cq = rxq_obj->ibv_cq,
 		.comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,
 		.create_flags = (rxq_data->vlan_strip ?
@@ -248,10 +276,10 @@ mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)
 			.two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
 		};
 	}
-	rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv,
+	rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->cdev->ctx, &wq_attr.ibv,
 					      &wq_attr.mlx5);
 #else
-	rxq_obj->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv);
+	rxq_obj->wq = mlx5_glue->create_wq(priv->sh->cdev->ctx, &wq_attr.ibv);
 #endif
 	if (rxq_obj->wq) {
 		/*
@@ -263,7 +291,7 @@ mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)
 			DRV_LOG(ERR,
 				"Port %u Rx queue %u requested %u*%u but got"
 				" %u*%u WRs*SGEs.",
-				dev->data->port_id, idx,
+				priv->dev_data->port_id, rxq->idx,
 				wqe_n >> rxq_data->sges_n,
 				(1 << rxq_data->sges_n),
 				wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
@@ -278,21 +306,20 @@ mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)
 /**
  * Create the Rx queue Verbs object.
  *
- * @param dev
- *   Pointer to Ethernet device.
- * @param idx
- *   Queue index in DPDK Rx queue array.
+ * @param rxq
+ *   Pointer to Rx queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
 static int
-mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
+mlx5_rxq_ibv_obj_new(struct mlx5_rxq_priv *rxq)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
-	struct mlx5_rxq_ctrl *rxq_ctrl =
-		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+	uint16_t idx = rxq->idx;
+	struct mlx5_priv *priv = rxq->priv;
+	uint16_t port_id = priv->dev_data->port_id;
+	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
+	struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
 	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
 	struct mlx5dv_cq cq_info;
 	struct mlx5dv_rwq rwq;
@@ -301,26 +328,23 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 
 	MLX5_ASSERT(rxq_data);
 	MLX5_ASSERT(tmpl);
-	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
-	priv->verbs_alloc_ctx.obj = rxq_ctrl;
-	tmpl->type = MLX5_RXQ_OBJ_TYPE_IBV;
 	tmpl->rxq_ctrl = rxq_ctrl;
 	if (rxq_ctrl->irq) {
 		tmpl->ibv_channel =
-				mlx5_glue->create_comp_channel(priv->sh->ctx);
+			mlx5_glue->create_comp_channel(priv->sh->cdev->ctx);
 		if (!tmpl->ibv_channel) {
 			DRV_LOG(ERR, "Port %u: comp channel creation failure.",
-				dev->data->port_id);
+				port_id);
 			rte_errno = ENOMEM;
 			goto error;
 		}
 		tmpl->fd = ((struct ibv_comp_channel *)(tmpl->ibv_channel))->fd;
 	}
 	/* Create CQ using Verbs API. */
-	tmpl->ibv_cq = mlx5_rxq_ibv_cq_create(dev, idx);
+	tmpl->ibv_cq = mlx5_rxq_ibv_cq_create(rxq);
 	if (!tmpl->ibv_cq) {
 		DRV_LOG(ERR, "Port %u Rx queue %u CQ creation failure.",
-			dev->data->port_id, idx);
+			port_id, idx);
 		rte_errno = ENOMEM;
 		goto error;
 	}
@@ -335,7 +359,7 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 		DRV_LOG(ERR,
 			"Port %u wrong MLX5_CQE_SIZE environment "
 			"variable value: it should be set to %u.",
-			dev->data->port_id, RTE_CACHE_LINE_SIZE);
+			port_id, RTE_CACHE_LINE_SIZE);
 		rte_errno = EINVAL;
 		goto error;
 	}
@@ -346,19 +370,19 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 	rxq_data->cq_uar = cq_info.cq_uar;
 	rxq_data->cqn = cq_info.cqn;
 	/* Create WQ (RQ) using Verbs API. */
-	tmpl->wq = mlx5_rxq_ibv_wq_create(dev, idx);
+	tmpl->wq = mlx5_rxq_ibv_wq_create(rxq);
 	if (!tmpl->wq) {
 		DRV_LOG(ERR, "Port %u Rx queue %u WQ creation failure.",
-			dev->data->port_id, idx);
+			port_id, idx);
 		rte_errno = ENOMEM;
 		goto error;
 	}
 	/* Change queue state to ready. */
-	ret = mlx5_ibv_modify_wq(tmpl, true);
+	ret = mlx5_ibv_modify_wq(rxq, IBV_WQS_RDY);
 	if (ret) {
 		DRV_LOG(ERR,
 			"Port %u Rx queue %u WQ state to IBV_WQS_RDY failed.",
-			dev->data->port_id, idx);
+			port_id, idx);
 		rte_errno = ret;
 		goto error;
 	}
@@ -374,8 +398,7 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 	rxq_data->cq_arm_sn = 0;
 	mlx5_rxq_initialize(rxq_data);
 	rxq_data->cq_ci = 0;
-	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
-	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
+	priv->dev_data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
 	rxq_ctrl->wqn = ((struct ibv_wq *)(tmpl->wq))->wq_num;
 	return 0;
 error:
@@ -387,27 +410,30 @@ error:
 	if (tmpl->ibv_channel)
 		claim_zero(mlx5_glue->destroy_comp_channel(tmpl->ibv_channel));
 	rte_errno = ret; /* Restore rte_errno. */
-	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
 	return -rte_errno;
 }
 
 /**
  * Release an Rx verbs queue object.
  *
- * @param rxq_obj
- *   Verbs Rx queue object.
+ * @param rxq
+ *   Pointer to Rx queue.
 */
 static void
-mlx5_rxq_ibv_obj_release(struct mlx5_rxq_obj *rxq_obj)
+mlx5_rxq_ibv_obj_release(struct mlx5_rxq_priv *rxq)
 {
-	MLX5_ASSERT(rxq_obj);
-	MLX5_ASSERT(rxq_obj->wq);
-	MLX5_ASSERT(rxq_obj->ibv_cq);
+	struct mlx5_rxq_obj *rxq_obj = rxq->ctrl->obj;
+
+	if (rxq_obj == NULL || rxq_obj->wq == NULL)
+		return;
 	claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
+	rxq_obj->wq = NULL;
+	MLX5_ASSERT(rxq_obj->ibv_cq);
 	claim_zero(mlx5_glue->destroy_cq(rxq_obj->ibv_cq));
 	if (rxq_obj->ibv_channel)
 		claim_zero(mlx5_glue->destroy_comp_channel
 							(rxq_obj->ibv_channel));
+	rxq->ctrl->started = false;
 }
 
 /**
@@ -462,22 +488,22 @@ mlx5_ibv_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
 
 	MLX5_ASSERT(ind_tbl);
 	for (i = 0; i != ind_tbl->queues_n; ++i) {
-		struct mlx5_rxq_data *rxq = (*priv->rxqs)[ind_tbl->queues[i]];
-		struct mlx5_rxq_ctrl *rxq_ctrl =
-				container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev,
+							 ind_tbl->queues[i]);
 
-		wq[i] = rxq_ctrl->obj->wq;
+		wq[i] = rxq->ctrl->obj->wq;
 	}
 	MLX5_ASSERT(i > 0);
 	/* Finalise indirection table. */
 	for (j = 0; i != (unsigned int)(1 << log_n); ++j, ++i)
 		wq[i] = wq[j];
-	ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table(priv->sh->ctx,
-					&(struct ibv_rwq_ind_table_init_attr){
-						.log_ind_tbl_size = log_n,
-						.ind_tbl = wq,
-						.comp_mask = 0,
-					});
+	ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
+					(priv->sh->cdev->ctx,
+					 &(struct ibv_rwq_ind_table_init_attr){
+						.log_ind_tbl_size = log_n,
+						.ind_tbl = wq,
+						.comp_mask = 0,
+					 });
 	if (!ind_tbl->ind_table) {
 		rte_errno = errno;
 		return -rte_errno;
@@ -539,7 +565,7 @@ mlx5_ibv_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
 	}
 #endif
 	qp = mlx5_glue->dv_create_qp
-		(priv->sh->ctx,
+		(priv->sh->cdev->ctx,
 		 &(struct ibv_qp_init_attr_ex){
 			.qp_type = IBV_QPT_RAW_PACKET,
 			.comp_mask =
@@ -555,12 +581,12 @@ mlx5_ibv_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
 				.rx_hash_fields_mask = hash_fields,
 			},
 			.rwq_ind_tbl = ind_tbl->ind_table,
-			.pd = priv->sh->pd,
+			.pd = priv->sh->cdev->pd,
 		 },
 		 &qp_init_attr);
 #else
 	qp = mlx5_glue->create_qp_ex
-		(priv->sh->ctx,
+		(priv->sh->cdev->ctx,
 		 &(struct ibv_qp_init_attr_ex){
 			.qp_type = IBV_QPT_RAW_PACKET,
 			.comp_mask =
@@ -576,7 +602,7 @@ mlx5_ibv_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
 				.rx_hash_fields_mask = hash_fields,
 			},
 			.rwq_ind_tbl = ind_tbl->ind_table,
-			.pd = priv->sh->pd,
+			.pd = priv->sh->cdev->pd,
 		 });
 #endif
 	if (!qp) {
@@ -622,12 +648,24 @@ static void
 mlx5_rxq_ibv_obj_drop_release(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
+	struct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;
+	struct mlx5_rxq_obj *rxq_obj;
 
-	if (rxq->wq)
-		claim_zero(mlx5_glue->destroy_wq(rxq->wq));
-	if (rxq->ibv_cq)
-		claim_zero(mlx5_glue->destroy_cq(rxq->ibv_cq));
+	if (rxq == NULL)
+		return;
+	if (rxq->ctrl == NULL)
+		goto free_priv;
+	rxq_obj = rxq->ctrl->obj;
+	if (rxq_obj == NULL)
+		goto free_ctrl;
+	if (rxq_obj->wq)
+		claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
+	if (rxq_obj->ibv_cq)
+		claim_zero(mlx5_glue->destroy_cq(rxq_obj->ibv_cq));
+	mlx5_free(rxq_obj);
+free_ctrl:
+	mlx5_free(rxq->ctrl);
+free_priv:
 	mlx5_free(rxq);
 	priv->drop_queue.rxq = NULL;
 }
@@ -645,40 +683,59 @@ static int
 mlx5_rxq_ibv_obj_drop_create(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct ibv_context *ctx = priv->sh->ctx;
-	struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
+	struct ibv_context *ctx = priv->sh->cdev->ctx;
+	struct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;
+	struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
+	struct mlx5_rxq_obj *rxq_obj = NULL;
 
-	if (rxq)
+	if (rxq != NULL)
 		return 0;
 	rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, SOCKET_ID_ANY);
-	if (!rxq) {
-		DEBUG("Port %u cannot allocate drop Rx queue memory.",
+	if (rxq == NULL) {
+		DRV_LOG(DEBUG, "Port %u cannot allocate drop Rx queue memory.",
 			dev->data->port_id);
 		rte_errno = ENOMEM;
 		return -rte_errno;
 	}
 	priv->drop_queue.rxq = rxq;
-	rxq->ibv_cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
-	if (!rxq->ibv_cq) {
-		DEBUG("Port %u cannot allocate CQ for drop queue.",
+	rxq_ctrl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_ctrl), 0,
+			       SOCKET_ID_ANY);
+	if (rxq_ctrl == NULL) {
+		DRV_LOG(DEBUG, "Port %u cannot allocate drop Rx queue control memory.",
+			dev->data->port_id);
+		rte_errno = ENOMEM;
+		goto error;
+	}
+	rxq->ctrl = rxq_ctrl;
+	rxq_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_obj), 0,
+			      SOCKET_ID_ANY);
+	if (rxq_obj == NULL) {
+		DRV_LOG(DEBUG, "Port %u cannot allocate drop Rx queue memory.",
+			dev->data->port_id);
+		rte_errno = ENOMEM;
+		goto error;
+	}
+	rxq_ctrl->obj = rxq_obj;
+	rxq_obj->ibv_cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
+	if (!rxq_obj->ibv_cq) {
+		DRV_LOG(DEBUG, "Port %u cannot allocate CQ for drop queue.",
 			dev->data->port_id);
 		rte_errno = errno;
 		goto error;
 	}
-	rxq->wq = mlx5_glue->create_wq(ctx, &(struct ibv_wq_init_attr){
+	rxq_obj->wq = mlx5_glue->create_wq(ctx, &(struct ibv_wq_init_attr){
 						    .wq_type = IBV_WQT_RQ,
 						    .max_wr = 1,
 						    .max_sge = 1,
-						    .pd = priv->sh->pd,
-						    .cq = rxq->ibv_cq,
+						    .pd = priv->sh->cdev->pd,
+						    .cq = rxq_obj->ibv_cq,
 					      });
-	if (!rxq->wq) {
-		DEBUG("Port %u cannot allocate WQ for drop queue.",
+	if (!rxq_obj->wq) {
+		DRV_LOG(DEBUG, "Port %u cannot allocate WQ for drop queue.",
 			dev->data->port_id);
 		rte_errno = errno;
 		goto error;
 	}
-	priv->drop_queue.rxq = rxq;
 	return 0;
 error:
 	mlx5_rxq_ibv_obj_drop_release(dev);
@@ -707,21 +764,22 @@ mlx5_ibv_drop_action_create(struct rte_eth_dev *dev)
 	ret = mlx5_rxq_ibv_obj_drop_create(dev);
 	if (ret < 0)
 		goto error;
-	rxq = priv->drop_queue.rxq;
+	rxq = priv->drop_queue.rxq->ctrl->obj;
 	ind_tbl = mlx5_glue->create_rwq_ind_table
-				(priv->sh->ctx,
+				(priv->sh->cdev->ctx,
 				 &(struct ibv_rwq_ind_table_init_attr){
 					.log_ind_tbl_size = 0,
 					.ind_tbl = (struct ibv_wq **)&rxq->wq,
 					.comp_mask = 0,
 				 });
 	if (!ind_tbl) {
-		DEBUG("Port %u cannot allocate indirection table for drop"
-		      " queue.", dev->data->port_id);
+		DRV_LOG(DEBUG, "Port %u"
+			" cannot allocate indirection table for drop queue.",
+			dev->data->port_id);
 		rte_errno = errno;
 		goto error;
 	}
-	hrxq->qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
+	hrxq->qp = mlx5_glue->create_qp_ex(priv->sh->cdev->ctx,
 		&(struct ibv_qp_init_attr_ex){
 			.qp_type = IBV_QPT_RAW_PACKET,
 			.comp_mask = IBV_QP_INIT_ATTR_PD |
@@ -734,10 +792,10 @@ mlx5_ibv_drop_action_create(struct rte_eth_dev *dev)
 				.rx_hash_fields_mask = 0,
 			},
 			.rwq_ind_tbl = ind_tbl,
-			.pd = priv->sh->pd
+			.pd = priv->sh->cdev->pd
 		});
 	if (!hrxq->qp) {
-		DEBUG("Port %u cannot allocate QP for drop queue.",
+		DRV_LOG(DEBUG, "Port %u cannot allocate QP for drop queue.",
 			dev->data->port_id);
 		rte_errno = errno;
 		goto error;
@@ -822,7 +880,7 @@ mlx5_txq_ibv_qp_create(struct rte_eth_dev *dev, uint16_t idx)
 	qp_attr.qp_type = IBV_QPT_RAW_PACKET,
 	/* Do *NOT* enable this, completions events are managed per Tx burst. */
 	qp_attr.sq_sig_all = 0;
-	qp_attr.pd = priv->sh->pd;
+	qp_attr.pd = priv->sh->cdev->pd;
 	qp_attr.comp_mask = IBV_QP_INIT_ATTR_PD;
 	if (txq_data->inlen_send)
 		qp_attr.cap.max_inline_data = txq_ctrl->max_inline_data;
@@ -830,7 +888,7 @@ mlx5_txq_ibv_qp_create(struct rte_eth_dev *dev, uint16_t idx)
 		qp_attr.max_tso_header = txq_ctrl->max_tso_header;
 		qp_attr.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
 	}
-	qp_obj = mlx5_glue->create_qp_ex(priv->sh->ctx, &qp_attr);
+	qp_obj = mlx5_glue->create_qp_ex(priv->sh->cdev->ctx, &qp_attr);
 	if (qp_obj == NULL) {
 		DRV_LOG(ERR, "Port %u Tx queue %u QP creation failure.",
 			dev->data->port_id, idx);
@@ -858,7 +916,6 @@ mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 	struct mlx5_txq_ctrl *txq_ctrl =
 		container_of(txq_data, struct mlx5_txq_ctrl, txq);
 	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
-	struct ibv_qp_attr mod;
 	unsigned int cqe_n;
 	struct mlx5dv_qp qp;
 	struct mlx5dv_cq cq_info;
@@ -868,10 +925,7 @@ mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 
 	MLX5_ASSERT(txq_data);
 	MLX5_ASSERT(txq_obj);
-	txq_obj->type = MLX5_TXQ_OBJ_TYPE_IBV;
 	txq_obj->txq_ctrl = txq_ctrl;
-	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
-	priv->verbs_alloc_ctx.obj = txq_ctrl;
 	if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
 		DRV_LOG(ERR, "Port %u MLX5_ENABLE_CQE_COMPRESSION "
 			"must never be set.", dev->data->port_id);
@@ -880,7 +934,8 @@ mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 	}
 	cqe_n = desc / MLX5_TX_COMP_THRESH +
 		1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
-	txq_obj->cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);
+	txq_obj->cq = mlx5_glue->create_cq(priv->sh->cdev->ctx, cqe_n,
+					   NULL, NULL, 0);
 	if (txq_obj->cq == NULL) {
 		DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
 			dev->data->port_id, idx);
@@ -892,37 +947,10 @@ mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 		rte_errno = errno;
 		goto error;
 	}
-	mod = (struct ibv_qp_attr){
-		/* Move the QP to this state. */
-		.qp_state = IBV_QPS_INIT,
-		/* IB device port number. */
-		.port_num = (uint8_t)priv->dev_port,
-	};
-	ret = mlx5_glue->modify_qp(txq_obj->qp, &mod,
-				   (IBV_QP_STATE | IBV_QP_PORT));
+	ret = mlx5_ibv_modify_qp(txq_obj, MLX5_TXQ_MOD_RST2RDY,
+				 (uint8_t)priv->dev_port);
 	if (ret) {
-		DRV_LOG(ERR,
-			"Port %u Tx queue %u QP state to IBV_QPS_INIT failed.",
-			dev->data->port_id, idx);
-		rte_errno = errno;
-		goto error;
-	}
-	mod = (struct ibv_qp_attr){
-		.qp_state = IBV_QPS_RTR
-	};
-	ret = mlx5_glue->modify_qp(txq_obj->qp, &mod, IBV_QP_STATE);
-	if (ret) {
-		DRV_LOG(ERR,
-			"Port %u Tx queue %u QP state to IBV_QPS_RTR failed.",
-			dev->data->port_id, idx);
-		rte_errno = errno;
-		goto error;
-	}
-	mod.qp_state = IBV_QPS_RTS;
-	ret = mlx5_glue->modify_qp(txq_obj->qp, &mod, IBV_QP_STATE);
-	if (ret) {
-		DRV_LOG(ERR,
-			"Port %u Tx queue %u QP state to IBV_QPS_RTS failed.",
+		DRV_LOG(ERR, "Port %u Tx queue %u QP state modifying failed.",
 			dev->data->port_id, idx);
 		rte_errno = errno;
 		goto error;
@@ -1003,7 +1031,7 @@ mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 		goto error;
 	}
 	txq_uar_init(txq_ctrl);
-	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
+	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
 	return 0;
 error:
 	ret = rte_errno; /* Save rte_errno before cleanup. */
@@ -1011,11 +1039,129 @@ error:
 	claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
 	if (txq_obj->qp)
 		claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
-	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
 	rte_errno = ret; /* Restore rte_errno. */
 	return -rte_errno;
 }
 
+/*
+ * Create the dummy QP with minimal resources for loopback.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_rxq_ibv_obj_dummy_lb_create(struct rte_eth_dev *dev)
+{
+#if defined(HAVE_IBV_DEVICE_TUNNEL_SUPPORT) && defined(HAVE_IBV_FLOW_DV_SUPPORT)
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
+	struct ibv_context *ctx = sh->cdev->ctx;
+	struct mlx5dv_qp_init_attr qp_init_attr = {0};
+	struct {
+		struct ibv_cq_init_attr_ex ibv;
+		struct mlx5dv_cq_init_attr mlx5;
+	} cq_attr = {{0}};
+
+	if (dev->data->dev_conf.lpbk_mode) {
+		/* Allow packet sent from NIC loop back w/o source MAC check. */
+		qp_init_attr.comp_mask |=
+				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
+		qp_init_attr.create_flags |=
+				MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
+	} else {
+		return 0;
+	}
+	/* Only need to check refcnt, 0 after "sh" is allocated. */
+	if (!!(__atomic_fetch_add(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
+		MLX5_ASSERT(sh->self_lb.ibv_cq && sh->self_lb.qp);
+		priv->lb_used = 1;
+		return 0;
+	}
+	cq_attr.ibv = (struct ibv_cq_init_attr_ex){
+		.cqe = 1,
+		.channel = NULL,
+		.comp_mask = 0,
+	};
+	cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
+		.comp_mask = 0,
+	};
+	/* Only CQ is needed, no WQ(RQ) is required in this case. */
+	sh->self_lb.ibv_cq = mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(ctx,
+							&cq_attr.ibv,
+							&cq_attr.mlx5));
+	if (!sh->self_lb.ibv_cq) {
+		DRV_LOG(ERR, "Port %u cannot allocate CQ for loopback.",
+			dev->data->port_id);
+		rte_errno = errno;
+		goto error;
+	}
+	sh->self_lb.qp = mlx5_glue->dv_create_qp(ctx,
+				&(struct ibv_qp_init_attr_ex){
+					.qp_type = IBV_QPT_RAW_PACKET,
+					.comp_mask = IBV_QP_INIT_ATTR_PD,
+					.pd = sh->cdev->pd,
+					.send_cq = sh->self_lb.ibv_cq,
+					.recv_cq = sh->self_lb.ibv_cq,
+					.cap.max_recv_wr = 1,
+				},
+				&qp_init_attr);
+	if (!sh->self_lb.qp) {
+		DRV_LOG(DEBUG, "Port %u cannot allocate QP for loopback.",
+			dev->data->port_id);
+		rte_errno = errno;
+		goto error;
+	}
+	priv->lb_used = 1;
+	return 0;
+error:
+	if (sh->self_lb.ibv_cq) {
+		claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
+		sh->self_lb.ibv_cq = NULL;
+	}
+	(void)__atomic_sub_fetch(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED);
+	return -rte_errno;
+#else
+	RTE_SET_USED(dev);
+	return 0;
+#endif
+}
+
+/*
+ * Release the dummy queue resources for loopback.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_rxq_ibv_obj_dummy_lb_release(struct rte_eth_dev *dev)
+{
+#if defined(HAVE_IBV_DEVICE_TUNNEL_SUPPORT) && defined(HAVE_IBV_FLOW_DV_SUPPORT)
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
+
+	if (!priv->lb_used)
+		return;
+	MLX5_ASSERT(__atomic_load_n(&sh->self_lb.refcnt, __ATOMIC_RELAXED));
+	if (!(__atomic_sub_fetch(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
+		if (sh->self_lb.qp) {
+			claim_zero(mlx5_glue->destroy_qp(sh->self_lb.qp));
+			sh->self_lb.qp = NULL;
+		}
+		if (sh->self_lb.ibv_cq) {
+			claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
+			sh->self_lb.ibv_cq = NULL;
+		}
+	}
+	priv->lb_used = 0;
+#else
+	RTE_SET_USED(dev);
+	return;
+#endif
+}
+
 /**
  * Release an Tx verbs queue object.
  *
@@ -1043,5 +1189,8 @@ struct mlx5_obj_ops ibv_obj_ops = {
 	.drop_action_create = mlx5_ibv_drop_action_create,
 	.drop_action_destroy = mlx5_ibv_drop_action_destroy,
 	.txq_obj_new = mlx5_txq_ibv_obj_new,
+	.txq_obj_modify = mlx5_ibv_modify_qp,
 	.txq_obj_release = mlx5_txq_ibv_obj_release,
+	.lb_dummy_queue_create = NULL,
+	.lb_dummy_queue_release = NULL,
 };
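
A companion illustration, not part of the patch: mlx5_ibv_modify_qp() above folds the three open-coded mlx5_glue->modify_qp() call sites of the old mlx5_txq_ibv_obj_new() into one helper, which the ops table also exports as ibv_obj_ops.txq_obj_modify, so a single entry point now serves start (MLX5_TXQ_MOD_RST2RDY), stop (MLX5_TXQ_MOD_RDY2RST) and error recovery (MLX5_TXQ_MOD_ERR2RDY). The sketch below shows the underlying Verbs state walk for the RST2RDY case written against plain libibverbs; the wrapper name raw_qp_rst2rts is hypothetical and only illustrates the RESET -> INIT -> RTR -> RTS sequence driven by the hunks above.

/*
 * Sketch only: the libibverbs transitions behind MLX5_TXQ_MOD_RST2RDY.
 * raw_qp_rst2rts() is a hypothetical name, not a DPDK symbol.
 */
#include <stdint.h>
#include <infiniband/verbs.h>

static int
raw_qp_rst2rts(struct ibv_qp *qp, uint8_t port)
{
	struct ibv_qp_attr attr = {
		.qp_state = IBV_QPS_INIT,
		.port_num = port,
	};
	int ret;

	/* RESET -> INIT: the IB port must be set together with the state. */
	ret = ibv_modify_qp(qp, &attr, IBV_QP_STATE | IBV_QP_PORT);
	if (ret)
		return ret;
	/* INIT -> RTR: a raw packet QP needs no address vector here. */
	attr.qp_state = IBV_QPS_RTR;
	ret = ibv_modify_qp(qp, &attr, IBV_QP_STATE);
	if (ret)
		return ret;
	/* RTR -> RTS: the QP may post send WRs from this point on. */
	attr.qp_state = IBV_QPS_RTS;
	return ibv_modify_qp(qp, &attr, IBV_QP_STATE);
}

Because a raw packet QP only needs the state bit (plus the port on the INIT step) in the attribute mask, the patch can reuse one struct ibv_qp_attr across all transitions and branch on the MLX5_TXQ_MOD_* type instead of duplicating the walk per call site.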