X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_devx.c;h=91243f684fb08d61dc8fedf87ae2fe07d452934b;hb=8e83ba285abe4341b7666927d3fc265b35446c06;hp=424f77be79036210f6373b1ce07e5cea3438fee3;hpb=bc5bee028ebc79c86dd846e9b661e0541e5da23c;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c index 424f77be79..91243f684f 100644 --- a/drivers/net/mlx5/mlx5_devx.c +++ b/drivers/net/mlx5/mlx5_devx.c @@ -30,14 +30,16 @@ /** * Modify RQ vlan stripping offload * - * @param rxq_obj - * Rx queue object. + * @param rxq + * Rx queue. + * @param on + * Enable/disable VLAN stripping. * * @return * 0 on success, non-0 otherwise */ static int -mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on) +mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_priv *rxq, int on) { struct mlx5_devx_modify_rq_attr rq_attr; @@ -46,14 +48,14 @@ mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on) rq_attr.state = MLX5_RQC_STATE_RDY; rq_attr.vsd = (on ? 0 : 1); rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD; - return mlx5_devx_cmd_modify_rq(rxq_obj->rq_obj.rq, &rq_attr); + return mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr); } /** * Modify RQ using DevX API. * - * @param rxq_obj - * DevX Rx queue object. + * @param rxq + * DevX rx queue. * @param type * Type of change queue state. * @@ -61,7 +63,7 @@ mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on) * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, uint8_t type) +mlx5_devx_modify_rq(struct mlx5_rxq_priv *rxq, uint8_t type) { struct mlx5_devx_modify_rq_attr rq_attr; @@ -86,7 +88,9 @@ mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, uint8_t type) default: break; } - return mlx5_devx_cmd_modify_rq(rxq_obj->rq_obj.rq, &rq_attr); + if (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) + return mlx5_devx_cmd_modify_rq(rxq->ctrl->obj->rq, &rq_attr); + return mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr); } /** @@ -145,43 +149,39 @@ mlx5_txq_devx_modify(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type, return 0; } -/** - * Destroy the Rx queue DevX object. - * - * @param rxq_obj - * Rxq object to destroy. - */ -static void -mlx5_rxq_release_devx_resources(struct mlx5_rxq_obj *rxq_obj) -{ - mlx5_devx_rq_destroy(&rxq_obj->rq_obj); - memset(&rxq_obj->rq_obj, 0, sizeof(rxq_obj->rq_obj)); - mlx5_devx_cq_destroy(&rxq_obj->cq_obj); - memset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj)); -} - /** * Release an Rx DevX queue object. * - * @param rxq_obj - * DevX Rx queue object. + * @param rxq + * DevX Rx queue. 
*/ static void -mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj) +mlx5_rxq_devx_obj_release(struct mlx5_rxq_priv *rxq) { - MLX5_ASSERT(rxq_obj); + struct mlx5_rxq_obj *rxq_obj = rxq->ctrl->obj; + + if (rxq_obj == NULL) + return; if (rxq_obj->rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) { - MLX5_ASSERT(rxq_obj->rq); - mlx5_devx_modify_rq(rxq_obj, MLX5_RXQ_MOD_RDY2RST); + if (rxq_obj->rq == NULL) + return; + mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RDY2RST); claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq)); } else { - MLX5_ASSERT(rxq_obj->cq_obj.cq); - MLX5_ASSERT(rxq_obj->rq_obj.rq); - mlx5_rxq_release_devx_resources(rxq_obj); - if (rxq_obj->devx_channel) + if (rxq->devx_rq.rq == NULL) + return; + mlx5_devx_rq_destroy(&rxq->devx_rq); + if (rxq->devx_rq.rmp != NULL && rxq->devx_rq.rmp->ref_cnt > 0) + return; + mlx5_devx_cq_destroy(&rxq_obj->cq_obj); + memset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj)); + if (rxq_obj->devx_channel) { mlx5_os_devx_destroy_event_channel (rxq_obj->devx_channel); + rxq_obj->devx_channel = NULL; + } } + rxq->ctrl->started = false; } /** @@ -224,22 +224,19 @@ mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj) /** * Create a RQ object using DevX. * - * @param dev - * Pointer to Ethernet device. - * @param rxq_data - * RX queue data. + * @param rxq + * Pointer to Rx queue. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, - struct mlx5_rxq_data *rxq_data) +mlx5_rxq_create_devx_rq_resources(struct mlx5_rxq_priv *rxq) { - struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = rxq->priv; struct mlx5_common_device *cdev = priv->sh->cdev; - struct mlx5_rxq_ctrl *rxq_ctrl = - container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); + struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl; + struct mlx5_rxq_data *rxq_data = &rxq->ctrl->rxq; struct mlx5_devx_create_rq_attr rq_attr = { 0 }; uint16_t log_desc_n = rxq_data->elts_n - rxq_data->sges_n; uint32_t wqe_size, log_wqe_size; @@ -260,11 +257,11 @@ mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, * 512*2^single_wqe_log_num_of_strides. */ rq_attr.wq_attr.single_wqe_log_num_of_strides = - rxq_data->strd_num_n - + rxq_data->log_strd_num - MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES; /* Stride size = (2^single_stride_log_num_of_bytes)*64B. */ rq_attr.wq_attr.single_stride_log_num_of_bytes = - rxq_data->strd_sz_n - + rxq_data->log_strd_sz - MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES; wqe_size = sizeof(struct mlx5_wqe_mprq); } else { @@ -280,37 +277,41 @@ mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, MLX5_WQ_END_PAD_MODE_NONE; rq_attr.wq_attr.pd = cdev->pdn; rq_attr.counter_set_id = priv->counter_set_id; + rq_attr.delay_drop_en = rxq_data->delay_drop; + rq_attr.user_index = rte_cpu_to_be_16(priv->dev_data->port_id); + if (rxq_data->shared) /* Create RMP based RQ. */ + rxq->devx_rq.rmp = &rxq_ctrl->obj->devx_rmp; /* Create RQ using DevX API. */ - return mlx5_devx_rq_create(cdev->ctx, &rxq_ctrl->obj->rq_obj, wqe_size, + return mlx5_devx_rq_create(cdev->ctx, &rxq->devx_rq, wqe_size, log_desc_n, &rq_attr, rxq_ctrl->socket); } /** * Create a DevX CQ object for an Rx queue. * - * @param dev - * Pointer to Ethernet device. - * @param rxq_data - * RX queue data. + * @param rxq + * Pointer to Rx queue. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int -mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, - struct mlx5_rxq_data *rxq_data) +mlx5_rxq_create_devx_cq_resources(struct mlx5_rxq_priv *rxq) { struct mlx5_devx_cq *cq_obj = 0; struct mlx5_devx_cq_attr cq_attr = { 0 }; - struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = rxq->priv; struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_rxq_ctrl *rxq_ctrl = - container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); + uint16_t port_id = priv->dev_data->port_id; + struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl; + struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq; unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data); uint32_t log_cqe_n; uint16_t event_nums[1] = { 0 }; int ret = 0; + if (rxq_ctrl->started) + return 0; if (priv->config.cqe_comp && !rxq_data->hw_timestamp && !rxq_data->lro) { cq_attr.cqe_comp_en = 1u; @@ -346,7 +347,7 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, } DRV_LOG(DEBUG, "Port %u Rx CQE compression is enabled, format %d.", - dev->data->port_id, priv->config.cqe_comp_fmt); + port_id, priv->config.cqe_comp_fmt); /* * For vectorized Rx, it must not be doubled in order to * make cq_ci and rq_ci aligned. @@ -355,15 +356,14 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, cqe_n *= 2; } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) { DRV_LOG(DEBUG, - "Port %u Rx CQE compression is disabled for HW" - " timestamp.", - dev->data->port_id); + "Port %u Rx CQE compression is disabled for HW timestamp.", + port_id); } else if (priv->config.cqe_comp && rxq_data->lro) { DRV_LOG(DEBUG, "Port %u Rx CQE compression is disabled for LRO.", - dev->data->port_id); + port_id); } - cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->devx_rx_uar); + cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->rx_uar.obj); log_cqe_n = log2above(cqe_n); /* Create CQ using DevX API. */ ret = mlx5_devx_cq_create(sh->cdev->ctx, &rxq_ctrl->obj->cq_obj, @@ -374,9 +374,10 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, rxq_data->cqes = (volatile struct mlx5_cqe (*)[]) (uintptr_t)cq_obj->cqes; rxq_data->cq_db = cq_obj->db_rec; - rxq_data->cq_uar = mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar); + rxq_data->uar_data = sh->rx_uar.cq_db; rxq_data->cqe_n = log_cqe_n; rxq_data->cqn = cq_obj->cq->id; + rxq_data->cq_ci = 0; if (rxq_ctrl->obj->devx_channel) { ret = mlx5_os_devx_subscribe_devx_event (rxq_ctrl->obj->devx_channel, @@ -399,27 +400,23 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, /** * Create the Rx hairpin queue object. * - * @param dev - * Pointer to Ethernet device. - * @param idx - * Queue index in DPDK Rx queue array. + * @param rxq + * Pointer to Rx queue. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int -mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx) +mlx5_rxq_obj_hairpin_new(struct mlx5_rxq_priv *rxq) { - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; - struct mlx5_rxq_ctrl *rxq_ctrl = - container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); + uint16_t idx = rxq->idx; + struct mlx5_priv *priv = rxq->priv; + struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl; struct mlx5_devx_create_rq_attr attr = { 0 }; struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj; uint32_t max_wq_data; - MLX5_ASSERT(rxq_data); - MLX5_ASSERT(tmpl); + MLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL && tmpl != NULL); tmpl->rxq_ctrl = rxq_ctrl; attr.hairpin = 1; max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz; @@ -443,46 +440,45 @@ mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx) attr.wq_attr.log_hairpin_data_sz - MLX5_HAIRPIN_QUEUE_STRIDE; attr.counter_set_id = priv->counter_set_id; + rxq_ctrl->rxq.delay_drop = priv->config.hp_delay_drop; + attr.delay_drop_en = priv->config.hp_delay_drop; tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->cdev->ctx, &attr, rxq_ctrl->socket); if (!tmpl->rq) { DRV_LOG(ERR, "Port %u Rx hairpin queue %u can't create rq object.", - dev->data->port_id, idx); + priv->dev_data->port_id, idx); rte_errno = errno; return -rte_errno; } - dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN; + priv->dev_data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN; return 0; } /** * Create the Rx queue DevX object. * - * @param dev - * Pointer to Ethernet device. - * @param idx - * Queue index in DPDK Rx queue array. + * @param rxq + * Pointer to Rx queue. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx) +mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq) { - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; - struct mlx5_rxq_ctrl *rxq_ctrl = - container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); + struct mlx5_priv *priv = rxq->priv; + struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl; + struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq; struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj; int ret = 0; MLX5_ASSERT(rxq_data); MLX5_ASSERT(tmpl); if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) - return mlx5_rxq_obj_hairpin_new(dev, idx); + return mlx5_rxq_obj_hairpin_new(rxq); tmpl->rxq_ctrl = rxq_ctrl; - if (rxq_ctrl->irq) { + if (rxq_ctrl->irq && !rxq_ctrl->started) { int devx_ev_flag = MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA; @@ -498,34 +494,41 @@ mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx) tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel); } /* Create CQ using DevX API. */ - ret = mlx5_rxq_create_devx_cq_resources(dev, rxq_data); + ret = mlx5_rxq_create_devx_cq_resources(rxq); if (ret) { DRV_LOG(ERR, "Failed to create CQ."); goto error; } + rxq_data->delay_drop = priv->config.std_delay_drop; /* Create RQ using DevX API. */ - ret = mlx5_rxq_create_devx_rq_resources(dev, rxq_data); + ret = mlx5_rxq_create_devx_rq_resources(rxq); if (ret) { DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.", - dev->data->port_id, idx); + priv->dev_data->port_id, rxq->idx); rte_errno = ENOMEM; goto error; } /* Change queue state to ready. 
*/ - ret = mlx5_devx_modify_rq(tmpl, MLX5_RXQ_MOD_RST2RDY); + ret = mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY); if (ret) goto error; - rxq_data->wqes = (void *)(uintptr_t)tmpl->rq_obj.umem_buf; - rxq_data->rq_db = (uint32_t *)(uintptr_t)tmpl->rq_obj.db_rec; - rxq_data->cq_arm_sn = 0; - rxq_data->cq_ci = 0; - mlx5_rxq_initialize(rxq_data); - dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED; - rxq_ctrl->wqn = tmpl->rq_obj.rq->id; + if (!rxq_data->shared) { + rxq_data->wqes = (void *)(uintptr_t)rxq->devx_rq.wq.umem_buf; + rxq_data->rq_db = (uint32_t *)(uintptr_t)rxq->devx_rq.wq.db_rec; + } else if (!rxq_ctrl->started) { + rxq_data->wqes = (void *)(uintptr_t)tmpl->devx_rmp.wq.umem_buf; + rxq_data->rq_db = + (uint32_t *)(uintptr_t)tmpl->devx_rmp.wq.db_rec; + } + if (!rxq_ctrl->started) { + mlx5_rxq_initialize(rxq_data); + rxq_ctrl->wqn = rxq->devx_rq.rq->id; + } + priv->dev_data->rx_queue_state[rxq->idx] = RTE_ETH_QUEUE_STATE_STARTED; return 0; error: ret = rte_errno; /* Save rte_errno before cleanup. */ - mlx5_rxq_devx_obj_release(tmpl); + mlx5_rxq_devx_obj_release(rxq); rte_errno = ret; /* Restore rte_errno. */ return -rte_errno; } @@ -571,15 +574,18 @@ mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev, rqt_attr->rqt_actual_size = rqt_n; if (queues == NULL) { for (i = 0; i < rqt_n; i++) - rqt_attr->rq_list[i] = priv->drop_queue.rxq->rq->id; + rqt_attr->rq_list[i] = + priv->drop_queue.rxq->devx_rq.rq->id; return rqt_attr; } for (i = 0; i != queues_n; ++i) { - struct mlx5_rxq_data *rxq = (*priv->rxqs)[queues[i]]; - struct mlx5_rxq_ctrl *rxq_ctrl = - container_of(rxq, struct mlx5_rxq_ctrl, rxq); + struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, queues[i]); - rqt_attr->rq_list[i] = rxq_ctrl->obj->rq_obj.rq->id; + MLX5_ASSERT(rxq != NULL); + if (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) + rqt_attr->rq_list[i] = rxq->ctrl->obj->rq->id; + else + rqt_attr->rq_list[i] = rxq->devx_rq.rq->id; } MLX5_ASSERT(i > 0); for (j = 0; i != rqt_n; ++j, ++i) @@ -705,21 +711,23 @@ mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key, /* NULL queues designate drop queue. */ if (ind_tbl->queues != NULL) { - struct mlx5_rxq_data *rxq_data = - (*priv->rxqs)[ind_tbl->queues[0]]; struct mlx5_rxq_ctrl *rxq_ctrl = - container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); - rxq_obj_type = rxq_ctrl->type; + mlx5_rxq_ctrl_get(dev, ind_tbl->queues[0]); + rxq_obj_type = rxq_ctrl != NULL ? rxq_ctrl->type : + MLX5_RXQ_TYPE_STANDARD; /* Enable TIR LRO only if all the queues were configured for. */ for (i = 0; i < ind_tbl->queues_n; ++i) { - if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) { + struct mlx5_rxq_data *rxq_i = + mlx5_rxq_data_get(dev, ind_tbl->queues[i]); + + if (rxq_i != NULL && !rxq_i->lro) { lro = false; break; } } } else { - rxq_obj_type = priv->drop_queue.rxq->rxq_ctrl->type; + rxq_obj_type = priv->drop_queue.rxq->ctrl->type; } memset(tir_attr, 0, sizeof(*tir_attr)); tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT; @@ -891,9 +899,9 @@ mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; int socket_id = dev->device->numa_node; - struct mlx5_rxq_ctrl *rxq_ctrl; - struct mlx5_rxq_data *rxq_data; - struct mlx5_rxq_obj *rxq = NULL; + struct mlx5_rxq_priv *rxq; + struct mlx5_rxq_ctrl *rxq_ctrl = NULL; + struct mlx5_rxq_obj *rxq_obj = NULL; int ret; /* @@ -901,6 +909,13 @@ mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev) * They are required to hold pointers for cleanup * and are only accessible via drop queue DevX objects. 
*/ + rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, socket_id); + if (rxq == NULL) { + DRV_LOG(ERR, "Port %u could not allocate drop queue private", + dev->data->port_id); + rte_errno = ENOMEM; + goto error; + } rxq_ctrl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_ctrl), 0, socket_id); if (rxq_ctrl == NULL) { @@ -909,27 +924,30 @@ mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev) rte_errno = ENOMEM; goto error; } - rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, socket_id); - if (rxq == NULL) { + rxq_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_obj), 0, socket_id); + if (rxq_obj == NULL) { DRV_LOG(ERR, "Port %u could not allocate drop queue object", dev->data->port_id); rte_errno = ENOMEM; goto error; } - rxq->rxq_ctrl = rxq_ctrl; + rxq_obj->rxq_ctrl = rxq_ctrl; rxq_ctrl->type = MLX5_RXQ_TYPE_STANDARD; - rxq_ctrl->priv = priv; - rxq_ctrl->obj = rxq; - rxq_data = &rxq_ctrl->rxq; + rxq_ctrl->sh = priv->sh; + rxq_ctrl->obj = rxq_obj; + rxq->ctrl = rxq_ctrl; + rxq->priv = priv; + LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry); /* Create CQ using DevX API. */ - ret = mlx5_rxq_create_devx_cq_resources(dev, rxq_data); + ret = mlx5_rxq_create_devx_cq_resources(rxq); if (ret != 0) { DRV_LOG(ERR, "Port %u drop queue CQ creation failed.", dev->data->port_id); goto error; } + rxq_ctrl->rxq.delay_drop = 0; /* Create RQ using DevX API. */ - ret = mlx5_rxq_create_devx_rq_resources(dev, rxq_data); + ret = mlx5_rxq_create_devx_rq_resources(rxq); if (ret != 0) { DRV_LOG(ERR, "Port %u drop queue RQ creation failed.", dev->data->port_id); @@ -945,18 +963,20 @@ mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev) return 0; error: ret = rte_errno; /* Save rte_errno before cleanup. */ - if (rxq != NULL) { - if (rxq->rq_obj.rq != NULL) - mlx5_devx_rq_destroy(&rxq->rq_obj); - if (rxq->cq_obj.cq != NULL) - mlx5_devx_cq_destroy(&rxq->cq_obj); - if (rxq->devx_channel) + if (rxq != NULL && rxq->devx_rq.rq != NULL) + mlx5_devx_rq_destroy(&rxq->devx_rq); + if (rxq_obj != NULL) { + if (rxq_obj->cq_obj.cq != NULL) + mlx5_devx_cq_destroy(&rxq_obj->cq_obj); + if (rxq_obj->devx_channel) mlx5_os_devx_destroy_event_channel - (rxq->devx_channel); - mlx5_free(rxq); + (rxq_obj->devx_channel); + mlx5_free(rxq_obj); } if (rxq_ctrl != NULL) mlx5_free(rxq_ctrl); + if (rxq != NULL) + mlx5_free(rxq); rte_errno = ret; /* Restore rte_errno. 
*/ return -rte_errno; } @@ -971,12 +991,13 @@ static void mlx5_rxq_devx_obj_drop_release(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq; - struct mlx5_rxq_ctrl *rxq_ctrl = rxq->rxq_ctrl; + struct mlx5_rxq_priv *rxq = priv->drop_queue.rxq; + struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl; mlx5_rxq_devx_obj_release(rxq); - mlx5_free(rxq); + mlx5_free(rxq_ctrl->obj); mlx5_free(rxq_ctrl); + mlx5_free(rxq); priv->drop_queue.rxq = NULL; } @@ -996,7 +1017,7 @@ mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev) mlx5_devx_tir_destroy(hrxq); if (hrxq->ind_table->ind_table != NULL) mlx5_devx_ind_table_destroy(hrxq->ind_table); - if (priv->drop_queue.rxq->rq != NULL) + if (priv->drop_queue.rxq->devx_rq.rq != NULL) mlx5_rxq_devx_obj_drop_release(dev); } @@ -1164,6 +1185,7 @@ mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx, { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_common_device *cdev = priv->sh->cdev; + struct mlx5_uar *uar = &priv->sh->tx_uar; struct mlx5_txq_data *txq_data = (*priv->txqs)[idx]; struct mlx5_txq_ctrl *txq_ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq); @@ -1177,8 +1199,7 @@ mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx, .tis_lst_sz = 1, .wq_attr = (struct mlx5_devx_wq_attr){ .pd = cdev->pdn, - .uar_page = - mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar), + .uar_page = mlx5_os_get_devx_uar_page_id(uar->obj), }, .ts_format = mlx5_ts_format_conv(cdev->config.hca_attr.sq_ts_format), @@ -1218,18 +1239,20 @@ mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx) rte_errno = ENOMEM; return -rte_errno; #else + struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv)); struct mlx5_dev_ctx_shared *sh = priv->sh; struct mlx5_txq_obj *txq_obj = txq_ctrl->obj; struct mlx5_devx_cq_attr cq_attr = { - .uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar), + .uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar.obj), }; - void *reg_addr; uint32_t cqe_n, log_desc_n; uint32_t wqe_n, wqe_size; int ret = 0; MLX5_ASSERT(txq_data); MLX5_ASSERT(txq_obj); + MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); + MLX5_ASSERT(ppriv); txq_obj->txq_ctrl = txq_ctrl; txq_obj->dev = dev; cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH + @@ -1302,6 +1325,8 @@ mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx) txq_data->qp_db = &txq_obj->sq_obj.db_rec[MLX5_SND_DBR]; *txq_data->qp_db = 0; txq_data->qp_num_8s = txq_obj->sq_obj.sq->id << 8; + txq_data->db_heu = sh->cdev->config.dbnc == MLX5_TXDB_HEURISTIC; + txq_data->db_nc = sh->tx_uar.dbnc; /* Change Send Queue state to Ready-to-Send. */ ret = mlx5_txq_devx_modify(txq_obj, MLX5_TXQ_MOD_RST2RDY, 0); if (ret) { @@ -1320,13 +1345,9 @@ mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx) if (!priv->sh->tdn) priv->sh->tdn = priv->sh->td->id; #endif - MLX5_ASSERT(sh->tx_uar); - reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar); - MLX5_ASSERT(reg_addr); - txq_ctrl->bf_reg = reg_addr; txq_ctrl->uar_mmap_offset = - mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar); - txq_uar_init(txq_ctrl); + mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar.obj); + ppriv->uar_table[txq_data->idx] = sh->tx_uar.bf_db; dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED; return 0; error:
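
Notes on the patterns above (illustrative sketches, not part of the patch):

Creation side of shared Rx queues: mlx5_rxq_create_devx_cq_resources() now returns early when rxq_ctrl->started is set, and mlx5_rxq_create_devx_rq_resources() points rxq->devx_rq.rmp at the control object's devx_rmp when rxq_data->shared is set. The first queue of a share group therefore builds the group-wide resources (CQ, event channel, RMP), and each later member only creates its own RQ on top of them. A minimal sketch of this create-once/attach-later flow, with illustrative names (shared_pool, rxq_handle, create_cq(), create_rq()) standing in for the driver's real DevX helpers:

    /*
     * Sketch only: the real driver creates these objects with
     * mlx5_devx_cq_create()/mlx5_devx_rq_create(), and the RMP
     * reference count lives in the common DevX code.
     */
    struct shared_pool {
            unsigned int ref_cnt;   /* queues attached to the pool */
    };

    struct rxq_handle {
            void *cq;               /* shared by the whole group */
            void *rq;               /* always private to one queue */
            struct shared_pool *pool;
    };

    extern void *create_cq(void);
    extern void *create_rq(struct shared_pool *pool);

    static int
    rxq_obj_new(struct rxq_handle *rxq, struct shared_pool *pool, int started)
    {
            if (!started) {
                    /* First member: create the group resources. */
                    rxq->cq = create_cq();
                    if (rxq->cq == NULL)
                            return -1;
            }
            /* Every member: a private RQ attached to the shared pool. */
            rxq->pool = pool;
            pool->ref_cnt++;
            rxq->rq = create_rq(pool);
            return rxq->rq == NULL ? -1 : 0;
    }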
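
Release side: mlx5_rxq_devx_obj_release() is the mirror image. The per-queue devx_rq is always destroyed, but the CQ and the event channel survive as long as devx_rq.rmp->ref_cnt is still positive, i.e. as long as another member of the share group is alive; the count itself is maintained by the common DevX helpers. A sketch under the same assumed types as above:

    static void
    rxq_obj_release(struct rxq_handle *rxq)
    {
            extern void destroy_rq(void *rq);
            extern void destroy_cq(void *cq);

            destroy_rq(rxq->rq);    /* per-queue part always goes */
            rxq->rq = NULL;
            if (rxq->pool != NULL && --rxq->pool->ref_cnt > 0)
                    return;         /* group still alive: keep the CQ */
            destroy_cq(rxq->cq);    /* last member: drop the shared part */
            rxq->cq = NULL;
    }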
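
Independently of sharing, the drop-queue error paths keep DPDK's usual errno discipline: save rte_errno before cleanup (the teardown calls may clobber it), free the partially built objects in reverse order of allocation, restore rte_errno, and return -rte_errno. Condensed to its shape, with hypothetical alloc_a()/alloc_b() helpers:

    #include <errno.h>
    #include <rte_errno.h>

    extern void *alloc_a(void);
    extern void *alloc_b(void);
    extern void free_a(void *a);
    extern void free_b(void *b);

    static int
    drop_create(void)
    {
            void *a = NULL;
            void *b = NULL;
            int ret;

            a = alloc_a();
            if (a == NULL) {
                    rte_errno = ENOMEM;
                    goto error;
            }
            b = alloc_b();
            if (b == NULL) {
                    rte_errno = ENOMEM;
                    goto error;
            }
            return 0;
    error:
            ret = rte_errno;        /* save: cleanup may overwrite it */
            if (b != NULL)
                    free_b(b);
            if (a != NULL)
                    free_a(a);
            rte_errno = ret;        /* restore for the caller */
            return -rte_errno;
    }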