diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index 6b6b9c77ae..105c3d67f0 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -30,14 +30,16 @@
 /**
  * Modify RQ VLAN stripping offload.
  *
- * @param rxq_obj
- *   Rx queue object.
+ * @param rxq
+ *   Rx queue.
+ * @param on
+ *   Enable/disable VLAN stripping.
  *
  * @return
  *   0 on success, non-0 otherwise
  */
 static int
-mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
+mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_priv *rxq, int on)
 {
 	struct mlx5_devx_modify_rq_attr rq_attr;
 
@@ -46,14 +48,14 @@ mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
 	rq_attr.state = MLX5_RQC_STATE_RDY;
 	rq_attr.vsd = (on ? 0 : 1);
 	rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
-	return mlx5_devx_cmd_modify_rq(rxq_obj->rq_obj.rq, &rq_attr);
+	return mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr);
 }
 
 /**
  * Modify RQ using DevX API.
  *
- * @param rxq_obj
- *   DevX Rx queue object.
+ * @param rxq
+ *   DevX Rx queue.
  * @param type
  *   Type of queue state change.
  *
@@ -61,7 +63,7 @@ mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)
+mlx5_devx_modify_rq(struct mlx5_rxq_priv *rxq, uint8_t type)
 {
 	struct mlx5_devx_modify_rq_attr rq_attr;
 
@@ -86,7 +88,9 @@ mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)
 	default:
 		break;
 	}
-	return mlx5_devx_cmd_modify_rq(rxq_obj->rq_obj.rq, &rq_attr);
+	if (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
+		return mlx5_devx_cmd_modify_rq(rxq->ctrl->obj->rq, &rq_attr);
+	return mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr);
 }
 
 /**
@@ -102,9 +106,9 @@ mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static int
-mlx5_devx_modify_sq(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
-		    uint8_t dev_port)
+int
+mlx5_txq_devx_modify(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
+		     uint8_t dev_port)
 {
 	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
 	int ret;
@@ -145,43 +149,39 @@ mlx5_devx_modify_sq(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
 	return 0;
 }
 
-/**
- * Destroy the Rx queue DevX object.
- *
- * @param rxq_obj
- *   Rxq object to destroy.
- */
-static void
-mlx5_rxq_release_devx_resources(struct mlx5_rxq_obj *rxq_obj)
-{
-	mlx5_devx_rq_destroy(&rxq_obj->rq_obj);
-	memset(&rxq_obj->rq_obj, 0, sizeof(rxq_obj->rq_obj));
-	mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
-	memset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj));
-}
-
 /**
  * Release an Rx DevX queue object.
  *
- * @param rxq_obj
- *   DevX Rx queue object.
+ * @param rxq
+ *   DevX Rx queue.
  */
 static void
-mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
+mlx5_rxq_devx_obj_release(struct mlx5_rxq_priv *rxq)
 {
-	MLX5_ASSERT(rxq_obj);
+	struct mlx5_rxq_obj *rxq_obj = rxq->ctrl->obj;
+
+	if (rxq_obj == NULL)
+		return;
 	if (rxq_obj->rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
-		MLX5_ASSERT(rxq_obj->rq);
-		mlx5_devx_modify_rq(rxq_obj, MLX5_RXQ_MOD_RDY2RST);
+		if (rxq_obj->rq == NULL)
+			return;
+		mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RDY2RST);
 		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
 	} else {
-		MLX5_ASSERT(rxq_obj->cq_obj.cq);
-		MLX5_ASSERT(rxq_obj->rq_obj.rq);
-		mlx5_rxq_release_devx_resources(rxq_obj);
-		if (rxq_obj->devx_channel)
+		if (rxq->devx_rq.rq == NULL)
+			return;
+		mlx5_devx_rq_destroy(&rxq->devx_rq);
+		if (rxq->devx_rq.rmp != NULL && rxq->devx_rq.rmp->ref_cnt > 0)
+			return;
+		mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
+		memset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj));
+		if (rxq_obj->devx_channel) {
 			mlx5_os_devx_destroy_event_channel
 							(rxq_obj->devx_channel);
+			rxq_obj->devx_channel = NULL;
+		}
 	}
+	rxq->ctrl->started = false;
 }
 
 /**
@@ -224,22 +224,19 @@ mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
 /**
  * Create an RQ object using DevX.
  *
- * @param dev
- *   Pointer to Ethernet device.
- * @param idx
- *   Queue index in DPDK Rx queue array.
+ * @param rxq
+ *   Pointer to Rx queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
 static int
-mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
+mlx5_rxq_create_devx_rq_resources(struct mlx5_rxq_priv *rxq)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = rxq->priv;
 	struct mlx5_common_device *cdev = priv->sh->cdev;
-	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
-	struct mlx5_rxq_ctrl *rxq_ctrl =
-		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
+	struct mlx5_rxq_data *rxq_data = &rxq->ctrl->rxq;
 	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
 	uint16_t log_desc_n = rxq_data->elts_n - rxq_data->sges_n;
 	uint32_t wqe_size, log_wqe_size;
@@ -280,37 +277,41 @@ mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
 						MLX5_WQ_END_PAD_MODE_NONE;
 	rq_attr.wq_attr.pd = cdev->pdn;
 	rq_attr.counter_set_id = priv->counter_set_id;
+	rq_attr.delay_drop_en = rxq_data->delay_drop;
+	rq_attr.user_index = rte_cpu_to_be_16(priv->dev_data->port_id);
+	if (rxq_data->shared) /* Create RMP based RQ. */
+		rxq->devx_rq.rmp = &rxq_ctrl->obj->devx_rmp;
 	/* Create RQ using DevX API. */
-	return mlx5_devx_rq_create(cdev->ctx, &rxq_ctrl->obj->rq_obj, wqe_size,
+	return mlx5_devx_rq_create(cdev->ctx, &rxq->devx_rq, wqe_size,
 				   log_desc_n, &rq_attr, rxq_ctrl->socket);
 }
 
 /**
  * Create a DevX CQ object for an Rx queue.
 *
- * @param dev
- *   Pointer to Ethernet device.
- * @param idx
- *   Queue index in DPDK Rx queue array.
+ * @param rxq
+ *   Pointer to Rx queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
 static int
-mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
+mlx5_rxq_create_devx_cq_resources(struct mlx5_rxq_priv *rxq)
 {
 	struct mlx5_devx_cq *cq_obj = 0;
 	struct mlx5_devx_cq_attr cq_attr = { 0 };
-	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = rxq->priv;
 	struct mlx5_dev_ctx_shared *sh = priv->sh;
-	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
-	struct mlx5_rxq_ctrl *rxq_ctrl =
-		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+	uint16_t port_id = priv->dev_data->port_id;
+	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
+	struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
 	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
 	uint32_t log_cqe_n;
 	uint16_t event_nums[1] = { 0 };
 	int ret = 0;
 
+	if (rxq_ctrl->started)
+		return 0;
 	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
 	    !rxq_data->lro) {
 		cq_attr.cqe_comp_en = 1u;
@@ -346,7 +347,7 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
 		}
 		DRV_LOG(DEBUG,
 			"Port %u Rx CQE compression is enabled, format %d.",
-			dev->data->port_id, priv->config.cqe_comp_fmt);
+			port_id, priv->config.cqe_comp_fmt);
 		/*
 		 * For vectorized Rx, it must not be doubled in order to
 		 * make cq_ci and rq_ci aligned.
@@ -355,15 +356,14 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
 			cqe_n *= 2;
 	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
 		DRV_LOG(DEBUG,
-			"Port %u Rx CQE compression is disabled for HW"
-			" timestamp.",
-			dev->data->port_id);
+			"Port %u Rx CQE compression is disabled for HW timestamp.",
+			port_id);
 	} else if (priv->config.cqe_comp && rxq_data->lro) {
 		DRV_LOG(DEBUG,
 			"Port %u Rx CQE compression is disabled for LRO.",
-			dev->data->port_id);
+			port_id);
 	}
-	cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->devx_rx_uar);
+	cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->rx_uar.obj);
 	log_cqe_n = log2above(cqe_n);
 	/* Create CQ using DevX API. */
 	ret = mlx5_devx_cq_create(sh->cdev->ctx, &rxq_ctrl->obj->cq_obj,
@@ -374,9 +374,10 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
 	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])
 							(uintptr_t)cq_obj->cqes;
 	rxq_data->cq_db = cq_obj->db_rec;
-	rxq_data->cq_uar = mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar);
+	rxq_data->uar_data = sh->rx_uar.cq_db;
 	rxq_data->cqe_n = log_cqe_n;
 	rxq_data->cqn = cq_obj->cq->id;
+	rxq_data->cq_ci = 0;
 	if (rxq_ctrl->obj->devx_channel) {
 		ret = mlx5_os_devx_subscribe_devx_event
 					      (rxq_ctrl->obj->devx_channel,
@@ -399,27 +400,23 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
 /**
  * Create the Rx hairpin queue object.
 *
- * @param dev
- *   Pointer to Ethernet device.
- * @param idx
- *   Queue index in DPDK Rx queue array.
+ * @param rxq
+ *   Pointer to Rx queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
 static int
-mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
+mlx5_rxq_obj_hairpin_new(struct mlx5_rxq_priv *rxq)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
-	struct mlx5_rxq_ctrl *rxq_ctrl =
-		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+	uint16_t idx = rxq->idx;
+	struct mlx5_priv *priv = rxq->priv;
+	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
 	struct mlx5_devx_create_rq_attr attr = { 0 };
 	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
 	uint32_t max_wq_data;
 
-	MLX5_ASSERT(rxq_data);
-	MLX5_ASSERT(tmpl);
+	MLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL && tmpl != NULL);
 	tmpl->rxq_ctrl = rxq_ctrl;
 	attr.hairpin = 1;
 	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
@@ -443,46 +440,45 @@ mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
 		attr.wq_attr.log_hairpin_data_sz -
 		MLX5_HAIRPIN_QUEUE_STRIDE;
 	attr.counter_set_id = priv->counter_set_id;
+	rxq_ctrl->rxq.delay_drop = priv->config.hp_delay_drop;
+	attr.delay_drop_en = priv->config.hp_delay_drop;
 	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->cdev->ctx, &attr,
 					   rxq_ctrl->socket);
 	if (!tmpl->rq) {
 		DRV_LOG(ERR,
 			"Port %u Rx hairpin queue %u can't create rq object.",
-			dev->data->port_id, idx);
+			priv->dev_data->port_id, idx);
 		rte_errno = errno;
 		return -rte_errno;
 	}
-	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
+	priv->dev_data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
 	return 0;
 }
 
 /**
  * Create the Rx queue DevX object.
 *
- * @param dev
- *   Pointer to Ethernet device.
- * @param idx
- *   Queue index in DPDK Rx queue array.
+ * @param rxq
+ *   Pointer to Rx queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
 static int
-mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
+mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
-	struct mlx5_rxq_ctrl *rxq_ctrl =
-		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+	struct mlx5_priv *priv = rxq->priv;
+	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
+	struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
 	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
 	int ret = 0;
 
 	MLX5_ASSERT(rxq_data);
 	MLX5_ASSERT(tmpl);
 	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
-		return mlx5_rxq_obj_hairpin_new(dev, idx);
+		return mlx5_rxq_obj_hairpin_new(rxq);
 	tmpl->rxq_ctrl = rxq_ctrl;
-	if (rxq_ctrl->irq) {
+	if (rxq_ctrl->irq && !rxq_ctrl->started) {
 		int devx_ev_flag =
 			  MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;
 
@@ -498,34 +494,41 @@ mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 		tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
 	}
 	/* Create CQ using DevX API. */
-	ret = mlx5_rxq_create_devx_cq_resources(dev, idx);
+	ret = mlx5_rxq_create_devx_cq_resources(rxq);
 	if (ret) {
 		DRV_LOG(ERR, "Failed to create CQ.");
 		goto error;
 	}
+	rxq_data->delay_drop = priv->config.std_delay_drop;
 	/* Create RQ using DevX API. */
-	ret = mlx5_rxq_create_devx_rq_resources(dev, idx);
+	ret = mlx5_rxq_create_devx_rq_resources(rxq);
 	if (ret) {
 		DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
-			dev->data->port_id, idx);
+			priv->dev_data->port_id, rxq->idx);
 		rte_errno = ENOMEM;
 		goto error;
 	}
 	/* Change queue state to ready. */
-	ret = mlx5_devx_modify_rq(tmpl, MLX5_RXQ_MOD_RST2RDY);
+	ret = mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY);
 	if (ret)
 		goto error;
-	rxq_data->wqes = (void *)(uintptr_t)tmpl->rq_obj.umem_buf;
-	rxq_data->rq_db = (uint32_t *)(uintptr_t)tmpl->rq_obj.db_rec;
-	rxq_data->cq_arm_sn = 0;
-	rxq_data->cq_ci = 0;
-	mlx5_rxq_initialize(rxq_data);
-	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
-	rxq_ctrl->wqn = tmpl->rq_obj.rq->id;
+	if (!rxq_data->shared) {
+		rxq_data->wqes = (void *)(uintptr_t)rxq->devx_rq.wq.umem_buf;
+		rxq_data->rq_db = (uint32_t *)(uintptr_t)rxq->devx_rq.wq.db_rec;
+	} else if (!rxq_ctrl->started) {
+		rxq_data->wqes = (void *)(uintptr_t)tmpl->devx_rmp.wq.umem_buf;
+		rxq_data->rq_db =
+				(uint32_t *)(uintptr_t)tmpl->devx_rmp.wq.db_rec;
+	}
+	if (!rxq_ctrl->started) {
+		mlx5_rxq_initialize(rxq_data);
+		rxq_ctrl->wqn = rxq->devx_rq.rq->id;
+	}
+	priv->dev_data->rx_queue_state[rxq->idx] = RTE_ETH_QUEUE_STATE_STARTED;
 	return 0;
 error:
 	ret = rte_errno; /* Save rte_errno before cleanup. */
-	mlx5_rxq_devx_obj_release(tmpl);
+	mlx5_rxq_devx_obj_release(rxq);
 	rte_errno = ret; /* Restore rte_errno. */
 	return -rte_errno;
 }
@@ -537,6 +540,11 @@ error:
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
+ * @param queues
+ *   List of RX queue indices or NULL, in which case
+ *   the attribute will be filled with the drop queue ID.
+ * @param queues_n
+ *   Size of @p queues array or 0 if it is NULL.
 * @param ind_tbl
 *   DevX indirection table object.
 *
@@ -564,12 +572,20 @@ mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
 	}
 	rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
 	rqt_attr->rqt_actual_size = rqt_n;
+	if (queues == NULL) {
+		for (i = 0; i < rqt_n; i++)
+			rqt_attr->rq_list[i] =
+					priv->drop_queue.rxq->devx_rq.rq->id;
+		return rqt_attr;
+	}
 	for (i = 0; i != queues_n; ++i) {
-		struct mlx5_rxq_data *rxq = (*priv->rxqs)[queues[i]];
-		struct mlx5_rxq_ctrl *rxq_ctrl =
-				container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, queues[i]);
 
-		rqt_attr->rq_list[i] = rxq_ctrl->obj->rq_obj.rq->id;
+		MLX5_ASSERT(rxq != NULL);
+		if (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
+			rqt_attr->rq_list[i] = rxq->ctrl->obj->rq->id;
+		else
+			rqt_attr->rq_list[i] = rxq->devx_rq.rq->id;
 	}
 	MLX5_ASSERT(i > 0);
 	for (j = 0; i != rqt_n; ++j, ++i)
@@ -596,11 +612,12 @@ mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
+	const uint16_t *queues = dev->data->dev_started ? ind_tbl->queues :
+							  NULL;
 
 	MLX5_ASSERT(ind_tbl);
-	rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
-						       ind_tbl->queues,
-						       ind_tbl->queues_n);
+	rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n, queues,
+						       ind_tbl->queues_n);
 	if (!rqt_attr)
 		return -rte_errno;
 	ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->cdev->ctx, rqt_attr);
@@ -671,7 +688,8 @@ mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
 * @param[in] hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param[in] ind_tbl
- *   Indirection table for TIR.
+ *   Indirection table for TIR. If the table's queues array is NULL,
+ *   a TIR for the drop queue is assumed.
 * @param[in] tunnel
 *   Tunnel type.
 * @param[out] tir_attr
@@ -687,19 +705,29 @@ mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
 		       int tunnel, struct mlx5_devx_tir_attr *tir_attr)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[ind_tbl->queues[0]];
-	struct mlx5_rxq_ctrl *rxq_ctrl =
-		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
-	enum mlx5_rxq_type rxq_obj_type = rxq_ctrl->type;
+	enum mlx5_rxq_type rxq_obj_type;
 	bool lro = true;
 	uint32_t i;
 
-	/* Enable TIR LRO only if all the queues were configured for. */
-	for (i = 0; i < ind_tbl->queues_n; ++i) {
-		if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) {
-			lro = false;
-			break;
+	/* NULL queues designate drop queue. */
+	if (ind_tbl->queues != NULL) {
+		struct mlx5_rxq_ctrl *rxq_ctrl =
+				mlx5_rxq_ctrl_get(dev, ind_tbl->queues[0]);
+		rxq_obj_type = rxq_ctrl != NULL ? rxq_ctrl->type :
+						  MLX5_RXQ_TYPE_STANDARD;
+
+		/* Enable TIR LRO only if all the queues were configured for it. */
+		for (i = 0; i < ind_tbl->queues_n; ++i) {
+			struct mlx5_rxq_data *rxq_i =
+				mlx5_rxq_data_get(dev, ind_tbl->queues[i]);
+
+			if (rxq_i != NULL && !rxq_i->lro) {
+				lro = false;
+				break;
+			}
 		}
+	} else {
+		rxq_obj_type = priv->drop_queue.rxq->ctrl->type;
 	}
 	memset(tir_attr, 0, sizeof(*tir_attr));
 	tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
@@ -858,7 +886,7 @@ mlx5_devx_hrxq_modify(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
 }
 
 /**
- * Create a DevX drop action for Rx Hash queue.
+ * Create a DevX drop Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
@@ -867,14 +895,112 @@ mlx5_devx_hrxq_modify(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
 static int
-mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
+mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)
 {
-	(void)dev;
-	DRV_LOG(ERR, "DevX drop action is not supported yet.");
-	rte_errno = ENOTSUP;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	int socket_id = dev->device->numa_node;
+	struct mlx5_rxq_priv *rxq;
+	struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
+	struct mlx5_rxq_obj *rxq_obj = NULL;
+	int ret;
+
+	/*
+	 * Initialize dummy control structures.
+	 * They are required to hold pointers for cleanup
+	 * and are only accessible via drop queue DevX objects.
+	 */
+	rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, socket_id);
+	if (rxq == NULL) {
+		DRV_LOG(ERR, "Port %u could not allocate drop queue private",
+			dev->data->port_id);
+		rte_errno = ENOMEM;
+		goto error;
+	}
+	rxq_ctrl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_ctrl),
+			       0, socket_id);
+	if (rxq_ctrl == NULL) {
+		DRV_LOG(ERR, "Port %u could not allocate drop queue control",
+			dev->data->port_id);
+		rte_errno = ENOMEM;
+		goto error;
+	}
+	rxq_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_obj), 0, socket_id);
+	if (rxq_obj == NULL) {
+		DRV_LOG(ERR, "Port %u could not allocate drop queue object",
+			dev->data->port_id);
+		rte_errno = ENOMEM;
+		goto error;
+	}
+	rxq_obj->rxq_ctrl = rxq_ctrl;
+	rxq_ctrl->type = MLX5_RXQ_TYPE_STANDARD;
+	rxq_ctrl->sh = priv->sh;
+	rxq_ctrl->obj = rxq_obj;
+	rxq->ctrl = rxq_ctrl;
+	rxq->priv = priv;
+	LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);
+	/* Create CQ using DevX API. */
+	ret = mlx5_rxq_create_devx_cq_resources(rxq);
+	if (ret != 0) {
+		DRV_LOG(ERR, "Port %u drop queue CQ creation failed.",
+			dev->data->port_id);
+		goto error;
+	}
+	rxq_ctrl->rxq.delay_drop = 0;
+	/* Create RQ using DevX API. */
+	ret = mlx5_rxq_create_devx_rq_resources(rxq);
+	if (ret != 0) {
+		DRV_LOG(ERR, "Port %u drop queue RQ creation failed.",
+			dev->data->port_id);
+		rte_errno = ENOMEM;
+		goto error;
+	}
+	/* Change queue state to ready. */
+	ret = mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY);
+	if (ret != 0)
+		goto error;
+	/* Initialize drop queue. */
+	priv->drop_queue.rxq = rxq;
+	return 0;
+error:
+	ret = rte_errno; /* Save rte_errno before cleanup. */
+	if (rxq != NULL && rxq->devx_rq.rq != NULL)
+		mlx5_devx_rq_destroy(&rxq->devx_rq);
+	if (rxq_obj != NULL) {
+		if (rxq_obj->cq_obj.cq != NULL)
+			mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
+		if (rxq_obj->devx_channel)
+			mlx5_os_devx_destroy_event_channel
+							(rxq_obj->devx_channel);
+		mlx5_free(rxq_obj);
+	}
+	if (rxq_ctrl != NULL)
+		mlx5_free(rxq_ctrl);
+	if (rxq != NULL)
+		mlx5_free(rxq);
+	rte_errno = ret; /* Restore rte_errno. */
 	return -rte_errno;
 }
 
+/**
+ * Release drop Rx queue resources.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+static void
+mlx5_rxq_devx_obj_drop_release(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;
+	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
+
+	mlx5_rxq_devx_obj_release(rxq);
+	mlx5_free(rxq_ctrl->obj);
+	mlx5_free(rxq_ctrl);
+	mlx5_free(rxq);
+	priv->drop_queue.rxq = NULL;
+}
+
 /**
  * Release a drop hash Rx queue.
 *
@@ -884,9 +1010,84 @@ mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
 static void
 mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
 {
-	(void)dev;
-	DRV_LOG(ERR, "DevX drop action is not supported yet.");
-	rte_errno = ENOTSUP;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
+
+	if (hrxq->tir != NULL)
+		mlx5_devx_tir_destroy(hrxq);
+	if (hrxq->ind_table->ind_table != NULL)
+		mlx5_devx_ind_table_destroy(hrxq->ind_table);
+	if (priv->drop_queue.rxq->devx_rq.rq != NULL)
+		mlx5_rxq_devx_obj_drop_release(dev);
+}
+
+/**
+ * Create a DevX drop action for Rx Hash queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
+	int ret;
+
+	ret = mlx5_rxq_devx_obj_drop_create(dev);
+	if (ret != 0) {
+		DRV_LOG(ERR, "Cannot create drop RX queue");
+		return ret;
+	}
+	/* hrxq->ind_table queues are NULL, drop RX queue ID will be used */
+	ret = mlx5_devx_ind_table_new(dev, 0, hrxq->ind_table);
+	if (ret != 0) {
+		DRV_LOG(ERR, "Cannot create drop hash RX queue indirection table");
+		goto error;
+	}
+	ret = mlx5_devx_hrxq_new(dev, hrxq, /* tunnel */ false);
+	if (ret != 0) {
+		DRV_LOG(ERR, "Cannot create drop hash RX queue");
+		goto error;
+	}
+	return 0;
+error:
+	mlx5_devx_drop_action_destroy(dev);
+	return ret;
+}
+
+/**
+ * Select TXQ TIS number.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param queue_idx
+ *   Queue index in DPDK Tx queue array.
+ *
+ * @return
+ *   > 0 on success, a negative errno value otherwise.
+ */
+static uint32_t
+mlx5_get_txq_tis_num(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	int tis_idx;
+
+	if (priv->sh->bond.n_port && priv->sh->lag.affinity_mode ==
+			MLX5_LAG_MODE_TIS) {
+		tis_idx = (priv->lag_affinity_idx + queue_idx) %
+			  priv->sh->bond.n_port;
+		DRV_LOG(INFO, "port %d txq %d gets affinity %d and maps to PF %d.",
+			dev->data->port_id, queue_idx, tis_idx + 1,
+			priv->sh->lag.tx_remap_affinity[tis_idx]);
+	} else {
+		tis_idx = 0;
+	}
+	MLX5_ASSERT(priv->sh->tis[tis_idx]);
+	return priv->sh->tis[tis_idx]->id;
 }
 
 /**
@@ -936,7 +1137,8 @@ mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
 	attr.wq_attr.log_hairpin_num_packets =
 			attr.wq_attr.log_hairpin_data_sz -
 			MLX5_HAIRPIN_QUEUE_STRIDE;
-	attr.tis_num = priv->sh->tis->id;
+
+	attr.tis_num = mlx5_get_txq_tis_num(dev, idx);
 	tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->cdev->ctx, &attr);
 	if (!tmpl->sq) {
 		DRV_LOG(ERR,
@@ -983,6 +1185,7 @@ mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_common_device *cdev = priv->sh->cdev;
+	struct mlx5_uar *uar = &priv->sh->tx_uar;
 	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
 	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq_data, struct mlx5_txq_ctrl, txq);
@@ -994,15 +1197,15 @@ mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
 		.allow_swp = !!priv->config.swp,
 		.cqn = txq_obj->cq_obj.cq->id,
 		.tis_lst_sz = 1,
-		.tis_num = priv->sh->tis->id,
 		.wq_attr = (struct mlx5_devx_wq_attr){
 			.pd = cdev->pdn,
-			.uar_page =
-				 mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar),
+			.uar_page = mlx5_os_get_devx_uar_page_id(uar->obj),
 		},
 		.ts_format =
			mlx5_ts_format_conv(cdev->config.hca_attr.sq_ts_format),
+		.tis_num = mlx5_get_txq_tis_num(dev, idx),
 	};
+
 	/* Create Send Queue object with DevX. */
 	return mlx5_devx_sq_create(cdev->ctx, &txq_obj->sq_obj, log_desc_n,
				   &sq_attr, priv->sh->numa_node);
@@ -1036,18 +1239,20 @@ mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 	rte_errno = ENOMEM;
 	return -rte_errno;
 #else
+	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
 	struct mlx5_dev_ctx_shared *sh = priv->sh;
 	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
 	struct mlx5_devx_cq_attr cq_attr = {
-		.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
+		.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar.obj),
 	};
-	void *reg_addr;
 	uint32_t cqe_n, log_desc_n;
 	uint32_t wqe_n, wqe_size;
 	int ret = 0;
 
 	MLX5_ASSERT(txq_data);
 	MLX5_ASSERT(txq_obj);
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	MLX5_ASSERT(ppriv);
 	txq_obj->txq_ctrl = txq_ctrl;
 	txq_obj->dev = dev;
 	cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH +
@@ -1120,8 +1325,10 @@ mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 	txq_data->qp_db = &txq_obj->sq_obj.db_rec[MLX5_SND_DBR];
 	*txq_data->qp_db = 0;
 	txq_data->qp_num_8s = txq_obj->sq_obj.sq->id << 8;
+	txq_data->db_heu = sh->cdev->config.dbnc == MLX5_TXDB_HEURISTIC;
+	txq_data->db_nc = sh->tx_uar.dbnc;
 	/* Change Send Queue state to Ready-to-Send. */
-	ret = mlx5_devx_modify_sq(txq_obj, MLX5_TXQ_MOD_RST2RDY, 0);
+	ret = mlx5_txq_devx_modify(txq_obj, MLX5_TXQ_MOD_RST2RDY, 0);
 	if (ret) {
 		rte_errno = errno;
 		DRV_LOG(ERR,
@@ -1138,13 +1345,9 @@ mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 	if (!priv->sh->tdn)
 		priv->sh->tdn = priv->sh->td->id;
 #endif
-	MLX5_ASSERT(sh->tx_uar);
-	reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
-	MLX5_ASSERT(reg_addr);
-	txq_ctrl->bf_reg = reg_addr;
 	txq_ctrl->uar_mmap_offset =
-		mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar);
-	txq_uar_init(txq_ctrl);
+		mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar.obj);
+	ppriv->uar_table[txq_data->idx] = sh->tx_uar.bf_db;
 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
 	return 0;
 error:
@@ -1190,7 +1393,7 @@ struct mlx5_obj_ops devx_obj_ops = {
 	.drop_action_create = mlx5_devx_drop_action_create,
 	.drop_action_destroy = mlx5_devx_drop_action_destroy,
 	.txq_obj_new = mlx5_txq_devx_obj_new,
-	.txq_obj_modify = mlx5_devx_modify_sq,
+	.txq_obj_modify = mlx5_txq_devx_modify,
 	.txq_obj_release = mlx5_txq_devx_obj_release,
 	.lb_dummy_queue_create = NULL,
 	.lb_dummy_queue_release = NULL,
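
For illustration, the TIS selection added in mlx5_get_txq_tis_num() above reduces to a round-robin mapping of Tx queue indices onto the bonded ports, offset by the device's LAG affinity index. Below is a minimal standalone sketch of that arithmetic only; txq_tis_index() and the example values are hypothetical, whereas in the driver the inputs come from priv->lag_affinity_idx and priv->sh->bond.n_port and the result indexes the priv->sh->tis[] array.

/* Standalone sketch (hypothetical helper, not part of the patch):
 * mirrors the (lag_affinity_idx + queue_idx) % n_port computation
 * that mlx5_get_txq_tis_num() uses to pick a TIS per Tx queue.
 */
#include <stdio.h>

static int
txq_tis_index(int lag_affinity_idx, int n_port, int queue_idx)
{
	if (n_port == 0)
		return 0; /* No bonding: single TIS at index 0. */
	return (lag_affinity_idx + queue_idx) % n_port;
}

int
main(void)
{
	int queue_idx;

	/* Example: 2 bonded PF ports, affinity index 1. */
	for (queue_idx = 0; queue_idx < 4; queue_idx++)
		printf("txq %d -> tis[%d]\n", queue_idx,
		       txq_tis_index(1, 2, queue_idx));
	return 0;
}

With two bonded ports and affinity index 1, queues 0..3 map to tis[1], tis[0], tis[1], tis[0], spreading Tx queues evenly across the physical ports, which is what the DRV_LOG(INFO, ...) affinity message in the patch reports.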