X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_devx.c;h=91243f684fb08d61dc8fedf87ae2fe07d452934b;hb=8e83ba285abe4341b7666927d3fc265b35446c06;hp=668d47025e8d2e59774570f4f61207b556354f63;hpb=5cf0707fc7e97930ac656a2030608af666165fcf;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index 668d47025e..91243f684f 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -88,6 +88,8 @@ mlx5_devx_modify_rq(struct mlx5_rxq_priv *rxq, uint8_t type)
 	default:
 		break;
 	}
+	if (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
+		return mlx5_devx_cmd_modify_rq(rxq->ctrl->obj->rq, &rq_attr);
 	return mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr);
 }
 
@@ -156,18 +158,21 @@ mlx5_txq_devx_modify(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
 static void
 mlx5_rxq_devx_obj_release(struct mlx5_rxq_priv *rxq)
 {
-	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
-	struct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;
+	struct mlx5_rxq_obj *rxq_obj = rxq->ctrl->obj;
 
-	MLX5_ASSERT(rxq != NULL);
-	MLX5_ASSERT(rxq_ctrl != NULL);
+	if (rxq_obj == NULL)
+		return;
 	if (rxq_obj->rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
-		MLX5_ASSERT(rxq_obj->rq);
+		if (rxq_obj->rq == NULL)
+			return;
 		mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RDY2RST);
 		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
 	} else {
+		if (rxq->devx_rq.rq == NULL)
+			return;
 		mlx5_devx_rq_destroy(&rxq->devx_rq);
-		memset(&rxq->devx_rq, 0, sizeof(rxq->devx_rq));
+		if (rxq->devx_rq.rmp != NULL && rxq->devx_rq.rmp->ref_cnt > 0)
+			return;
 		mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
 		memset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj));
 		if (rxq_obj->devx_channel) {
@@ -176,6 +181,7 @@ mlx5_rxq_devx_obj_release(struct mlx5_rxq_priv *rxq)
 			rxq_obj->devx_channel = NULL;
 		}
 	}
+	rxq->ctrl->started = false;
 }
 
 /**
@@ -251,11 +257,11 @@ mlx5_rxq_create_devx_rq_resources(struct mlx5_rxq_priv *rxq)
 		 * 512*2^single_wqe_log_num_of_strides.
 		 */
 		rq_attr.wq_attr.single_wqe_log_num_of_strides =
-				rxq_data->strd_num_n -
+				rxq_data->log_strd_num -
 				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
 		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
 		rq_attr.wq_attr.single_stride_log_num_of_bytes =
-				rxq_data->strd_sz_n -
+				rxq_data->log_strd_sz -
 				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
 		wqe_size = sizeof(struct mlx5_wqe_mprq);
 	} else {
@@ -271,6 +277,10 @@ mlx5_rxq_create_devx_rq_resources(struct mlx5_rxq_priv *rxq)
 						MLX5_WQ_END_PAD_MODE_NONE;
 	rq_attr.wq_attr.pd = cdev->pdn;
 	rq_attr.counter_set_id = priv->counter_set_id;
+	rq_attr.delay_drop_en = rxq_data->delay_drop;
+	rq_attr.user_index = rte_cpu_to_be_16(priv->dev_data->port_id);
+	if (rxq_data->shared) /* Create RMP based RQ. */
+		rxq->devx_rq.rmp = &rxq_ctrl->obj->devx_rmp;
 	/* Create RQ using DevX API. */
 	return mlx5_devx_rq_create(cdev->ctx, &rxq->devx_rq, wqe_size,
 				   log_desc_n, &rq_attr, rxq_ctrl->socket);
@@ -300,6 +310,8 @@ mlx5_rxq_create_devx_cq_resources(struct mlx5_rxq_priv *rxq)
 	uint16_t event_nums[1] = { 0 };
 	int ret = 0;
 
+	if (rxq_ctrl->started)
+		return 0;
 	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
 	    !rxq_data->lro) {
 		cq_attr.cqe_comp_en = 1u;
@@ -351,7 +363,7 @@ mlx5_rxq_create_devx_cq_resources(struct mlx5_rxq_priv *rxq)
 			"Port %u Rx CQE compression is disabled for LRO.",
 			port_id);
 	}
-	cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->devx_rx_uar);
+	cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->rx_uar.obj);
 	log_cqe_n = log2above(cqe_n);
 	/* Create CQ using DevX API. */
 	ret = mlx5_devx_cq_create(sh->cdev->ctx, &rxq_ctrl->obj->cq_obj,
@@ -362,9 +374,10 @@ mlx5_rxq_create_devx_cq_resources(struct mlx5_rxq_priv *rxq)
 	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])
 							(uintptr_t)cq_obj->cqes;
 	rxq_data->cq_db = cq_obj->db_rec;
-	rxq_data->cq_uar = mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar);
+	rxq_data->uar_data = sh->rx_uar.cq_db;
 	rxq_data->cqe_n = log_cqe_n;
 	rxq_data->cqn = cq_obj->cq->id;
+	rxq_data->cq_ci = 0;
 	if (rxq_ctrl->obj->devx_channel) {
 		ret = mlx5_os_devx_subscribe_devx_event
 					(rxq_ctrl->obj->devx_channel,
@@ -427,6 +440,8 @@ mlx5_rxq_obj_hairpin_new(struct mlx5_rxq_priv *rxq)
 			attr.wq_attr.log_hairpin_data_sz -
 			MLX5_HAIRPIN_QUEUE_STRIDE;
 	attr.counter_set_id = priv->counter_set_id;
+	rxq_ctrl->rxq.delay_drop = priv->config.hp_delay_drop;
+	attr.delay_drop_en = priv->config.hp_delay_drop;
 	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->cdev->ctx, &attr,
 					   rxq_ctrl->socket);
 	if (!tmpl->rq) {
@@ -463,7 +478,7 @@ mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq)
 	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
 		return mlx5_rxq_obj_hairpin_new(rxq);
 	tmpl->rxq_ctrl = rxq_ctrl;
-	if (rxq_ctrl->irq) {
+	if (rxq_ctrl->irq && !rxq_ctrl->started) {
 		int devx_ev_flag =
 			  MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;
 
@@ -484,6 +499,7 @@ mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq)
 		DRV_LOG(ERR, "Failed to create CQ.");
 		goto error;
 	}
+	rxq_data->delay_drop = priv->config.std_delay_drop;
 	/* Create RQ using DevX API. */
 	ret = mlx5_rxq_create_devx_rq_resources(rxq);
 	if (ret) {
@@ -496,11 +512,19 @@ mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq)
 	ret = mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY);
 	if (ret)
 		goto error;
-	rxq_data->wqes = (void *)(uintptr_t)rxq->devx_rq.wq.umem_buf;
-	rxq_data->rq_db = (uint32_t *)(uintptr_t)rxq->devx_rq.wq.db_rec;
-	mlx5_rxq_initialize(rxq_data);
+	if (!rxq_data->shared) {
+		rxq_data->wqes = (void *)(uintptr_t)rxq->devx_rq.wq.umem_buf;
+		rxq_data->rq_db = (uint32_t *)(uintptr_t)rxq->devx_rq.wq.db_rec;
+	} else if (!rxq_ctrl->started) {
+		rxq_data->wqes = (void *)(uintptr_t)tmpl->devx_rmp.wq.umem_buf;
+		rxq_data->rq_db =
+				(uint32_t *)(uintptr_t)tmpl->devx_rmp.wq.db_rec;
+	}
+	if (!rxq_ctrl->started) {
+		mlx5_rxq_initialize(rxq_data);
+		rxq_ctrl->wqn = rxq->devx_rq.rq->id;
+	}
 	priv->dev_data->rx_queue_state[rxq->idx] = RTE_ETH_QUEUE_STATE_STARTED;
-	rxq_ctrl->wqn = rxq->devx_rq.rq->id;
 	return 0;
 error:
 	ret = rte_errno; /* Save rte_errno before cleanup. */
@@ -558,7 +582,10 @@ mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
 		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, queues[i]);
 
 		MLX5_ASSERT(rxq != NULL);
-		rqt_attr->rq_list[i] = rxq->devx_rq.rq->id;
+		if (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
+			rqt_attr->rq_list[i] = rxq->ctrl->obj->rq->id;
+		else
+			rqt_attr->rq_list[i] = rxq->devx_rq.rq->id;
 	}
 	MLX5_ASSERT(i > 0);
 	for (j = 0; i != rqt_n; ++j, ++i)
@@ -918,6 +945,7 @@ mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)
 			dev->data->port_id);
 		goto error;
 	}
+	rxq_ctrl->rxq.delay_drop = 0;
 	/* Create RQ using DevX API. */
 	ret = mlx5_rxq_create_devx_rq_resources(rxq);
 	if (ret != 0) {
@@ -1157,6 +1185,7 @@ mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_common_device *cdev = priv->sh->cdev;
+	struct mlx5_uar *uar = &priv->sh->tx_uar;
 	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
 	struct mlx5_txq_ctrl *txq_ctrl =
 			container_of(txq_data, struct mlx5_txq_ctrl, txq);
@@ -1170,8 +1199,7 @@ mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
 		.tis_lst_sz = 1,
 		.wq_attr = (struct mlx5_devx_wq_attr){
 			.pd = cdev->pdn,
-			.uar_page =
-				mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar),
+			.uar_page = mlx5_os_get_devx_uar_page_id(uar->obj),
 		},
 		.ts_format =
 			mlx5_ts_format_conv(cdev->config.hca_attr.sq_ts_format),
@@ -1211,18 +1239,20 @@ mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 	rte_errno = ENOMEM;
 	return -rte_errno;
 #else
+	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
 	struct mlx5_dev_ctx_shared *sh = priv->sh;
 	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
 	struct mlx5_devx_cq_attr cq_attr = {
-		.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
+		.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar.obj),
 	};
-	void *reg_addr;
 	uint32_t cqe_n, log_desc_n;
 	uint32_t wqe_n, wqe_size;
 	int ret = 0;
 
 	MLX5_ASSERT(txq_data);
 	MLX5_ASSERT(txq_obj);
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	MLX5_ASSERT(ppriv);
 	txq_obj->txq_ctrl = txq_ctrl;
 	txq_obj->dev = dev;
 	cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH +
@@ -1295,6 +1325,8 @@ mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 	txq_data->qp_db = &txq_obj->sq_obj.db_rec[MLX5_SND_DBR];
 	*txq_data->qp_db = 0;
 	txq_data->qp_num_8s = txq_obj->sq_obj.sq->id << 8;
+	txq_data->db_heu = sh->cdev->config.dbnc == MLX5_TXDB_HEURISTIC;
+	txq_data->db_nc = sh->tx_uar.dbnc;
 	/* Change Send Queue state to Ready-to-Send. */
 	ret = mlx5_txq_devx_modify(txq_obj, MLX5_TXQ_MOD_RST2RDY, 0);
 	if (ret) {
@@ -1313,13 +1345,9 @@ mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 	if (!priv->sh->tdn)
 		priv->sh->tdn = priv->sh->td->id;
 #endif
-	MLX5_ASSERT(sh->tx_uar);
-	reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
-	MLX5_ASSERT(reg_addr);
-	txq_ctrl->bf_reg = reg_addr;
 	txq_ctrl->uar_mmap_offset =
-		mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar);
-	txq_uar_init(txq_ctrl);
+		mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar.obj);
+	ppriv->uar_table[txq_data->idx] = sh->tx_uar.bf_db;
 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
 	return 0;
 error: