diff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c
index fe5c9912f2..da513a600c 100644
--- a/drivers/net/mlx5/mlx5_flow_aso.c
+++ b/drivers/net/mlx5/mlx5_flow_aso.c
@@ -191,7 +191,6 @@ mlx5_aso_mtr_init_sq(struct mlx5_aso_sq *sq)
 	volatile struct mlx5_aso_wqe *restrict wqe;
 	int i;
 	int size = 1 << sq->log_desc_n;
-	uint32_t idx;
 
 	/* All the next fields state should stay constant. */
 	for (i = 0, wqe = &sq->sq_obj.aso_wqes[0]; i < size; ++i, ++wqe) {
@@ -204,11 +203,6 @@ mlx5_aso_mtr_init_sq(struct mlx5_aso_sq *sq)
 			(BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
 		wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
 							 MLX5_COMP_MODE_OFFSET);
-		for (idx = 0; idx < MLX5_ASO_METERS_PER_WQE;
-			idx++)
-			wqe->aso_dseg.mtrs[idx].v_bo_sc_bbog_mm =
-				RTE_BE32((1 << ASO_DSEG_VALID_OFFSET) |
-				(MLX5_FLOW_COLOR_GREEN << ASO_DSEG_SC_OFFSET));
 	}
 }
 
@@ -274,6 +268,7 @@ mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
 	sq->tail = 0;
 	sq->sqn = sq->sq_obj.sq->id;
 	sq->uar_addr = mlx5_os_get_devx_uar_reg_addr(uar);
+	rte_spinlock_init(&sq->sqsl);
 	return 0;
 error:
 	mlx5_aso_destroy_sq(sq);
@@ -310,11 +305,11 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
 		mlx5_aso_age_init_sq(&sh->aso_age_mng->aso_sq);
 		break;
 	case ASO_OPC_MOD_POLICER:
-		if (mlx5_aso_sq_create(sh->ctx, &sh->mtrmng->sq, 0,
+		if (mlx5_aso_sq_create(sh->ctx, &sh->mtrmng->pools_mng.sq, 0,
 				  sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
 				  sh->sq_ts_format))
 			return -1;
-		mlx5_aso_mtr_init_sq(&sh->mtrmng->sq);
+		mlx5_aso_mtr_init_sq(&sh->mtrmng->pools_mng.sq);
 		break;
 	default:
 		DRV_LOG(ERR, "Unknown ASO operation mode");
@@ -341,7 +336,7 @@ mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh,
 		sq = &sh->aso_age_mng->aso_sq;
 		break;
 	case ASO_OPC_MOD_POLICER:
-		sq = &sh->mtrmng->sq;
+		sq = &sh->mtrmng->pools_mng.sq;
 		break;
 	default:
 		DRV_LOG(ERR, "Unknown ASO operation mode");
@@ -663,14 +658,18 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_aso_sq *sq,
 {
 	volatile struct mlx5_aso_wqe *wqe = NULL;
 	struct mlx5_flow_meter_info *fm = NULL;
+	struct mlx5_flow_meter_profile *fmp;
 	uint16_t size = 1 << sq->log_desc_n;
 	uint16_t mask = size - 1;
-	uint16_t res = size - (uint16_t)(sq->head - sq->tail);
+	uint16_t res;
 	uint32_t dseg_idx = 0;
 	struct mlx5_aso_mtr_pool *pool = NULL;
 
+	rte_spinlock_lock(&sq->sqsl);
+	res = size - (uint16_t)(sq->head - sq->tail);
 	if (unlikely(!res)) {
 		DRV_LOG(ERR, "Fail: SQ is full and no free WQE to send");
+		rte_spinlock_unlock(&sq->sqsl);
 		return 0;
 	}
 	wqe = &sq->sq_obj.aso_wqes[sq->head & mask];
@@ -700,6 +699,16 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_aso_sq *sq,
 			RTE_BE32(MLX5_IFC_FLOW_METER_DISABLE_CBS_CIR_VAL);
 		wqe->aso_dseg.mtrs[dseg_idx].ebs_eir = 0;
 	}
+	fmp = fm->profile;
+	if (fmp->profile.packet_mode)
+		wqe->aso_dseg.mtrs[dseg_idx].v_bo_sc_bbog_mm =
+			RTE_BE32((1 << ASO_DSEG_VALID_OFFSET) |
+				(MLX5_FLOW_COLOR_GREEN << ASO_DSEG_SC_OFFSET) |
+				(MLX5_METER_MODE_PKT << ASO_DSEG_MTR_MODE));
+	else
+		wqe->aso_dseg.mtrs[dseg_idx].v_bo_sc_bbog_mm =
+			RTE_BE32((1 << ASO_DSEG_VALID_OFFSET) |
+				(MLX5_FLOW_COLOR_GREEN << ASO_DSEG_SC_OFFSET));
 	sq->head++;
 	sq->pi += 2;/* Each WQE contains 2 WQEBB's. */
 	rte_io_wmb();
@@ -707,6 +716,7 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_aso_sq *sq,
 	rte_wmb();
 	*sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH. */
 	rte_wmb();
+	rte_spinlock_unlock(&sq->sqsl);
 	return 1;
 }
 
@@ -737,12 +747,16 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq)
 	const unsigned int mask = cq_size - 1;
 	uint32_t idx;
 	uint32_t next_idx = cq->cq_ci & mask;
-	const uint16_t max = (uint16_t)(sq->head - sq->tail);
+	uint16_t max;
 	uint16_t n = 0;
 	int ret;
 
-	if (unlikely(!max))
+	rte_spinlock_lock(&sq->sqsl);
+	max = (uint16_t)(sq->head - sq->tail);
+	if (unlikely(!max)) {
+		rte_spinlock_unlock(&sq->sqsl);
 		return;
+	}
 	do {
 		idx = next_idx;
 		next_idx = (cq->cq_ci + 1) & mask;
@@ -769,6 +783,7 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq)
 		rte_io_wmb();
 		cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
 	}
+	rte_spinlock_unlock(&sq->sqsl);
 }
 
 /**
@@ -788,7 +803,7 @@ int
 mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh,
 			struct mlx5_aso_mtr *mtr)
 {
-	struct mlx5_aso_sq *sq = &sh->mtrmng->sq;
+	struct mlx5_aso_sq *sq = &sh->mtrmng->pools_mng.sq;
 	uint32_t poll_wqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;
 
 	do {
@@ -798,8 +813,8 @@ mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh,
 		/* Waiting for wqe resource. */
 		rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
 	} while (--poll_wqe_times);
-	DRV_LOG(ERR, "Fail to send WQE for ASO meter %d",
-			mtr->fm.meter_id);
+	DRV_LOG(ERR, "Fail to send WQE for ASO meter offset %d",
+			mtr->offset);
 	return -1;
 }
 
@@ -820,7 +835,7 @@ int
 mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh,
 			struct mlx5_aso_mtr *mtr)
 {
-	struct mlx5_aso_sq *sq = &sh->mtrmng->sq;
+	struct mlx5_aso_sq *sq = &sh->mtrmng->pools_mng.sq;
 	uint32_t poll_cqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;
 
 	if (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) ==
@@ -834,7 +849,7 @@ mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh,
 		/* Waiting for CQE ready. */
 		rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
 	} while (--poll_cqe_times);
-	DRV_LOG(ERR, "Fail to poll CQE ready for ASO meter %d",
-			mtr->fm.meter_id);
+	DRV_LOG(ERR, "Fail to poll CQE ready for ASO meter offset %d",
+			mtr->offset);
 	return -1;
 }
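
The hunks above serialize access to the single ASO meter send queue: mlx5_aso_sq_create() now initializes sq->sqsl, and both mlx5_aso_mtr_sq_enqueue_single() and mlx5_aso_mtr_completion_handle() take that spinlock around the head/tail accounting. The standalone sketch below only illustrates this locking pattern under simplified assumptions; the toy_sq_* names and the reduced ring bookkeeping are hypothetical and not part of the mlx5 driver, while the rte_spinlock_* calls are the real DPDK primitives used by the patch.

/*
 * Minimal sketch (not the mlx5 code itself) of the per-SQ spinlock
 * pattern introduced by this patch: the lock keeps "head - tail"
 * consistent between the enqueue path and the completion handler.
 */
#include <stdint.h>
#include <stdio.h>
#include <rte_spinlock.h>

struct toy_sq {
	uint16_t head;          /* next WQE slot to post */
	uint16_t tail;          /* last WQE slot completed */
	uint16_t log_desc_n;    /* log2 of the ring size */
	rte_spinlock_t sqsl;    /* serializes head/tail updates */
};

/* Mirrors the enqueue side: take the lock before computing the free
 * slot count so a concurrent completion cannot race the head update. */
static int
toy_sq_enqueue(struct toy_sq *sq)
{
	uint16_t size = 1 << sq->log_desc_n;
	uint16_t res;

	rte_spinlock_lock(&sq->sqsl);
	res = size - (uint16_t)(sq->head - sq->tail);
	if (res == 0) {
		rte_spinlock_unlock(&sq->sqsl);
		return 0;       /* ring full, nothing posted */
	}
	sq->head++;             /* post one WQE */
	rte_spinlock_unlock(&sq->sqsl);
	return 1;
}

/* Mirrors the completion side: the same lock guards the tail advance. */
static void
toy_sq_complete(struct toy_sq *sq, uint16_t n)
{
	rte_spinlock_lock(&sq->sqsl);
	if ((uint16_t)(sq->head - sq->tail) >= n)
		sq->tail += n;
	rte_spinlock_unlock(&sq->sqsl);
}

int
main(void)
{
	struct toy_sq sq = { .log_desc_n = 2 };

	rte_spinlock_init(&sq.sqsl);
	printf("posted: %d\n", toy_sq_enqueue(&sq));
	toy_sq_complete(&sq, 1);
	return 0;
}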