/**
* Free MR resources.
*
- * @param[in] sh
- * Pointer to shared device context.
+ * @param[in] cdev
+ * Pointer to the mlx5 common device.
* @param[in] mr
* MR to free.
*/
static void
-mlx5_aso_dereg_mr(struct mlx5_dev_ctx_shared *sh, struct mlx5_pmd_mr *mr)
+mlx5_aso_dereg_mr(struct mlx5_common_device *cdev, struct mlx5_pmd_mr *mr)
{
void *addr = mr->addr;
- sh->share_cache.dereg_mr_cb(mr);
+ cdev->mr_scache.dereg_mr_cb(mr);
mlx5_free(addr);
memset(mr, 0, sizeof(*mr));
}
/**
* Register Memory Region.
*
- * @param[in] sh
- * Pointer to shared device context.
+ * @param[in] cdev
+ * Pointer to the mlx5 common device.
* @param[in] length
* Size of MR buffer.
* @param[in/out] mr
* Pointer to MR to create.
* @param[in] socket
* Socket to use for allocation.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_aso_reg_mr(struct mlx5_dev_ctx_shared *sh, size_t length,
+mlx5_aso_reg_mr(struct mlx5_common_device *cdev, size_t length,
struct mlx5_pmd_mr *mr, int socket)
{
DRV_LOG(ERR, "Failed to create ASO bits mem for MR.");
return -1;
}
- ret = sh->share_cache.reg_mr_cb(sh->pd, mr->addr, length, mr);
+ ret = cdev->mr_scache.reg_mr_cb(cdev->pd, mr->addr, length, mr);
if (ret) {
DRV_LOG(ERR, "Failed to create direct Mkey.");
mlx5_free(mr->addr);
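
For context, the two helpers above are used as a symmetric pair. A minimal
caller sketch, assuming an initialized cdev and a zeroed struct mlx5_pmd_mr
(the buffer size and socket below are hypothetical):

	struct mlx5_pmd_mr mr = { 0 };

	/* Register a 4 KB buffer on socket 0; returns -1 on failure. */
	if (mlx5_aso_reg_mr(cdev, 4096, &mr, 0))
		return -1;
	/* ... use mr.addr as the ASO data buffer ... */
	mlx5_aso_dereg_mr(cdev, &mr);	/* Frees mr.addr and zeroes mr. */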
enum mlx5_access_aso_opc_mod aso_opc_mod)
{
uint32_t sq_desc_n = 1 << MLX5_ASO_QUEUE_LOG_DESC;
+ struct mlx5_common_device *cdev = sh->cdev;
switch (aso_opc_mod) {
case ASO_OPC_MOD_FLOW_HIT:
- if (mlx5_aso_reg_mr(sh, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
+ if (mlx5_aso_reg_mr(cdev, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
sq_desc_n, &sh->aso_age_mng->aso_sq.mr, 0))
return -1;
- if (mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0,
- sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
- sh->sq_ts_format)) {
- mlx5_aso_dereg_mr(sh, &sh->aso_age_mng->aso_sq.mr);
+ if (mlx5_aso_sq_create(cdev->ctx, &sh->aso_age_mng->aso_sq, 0,
+ sh->tx_uar, cdev->pdn,
+ MLX5_ASO_QUEUE_LOG_DESC,
+ cdev->config.hca_attr.sq_ts_format)) {
+ mlx5_aso_dereg_mr(cdev, &sh->aso_age_mng->aso_sq.mr);
return -1;
}
mlx5_aso_age_init_sq(&sh->aso_age_mng->aso_sq);
break;
case ASO_OPC_MOD_POLICER:
- if (mlx5_aso_sq_create(sh->ctx, &sh->mtrmng->pools_mng.sq, 0,
- sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
- sh->sq_ts_format))
+ if (mlx5_aso_sq_create(cdev->ctx, &sh->mtrmng->pools_mng.sq, 0,
+ sh->tx_uar, cdev->pdn,
+ MLX5_ASO_QUEUE_LOG_DESC,
+ cdev->config.hca_attr.sq_ts_format))
return -1;
mlx5_aso_mtr_init_sq(&sh->mtrmng->pools_mng.sq);
break;
case ASO_OPC_MOD_CONNECTION_TRACKING:
/* 64B per object for query. */
- if (mlx5_aso_reg_mr(sh, 64 * sq_desc_n,
+ if (mlx5_aso_reg_mr(cdev, 64 * sq_desc_n,
&sh->ct_mng->aso_sq.mr, 0))
return -1;
- if (mlx5_aso_sq_create(sh->ctx, &sh->ct_mng->aso_sq, 0,
- sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
- sh->sq_ts_format)) {
- mlx5_aso_dereg_mr(sh, &sh->ct_mng->aso_sq.mr);
+ if (mlx5_aso_sq_create(cdev->ctx, &sh->ct_mng->aso_sq, 0,
+ sh->tx_uar, cdev->pdn,
+ MLX5_ASO_QUEUE_LOG_DESC,
+ cdev->config.hca_attr.sq_ts_format)) {
+ mlx5_aso_dereg_mr(cdev, &sh->ct_mng->aso_sq.mr);
return -1;
}
mlx5_aso_ct_init_sq(&sh->ct_mng->aso_sq);
switch (aso_opc_mod) {
case ASO_OPC_MOD_FLOW_HIT:
- mlx5_aso_dereg_mr(sh, &sh->aso_age_mng->aso_sq.mr);
+ mlx5_aso_dereg_mr(sh->cdev, &sh->aso_age_mng->aso_sq.mr);
sq = &sh->aso_age_mng->aso_sq;
break;
case ASO_OPC_MOD_POLICER:
sq = &sh->mtrmng->pools_mng.sq;
break;
case ASO_OPC_MOD_CONNECTION_TRACKING:
- mlx5_aso_dereg_mr(sh, &sh->ct_mng->aso_sq.mr);
+ mlx5_aso_dereg_mr(sh->cdev, &sh->ct_mng->aso_sq.mr);
sq = &sh->ct_mng->aso_sq;
break;
default:
wqe->aso_dseg.mtrs[dseg_idx].v_bo_sc_bbog_mm =
RTE_BE32((1 << ASO_DSEG_VALID_OFFSET) |
(MLX5_FLOW_COLOR_GREEN << ASO_DSEG_SC_OFFSET));
+ switch (fmp->profile.alg) {
+ case RTE_MTR_SRTCM_RFC2697:
+ /* Only needed for RFC2697. */
+ if (fm->profile->srtcm_prm.ebs_eir)
+ wqe->aso_dseg.mtrs[dseg_idx].v_bo_sc_bbog_mm |=
+ RTE_BE32(1 << ASO_DSEG_BO_OFFSET);
+ break;
+ case RTE_MTR_TRTCM_RFC2698:
+ wqe->aso_dseg.mtrs[dseg_idx].v_bo_sc_bbog_mm |=
+ RTE_BE32(1 << ASO_DSEG_BBOG_OFFSET);
+ break;
+ case RTE_MTR_TRTCM_RFC4115:
+ default:
+ break;
+ }
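
The BO/BBOG selection above tracks the application-level profile algorithm.
For reference, a sketch of an rte_mtr profile that would take the RFC2698
branch (values are hypothetical; the struct layout is from rte_mtr.h):

	struct rte_mtr_meter_profile prof = {
		.alg = RTE_MTR_TRTCM_RFC2698,	/* Selects the BBOG bit above. */
		.trtcm_rfc2698 = {
			.cir = 1000000,	/* Committed rate, bytes/sec. */
			.pir = 2000000,	/* Peak rate, bytes/sec. */
			.cbs = 2048,	/* Committed burst size, bytes. */
			.pbs = 4096,	/* Peak burst size, bytes. */
		},
	};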
+ /*
+ * Note:
+ * For software performance reasons, the token fields are not set when
+ * posting the WQE to the ASO SQ. They will be filled in by the HW
+ * automatically.
+ */
sq->head++;
sq->pi += 2; /* Each WQE contains 2 WQEBBs. */
rte_io_wmb();
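
The hunk ends at the barrier; what normally follows in the mlx5 ASO SQ post
path is the doorbell sequence below (a sketch of the common pattern; exact
field names vary across driver versions):

	/* Publish the new producer index to the doorbell record. */
	sq->sq_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi);
	rte_wmb();
	/* Ring the UAR doorbell with the first 8 bytes of the WQE. */
	*sq->uar_addr = *(volatile uint64_t *)wqe;
	rte_wmb();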
mlx5_aso_ct_obj_analyze(profile, out_data);
return ret;
}
+
+/**
+ * Make sure the conntrack context is synchronized with hardware before
+ * creating a flow rule that uses it.
+ *
+ * @param[in] sh
+ * Pointer to shared device context.
+ * @param[in] ct
+ * Pointer to connection tracking offload object.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_aso_ct_available(struct mlx5_dev_ctx_shared *sh,
+ struct mlx5_aso_ct_action *ct)
+{
+ struct mlx5_aso_ct_pools_mng *mng = sh->ct_mng;
+ uint32_t poll_cqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;
+ enum mlx5_aso_ct_state state =
+ __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+
+ if (state == ASO_CONNTRACK_FREE) {
+ rte_errno = ENXIO;
+ return -rte_errno;
+ } else if (state == ASO_CONNTRACK_READY ||
+ state == ASO_CONNTRACK_QUERY) {
+ return 0;
+ }
+ do {
+ mlx5_aso_ct_completion_handle(mng);
+ state = __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ if (state == ASO_CONNTRACK_READY ||
+ state == ASO_CONNTRACK_QUERY)
+ return 0;
+ /* The CQE is not ready yet; sleep before polling again. */
+ rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
+ } while (--poll_cqe_times);
+ rte_errno = EBUSY;
+ return -rte_errno;
+}
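
A caller would typically gate flow rule creation on this check; a sketch,
assuming the usual mlx5 priv->sh handle (the surrounding rule-insert logic
is hypothetical):

	/* Block rule creation until the CT object is usable by hardware. */
	if (mlx5_aso_ct_available(priv->sh, ct)) {
		/* rte_errno is ENXIO (freed) or EBUSY (poll timed out). */
		DRV_LOG(ERR, "Conntrack context not ready: %d.", rte_errno);
		return -rte_errno;
	}
	/* ... create the flow rule referencing this CT action ... */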