err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
        if (err)
                goto error;
-       rte_atomic32_init(&tbl_data->tbl.refcnt);
-       rte_atomic32_inc(&tbl_data->tbl.refcnt);
+       __atomic_store_n(&tbl_data->tbl.refcnt, 1, __ATOMIC_RELAXED);
        table_key.direction = 1;
        tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0,
                               SOCKET_ID_ANY);
        err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
        if (err)
                goto error;
-       rte_atomic32_init(&tbl_data->tbl.refcnt);
-       rte_atomic32_inc(&tbl_data->tbl.refcnt);
+       __atomic_store_n(&tbl_data->tbl.refcnt, 1, __ATOMIC_RELAXED);
        table_key.direction = 0;
        table_key.domain = 1;
        tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0,
                               SOCKET_ID_ANY);
        err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
        if (err)
                goto error;
-       rte_atomic32_init(&tbl_data->tbl.refcnt);
-       rte_atomic32_inc(&tbl_data->tbl.refcnt);
+       __atomic_store_n(&tbl_data->tbl.refcnt, 1, __ATOMIC_RELAXED);
        return err;
 error:
        mlx5_free_table_hash_list(priv);
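
The three hunks above collapse the rte_atomic32_init()/rte_atomic32_inc()
pair into a single relaxed store of 1. A minimal sketch of the mapping,
assuming the counter is still private to the initializing thread (the local
names are illustrative, not from the patch):

        #include <rte_atomic.h>

        rte_atomic32_t legacy_cnt;      /* old field type */
        uint32_t cnt;                   /* new field type */

        /* Legacy API: zero the counter, then bump it to 1 (full barrier). */
        rte_atomic32_init(&legacy_cnt);
        rte_atomic32_inc(&legacy_cnt);

        /* C11 builtin: one relaxed store; no ordering is needed while the
         * object is still private to the initializing thread.
         */
        __atomic_store_n(&cnt, 1, __ATOMIC_RELAXED);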
 
 /* Indirection table. */
 struct mlx5_ind_table_obj {
        LIST_ENTRY(mlx5_ind_table_obj) next; /* Pointer to the next element. */
-       rte_atomic32_t refcnt; /* Reference counter. */
+       uint32_t refcnt; /* Reference counter. */
        RTE_STD_C11
        union {
                void *ind_table; /**< Indirection table. */
 __extension__
 struct mlx5_hrxq {
        ILIST_ENTRY(uint32_t)next; /* Index to the next element. */
-       rte_atomic32_t refcnt; /* Reference counter. */
+       uint32_t refcnt; /* Reference counter. */
        uint32_t shared:1; /* This object used in shared action. */
        struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
        RTE_STD_C11
        /* Indirection tables. */
        LIST_HEAD(ind_tables, mlx5_ind_table_obj) ind_tbls;
        /* Pointer to next element. */
-       rte_atomic32_t refcnt; /**< Reference counter. */
+       uint32_t refcnt; /**< Reference counter. */
        /**< Verbs modify header action object. */
        uint8_t ft_type; /**< Flow table type, Rx or Tx. */
        uint8_t max_lro_msg_size;
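
The struct hunks above also change the field type from rte_atomic32_t to a
plain uint32_t: the __atomic builtins operate on ordinary integer objects,
so the wrapper type is no longer needed. A sketch under that assumption
(struct and function names illustrative):

        struct obj {
                uint32_t refcnt; /* accessed only through __atomic_* builtins */
        };

        static void
        obj_get(struct obj *o)
        {
                __atomic_fetch_add(&o->refcnt, 1, __ATOMIC_RELAXED);
        }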
 
        }
        rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
        return (__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED) == 1);
-
 }
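
The releasable check above needs only a relaxed load: the value is advisory,
and == 1 means the caller holds the sole remaining reference. For comparison,
the legacy counterpart would have read (a sketch, not taken from the patch):

        return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);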
 
 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
        tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
 #endif
        tmpl->rxq.idx = idx;
-       __atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+       __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
        LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
        return tmpl;
 error:
        tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
        tmpl->hairpin_conf = *hairpin_conf;
        tmpl->rxq.idx = idx;
-       __atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+       __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
        LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
        return tmpl;
 }
 
        if (rxq_data) {
                rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
-               __atomic_add_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED);
+               __atomic_fetch_add(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED);
        }
        return rxq_ctrl;
 }
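
The hunks above switch __atomic_add_fetch() to __atomic_fetch_add(). The
former returns the updated value, the latter the prior one; when the result
is discarded, as in these reference-taking paths, both are the same atomic
increment. A sketch of the difference (refcnt is an illustrative local):

        uint32_t refcnt = 1;
        uint32_t prior, updated;

        prior = __atomic_fetch_add(&refcnt, 1, __ATOMIC_RELAXED);   /* 1 */
        updated = __atomic_add_fetch(&refcnt, 1, __ATOMIC_RELAXED); /* 3 */
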
        if (ind_tbl) {
                unsigned int i;
 
-               rte_atomic32_inc(&ind_tbl->refcnt);
+               __atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
                for (i = 0; i != ind_tbl->queues_n; ++i)
                        mlx5_rxq_get(dev, ind_tbl->queues[i]);
        }
        struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int i;
 
-       if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
+       if (__atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED) == 0)
                priv->obj_ops.ind_table_destroy(ind_tbl);
        for (i = 0; i != ind_tbl->queues_n; ++i)
                claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
-       if (!rte_atomic32_read(&ind_tbl->refcnt)) {
+       if (__atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED) == 0) {
                LIST_REMOVE(ind_tbl, next);
                mlx5_free(ind_tbl);
                return 0;
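
rte_atomic32_dec_and_test() decrements and returns true exactly when the
counter reaches zero, so its direct C11 translation, used throughout this
patch, tests the value returned by __atomic_sub_fetch() against zero. A
sketch of the mapping (obj and destroy() are illustrative):

        /* Legacy, with an rte_atomic32_t counter: true only at zero. */
        if (rte_atomic32_dec_and_test(&obj->refcnt))
                destroy(obj);

        /* C11, with a uint32_t counter: __atomic_sub_fetch() returns the
         * post-decrement value.
         */
        if (__atomic_sub_fetch(&obj->refcnt, 1, __ATOMIC_RELAXED) == 0)
                destroy(obj);
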
        ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
        if (ret < 0)
                goto error;
-       rte_atomic32_inc(&ind_tbl->refcnt);
+       __atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
        LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
        return ind_tbl;
 error:
                        mlx5_ind_table_obj_release(dev, ind_tbl);
                        continue;
                }
-               rte_atomic32_inc(&hrxq->refcnt);
+               __atomic_fetch_add(&hrxq->refcnt, 1, __ATOMIC_RELAXED);
                return idx;
        }
        return 0;
        hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
        if (!hrxq)
                return 0;
-       if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
+       if (__atomic_sub_fetch(&hrxq->refcnt, 1, __ATOMIC_RELAXED) == 0) {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
                mlx5_glue->destroy_flow_action(hrxq->action);
 #endif
                rte_errno = errno;
                goto error;
        }
-       rte_atomic32_inc(&hrxq->refcnt);
+       __atomic_fetch_add(&hrxq->refcnt, 1, __ATOMIC_RELAXED);
        ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
                     hrxq, next);
        return hrxq_idx;
        int ret;
 
        if (priv->drop_queue.hrxq) {
-               rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
+               __atomic_fetch_add(&priv->drop_queue.hrxq->refcnt, 1,
+                                  __ATOMIC_RELAXED);
                return priv->drop_queue.hrxq;
        }
        hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
        ret = priv->obj_ops.drop_action_create(dev);
        if (ret < 0)
                goto error;
-       rte_atomic32_set(&hrxq->refcnt, 1);
+       __atomic_store_n(&hrxq->refcnt, 1, __ATOMIC_RELAXED);
        return hrxq;
 error:
        if (hrxq) {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
 
-       if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
+       if (__atomic_sub_fetch(&hrxq->refcnt, 1, __ATOMIC_RELAXED) == 0) {
                priv->obj_ops.drop_action_destroy(dev);
                mlx5_free(priv->drop_queue.rxq);
                mlx5_free(hrxq->ind_table);
 
                rte_errno = ENOMEM;
                goto error;
        }
-       __atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+       __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
        tmpl->type = MLX5_TXQ_TYPE_STANDARD;
        LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
        return tmpl;
        tmpl->txq.idx = idx;
        tmpl->hairpin_conf = *hairpin_conf;
        tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
-       __atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+       __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
        LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
        return tmpl;
 }
 
        if (txq_data) {
                ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
-               __atomic_add_fetch(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
+               __atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
        }
        return ctrl;
 }
        if (!(*priv->txqs)[idx])
                return 0;
        txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
-       if (__atomic_sub_fetch(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
+       if (__atomic_sub_fetch(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) != 0)
                return 1;
        if (txq_ctrl->obj) {
                priv->obj_ops.txq_obj_release(txq_ctrl->obj);
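
The final hunk also tightens the post-decrement test from > 1 to != 0,
bringing the Tx release path in line with the dec-and-test convention used
in the Rx hunks above: return busy while any reference remains, and fall
through to tear the queue down only once the counter has dropped to zero.
A sketch of the equivalence (the refcnt field was rte_atomic32_t in the
legacy form, uint32_t in the new one):

        /* Legacy dec-and-test form: busy for any non-zero result. */
        if (!rte_atomic32_dec_and_test(&txq_ctrl->refcnt))
                return 1;

        /* C11 equivalent: sub_fetch() yields the post-decrement value. */
        if (__atomic_sub_fetch(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) != 0)
                return 1;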