From b5c8b3e70cdfaafc4d08f6c2dabaff524eeb6041 Mon Sep 17 00:00:00 2001
From: Alexander Kozyrev
Date: Tue, 27 Oct 2020 15:28:23 +0000
Subject: [PATCH] net/mlx5: use C11 atomics for RxQ/TxQ refcounts

The rte_atomic API is deprecated and needs to be replaced with C11
atomic builtins. Use relaxed memory ordering for the RxQ/TxQ reference
counters: they only track object lifetime on the control path, so their
updates need atomicity but impose no ordering on surrounding memory
accesses.

Signed-off-by: Alexander Kozyrev
Acked-by: Viacheslav Ovsiienko
---
 drivers/net/mlx5/linux/mlx5_ethdev_os.c |  1 -
 drivers/net/mlx5/mlx5.c                 |  9 +++------
 drivers/net/mlx5/mlx5.h                 |  6 +++---
 drivers/net/mlx5/mlx5_ethdev.c          |  1 -
 drivers/net/mlx5/mlx5_flow_dv.c         |  3 ++-
 drivers/net/mlx5/mlx5_rxq.c             | 28 ++++++++++++++--------------
 drivers/net/mlx5/mlx5_txq.c             |  8 ++++----
 7 files changed, 26 insertions(+), 30 deletions(-)
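Notes (placed after the '---' separator, so git-am ignores them): the
same conversion pattern repeats throughout this patch. Below is a
minimal standalone sketch of that pattern; "struct obj" and its helpers
are hypothetical stand-ins for the driver's RxQ/TxQ control structures,
not mlx5 code. It compiles with GCC or Clang, which provide the
__atomic builtins.

	#include <stdint.h>
	#include <stdio.h>

	struct obj {
		uint32_t refcnt; /* was rte_atomic32_t before the conversion */
	};

	static void
	obj_init(struct obj *o)
	{
		/* rte_atomic32_init() + rte_atomic32_inc() collapse into one store. */
		__atomic_store_n(&o->refcnt, 1, __ATOMIC_RELAXED);
	}

	static void
	obj_get(struct obj *o)
	{
		/*
		 * rte_atomic32_inc() becomes __atomic_fetch_add(). The result is
		 * unused, so fetch_add and add_fetch behave identically here, but
		 * fetch_add can map directly to a native fetch-and-add instruction
		 * on some architectures (e.g. Arm LSE LDADD), while add_fetch may
		 * need an extra addition after the atomic operation.
		 */
		__atomic_fetch_add(&o->refcnt, 1, __ATOMIC_RELAXED);
	}

	static int
	obj_put(struct obj *o)
	{
		/* rte_atomic32_dec_and_test() becomes sub_fetch() == 0. */
		return __atomic_sub_fetch(&o->refcnt, 1, __ATOMIC_RELAXED) == 0;
	}

	int
	main(void)
	{
		struct obj o;

		obj_init(&o);
		obj_get(&o);
		printf("%d\n", obj_put(&o)); /* prints 0: still referenced */
		printf("%d\n", obj_put(&o)); /* prints 1: last reference dropped */
		return 0;
	}

Relaxed ordering is kept even on the lookup paths (mlx5_rxq_get,
mlx5_txq_get) because a reference counter only needs atomicity, not
ordering, as long as object creation and destruction are serialized
elsewhere.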
diff --git a/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
index 593b0d08ac..19b281925f 100644
--- a/drivers/net/mlx5/linux/mlx5_ethdev_os.c
+++ b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
@@ -24,7 +24,6 @@
 #include
 #include
-#include <rte_atomic.h>
 #include
 #include
 #include
 
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 91aaee3d8c..27c9c2abb6 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1232,8 +1232,7 @@ mlx5_alloc_table_hash_list(struct mlx5_priv *priv)
 	err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
 	if (err)
 		goto error;
-	rte_atomic32_init(&tbl_data->tbl.refcnt);
-	rte_atomic32_inc(&tbl_data->tbl.refcnt);
+	__atomic_store_n(&tbl_data->tbl.refcnt, 1, __ATOMIC_RELAXED);
 	table_key.direction = 1;
 	tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0,
 			       SOCKET_ID_ANY);
@@ -1245,8 +1244,7 @@ mlx5_alloc_table_hash_list(struct mlx5_priv *priv)
 	err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
 	if (err)
 		goto error;
-	rte_atomic32_init(&tbl_data->tbl.refcnt);
-	rte_atomic32_inc(&tbl_data->tbl.refcnt);
+	__atomic_store_n(&tbl_data->tbl.refcnt, 1, __ATOMIC_RELAXED);
 	table_key.direction = 0;
 	table_key.domain = 1;
 	tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0,
@@ -1259,8 +1257,7 @@ mlx5_alloc_table_hash_list(struct mlx5_priv *priv)
 	err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
 	if (err)
 		goto error;
-	rte_atomic32_init(&tbl_data->tbl.refcnt);
-	rte_atomic32_inc(&tbl_data->tbl.refcnt);
+	__atomic_store_n(&tbl_data->tbl.refcnt, 1, __ATOMIC_RELAXED);
 	return err;
 error:
 	mlx5_free_table_hash_list(priv);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 88bbd316f0..3bd3451ad0 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -718,7 +718,7 @@ struct mlx5_rxq_obj {
 /* Indirection table. */
 struct mlx5_ind_table_obj {
 	LIST_ENTRY(mlx5_ind_table_obj) next; /* Pointer to the next element. */
-	rte_atomic32_t refcnt; /* Reference counter. */
+	uint32_t refcnt; /* Reference counter. */
 	RTE_STD_C11
 	union {
 		void *ind_table; /**< Indirection table. */
@@ -732,7 +732,7 @@ struct mlx5_ind_table_obj {
 __extension__
 struct mlx5_hrxq {
 	ILIST_ENTRY(uint32_t)next; /* Index to the next element. */
-	rte_atomic32_t refcnt; /* Reference counter. */
+	uint32_t refcnt; /* Reference counter. */
 	uint32_t shared:1; /* This object used in shared action. */
 	struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
 	RTE_STD_C11
@@ -870,7 +870,7 @@ struct mlx5_priv {
 	/* Indirection tables. */
 	LIST_HEAD(ind_tables, mlx5_ind_table_obj) ind_tbls;
 	/* Pointer to next element. */
-	rte_atomic32_t refcnt; /**< Reference counter. */
+	uint32_t refcnt; /**< Reference counter. */
 	/**< Verbs modify header action object. */
 	uint8_t ft_type; /**< Flow table type, Rx or Tx. */
 	uint8_t max_lro_msg_size;
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 3399e95338..2cd05c5f80 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -10,7 +10,6 @@
 #include
 #include
-#include <rte_atomic.h>
 #include
 #include
 #include
 
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index dafe07f42e..3f1ccf8fe0 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -10283,7 +10283,8 @@ __flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct rte_flow *flow,
 		if (hrxq_idx) {
 			*hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
 					       hrxq_idx);
-			rte_atomic32_inc(&(*hrxq)->refcnt);
+			__atomic_fetch_add(&(*hrxq)->refcnt, 1,
+					   __ATOMIC_RELAXED);
 		}
 	} else {
 		struct mlx5_flow_rss_desc *rss_desc =
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 05116b7510..819042f42a 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -461,7 +461,6 @@ mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
 	}
 	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
 	return (__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED) == 1);
-
 }
 
 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
@@ -1677,7 +1676,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
 #endif
 	tmpl->rxq.idx = idx;
-	__atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
 	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
 	return tmpl;
 error:
@@ -1724,7 +1723,7 @@ mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
 	tmpl->hairpin_conf = *hairpin_conf;
 	tmpl->rxq.idx = idx;
-	__atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
 	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
 	return tmpl;
 }
@@ -1749,7 +1748,7 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
 
 	if (rxq_data) {
 		rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
-		__atomic_add_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED);
+		__atomic_fetch_add(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED);
 	}
 	return rxq_ctrl;
 }
@@ -1924,7 +1923,7 @@ mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
 	if (ind_tbl) {
 		unsigned int i;
 
-		rte_atomic32_inc(&ind_tbl->refcnt);
+		__atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
 		for (i = 0; i != ind_tbl->queues_n; ++i)
 			mlx5_rxq_get(dev, ind_tbl->queues[i]);
 	}
@@ -1949,11 +1948,11 @@ mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
 	struct mlx5_priv *priv = dev->data->dev_private;
 	unsigned int i;
 
-	if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
+	if (__atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED) == 0)
 		priv->obj_ops.ind_table_destroy(ind_tbl);
 	for (i = 0; i != ind_tbl->queues_n; ++i)
 		claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
-	if (!rte_atomic32_read(&ind_tbl->refcnt)) {
+	if (__atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED) == 0) {
 		LIST_REMOVE(ind_tbl, next);
 		mlx5_free(ind_tbl);
 		return 0;
@@ -2027,7 +2026,7 @@ mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
 	ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
 	if (ret < 0)
 		goto error;
-	rte_atomic32_inc(&ind_tbl->refcnt);
+	__atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
 	LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
 	return ind_tbl;
 error:
@@ -2086,7 +2085,7 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
 			mlx5_ind_table_obj_release(dev, ind_tbl);
 			continue;
 		}
-		rte_atomic32_inc(&hrxq->refcnt);
+		__atomic_fetch_add(&hrxq->refcnt, 1, __ATOMIC_RELAXED);
 		return idx;
 	}
 	return 0;
@@ -2192,7 +2191,7 @@ mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
 	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
 	if (!hrxq)
 		return 0;
-	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
+	if (__atomic_sub_fetch(&hrxq->refcnt, 1, __ATOMIC_RELAXED) == 0) {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 		mlx5_glue->destroy_flow_action(hrxq->action);
 #endif
@@ -2265,7 +2264,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
 		rte_errno = errno;
 		goto error;
 	}
-	rte_atomic32_inc(&hrxq->refcnt);
+	__atomic_fetch_add(&hrxq->refcnt, 1, __ATOMIC_RELAXED);
 	ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
 		     hrxq, next);
 	return hrxq_idx;
@@ -2295,7 +2294,8 @@ mlx5_drop_action_create(struct rte_eth_dev *dev)
 	int ret;
 
 	if (priv->drop_queue.hrxq) {
-		rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
+		__atomic_fetch_add(&priv->drop_queue.hrxq->refcnt, 1,
+				   __ATOMIC_RELAXED);
 		return priv->drop_queue.hrxq;
 	}
 	hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
@@ -2316,7 +2316,7 @@ mlx5_drop_action_create(struct rte_eth_dev *dev)
 	ret = priv->obj_ops.drop_action_create(dev);
 	if (ret < 0)
 		goto error;
-	rte_atomic32_set(&hrxq->refcnt, 1);
+	__atomic_store_n(&hrxq->refcnt, 1, __ATOMIC_RELAXED);
 	return hrxq;
 error:
 	if (hrxq) {
@@ -2340,7 +2340,7 @@ mlx5_drop_action_destroy(struct rte_eth_dev *dev)
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
 
-	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
+	if (__atomic_sub_fetch(&hrxq->refcnt, 1, __ATOMIC_RELAXED) == 0) {
 		priv->obj_ops.drop_action_destroy(dev);
 		mlx5_free(priv->drop_queue.rxq);
 		mlx5_free(hrxq->ind_table);
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index cd7b42a0b2..0f30a5d529 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -1141,7 +1141,7 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		rte_errno = ENOMEM;
 		goto error;
 	}
-	__atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
 	tmpl->type = MLX5_TXQ_TYPE_STANDARD;
 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
 	return tmpl;
@@ -1185,7 +1185,7 @@ mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	tmpl->txq.idx = idx;
 	tmpl->hairpin_conf = *hairpin_conf;
 	tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
-	__atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
 	return tmpl;
 }
@@ -1210,7 +1210,7 @@ mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
 
 	if (txq_data) {
 		ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
-		__atomic_add_fetch(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
+		__atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
 	}
 	return ctrl;
 }
@@ -1235,7 +1235,7 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
 	if (!(*priv->txqs)[idx])
 		return 0;
 	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
-	if (__atomic_sub_fetch(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
+	if (__atomic_sub_fetch(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) != 0)
 		return 1;
 	if (txq_ctrl->obj) {
 		priv->obj_ops.txq_obj_release(txq_ctrl->obj);
-- 
2.20.1
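
P.S. (editor note, not part of the patch): relaxed ordering on the
decrement paths above is sufficient only under the usual DPDK assumption
that queue and hrxq setup/teardown run on the serialized control path.
For a counter that guards teardown of an object still visible to
concurrent readers, the standard C11 idiom is a RELEASE decrement paired
with an ACQUIRE fence before freeing. A hypothetical sketch, not mlx5
code:

	#include <stdint.h>
	#include <stdlib.h>

	struct obj {
		uint32_t refcnt;
		/* ... payload ... */
	};

	static void
	obj_put_concurrent(struct obj *o)
	{
		/*
		 * The RELEASE decrement publishes this thread's prior writes to
		 * *o before the count can reach zero; the ACQUIRE fence makes
		 * the other threads' writes visible to the thread that frees.
		 */
		if (__atomic_sub_fetch(&o->refcnt, 1, __ATOMIC_RELEASE) == 0) {
			__atomic_thread_fence(__ATOMIC_ACQUIRE);
			free(o);
		}
	}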