net/mlx5: relax atomic refcnt for multi-packet Rx buffer
author    Phil Yang <phil.yang@arm.com>
          Thu, 3 Sep 2020 02:53:10 +0000 (10:53 +0800)
committer Ferruh Yigit <ferruh.yigit@intel.com>
          Wed, 30 Sep 2020 17:19:15 +0000 (19:19 +0200)
Use C11 atomics with RELAXED ordering instead of the rte_atomic ops,
which enforce unnecessary full barriers on aarch64.

Signed-off-by: Phil Yang <phil.yang@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Acked-by: Alexander Kozyrev <akozyrev@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
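
For reference, a minimal standalone sketch of the relaxed-ordering
refcount pattern this patch applies, built on the same GCC/Clang
__atomic builtins (demo_buf, demo_buf_get and demo_buf_put are
illustrative names, not part of the driver):

    #include <stdint.h>
    #include <stdio.h>

    struct demo_buf {
            uint16_t refcnt; /* Atomically accessed refcnt. */
    };

    /* Take a reference. A plain counter bump needs no ordering with
     * surrounding loads/stores, so RELAXED is sufficient and avoids
     * the barriers the legacy rte_atomic16_* ops imply on aarch64. */
    static void demo_buf_get(struct demo_buf *buf)
    {
            __atomic_add_fetch(&buf->refcnt, 1, __ATOMIC_RELAXED);
    }

    /* Drop a reference; returns 1 when the last one is gone. */
    static int demo_buf_put(struct demo_buf *buf)
    {
            return __atomic_sub_fetch(&buf->refcnt, 1,
                                      __ATOMIC_RELAXED) == 0;
    }

    int main(void)
    {
            struct demo_buf buf;

            __atomic_store_n(&buf.refcnt, 1, __ATOMIC_RELAXED);
            demo_buf_get(&buf);                        /* refcnt == 2 */
            printf("last ref: %d\n", demo_buf_put(&buf)); /* 0 */
            printf("last ref: %d\n", demo_buf_put(&buf)); /* 1 */
            return 0;
    }

(The decrement can stay RELAXED here because the driver returns the
buffer to a mempool, whose own operations provide the needed
synchronization; a standalone refcount guarding a plain free() would
typically need release/acquire ordering on the final decrement.)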
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_rxtx.c
drivers/net/mlx5/mlx5_rxtx.h

index a9ccc2b..c059e21 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1090,7 +1090,7 @@ mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
 
        memset(_m, 0, sizeof(*buf));
        buf->mp = mp;
-       rte_atomic16_set(&buf->refcnt, 1);
+       __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
        for (j = 0; j != strd_n; ++j) {
                shinfo = &buf->shinfos[j];
                shinfo->free_cb = mlx5_mprq_buf_free_cb;
index 101555e..0b87be1 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1626,10 +1626,11 @@ mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
 {
        struct mlx5_mprq_buf *buf = opaque;
 
-       if (rte_atomic16_read(&buf->refcnt) == 1) {
+       if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
                rte_mempool_put(buf->mp, buf);
-       } else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) {
-               rte_atomic16_set(&buf->refcnt, 1);
+       } else if (unlikely(__atomic_sub_fetch(&buf->refcnt, 1,
+                                              __ATOMIC_RELAXED) == 0)) {
+               __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
                rte_mempool_put(buf->mp, buf);
        }
 }
@@ -1709,7 +1710,8 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 
                if (consumed_strd == strd_n) {
                        /* Replace WQE only if the buffer is still in use. */
-                       if (rte_atomic16_read(&buf->refcnt) > 1) {
+                       if (__atomic_load_n(&buf->refcnt,
+                                           __ATOMIC_RELAXED) > 1) {
                                mprq_buf_replace(rxq, rq_ci & wq_mask, strd_n);
                                /* Release the old buffer. */
                                mlx5_mprq_buf_free(buf);
@@ -1821,9 +1823,9 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        void *buf_addr;
 
                        /* Increment the refcnt of the whole chunk. */
-                       rte_atomic16_add_return(&buf->refcnt, 1);
-                       MLX5_ASSERT((uint16_t)rte_atomic16_read(&buf->refcnt) <=
-                                   strd_n + 1);
+                       __atomic_add_fetch(&buf->refcnt, 1, __ATOMIC_RELAXED);
+                       MLX5_ASSERT(__atomic_load_n(&buf->refcnt,
+                                   __ATOMIC_RELAXED) <= strd_n + 1);
                        buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
                        /*
                         * MLX5 device doesn't use iova but it is necessary in a
index 6876c1b..9ffa028 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -68,7 +68,7 @@ struct rxq_zip {
 /* Multi-Packet RQ buffer header. */
 struct mlx5_mprq_buf {
        struct rte_mempool *mp;
-       rte_atomic16_t refcnt; /* Atomically accessed refcnt. */
+       uint16_t refcnt; /* Atomically accessed refcnt. */
        uint8_t pad[RTE_PKTMBUF_HEADROOM]; /* Headroom for the first packet. */
        struct rte_mbuf_ext_shared_info shinfos[];
        /*