rwlock: reimplement with atomic builtins
author Joyce Kong <joyce.kong@arm.com>
Mon, 25 Mar 2019 09:14:57 +0000 (17:14 +0800)
committer Thomas Monjalon <thomas@monjalon.net>
Thu, 28 Mar 2019 10:47:05 +0000 (11:47 +0100)
The __sync builtin-based implementation generates full memory
barriers ('dmb ish') on Arm platforms. Use C11 atomic builtins
instead to generate one-way barriers.

Here is the assembly code generated for the __sync_bool_compare_and_swap builtin:
__sync_bool_compare_and_swap(dst, exp, src);
   0x000000000090f1b0 <+16>:    e0 07 40 f9 ldr x0, [sp, #8]
   0x000000000090f1b4 <+20>:    e1 0f 40 79 ldrh    w1, [sp, #6]
   0x000000000090f1b8 <+24>:    e2 0b 40 79 ldrh    w2, [sp, #4]
   0x000000000090f1bc <+28>:    21 3c 00 12 and w1, w1, #0xffff
   0x000000000090f1c0 <+32>:    03 7c 5f 48 ldxrh   w3, [x0]
   0x000000000090f1c4 <+36>:    7f 00 01 6b cmp w3, w1
   0x000000000090f1c8 <+40>:    61 00 00 54 b.ne    0x90f1d4
<rte_atomic16_cmpset+52>  // b.any
   0x000000000090f1cc <+44>:    02 fc 04 48 stlxrh  w4, w2, [x0]
   0x000000000090f1d0 <+48>:    84 ff ff 35 cbnz    w4, 0x90f1c0 <rte_atomic16_cmpset+32>
   0x000000000090f1d4 <+52>:    bf 3b 03 d5 dmb ish
   0x000000000090f1d8 <+56>:    e0 17 9f 1a cset    w0, eq  // eq = none
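
Below is a minimal standalone sketch (not part of the patch; the helper
name is illustrative) of the C11 builtin pattern adopted in the diff.
With GCC on AArch64, an acquire compare-and-swap like this is expected
to compile to an exclusive load-acquire/store pair with no trailing
'dmb ish':

   #include <stdint.h>

   /* Weak CAS: acquire ordering on success, relaxed on failure. */
   static inline int
   cmpset16_acquire(uint16_t *dst, uint16_t exp, uint16_t src)
   {
           return __atomic_compare_exchange_n(dst, &exp, src, 1,
                           __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
   }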

Fixes: af75078fece3 ("first public release")
Cc: stable@dpdk.org
Signed-off-by: Gavin Hu <gavin.hu@arm.com>
Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Tested-by: Joyce Kong <joyce.kong@arm.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
lib/librte_eal/common/include/generic/rte_rwlock.h

index b05d85a..31608fa 100644
@@ -64,14 +64,14 @@ rte_rwlock_read_lock(rte_rwlock_t *rwl)
        int success = 0;
 
        while (success == 0) {
-               x = rwl->cnt;
+               x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
                /* write lock is held */
                if (x < 0) {
                        rte_pause();
                        continue;
                }
-               success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt,
-                                             (uint32_t)x, (uint32_t)(x + 1));
+               success = __atomic_compare_exchange_n(&rwl->cnt, &x, x + 1, 1,
+                                       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
        }
 }
 
@@ -95,13 +95,14 @@ rte_rwlock_read_trylock(rte_rwlock_t *rwl)
        int success = 0;
 
        while (success == 0) {
-               x = rwl->cnt;
+               x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
                /* write lock is held */
                if (x < 0)
                        return -EBUSY;
-               success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt,
-                                             (uint32_t)x, (uint32_t)(x + 1));
+               success = __atomic_compare_exchange_n(&rwl->cnt, &x, x + 1, 1,
+                                       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
        }
+
        return 0;
 }
 
@@ -114,7 +115,7 @@ rte_rwlock_read_trylock(rte_rwlock_t *rwl)
 static inline void
 rte_rwlock_read_unlock(rte_rwlock_t *rwl)
 {
-       rte_atomic32_dec((rte_atomic32_t *)(intptr_t)&rwl->cnt);
+       __atomic_fetch_sub(&rwl->cnt, 1, __ATOMIC_RELEASE);
 }
 
 /**
@@ -135,9 +136,9 @@ rte_rwlock_write_trylock(rte_rwlock_t *rwl)
 {
        int32_t x;
 
-       x = rwl->cnt;
-       if (x != 0 || rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt,
-                               0, (uint32_t)-1) == 0)
+       x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
+       if (x != 0 || __atomic_compare_exchange_n(&rwl->cnt, &x, -1, 1,
+                             __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) == 0)
                return -EBUSY;
 
        return 0;
@@ -156,14 +157,14 @@ rte_rwlock_write_lock(rte_rwlock_t *rwl)
        int success = 0;
 
        while (success == 0) {
-               x = rwl->cnt;
+               x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
                /* a lock is held */
                if (x != 0) {
                        rte_pause();
                        continue;
                }
-               success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt,
-                                             0, (uint32_t)-1);
+               success = __atomic_compare_exchange_n(&rwl->cnt, &x, -1, 1,
+                                       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
        }
 }
 
@@ -176,7 +177,7 @@ rte_rwlock_write_lock(rte_rwlock_t *rwl)
 static inline void
 rte_rwlock_write_unlock(rte_rwlock_t *rwl)
 {
-       rte_atomic32_inc((rte_atomic32_t *)(intptr_t)&rwl->cnt);
+       __atomic_store_n(&rwl->cnt, 0, __ATOMIC_RELEASE);
 }
 
 /**