stack: fix sign of list length
Author:     Phil Yang <phil.yang@arm.com>
AuthorDate: Mon, 17 Jun 2019 07:41:30 +0000 (15:41 +0800)
Commit:     Thomas Monjalon <thomas@monjalon.net>
CommitDate: Thu, 27 Jun 2019 16:00:59 +0000 (18:00 +0200)
clang raises a 'pointer-sign' warning in __atomic_compare_exchange:
passing 'uint64_t *' to a parameter of type 'int64_t *' converts
between pointers to integer types with different sign.

Fix this by declaring the list length as a plain uint64_t, which the
C11 implementation can operate on directly; the generic implementation
casts it to rte_atomic64_t * where the legacy atomic API is still used.
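For reference, a minimal standalone sketch of the pattern that triggers
the warning (names are hypothetical, not taken from the patch):

    #include <stdint.h>

    /* Stand-in for the old signed counter (rte_atomic64_t's int64_t cnt). */
    static int64_t list_len;

    static int
    reserve(uint64_t num)
    {
            uint64_t len = __atomic_load_n(&list_len, __ATOMIC_ACQUIRE);

            /* &list_len is 'int64_t *' while &len is 'uint64_t *', so
             * clang emits -Wpointer-sign for the expected-value argument.
             */
            return __atomic_compare_exchange_n(&list_len, &len, len - num,
                                               0, __ATOMIC_ACQUIRE,
                                               __ATOMIC_ACQUIRE);
    }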

Fixes: 7e6e609939a8 ("stack: add C11 atomic implementation")
Cc: stable@dpdk.org
Suggested-by: Gage Eads <gage.eads@intel.com>
Signed-off-by: Phil Yang <phil.yang@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Gavin Hu <gavin.hu@arm.com>
Acked-by: Gage Eads <gage.eads@intel.com>
lib/librte_stack/rte_stack.h
lib/librte_stack/rte_stack_lf_c11.h
lib/librte_stack/rte_stack_lf_generic.h

diff --git a/lib/librte_stack/rte_stack.h b/lib/librte_stack/rte_stack.h
index fe048f0..93c360c 100644
--- a/lib/librte_stack/rte_stack.h
+++ b/lib/librte_stack/rte_stack.h
@@ -47,7 +47,7 @@ struct rte_stack_lf_list {
        /** List head */
        struct rte_stack_lf_head head __rte_aligned(16);
        /** List len */
-       rte_atomic64_t len;
+       uint64_t len;
 };
 
 /* Structure containing two lock-free LIFO lists: the stack itself and a list
diff --git a/lib/librte_stack/rte_stack_lf_c11.h b/lib/librte_stack/rte_stack_lf_c11.h
index a316e9a..3d677ae 100644
--- a/lib/librte_stack/rte_stack_lf_c11.h
+++ b/lib/librte_stack/rte_stack_lf_c11.h
@@ -26,7 +26,7 @@ __rte_stack_lf_count(struct rte_stack *s)
         * elements. If the mempool is near-empty to the point that this is a
         * concern, the user should consider increasing the mempool size.
         */
-       return (unsigned int)__atomic_load_n(&s->stack_lf.used.len.cnt,
+       return (unsigned int)__atomic_load_n(&s->stack_lf.used.len,
                                             __ATOMIC_RELAXED);
 }
 
@@ -78,7 +78,7 @@ __rte_stack_lf_push_elems(struct rte_stack_lf_list *list,
        /* Ensure the stack modifications are not reordered with respect
         * to the LIFO len update.
         */
-       __atomic_add_fetch(&list->len.cnt, num, __ATOMIC_RELEASE);
+       __atomic_add_fetch(&list->len, num, __ATOMIC_RELEASE);
 #endif
 }
 
@@ -101,7 +101,7 @@ __rte_stack_lf_pop_elems(struct rte_stack_lf_list *list,
        int success;
 
        /* Reserve num elements, if available */
-       len = __atomic_load_n(&list->len.cnt, __ATOMIC_ACQUIRE);
+       len = __atomic_load_n(&list->len, __ATOMIC_ACQUIRE);
 
        while (1) {
                /* Does the list contain enough elements? */
@@ -109,7 +109,7 @@ __rte_stack_lf_pop_elems(struct rte_stack_lf_list *list,
                        return NULL;
 
                /* len is updated on failure */
-               if (__atomic_compare_exchange_n(&list->len.cnt,
+               if (__atomic_compare_exchange_n(&list->len,
                                                &len, len - num,
                                                0, __ATOMIC_ACQUIRE,
                                                __ATOMIC_ACQUIRE))
diff --git a/lib/librte_stack/rte_stack_lf_generic.h b/lib/librte_stack/rte_stack_lf_generic.h
index 1191406..3182151 100644
--- a/lib/librte_stack/rte_stack_lf_generic.h
+++ b/lib/librte_stack/rte_stack_lf_generic.h
@@ -26,7 +26,8 @@ __rte_stack_lf_count(struct rte_stack *s)
         * elements. If the mempool is near-empty to the point that this is a
         * concern, the user should consider increasing the mempool size.
         */
-       return (unsigned int)rte_atomic64_read(&s->stack_lf.used.len);
+       return (unsigned int)rte_atomic64_read((rte_atomic64_t *)
+                       &s->stack_lf.used.len);
 }
 
 static __rte_always_inline void
@@ -73,7 +74,7 @@ __rte_stack_lf_push_elems(struct rte_stack_lf_list *list,
                                __ATOMIC_RELAXED);
        } while (success == 0);
 
-       rte_atomic64_add(&list->len, num);
+       rte_atomic64_add((rte_atomic64_t *)&list->len, num);
 #endif
 }
 
@@ -96,7 +97,7 @@ __rte_stack_lf_pop_elems(struct rte_stack_lf_list *list,
 
        /* Reserve num elements, if available */
        while (1) {
-               uint64_t len = rte_atomic64_read(&list->len);
+               uint64_t len = rte_atomic64_read((rte_atomic64_t *)&list->len);
 
                /* Does the list contain enough elements? */
                if (unlikely(len < num))
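
A note on the generic hunks above: they reinterpret the uint64_t length
as an rte_atomic64_t (in DPDK, a struct wrapping a volatile int64_t
counter), so the cast assumes both objects have the same size. A
build-time check of that assumption could look like the following
sketch; it is only an illustration and not part of this patch:

    #include <stdint.h>
    #include <rte_atomic.h>

    /* Fail the build if the cast in rte_stack_lf_generic.h would
     * reinterpret objects of different size.
     */
    _Static_assert(sizeof(rte_atomic64_t) == sizeof(uint64_t),
                   "stack LF list len must be castable to rte_atomic64_t");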