[dpdk.git] lib/librte_stack/rte_stack_lf_c11.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#ifndef _RTE_STACK_LF_C11_H_
#define _RTE_STACK_LF_C11_H_

#include <rte_branch_prediction.h>
#include <rte_prefetch.h>

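/* Internal helpers implementing the DPDK lock-free stack with C11
 * (__atomic) memory-model builtins. This header is not meant to be
 * included directly; it is pulled in by the rte_stack headers (in builds
 * of this vintage, when RTE_USE_C11_MEM_MODEL is set, with
 * rte_stack_lf_generic.h as the fallback).
 */
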
static __rte_always_inline unsigned int
__rte_stack_lf_count(struct rte_stack *s)
{
	/* stack_lf_push() and stack_lf_pop() do not update the list's contents
	 * and stack_lf->len atomically, which can cause the list to appear
	 * shorter than it actually is if this function is called while other
	 * threads are modifying the list.
	 *
	 * However, given the inherently approximate nature of the get_count
	 * callback -- even if the list and its size were updated atomically,
	 * the size could change between when get_count executes and when the
	 * value is returned to the caller -- this is acceptable.
	 *
	 * The stack_lf->len updates are placed such that the list may appear to
	 * have fewer elements than it does, but will never appear to have more
	 * elements. If the mempool is near-empty to the point that this is a
	 * concern, the user should consider increasing the mempool size.
	 */
	return (unsigned int)__atomic_load_n(&s->stack_lf.used.len.cnt,
					     __ATOMIC_RELAXED);
}

static __rte_always_inline void
__rte_stack_lf_push_elems(struct rte_stack_lf_list *list,
			  struct rte_stack_lf_elem *first,
			  struct rte_stack_lf_elem *last,
			  unsigned int num)
{
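	/* The lock-free head update needs a 128-bit (pointer + counter)
	 * compare-exchange; in this version that path is only wired up for
	 * x86_64, so other architectures get an empty stub below.
	 */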
#ifndef RTE_ARCH_X86_64
	RTE_SET_USED(first);
	RTE_SET_USED(last);
	RTE_SET_USED(list);
	RTE_SET_USED(num);
#else
	struct rte_stack_lf_head old_head;
	int success;

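	/* As in __rte_stack_lf_pop_elems(), this non-atomic read of
	 * list->head may be torn; if so, the 128-bit CAS below fails and
	 * writes the correct/latest value back into old_head.
	 */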
	old_head = list->head;

	do {
		struct rte_stack_lf_head new_head;

		/* Use an acquire fence to establish a synchronized-with
		 * relationship between the list->head load and the
		 * store-release performed as part of
		 * rte_atomic128_cmp_exchange().
		 */
		__atomic_thread_fence(__ATOMIC_ACQUIRE);

		/* Swing the top pointer to the first element in the list and
		 * make the last element point to the old top.
		 */
		new_head.top = first;
		new_head.cnt = old_head.cnt + 1;

		last->next = old_head.top;

		/* Use the release memory order to ensure the writes to the
		 * LF LIFO elements are visible before the head pointer write.
		 */
		success = rte_atomic128_cmp_exchange(
				(rte_int128_t *)&list->head,
				(rte_int128_t *)&old_head,
				(rte_int128_t *)&new_head,
				1, __ATOMIC_RELEASE,
				__ATOMIC_RELAXED);
	} while (success == 0);

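	/* Note: the release increment below pairs with the acquire
	 * operations on list->len in __rte_stack_lf_pop_elems(), so a
	 * consumer that observes the new count also observes the element
	 * writes made above.
	 */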
	/* Ensure the stack modifications are not reordered with respect
	 * to the LIFO len update.
	 */
	__atomic_add_fetch(&list->len.cnt, num, __ATOMIC_RELEASE);
#endif
}

static __rte_always_inline struct rte_stack_lf_elem *
__rte_stack_lf_pop_elems(struct rte_stack_lf_list *list,
			 unsigned int num,
			 void **obj_table,
			 struct rte_stack_lf_elem **last)
{
#ifndef RTE_ARCH_X86_64
	RTE_SET_USED(obj_table);
	RTE_SET_USED(last);
	RTE_SET_USED(list);
	RTE_SET_USED(num);

	return NULL;
#else
	struct rte_stack_lf_head old_head;
	uint64_t len;
	int success;

	/* Reserve num elements, if available */
	len = __atomic_load_n(&list->len.cnt, __ATOMIC_ACQUIRE);

	while (1) {
		/* Does the list contain enough elements? */
		if (unlikely(len < num))
			return NULL;

		/* On failure, the CAS refreshes len with the current
		 * value, so simply retry.
		 */
		if (__atomic_compare_exchange_n(&list->len.cnt,
						&len, len - num,
						0, __ATOMIC_ACQUIRE,
						__ATOMIC_ACQUIRE))
			break;
	}
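	/* At this point num elements are reserved for this thread: the
	 * traversal below may have to retry if the head changes underneath
	 * it, but it cannot run out of elements permanently.
	 */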

	/* If a torn read occurs, the CAS will fail and set old_head to the
	 * correct/latest value.
	 */
	old_head = list->head;

	/* Pop num elements */
	do {
		struct rte_stack_lf_head new_head;
		struct rte_stack_lf_elem *tmp;
		unsigned int i;

		/* Use an acquire fence to ensure the reads of the LF LIFO
		 * elements are properly ordered with respect to the head
		 * pointer read.
		 */
		__atomic_thread_fence(__ATOMIC_ACQUIRE);

		rte_prefetch0(old_head.top);

		tmp = old_head.top;

		/* Traverse the list to find the new head. A next pointer will
		 * either point to another element or NULL; if a thread
		 * encounters a pointer that has already been popped, the CAS
		 * will fail.
		 */
		for (i = 0; i < num && tmp != NULL; i++) {
			rte_prefetch0(tmp->next);
			if (obj_table)
				obj_table[i] = tmp->data;
			if (last)
				*last = tmp;
			tmp = tmp->next;
		}

		/* If NULL was encountered, the list was modified while
		 * traversing it. Retry.
		 */
		if (i != num)
			continue;

		new_head.top = tmp;
		new_head.cnt = old_head.cnt + 1;

		success = rte_atomic128_cmp_exchange(
				(rte_int128_t *)&list->head,
				(rte_int128_t *)&old_head,
				(rte_int128_t *)&new_head,
				1, __ATOMIC_RELEASE,
				__ATOMIC_RELAXED);
	} while (success == 0);

	return old_head.top;
#endif
}
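
/* Illustrative caller pattern (a sketch only; the real wrappers live in
 * the rte_stack lock-free layer and their exact shape may differ): a push
 * claims n free-list elements, fills in their data pointers, and then
 * publishes them on the used list.
 *
 *	struct rte_stack_lf_elem *first, *last = NULL;
 *
 *	first = __rte_stack_lf_pop_elems(&s->stack_lf.free, n, NULL, &last);
 *	if (first == NULL)
 *		return 0;
 *	// ...store each obj_table[] pointer in an element's data field...
 *	__rte_stack_lf_push_elems(&s->stack_lf.used, first, last, n);
 *	return n;
 */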

#endif /* _RTE_STACK_LF_C11_H_ */