lib/distributor/rte_distributor_single.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <sys/queue.h>
#include <string.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>
#include <rte_pause.h>
#include <rte_tailq.h>

#include "rte_distributor_single.h"
#include "distributor_private.h"

TAILQ_HEAD(rte_distributor_list, rte_distributor_single);

static struct rte_tailq_elem rte_distributor_tailq = {
        .name = "RTE_DISTRIBUTOR",
};
EAL_REGISTER_TAILQ(rte_distributor_tailq)

/**** APIs called by workers ****/

void
rte_distributor_request_pkt_single(struct rte_distributor_single *d,
                unsigned worker_id, struct rte_mbuf *oldpkt)
{
        union rte_distributor_buffer_single *buf = &d->bufs[worker_id];
        int64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
                        | RTE_DISTRIB_GET_BUF;
        while (unlikely(__atomic_load_n(&buf->bufptr64, __ATOMIC_RELAXED)
                        & RTE_DISTRIB_FLAGS_MASK))
                rte_pause();

        /* Sync with distributor on GET_BUF flag. */
        __atomic_store_n(&(buf->bufptr64), req, __ATOMIC_RELEASE);
}

struct rte_mbuf *
rte_distributor_poll_pkt_single(struct rte_distributor_single *d,
                unsigned worker_id)
{
        union rte_distributor_buffer_single *buf = &d->bufs[worker_id];
        /* Sync with distributor. Acquire bufptr64. */
        if (__atomic_load_n(&buf->bufptr64, __ATOMIC_ACQUIRE)
                & RTE_DISTRIB_GET_BUF)
                return NULL;

        /* since bufptr64 is signed, this should be an arithmetic shift */
        int64_t ret = buf->bufptr64 >> RTE_DISTRIB_FLAG_BITS;
        return (struct rte_mbuf *)((uintptr_t)ret);
}
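
/*
 * Illustrative sketch, not part of the upstream file: the request/poll pair
 * above lets a worker ask for its next packet and keep doing useful work
 * until one is ready, instead of blocking inside
 * rte_distributor_get_pkt_single() (defined below). app_do_other_work() is
 * a hypothetical application helper.
 */
#if 0
static struct rte_mbuf *
example_nonblocking_fetch(struct rte_distributor_single *d,
                unsigned int worker_id, struct rte_mbuf *oldpkt)
{
        struct rte_mbuf *pkt;

        /* hand back the previous packet and ask for a new one */
        rte_distributor_request_pkt_single(d, worker_id, oldpkt);

        /* do other work until the distributor publishes the next packet */
        while ((pkt = rte_distributor_poll_pkt_single(d, worker_id)) == NULL)
                app_do_other_work();    /* hypothetical useful work */

        return pkt;
}
#endif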

struct rte_mbuf *
rte_distributor_get_pkt_single(struct rte_distributor_single *d,
                unsigned worker_id, struct rte_mbuf *oldpkt)
{
        struct rte_mbuf *ret;
        rte_distributor_request_pkt_single(d, worker_id, oldpkt);
        while ((ret = rte_distributor_poll_pkt_single(d, worker_id)) == NULL)
                rte_pause();
        return ret;
}

int
rte_distributor_return_pkt_single(struct rte_distributor_single *d,
                unsigned worker_id, struct rte_mbuf *oldpkt)
{
        union rte_distributor_buffer_single *buf = &d->bufs[worker_id];
        uint64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
                        | RTE_DISTRIB_RETURN_BUF;
        while (unlikely(__atomic_load_n(&buf->bufptr64, __ATOMIC_RELAXED)
                        & RTE_DISTRIB_FLAGS_MASK))
                rte_pause();

        /* Sync with distributor on RETURN_BUF flag. */
        __atomic_store_n(&(buf->bufptr64), req, __ATOMIC_RELEASE);
        return 0;
}
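
/*
 * Illustrative sketch, not part of the upstream file: a typical worker lcore
 * loops on rte_distributor_get_pkt_single(), handing the previously received
 * mbuf back each time, and uses rte_distributor_return_pkt_single() to give
 * up its last packet when it stops. app_running and app_handle_packet() are
 * hypothetical application names.
 */
#if 0
static int
example_worker_loop(struct rte_distributor_single *d, unsigned int worker_id)
{
        struct rte_mbuf *pkt = NULL;

        while (app_running) {
                /* blocks until the distributor hands this worker a packet;
                 * the previous packet is returned to the distributor */
                pkt = rte_distributor_get_pkt_single(d, worker_id, pkt);
                app_handle_packet(pkt);         /* hypothetical per-packet work */
        }
        /* on shutdown, return the last packet and signal RETURN_BUF */
        return rte_distributor_return_pkt_single(d, worker_id, pkt);
}
#endif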

/**** APIs called on distributor core ***/

/* as name suggests, adds a packet to the backlog for a particular worker */
static int
add_to_backlog(struct rte_distributor_backlog *bl, int64_t item)
{
        if (bl->count == RTE_DISTRIB_BACKLOG_SIZE)
                return -1;

        bl->pkts[(bl->start + bl->count++) & (RTE_DISTRIB_BACKLOG_MASK)]
                        = item;
        return 0;
}

/* takes the next packet for a worker off the backlog */
static int64_t
backlog_pop(struct rte_distributor_backlog *bl)
{
        bl->count--;
        return bl->pkts[bl->start++ & RTE_DISTRIB_BACKLOG_MASK];
}

/* stores a packet returned from a worker inside the returns array */
static inline void
store_return(uintptr_t oldbuf, struct rte_distributor_single *d,
                unsigned *ret_start, unsigned *ret_count)
{
        /* store returns in a circular buffer - code is branch-free */
        d->returns.mbufs[(*ret_start + *ret_count) & RTE_DISTRIB_RETURNS_MASK]
                        = (void *)oldbuf;
        *ret_start += (*ret_count == RTE_DISTRIB_RETURNS_MASK) & !!(oldbuf);
        *ret_count += (*ret_count != RTE_DISTRIB_RETURNS_MASK) & !!(oldbuf);
}

static inline void
handle_worker_shutdown(struct rte_distributor_single *d, unsigned int wkr)
{
        d->in_flight_tags[wkr] = 0;
        d->in_flight_bitmask &= ~(1UL << wkr);
        /* Sync with worker. Release bufptr64. */
        __atomic_store_n(&(d->bufs[wkr].bufptr64), 0, __ATOMIC_RELEASE);
        if (unlikely(d->backlog[wkr].count != 0)) {
                /* On return of a packet, we need to move the
                 * queued packets for this core elsewhere.
                 * Easiest solution is to set things up for
                 * a recursive call. That will cause those
                 * packets to be queued up for the next free
                 * core, i.e. it will return as soon as a
                 * core becomes free to accept the first
                 * packet, as subsequent ones will be added to
                 * the backlog for that core.
                 */
                struct rte_mbuf *pkts[RTE_DISTRIB_BACKLOG_SIZE];
                unsigned i;
                struct rte_distributor_backlog *bl = &d->backlog[wkr];

                for (i = 0; i < bl->count; i++) {
                        unsigned idx = (bl->start + i) &
                                        RTE_DISTRIB_BACKLOG_MASK;
                        pkts[i] = (void *)((uintptr_t)(bl->pkts[idx] >>
                                        RTE_DISTRIB_FLAG_BITS));
                }
                /* recursive call.
                 * Note that the tags were set before first level call
                 * to rte_distributor_process.
                 */
                rte_distributor_process_single(d, pkts, i);
                bl->count = bl->start = 0;
        }
}

/* This function is called when process() is called without any new
 * packets. It goes through all the workers and clears any returned packets
 * to do a partial flush.
 */
static int
process_returns(struct rte_distributor_single *d)
{
        unsigned wkr;
        unsigned flushed = 0;
        unsigned ret_start = d->returns.start,
                        ret_count = d->returns.count;

        for (wkr = 0; wkr < d->num_workers; wkr++) {
                uintptr_t oldbuf = 0;
                /* Sync with worker. Acquire bufptr64. */
                const int64_t data = __atomic_load_n(&(d->bufs[wkr].bufptr64),
                                                        __ATOMIC_ACQUIRE);

                if (data & RTE_DISTRIB_GET_BUF) {
                        flushed++;
                        if (d->backlog[wkr].count)
                                /* Sync with worker. Release bufptr64. */
                                __atomic_store_n(&(d->bufs[wkr].bufptr64),
                                        backlog_pop(&d->backlog[wkr]),
                                        __ATOMIC_RELEASE);
                        else {
                                /* Sync with worker on GET_BUF flag. */
                                __atomic_store_n(&(d->bufs[wkr].bufptr64),
                                        RTE_DISTRIB_GET_BUF,
                                        __ATOMIC_RELEASE);
                                d->in_flight_tags[wkr] = 0;
                                d->in_flight_bitmask &= ~(1UL << wkr);
                        }
                        oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
                } else if (data & RTE_DISTRIB_RETURN_BUF) {
                        handle_worker_shutdown(d, wkr);
                        oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
                }

                store_return(oldbuf, d, &ret_start, &ret_count);
        }

        d->returns.start = ret_start;
        d->returns.count = ret_count;

        return flushed;
}

/* process a set of packets to distribute them to workers */
int
rte_distributor_process_single(struct rte_distributor_single *d,
                struct rte_mbuf **mbufs, unsigned num_mbufs)
{
        unsigned next_idx = 0;
        unsigned wkr = 0;
        struct rte_mbuf *next_mb = NULL;
        int64_t next_value = 0;
        uint32_t new_tag = 0;
        unsigned ret_start = d->returns.start,
                        ret_count = d->returns.count;

        if (unlikely(num_mbufs == 0))
                return process_returns(d);

        while (next_idx < num_mbufs || next_mb != NULL) {
                uintptr_t oldbuf = 0;
                /* Sync with worker. Acquire bufptr64. */
                int64_t data = __atomic_load_n(&(d->bufs[wkr].bufptr64),
                                                __ATOMIC_ACQUIRE);

                if (!next_mb) {
                        next_mb = mbufs[next_idx++];
                        next_value = (((int64_t)(uintptr_t)next_mb)
                                        << RTE_DISTRIB_FLAG_BITS);
                        /*
                         * The user is expected to set the tag value for
                         * each mbuf before calling rte_distributor_process.
                         * User-defined tags are used to identify flows
                         * or sessions.
                         */
                        new_tag = next_mb->hash.usr;

                        /*
                         * Note that if RTE_DISTRIB_MAX_WORKERS is larger than 64
                         * then the size of match has to be expanded.
                         */
                        uint64_t match = 0;
                        unsigned i;
                        /*
                         * to scan for a match use "xor" and "not" to get a 0/1
                         * value, then use shifting to merge to single "match"
                         * variable, where a one-bit indicates a match for the
                         * worker given by the bit-position
                         */
                        for (i = 0; i < d->num_workers; i++)
                                match |= (!(d->in_flight_tags[i] ^ new_tag)
                                        << i);

                        /* Only turned-on bits are considered as match */
                        match &= d->in_flight_bitmask;

                        if (match) {
                                next_mb = NULL;
                                unsigned worker = __builtin_ctzl(match);
                                if (add_to_backlog(&d->backlog[worker],
                                                next_value) < 0)
                                        next_idx--;
                        }
                }

                if ((data & RTE_DISTRIB_GET_BUF) &&
                                (d->backlog[wkr].count || next_mb)) {

                        if (d->backlog[wkr].count)
                                /* Sync with worker. Release bufptr64. */
                                __atomic_store_n(&(d->bufs[wkr].bufptr64),
                                                backlog_pop(&d->backlog[wkr]),
                                                __ATOMIC_RELEASE);

                        else {
                                /* Sync with worker. Release bufptr64. */
                                __atomic_store_n(&(d->bufs[wkr].bufptr64),
                                                next_value,
                                                __ATOMIC_RELEASE);
                                d->in_flight_tags[wkr] = new_tag;
                                d->in_flight_bitmask |= (1UL << wkr);
                                next_mb = NULL;
                        }
                        oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
                } else if (data & RTE_DISTRIB_RETURN_BUF) {
                        handle_worker_shutdown(d, wkr);
                        oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
                }

                /* store returns in a circular buffer */
                store_return(oldbuf, d, &ret_start, &ret_count);

                if (++wkr == d->num_workers)
                        wkr = 0;
        }
        /* to finish, check all workers for backlog and schedule work for them
         * if they are ready */
        for (wkr = 0; wkr < d->num_workers; wkr++)
                if (d->backlog[wkr].count &&
                                /* Sync with worker. Acquire bufptr64. */
                                (__atomic_load_n(&(d->bufs[wkr].bufptr64),
                                __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)) {

                        int64_t oldbuf = d->bufs[wkr].bufptr64 >>
                                        RTE_DISTRIB_FLAG_BITS;

                        store_return(oldbuf, d, &ret_start, &ret_count);

                        /* Sync with worker. Release bufptr64. */
                        __atomic_store_n(&(d->bufs[wkr].bufptr64),
                                backlog_pop(&d->backlog[wkr]),
                                __ATOMIC_RELEASE);
                }

        d->returns.start = ret_start;
        d->returns.count = ret_count;
        return num_mbufs;
}
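
/*
 * Illustrative sketch, not part of the upstream file: a distributor core
 * takes a burst of mbufs from some RX path, sets the tag in hash.usr for
 * each one (as required by rte_distributor_process_single()), hands the
 * burst to the distributor and then harvests any packets the workers have
 * finished with. Assumes <rte_ethdev.h> and a configured port/queue;
 * BURST_SIZE, app_tag_of() and app_tx_or_free() are hypothetical
 * application details.
 */
#if 0
static void
example_distributor_loop(struct rte_distributor_single *d, uint16_t port_id)
{
        struct rte_mbuf *bufs[BURST_SIZE];
        struct rte_mbuf *done[BURST_SIZE];
        uint16_t nb_rx, i;
        int nb_done;

        for (;;) {
                nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST_SIZE);

                /* tag each mbuf so packets of one flow go to one worker */
                for (i = 0; i < nb_rx; i++)
                        bufs[i]->hash.usr = app_tag_of(bufs[i]);

                rte_distributor_process_single(d, bufs, nb_rx);

                /* collect packets the workers have handed back */
                nb_done = rte_distributor_returned_pkts_single(d, done,
                                BURST_SIZE);
                app_tx_or_free(done, nb_done);
        }
}
#endif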

/* return to the caller, packets returned from workers */
int
rte_distributor_returned_pkts_single(struct rte_distributor_single *d,
                struct rte_mbuf **mbufs, unsigned max_mbufs)
{
        struct rte_distributor_returned_pkts *returns = &d->returns;
        unsigned retval = (max_mbufs < returns->count) ?
                        max_mbufs : returns->count;
        unsigned i;

        for (i = 0; i < retval; i++) {
                unsigned idx = (returns->start + i) & RTE_DISTRIB_RETURNS_MASK;
                mbufs[i] = returns->mbufs[idx];
        }
        returns->start += i;
        returns->count -= i;

        return retval;
}

/* return the number of packets in-flight in a distributor, i.e. packets
 * being worked on or queued up in a backlog.
 */
static inline unsigned
total_outstanding(const struct rte_distributor_single *d)
{
        unsigned wkr, total_outstanding;

        total_outstanding = __builtin_popcountl(d->in_flight_bitmask);

        for (wkr = 0; wkr < d->num_workers; wkr++)
                total_outstanding += d->backlog[wkr].count;

        return total_outstanding;
}

/* flush the distributor, so that there are no outstanding packets in flight or
 * queued up. */
int
rte_distributor_flush_single(struct rte_distributor_single *d)
{
        const unsigned flushed = total_outstanding(d);

        while (total_outstanding(d) > 0)
                rte_distributor_process_single(d, NULL, 0);

        return flushed;
}

/* clears the internal returns array in the distributor */
void
rte_distributor_clear_returns_single(struct rte_distributor_single *d)
{
        d->returns.start = d->returns.count = 0;
#ifndef __OPTIMIZE__
        memset(d->returns.mbufs, 0, sizeof(d->returns.mbufs));
#endif
}
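
/*
 * Illustrative sketch, not part of the upstream file: to quiesce the
 * distributor, e.g. before reconfiguring or shutting down, flush any
 * in-flight or backlogged packets, drain what the workers handed back, and
 * then clear the returns array. BURST_SIZE and app_tx_or_free() are
 * hypothetical application details.
 */
#if 0
static void
example_quiesce(struct rte_distributor_single *d)
{
        struct rte_mbuf *done[BURST_SIZE];
        int nb_done;

        /* push all outstanding packets through the workers */
        rte_distributor_flush_single(d);

        /* hand the completed packets back to the application */
        while ((nb_done = rte_distributor_returned_pkts_single(d, done,
                        BURST_SIZE)) > 0)
                app_tx_or_free(done, nb_done);

        /* discard anything still recorded in the returns ring */
        rte_distributor_clear_returns_single(d);
}
#endif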

/* creates a distributor instance */
struct rte_distributor_single *
rte_distributor_create_single(const char *name,
                unsigned socket_id,
                unsigned num_workers)
{
        struct rte_distributor_single *d;
        struct rte_distributor_list *distributor_list;
        char mz_name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz;

        /* compilation-time checks */
        RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0);
        RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);
        RTE_BUILD_BUG_ON(RTE_DISTRIB_MAX_WORKERS >
                                sizeof(d->in_flight_bitmask) * CHAR_BIT);

        if (name == NULL || num_workers >= RTE_DISTRIB_MAX_WORKERS) {
                rte_errno = EINVAL;
                return NULL;
        }

        snprintf(mz_name, sizeof(mz_name), RTE_DISTRIB_PREFIX"%s", name);
        mz = rte_memzone_reserve(mz_name, sizeof(*d), socket_id, NO_FLAGS);
        if (mz == NULL) {
                rte_errno = ENOMEM;
                return NULL;
        }

        d = mz->addr;
        strlcpy(d->name, name, sizeof(d->name));
        d->num_workers = num_workers;

        distributor_list = RTE_TAILQ_CAST(rte_distributor_tailq.head,
                                          rte_distributor_list);

        rte_mcfg_tailq_write_lock();
        TAILQ_INSERT_TAIL(distributor_list, d, next);
        rte_mcfg_tailq_write_unlock();

        return d;
}
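
/*
 * Illustrative sketch, not part of the upstream file: creating a
 * single-packet distributor on the local NUMA socket. "example_dist" is an
 * arbitrary name; num_workers must stay below RTE_DISTRIB_MAX_WORKERS.
 * Worker lcores would then be started separately (e.g. with
 * rte_eal_remote_launch()), each running something like
 * example_worker_loop() above with its own worker_id in [0, num_workers).
 */
#if 0
static struct rte_distributor_single *
example_setup(unsigned int num_workers)
{
        struct rte_distributor_single *d;

        /* reserves a memzone on this socket and registers the instance */
        d = rte_distributor_create_single("example_dist", rte_socket_id(),
                        num_workers);
        if (d == NULL)
                rte_exit(EXIT_FAILURE, "cannot create distributor: %d\n",
                                rte_errno);

        return d;
}
#endif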