/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <sys/queue.h>
#include <string.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_compat.h>
#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>
#include "rte_distributor_v20.h"
#include "rte_distributor_private.h"

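/* Every distributor instance created below is linked onto a global tailq
 * that is registered with EAL, so instances can be tracked process-wide. */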
TAILQ_HEAD(rte_distributor_list, rte_distributor_v20);

static struct rte_tailq_elem rte_distributor_tailq = {
	.name = "RTE_DISTRIBUTOR",
};
EAL_REGISTER_TAILQ(rte_distributor_tailq)

/**** APIs called by workers ****/

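/* Worker-side, non-blocking half of the handshake: hand back the previously
 * processed mbuf (NULL on the first call) and post a request for a new one.
 * The mbuf pointer and the GET_BUF flag share the single 64-bit bufptr64
 * word: the low RTE_DISTRIB_FLAG_BITS carry the flags, the upper bits the
 * pointer. The call only spins while a previous flag is still outstanding.
 */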
void
rte_distributor_request_pkt_v20(struct rte_distributor_v20 *d,
		unsigned worker_id, struct rte_mbuf *oldpkt)
{
	union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
	int64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
			| RTE_DISTRIB_GET_BUF;
	while (unlikely(buf->bufptr64 & RTE_DISTRIB_FLAGS_MASK))
		rte_pause();
	buf->bufptr64 = req;
}
VERSION_SYMBOL(rte_distributor_request_pkt, _v20, 2.0);

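/* Worker-side poll: returns NULL while the GET_BUF flag is still set (the
 * request is pending), otherwise the new mbuf recovered from the upper bits
 * of bufptr64.
 */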
struct rte_mbuf *
rte_distributor_poll_pkt_v20(struct rte_distributor_v20 *d,
		unsigned worker_id)
{
	union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
	if (buf->bufptr64 & RTE_DISTRIB_GET_BUF)
		return NULL;

	/* since bufptr64 is signed, this should be an arithmetic shift */
	int64_t ret = buf->bufptr64 >> RTE_DISTRIB_FLAG_BITS;
	return (struct rte_mbuf *)((uintptr_t)ret);
}
VERSION_SYMBOL(rte_distributor_poll_pkt, _v20, 2.0);

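/* Blocking convenience wrapper: post a request via request_pkt and spin on
 * poll_pkt until the distributor hands over a new packet.
 */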
struct rte_mbuf *
rte_distributor_get_pkt_v20(struct rte_distributor_v20 *d,
		unsigned worker_id, struct rte_mbuf *oldpkt)
{
	struct rte_mbuf *ret;
	rte_distributor_request_pkt_v20(d, worker_id, oldpkt);
	while ((ret = rte_distributor_poll_pkt_v20(d, worker_id)) == NULL)
		rte_pause();
	return ret;
}
VERSION_SYMBOL(rte_distributor_get_pkt, _v20, 2.0);

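/* Hand a packet back without requesting a new one, typically on worker
 * shutdown. Setting RETURN_BUF tells the distributor core to reassign any
 * packets still queued in this worker's backlog.
 */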
int
rte_distributor_return_pkt_v20(struct rte_distributor_v20 *d,
		unsigned worker_id, struct rte_mbuf *oldpkt)
{
	union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
	uint64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
			| RTE_DISTRIB_RETURN_BUF;
	buf->bufptr64 = req;
	return 0;
}
VERSION_SYMBOL(rte_distributor_return_pkt, _v20, 2.0);

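/*
 * Illustrative worker loop built on the calls above (a sketch only; "d",
 * "id" and "quit_signal" are assumed names, not part of this file):
 *
 *	struct rte_mbuf *pkt = rte_distributor_get_pkt_v20(d, id, NULL);
 *	while (!quit_signal) {
 *		// ... per-packet work on pkt goes here ...
 *		pkt = rte_distributor_get_pkt_v20(d, id, pkt);
 *	}
 *	rte_distributor_return_pkt_v20(d, id, pkt);
 */
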
/**** APIs called on distributor core ****/

/* as name suggests, adds a packet to the backlog for a particular worker */
static int
add_to_backlog(struct rte_distributor_backlog *bl, int64_t item)
{
	if (bl->count == RTE_DISTRIB_BACKLOG_SIZE)
		return -1;

	bl->pkts[(bl->start + bl->count++) & (RTE_DISTRIB_BACKLOG_MASK)]
			= item;
	return 0;
}

/* takes the next packet for a worker off the backlog;
 * callers must ensure the backlog is non-empty before popping */
static int64_t
backlog_pop(struct rte_distributor_backlog *bl)
{
	bl->count--;
	return bl->pkts[bl->start++ & RTE_DISTRIB_BACKLOG_MASK];
}

/* stores a packet returned from a worker inside the returns array */
static inline void
store_return(uintptr_t oldbuf, struct rte_distributor_v20 *d,
		unsigned *ret_start, unsigned *ret_count)
{
	/* store returns in a circular buffer - code is branch-free.
	 * The "& !!(oldbuf)" term makes both updates no-ops when oldbuf
	 * is NULL, so an empty slot never advances the ring. */
	d->returns.mbufs[(*ret_start + *ret_count) & RTE_DISTRIB_RETURNS_MASK]
			= (void *)oldbuf;
	*ret_start += (*ret_count == RTE_DISTRIB_RETURNS_MASK) & !!(oldbuf);
	*ret_count += (*ret_count != RTE_DISTRIB_RETURNS_MASK) & !!(oldbuf);
}

static inline void
handle_worker_shutdown(struct rte_distributor_v20 *d, unsigned int wkr)
{
	d->in_flight_tags[wkr] = 0;
	d->in_flight_bitmask &= ~(1UL << wkr);
	d->bufs[wkr].bufptr64 = 0;
	if (unlikely(d->backlog[wkr].count != 0)) {
		/* On return of a packet, we need to move the
		 * queued packets for this core elsewhere.
		 * Easiest solution is to set things up for
		 * a recursive call. That will cause those
		 * packets to be queued up for the next free
		 * core, i.e. it will return as soon as a
		 * core becomes free to accept the first
		 * packet, as subsequent ones will be added to
		 * the backlog for that core.
		 */
		struct rte_mbuf *pkts[RTE_DISTRIB_BACKLOG_SIZE];
		unsigned i;
		struct rte_distributor_backlog *bl = &d->backlog[wkr];

		for (i = 0; i < bl->count; i++) {
			unsigned idx = (bl->start + i) &
					RTE_DISTRIB_BACKLOG_MASK;
			pkts[i] = (void *)((uintptr_t)(bl->pkts[idx] >>
					RTE_DISTRIB_FLAG_BITS));
		}
		/* recursive call.
		 * Note that the tags were set before first level call
		 * to rte_distributor_process.
		 */
		rte_distributor_process_v20(d, pkts, i);
		bl->count = bl->start = 0;
	}
}

/* this function is called when rte_distributor_process() is called with no
 * new packets. It goes through all the workers and clears any returned
 * packets to do a partial flush.
 */
static int
process_returns(struct rte_distributor_v20 *d)
{
	unsigned wkr;
	unsigned flushed = 0;
	unsigned ret_start = d->returns.start,
			ret_count = d->returns.count;

	for (wkr = 0; wkr < d->num_workers; wkr++) {

		const int64_t data = d->bufs[wkr].bufptr64;
		uintptr_t oldbuf = 0;

		if (data & RTE_DISTRIB_GET_BUF) {
			flushed++;
			if (d->backlog[wkr].count)
				d->bufs[wkr].bufptr64 =
						backlog_pop(&d->backlog[wkr]);
			else {
				d->bufs[wkr].bufptr64 = RTE_DISTRIB_GET_BUF;
				d->in_flight_tags[wkr] = 0;
				d->in_flight_bitmask &= ~(1UL << wkr);
			}
			oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
		} else if (data & RTE_DISTRIB_RETURN_BUF) {
			handle_worker_shutdown(d, wkr);
			oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
		}

		store_return(oldbuf, d, &ret_start, &ret_count);
	}

	d->returns.start = ret_start;
	d->returns.count = ret_count;

	return flushed;
}

/* process a set of packets to distribute them to workers */
int
rte_distributor_process_v20(struct rte_distributor_v20 *d,
		struct rte_mbuf **mbufs, unsigned num_mbufs)
{
	unsigned next_idx = 0;
	unsigned wkr = 0;
	struct rte_mbuf *next_mb = NULL;
	int64_t next_value = 0;
	uint32_t new_tag = 0;
	unsigned ret_start = d->returns.start,
			ret_count = d->returns.count;

	if (unlikely(num_mbufs == 0))
		return process_returns(d);

	while (next_idx < num_mbufs || next_mb != NULL) {

		int64_t data = d->bufs[wkr].bufptr64;
		uintptr_t oldbuf = 0;

		if (!next_mb) {
			next_mb = mbufs[next_idx++];
			next_value = (((int64_t)(uintptr_t)next_mb)
					<< RTE_DISTRIB_FLAG_BITS);
			/*
			 * Users are expected to set the tag value for
			 * each mbuf before calling rte_distributor_process.
			 * User-defined tags are used to identify flows,
			 * or sessions.
			 */
			new_tag = next_mb->hash.usr;

			/*
			 * Note that if RTE_DISTRIB_MAX_WORKERS is larger
			 * than 64 then the size of match has to be expanded.
			 */
			uint64_t match = 0;
			unsigned i;
			/*
			 * to scan for a match use "xor" and "not" to get a 0/1
			 * value, then use shifting to merge to single "match"
			 * variable, where a one-bit indicates a match for the
			 * worker given by the bit-position
			 */
			/* cast to uint64_t before shifting so the shift is
			 * done in 64 bits (i may exceed 31) */
			for (i = 0; i < d->num_workers; i++)
				match |= ((uint64_t)!(d->in_flight_tags[i]
					^ new_tag) << i);

			/* Only turned-on bits are considered as match */
			match &= d->in_flight_bitmask;

			if (match) {
				next_mb = NULL;
				unsigned worker = __builtin_ctzl(match);
				if (add_to_backlog(&d->backlog[worker],
						next_value) < 0)
					next_idx--;
			}
		}

		if ((data & RTE_DISTRIB_GET_BUF) &&
				(d->backlog[wkr].count || next_mb)) {

			if (d->backlog[wkr].count)
				d->bufs[wkr].bufptr64 =
						backlog_pop(&d->backlog[wkr]);

			else {
				d->bufs[wkr].bufptr64 = next_value;
				d->in_flight_tags[wkr] = new_tag;
				d->in_flight_bitmask |= (1UL << wkr);
				next_mb = NULL;
			}
			oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
		} else if (data & RTE_DISTRIB_RETURN_BUF) {
			handle_worker_shutdown(d, wkr);
			oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
		}

		/* store returns in a circular buffer */
		store_return(oldbuf, d, &ret_start, &ret_count);

		if (++wkr == d->num_workers)
			wkr = 0;
	}
	/* to finish, check all workers for backlog and schedule work for them
	 * if they are ready */
	for (wkr = 0; wkr < d->num_workers; wkr++)
		if (d->backlog[wkr].count &&
				(d->bufs[wkr].bufptr64 & RTE_DISTRIB_GET_BUF)) {

			int64_t oldbuf = d->bufs[wkr].bufptr64 >>
					RTE_DISTRIB_FLAG_BITS;
			store_return(oldbuf, d, &ret_start, &ret_count);

			d->bufs[wkr].bufptr64 = backlog_pop(&d->backlog[wkr]);
		}

	d->returns.start = ret_start;
	d->returns.count = ret_count;
	return num_mbufs;
}
VERSION_SYMBOL(rte_distributor_process, _v20, 2.0);

/* return to the caller, packets returned from workers */
int
rte_distributor_returned_pkts_v20(struct rte_distributor_v20 *d,
		struct rte_mbuf **mbufs, unsigned max_mbufs)
{
	struct rte_distributor_returned_pkts *returns = &d->returns;
	unsigned retval = (max_mbufs < returns->count) ?
			max_mbufs : returns->count;
	unsigned i;

	for (i = 0; i < retval; i++) {
		unsigned idx = (returns->start + i) & RTE_DISTRIB_RETURNS_MASK;
		mbufs[i] = returns->mbufs[idx];
	}
	returns->start += i;
	returns->count -= i;

	return retval;
}
VERSION_SYMBOL(rte_distributor_returned_pkts, _v20, 2.0);

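/*
 * Illustrative distributor-core loop combining process() and
 * returned_pkts() (a sketch only; "port", "quit_signal" and the burst size
 * are assumed names/values, not part of this file):
 *
 *	struct rte_mbuf *bufs[64], *done[64];
 *	while (!quit_signal) {
 *		const uint16_t nb_rx = rte_eth_rx_burst(port, 0, bufs, 64);
 *		rte_distributor_process_v20(d, bufs, nb_rx);
 *		const int nb_ret = rte_distributor_returned_pkts_v20(d,
 *				done, 64);
 *		// ... transmit or free the nb_ret completed packets ...
 *	}
 */
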
/* return the number of packets in-flight in a distributor, i.e. packets
 * being worked on or queued up in a backlog. */
static inline unsigned
total_outstanding(const struct rte_distributor_v20 *d)
{
	unsigned wkr, total_outstanding;

	total_outstanding = __builtin_popcountl(d->in_flight_bitmask);

	for (wkr = 0; wkr < d->num_workers; wkr++)
		total_outstanding += d->backlog[wkr].count;

	return total_outstanding;
}

/* flush the distributor, so that there are no outstanding packets in flight
 * or queued up. */
int
rte_distributor_flush_v20(struct rte_distributor_v20 *d)
{
	const unsigned flushed = total_outstanding(d);

	while (total_outstanding(d) > 0)
		rte_distributor_process_v20(d, NULL, 0);

	return flushed;
}
VERSION_SYMBOL(rte_distributor_flush, _v20, 2.0);

/* clears the internal returns array in the distributor */
void
rte_distributor_clear_returns_v20(struct rte_distributor_v20 *d)
{
	d->returns.start = d->returns.count = 0;
#ifndef __OPTIMIZE__
	memset(d->returns.mbufs, 0, sizeof(d->returns.mbufs));
#endif
}
VERSION_SYMBOL(rte_distributor_clear_returns, _v20, 2.0);

/* creates a distributor instance */
struct rte_distributor_v20 *
rte_distributor_create_v20(const char *name,
		unsigned socket_id,
		unsigned num_workers)
{
	struct rte_distributor_v20 *d;
	struct rte_distributor_list *distributor_list;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	/* compilation-time checks */
	RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);
	RTE_BUILD_BUG_ON(RTE_DISTRIB_MAX_WORKERS >
				sizeof(d->in_flight_bitmask) * CHAR_BIT);

	if (name == NULL || num_workers >= RTE_DISTRIB_MAX_WORKERS) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(mz_name, sizeof(mz_name), RTE_DISTRIB_PREFIX"%s", name);
	mz = rte_memzone_reserve(mz_name, sizeof(*d), socket_id, NO_FLAGS);
	if (mz == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	d = mz->addr;
	snprintf(d->name, sizeof(d->name), "%s", name);
	d->num_workers = num_workers;

	distributor_list = RTE_TAILQ_CAST(rte_distributor_tailq.head,
					  rte_distributor_list);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_INSERT_TAIL(distributor_list, d, next);
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return d;
}
VERSION_SYMBOL(rte_distributor_create, _v20, 2.0);