lib/librte_distributor/rte_distributor.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <sys/queue.h>
#include <string.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_cycles.h>
#include <rte_function_versioning.h>
#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>
#include <rte_pause.h>
#include <rte_tailq.h>

#include "rte_distributor_private.h"
#include "rte_distributor.h"
#include "rte_distributor_v20.h"
#include "rte_distributor_v1705.h"

TAILQ_HEAD(rte_dist_burst_list, rte_distributor);

static struct rte_tailq_elem rte_dist_burst_tailq = {
        .name = "RTE_DIST_BURST",
};
EAL_REGISTER_TAILQ(rte_dist_burst_tailq)

/**** APIs called by workers ****/

/**** Burst Packet APIs called by workers ****/

void
rte_distributor_request_pkt_v1705(struct rte_distributor *d,
                unsigned int worker_id, struct rte_mbuf **oldpkt,
                unsigned int count)
{
        struct rte_distributor_buffer *buf = &(d->bufs[worker_id]);
        unsigned int i;

        volatile int64_t *retptr64;

        if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
                rte_distributor_request_pkt_v20(d->d_v20,
                        worker_id, oldpkt[0]);
                return;
        }

        retptr64 = &(buf->retptr64[0]);
        /* Spin while handshake bits are set (scheduler clears them).
         * Sync with distributor on GET_BUF flag.
         */
        while (unlikely(__atomic_load_n(retptr64, __ATOMIC_ACQUIRE)
                        & RTE_DISTRIB_GET_BUF)) {
                rte_pause();
                uint64_t t = rte_rdtsc() + 100;

                while (rte_rdtsc() < t)
                        rte_pause();
        }

        /*
         * OK, if we've got here, then the scheduler has just cleared the
         * handshake bits. Populate the retptrs with returning packets.
         */

        for (i = count; i < RTE_DIST_BURST_SIZE; i++)
                buf->retptr64[i] = 0;

        /* Set Return bit for each packet returned */
        for (i = count; i-- > 0; )
                buf->retptr64[i] =
                        (((int64_t)(uintptr_t)(oldpkt[i])) <<
                        RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_RETURN_BUF;

        /*
         * Finally, set the GET_BUF flag to signal to the distributor that the
         * cache line is ready for processing.
         * Sync with distributor to release retptrs
         */
        __atomic_store_n(retptr64, *retptr64 | RTE_DISTRIB_GET_BUF,
                        __ATOMIC_RELEASE);
}
BIND_DEFAULT_SYMBOL(rte_distributor_request_pkt, _v1705, 17.05);
MAP_STATIC_SYMBOL(void rte_distributor_request_pkt(struct rte_distributor *d,
                unsigned int worker_id, struct rte_mbuf **oldpkt,
                unsigned int count),
                rte_distributor_request_pkt_v1705);

int
rte_distributor_poll_pkt_v1705(struct rte_distributor *d,
                unsigned int worker_id, struct rte_mbuf **pkts)
{
        struct rte_distributor_buffer *buf = &d->bufs[worker_id];
        uint64_t ret;
        int count = 0;
        unsigned int i;

        if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
                pkts[0] = rte_distributor_poll_pkt_v20(d->d_v20, worker_id);
                return (pkts[0]) ? 1 : 0;
        }

        /* If the GET_BUF flag is still set, the distributor has not yet
         * refilled this buffer, so there are no packets to take.
         * Sync with distributor to acquire bufptrs
         */
        if (__atomic_load_n(&(buf->bufptr64[0]), __ATOMIC_ACQUIRE)
                & RTE_DISTRIB_GET_BUF)
                return -1;

        /* since bufptr64 is signed, this should be an arithmetic shift */
        for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
                if (likely(buf->bufptr64[i] & RTE_DISTRIB_VALID_BUF)) {
                        ret = buf->bufptr64[i] >> RTE_DISTRIB_FLAG_BITS;
                        pkts[count++] = (struct rte_mbuf *)((uintptr_t)(ret));
                }
        }

        /*
         * so now we've got the contents of the cacheline into an array of
         * mbuf pointers, so toggle the bit so scheduler can start working
         * on the next cacheline while we're working.
         * Sync with distributor on GET_BUF flag. Release bufptrs.
         */
        __atomic_store_n(&(buf->bufptr64[0]),
                buf->bufptr64[0] | RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);

        return count;
}
BIND_DEFAULT_SYMBOL(rte_distributor_poll_pkt, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_distributor_poll_pkt(struct rte_distributor *d,
                unsigned int worker_id, struct rte_mbuf **pkts),
                rte_distributor_poll_pkt_v1705);
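
/*
 * Illustrative worker-side sketch (not part of this library's code): the
 * request/poll pair lets a worker hand back its previous burst, do unrelated
 * work, and then collect the next burst without blocking in a single call.
 * "d", "worker_id", "bufs" and "nb_prev" are application-side placeholders,
 * and the sketch assumes the burst (non-SINGLE) mode of operation.
 *
 *	struct rte_mbuf *bufs[8];	// sized to the burst handed out
 *	int nb;
 *
 *	rte_distributor_request_pkt(d, worker_id, bufs, nb_prev);
 *	// ... do other per-lcore work while the distributor refills ...
 *	do {
 *		nb = rte_distributor_poll_pkt(d, worker_id, bufs);
 *	} while (nb == -1);
 */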

int
rte_distributor_get_pkt_v1705(struct rte_distributor *d,
                unsigned int worker_id, struct rte_mbuf **pkts,
                struct rte_mbuf **oldpkt, unsigned int return_count)
{
        int count;

        if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
                if (return_count <= 1) {
                        pkts[0] = rte_distributor_get_pkt_v20(d->d_v20,
                                worker_id, oldpkt[0]);
                        return (pkts[0]) ? 1 : 0;
                } else
                        return -EINVAL;
        }

        rte_distributor_request_pkt(d, worker_id, oldpkt, return_count);

        count = rte_distributor_poll_pkt(d, worker_id, pkts);
        while (count == -1) {
                uint64_t t = rte_rdtsc() + 100;

                while (rte_rdtsc() < t)
                        rte_pause();

                count = rte_distributor_poll_pkt(d, worker_id, pkts);
        }
        return count;
}
BIND_DEFAULT_SYMBOL(rte_distributor_get_pkt, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_distributor_get_pkt(struct rte_distributor *d,
                unsigned int worker_id, struct rte_mbuf **pkts,
                struct rte_mbuf **oldpkt, unsigned int return_count),
                rte_distributor_get_pkt_v1705);
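
/*
 * Illustrative worker loop sketch built on rte_distributor_get_pkt()
 * (assumes burst mode; "quit", "do_work()" and the other variables are
 * application-side placeholders, not part of this library):
 *
 *	struct rte_mbuf *bufs[8];
 *	int i, nb = 0;
 *
 *	while (!quit) {
 *		// hand back the previous burst and fetch the next one
 *		nb = rte_distributor_get_pkt(d, worker_id, bufs, bufs, nb);
 *		for (i = 0; i < nb; i++)
 *			do_work(bufs[i]);
 *	}
 */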

int
rte_distributor_return_pkt_v1705(struct rte_distributor *d,
                unsigned int worker_id, struct rte_mbuf **oldpkt, int num)
{
        struct rte_distributor_buffer *buf = &d->bufs[worker_id];
        unsigned int i;

        if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
                if (num == 1)
                        return rte_distributor_return_pkt_v20(d->d_v20,
                                worker_id, oldpkt[0]);
                else
                        return -EINVAL;
        }

        /* Sync with distributor to acquire retptrs */
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
        for (i = 0; i < RTE_DIST_BURST_SIZE; i++)
                /* Switch off the return bit first */
                buf->retptr64[i] &= ~RTE_DISTRIB_RETURN_BUF;

        for (i = num; i-- > 0; )
                buf->retptr64[i] = (((int64_t)(uintptr_t)oldpkt[i]) <<
                        RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_RETURN_BUF;

        /* set the GET_BUF bit even if we got no returns.
         * Sync with distributor on GET_BUF flag. Release retptrs.
         */
        __atomic_store_n(&(buf->retptr64[0]),
                buf->retptr64[0] | RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);

        return 0;
}
BIND_DEFAULT_SYMBOL(rte_distributor_return_pkt, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_distributor_return_pkt(struct rte_distributor *d,
                unsigned int worker_id, struct rte_mbuf **oldpkt, int num),
                rte_distributor_return_pkt_v1705);
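
/*
 * Illustrative shutdown sketch: when a worker stops, it should hand its last
 * burst back so the distributor can account for those packets ("nb" being
 * whatever the preceding rte_distributor_get_pkt() call returned; this is
 * application-side bookkeeping, not library state):
 *
 *	if (nb > 0)
 *		rte_distributor_return_pkt(d, worker_id, bufs, nb);
 */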

/**** APIs called on distributor core ****/

/* stores a packet returned from a worker inside the returns array */
static inline void
store_return(uintptr_t oldbuf, struct rte_distributor *d,
                unsigned int *ret_start, unsigned int *ret_count)
{
        if (!oldbuf)
                return;
        /* store returns in a circular buffer */
        d->returns.mbufs[(*ret_start + *ret_count) & RTE_DISTRIB_RETURNS_MASK]
                        = (void *)oldbuf;
        *ret_start += (*ret_count == RTE_DISTRIB_RETURNS_MASK);
        *ret_count += (*ret_count != RTE_DISTRIB_RETURNS_MASK);
}
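
/*
 * Worked example of the index arithmetic above (values purely illustrative):
 * with start = 2 and count = 3, the next return lands in slot
 * (2 + 3) & RTE_DISTRIB_RETURNS_MASK, i.e. slot 5, and count becomes 4. Once
 * count has reached RTE_DISTRIB_RETURNS_MASK the ring is treated as full:
 * count stays put and start advances instead, so the oldest stored return is
 * overwritten.
 */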

/*
 * Match the flow_ids (tags) of the incoming packets to the flow_ids
 * of the inflight packets (both inflight on the workers and in each worker
 * backlog). This will then allow us to pin those packets to the relevant
 * workers to give us our atomic flow pinning.
 */
void
find_match_scalar(struct rte_distributor *d,
                        uint16_t *data_ptr,
                        uint16_t *output_ptr)
{
        struct rte_distributor_backlog *bl;
        uint16_t i, j, w;

        /*
         * Function overview:
         * 1. Loop through all worker IDs
         * 2. Compare the current inflights to the incoming tags
         * 3. Compare the current backlog to the incoming tags
         * 4. Add any matches to the output
         */

        for (j = 0 ; j < RTE_DIST_BURST_SIZE; j++)
                output_ptr[j] = 0;

        for (i = 0; i < d->num_workers; i++) {
                bl = &d->backlog[i];

                for (j = 0; j < RTE_DIST_BURST_SIZE ; j++)
                        for (w = 0; w < RTE_DIST_BURST_SIZE; w++)
                                if (d->in_flight_tags[i][j] == data_ptr[w]) {
                                        output_ptr[j] = i+1;
                                        break;
                                }
                for (j = 0; j < RTE_DIST_BURST_SIZE; j++)
                        for (w = 0; w < RTE_DIST_BURST_SIZE; w++)
                                if (bl->tags[j] == data_ptr[w]) {
                                        output_ptr[j] = i+1;
                                        break;
                                }
        }

        /*
         * At this stage, the output contains 8 16-bit values, with
         * each non-zero value containing the worker ID (+1) to which the
         * corresponding flow is pinned.
         */
}

/*
 * When the handshake bits indicate that there are packets coming
 * back from the worker, this function is called to copy and store
 * the valid returned pointers (store_return).
 */
static unsigned int
handle_returns(struct rte_distributor *d, unsigned int wkr)
{
        struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
        uintptr_t oldbuf;
        unsigned int ret_start = d->returns.start,
                        ret_count = d->returns.count;
        unsigned int count = 0;
        unsigned int i;

        /* Sync on GET_BUF flag. Acquire retptrs. */
        if (__atomic_load_n(&(buf->retptr64[0]), __ATOMIC_ACQUIRE)
                & RTE_DISTRIB_GET_BUF) {
                for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
                        if (buf->retptr64[i] & RTE_DISTRIB_RETURN_BUF) {
                                oldbuf = ((uintptr_t)(buf->retptr64[i] >>
                                        RTE_DISTRIB_FLAG_BITS));
                                /* store returns in a circular buffer */
                                store_return(oldbuf, d, &ret_start, &ret_count);
                                count++;
                                buf->retptr64[i] &= ~RTE_DISTRIB_RETURN_BUF;
                        }
                }
                d->returns.start = ret_start;
                d->returns.count = ret_count;
                /* Clear for the worker to populate with more returns.
                 * Sync with worker on GET_BUF flag. Release retptrs.
                 */
                __atomic_store_n(&(buf->retptr64[0]), 0, __ATOMIC_RELEASE);
        }
        return count;
}

/*
 * This function releases a burst (cache line) to a worker.
 * It is called from the process function when a cacheline is
 * full to make room for more packets for that worker, or when
 * all packets have been assigned to bursts and need to be flushed
 * to the workers.
 * It also needs to wait for any outstanding packets from the worker
 * before sending out new packets.
 */
static unsigned int
release(struct rte_distributor *d, unsigned int wkr)
{
        struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
        unsigned int i;

        /* Sync with worker on GET_BUF flag */
        while (!(__atomic_load_n(&(d->bufs[wkr].bufptr64[0]), __ATOMIC_ACQUIRE)
                & RTE_DISTRIB_GET_BUF))
                rte_pause();

        handle_returns(d, wkr);

        buf->count = 0;

        for (i = 0; i < d->backlog[wkr].count; i++) {
                d->bufs[wkr].bufptr64[i] = d->backlog[wkr].pkts[i] |
                                RTE_DISTRIB_GET_BUF | RTE_DISTRIB_VALID_BUF;
                d->in_flight_tags[wkr][i] = d->backlog[wkr].tags[i];
        }
        buf->count = i;
        for ( ; i < RTE_DIST_BURST_SIZE ; i++) {
                buf->bufptr64[i] = RTE_DISTRIB_GET_BUF;
                d->in_flight_tags[wkr][i] = 0;
        }

        d->backlog[wkr].count = 0;

        /* Clear the GET_BUF bit.
         * Sync with worker on GET_BUF flag. Release bufptrs.
         */
        __atomic_store_n(&(buf->bufptr64[0]),
                buf->bufptr64[0] & ~RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);
        return buf->count;
}

/* process a set of packets to distribute them to workers */
int
rte_distributor_process_v1705(struct rte_distributor *d,
                struct rte_mbuf **mbufs, unsigned int num_mbufs)
{
        unsigned int next_idx = 0;
        static unsigned int wkr;
        struct rte_mbuf *next_mb = NULL;
        int64_t next_value = 0;
        uint16_t new_tag = 0;
        uint16_t flows[RTE_DIST_BURST_SIZE] __rte_cache_aligned;
        unsigned int i, j, w, wid;

        if (d->alg_type == RTE_DIST_ALG_SINGLE) {
                /* Call the old API */
                return rte_distributor_process_v20(d->d_v20, mbufs, num_mbufs);
        }

        if (unlikely(num_mbufs == 0)) {
                /* Flush out all non-full cache-lines to workers. */
                for (wid = 0 ; wid < d->num_workers; wid++) {
                        /* Sync with worker on GET_BUF flag. */
                        if (__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
                                __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF) {
                                release(d, wid);
                                handle_returns(d, wid);
                        }
                }
                return 0;
        }

        while (next_idx < num_mbufs) {
                uint16_t matches[RTE_DIST_BURST_SIZE];
                unsigned int pkts;

                /* Sync with worker on GET_BUF flag. */
                if (__atomic_load_n(&(d->bufs[wkr].bufptr64[0]),
                        __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)
                        d->bufs[wkr].count = 0;

                if ((num_mbufs - next_idx) < RTE_DIST_BURST_SIZE)
                        pkts = num_mbufs - next_idx;
                else
                        pkts = RTE_DIST_BURST_SIZE;

                for (i = 0; i < pkts; i++) {
                        if (mbufs[next_idx + i]) {
                                /* flows have to be non-zero */
                                flows[i] = mbufs[next_idx + i]->hash.usr | 1;
                        } else
                                flows[i] = 0;
                }
                for (; i < RTE_DIST_BURST_SIZE; i++)
                        flows[i] = 0;

                switch (d->dist_match_fn) {
                case RTE_DIST_MATCH_VECTOR:
                        find_match_vec(d, &flows[0], &matches[0]);
                        break;
                default:
                        find_match_scalar(d, &flows[0], &matches[0]);
                }

                /*
                 * Matches array now contains the intended worker ID (+1) of
                 * the incoming packets. Any zeroes need to be assigned
                 * workers.
                 */

                for (j = 0; j < pkts; j++) {

                        next_mb = mbufs[next_idx++];
                        next_value = (((int64_t)(uintptr_t)next_mb) <<
                                        RTE_DISTRIB_FLAG_BITS);
                        /*
                         * User is advised to set the tag value for each
                         * mbuf before calling rte_distributor_process.
                         * User defined tags are used to identify flows,
                         * or sessions.
                         */
                        /* flows MUST be non-zero */
                        new_tag = (uint16_t)(next_mb->hash.usr) | 1;

                        /*
                         * Uncommenting the next line will cause the find_match
                         * function to be optimized out, making this function
                         * do parallel (non-atomic) distribution
                         */
                        /* matches[j] = 0; */

                        if (matches[j]) {
                                struct rte_distributor_backlog *bl =
                                                &d->backlog[matches[j]-1];
                                if (unlikely(bl->count ==
                                                RTE_DIST_BURST_SIZE)) {
                                        release(d, matches[j]-1);
                                }

                                /* Add to worker that already has flow */
                                unsigned int idx = bl->count++;

                                bl->tags[idx] = new_tag;
                                bl->pkts[idx] = next_value;

                        } else {
                                struct rte_distributor_backlog *bl =
                                                &d->backlog[wkr];
                                if (unlikely(bl->count ==
                                                RTE_DIST_BURST_SIZE)) {
                                        release(d, wkr);
                                }

                                /* Add to current worker */
                                unsigned int idx = bl->count++;

                                bl->tags[idx] = new_tag;
                                bl->pkts[idx] = next_value;
                                /*
                                 * Now that we've just added an unpinned flow
                                 * to a worker, we need to ensure that all
                                 * other packets with that same flow will go
                                 * to the same worker in this burst.
                                 */
                                for (w = j; w < pkts; w++)
                                        if (flows[w] == new_tag)
                                                matches[w] = wkr+1;
                        }
                }
                wkr++;
                if (wkr >= d->num_workers)
                        wkr = 0;
        }

        /* Flush out all non-full cache-lines to workers. */
        for (wid = 0 ; wid < d->num_workers; wid++)
                /* Sync with worker on GET_BUF flag. */
                if ((__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
                        __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF))
                        release(d, wid);

        return num_mbufs;
}
BIND_DEFAULT_SYMBOL(rte_distributor_process, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_distributor_process(struct rte_distributor *d,
                struct rte_mbuf **mbufs, unsigned int num_mbufs),
                rte_distributor_process_v1705);
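
/*
 * Illustrative distributor-core sketch: rte_distributor_process() is
 * normally fed straight from an rx burst; calling it with num_mbufs == 0
 * only flushes partially filled bursts out to the workers. "port" and
 * BURST_SIZE are application-side placeholders.
 *
 *	struct rte_mbuf *bufs[BURST_SIZE];
 *	const uint16_t nb_rx = rte_eth_rx_burst(port, 0, bufs, BURST_SIZE);
 *
 *	rte_distributor_process(d, bufs, nb_rx);
 */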

/* return to the caller the packets returned from workers */
int
rte_distributor_returned_pkts_v1705(struct rte_distributor *d,
                struct rte_mbuf **mbufs, unsigned int max_mbufs)
{
        struct rte_distributor_returned_pkts *returns = &d->returns;
        unsigned int retval = (max_mbufs < returns->count) ?
                        max_mbufs : returns->count;
        unsigned int i;

        if (d->alg_type == RTE_DIST_ALG_SINGLE) {
                /* Call the old API */
                return rte_distributor_returned_pkts_v20(d->d_v20,
                                mbufs, max_mbufs);
        }

        for (i = 0; i < retval; i++) {
                unsigned int idx = (returns->start + i) &
                                RTE_DISTRIB_RETURNS_MASK;

                mbufs[i] = returns->mbufs[idx];
        }
        returns->start += i;
        returns->count -= i;

        return retval;
}
BIND_DEFAULT_SYMBOL(rte_distributor_returned_pkts, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_distributor_returned_pkts(struct rte_distributor *d,
                struct rte_mbuf **mbufs, unsigned int max_mbufs),
                rte_distributor_returned_pkts_v1705);
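
/*
 * Illustrative sketch: packets handed back by the workers accumulate in the
 * returns ring until the caller drains them, e.g. to transmit or free them.
 * "ret_bufs" and its size are application-side placeholders.
 *
 *	struct rte_mbuf *ret_bufs[32];
 *	int i, nb_ret;
 *
 *	nb_ret = rte_distributor_returned_pkts(d, ret_bufs, 32);
 *	for (i = 0; i < nb_ret; i++)
 *		rte_pktmbuf_free(ret_bufs[i]);
 */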

/*
 * Return the number of packets in-flight in a distributor, i.e. packets
 * being worked on or queued up in a backlog.
 */
static inline unsigned int
total_outstanding(const struct rte_distributor *d)
{
        unsigned int wkr, total_outstanding = 0;

        for (wkr = 0; wkr < d->num_workers; wkr++)
                total_outstanding += d->backlog[wkr].count;

        return total_outstanding;
}

/*
 * Flush the distributor, so that there are no outstanding packets in flight or
 * queued up.
 */
int
rte_distributor_flush_v1705(struct rte_distributor *d)
{
        unsigned int flushed;
        unsigned int wkr;

        if (d->alg_type == RTE_DIST_ALG_SINGLE) {
                /* Call the old API */
                return rte_distributor_flush_v20(d->d_v20);
        }

        flushed = total_outstanding(d);

        while (total_outstanding(d) > 0)
                rte_distributor_process(d, NULL, 0);

        /* wait 10ms to allow all workers to drain the pkts */
        rte_delay_us(10000);

        /*
         * Send empty burst to all workers to allow them to exit
         * gracefully, should they need to.
         */
        rte_distributor_process(d, NULL, 0);

        for (wkr = 0; wkr < d->num_workers; wkr++)
                handle_returns(d, wkr);

        return flushed;
}
BIND_DEFAULT_SYMBOL(rte_distributor_flush, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_distributor_flush(struct rte_distributor *d),
                rte_distributor_flush_v1705);

/* clears the internal returns array in the distributor */
void
rte_distributor_clear_returns_v1705(struct rte_distributor *d)
{
        unsigned int wkr;

        if (d->alg_type == RTE_DIST_ALG_SINGLE) {
                /* Call the old API */
                rte_distributor_clear_returns_v20(d->d_v20);
                return;
        }

        /* throw away returns, so workers can exit */
        for (wkr = 0; wkr < d->num_workers; wkr++)
                /* Sync with worker. Release retptrs. */
                __atomic_store_n(&(d->bufs[wkr].retptr64[0]), 0,
                                __ATOMIC_RELEASE);
}
BIND_DEFAULT_SYMBOL(rte_distributor_clear_returns, _v1705, 17.05);
MAP_STATIC_SYMBOL(void rte_distributor_clear_returns(struct rte_distributor *d),
                rte_distributor_clear_returns_v1705);
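
/*
 * Illustrative shutdown sketch for the distributor core: flush so that no
 * packets remain in flight or in a backlog, then clear the return buffers so
 * that workers blocked handing packets back can exit. The "quit" signalling
 * is application-side and only shown for context.
 *
 *	quit = 1;
 *	rte_distributor_flush(d);
 *	rte_distributor_clear_returns(d);
 */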

/* creates a distributor instance */
struct rte_distributor *
rte_distributor_create_v1705(const char *name,
                unsigned int socket_id,
                unsigned int num_workers,
                unsigned int alg_type)
{
        struct rte_distributor *d;
        struct rte_dist_burst_list *dist_burst_list;
        char mz_name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz;
        unsigned int i;

        /* TODO Reorganise function properly around RTE_DIST_ALG_SINGLE/BURST */

        /* compilation-time checks */
        RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0);
        RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);

        if (name == NULL || num_workers >=
                (unsigned int)RTE_MIN(RTE_DISTRIB_MAX_WORKERS, RTE_MAX_LCORE)) {
                rte_errno = EINVAL;
                return NULL;
        }

        if (alg_type == RTE_DIST_ALG_SINGLE) {
                d = malloc(sizeof(struct rte_distributor));
                if (d == NULL) {
                        rte_errno = ENOMEM;
                        return NULL;
                }
                d->d_v20 = rte_distributor_create_v20(name,
                                socket_id, num_workers);
                if (d->d_v20 == NULL) {
                        free(d);
                        /* rte_errno will have been set */
                        return NULL;
                }
                d->alg_type = alg_type;
                return d;
        }

        snprintf(mz_name, sizeof(mz_name), RTE_DISTRIB_PREFIX"%s", name);
        mz = rte_memzone_reserve(mz_name, sizeof(*d), socket_id, NO_FLAGS);
        if (mz == NULL) {
                rte_errno = ENOMEM;
                return NULL;
        }

        d = mz->addr;
        strlcpy(d->name, name, sizeof(d->name));
        d->num_workers = num_workers;
        d->alg_type = alg_type;

        d->dist_match_fn = RTE_DIST_MATCH_SCALAR;
#if defined(RTE_ARCH_X86)
        d->dist_match_fn = RTE_DIST_MATCH_VECTOR;
#endif

        /*
         * Set up the backlog tags so they're pointing at the second cache
         * line for performance during flow matching
         */
        for (i = 0 ; i < num_workers ; i++)
                d->backlog[i].tags = &d->in_flight_tags[i][RTE_DIST_BURST_SIZE];

        dist_burst_list = RTE_TAILQ_CAST(rte_dist_burst_tailq.head,
                                          rte_dist_burst_list);

        rte_mcfg_tailq_write_lock();
        TAILQ_INSERT_TAIL(dist_burst_list, d, next);
        rte_mcfg_tailq_write_unlock();

        return d;
}
BIND_DEFAULT_SYMBOL(rte_distributor_create, _v1705, 17.05);
MAP_STATIC_SYMBOL(struct rte_distributor *rte_distributor_create(
                const char *name, unsigned int socket_id,
                unsigned int num_workers, unsigned int alg_type),
                rte_distributor_create_v1705);
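
/*
 * Illustrative creation sketch: a burst-mode distributor sized for the
 * remaining lcores (names other than the rte_* calls and the alg_type
 * selectors from rte_distributor.h are placeholders; error handling
 * trimmed):
 *
 *	unsigned int nb_workers = rte_lcore_count() - 1;
 *	struct rte_distributor *d = rte_distributor_create("PKT_DIST",
 *			rte_socket_id(), nb_workers, RTE_DIST_ALG_BURST);
 *
 *	if (d == NULL)
 *		rte_exit(EXIT_FAILURE, "Cannot create distributor\n");
 */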