[dpdk.git] lib/librte_distributor/rte_distributor.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <sys/queue.h>
#include <string.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_cycles.h>
#include <rte_compat.h>
#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>
#include "rte_distributor_private.h"
#include "rte_distributor.h"
#include "rte_distributor_v20.h"
#include "rte_distributor_v1705.h"

TAILQ_HEAD(rte_dist_burst_list, rte_distributor);

static struct rte_tailq_elem rte_dist_burst_tailq = {
        .name = "RTE_DIST_BURST",
};
EAL_REGISTER_TAILQ(rte_dist_burst_tailq)

/**** APIs called by workers ****/

/**** Burst Packet APIs called by workers ****/

void
rte_distributor_request_pkt_v1705(struct rte_distributor *d,
                unsigned int worker_id, struct rte_mbuf **oldpkt,
                unsigned int count)
{
        struct rte_distributor_buffer *buf = &(d->bufs[worker_id]);
        unsigned int i;

        volatile int64_t *retptr64;

        if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
                rte_distributor_request_pkt_v20(d->d_v20,
                        worker_id, oldpkt[0]);
                return;
        }

        retptr64 = &(buf->retptr64[0]);
        /* Spin while handshake bits are set (scheduler clears them) */
        while (unlikely(*retptr64 & RTE_DISTRIB_GET_BUF)) {
                rte_pause();
                uint64_t t = rte_rdtsc() + 100;

                while (rte_rdtsc() < t)
                        rte_pause();
        }

        /*
         * If we've got here, then the scheduler has just cleared the
         * handshake bits. Populate the retptrs with returning packets.
         */

        for (i = count; i < RTE_DIST_BURST_SIZE; i++)
                buf->retptr64[i] = 0;

        /* Set the Return bit for each packet returned */
        for (i = count; i-- > 0; )
                buf->retptr64[i] =
                        (((int64_t)(uintptr_t)(oldpkt[i])) <<
                        RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_RETURN_BUF;

        /*
         * Finally, set the GET_BUF bit to signal to the distributor that
         * the cache line is ready for processing.
         */
        *retptr64 |= RTE_DISTRIB_GET_BUF;
}
BIND_DEFAULT_SYMBOL(rte_distributor_request_pkt, _v1705, 17.05);
MAP_STATIC_SYMBOL(void rte_distributor_request_pkt(struct rte_distributor *d,
                unsigned int worker_id, struct rte_mbuf **oldpkt,
                unsigned int count),
                rte_distributor_request_pkt_v1705);

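/*
 * Note on the slot encoding used above (an illustrative aside, not part of
 * the upstream file): each 64-bit slot carries an mbuf pointer shifted left
 * by RTE_DISTRIB_FLAG_BITS, with the handshake flags
 * (GET_BUF/RETURN_BUF/VALID_BUF) stored in the freed-up low bits:
 *
 *   slot = ((int64_t)(uintptr_t)mbuf << RTE_DISTRIB_FLAG_BITS)
 *                   | RTE_DISTRIB_RETURN_BUF;
 *   mbuf = (struct rte_mbuf *)(uintptr_t)(slot >> RTE_DISTRIB_FLAG_BITS);
 *
 * The right shift recovering the pointer is arithmetic, since the slots
 * are signed 64-bit values.
 */
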
int
rte_distributor_poll_pkt_v1705(struct rte_distributor *d,
                unsigned int worker_id, struct rte_mbuf **pkts)
{
        struct rte_distributor_buffer *buf = &d->bufs[worker_id];
        uint64_t ret;
        int count = 0;
        unsigned int i;

        if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
                pkts[0] = rte_distributor_poll_pkt_v20(d->d_v20, worker_id);
                return (pkts[0]) ? 1 : 0;
        }

        /* If the GET_BUF bit is still set, the distributor has not
         * refilled this cache line yet, so there is nothing to collect */
        if (buf->bufptr64[0] & RTE_DISTRIB_GET_BUF)
                return -1;

        /* since bufptr64 is signed, this should be an arithmetic shift */
        for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
                if (likely(buf->bufptr64[i] & RTE_DISTRIB_VALID_BUF)) {
                        ret = buf->bufptr64[i] >> RTE_DISTRIB_FLAG_BITS;
                        pkts[count++] = (struct rte_mbuf *)((uintptr_t)(ret));
                }
        }

        /*
         * Now that we've copied the contents of the cache line into an array
         * of mbuf pointers, toggle the bit so the scheduler can start working
         * on the next cache line while we're processing these packets.
         */
        buf->bufptr64[0] |= RTE_DISTRIB_GET_BUF;

        return count;
}
BIND_DEFAULT_SYMBOL(rte_distributor_poll_pkt, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_distributor_poll_pkt(struct rte_distributor *d,
                unsigned int worker_id, struct rte_mbuf **pkts),
                rte_distributor_poll_pkt_v1705);

int
rte_distributor_get_pkt_v1705(struct rte_distributor *d,
                unsigned int worker_id, struct rte_mbuf **pkts,
                struct rte_mbuf **oldpkt, unsigned int return_count)
{
        int count;

        if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
                if (return_count <= 1) {
                        pkts[0] = rte_distributor_get_pkt_v20(d->d_v20,
                                worker_id, oldpkt[0]);
                        return (pkts[0]) ? 1 : 0;
                } else
                        return -EINVAL;
        }

        rte_distributor_request_pkt(d, worker_id, oldpkt, return_count);

        count = rte_distributor_poll_pkt(d, worker_id, pkts);
        while (count == -1) {
                uint64_t t = rte_rdtsc() + 100;

                while (rte_rdtsc() < t)
                        rte_pause();

                count = rte_distributor_poll_pkt(d, worker_id, pkts);
        }
        return count;
}
BIND_DEFAULT_SYMBOL(rte_distributor_get_pkt, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_distributor_get_pkt(struct rte_distributor *d,
                unsigned int worker_id, struct rte_mbuf **pkts,
                struct rte_mbuf **oldpkt, unsigned int return_count),
                rte_distributor_get_pkt_v1705);

int
rte_distributor_return_pkt_v1705(struct rte_distributor *d,
                unsigned int worker_id, struct rte_mbuf **oldpkt, int num)
{
        struct rte_distributor_buffer *buf = &d->bufs[worker_id];
        unsigned int i;

        if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
                if (num == 1)
                        return rte_distributor_return_pkt_v20(d->d_v20,
                                worker_id, oldpkt[0]);
                else
                        return -EINVAL;
        }

        for (i = 0; i < RTE_DIST_BURST_SIZE; i++)
                /* Switch off the return bit first */
                buf->retptr64[i] &= ~RTE_DISTRIB_RETURN_BUF;

        for (i = num; i-- > 0; )
                buf->retptr64[i] = (((int64_t)(uintptr_t)oldpkt[i]) <<
                        RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_RETURN_BUF;

        /* Set the GET_BUF bit even if we got no returns */
        buf->retptr64[0] |= RTE_DISTRIB_GET_BUF;

        return 0;
}
BIND_DEFAULT_SYMBOL(rte_distributor_return_pkt, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_distributor_return_pkt(struct rte_distributor *d,
                unsigned int worker_id, struct rte_mbuf **oldpkt, int num),
                rte_distributor_return_pkt_v1705);

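/*
 * For context, a minimal sketch of a worker loop driving the APIs above
 * (illustrative only, not part of the upstream file; lcore_worker,
 * worker_id and quit_signal are hypothetical names):
 *
 *   static int
 *   lcore_worker(void *arg)
 *   {
 *           struct rte_distributor *d = arg;
 *           struct rte_mbuf *buf[8];       // >= RTE_DIST_BURST_SIZE slots
 *           unsigned int worker_id = 0;    // unique per worker, < num_workers
 *           unsigned int count = 0;
 *
 *           while (!quit_signal) {
 *                   count = rte_distributor_get_pkt(d, worker_id,
 *                                   buf, buf, count);
 *                   // ... process the count mbufs in buf ...
 *           }
 *           // hand back any packets still held before exiting
 *           rte_distributor_return_pkt(d, worker_id, buf, count);
 *           return 0;
 *   }
 */
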
/**** APIs called on distributor core ****/

/* stores a packet returned from a worker inside the returns array */
static inline void
store_return(uintptr_t oldbuf, struct rte_distributor *d,
                unsigned int *ret_start, unsigned int *ret_count)
{
        if (!oldbuf)
                return;
        /* store returns in a circular buffer */
        d->returns.mbufs[(*ret_start + *ret_count) & RTE_DISTRIB_RETURNS_MASK]
                        = (void *)oldbuf;
        *ret_start += (*ret_count == RTE_DISTRIB_RETURNS_MASK);
        *ret_count += (*ret_count != RTE_DISTRIB_RETURNS_MASK);
}

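/*
 * An aside on the two branch-free updates above: while the buffer is not
 * full, only *ret_count grows; once *ret_count reaches
 * RTE_DISTRIB_RETURNS_MASK, each new return instead advances *ret_start,
 * overwriting the oldest entry. For example, with a full buffer and
 * ret_start == 5, storing one more return bumps ret_start to 6 and leaves
 * ret_count unchanged.
 */
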
/*
 * Match the flow_ids (tags) of the incoming packets to the flow_ids
 * of the inflight packets (both inflight on the workers and in each worker
 * backlog). This will then allow us to pin those packets to the relevant
 * workers to give us our atomic flow pinning.
 */
void
find_match_scalar(struct rte_distributor *d,
                        uint16_t *data_ptr,
                        uint16_t *output_ptr)
{
        struct rte_distributor_backlog *bl;
        uint16_t i, j, w;

        /*
         * Function overview:
         * 1. Loop through all worker ID's
         * 2. Compare the current inflights to the incoming tags
         * 3. Compare the current backlog to the incoming tags
         * 4. Add any matches to the output
         */

        for (j = 0; j < RTE_DIST_BURST_SIZE; j++)
                output_ptr[j] = 0;

        for (i = 0; i < d->num_workers; i++) {
                bl = &d->backlog[i];

                for (j = 0; j < RTE_DIST_BURST_SIZE; j++)
                        for (w = 0; w < RTE_DIST_BURST_SIZE; w++)
                                if (d->in_flight_tags[i][j] == data_ptr[w]) {
                                        output_ptr[j] = i+1;
                                        break;
                                }
                for (j = 0; j < RTE_DIST_BURST_SIZE; j++)
                        for (w = 0; w < RTE_DIST_BURST_SIZE; w++)
                                if (bl->tags[j] == data_ptr[w]) {
                                        output_ptr[j] = i+1;
                                        break;
                                }
        }

        /*
         * At this stage, the output contains RTE_DIST_BURST_SIZE 16-bit
         * values, with each non-zero value holding the ID (+1) of the
         * worker to which the corresponding flow is pinned.
         */
}

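/*
 * A concrete (made-up) worked example of the matching above: suppose
 * worker 0 has in-flight tags {5, 9, 0, ...} and the incoming burst
 * carries tags {9, 9, 42, ...}. Slot 1 of worker 0's in-flight tags (tag 9)
 * matches an incoming tag, so output_ptr[1] becomes 1 (worker ID 0, plus
 * one); slots whose tags match nothing stay 0 and are assigned a worker by
 * the caller.
 */
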
/*
 * When the handshake bits indicate that there are packets coming
 * back from the worker, this function is called to copy and store
 * the valid returned pointers (store_return).
 */
static unsigned int
handle_returns(struct rte_distributor *d, unsigned int wkr)
{
        struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
        uintptr_t oldbuf;
        unsigned int ret_start = d->returns.start,
                        ret_count = d->returns.count;
        unsigned int count = 0;
        unsigned int i;

        if (buf->retptr64[0] & RTE_DISTRIB_GET_BUF) {
                for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
                        if (buf->retptr64[i] & RTE_DISTRIB_RETURN_BUF) {
                                oldbuf = ((uintptr_t)(buf->retptr64[i] >>
                                        RTE_DISTRIB_FLAG_BITS));
                                /* store returns in a circular buffer */
                                store_return(oldbuf, d, &ret_start, &ret_count);
                                count++;
                                buf->retptr64[i] &= ~RTE_DISTRIB_RETURN_BUF;
                        }
                }
                d->returns.start = ret_start;
                d->returns.count = ret_count;
                /* Clear for the worker to populate with more returns */
                buf->retptr64[0] = 0;
        }
        return count;
}

/*
 * This function releases a burst (cache line) to a worker.
 * It is called from the process function when a cache line is
 * full to make room for more packets for that worker, or when
 * all packets have been assigned to bursts and need to be flushed
 * to the workers.
 * It also needs to wait for any outstanding packets from the worker
 * before sending out new packets.
 */
static unsigned int
release(struct rte_distributor *d, unsigned int wkr)
{
        struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
        unsigned int i;

        while (!(d->bufs[wkr].bufptr64[0] & RTE_DISTRIB_GET_BUF))
                rte_pause();

        handle_returns(d, wkr);

        buf->count = 0;

        for (i = 0; i < d->backlog[wkr].count; i++) {
                d->bufs[wkr].bufptr64[i] = d->backlog[wkr].pkts[i] |
                                RTE_DISTRIB_GET_BUF | RTE_DISTRIB_VALID_BUF;
                d->in_flight_tags[wkr][i] = d->backlog[wkr].tags[i];
        }
        buf->count = i;
        for ( ; i < RTE_DIST_BURST_SIZE; i++) {
                buf->bufptr64[i] = RTE_DISTRIB_GET_BUF;
                d->in_flight_tags[wkr][i] = 0;
        }

        d->backlog[wkr].count = 0;

        /* Clear the GET bit */
        buf->bufptr64[0] &= ~RTE_DISTRIB_GET_BUF;
        return buf->count;
}

/* process a set of packets to distribute them to workers */
int
rte_distributor_process_v1705(struct rte_distributor *d,
                struct rte_mbuf **mbufs, unsigned int num_mbufs)
{
        unsigned int next_idx = 0;
        static unsigned int wkr;
        struct rte_mbuf *next_mb = NULL;
        int64_t next_value = 0;
        uint16_t new_tag = 0;
        uint16_t flows[RTE_DIST_BURST_SIZE] __rte_cache_aligned;
        unsigned int i, j, w, wid;

        if (d->alg_type == RTE_DIST_ALG_SINGLE) {
                /* Call the old API */
                return rte_distributor_process_v20(d->d_v20, mbufs, num_mbufs);
        }

        if (unlikely(num_mbufs == 0)) {
                /* Flush out all non-full cache-lines to workers. */
                for (wid = 0; wid < d->num_workers; wid++) {
                        if ((d->bufs[wid].bufptr64[0] & RTE_DISTRIB_GET_BUF)) {
                                release(d, wid);
                                handle_returns(d, wid);
                        }
                }
                return 0;
        }

        while (next_idx < num_mbufs) {
                uint16_t matches[RTE_DIST_BURST_SIZE];
                unsigned int pkts;

                if (d->bufs[wkr].bufptr64[0] & RTE_DISTRIB_GET_BUF)
                        d->bufs[wkr].count = 0;

                if ((num_mbufs - next_idx) < RTE_DIST_BURST_SIZE)
                        pkts = num_mbufs - next_idx;
                else
                        pkts = RTE_DIST_BURST_SIZE;

                for (i = 0; i < pkts; i++) {
                        if (mbufs[next_idx + i]) {
                                /* flows have to be non-zero */
                                flows[i] = mbufs[next_idx + i]->hash.usr | 1;
                        } else
                                flows[i] = 0;
                }
                for (; i < RTE_DIST_BURST_SIZE; i++)
                        flows[i] = 0;

                switch (d->dist_match_fn) {
                case RTE_DIST_MATCH_VECTOR:
                        find_match_vec(d, &flows[0], &matches[0]);
                        break;
                default:
                        find_match_scalar(d, &flows[0], &matches[0]);
                }

                /*
                 * The matches array now contains the intended worker ID (+1)
                 * of each incoming packet. Any zeroes need to be assigned
                 * workers.
                 */

                for (j = 0; j < pkts; j++) {

                        next_mb = mbufs[next_idx++];
                        next_value = (((int64_t)(uintptr_t)next_mb) <<
                                        RTE_DISTRIB_FLAG_BITS);
                        /*
                         * The user is advised to set the tag value for each
                         * mbuf before calling rte_distributor_process.
                         * User-defined tags are used to identify flows,
                         * or sessions.
                         */
                        /* flows MUST be non-zero */
                        new_tag = (uint16_t)(next_mb->hash.usr) | 1;

                        /*
                         * Uncommenting the next line will cause the find_match
                         * function to be optimised out, making this function
                         * do parallel (non-atomic) distribution
                         */
                        /* matches[j] = 0; */

                        if (matches[j]) {
                                struct rte_distributor_backlog *bl =
                                                &d->backlog[matches[j]-1];
                                if (unlikely(bl->count ==
                                                RTE_DIST_BURST_SIZE)) {
                                        release(d, matches[j]-1);
                                }

                                /* Add to the worker that already has the flow */
                                unsigned int idx = bl->count++;

                                bl->tags[idx] = new_tag;
                                bl->pkts[idx] = next_value;

                        } else {
                                struct rte_distributor_backlog *bl =
                                                &d->backlog[wkr];
                                if (unlikely(bl->count ==
                                                RTE_DIST_BURST_SIZE)) {
                                        release(d, wkr);
                                }

                                /* Add to the current worker */
                                unsigned int idx = bl->count++;

                                bl->tags[idx] = new_tag;
                                bl->pkts[idx] = next_value;
                                /*
                                 * Now that we've just added an unpinned flow
                                 * to a worker, we need to ensure that all
                                 * other packets with that same flow will go
                                 * to the same worker in this burst.
                                 */
                                for (w = j; w < pkts; w++)
                                        if (flows[w] == new_tag)
                                                matches[w] = wkr+1;
                        }
                }
                wkr++;
                if (wkr >= d->num_workers)
                        wkr = 0;
        }

        /* Flush out all non-full cache-lines to workers. */
        for (wid = 0; wid < d->num_workers; wid++)
                if ((d->bufs[wid].bufptr64[0] & RTE_DISTRIB_GET_BUF))
                        release(d, wid);

        return num_mbufs;
}
BIND_DEFAULT_SYMBOL(rte_distributor_process, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_distributor_process(struct rte_distributor *d,
                struct rte_mbuf **mbufs, unsigned int num_mbufs),
                rte_distributor_process_v1705);

/* return to the caller packets returned from workers */
int
rte_distributor_returned_pkts_v1705(struct rte_distributor *d,
                struct rte_mbuf **mbufs, unsigned int max_mbufs)
{
        struct rte_distributor_returned_pkts *returns = &d->returns;
        unsigned int retval = (max_mbufs < returns->count) ?
                        max_mbufs : returns->count;
        unsigned int i;

        if (d->alg_type == RTE_DIST_ALG_SINGLE) {
                /* Call the old API */
                return rte_distributor_returned_pkts_v20(d->d_v20,
                                mbufs, max_mbufs);
        }

        for (i = 0; i < retval; i++) {
                unsigned int idx = (returns->start + i) &
                                RTE_DISTRIB_RETURNS_MASK;

                mbufs[i] = returns->mbufs[idx];
        }
        returns->start += i;
        returns->count -= i;

        return retval;
}
BIND_DEFAULT_SYMBOL(rte_distributor_returned_pkts, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_distributor_returned_pkts(struct rte_distributor *d,
                struct rte_mbuf **mbufs, unsigned int max_mbufs),
                rte_distributor_returned_pkts_v1705);

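/*
 * For context, a minimal sketch of the distributor core's loop combining
 * the calls above (illustrative only, not part of the upstream file;
 * port/queue numbers and burst sizes are arbitrary):
 *
 *   struct rte_mbuf *bufs[64];
 *   struct rte_mbuf *done[64];
 *
 *   for (;;) {
 *           const uint16_t nb_rx = rte_eth_rx_burst(0, 0, bufs, 64);
 *           rte_distributor_process(d, bufs, nb_rx);
 *           const int nb_done = rte_distributor_returned_pkts(d, done, 64);
 *           // ... transmit or free the nb_done mbufs in done ...
 *   }
 */
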
/*
 * Return the number of packets in-flight in a distributor, i.e. packets
 * being worked on or queued up in a backlog.
 */
static inline unsigned int
total_outstanding(const struct rte_distributor *d)
{
        unsigned int wkr, total_outstanding = 0;

        for (wkr = 0; wkr < d->num_workers; wkr++)
                total_outstanding += d->backlog[wkr].count;

        return total_outstanding;
}

/*
 * Flush the distributor, so that there are no outstanding packets in flight or
 * queued up.
 */
int
rte_distributor_flush_v1705(struct rte_distributor *d)
{
        const unsigned int flushed = total_outstanding(d);
        unsigned int wkr;

        if (d->alg_type == RTE_DIST_ALG_SINGLE) {
                /* Call the old API */
                return rte_distributor_flush_v20(d->d_v20);
        }

        while (total_outstanding(d) > 0)
                rte_distributor_process(d, NULL, 0);

        /*
         * Send empty burst to all workers to allow them to exit
         * gracefully, should they need to.
         */
        rte_distributor_process(d, NULL, 0);

        for (wkr = 0; wkr < d->num_workers; wkr++)
                handle_returns(d, wkr);

        return flushed;
}
BIND_DEFAULT_SYMBOL(rte_distributor_flush, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_distributor_flush(struct rte_distributor *d),
                rte_distributor_flush_v1705);

/* clears the internal returns array in the distributor */
void
rte_distributor_clear_returns_v1705(struct rte_distributor *d)
{
        unsigned int wkr;

        if (d->alg_type == RTE_DIST_ALG_SINGLE) {
                /* Call the old API */
                rte_distributor_clear_returns_v20(d->d_v20);
                return;
        }

        /* throw away returns, so workers can exit */
        for (wkr = 0; wkr < d->num_workers; wkr++)
                d->bufs[wkr].retptr64[0] = 0;
}
BIND_DEFAULT_SYMBOL(rte_distributor_clear_returns, _v1705, 17.05);
MAP_STATIC_SYMBOL(void rte_distributor_clear_returns(struct rte_distributor *d),
                rte_distributor_clear_returns_v1705);

/* creates a distributor instance */
struct rte_distributor *
rte_distributor_create_v1705(const char *name,
                unsigned int socket_id,
                unsigned int num_workers,
                unsigned int alg_type)
{
        struct rte_distributor *d;
        struct rte_dist_burst_list *dist_burst_list;
        char mz_name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz;
        unsigned int i;

        /* TODO Reorganise function properly around RTE_DIST_ALG_SINGLE/BURST */

        /* compilation-time checks */
        RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0);
        RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);

        if (alg_type == RTE_DIST_ALG_SINGLE) {
                d = malloc(sizeof(struct rte_distributor));
                if (d == NULL) {
                        rte_errno = ENOMEM;
                        return NULL;
                }
                d->d_v20 = rte_distributor_create_v20(name,
                                socket_id, num_workers);
                if (d->d_v20 == NULL) {
                        free(d);
                        /* rte_errno will have been set */
                        return NULL;
                }
                d->alg_type = alg_type;
                return d;
        }

        if (name == NULL || num_workers >= RTE_DISTRIB_MAX_WORKERS) {
                rte_errno = EINVAL;
                return NULL;
        }

        snprintf(mz_name, sizeof(mz_name), RTE_DISTRIB_PREFIX"%s", name);
        mz = rte_memzone_reserve(mz_name, sizeof(*d), socket_id, NO_FLAGS);
        if (mz == NULL) {
                rte_errno = ENOMEM;
                return NULL;
        }

        d = mz->addr;
        snprintf(d->name, sizeof(d->name), "%s", name);
        d->num_workers = num_workers;
        d->alg_type = alg_type;

#if defined(RTE_ARCH_X86)
        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_2))
                d->dist_match_fn = RTE_DIST_MATCH_VECTOR;
        else
#endif
                d->dist_match_fn = RTE_DIST_MATCH_SCALAR;

        /*
         * Set up the backlog tags so they're pointing at the second cache
         * line for performance during flow matching
         */
        for (i = 0; i < num_workers; i++)
                d->backlog[i].tags = &d->in_flight_tags[i][RTE_DIST_BURST_SIZE];

        dist_burst_list = RTE_TAILQ_CAST(rte_dist_burst_tailq.head,
                                          rte_dist_burst_list);

        rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
        TAILQ_INSERT_TAIL(dist_burst_list, d, next);
        rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

        return d;
}
BIND_DEFAULT_SYMBOL(rte_distributor_create, _v1705, 17.05);
MAP_STATIC_SYMBOL(struct rte_distributor *rte_distributor_create(
                const char *name, unsigned int socket_id,
                unsigned int num_workers, unsigned int alg_type),
                rte_distributor_create_v1705);
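
/*
 * For context, a minimal setup/teardown sketch for the burst API (a sketch
 * only, not part of the upstream file; names and parameters are
 * illustrative):
 *
 *   struct rte_distributor *d = rte_distributor_create("pkt_dist",
 *                   rte_socket_id(), 4, RTE_DIST_ALG_BURST);
 *   if (d == NULL)
 *           rte_exit(EXIT_FAILURE, "cannot create distributor\n");
 *
 *   // ... launch worker lcores, feed packets via rte_distributor_process()
 *
 *   // On shutdown: flush outstanding packets, then clear returns so any
 *   // workers still blocked in rte_distributor_get_pkt() can exit.
 *   rte_distributor_flush(d);
 *   rte_distributor_clear_returns(d);
 */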