c6b19a388649b3e7e3363e34e8ffd3f04d068765
[dpdk.git] / lib / librte_distributor / rte_distributor.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <sys/queue.h>
#include <string.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_cycles.h>
#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>
#include <rte_pause.h>
#include <rte_tailq.h>

#include "rte_distributor.h"
#include "rte_distributor_single.h"
#include "distributor_private.h"

TAILQ_HEAD(rte_dist_burst_list, rte_distributor);

static struct rte_tailq_elem rte_dist_burst_tailq = {
        .name = "RTE_DIST_BURST",
};
EAL_REGISTER_TAILQ(rte_dist_burst_tailq)

/**** APIs called by workers ****/

/**** Burst Packet APIs called by workers ****/

void
rte_distributor_request_pkt(struct rte_distributor *d,
                unsigned int worker_id, struct rte_mbuf **oldpkt,
                unsigned int count)
{
        struct rte_distributor_buffer *buf = &(d->bufs[worker_id]);
        unsigned int i;

        volatile int64_t *retptr64;

        if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
                rte_distributor_request_pkt_single(d->d_single,
                        worker_id, oldpkt[0]);
                return;
        }

        retptr64 = &(buf->retptr64[0]);
        /* Spin while the handshake bits are set (the distributor clears them).
         * Sync with the distributor on the GET_BUF flag.
         */
        while (unlikely(__atomic_load_n(retptr64, __ATOMIC_ACQUIRE)
                        & RTE_DISTRIB_GET_BUF)) {
                rte_pause();
                uint64_t t = rte_rdtsc()+100;

                while (rte_rdtsc() < t)
                        rte_pause();
        }

        /*
         * If we've got here, the distributor has just cleared the
         * handshake bits. Populate the retptrs with the returning packets.
         */

        for (i = count; i < RTE_DIST_BURST_SIZE; i++)
                buf->retptr64[i] = 0;

        /* Set the return bit for each packet returned */
        for (i = count; i-- > 0; )
                buf->retptr64[i] =
                        (((int64_t)(uintptr_t)(oldpkt[i])) <<
                        RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_RETURN_BUF;

        /*
         * Finally, set the GET_BUF flag to signal to the distributor that the
         * cache line is ready for processing.
         * Sync with the distributor to release retptrs.
         */
        __atomic_store_n(retptr64, *retptr64 | RTE_DISTRIB_GET_BUF,
                        __ATOMIC_RELEASE);
}

int
rte_distributor_poll_pkt(struct rte_distributor *d,
                unsigned int worker_id, struct rte_mbuf **pkts)
{
        struct rte_distributor_buffer *buf = &d->bufs[worker_id];
        uint64_t ret;
        int count = 0;
        unsigned int i;

        if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
                pkts[0] = rte_distributor_poll_pkt_single(d->d_single,
                        worker_id);
                return (pkts[0]) ? 1 : 0;
        }

        /* If the GET_BUF bit is still set, the distributor has not refilled
         * the buffer yet, so return -1.
         * Sync with the distributor to acquire bufptrs.
         */
        if (__atomic_load_n(&(buf->bufptr64[0]), __ATOMIC_ACQUIRE)
                & RTE_DISTRIB_GET_BUF)
                return -1;

        /* since bufptr64 is signed, this should be an arithmetic shift */
        for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
                if (likely(buf->bufptr64[i] & RTE_DISTRIB_VALID_BUF)) {
                        ret = buf->bufptr64[i] >> RTE_DISTRIB_FLAG_BITS;
                        pkts[count++] = (struct rte_mbuf *)((uintptr_t)(ret));
                }
        }

        /*
         * We now have the contents of the cache line in an array of mbuf
         * pointers, so toggle the GET_BUF bit so the distributor can start
         * working on the next cache line while we process this one.
         * Sync with the distributor on the GET_BUF flag. Release bufptrs.
         */
        __atomic_store_n(&(buf->bufptr64[0]),
                buf->bufptr64[0] | RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);

        return count;
}

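/*
 * Illustrative sketch only (not part of the library, kept under "#if 0" so it
 * never builds): one way a worker can use the request/poll pair above to
 * overlap useful work with the distributor handshake. app_do_other_work() is
 * a hypothetical placeholder for application code.
 */
#if 0
static void
example_request_then_poll(struct rte_distributor *d, unsigned int worker_id)
{
        struct rte_mbuf *pkts[RTE_DIST_BURST_SIZE];
        struct rte_mbuf *oldpkt[1] = {NULL};    /* nothing to hand back yet */
        int count;

        /* Post the request; this returns as soon as the request is written
         * into the worker's cache line.
         */
        rte_distributor_request_pkt(d, worker_id, oldpkt, 0);

        /* Do something useful while the distributor fills the buffer. */
        do {
                app_do_other_work();
                count = rte_distributor_poll_pkt(d, worker_id, pkts);
        } while (count == -1);

        /* 'count' mbufs are now available in pkts[]. */
}
#endif
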
int
rte_distributor_get_pkt(struct rte_distributor *d,
                unsigned int worker_id, struct rte_mbuf **pkts,
                struct rte_mbuf **oldpkt, unsigned int return_count)
{
        int count;

        if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
                if (return_count <= 1) {
                        pkts[0] = rte_distributor_get_pkt_single(d->d_single,
                                worker_id, oldpkt[0]);
                        return (pkts[0]) ? 1 : 0;
                } else
                        return -EINVAL;
        }

        rte_distributor_request_pkt(d, worker_id, oldpkt, return_count);

        count = rte_distributor_poll_pkt(d, worker_id, pkts);
        while (count == -1) {
                uint64_t t = rte_rdtsc() + 100;

                while (rte_rdtsc() < t)
                        rte_pause();

                count = rte_distributor_poll_pkt(d, worker_id, pkts);
        }
        return count;
}

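/*
 * Illustrative sketch only (not part of the library, kept under "#if 0"):
 * the typical blocking worker loop built on rte_distributor_get_pkt().
 * app_quit and app_handle_packets() are hypothetical application names.
 */
#if 0
static int
example_worker_loop(struct rte_distributor *d, unsigned int worker_id)
{
        struct rte_mbuf *bufs[RTE_DIST_BURST_SIZE] = {NULL};
        unsigned int num = 0;

        while (!app_quit) {
                /* Hand back the previous burst and block until the
                 * distributor delivers a new one; packets sharing a tag
                 * always land on the same worker.
                 */
                num = rte_distributor_get_pkt(d, worker_id, bufs, bufs, num);
                app_handle_packets(bufs, num);
        }

        /* Give any packets still held back to the distributor on exit. */
        return rte_distributor_return_pkt(d, worker_id, bufs, num);
}
#endif
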
int
rte_distributor_return_pkt(struct rte_distributor *d,
                unsigned int worker_id, struct rte_mbuf **oldpkt, int num)
{
        struct rte_distributor_buffer *buf = &d->bufs[worker_id];
        unsigned int i;

        if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
                if (num == 1)
                        return rte_distributor_return_pkt_single(d->d_single,
                                worker_id, oldpkt[0]);
                else
                        return -EINVAL;
        }

        /* Spin while the handshake bits are set (the distributor clears them).
         * Sync with the distributor on the GET_BUF flag.
         */
        while (unlikely(__atomic_load_n(&(buf->retptr64[0]), __ATOMIC_RELAXED)
                        & RTE_DISTRIB_GET_BUF)) {
                rte_pause();
                uint64_t t = rte_rdtsc()+100;

                while (rte_rdtsc() < t)
                        rte_pause();
        }

        /* Sync with distributor to acquire retptrs */
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
        for (i = 0; i < RTE_DIST_BURST_SIZE; i++)
                /* Switch off the return bit first */
                buf->retptr64[i] &= ~RTE_DISTRIB_RETURN_BUF;

        for (i = num; i-- > 0; )
                buf->retptr64[i] = (((int64_t)(uintptr_t)oldpkt[i]) <<
                        RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_RETURN_BUF;

        /* Set the GET_BUF flag even if we got no returns.
         * Sync with the distributor on the GET_BUF flag. Release retptrs.
         */
        __atomic_store_n(&(buf->retptr64[0]),
                buf->retptr64[0] | RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);

        return 0;
}

/**** APIs called on distributor core ****/

/* stores a packet returned from a worker inside the returns array */
static inline void
store_return(uintptr_t oldbuf, struct rte_distributor *d,
                unsigned int *ret_start, unsigned int *ret_count)
{
        if (!oldbuf)
                return;
        /* store returns in a circular buffer */
        d->returns.mbufs[(*ret_start + *ret_count) & RTE_DISTRIB_RETURNS_MASK]
                        = (void *)oldbuf;
        /* Once the ring holds RTE_DISTRIB_RETURNS_MASK entries, advance the
         * start index instead of growing the count, i.e. the oldest stored
         * return is overwritten.
         */
        *ret_start += (*ret_count == RTE_DISTRIB_RETURNS_MASK);
        *ret_count += (*ret_count != RTE_DISTRIB_RETURNS_MASK);
}

/*
 * Match the flow_ids (tags) of the incoming packets to the flow_ids
 * of the inflight packets (both inflight on the workers and in each worker
 * backlog). This will then allow us to pin those packets to the relevant
 * workers to give us our atomic flow pinning.
 */
void
find_match_scalar(struct rte_distributor *d,
                        uint16_t *data_ptr,
                        uint16_t *output_ptr)
{
        struct rte_distributor_backlog *bl;
        uint16_t i, j, w;

        /*
         * Function overview:
         * 1. Loop through all worker IDs
         * 2. Compare the current inflights to the incoming tags
         * 3. Compare the current backlog to the incoming tags
         * 4. Add any matches to the output
         */

        for (j = 0 ; j < RTE_DIST_BURST_SIZE; j++)
                output_ptr[j] = 0;

        /* j indexes the incoming packets, w indexes the inflight/backlog
         * slots; the output must be indexed by the incoming packet position.
         */
        for (i = 0; i < d->num_workers; i++) {
                bl = &d->backlog[i];

                for (j = 0; j < RTE_DIST_BURST_SIZE; j++)
                        for (w = 0; w < RTE_DIST_BURST_SIZE; w++)
                                if (d->in_flight_tags[i][w] == data_ptr[j]) {
                                        output_ptr[j] = i+1;
                                        break;
                                }
                for (j = 0; j < RTE_DIST_BURST_SIZE; j++)
                        for (w = 0; w < RTE_DIST_BURST_SIZE; w++)
                                if (bl->tags[w] == data_ptr[j]) {
                                        output_ptr[j] = i+1;
                                        break;
                                }
        }

        /*
         * At this stage, the output contains 8 16-bit values, with
         * each non-zero value holding the worker ID (+1) to which the
         * corresponding incoming flow is pinned.
         */
}


/*
 * When the handshake bits indicate that there are packets coming
 * back from the worker, this function is called to copy and store
 * the valid returned pointers (store_return).
 */
static unsigned int
handle_returns(struct rte_distributor *d, unsigned int wkr)
{
        struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
        uintptr_t oldbuf;
        unsigned int ret_start = d->returns.start,
                        ret_count = d->returns.count;
        unsigned int count = 0;
        unsigned int i;

        /* Sync on GET_BUF flag. Acquire retptrs. */
        if (__atomic_load_n(&(buf->retptr64[0]), __ATOMIC_ACQUIRE)
                & RTE_DISTRIB_GET_BUF) {
                for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
                        if (buf->retptr64[i] & RTE_DISTRIB_RETURN_BUF) {
                                oldbuf = ((uintptr_t)(buf->retptr64[i] >>
                                        RTE_DISTRIB_FLAG_BITS));
                                /* store returns in a circular buffer */
                                store_return(oldbuf, d, &ret_start, &ret_count);
                                count++;
                                buf->retptr64[i] &= ~RTE_DISTRIB_RETURN_BUF;
                        }
                }
                d->returns.start = ret_start;
                d->returns.count = ret_count;
                /* Clear for the worker to populate with more returns.
                 * Sync with worker on GET_BUF flag. Release retptrs.
                 */
                __atomic_store_n(&(buf->retptr64[0]), 0, __ATOMIC_RELEASE);
        }
        return count;
}

/*
 * This function releases a burst (cache line) to a worker.
 * It is called from the process function when a cache line is
 * full to make room for more packets for that worker, or when
 * all packets have been assigned to bursts and need to be flushed
 * to the workers.
 * It also needs to wait for any outstanding packets from the worker
 * before sending out new packets.
 */
static unsigned int
release(struct rte_distributor *d, unsigned int wkr)
{
        struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
        unsigned int i;

        /* Sync with worker on GET_BUF flag */
        while (!(__atomic_load_n(&(d->bufs[wkr].bufptr64[0]), __ATOMIC_ACQUIRE)
                & RTE_DISTRIB_GET_BUF))
                rte_pause();

        handle_returns(d, wkr);

        buf->count = 0;

        for (i = 0; i < d->backlog[wkr].count; i++) {
                d->bufs[wkr].bufptr64[i] = d->backlog[wkr].pkts[i] |
                                RTE_DISTRIB_GET_BUF | RTE_DISTRIB_VALID_BUF;
                d->in_flight_tags[wkr][i] = d->backlog[wkr].tags[i];
        }
        buf->count = i;
        for ( ; i < RTE_DIST_BURST_SIZE ; i++) {
                buf->bufptr64[i] = RTE_DISTRIB_GET_BUF;
                d->in_flight_tags[wkr][i] = 0;
        }

        d->backlog[wkr].count = 0;

        /* Clear the GET_BUF bit.
         * Sync with worker on GET_BUF flag. Release bufptrs.
         */
        __atomic_store_n(&(buf->bufptr64[0]),
                buf->bufptr64[0] & ~RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);
        return buf->count;
}


/* process a set of packets to distribute them to workers */
int
rte_distributor_process(struct rte_distributor *d,
                struct rte_mbuf **mbufs, unsigned int num_mbufs)
{
        unsigned int next_idx = 0;
        static unsigned int wkr; /* next worker for unmatched flows, kept across calls */
        struct rte_mbuf *next_mb = NULL;
        int64_t next_value = 0;
        uint16_t new_tag = 0;
        uint16_t flows[RTE_DIST_BURST_SIZE] __rte_cache_aligned;
        unsigned int i, j, w, wid;

        if (d->alg_type == RTE_DIST_ALG_SINGLE) {
                /* Call the old API */
                return rte_distributor_process_single(d->d_single,
                        mbufs, num_mbufs);
        }

        if (unlikely(num_mbufs == 0)) {
                /* Flush out all non-full cache-lines to workers. */
                for (wid = 0 ; wid < d->num_workers; wid++) {
                        /* Sync with worker on GET_BUF flag. */
                        if (__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
                                __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF) {
                                release(d, wid);
                                handle_returns(d, wid);
                        }
                }
                return 0;
        }

        while (next_idx < num_mbufs) {
                uint16_t matches[RTE_DIST_BURST_SIZE];
                unsigned int pkts;

                /* Sync with worker on GET_BUF flag. */
                if (__atomic_load_n(&(d->bufs[wkr].bufptr64[0]),
                        __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)
                        d->bufs[wkr].count = 0;

                if ((num_mbufs - next_idx) < RTE_DIST_BURST_SIZE)
                        pkts = num_mbufs - next_idx;
                else
                        pkts = RTE_DIST_BURST_SIZE;

                for (i = 0; i < pkts; i++) {
                        if (mbufs[next_idx + i]) {
                                /* flows have to be non-zero */
                                flows[i] = mbufs[next_idx + i]->hash.usr | 1;
                        } else
                                flows[i] = 0;
                }
                for (; i < RTE_DIST_BURST_SIZE; i++)
                        flows[i] = 0;

                switch (d->dist_match_fn) {
                case RTE_DIST_MATCH_VECTOR:
                        find_match_vec(d, &flows[0], &matches[0]);
                        break;
                default:
                        find_match_scalar(d, &flows[0], &matches[0]);
                }

                /*
                 * The matches array now contains the intended worker ID (+1)
                 * of the incoming packets. Any zeroes need to be assigned
                 * workers.
                 */

                for (j = 0; j < pkts; j++) {

                        next_mb = mbufs[next_idx++];
                        next_value = (((int64_t)(uintptr_t)next_mb) <<
                                        RTE_DISTRIB_FLAG_BITS);
                        /*
                         * The user is advised to set the tag value for each
                         * mbuf before calling rte_distributor_process().
                         * User-defined tags are used to identify flows or
                         * sessions.
                         */
                        /* flows MUST be non-zero */
                        new_tag = (uint16_t)(next_mb->hash.usr) | 1;

                        /*
                         * Uncommenting the next line will cause the find_match
                         * function to be optimized out, making this function
                         * do parallel (non-atomic) distribution
                         */
                        /* matches[j] = 0; */

                        if (matches[j]) {
                                struct rte_distributor_backlog *bl =
                                                &d->backlog[matches[j]-1];
                                if (unlikely(bl->count ==
                                                RTE_DIST_BURST_SIZE)) {
                                        release(d, matches[j]-1);
                                }

                                /* Add to the worker that already has the flow */
                                unsigned int idx = bl->count++;

                                bl->tags[idx] = new_tag;
                                bl->pkts[idx] = next_value;

                        } else {
                                struct rte_distributor_backlog *bl =
                                                &d->backlog[wkr];
                                if (unlikely(bl->count ==
                                                RTE_DIST_BURST_SIZE)) {
                                        release(d, wkr);
                                }

                                /* Add to the current worker */
                                unsigned int idx = bl->count++;

                                bl->tags[idx] = new_tag;
                                bl->pkts[idx] = next_value;
                                /*
                                 * Now that we've just added an unpinned flow
                                 * to a worker, we need to ensure that all
                                 * other packets with that same flow will go
                                 * to the same worker in this burst.
                                 */
                                for (w = j; w < pkts; w++)
                                        if (flows[w] == new_tag)
                                                matches[w] = wkr+1;
                        }
                }
                wkr++;
                if (wkr >= d->num_workers)
                        wkr = 0;
        }

        /* Flush out all non-full cache-lines to workers. */
        for (wid = 0 ; wid < d->num_workers; wid++)
                /* Sync with worker on GET_BUF flag. */
                if ((__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
                        __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF))
                        release(d, wid);

        return num_mbufs;
}

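/*
 * Illustrative sketch only (not part of the library, kept under "#if 0"):
 * a distributor-core step that pushes a received burst to the workers and
 * then drains whatever the workers have finished with. The 'done' array size
 * and app_tx_or_free() are arbitrary/hypothetical choices for the example.
 */
#if 0
static void
example_distribute_burst(struct rte_distributor *d,
                struct rte_mbuf **rx_pkts, unsigned int nb_rx)
{
        struct rte_mbuf *done[64];
        int nb_done;

        /* The tag used for flow pinning is mbuf->hash.usr, so it should be
         * set (e.g. from an RSS hash or flow id) before this call.
         */
        rte_distributor_process(d, rx_pkts, nb_rx);

        /* Collect packets the workers have handed back. */
        nb_done = rte_distributor_returned_pkts(d, done, RTE_DIM(done));
        app_tx_or_free(done, nb_done);
}
#endif
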
/* return to the caller the packets returned from workers */
int
rte_distributor_returned_pkts(struct rte_distributor *d,
                struct rte_mbuf **mbufs, unsigned int max_mbufs)
{
        struct rte_distributor_returned_pkts *returns = &d->returns;
        unsigned int retval = (max_mbufs < returns->count) ?
                        max_mbufs : returns->count;
        unsigned int i;

        if (d->alg_type == RTE_DIST_ALG_SINGLE) {
                /* Call the old API */
                return rte_distributor_returned_pkts_single(d->d_single,
                                mbufs, max_mbufs);
        }

        for (i = 0; i < retval; i++) {
                unsigned int idx = (returns->start + i) &
                                RTE_DISTRIB_RETURNS_MASK;

                mbufs[i] = returns->mbufs[idx];
        }
        returns->start += i;
        returns->count -= i;

        return retval;
}

/*
 * Return the number of packets in-flight in a distributor, i.e. packets
 * being worked on or queued up in a backlog.
 */
static inline unsigned int
total_outstanding(const struct rte_distributor *d)
{
        unsigned int wkr, total_outstanding = 0;

        for (wkr = 0; wkr < d->num_workers; wkr++)
                total_outstanding += d->backlog[wkr].count;

        return total_outstanding;
}

/*
 * Flush the distributor, so that there are no outstanding packets in flight or
 * queued up.
 */
int
rte_distributor_flush(struct rte_distributor *d)
{
        unsigned int flushed;
        unsigned int wkr;

        if (d->alg_type == RTE_DIST_ALG_SINGLE) {
                /* Call the old API */
                return rte_distributor_flush_single(d->d_single);
        }

        flushed = total_outstanding(d);

        while (total_outstanding(d) > 0)
                rte_distributor_process(d, NULL, 0);

        /* wait 10ms to allow all workers to drain the packets */
        rte_delay_us(10000);

        /*
         * Send an empty burst to all workers to allow them to exit
         * gracefully, should they need to.
         */
        rte_distributor_process(d, NULL, 0);

        for (wkr = 0; wkr < d->num_workers; wkr++)
                handle_returns(d, wkr);

        return flushed;
}

/* clears the internal returns array in the distributor */
void
rte_distributor_clear_returns(struct rte_distributor *d)
{
        unsigned int wkr;

        if (d->alg_type == RTE_DIST_ALG_SINGLE) {
                /* Call the old API */
                rte_distributor_clear_returns_single(d->d_single);
                return;
        }

        /* throw away returns, so workers can exit */
        for (wkr = 0; wkr < d->num_workers; wkr++)
                /* Sync with worker. Release retptrs. */
                __atomic_store_n(&(d->bufs[wkr].retptr64[0]), 0,
                                __ATOMIC_RELEASE);
}

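/*
 * Illustrative sketch only (not part of the library, kept under "#if 0"):
 * a typical shutdown sequence on the distributor core, combining the flush
 * and clear-returns calls above so that blocked workers can exit.
 */
#if 0
static void
example_shutdown(struct rte_distributor *d)
{
        /* Push all backlogged packets out to the workers. */
        rte_distributor_flush(d);

        /* Discard the final returns so a worker blocked handing packets back
         * can make progress and exit its loop.
         */
        rte_distributor_clear_returns(d);
}
#endif
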
/* creates a distributor instance */
struct rte_distributor *
rte_distributor_create(const char *name,
                unsigned int socket_id,
                unsigned int num_workers,
                unsigned int alg_type)
{
        struct rte_distributor *d;
        struct rte_dist_burst_list *dist_burst_list;
        char mz_name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz;
        unsigned int i;

        /* TODO Reorganise function properly around RTE_DIST_ALG_SINGLE/BURST */

        /* compile-time checks */
        RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0);
        RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);

        if (name == NULL || num_workers >=
                (unsigned int)RTE_MIN(RTE_DISTRIB_MAX_WORKERS, RTE_MAX_LCORE)) {
                rte_errno = EINVAL;
                return NULL;
        }

        if (alg_type == RTE_DIST_ALG_SINGLE) {
                d = malloc(sizeof(struct rte_distributor));
                if (d == NULL) {
                        rte_errno = ENOMEM;
                        return NULL;
                }
                d->d_single = rte_distributor_create_single(name,
                                socket_id, num_workers);
                if (d->d_single == NULL) {
                        free(d);
                        /* rte_errno will have been set */
                        return NULL;
                }
                d->alg_type = alg_type;
                return d;
        }

        snprintf(mz_name, sizeof(mz_name), RTE_DISTRIB_PREFIX"%s", name);
        mz = rte_memzone_reserve(mz_name, sizeof(*d), socket_id, NO_FLAGS);
        if (mz == NULL) {
                rte_errno = ENOMEM;
                return NULL;
        }

        d = mz->addr;
        strlcpy(d->name, name, sizeof(d->name));
        d->num_workers = num_workers;
        d->alg_type = alg_type;

        d->dist_match_fn = RTE_DIST_MATCH_SCALAR;
#if defined(RTE_ARCH_X86)
        d->dist_match_fn = RTE_DIST_MATCH_VECTOR;
#endif

        /*
         * Set up the backlog tags so they're pointing at the second cache
         * line for performance during flow matching
         */
        for (i = 0 ; i < num_workers ; i++)
                d->backlog[i].tags = &d->in_flight_tags[i][RTE_DIST_BURST_SIZE];

        dist_burst_list = RTE_TAILQ_CAST(rte_dist_burst_tailq.head,
                                          rte_dist_burst_list);

        rte_mcfg_tailq_write_lock();
        TAILQ_INSERT_TAIL(dist_burst_list, d, next);
        rte_mcfg_tailq_write_unlock();

        return d;
}
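
/*
 * Illustrative sketch only (not part of the library, kept under "#if 0"):
 * creating a burst-mode distributor. The name and worker count are arbitrary
 * example values; rte_socket_id() is declared in rte_lcore.h.
 */
#if 0
static struct rte_distributor *
example_create(void)
{
        struct rte_distributor *d;

        d = rte_distributor_create("example_dist", rte_socket_id(),
                        4, RTE_DIST_ALG_BURST);
        if (d == NULL) {
                /* rte_errno holds the reason, e.g. EINVAL or ENOMEM. */
                return NULL;
        }
        return d;
}
#endif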