/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <sys/queue.h>
#include <string.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_cycles.h>
#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>
#include <rte_pause.h>
#include <rte_tailq.h>

#include "rte_distributor.h"
#include "rte_distributor_single.h"
#include "distributor_private.h"

TAILQ_HEAD(rte_dist_burst_list, rte_distributor);

static struct rte_tailq_elem rte_dist_burst_tailq = {
	.name = "RTE_DIST_BURST",
};
EAL_REGISTER_TAILQ(rte_dist_burst_tailq)

/**** APIs called by workers ****/

/**** Burst Packet APIs called by workers ****/

void
rte_distributor_request_pkt(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **oldpkt,
		unsigned int count)
{
	struct rte_distributor_buffer *buf = &(d->bufs[worker_id]);
	unsigned int i;

	volatile int64_t *retptr64;

	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
		rte_distributor_request_pkt_single(d->d_single,
			worker_id, count ? oldpkt[0] : NULL);
		return;
	}

	retptr64 = &(buf->retptr64[0]);
	/* Spin while handshake bits are set (scheduler clears them).
	 * Sync with worker on GET_BUF flag.
	 */
	while (unlikely(__atomic_load_n(retptr64, __ATOMIC_ACQUIRE)
			& (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF))) {
		rte_pause();
		uint64_t t = rte_rdtsc() + 100;

		while (rte_rdtsc() < t)
			rte_pause();
	}

	/*
	 * OK, if we've got here, then the scheduler has just cleared the
	 * handshake bits. Populate the retptrs with returning packets.
	 */
	for (i = count; i < RTE_DIST_BURST_SIZE; i++)
		buf->retptr64[i] = 0;

	/* Set VALID_BUF bit for each packet returned */
	for (i = count; i-- > 0; )
		buf->retptr64[i] =
				(((int64_t)(uintptr_t)(oldpkt[i])) <<
				RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_VALID_BUF;

	/*
	 * Finally, set the GET_BUF flag to signal to the distributor that
	 * the cache line is ready for processing.
	 * Sync with distributor to release retptrs.
	 */
	__atomic_store_n(retptr64, *retptr64 | RTE_DISTRIB_GET_BUF,
			__ATOMIC_RELEASE);
}
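
/*
 * Illustrative sketch (not from this file): how a single 64-bit slot in
 * retptr64/bufptr64 packs an mbuf pointer together with the handshake and
 * VALID_BUF flag bits, assuming the low RTE_DISTRIB_FLAG_BITS bits of an
 * mbuf address are zero (mbufs are cache-line aligned):
 *
 *	int64_t slot = (((int64_t)(uintptr_t)m) << RTE_DISTRIB_FLAG_BITS)
 *			| RTE_DISTRIB_VALID_BUF;
 *	struct rte_mbuf *back = (struct rte_mbuf *)
 *			(uintptr_t)(slot >> RTE_DISTRIB_FLAG_BITS);
 *
 * The (arithmetic) right shift discards the flag bits and restores the
 * original pointer, which is exactly what the poll/handle paths below do.
 */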

int
rte_distributor_poll_pkt(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **pkts)
{
	struct rte_distributor_buffer *buf = &d->bufs[worker_id];
	int64_t ret;
	int count = 0;
	unsigned int i;

	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
		pkts[0] = rte_distributor_poll_pkt_single(d->d_single,
			worker_id);
		return (pkts[0]) ? 1 : 0;
	}

	/* If any of the below bits is set, return.
	 * GET_BUF is set when the distributor hasn't sent any packets yet.
	 * RETURN_BUF is set when the distributor must retrieve in-flight
	 * packets.
	 * Sync with distributor to acquire bufptrs.
	 */
	if (__atomic_load_n(&(buf->bufptr64[0]), __ATOMIC_ACQUIRE)
		& (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF))
		return -1;

	/* since bufptr64 is signed, this should be an arithmetic shift */
	for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
		if (likely(buf->bufptr64[i] & RTE_DISTRIB_VALID_BUF)) {
			ret = buf->bufptr64[i] >> RTE_DISTRIB_FLAG_BITS;
			pkts[count++] = (struct rte_mbuf *)((uintptr_t)(ret));
		}
	}

	/*
	 * Now that the contents of the cache line have been copied into an
	 * array of mbuf pointers, toggle the bit so the scheduler can start
	 * working on the next cache line while we work on this one.
	 * Sync with distributor on GET_BUF flag. Release bufptrs.
	 */
	__atomic_store_n(&(buf->bufptr64[0]),
		buf->bufptr64[0] | RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);

	return count;
}

int
rte_distributor_get_pkt(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **pkts,
		struct rte_mbuf **oldpkt, unsigned int return_count)
{
	int count;

	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
		if (return_count <= 1) {
			pkts[0] = rte_distributor_get_pkt_single(d->d_single,
				worker_id, return_count ? oldpkt[0] : NULL);
			return (pkts[0]) ? 1 : 0;
		} else
			return -EINVAL;
	}

	rte_distributor_request_pkt(d, worker_id, oldpkt, return_count);

	count = rte_distributor_poll_pkt(d, worker_id, pkts);
	while (count == -1) {
		uint64_t t = rte_rdtsc() + 100;

		while (rte_rdtsc() < t)
			rte_pause();

		count = rte_distributor_poll_pkt(d, worker_id, pkts);
	}
	return count;
}
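
/*
 * Illustrative sketch (assumption, not part of this file): a typical worker
 * lcore loop built on the API above. The worker hands back the packets it
 * finished in the previous iteration while requesting a new burst;
 * quit_signal and worker_id are placeholders supplied by the application.
 *
 *	struct rte_mbuf *bufs[RTE_DIST_BURST_SIZE];
 *	unsigned int num = 0;
 *
 *	while (!quit_signal) {
 *		num = rte_distributor_get_pkt(d, worker_id, bufs, bufs, num);
 *		// ... work on the num packets in bufs ...
 *	}
 *	// on exit, hand any remaining packets back to the distributor
 *	rte_distributor_return_pkt(d, worker_id, bufs, num);
 */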

int
rte_distributor_return_pkt(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **oldpkt, int num)
{
	struct rte_distributor_buffer *buf = &d->bufs[worker_id];
	unsigned int i;

	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
		if (num == 1)
			return rte_distributor_return_pkt_single(d->d_single,
				worker_id, oldpkt[0]);
		else if (num == 0)
			return rte_distributor_return_pkt_single(d->d_single,
				worker_id, NULL);
		else
			return -EINVAL;
	}

	/* Spin while handshake bits are set (scheduler clears them).
	 * Sync with worker on GET_BUF flag.
	 */
	while (unlikely(__atomic_load_n(&(buf->retptr64[0]), __ATOMIC_RELAXED)
			& (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF))) {
		rte_pause();
		uint64_t t = rte_rdtsc() + 100;

		while (rte_rdtsc() < t)
			rte_pause();
	}

	/* Sync with distributor to acquire retptrs */
	__atomic_thread_fence(__ATOMIC_ACQUIRE);
	for (i = 0; i < RTE_DIST_BURST_SIZE; i++)
		/* Switch off the return bit first */
		buf->retptr64[i] = 0;

	for (i = num; i-- > 0; )
		buf->retptr64[i] = (((int64_t)(uintptr_t)oldpkt[i]) <<
			RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_VALID_BUF;

	/* Use RETURN_BUF on bufptr64 to notify the distributor that
	 * we won't read any mbufs from there even if GET_BUF is set.
	 * This allows the distributor to retrieve in-flight, already-sent
	 * packets.
	 */
	__atomic_or_fetch(&(buf->bufptr64[0]), RTE_DISTRIB_RETURN_BUF,
		__ATOMIC_ACQ_REL);

	/* Set RETURN_BUF on retptr64 even if we got no returns.
	 * Sync with distributor on RETURN_BUF flag. Release retptrs.
	 * Notify the distributor that we are not requesting any more packets.
	 */
	__atomic_store_n(&(buf->retptr64[0]),
		buf->retptr64[0] | RTE_DISTRIB_RETURN_BUF, __ATOMIC_RELEASE);

	return 0;
}
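
/*
 * Illustrative note (assumption): because the loop above never dereferences
 * oldpkt when num is 0, a worker that simply wants to stop participating can
 * signal that with the RETURN_BUF handshake alone:
 *
 *	rte_distributor_return_pkt(d, worker_id, NULL, 0);
 *
 * The distributor will then reclaim anything still assigned to this worker.
 */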

/**** APIs called on distributor core ***/

/* stores a packet returned from a worker inside the returns array */
static inline void
store_return(uintptr_t oldbuf, struct rte_distributor *d,
		unsigned int *ret_start, unsigned int *ret_count)
{
	if (!oldbuf)
		return;
	/* store returns in a circular buffer */
	d->returns.mbufs[(*ret_start + *ret_count) & RTE_DISTRIB_RETURNS_MASK]
			= (void *)oldbuf;
	*ret_start += (*ret_count == RTE_DISTRIB_RETURNS_MASK);
	*ret_count += (*ret_count != RTE_DISTRIB_RETURNS_MASK);
}
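
/*
 * Worked example (derived from the code above): the index
 * (*ret_start + *ret_count) & RTE_DISTRIB_RETURNS_MASK addresses a
 * power-of-two ring. While the ring is not full only *ret_count grows; once
 * *ret_count reaches RTE_DISTRIB_RETURNS_MASK each further store advances
 * *ret_start instead, so the oldest return is overwritten and the ring keeps
 * the most recent entries.
 */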

/*
 * Match the flow_ids (tags) of the incoming packets to the flow_ids
 * of the inflight packets (both inflight on the workers and in each worker
 * backlog). This will then allow us to pin those packets to the relevant
 * workers to give us our atomic flow pinning.
 */
static inline void
find_match_scalar(struct rte_distributor *d,
			uint16_t *data_ptr,
			uint16_t *output_ptr)
{
	struct rte_distributor_backlog *bl;
	uint16_t i, j, w;

	/*
	 * Function overview:
	 * 1. Loop through all worker IDs
	 * 2. Compare the current inflights to the incoming tags
	 * 3. Compare the current backlog to the incoming tags
	 * 4. Add any matches to the output
	 */
	for (j = 0 ; j < RTE_DIST_BURST_SIZE; j++)
		output_ptr[j] = 0;

	for (i = 0; i < d->num_workers; i++) {
		bl = &d->backlog[i];

		for (j = 0; j < RTE_DIST_BURST_SIZE ; j++)
			for (w = 0; w < RTE_DIST_BURST_SIZE; w++)
				if (d->in_flight_tags[i][w] == data_ptr[j]) {
					output_ptr[j] = i+1;
					break;
				}
		for (j = 0; j < RTE_DIST_BURST_SIZE; j++)
			for (w = 0; w < RTE_DIST_BURST_SIZE; w++)
				if (bl->tags[w] == data_ptr[j]) {
					output_ptr[j] = i+1;
					break;
				}
	}

	/*
	 * At this stage, the output contains 8 16-bit values, with
	 * each non-zero value holding the worker ID (+1) to which the
	 * corresponding flow is pinned.
	 */
}
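
/*
 * Illustrative example (assumption): with RTE_DIST_BURST_SIZE == 8 and an
 * incoming tag burst {5, 9, 5, 0, 0, 0, 0, 0}, if tag 5 is currently in
 * flight on worker 2 the output becomes {3, 0, 3, 0, 0, 0, 0, 0}:
 * worker ID + 1 for pinned flows, 0 for flows free to go to any worker.
 */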

/*
 * When a worker has called rte_distributor_return_pkt()
 * and set the RTE_DISTRIB_RETURN_BUF handshake bit in retptr64, the
 * distributor must retrieve both the inflight and backlog packets assigned
 * to that worker and reprocess them on another worker.
 */
static void
handle_worker_shutdown(struct rte_distributor *d, unsigned int wkr)
{
	struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
	/* double BURST size for storing both inflights and backlog */
	struct rte_mbuf *pkts[RTE_DIST_BURST_SIZE * 2];
	unsigned int pkts_count = 0;
	unsigned int i;

	/* If GET_BUF is cleared, there are in-flight packets sent to a
	 * worker which has not requested new packets.
	 * They must be retrieved and assigned to another worker.
	 */
	if (!(__atomic_load_n(&(buf->bufptr64[0]), __ATOMIC_ACQUIRE)
		& RTE_DISTRIB_GET_BUF))
		for (i = 0; i < RTE_DIST_BURST_SIZE; i++)
			if (buf->bufptr64[i] & RTE_DISTRIB_VALID_BUF)
				pkts[pkts_count++] = (void *)((uintptr_t)
					(buf->bufptr64[i]
					>> RTE_DISTRIB_FLAG_BITS));

	/* Perform the following operations on the bufptr64 handshake flags:
	 * - set GET_BUF to indicate that the distributor can overwrite the
	 *   buffer with new packets if the worker makes a new request.
	 * - clear RETURN_BUF to unlock reads on the worker side.
	 */
	__atomic_store_n(&(buf->bufptr64[0]), RTE_DISTRIB_GET_BUF,
		__ATOMIC_RELEASE);

	/* Collect backlog packets from worker */
	for (i = 0; i < d->backlog[wkr].count; i++)
		pkts[pkts_count++] = (void *)((uintptr_t)
			(d->backlog[wkr].pkts[i] >> RTE_DISTRIB_FLAG_BITS));

	d->backlog[wkr].count = 0;

	/* Clear both inflight and backlog tags */
	for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
		d->in_flight_tags[wkr][i] = 0;
		d->backlog[wkr].tags[i] = 0;
	}

	/* Reprocess the reclaimed packets (recursive call) */
	if (pkts_count > 0)
		rte_distributor_process(d, pkts, pkts_count);
}
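
/*
 * Illustrative timeline (summary, not from the source): worker W calls
 * rte_distributor_return_pkt(), which sets RETURN_BUF on both retptr64 and
 * bufptr64. On the next handle_returns(d, W) the distributor stores W's
 * returns, marks W inactive and calls handle_worker_shutdown(), which feeds
 * W's unread in-flight packets and its backlog back into
 * rte_distributor_process() so the remaining active workers pick them up.
 */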

/*
 * When the handshake bits indicate that there are packets coming
 * back from the worker, this function is called to copy and store
 * the valid returned pointers (store_return).
 */
static unsigned int
handle_returns(struct rte_distributor *d, unsigned int wkr)
{
	struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
	uintptr_t oldbuf;
	unsigned int ret_start = d->returns.start,
			ret_count = d->returns.count;
	unsigned int count = 0;
	unsigned int i;

	/* Sync on GET_BUF flag. Acquire retptrs. */
	if (__atomic_load_n(&(buf->retptr64[0]), __ATOMIC_ACQUIRE)
		& (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF)) {
		for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
			if (buf->retptr64[i] & RTE_DISTRIB_VALID_BUF) {
				oldbuf = ((uintptr_t)(buf->retptr64[i] >>
					RTE_DISTRIB_FLAG_BITS));
				/* store returns in a circular buffer */
				store_return(oldbuf, d, &ret_start, &ret_count);
				count++;
				buf->retptr64[i] &= ~RTE_DISTRIB_VALID_BUF;
			}
		}
		d->returns.start = ret_start;
		d->returns.count = ret_count;

		/* If the worker requested packets with GET_BUF, mark it
		 * active; otherwise (RETURN_BUF), mark it not active.
		 */
		d->activesum -= d->active[wkr];
		d->active[wkr] = !!(buf->retptr64[0] & RTE_DISTRIB_GET_BUF);
		d->activesum += d->active[wkr];

		/* If the worker returned packets without requesting new ones,
		 * handle all in-flight and backlog packets assigned to it.
		 */
		if (unlikely(buf->retptr64[0] & RTE_DISTRIB_RETURN_BUF))
			handle_worker_shutdown(d, wkr);

		/* Clear so the worker can populate it with more returns.
		 * Sync with worker on GET_BUF flag. Release retptrs.
		 */
		__atomic_store_n(&(buf->retptr64[0]), 0, __ATOMIC_RELEASE);
	}
	return count;
}

/*
 * This function releases a burst (cache line) to a worker.
 * It is called from the process function when a cacheline is
 * full to make room for more packets for that worker, or when
 * all packets have been assigned to bursts and need to be flushed
 * to the workers.
 * It also needs to wait for any outstanding packets from the worker
 * before sending out new packets.
 */
static unsigned int
release(struct rte_distributor *d, unsigned int wkr)
{
	struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
	unsigned int i;

	handle_returns(d, wkr);
	if (unlikely(!d->active[wkr]))
		return 0;

	/* Sync with worker on GET_BUF flag */
	while (!(__atomic_load_n(&(d->bufs[wkr].bufptr64[0]), __ATOMIC_ACQUIRE)
		& RTE_DISTRIB_GET_BUF)) {
		handle_returns(d, wkr);
		if (unlikely(!d->active[wkr]))
			return 0;
		rte_pause();
	}

	buf->count = 0;

	for (i = 0; i < d->backlog[wkr].count; i++) {
		d->bufs[wkr].bufptr64[i] = d->backlog[wkr].pkts[i] |
				RTE_DISTRIB_GET_BUF | RTE_DISTRIB_VALID_BUF;
		d->in_flight_tags[wkr][i] = d->backlog[wkr].tags[i];
	}
	buf->count = i;

	for ( ; i < RTE_DIST_BURST_SIZE ; i++) {
		buf->bufptr64[i] = RTE_DISTRIB_GET_BUF;
		d->in_flight_tags[wkr][i] = 0;
	}

	d->backlog[wkr].count = 0;

	/* Clear the GET bit.
	 * Sync with worker on GET_BUF flag. Release bufptrs.
	 */
	__atomic_store_n(&(buf->bufptr64[0]),
		buf->bufptr64[0] & ~RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);

	return buf->count;
}
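
/*
 * Illustrative state (assumption): after release() with a 3-packet backlog,
 * slots 0..2 of the worker's bufptr64 cache line hold
 * (mbuf << RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_VALID_BUF, slots 3..7 hold
 * only handshake bits, and GET_BUF has been cleared on slot 0. The worker's
 * rte_distributor_poll_pkt() therefore sees the handshake as "data ready"
 * and copies exactly the three VALID_BUF slots.
 */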

/* process a set of packets to distribute them to workers */
int
rte_distributor_process(struct rte_distributor *d,
		struct rte_mbuf **mbufs, unsigned int num_mbufs)
{
	unsigned int next_idx = 0;
	static unsigned int wkr;
	struct rte_mbuf *next_mb = NULL;
	int64_t next_value = 0;
	uint16_t new_tag = 0;
	uint16_t flows[RTE_DIST_BURST_SIZE] __rte_cache_aligned;
	unsigned int i, j, w, wid, matching_required;

	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
		/* Call the old API */
		return rte_distributor_process_single(d->d_single,
			mbufs, num_mbufs);
	}

	for (wid = 0 ; wid < d->num_workers; wid++)
		handle_returns(d, wid);

	if (unlikely(num_mbufs == 0)) {
		/* Flush out all non-full cache-lines to workers. */
		for (wid = 0 ; wid < d->num_workers; wid++) {
			/* Sync with worker on GET_BUF flag. */
			if (__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
				__ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF) {
				d->bufs[wid].count = 0;
				release(d, wid);
				handle_returns(d, wid);
			}
		}
		return 0;
	}

	if (unlikely(!d->activesum))
		return 0;

	while (next_idx < num_mbufs) {
		uint16_t matches[RTE_DIST_BURST_SIZE];
		unsigned int pkts;

		if ((num_mbufs - next_idx) < RTE_DIST_BURST_SIZE)
			pkts = num_mbufs - next_idx;
		else
			pkts = RTE_DIST_BURST_SIZE;

		for (i = 0; i < pkts; i++) {
			if (mbufs[next_idx + i]) {
				/* flows have to be non-zero */
				flows[i] = mbufs[next_idx + i]->hash.usr | 1;
			} else
				flows[i] = 0;
		}
		for (; i < RTE_DIST_BURST_SIZE; i++)
			flows[i] = 0;

		matching_required = 1;

		for (j = 0; j < pkts; j++) {
			if (unlikely(!d->activesum))
				return next_idx;

			if (unlikely(matching_required)) {
				switch (d->dist_match_fn) {
				case RTE_DIST_MATCH_VECTOR:
					find_match_vec(d, &flows[0],
						&matches[0]);
					break;
				default:
					find_match_scalar(d, &flows[0],
						&matches[0]);
				}
				matching_required = 0;
			}
			/*
			 * The matches array now contains the intended worker
			 * ID (+1) for each incoming packet. Any zeroes still
			 * need to be assigned workers.
			 */

			next_mb = mbufs[next_idx++];
			next_value = (((int64_t)(uintptr_t)next_mb) <<
					RTE_DISTRIB_FLAG_BITS);
			/*
			 * The user is advised to set the tag value for each
			 * mbuf before calling rte_distributor_process().
			 * User-defined tags are used to identify flows, or
			 * sessions.
			 */
			/* flows MUST be non-zero */
			new_tag = (uint16_t)(next_mb->hash.usr) | 1;

			/*
			 * Uncommenting the next line will cause the find_match
			 * function to be optimized out, making this function
			 * do parallel (non-atomic) distribution
			 */
			/* matches[j] = 0; */

			if (matches[j] && d->active[matches[j]-1]) {
				struct rte_distributor_backlog *bl =
						&d->backlog[matches[j]-1];
				if (unlikely(bl->count ==
						RTE_DIST_BURST_SIZE)) {
					release(d, matches[j]-1);
					if (!d->active[matches[j]-1]) {
						j--;
						next_idx--;
						matching_required = 1;
						continue;
					}
				}

				/* Add to worker that already has flow */
				unsigned int idx = bl->count++;

				bl->tags[idx] = new_tag;
				bl->pkts[idx] = next_value;

			} else {
				struct rte_distributor_backlog *bl;

				while (unlikely(!d->active[wkr]))
					wkr = (wkr + 1) % d->num_workers;
				bl = &d->backlog[wkr];

				if (unlikely(bl->count ==
						RTE_DIST_BURST_SIZE)) {
					release(d, wkr);
					if (!d->active[wkr]) {
						j--;
						next_idx--;
						matching_required = 1;
						continue;
					}
				}

				/* Add to the current worker */
				unsigned int idx = bl->count++;

				bl->tags[idx] = new_tag;
				bl->pkts[idx] = next_value;
				/*
				 * Now that we've just added an unpinned flow
				 * to a worker, we need to ensure that all
				 * other packets with that same flow will go
				 * to the same worker in this burst.
				 */
				for (w = j; w < pkts; w++)
					if (flows[w] == new_tag)
						matches[w] = wkr+1;
			}
		}
		wkr = (wkr + 1) % d->num_workers;
	}

	/* Flush out all non-full cache-lines to workers. */
	for (wid = 0 ; wid < d->num_workers; wid++)
		/* Sync with worker on GET_BUF flag. */
		if ((__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
			__ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)) {
			d->bufs[wid].count = 0;
			release(d, wid);
		}

	return num_mbufs;
}
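
/*
 * Illustrative sketch (assumption, not from this file): a minimal
 * distributor-core loop feeding the API above. BURST_SIZE, port and
 * quit_signal are placeholders supplied by the application, and hash.usr
 * must already carry the flow tag of each mbuf.
 *
 *	struct rte_mbuf *bufs[BURST_SIZE];
 *	struct rte_mbuf *ret[BURST_SIZE];
 *
 *	while (!quit_signal) {
 *		uint16_t nb_rx = rte_eth_rx_burst(port, 0, bufs, BURST_SIZE);
 *		rte_distributor_process(d, bufs, nb_rx);
 *		uint16_t nb_ret = rte_distributor_returned_pkts(d, ret,
 *				BURST_SIZE);
 *		// ... forward or free the nb_ret completed packets ...
 *	}
 *	rte_distributor_flush(d);
 */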

/* return to the caller the packets returned from workers */
int
rte_distributor_returned_pkts(struct rte_distributor *d,
		struct rte_mbuf **mbufs, unsigned int max_mbufs)
{
	struct rte_distributor_returned_pkts *returns = &d->returns;
	unsigned int retval = (max_mbufs < returns->count) ?
			max_mbufs : returns->count;
	unsigned int i;

	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
		/* Call the old API */
		return rte_distributor_returned_pkts_single(d->d_single,
				mbufs, max_mbufs);
	}

	for (i = 0; i < retval; i++) {
		unsigned int idx = (returns->start + i) &
				RTE_DISTRIB_RETURNS_MASK;

		mbufs[i] = returns->mbufs[idx];
	}
	returns->start += i;
	returns->count -= i;

	return retval;
}

/*
 * Return the number of packets in-flight in a distributor, i.e. packets
 * being worked on or queued up in a backlog.
 */
static inline unsigned int
total_outstanding(const struct rte_distributor *d)
{
	unsigned int wkr, total_outstanding = 0;

	for (wkr = 0; wkr < d->num_workers; wkr++)
		total_outstanding += d->backlog[wkr].count + d->bufs[wkr].count;

	return total_outstanding;
}

/*
 * Flush the distributor, so that there are no outstanding packets in flight
 * or queued up.
 */
int
rte_distributor_flush(struct rte_distributor *d)
{
	unsigned int flushed;
	unsigned int wkr;

	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
		/* Call the old API */
		return rte_distributor_flush_single(d->d_single);
	}

	flushed = total_outstanding(d);

	while (total_outstanding(d) > 0)
		rte_distributor_process(d, NULL, 0);

	/* wait 10ms to allow all workers to drain the packets */
	rte_delay_us(10000);

	/*
	 * Send an empty burst to all workers to allow them to exit
	 * gracefully, should they need to.
	 */
	rte_distributor_process(d, NULL, 0);

	for (wkr = 0; wkr < d->num_workers; wkr++)
		handle_returns(d, wkr);

	return flushed;
}
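
/*
 * Illustrative sketch (assumption): typical teardown order on the
 * distributor core once workers have been signalled to stop:
 *
 *	rte_distributor_flush(d);		// push out the remaining bursts
 *	rte_distributor_clear_returns(d);	// drop any leftover returns
 */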

/* clears the internal returns array in the distributor */
void
rte_distributor_clear_returns(struct rte_distributor *d)
{
	unsigned int wkr;

	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
		/* Call the old API */
		rte_distributor_clear_returns_single(d->d_single);
		return;
	}

	/* throw away returns, so workers can exit */
	for (wkr = 0; wkr < d->num_workers; wkr++)
		/* Sync with worker. Release retptrs. */
		__atomic_store_n(&(d->bufs[wkr].retptr64[0]), 0,
				__ATOMIC_RELEASE);

	d->returns.start = d->returns.count = 0;
}

/* creates a distributor instance */
struct rte_distributor *
rte_distributor_create(const char *name,
		unsigned int socket_id,
		unsigned int num_workers,
		unsigned int alg_type)
{
	struct rte_distributor *d;
	struct rte_dist_burst_list *dist_burst_list;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	unsigned int i;

	/* TODO Reorganise function properly around RTE_DIST_ALG_SINGLE/BURST */

	/* compilation-time checks */
	RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);

	if (name == NULL || num_workers >=
		(unsigned int)RTE_MIN(RTE_DISTRIB_MAX_WORKERS, RTE_MAX_LCORE)) {
		rte_errno = EINVAL;
		return NULL;
	}

	if (alg_type == RTE_DIST_ALG_SINGLE) {
		d = malloc(sizeof(struct rte_distributor));
		if (d == NULL) {
			rte_errno = ENOMEM;
			return NULL;
		}
		d->d_single = rte_distributor_create_single(name,
				socket_id, num_workers);
		if (d->d_single == NULL) {
			free(d);
			/* rte_errno will have been set */
			return NULL;
		}
		d->alg_type = alg_type;
		return d;
	}

	snprintf(mz_name, sizeof(mz_name), RTE_DISTRIB_PREFIX"%s", name);
	mz = rte_memzone_reserve(mz_name, sizeof(*d), socket_id, NO_FLAGS);
	if (mz == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	d = mz->addr;
	strlcpy(d->name, name, sizeof(d->name));
	d->num_workers = num_workers;
	d->alg_type = alg_type;

	d->dist_match_fn = RTE_DIST_MATCH_SCALAR;
#if defined(RTE_ARCH_X86)
	d->dist_match_fn = RTE_DIST_MATCH_VECTOR;
#endif

	/*
	 * Set up the backlog tags so they're pointing at the second cache
	 * line for performance during flow matching
	 */
	for (i = 0 ; i < num_workers ; i++)
		d->backlog[i].tags = &d->in_flight_tags[i][RTE_DIST_BURST_SIZE];

	memset(d->active, 0, sizeof(d->active));
	d->activesum = 0;

	dist_burst_list = RTE_TAILQ_CAST(rte_dist_burst_tailq.head,
					  rte_dist_burst_list);

	rte_mcfg_tailq_write_lock();
	TAILQ_INSERT_TAIL(dist_burst_list, d, next);
	rte_mcfg_tailq_write_unlock();