/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_mbuf.h>
#include <rte_cycles.h>
#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>
#include <rte_pause.h>
#include <rte_tailq.h>
#include <rte_vect.h>

#include "rte_distributor.h"
#include "rte_distributor_single.h"
#include "distributor_private.h"
TAILQ_HEAD(rte_dist_burst_list, rte_distributor);

static struct rte_tailq_elem rte_dist_burst_tailq = {
	.name = "RTE_DIST_BURST",
};
EAL_REGISTER_TAILQ(rte_dist_burst_tailq)
/**** APIs called by workers ****/

/**** Burst Packet APIs called by workers ****/
void
rte_distributor_request_pkt(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **oldpkt,
		unsigned int count)
{
	struct rte_distributor_buffer *buf = &(d->bufs[worker_id]);
	unsigned int i;
	volatile int64_t *retptr64;

	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
		rte_distributor_request_pkt_single(d->d_single,
			worker_id, count ? oldpkt[0] : NULL);
		return;
	}

	retptr64 = &(buf->retptr64[0]);
	/* Spin while handshake bits are set (scheduler clears them).
	 * Sync with distributor on GET_BUF flag.
	 */
	while (unlikely(__atomic_load_n(retptr64, __ATOMIC_ACQUIRE)
			& (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF))) {
		/* Back off for a short, fixed number of cycles */
		uint64_t t = rte_rdtsc() + 100;

		while (rte_rdtsc() < t)
			rte_pause();
	}

	/*
	 * OK, if we've got here, then the scheduler has just cleared the
	 * handshake bits. Populate the retptrs with returning packets.
	 */
	for (i = count; i < RTE_DIST_BURST_SIZE; i++)
		buf->retptr64[i] = 0;

	/* Set VALID_BUF bit for each packet returned */
	for (i = count; i-- > 0; )
		buf->retptr64[i] =
			(((int64_t)(uintptr_t)(oldpkt[i])) <<
			RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_VALID_BUF;

	/*
	 * Finally, set the GET_BUF flag to signal to the distributor that
	 * the cache line is ready for processing.
	 * Sync with distributor to release retptrs.
	 */
	__atomic_store_n(retptr64, *retptr64 | RTE_DISTRIB_GET_BUF,
			__ATOMIC_RELEASE);
}
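/*
 * Handshake overview, as implemented by the functions in this file
 * (a simplified sketch of one round trip):
 *
 *  1. The worker writes returned mbufs into retptr64[] and sets GET_BUF
 *     on retptr64[0] (rte_distributor_request_pkt above).
 *  2. The distributor collects the returns and clears retptr64[0]
 *     (handle_returns below), marking the worker active.
 *  3. The distributor fills bufptr64[] with new packets and clears
 *     GET_BUF on bufptr64[0] (release below), signalling data is ready.
 *  4. The worker sees GET_BUF cleared, reads the mbufs, then sets
 *     GET_BUF again (rte_distributor_poll_pkt below), freeing the cache
 *     line for a refill.
 */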
int
rte_distributor_poll_pkt(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **pkts)
{
	struct rte_distributor_buffer *buf = &d->bufs[worker_id];
	int64_t ret;
	int count = 0;
	unsigned int i;

	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
		pkts[0] = rte_distributor_poll_pkt_single(d->d_single,
			worker_id);
		return (pkts[0]) ? 1 : 0;
	}

	/* If any of the below bits is set, return.
	 * GET_BUF is set when distributor hasn't sent any packets yet.
	 * RETURN_BUF is set when distributor must retrieve in-flight packets.
	 * Sync with distributor to acquire bufptrs.
	 */
	if (__atomic_load_n(&(buf->bufptr64[0]), __ATOMIC_ACQUIRE)
		& (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF))
		return -1;

	/* since bufptr64 is signed, this should be an arithmetic shift */
	for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
		if (likely(buf->bufptr64[i] & RTE_DISTRIB_VALID_BUF)) {
			ret = buf->bufptr64[i] >> RTE_DISTRIB_FLAG_BITS;
			pkts[count++] = (struct rte_mbuf *)((uintptr_t)(ret));
		}
	}

	/*
	 * Now we've got the contents of the cache line in an array of
	 * mbuf pointers, so toggle the bit so the scheduler can start
	 * working on the next cache line while we're working.
	 * Sync with distributor on GET_BUF flag. Release bufptrs.
	 */
	__atomic_store_n(&(buf->bufptr64[0]),
		buf->bufptr64[0] | RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);

	return count;
}
int
rte_distributor_get_pkt(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **pkts,
		struct rte_mbuf **oldpkt, unsigned int return_count)
{
	int count;

	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
		if (return_count <= 1) {
			pkts[0] = rte_distributor_get_pkt_single(d->d_single,
				worker_id, return_count ? oldpkt[0] : NULL);
			return (pkts[0]) ? 1 : 0;
		} else
			return -EINVAL;
	}

	rte_distributor_request_pkt(d, worker_id, oldpkt, return_count);
	count = rte_distributor_poll_pkt(d, worker_id, pkts);
	while (count == -1) {
		uint64_t t = rte_rdtsc() + 100;

		while (rte_rdtsc() < t)
			rte_pause();
		count = rte_distributor_poll_pkt(d, worker_id, pkts);
	}
	return count;
}
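/*
 * Illustrative worker loop built on the calls above (a sketch, not part
 * of this library; worker_handle_packet() and the quit flag stand in
 * for application code):
 *
 *	struct rte_mbuf *bufs[RTE_DIST_BURST_SIZE];
 *	unsigned int count = 0, i;
 *
 *	while (!quit) {
 *		count = rte_distributor_get_pkt(d, worker_id, bufs,
 *				bufs, count);
 *		for (i = 0; i < count; i++)
 *			worker_handle_packet(bufs[i]);
 *	}
 *	rte_distributor_return_pkt(d, worker_id, bufs, count);
 */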
int
rte_distributor_return_pkt(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **oldpkt, int num)
{
	struct rte_distributor_buffer *buf = &d->bufs[worker_id];
	unsigned int i;

	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
		if (num == 1)
			return rte_distributor_return_pkt_single(d->d_single,
				worker_id, oldpkt[0]);
		else if (num == 0)
			return rte_distributor_return_pkt_single(d->d_single,
				worker_id, NULL);
		else
			return -EINVAL;
	}

	/* Spin while handshake bits are set (scheduler clears them).
	 * Sync with distributor on GET_BUF flag.
	 */
	while (unlikely(__atomic_load_n(&(buf->retptr64[0]), __ATOMIC_RELAXED)
			& (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF))) {
		uint64_t t = rte_rdtsc() + 100;

		while (rte_rdtsc() < t)
			rte_pause();
	}

	/* Sync with distributor to acquire retptrs */
	__atomic_thread_fence(__ATOMIC_ACQUIRE);
	for (i = 0; i < RTE_DIST_BURST_SIZE; i++)
		/* Switch off the return bit first */
		buf->retptr64[i] = 0;

	for (i = num; i-- > 0; )
		buf->retptr64[i] = (((int64_t)(uintptr_t)oldpkt[i]) <<
			RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_VALID_BUF;

	/* Use RETURN_BUF on bufptr64 to notify distributor that
	 * we won't read any mbufs from there even if GET_BUF is set.
	 * This allows distributor to retrieve in-flight already sent packets.
	 */
	__atomic_or_fetch(&(buf->bufptr64[0]), RTE_DISTRIB_RETURN_BUF,
		__ATOMIC_ACQ_REL);

	/* Set RETURN_BUF on retptr64 even if we got no returns.
	 * Sync with distributor on RETURN_BUF flag. Release retptrs.
	 * Notify distributor that we don't request any more packets.
	 */
	__atomic_store_n(&(buf->retptr64[0]),
		buf->retptr64[0] | RTE_DISTRIB_RETURN_BUF, __ATOMIC_RELEASE);

	return 0;
}
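/*
 * Note on the slot encoding used above: each 64-bit handshake slot packs
 * an mbuf pointer shifted left by RTE_DISTRIB_FLAG_BITS together with the
 * GET_BUF/RETURN_BUF/VALID_BUF flags in the low bits. A minimal sketch of
 * the encode/decode pair used throughout this file:
 *
 *	int64_t slot = (((int64_t)(uintptr_t)mb) << RTE_DISTRIB_FLAG_BITS)
 *			| RTE_DISTRIB_VALID_BUF;
 *	struct rte_mbuf *mb2 = (struct rte_mbuf *)(uintptr_t)
 *			(slot >> RTE_DISTRIB_FLAG_BITS);
 *
 * The arithmetic right shift recovers the pointer because user-space
 * virtual addresses do not occupy the top bits of the 64-bit range.
 */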
/**** APIs called on distributor core ***/

/* stores a packet returned from a worker inside the returns array */
static inline void
store_return(uintptr_t oldbuf, struct rte_distributor *d,
		unsigned int *ret_start, unsigned int *ret_count)
{
	if (!oldbuf)
		return;
	/* store returns in a circular buffer */
	d->returns.mbufs[(*ret_start + *ret_count) & RTE_DISTRIB_RETURNS_MASK]
			= (void *)oldbuf;
	*ret_start += (*ret_count == RTE_DISTRIB_RETURNS_MASK);
	*ret_count += (*ret_count != RTE_DISTRIB_RETURNS_MASK);
}
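/*
 * The two branchless updates above implement "overwrite oldest when
 * full": while *ret_count is below RTE_DISTRIB_RETURNS_MASK only the
 * count grows; once it saturates at the mask, each further store
 * advances *ret_start instead, sliding the window forward over the
 * circular array. E.g. if the mask were 127, the first 127 returns grow
 * the count, and every return after that bumps start by one while the
 * count stays at 127.
 */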
/*
 * Match the flow_ids (tags) of the incoming packets to the flow_ids
 * of the inflight packets (both inflight on the workers and in each
 * worker backlog). This then allows us to pin those packets to the
 * relevant workers to give us our atomic flow pinning.
 */
static inline void
find_match_scalar(struct rte_distributor *d,
			uint16_t *data_ptr,
			uint16_t *output_ptr)
{
	struct rte_distributor_backlog *bl;
	uint16_t i, j, w;

	/* Function overview:
	 * 1. Loop through all worker ID's
	 * 2. Compare the current inflights to the incoming tags
	 * 3. Compare the current backlog to the incoming tags
	 * 4. Add any matches to the output
	 */
	for (j = 0 ; j < RTE_DIST_BURST_SIZE; j++)
		output_ptr[j] = 0;

	for (i = 0; i < d->num_workers; i++) {
		bl = &d->backlog[i];
		for (j = 0; j < RTE_DIST_BURST_SIZE ; j++)
			for (w = 0; w < RTE_DIST_BURST_SIZE; w++)
				if (d->in_flight_tags[i][w] == data_ptr[j]) {
					output_ptr[j] = i+1;
					break;
				}
		for (j = 0; j < RTE_DIST_BURST_SIZE; j++)
			for (w = 0; w < RTE_DIST_BURST_SIZE; w++)
				if (bl->tags[w] == data_ptr[j]) {
					output_ptr[j] = i+1;
					break;
				}
	}

	/*
	 * At this stage, the output contains 8 16-bit values, with
	 * each non-zero value holding the worker ID (plus one) to which
	 * the corresponding flow is pinned.
	 */
}
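/*
 * Worked example (assumed tag values): with RTE_DIST_BURST_SIZE == 8 and
 * incoming tags {5, 9, 5, 3, 7, 7, 11, 13}, if worker index 2 holds tag 5
 * in flight and no other incoming tag is held anywhere, the output is
 * {3, 0, 3, 0, 0, 0, 0, 0}: both packets of flow 5 are pinned to worker 2
 * (stored as 2+1), and the remaining flows are left for
 * rte_distributor_process() to assign.
 */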
/*
 * When a worker has called rte_distributor_return_pkt()
 * and passed the RTE_DISTRIB_RETURN_BUF handshake through retptr64,
 * the distributor must retrieve both the inflight and backlog packets
 * assigned to the worker and reprocess them to another worker.
 */
static void
handle_worker_shutdown(struct rte_distributor *d, unsigned int wkr)
{
	struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
	/* double BURST size for storing both inflights and backlog */
	struct rte_mbuf *pkts[RTE_DIST_BURST_SIZE * 2];
	unsigned int pkts_count = 0;
	unsigned int i;

	/* If GET_BUF is cleared, there are in-flight packets sent
	 * to the worker, which no longer wants new packets.
	 * They must be retrieved and assigned to another worker.
	 */
	if (!(__atomic_load_n(&(buf->bufptr64[0]), __ATOMIC_ACQUIRE)
		& RTE_DISTRIB_GET_BUF))
		for (i = 0; i < RTE_DIST_BURST_SIZE; i++)
			if (buf->bufptr64[i] & RTE_DISTRIB_VALID_BUF)
				pkts[pkts_count++] = (void *)((uintptr_t)
					(buf->bufptr64[i]
						>> RTE_DISTRIB_FLAG_BITS));

	/* Perform the following operations on the bufptr64 handshake flags:
	 * - set GET_BUF to indicate that the distributor can overwrite the
	 *   buffer with new packets if the worker makes a new request.
	 * - clear RETURN_BUF to unlock reads on the worker side.
	 */
	__atomic_store_n(&(buf->bufptr64[0]), RTE_DISTRIB_GET_BUF,
		__ATOMIC_RELEASE);

	/* Collect backlog packets from worker */
	for (i = 0; i < d->backlog[wkr].count; i++)
		pkts[pkts_count++] = (void *)((uintptr_t)
			(d->backlog[wkr].pkts[i] >> RTE_DISTRIB_FLAG_BITS));

	d->backlog[wkr].count = 0;

	/* Clear both inflight and backlog tags */
	for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
		d->in_flight_tags[wkr][i] = 0;
		d->backlog[wkr].tags[i] = 0;
	}

	/* Recursive call to redistribute the recovered packets */
	if (pkts_count > 0)
		rte_distributor_process(d, pkts, pkts_count);
}
/*
 * When the handshake bits indicate that there are packets coming
 * back from the worker, this function is called to copy and store
 * the valid returned pointers (store_return).
 */
static unsigned int
handle_returns(struct rte_distributor *d, unsigned int wkr)
{
	struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
	uintptr_t oldbuf;
	unsigned int ret_start = d->returns.start,
			ret_count = d->returns.count;
	unsigned int count = 0;
	unsigned int i;

	/* Sync on GET_BUF flag. Acquire retptrs. */
	if (__atomic_load_n(&(buf->retptr64[0]), __ATOMIC_ACQUIRE)
		& (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF)) {
		for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
			if (buf->retptr64[i] & RTE_DISTRIB_VALID_BUF) {
				oldbuf = ((uintptr_t)(buf->retptr64[i] >>
					RTE_DISTRIB_FLAG_BITS));
				/* store returns in a circular buffer */
				store_return(oldbuf, d, &ret_start,
						&ret_count);
				count++;
				buf->retptr64[i] &= ~RTE_DISTRIB_VALID_BUF;
			}
		}
		d->returns.start = ret_start;
		d->returns.count = ret_count;

		/* If the worker requested packets with GET_BUF, mark it
		 * active; otherwise (RETURN_BUF), mark it inactive.
		 */
		d->activesum -= d->active[wkr];
		d->active[wkr] = !!(buf->retptr64[0] & RTE_DISTRIB_GET_BUF);
		d->activesum += d->active[wkr];

		/* If the worker returned packets without requesting new ones,
		 * handle all in-flight and backlog packets assigned to it.
		 */
		if (unlikely(buf->retptr64[0] & RTE_DISTRIB_RETURN_BUF))
			handle_worker_shutdown(d, wkr);

		/* Clear for the worker to populate with more returns.
		 * Sync with worker on GET_BUF flag. Release retptrs.
		 */
		__atomic_store_n(&(buf->retptr64[0]), 0, __ATOMIC_RELEASE);
	}
	return count;
}
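/*
 * Bookkeeping example (assumed state): with four workers where worker 2
 * has signalled shutdown via RETURN_BUF, d->active is {1, 1, 0, 1} and
 * d->activesum is 3. rte_distributor_process() below uses activesum as a
 * cheap "any workers left?" check and skips inactive workers when
 * assigning unpinned flows.
 */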
/*
 * This function releases a burst (cache line) to a worker.
 * It is called from the process function when a cacheline is
 * full to make room for more packets for that worker, or when
 * all packets have been assigned to bursts and need to be flushed
 * to the workers.
 * It also needs to wait for any outstanding packets from the worker
 * before sending out new packets.
 */
static unsigned int
release(struct rte_distributor *d, unsigned int wkr)
{
	struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
	unsigned int i;

	handle_returns(d, wkr);
	if (unlikely(!d->active[wkr]))
		return 0;

	/* Sync with worker on GET_BUF flag */
	while (!(__atomic_load_n(&(d->bufs[wkr].bufptr64[0]), __ATOMIC_ACQUIRE)
		& RTE_DISTRIB_GET_BUF)) {
		handle_returns(d, wkr);
		if (unlikely(!d->active[wkr]))
			return 0;
		rte_pause();
	}

	buf->count = 0;

	for (i = 0; i < d->backlog[wkr].count; i++) {
		d->bufs[wkr].bufptr64[i] = d->backlog[wkr].pkts[i] |
				RTE_DISTRIB_GET_BUF | RTE_DISTRIB_VALID_BUF;
		d->in_flight_tags[wkr][i] = d->backlog[wkr].tags[i];
	}
	buf->count = i;
	for ( ; i < RTE_DIST_BURST_SIZE ; i++) {
		buf->bufptr64[i] = RTE_DISTRIB_GET_BUF;
		d->in_flight_tags[wkr][i] = 0;
	}

	d->backlog[wkr].count = 0;

	/* Clear the GET bit.
	 * Sync with worker on GET_BUF flag. Release bufptrs.
	 */
	__atomic_store_n(&(buf->bufptr64[0]),
		buf->bufptr64[0] & ~RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);

	return buf->count;
}
/* process a set of packets to distribute them to workers */
int
rte_distributor_process(struct rte_distributor *d,
		struct rte_mbuf **mbufs, unsigned int num_mbufs)
{
	unsigned int next_idx = 0;
	static unsigned int wkr;
	struct rte_mbuf *next_mb = NULL;
	int64_t next_value = 0;
	uint16_t new_tag = 0;
	uint16_t flows[RTE_DIST_BURST_SIZE] __rte_cache_aligned;
	unsigned int i, j, w, wid, matching_required;

	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
		/* Call the old API */
		return rte_distributor_process_single(d->d_single,
			mbufs, num_mbufs);
	}

	for (wid = 0 ; wid < d->num_workers; wid++)
		handle_returns(d, wid);

	if (unlikely(num_mbufs == 0)) {
		/* Flush out all non-full cache-lines to workers. */
		for (wid = 0 ; wid < d->num_workers; wid++) {
			/* Sync with worker on GET_BUF flag. */
			if (__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
				__ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF) {
				d->bufs[wid].count = 0;
				release(d, wid);
				handle_returns(d, wid);
			}
		}
		return 0;
	}

	if (unlikely(!d->activesum))
		return 0;

	while (next_idx < num_mbufs) {
		uint16_t matches[RTE_DIST_BURST_SIZE] __rte_aligned(128);
		unsigned int pkts;

		if ((num_mbufs - next_idx) < RTE_DIST_BURST_SIZE)
			pkts = num_mbufs - next_idx;
		else
			pkts = RTE_DIST_BURST_SIZE;

		for (i = 0; i < pkts; i++) {
			if (mbufs[next_idx + i]) {
				/* flows have to be non-zero */
				flows[i] = mbufs[next_idx + i]->hash.usr | 1;
			} else
				flows[i] = 0;
		}
		for (; i < RTE_DIST_BURST_SIZE; i++)
			flows[i] = 0;

		matching_required = 1;

		for (j = 0; j < pkts; j++) {
			if (unlikely(!d->activesum))
				return next_idx;

			if (unlikely(matching_required)) {
				switch (d->dist_match_fn) {
				case RTE_DIST_MATCH_VECTOR:
					find_match_vec(d, &flows[0],
						&matches[0]);
					break;
				default:
					find_match_scalar(d, &flows[0],
						&matches[0]);
				}
				matching_required = 0;
			}
			/*
			 * The matches array now contains the intended worker
			 * ID (+1) of the incoming packets. Any zeroes need to
			 * be assigned workers.
			 */

			next_mb = mbufs[next_idx++];
			next_value = (((int64_t)(uintptr_t)next_mb) <<
					RTE_DISTRIB_FLAG_BITS);
			/*
			 * The user is advised to set the tag value for each
			 * mbuf before calling rte_distributor_process.
			 * User defined tags are used to identify flows,
			 * or sessions.
			 */
			/* flows MUST be non-zero */
			new_tag = (uint16_t)(next_mb->hash.usr) | 1;

			/*
			 * Uncommenting the next line will cause the find_match
			 * function to be optimized out, making this function
			 * do parallel (non-atomic) distribution
			 */
			/* matches[j] = 0; */

			if (matches[j] && d->active[matches[j]-1]) {
				struct rte_distributor_backlog *bl =
						&d->backlog[matches[j]-1];
				if (unlikely(bl->count ==
						RTE_DIST_BURST_SIZE)) {
					release(d, matches[j]-1);
					if (!d->active[matches[j]-1]) {
						j--;
						next_idx--;
						matching_required = 1;
						continue;
					}
				}

				/* Add to worker that already has flow */
				unsigned int idx = bl->count++;

				bl->tags[idx] = new_tag;
				bl->pkts[idx] = next_value;

			} else {
				struct rte_distributor_backlog *bl;

				while (unlikely(!d->active[wkr]))
					wkr = (wkr + 1) % d->num_workers;
				bl = &d->backlog[wkr];

				if (unlikely(bl->count ==
						RTE_DIST_BURST_SIZE)) {
					release(d, wkr);
					if (!d->active[wkr]) {
						j--;
						next_idx--;
						matching_required = 1;
						continue;
					}
				}

				/* Add to current worker */
				unsigned int idx = bl->count++;

				bl->tags[idx] = new_tag;
				bl->pkts[idx] = next_value;
				/*
				 * Now that we've just added an unpinned flow
				 * to a worker, we need to ensure that all
				 * other packets with that same flow will go
				 * to the same worker in this burst.
				 */
				for (w = j; w < pkts; w++)
					if (flows[w] == new_tag)
						matches[w] = wkr+1;
			}
		}
		wkr = (wkr + 1) % d->num_workers;
	}

	/* Flush out all non-full cache-lines to workers. */
	for (wid = 0 ; wid < d->num_workers; wid++)
		/* Sync with worker on GET_BUF flag. */
		if ((__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
			__ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)) {
			d->bufs[wid].count = 0;
			release(d, wid);
		}

	return num_mbufs;
}
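/*
 * Illustrative distributor-core loop (a sketch, not part of this
 * library; the port/queue numbers and the quit flag stand in for
 * application state):
 *
 *	struct rte_mbuf *bufs[64];
 *	uint16_t nb;
 *
 *	while (!quit) {
 *		nb = rte_eth_rx_burst(port, 0, bufs, 64);
 *		rte_distributor_process(d, bufs, nb);
 *		nb = rte_distributor_returned_pkts(d, bufs, 64);
 *		(forward or free the nb returned packets here)
 *	}
 *	rte_distributor_flush(d);
 */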
/* return to the caller, packets returned from workers */
int
rte_distributor_returned_pkts(struct rte_distributor *d,
		struct rte_mbuf **mbufs, unsigned int max_mbufs)
{
	struct rte_distributor_returned_pkts *returns = &d->returns;
	unsigned int retval = (max_mbufs < returns->count) ?
			max_mbufs : returns->count;
	unsigned int i;

	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
		/* Call the old API */
		return rte_distributor_returned_pkts_single(d->d_single,
				mbufs, max_mbufs);
	}

	for (i = 0; i < retval; i++) {
		unsigned int idx = (returns->start + i) &
				RTE_DISTRIB_RETURNS_MASK;
		mbufs[i] = returns->mbufs[idx];
	}
	returns->start += i;
	returns->count -= i;

	return retval;
}
/*
 * Return the number of packets in-flight in a distributor, i.e. packets
 * being worked on or queued up in a backlog.
 */
static inline unsigned int
total_outstanding(const struct rte_distributor *d)
{
	unsigned int wkr, total_outstanding = 0;

	for (wkr = 0; wkr < d->num_workers; wkr++)
		total_outstanding += d->backlog[wkr].count + d->bufs[wkr].count;

	return total_outstanding;
}
/*
 * Flush the distributor, so that there are no outstanding packets in
 * flight or queued up.
 */
int
rte_distributor_flush(struct rte_distributor *d)
{
	unsigned int flushed, wkr;

	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
		/* Call the old API */
		return rte_distributor_flush_single(d->d_single);
	}

	flushed = total_outstanding(d);
	while (total_outstanding(d) > 0)
		rte_distributor_process(d, NULL, 0);

	/* wait 10ms to allow all workers to drain the pkts */
	rte_delay_us(10000);

	/* Send an empty burst to all workers to allow them to exit
	 * gracefully, should they need to.
	 */
	rte_distributor_process(d, NULL, 0);

	for (wkr = 0; wkr < d->num_workers; wkr++)
		handle_returns(d, wkr);

	return flushed;
}
/* clears the internal returns array in the distributor */
void
rte_distributor_clear_returns(struct rte_distributor *d)
{
	unsigned int wkr;

	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
		/* Call the old API */
		rte_distributor_clear_returns_single(d->d_single);
		return;
	}

	/* throw away returns, so workers can exit */
	for (wkr = 0; wkr < d->num_workers; wkr++)
		/* Sync with worker. Release retptrs. */
		__atomic_store_n(&(d->bufs[wkr].retptr64[0]), 0,
				__ATOMIC_RELEASE);

	d->returns.start = d->returns.count = 0;
}
/* creates a distributor instance */
struct rte_distributor *
rte_distributor_create(const char *name,
		unsigned int socket_id,
		unsigned int num_workers,
		unsigned int alg_type)
{
	struct rte_distributor *d;
	struct rte_dist_burst_list *dist_burst_list;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	unsigned int i;

	/* TODO Reorganise function properly around RTE_DIST_ALG_SINGLE/BURST */

	/* compilation-time checks */
	RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);

	if (name == NULL || num_workers >=
		(unsigned int)RTE_MIN(RTE_DISTRIB_MAX_WORKERS, RTE_MAX_LCORE)) {
		rte_errno = EINVAL;
		return NULL;
	}

	if (alg_type == RTE_DIST_ALG_SINGLE) {
		d = malloc(sizeof(struct rte_distributor));
		if (d == NULL) {
			rte_errno = ENOMEM;
			return NULL;
		}
		d->d_single = rte_distributor_create_single(name,
				socket_id, num_workers);
		if (d->d_single == NULL) {
			free(d);
			/* rte_errno will have been set */
			return NULL;
		}
		d->alg_type = alg_type;
		return d;
	}

	snprintf(mz_name, sizeof(mz_name), RTE_DISTRIB_PREFIX"%s", name);
	mz = rte_memzone_reserve(mz_name, sizeof(*d), socket_id, NO_FLAGS);
	if (mz == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	d = mz->addr;
	strlcpy(d->name, name, sizeof(d->name));
	d->num_workers = num_workers;
	d->alg_type = alg_type;

	d->dist_match_fn = RTE_DIST_MATCH_SCALAR;
#if defined(RTE_ARCH_X86)
	if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128)
		d->dist_match_fn = RTE_DIST_MATCH_VECTOR;
#endif

	/*
	 * Set up the backlog tags so they're pointing at the second cache
	 * line for performance during flow matching
	 */
	for (i = 0 ; i < num_workers ; i++)
		d->backlog[i].tags = &d->in_flight_tags[i][RTE_DIST_BURST_SIZE];

	memset(d->active, 0, sizeof(d->active));
	d->activesum = 0;

	dist_burst_list = RTE_TAILQ_CAST(rte_dist_burst_tailq.head,
					  rte_dist_burst_list);

	rte_mcfg_tailq_write_lock();
	TAILQ_INSERT_TAIL(dist_burst_list, d, next);
	rte_mcfg_tailq_write_unlock();

	return d;
}
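/*
 * Illustrative setup on the main lcore (a sketch; the name "my_dist"
 * and the worker count of 4 are arbitrary):
 *
 *	struct rte_distributor *d = rte_distributor_create("my_dist",
 *			rte_socket_id(), 4, RTE_DIST_ALG_BURST);
 *	if (d == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create distributor: %d\n",
 *				rte_errno);
 */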