/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <sys/queue.h>
#include <string.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_cycles.h>
#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>
#include <rte_pause.h>
#include <rte_tailq.h>

#include "rte_distributor.h"
#include "rte_distributor_single.h"
#include "distributor_private.h"

TAILQ_HEAD(rte_dist_burst_list, rte_distributor);

static struct rte_tailq_elem rte_dist_burst_tailq = {
	.name = "RTE_DIST_BURST",
};
EAL_REGISTER_TAILQ(rte_dist_burst_tailq)

/**** APIs called by workers ****/

/**** Burst Packet APIs called by workers ****/
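
/*
 * A rough summary of the handshake encoding used by the worker APIs below
 * (a sketch inferred from the code in this file; the authoritative values
 * live in distributor_private.h): each 64-bit slot in a worker's
 * bufptr64[]/retptr64[] array carries an mbuf pointer shifted left by
 * RTE_DISTRIB_FLAG_BITS, with the low bits reserved for the handshake flags:
 *   - RTE_DISTRIB_GET_BUF:    the cache line is handed over / packets wanted
 *   - RTE_DISTRIB_RETURN_BUF: the worker is only returning packets (shutdown)
 *   - RTE_DISTRIB_VALID_BUF:  the slot holds a valid mbuf pointer
 */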

void
rte_distributor_request_pkt(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **oldpkt,
		unsigned int count)
{
	struct rte_distributor_buffer *buf = &(d->bufs[worker_id]);
	unsigned int i;

	volatile int64_t *retptr64;

	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
		rte_distributor_request_pkt_single(d->d_single,
			worker_id, count ? oldpkt[0] : NULL);
		return;
	}

	retptr64 = &(buf->retptr64[0]);
	/* Spin while handshake bits are set (scheduler clears it).
	 * Sync with worker on GET_BUF flag.
	 */
	while (unlikely(__atomic_load_n(retptr64, __ATOMIC_ACQUIRE)
			& (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF))) {
		rte_pause();
		uint64_t t = rte_rdtsc()+100;

		while (rte_rdtsc() < t)
			rte_pause();
	}

	/*
	 * If we've got here, the scheduler has just cleared the
	 * handshake bits. Populate the retptrs with the returning packets.
	 */
	for (i = count; i < RTE_DIST_BURST_SIZE; i++)
		buf->retptr64[i] = 0;

	/* Set VALID_BUF bit for each packet returned */
	for (i = count; i-- > 0; )
		buf->retptr64[i] =
			(((int64_t)(uintptr_t)(oldpkt[i])) <<
			RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_VALID_BUF;

	/*
	 * Finally, set the GET_BUF flag to signal to the distributor that
	 * this cache line is ready for processing.
	 * Sync with distributor to release retptrs.
	 */
	__atomic_store_n(retptr64, *retptr64 | RTE_DISTRIB_GET_BUF,
			__ATOMIC_RELEASE);
}

int
rte_distributor_poll_pkt(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **pkts)
{
	struct rte_distributor_buffer *buf = &d->bufs[worker_id];
	uint64_t ret;
	int count = 0;
	unsigned int i;

	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
		pkts[0] = rte_distributor_poll_pkt_single(d->d_single,
			worker_id);
		return (pkts[0]) ? 1 : 0;
	}

	/* If any of the below bits is set, return.
	 * GET_BUF is set when the distributor hasn't sent any packets yet.
	 * RETURN_BUF is set when the distributor must retrieve in-flight packets.
	 * Sync with distributor to acquire bufptrs.
	 */
	if (__atomic_load_n(&(buf->bufptr64[0]), __ATOMIC_ACQUIRE)
		& (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF))
		return -1;

	/* since bufptr64 is signed, this should be an arithmetic shift */
	for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
		if (likely(buf->bufptr64[i] & RTE_DISTRIB_VALID_BUF)) {
			ret = buf->bufptr64[i] >> RTE_DISTRIB_FLAG_BITS;
			pkts[count++] = (struct rte_mbuf *)((uintptr_t)(ret));
		}
	}

	/*
	 * Now we've got the contents of the cache line into an array of
	 * mbuf pointers, so toggle the bit so the scheduler can start working
	 * on the next cache line while we're working.
	 * Sync with distributor on GET_BUF flag. Release bufptrs.
	 */
	__atomic_store_n(&(buf->bufptr64[0]),
		buf->bufptr64[0] | RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);

	return count;
}

int
rte_distributor_get_pkt(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **pkts,
		struct rte_mbuf **oldpkt, unsigned int return_count)
{
	int count;

	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
		if (return_count <= 1) {
			pkts[0] = rte_distributor_get_pkt_single(d->d_single,
				worker_id, return_count ? oldpkt[0] : NULL);
			return (pkts[0]) ? 1 : 0;
		} else
			return -EINVAL;
	}

	rte_distributor_request_pkt(d, worker_id, oldpkt, return_count);
	count = rte_distributor_poll_pkt(d, worker_id, pkts);
	while (count == -1) {
		uint64_t t = rte_rdtsc() + 100;

		while (rte_rdtsc() < t)
			rte_pause();
		count = rte_distributor_poll_pkt(d, worker_id, pkts);
	}
	return count;
}
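
/*
 * A minimal worker-loop sketch built on the calls above; purely illustrative,
 * with "d", "worker_id" and "app_quit_signal" standing in for whatever the
 * calling application provides:
 *
 *	struct rte_mbuf *bufs[RTE_DIST_BURST_SIZE];
 *	unsigned int num = 0;
 *
 *	while (!app_quit_signal) {
 *		num = rte_distributor_get_pkt(d, worker_id, bufs, bufs, num);
 *		// ... work on the 'num' mbufs held in bufs[] ...
 *	}
 *	// hand back any packets still held before the worker exits
 *	rte_distributor_return_pkt(d, worker_id, bufs, num);
 */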

int
rte_distributor_return_pkt(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **oldpkt, int num)
{
	struct rte_distributor_buffer *buf = &d->bufs[worker_id];
	unsigned int i;

	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
		if (num == 1)
			return rte_distributor_return_pkt_single(d->d_single,
				worker_id, oldpkt[0]);
		else if (num == 0)
			return rte_distributor_return_pkt_single(d->d_single,
				worker_id, NULL);
		else
			return -EINVAL;
	}

	/* Spin while handshake bits are set (scheduler clears it).
	 * Sync with worker on GET_BUF flag.
	 */
	while (unlikely(__atomic_load_n(&(buf->retptr64[0]), __ATOMIC_RELAXED)
			& (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF))) {
		rte_pause();
		uint64_t t = rte_rdtsc()+100;

		while (rte_rdtsc() < t)
			rte_pause();
	}

	/* Sync with distributor to acquire retptrs */
	__atomic_thread_fence(__ATOMIC_ACQUIRE);
	for (i = 0; i < RTE_DIST_BURST_SIZE; i++)
		/* Switch off the return bit first */
		buf->retptr64[i] = 0;

	for (i = num; i-- > 0; )
		buf->retptr64[i] = (((int64_t)(uintptr_t)oldpkt[i]) <<
			RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_VALID_BUF;

	/* Use RETURN_BUF on bufptr64 to notify the distributor that
	 * we won't read any mbufs from there even if GET_BUF is set.
	 * This allows the distributor to retrieve in-flight packets it has
	 * already sent.
	 */
	__atomic_or_fetch(&(buf->bufptr64[0]), RTE_DISTRIB_RETURN_BUF,
		__ATOMIC_ACQ_REL);

	/* Set RETURN_BUF on retptr64 even if we got no returns.
	 * Sync with distributor on RETURN_BUF flag. Release retptrs.
	 * Notify the distributor that we don't request any more packets.
	 */
	__atomic_store_n(&(buf->retptr64[0]),
		buf->retptr64[0] | RTE_DISTRIB_RETURN_BUF, __ATOMIC_RELEASE);

	return 0;
}

/**** APIs called on distributor core ***/

/* stores a packet returned from a worker inside the returns array */
static inline void
store_return(uintptr_t oldbuf, struct rte_distributor *d,
		unsigned int *ret_start, unsigned int *ret_count)
{
	/* store returns in a circular buffer */
	d->returns.mbufs[(*ret_start + *ret_count) & RTE_DISTRIB_RETURNS_MASK]
			= (void *)oldbuf;
	*ret_start += (*ret_count == RTE_DISTRIB_RETURNS_MASK);
	*ret_count += (*ret_count != RTE_DISTRIB_RETURNS_MASK);
}
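
/*
 * A worked example of the ring bookkeeping above, assuming a hypothetical
 * mask value of 7 (an 8-slot ring): with start == 2 and count == 7 the ring
 * is full, so a new return is written to index (2 + 7) & 7 == 1, start
 * advances to 3 and count stays at 7, meaning the previously-oldest entry at
 * index 2 drops out of the valid window. Until the ring fills, only count
 * grows and start stays put.
 */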

/*
 * Match the flow_ids (tags) of the incoming packets to the flow_ids
 * of the inflight packets (both inflight on the workers and in each worker
 * backlog). This will then allow us to pin those packets to the relevant
 * workers to give us our atomic flow pinning.
 */
void
find_match_scalar(struct rte_distributor *d,
			uint16_t *data_ptr,
			uint16_t *output_ptr)
{
	struct rte_distributor_backlog *bl;
	uint16_t i, j, w;

	/*
	 * 1. Loop through all worker IDs
	 * 2. Compare the current inflights to the incoming tags
	 * 3. Compare the current backlog to the incoming tags
	 * 4. Add any matches to the output
	 */
	for (j = 0; j < RTE_DIST_BURST_SIZE; j++)
		output_ptr[j] = 0;

	for (i = 0; i < d->num_workers; i++) {
		bl = &d->backlog[i];

		for (j = 0; j < RTE_DIST_BURST_SIZE; j++)
			for (w = 0; w < RTE_DIST_BURST_SIZE; w++)
				if (d->in_flight_tags[i][j] == data_ptr[w])
					output_ptr[w] = i + 1;

		for (j = 0; j < RTE_DIST_BURST_SIZE; j++)
			for (w = 0; w < RTE_DIST_BURST_SIZE; w++)
				if (bl->tags[j] == data_ptr[w])
					output_ptr[w] = i + 1;
	}

	/*
	 * At this stage, the output contains 8 16-bit values, with
	 * each non-zero value containing the worker ID to which the
	 * corresponding incoming flow is pinned.
	 */
}
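
/*
 * An illustrative example with assumed values, RTE_DIST_BURST_SIZE == 8:
 * if the incoming tags in data_ptr[] are { 5, 9, 5, 3, 7, 7, 11, 13 }, the
 * worker with index 1 has tag 9 in flight, the worker with index 2 has tag 3
 * in its backlog, and no worker holds any of the other tags, then output_ptr[]
 * ends up as { 0, 2, 0, 3, 0, 0, 0, 0 }: each matched slot holds the owning
 * worker index plus one, and unmatched slots stay zero so the process loop
 * assigns them a worker itself.
 */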

/*
 * When a worker has called rte_distributor_return_pkt()
 * and passed the RTE_DISTRIB_RETURN_BUF handshake through retptr64,
 * the distributor must retrieve both inflight and backlog packets assigned
 * to that worker and reprocess them to another worker.
 */
static void
handle_worker_shutdown(struct rte_distributor *d, unsigned int wkr)
{
	struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
	/* double BURST size for storing both inflights and backlog */
	struct rte_mbuf *pkts[RTE_DIST_BURST_SIZE * 2];
	unsigned int pkts_count = 0;
	unsigned int i;

	/* If GET_BUF is cleared there are in-flight packets sent to the
	 * worker, which no longer requires new packets.
	 * They must be retrieved and assigned to another worker.
	 */
	if (!(__atomic_load_n(&(buf->bufptr64[0]), __ATOMIC_ACQUIRE)
		& RTE_DISTRIB_GET_BUF))
		for (i = 0; i < RTE_DIST_BURST_SIZE; i++)
			if (buf->bufptr64[i] & RTE_DISTRIB_VALID_BUF)
				pkts[pkts_count++] = (void *)((uintptr_t)
					(buf->bufptr64[i]
						>> RTE_DISTRIB_FLAG_BITS));

	/* Make the following changes to the handshake flags on bufptr64:
	 * - set GET_BUF to indicate that the distributor can overwrite the
	 *   buffer with new packets if the worker makes a new request.
	 * - clear RETURN_BUF to unlock reads on the worker side.
	 */
	__atomic_store_n(&(buf->bufptr64[0]), RTE_DISTRIB_GET_BUF,
		__ATOMIC_RELEASE);

	/* Collect backlog packets from worker */
	for (i = 0; i < d->backlog[wkr].count; i++)
		pkts[pkts_count++] = (void *)((uintptr_t)
			(d->backlog[wkr].pkts[i] >> RTE_DISTRIB_FLAG_BITS));

	d->backlog[wkr].count = 0;

	/* Clear both inflight and backlog tags */
	for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
		d->in_flight_tags[wkr][i] = 0;
		d->backlog[wkr].tags[i] = 0;
	}

	if (pkts_count > 0)
		rte_distributor_process(d, pkts, pkts_count);
}

/*
 * When the handshake bits indicate that there are packets coming
 * back from the worker, this function is called to copy and store
 * the valid returned pointers (store_return).
 */
static unsigned int
handle_returns(struct rte_distributor *d, unsigned int wkr)
{
	struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
	uintptr_t oldbuf;
	unsigned int ret_start = d->returns.start,
			ret_count = d->returns.count;
	unsigned int count = 0;
	unsigned int i;

	/* Sync on GET_BUF flag. Acquire retptrs. */
	if (__atomic_load_n(&(buf->retptr64[0]), __ATOMIC_ACQUIRE)
		& (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF)) {
		for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
			if (buf->retptr64[i] & RTE_DISTRIB_VALID_BUF) {
				oldbuf = ((uintptr_t)(buf->retptr64[i] >>
					RTE_DISTRIB_FLAG_BITS));
				/* store returns in a circular buffer */
				store_return(oldbuf, d, &ret_start, &ret_count);
				count++;
				buf->retptr64[i] &= ~RTE_DISTRIB_VALID_BUF;
			}
		}
		d->returns.start = ret_start;
		d->returns.count = ret_count;

		/* If the worker requested packets with GET_BUF, mark it
		 * active; otherwise (RETURN_BUF), mark it not active.
		 */
		d->activesum -= d->active[wkr];
		d->active[wkr] = !!(buf->retptr64[0] & RTE_DISTRIB_GET_BUF);
		d->activesum += d->active[wkr];

		/* If the worker returned packets without requesting new ones,
		 * handle all in-flight and backlog packets assigned to it.
		 */
		if (unlikely(buf->retptr64[0] & RTE_DISTRIB_RETURN_BUF))
			handle_worker_shutdown(d, wkr);

		/* Clear for the worker to populate with more returns.
		 * Sync with distributor on GET_BUF flag. Release retptrs.
		 */
		__atomic_store_n(&(buf->retptr64[0]), 0, __ATOMIC_RELEASE);
	}

	return count;
}
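
/*
 * A small illustration of the active/activesum bookkeeping above, with values
 * assumed for the example: with four workers where workers 0, 1 and 3 last
 * signalled GET_BUF and worker 2 signalled RETURN_BUF, d->active[] holds
 * { 1, 1, 0, 1 } and d->activesum is 3, so rte_distributor_process() still
 * has somewhere to send packets.
 */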

/*
 * This function releases a burst (cache line) to a worker.
 * It is called from the process function when a cacheline is
 * full to make room for more packets for that worker, or when
 * all packets have been assigned to bursts and need to be flushed
 * to the workers.
 * It also needs to wait for any outstanding packets from the worker
 * before sending out new packets.
 */
static unsigned int
release(struct rte_distributor *d, unsigned int wkr)
{
	struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
	unsigned int i;

	handle_returns(d, wkr);
	if (unlikely(!d->active[wkr]))
		return 0;

	/* Sync with worker on GET_BUF flag */
	while (!(__atomic_load_n(&(d->bufs[wkr].bufptr64[0]), __ATOMIC_ACQUIRE)
		& RTE_DISTRIB_GET_BUF)) {
		handle_returns(d, wkr);
		if (unlikely(!d->active[wkr]))
			return 0;
		rte_pause();
	}

	buf->count = 0;

	for (i = 0; i < d->backlog[wkr].count; i++) {
		d->bufs[wkr].bufptr64[i] = d->backlog[wkr].pkts[i] |
				RTE_DISTRIB_GET_BUF | RTE_DISTRIB_VALID_BUF;
		d->in_flight_tags[wkr][i] = d->backlog[wkr].tags[i];
	}
	buf->count = i;

	for ( ; i < RTE_DIST_BURST_SIZE ; i++) {
		buf->bufptr64[i] = RTE_DISTRIB_GET_BUF;
		d->in_flight_tags[wkr][i] = 0;
	}

	d->backlog[wkr].count = 0;

	/* Clear the GET bit.
	 * Sync with worker on GET_BUF flag. Release bufptrs.
	 */
	__atomic_store_n(&(buf->bufptr64[0]),
		buf->bufptr64[0] & ~RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);

	return buf->count;
}

/* process a set of packets to distribute them to workers */
int
rte_distributor_process(struct rte_distributor *d,
		struct rte_mbuf **mbufs, unsigned int num_mbufs)
{
	unsigned int next_idx = 0;
	static unsigned int wkr;
	struct rte_mbuf *next_mb = NULL;
	int64_t next_value = 0;
	uint16_t new_tag = 0;
	uint16_t flows[RTE_DIST_BURST_SIZE] __rte_cache_aligned;
	unsigned int i, j, w, wid, matching_required;

	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
		/* Call the old API */
		return rte_distributor_process_single(d->d_single,
			mbufs, num_mbufs);
	}

	for (wid = 0 ; wid < d->num_workers; wid++)
		handle_returns(d, wid);

	if (unlikely(num_mbufs == 0)) {
		/* Flush out all non-full cache-lines to workers. */
		for (wid = 0 ; wid < d->num_workers; wid++) {
			/* Sync with worker on GET_BUF flag. */
			if (__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
				__ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF) {
				release(d, wid);
				handle_returns(d, wid);
			}
		}
		return 0;
	}

	if (unlikely(!d->activesum))
		return 0;

	while (next_idx < num_mbufs) {
		uint16_t matches[RTE_DIST_BURST_SIZE];
		unsigned int pkts;

		/* Sync with worker on GET_BUF flag. */
		if (__atomic_load_n(&(d->bufs[wkr].bufptr64[0]),
			__ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)
			d->bufs[wkr].count = 0;

		if ((num_mbufs - next_idx) < RTE_DIST_BURST_SIZE)
			pkts = num_mbufs - next_idx;
		else
			pkts = RTE_DIST_BURST_SIZE;

		for (i = 0; i < pkts; i++) {
			if (mbufs[next_idx + i]) {
				/* flows have to be non-zero */
				flows[i] = mbufs[next_idx + i]->hash.usr | 1;
			} else
				flows[i] = 0;
		}
		for (; i < RTE_DIST_BURST_SIZE; i++)
			flows[i] = 0;

		matching_required = 1;

		for (j = 0; j < pkts; j++) {
			if (unlikely(!d->activesum))
				return next_idx;

			if (unlikely(matching_required)) {
				switch (d->dist_match_fn) {
				case RTE_DIST_MATCH_VECTOR:
					find_match_vec(d, &flows[0],
						&matches[0]);
					break;
				default:
					find_match_scalar(d, &flows[0],
						&matches[0]);
				}
				matching_required = 0;
			}
			/*
			 * The matches array now contains the intended worker
			 * ID (+1) of the incoming packets. Any zeroes need to
			 * be assigned workers.
			 */

			next_mb = mbufs[next_idx++];
			next_value = (((int64_t)(uintptr_t)next_mb) <<
					RTE_DISTRIB_FLAG_BITS);
			/*
			 * The user is advised to set the tag value for each
			 * mbuf before calling rte_distributor_process().
			 * User-defined tags are used to identify flows, or
			 * sessions.
			 */
			/* flows MUST be non-zero */
			new_tag = (uint16_t)(next_mb->hash.usr) | 1;

			/*
			 * Uncommenting the next line will cause the find_match
			 * function to be optimized out, making this function
			 * do parallel (non-atomic) distribution
			 */
			/* matches[j] = 0; */

			if (matches[j] && d->active[matches[j]-1]) {
				struct rte_distributor_backlog *bl =
						&d->backlog[matches[j]-1];
				if (unlikely(bl->count ==
						RTE_DIST_BURST_SIZE)) {
					release(d, matches[j]-1);
					if (!d->active[matches[j]-1]) {
						j--;
						next_idx--;
						matching_required = 1;
						continue;
					}
				}

				/* Add to worker that already has flow */
				unsigned int idx = bl->count++;

				bl->tags[idx] = new_tag;
				bl->pkts[idx] = next_value;

			} else {
				struct rte_distributor_backlog *bl;

				while (unlikely(!d->active[wkr]))
					wkr = (wkr + 1) % d->num_workers;
				bl = &d->backlog[wkr];

				if (unlikely(bl->count ==
						RTE_DIST_BURST_SIZE)) {
					release(d, wkr);
					if (!d->active[wkr]) {
						j--;
						next_idx--;
						matching_required = 1;
						continue;
					}
				}

				/* Add to the current worker */
				unsigned int idx = bl->count++;

				bl->tags[idx] = new_tag;
				bl->pkts[idx] = next_value;
				/*
				 * Now that we've just added an unpinned flow
				 * to a worker, we need to ensure that all
				 * other packets with that same flow will go
				 * to the same worker in this burst.
				 */
				for (w = j; w < pkts; w++)
					if (flows[w] == new_tag)
						matches[w] = wkr+1;
			}
		}
		wkr = (wkr + 1) % d->num_workers;
	}

	/* Flush out all non-full cache-lines to workers. */
	for (wid = 0 ; wid < d->num_workers; wid++)
		/* Sync with worker on GET_BUF flag. */
		if ((__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
			__ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF))
			release(d, wid);

	return num_mbufs;
}
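
/*
 * A minimal distributor-core sketch using the calls in this file; purely
 * illustrative, with "rx_burst_from_nic" standing in for however the
 * application obtains packets:
 *
 *	struct rte_mbuf *in[RTE_DIST_BURST_SIZE];
 *	struct rte_mbuf *done[RTE_DIST_BURST_SIZE];
 *	unsigned int n, m;
 *
 *	for (;;) {
 *		n = rx_burst_from_nic(in, RTE_DIST_BURST_SIZE);
 *		rte_distributor_process(d, in, n);
 *		m = rte_distributor_returned_pkts(d, done, RTE_DIST_BURST_SIZE);
 *		// ... transmit or free the 'm' completed mbufs ...
 *	}
 *	rte_distributor_flush(d);
 *	rte_distributor_clear_returns(d);
 */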

/* return to the caller, packets returned from workers */
int
rte_distributor_returned_pkts(struct rte_distributor *d,
		struct rte_mbuf **mbufs, unsigned int max_mbufs)
{
	struct rte_distributor_returned_pkts *returns = &d->returns;
	unsigned int retval = (max_mbufs < returns->count) ?
			max_mbufs : returns->count;
	unsigned int i;

	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
		/* Call the old API */
		return rte_distributor_returned_pkts_single(d->d_single,
				mbufs, max_mbufs);
	}

	for (i = 0; i < retval; i++) {
		unsigned int idx = (returns->start + i) &
				RTE_DISTRIB_RETURNS_MASK;
		mbufs[i] = returns->mbufs[idx];
	}
	returns->start += i;
	returns->count -= i;

	return retval;
}

/*
 * Return the number of packets in-flight in a distributor, i.e. packets
 * being worked on or queued up in a backlog.
 */
static inline unsigned int
total_outstanding(const struct rte_distributor *d)
{
	unsigned int wkr, total_outstanding = 0;

	for (wkr = 0; wkr < d->num_workers; wkr++)
		total_outstanding += d->backlog[wkr].count;

	return total_outstanding;
}

/*
 * Flush the distributor, so that there are no outstanding packets in flight
 * or queued up.
 */
int
rte_distributor_flush(struct rte_distributor *d)
{
	unsigned int flushed;
	unsigned int wkr;

	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
		/* Call the old API */
		return rte_distributor_flush_single(d->d_single);
	}

	flushed = total_outstanding(d);
	while (total_outstanding(d) > 0)
		rte_distributor_process(d, NULL, 0);

	/* wait 10ms to allow all workers to drain the pkts */
	rte_delay_us(10000);

	/*
	 * Send empty burst to all workers to allow them to exit
	 * gracefully, should they need to.
	 */
	rte_distributor_process(d, NULL, 0);

	for (wkr = 0; wkr < d->num_workers; wkr++)
		handle_returns(d, wkr);

	return flushed;
}

/* clears the internal returns array in the distributor */
void
rte_distributor_clear_returns(struct rte_distributor *d)
{
	unsigned int wkr;

	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
		/* Call the old API */
		rte_distributor_clear_returns_single(d->d_single);
		return;
	}

	/* throw away returns, so workers can exit */
	for (wkr = 0; wkr < d->num_workers; wkr++)
		/* Sync with worker. Release retptrs. */
		__atomic_store_n(&(d->bufs[wkr].retptr64[0]), 0,
				__ATOMIC_RELEASE);
}

/* creates a distributor instance */
struct rte_distributor *
rte_distributor_create(const char *name,
		unsigned int socket_id,
		unsigned int num_workers,
		unsigned int alg_type)
{
	struct rte_distributor *d;
	struct rte_dist_burst_list *dist_burst_list;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	unsigned int i;

	/* TODO Reorganise function properly around RTE_DIST_ALG_SINGLE/BURST */

	/* compilation-time checks */
	RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);

	if (name == NULL || num_workers >=
		(unsigned int)RTE_MIN(RTE_DISTRIB_MAX_WORKERS, RTE_MAX_LCORE)) {
		rte_errno = EINVAL;
		return NULL;
	}

	if (alg_type == RTE_DIST_ALG_SINGLE) {
		d = malloc(sizeof(struct rte_distributor));
		if (d == NULL) {
			rte_errno = ENOMEM;
			return NULL;
		}
		d->d_single = rte_distributor_create_single(name,
				socket_id, num_workers);
		if (d->d_single == NULL) {
			free(d);
			/* rte_errno will have been set */
			return NULL;
		}
		d->alg_type = alg_type;
		return d;
	}

	snprintf(mz_name, sizeof(mz_name), RTE_DISTRIB_PREFIX"%s", name);
	mz = rte_memzone_reserve(mz_name, sizeof(*d), socket_id, NO_FLAGS);
	if (mz == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	d = mz->addr;
	strlcpy(d->name, name, sizeof(d->name));
	d->num_workers = num_workers;
	d->alg_type = alg_type;

	d->dist_match_fn = RTE_DIST_MATCH_SCALAR;
#if defined(RTE_ARCH_X86)
	d->dist_match_fn = RTE_DIST_MATCH_VECTOR;
#endif

	/*
	 * Set up the backlog tags so they're pointing at the second cache
	 * line for performance during flow matching
	 */
	for (i = 0 ; i < num_workers ; i++)
		d->backlog[i].tags = &d->in_flight_tags[i][RTE_DIST_BURST_SIZE];

	memset(d->active, 0, sizeof(d->active));
	d->activesum = 0;

	dist_burst_list = RTE_TAILQ_CAST(rte_dist_burst_tailq.head,
					  rte_dist_burst_list);

	rte_mcfg_tailq_write_lock();
	TAILQ_INSERT_TAIL(dist_burst_list, d, next);
	rte_mcfg_tailq_write_unlock();

	return d;
}
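
/*
 * A short creation sketch; illustrative only, with the name, socket and
 * worker count chosen arbitrarily by a hypothetical application:
 *
 *	struct rte_distributor *dist =
 *		rte_distributor_create("pkt_dist", rte_socket_id(),
 *				4, RTE_DIST_ALG_BURST);
 *	if (dist == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create distributor: %s\n",
 *				rte_strerror(rte_errno));
 */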