/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <sys/queue.h>
#include <string.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_cycles.h>
#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>
#include <rte_pause.h>
#include <rte_tailq.h>

#include "rte_distributor.h"
#include "rte_distributor_single.h"
#include "distributor_private.h"

TAILQ_HEAD(rte_dist_burst_list, rte_distributor);

static struct rte_tailq_elem rte_dist_burst_tailq = {
	.name = "RTE_DIST_BURST",
};
EAL_REGISTER_TAILQ(rte_dist_burst_tailq)
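
/*
 * Registering the tailq with the EAL lets every burst-mode distributor
 * instance created in this process be tracked on a shared list (see
 * rte_distributor_create(), which inserts each new instance).
 */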

/**** APIs called by workers ****/

/**** Burst Packet APIs called by workers ****/

void
rte_distributor_request_pkt(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **oldpkt,
		unsigned int count)
{
	struct rte_distributor_buffer *buf = &(d->bufs[worker_id]);
	unsigned int i;

	volatile int64_t *retptr64;

	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
		rte_distributor_request_pkt_single(d->d_single,
			worker_id, count ? oldpkt[0] : NULL);
		return;
	}

	retptr64 = &(buf->retptr64[0]);
	/* Spin while handshake bits are set (scheduler clears it).
	 * Sync with distributor on GET_BUF flag.
	 */
	while (unlikely(__atomic_load_n(retptr64, __ATOMIC_ACQUIRE)
			& RTE_DISTRIB_GET_BUF)) {
		rte_pause();
		uint64_t t = rte_rdtsc() + 100;

		while (rte_rdtsc() < t)
			rte_pause();
	}

	/*
	 * OK, if we've got here, then the scheduler has just cleared the
	 * handshake bits. Populate the retptrs with returning packets.
	 */
	for (i = count; i < RTE_DIST_BURST_SIZE; i++)
		buf->retptr64[i] = 0;

	/* Set the Return bit for each packet returned */
	for (i = count; i-- > 0; )
		buf->retptr64[i] =
			(((int64_t)(uintptr_t)(oldpkt[i])) <<
			RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_RETURN_BUF;

	/*
	 * Finally, set the GET_BUF flag to signal to the distributor that
	 * the cache line is ready for processing.
	 * Sync with distributor to release retptrs.
	 */
	__atomic_store_n(retptr64, *retptr64 | RTE_DISTRIB_GET_BUF,
			__ATOMIC_RELEASE);
}
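
/*
 * Note on the encoding used above: each retptr64/bufptr64 slot carries an
 * mbuf pointer shifted up by RTE_DISTRIB_FLAG_BITS, with the freed-up low
 * bits holding the handshake flags (GET_BUF/RETURN_BUF/VALID_BUF). The
 * receiving side recovers the pointer with the inverse shift, as in
 * handle_returns() and rte_distributor_poll_pkt() below.
 */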

int
rte_distributor_poll_pkt(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **pkts)
{
	struct rte_distributor_buffer *buf = &d->bufs[worker_id];
	int64_t ret;
	int count = 0;
	unsigned int i;

	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
		pkts[0] = rte_distributor_poll_pkt_single(d->d_single,
			worker_id);
		return (pkts[0]) ? 1 : 0;
	}

	/* If the GET_BUF bit is still set, there are no packets yet.
	 * Sync with distributor to acquire bufptrs.
	 */
	if (__atomic_load_n(&(buf->bufptr64[0]), __ATOMIC_ACQUIRE)
		& RTE_DISTRIB_GET_BUF)
		return -1;

	/* since bufptr64 is signed, this should be an arithmetic shift */
	for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
		if (likely(buf->bufptr64[i] & RTE_DISTRIB_VALID_BUF)) {
			ret = buf->bufptr64[i] >> RTE_DISTRIB_FLAG_BITS;
			pkts[count++] = (struct rte_mbuf *)((uintptr_t)(ret));
		}
	}

	/*
	 * Now that we've got the contents of the cache line into an array of
	 * mbuf pointers, toggle the bit so the scheduler can start working
	 * on the next cache line while we're working.
	 * Sync with distributor on GET_BUF flag. Release bufptrs.
	 */
	__atomic_store_n(&(buf->bufptr64[0]),
		buf->bufptr64[0] | RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);

	return count;
}

int
rte_distributor_get_pkt(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **pkts,
		struct rte_mbuf **oldpkt, unsigned int return_count)
{
	int count;

	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
		if (return_count <= 1) {
			pkts[0] = rte_distributor_get_pkt_single(d->d_single,
				worker_id, return_count ? oldpkt[0] : NULL);
			return (pkts[0]) ? 1 : 0;
		} else
			return -EINVAL;
	}

	rte_distributor_request_pkt(d, worker_id, oldpkt, return_count);
	count = rte_distributor_poll_pkt(d, worker_id, pkts);
	while (count == -1) {
		uint64_t t = rte_rdtsc() + 100;

		while (rte_rdtsc() < t)
			rte_pause();

		count = rte_distributor_poll_pkt(d, worker_id, pkts);
	}
	return count;
}

int
rte_distributor_return_pkt(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **oldpkt, int num)
{
	struct rte_distributor_buffer *buf = &d->bufs[worker_id];
	unsigned int i;

	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
		if (num == 1)
			return rte_distributor_return_pkt_single(d->d_single,
				worker_id, oldpkt[0]);
		else
			return -EINVAL;
	}

	/* Spin while handshake bits are set (scheduler clears it).
	 * Sync with distributor on GET_BUF flag.
	 */
	while (unlikely(__atomic_load_n(&(buf->retptr64[0]), __ATOMIC_RELAXED)
			& RTE_DISTRIB_GET_BUF)) {
		rte_pause();
		uint64_t t = rte_rdtsc() + 100;

		while (rte_rdtsc() < t)
			rte_pause();
	}

	/* Sync with distributor to acquire retptrs */
	__atomic_thread_fence(__ATOMIC_ACQUIRE);
	for (i = 0; i < RTE_DIST_BURST_SIZE; i++)
		/* Switch off the return bit first */
		buf->retptr64[i] &= ~RTE_DISTRIB_RETURN_BUF;

	for (i = num; i-- > 0; )
		buf->retptr64[i] = (((int64_t)(uintptr_t)oldpkt[i]) <<
			RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_RETURN_BUF;

	/* Set the GET_BUF bit even if we got no returns.
	 * Sync with distributor on GET_BUF flag. Release retptrs.
	 */
	__atomic_store_n(&(buf->retptr64[0]),
		buf->retptr64[0] | RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);

	return 0;
}
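
/*
 * Illustrative worker loop (a sketch, not part of this library; `d`,
 * `worker_id` and the `quit` flag are assumed to come from the application):
 *
 *	struct rte_mbuf *bufs[RTE_DIST_BURST_SIZE];
 *	unsigned int num = 0;
 *
 *	while (!quit) {
 *		num = rte_distributor_get_pkt(d, worker_id, bufs, bufs, num);
 *		// ... handle the num packets returned in bufs ...
 *	}
 *	rte_distributor_return_pkt(d, worker_id, bufs, num);
 */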

/**** APIs called on distributor core ****/

/* stores a packet returned from a worker inside the returns array */
static inline void
store_return(uintptr_t oldbuf, struct rte_distributor *d,
		unsigned int *ret_start, unsigned int *ret_count)
{
	if (!oldbuf)
		return;
	/* store returns in a circular buffer */
	d->returns.mbufs[(*ret_start + *ret_count) & RTE_DISTRIB_RETURNS_MASK]
			= (void *)oldbuf;
	*ret_start += (*ret_count == RTE_DISTRIB_RETURNS_MASK);
	*ret_count += (*ret_count != RTE_DISTRIB_RETURNS_MASK);
}
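
/*
 * Example of the index arithmetic above, assuming the usual power-of-two
 * ring (RTE_DISTRIB_RETURNS_MASK == ring size - 1): with *ret_start == 2
 * and *ret_count == 3, the next return lands in slot (2 + 3) & mask == 5
 * and count becomes 4. Once count reaches the mask value, the ring is
 * treated as full: start advances instead, so the oldest entry drops out
 * of the window.
 */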

/*
 * Match the flow_ids (tags) of the incoming packets to the flow_ids
 * of the inflight packets (both inflight on the workers and in each worker
 * backlog). This will then allow us to pin those packets to the relevant
 * workers to give us our atomic flow pinning.
 */
void
find_match_scalar(struct rte_distributor *d,
			uint16_t *data_ptr,
			uint16_t *output_ptr)
{
	struct rte_distributor_backlog *bl;
	uint16_t i, j, w;

	/*
	 * Function overview:
	 * 1. Loop through all worker IDs
	 * 2. Compare the current inflights to the incoming tags
	 * 3. Compare the current backlog to the incoming tags
	 * 4. Add any matches to the output
	 */
	for (j = 0; j < RTE_DIST_BURST_SIZE; j++)
		output_ptr[j] = 0;

	for (i = 0; i < d->num_workers; i++) {
		bl = &d->backlog[i];
		for (j = 0; j < RTE_DIST_BURST_SIZE; j++)
			for (w = 0; w < RTE_DIST_BURST_SIZE; w++)
				if (d->in_flight_tags[i][j] == data_ptr[w])
					output_ptr[w] = i+1;
		for (j = 0; j < RTE_DIST_BURST_SIZE; j++)
			for (w = 0; w < RTE_DIST_BURST_SIZE; w++)
				if (bl->tags[j] == data_ptr[w])
					output_ptr[w] = i+1;
	}

	/*
	 * At this stage, the output contains 8 16-bit values, each non-zero
	 * value holding the ID (plus one) of the worker to which the
	 * corresponding flow is pinned.
	 */
}
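
/*
 * For example, if incoming tag data_ptr[2] is found among worker 4's
 * in-flight or backlogged tags, output_ptr[2] is set to 5 (worker ID plus
 * one); entries left at zero mean the flow is not yet pinned to any worker.
 * find_match_vec() (used on x86) performs the same matching with SIMD
 * instructions.
 */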

/*
 * When the handshake bits indicate that there are packets coming
 * back from the worker, this function is called to copy and store
 * the valid returned pointers (store_return).
 */
static unsigned int
handle_returns(struct rte_distributor *d, unsigned int wkr)
{
	struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
	uintptr_t oldbuf;
	unsigned int ret_start = d->returns.start,
			ret_count = d->returns.count;
	unsigned int count = 0;
	unsigned int i;

	/* Sync on GET_BUF flag. Acquire retptrs. */
	if (__atomic_load_n(&(buf->retptr64[0]), __ATOMIC_ACQUIRE)
		& RTE_DISTRIB_GET_BUF) {
		for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
			if (buf->retptr64[i] & RTE_DISTRIB_RETURN_BUF) {
				oldbuf = ((uintptr_t)(buf->retptr64[i] >>
					RTE_DISTRIB_FLAG_BITS));
				/* store returns in a circular buffer */
				store_return(oldbuf, d, &ret_start,
						&ret_count);
				count++;
				buf->retptr64[i] &= ~RTE_DISTRIB_RETURN_BUF;
			}
		}
		d->returns.start = ret_start;
		d->returns.count = ret_count;
		/* Clear for the worker to populate with more returns.
		 * Sync with distributor on GET_BUF flag. Release retptrs.
		 */
		__atomic_store_n(&(buf->retptr64[0]), 0, __ATOMIC_RELEASE);
	}
	return count;
}

/*
 * This function releases a burst (cache line) to a worker.
 * It is called from the process function when a cache line is
 * full to make room for more packets for that worker, or when
 * all packets have been assigned to bursts and need to be flushed
 * to the workers.
 * It also needs to wait for any outstanding packets from the worker
 * before sending out new packets.
 */
static unsigned int
release(struct rte_distributor *d, unsigned int wkr)
{
	struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
	unsigned int i;

	handle_returns(d, wkr);

	/* Sync with worker on GET_BUF flag */
	while (!(__atomic_load_n(&(d->bufs[wkr].bufptr64[0]), __ATOMIC_ACQUIRE)
		& RTE_DISTRIB_GET_BUF)) {
		handle_returns(d, wkr);
		rte_pause();
	}

	buf->count = 0;

	for (i = 0; i < d->backlog[wkr].count; i++) {
		d->bufs[wkr].bufptr64[i] = d->backlog[wkr].pkts[i] |
				RTE_DISTRIB_GET_BUF | RTE_DISTRIB_VALID_BUF;
		d->in_flight_tags[wkr][i] = d->backlog[wkr].tags[i];
	}
	buf->count = i;
	for ( ; i < RTE_DIST_BURST_SIZE; i++) {
		buf->bufptr64[i] = RTE_DISTRIB_GET_BUF;
		d->in_flight_tags[wkr][i] = 0;
	}

	d->backlog[wkr].count = 0;

	/* Clear the GET bit.
	 * Sync with worker on GET_BUF flag. Release bufptrs.
	 */
	__atomic_store_n(&(buf->bufptr64[0]),
		buf->bufptr64[0] & ~RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);

	return buf->count;
}

/* process a set of packets to distribute them to workers */
int
rte_distributor_process(struct rte_distributor *d,
		struct rte_mbuf **mbufs, unsigned int num_mbufs)
{
	unsigned int next_idx = 0;
	static unsigned int wkr;
	struct rte_mbuf *next_mb = NULL;
	int64_t next_value = 0;
	uint16_t new_tag = 0;
	uint16_t flows[RTE_DIST_BURST_SIZE] __rte_cache_aligned;
	unsigned int i, j, w, wid;

	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
		/* Call the old API */
		return rte_distributor_process_single(d->d_single,
			mbufs, num_mbufs);
	}

	if (unlikely(num_mbufs == 0)) {
		/* Flush out all non-full cache-lines to workers. */
		for (wid = 0; wid < d->num_workers; wid++) {
			/* Sync with worker on GET_BUF flag. */
			handle_returns(d, wid);
			if (__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
				__ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF) {
				release(d, wid);
				handle_returns(d, wid);
			}
		}
		return 0;
	}

	while (next_idx < num_mbufs) {
		uint16_t matches[RTE_DIST_BURST_SIZE];
		unsigned int pkts;

		/* Sync with worker on GET_BUF flag. */
		if (__atomic_load_n(&(d->bufs[wkr].bufptr64[0]),
			__ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)
			d->bufs[wkr].count = 0;

		if ((num_mbufs - next_idx) < RTE_DIST_BURST_SIZE)
			pkts = num_mbufs - next_idx;
		else
			pkts = RTE_DIST_BURST_SIZE;

		for (i = 0; i < pkts; i++) {
			if (mbufs[next_idx + i]) {
				/* flows have to be non-zero */
				flows[i] = mbufs[next_idx + i]->hash.usr | 1;
			} else
				flows[i] = 0;
		}
		for (; i < RTE_DIST_BURST_SIZE; i++)
			flows[i] = 0;

		switch (d->dist_match_fn) {
		case RTE_DIST_MATCH_VECTOR:
			find_match_vec(d, &flows[0], &matches[0]);
			break;
		default:
			find_match_scalar(d, &flows[0], &matches[0]);
		}

		/*
		 * The matches array now contains the intended worker ID (+1)
		 * of each incoming packet. Any zeroes need to be assigned
		 * workers.
		 */

		for (j = 0; j < pkts; j++) {
			next_mb = mbufs[next_idx++];
			next_value = (((int64_t)(uintptr_t)next_mb) <<
					RTE_DISTRIB_FLAG_BITS);
			/*
			 * The user is advised to set the tag value for each
			 * mbuf before calling rte_distributor_process.
			 * User-defined tags are used to identify flows,
			 * or sessions.
			 */
			/* flows MUST be non-zero */
			new_tag = (uint16_t)(next_mb->hash.usr) | 1;

			/*
			 * Uncommenting the next line will cause the find_match
			 * function to be optimized out, making this function
			 * do parallel (non-atomic) distribution
			 */
			/* matches[j] = 0; */

			if (matches[j]) {
				struct rte_distributor_backlog *bl =
						&d->backlog[matches[j]-1];
				if (unlikely(bl->count ==
						RTE_DIST_BURST_SIZE)) {
					release(d, matches[j]-1);
				}

				/* Add to worker that already has flow */
				unsigned int idx = bl->count++;

				bl->tags[idx] = new_tag;
				bl->pkts[idx] = next_value;

			} else {
				struct rte_distributor_backlog *bl =
						&d->backlog[wkr];
				if (unlikely(bl->count ==
						RTE_DIST_BURST_SIZE)) {
					release(d, wkr);
				}

				/* Add to current worker */
				unsigned int idx = bl->count++;

				bl->tags[idx] = new_tag;
				bl->pkts[idx] = next_value;
				/*
				 * Now that we've just added an unpinned flow
				 * to a worker, we need to ensure that all
				 * other packets with that same flow will go
				 * to the same worker in this burst.
				 */
				for (w = j; w < pkts; w++)
					if (flows[w] == new_tag)
						matches[w] = wkr+1;
			}
		}
		wkr++;
		if (wkr >= d->num_workers)
			wkr = 0;
	}

	/* Flush out all non-full cache-lines to workers. */
	for (wid = 0; wid < d->num_workers; wid++)
		/* Sync with worker on GET_BUF flag. */
		if ((__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
			__ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF))
			release(d, wid);

	return num_mbufs;
}

/* return to the caller, packets returned from workers */
int
rte_distributor_returned_pkts(struct rte_distributor *d,
		struct rte_mbuf **mbufs, unsigned int max_mbufs)
{
	struct rte_distributor_returned_pkts *returns = &d->returns;
	unsigned int retval = (max_mbufs < returns->count) ?
			max_mbufs : returns->count;
	unsigned int i;

	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
		/* Call the old API */
		return rte_distributor_returned_pkts_single(d->d_single,
				mbufs, max_mbufs);
	}

	for (i = 0; i < retval; i++) {
		unsigned int idx = (returns->start + i) &
				RTE_DISTRIB_RETURNS_MASK;

		mbufs[i] = returns->mbufs[idx];
	}
	returns->start += i;
	returns->count -= i;

	return retval;
}
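
/*
 * Illustrative distributor-core usage (a sketch; `port` and BURST_SIZE are
 * application-side placeholders):
 *
 *	struct rte_mbuf *bufs[BURST_SIZE];
 *	uint16_t nb_rx = rte_eth_rx_burst(port, 0, bufs, BURST_SIZE);
 *
 *	rte_distributor_process(d, bufs, nb_rx);
 *	int nb_ret = rte_distributor_returned_pkts(d, bufs, BURST_SIZE);
 *	// ... bufs now holds up to nb_ret packets the workers have finished
 */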

/*
 * Return the number of packets in-flight in a distributor, i.e. packets
 * being worked on or queued up in a backlog.
 */
static inline unsigned int
total_outstanding(const struct rte_distributor *d)
{
	unsigned int wkr, total_outstanding = 0;

	for (wkr = 0; wkr < d->num_workers; wkr++)
		total_outstanding += d->backlog[wkr].count;

	return total_outstanding;
}

/*
 * Flush the distributor, so that there are no outstanding packets in flight
 * or queued up.
 */
int
rte_distributor_flush(struct rte_distributor *d)
{
	unsigned int flushed;
	unsigned int wkr;

	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
		/* Call the old API */
		return rte_distributor_flush_single(d->d_single);
	}

	flushed = total_outstanding(d);
	while (total_outstanding(d) > 0)
		rte_distributor_process(d, NULL, 0);

	/* wait 10ms to allow all workers to drain the packets */
	rte_delay_us(10000);

	/*
	 * Send an empty burst to all workers to allow them to exit
	 * gracefully, should they need to.
	 */
	rte_distributor_process(d, NULL, 0);

	for (wkr = 0; wkr < d->num_workers; wkr++)
		handle_returns(d, wkr);

	return flushed;
}

/* clears the internal returns array in the distributor */
void
rte_distributor_clear_returns(struct rte_distributor *d)
{
	unsigned int wkr;

	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
		/* Call the old API */
		rte_distributor_clear_returns_single(d->d_single);
		return;
	}

	/* throw away returns, so workers can exit */
	for (wkr = 0; wkr < d->num_workers; wkr++)
		/* Sync with worker. Release retptrs. */
		__atomic_store_n(&(d->bufs[wkr].retptr64[0]), 0,
				__ATOMIC_RELEASE);
}

/* creates a distributor instance */
struct rte_distributor *
rte_distributor_create(const char *name,
		unsigned int socket_id,
		unsigned int num_workers,
		unsigned int alg_type)
{
	struct rte_distributor *d;
	struct rte_dist_burst_list *dist_burst_list;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	unsigned int i;

	/* TODO Reorganise function properly around RTE_DIST_ALG_SINGLE/BURST */

	/* compilation-time checks */
	RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);

	if (name == NULL || num_workers >=
		(unsigned int)RTE_MIN(RTE_DISTRIB_MAX_WORKERS, RTE_MAX_LCORE)) {
		rte_errno = EINVAL;
		return NULL;
	}

	if (alg_type == RTE_DIST_ALG_SINGLE) {
		d = malloc(sizeof(struct rte_distributor));
		if (d == NULL) {
			rte_errno = ENOMEM;
			return NULL;
		}
		d->d_single = rte_distributor_create_single(name,
				socket_id, num_workers);
		if (d->d_single == NULL) {
			free(d);
			/* rte_errno will have been set */
			return NULL;
		}
		d->alg_type = alg_type;
		return d;
	}

	snprintf(mz_name, sizeof(mz_name), RTE_DISTRIB_PREFIX"%s", name);
	mz = rte_memzone_reserve(mz_name, sizeof(*d), socket_id, NO_FLAGS);
	if (mz == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	d = mz->addr;
	strlcpy(d->name, name, sizeof(d->name));
	d->num_workers = num_workers;
	d->alg_type = alg_type;

	d->dist_match_fn = RTE_DIST_MATCH_SCALAR;
#if defined(RTE_ARCH_X86)
	d->dist_match_fn = RTE_DIST_MATCH_VECTOR;
#endif

	/*
	 * Set up the backlog tags so they're pointing at the second cache
	 * line for performance during flow matching
	 */
	for (i = 0; i < num_workers; i++)
		d->backlog[i].tags = &d->in_flight_tags[i][RTE_DIST_BURST_SIZE];

	dist_burst_list = RTE_TAILQ_CAST(rte_dist_burst_tailq.head,
					  rte_dist_burst_list);

	rte_mcfg_tailq_write_lock();
	TAILQ_INSERT_TAIL(dist_burst_list, d, next);
	rte_mcfg_tailq_write_unlock();

	return d;
}
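
/*
 * Illustrative creation call (a sketch; error handling and lcore setup are
 * application responsibilities):
 *
 *	struct rte_distributor *d = rte_distributor_create("PKT_DIST",
 *			rte_socket_id(), rte_lcore_count() - 1,
 *			RTE_DIST_ALG_BURST);
 *	if (d == NULL)
 *		rte_exit(EXIT_FAILURE, "Cannot create distributor\n");
 */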