/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>
#include <string.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_cycles.h>
#include <rte_compat.h>
#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>
#include <rte_pause.h>
#include <rte_tailq.h>

#include "rte_distributor_private.h"
#include "rte_distributor.h"
#include "rte_distributor_v20.h"
#include "rte_distributor_v1705.h"

TAILQ_HEAD(rte_dist_burst_list, rte_distributor);

static struct rte_tailq_elem rte_dist_burst_tailq = {
	.name = "RTE_DIST_BURST",
};
EAL_REGISTER_TAILQ(rte_dist_burst_tailq)
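
/*
 * Note: every burst-mode instance created by rte_distributor_create_v1705()
 * below is linked into this EAL-registered tailq (under the shared tailq
 * write lock), so all distributor instances in the process can be tracked.
 */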
/**** APIs called by workers ****/

/**** Burst Packet APIs called by workers ****/
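
/*
 * Handshake layout used by the burst APIs below (as used in this file):
 * each 64-bit slot in bufptr64[]/retptr64[] carries an mbuf pointer shifted
 * left by RTE_DISTRIB_FLAG_BITS, with the low bits holding the
 * RTE_DISTRIB_GET_BUF / RTE_DISTRIB_RETURN_BUF / RTE_DISTRIB_VALID_BUF
 * flags. Element 0 of each array doubles as the handshake word: the owner
 * toggles GET_BUF with release semantics and the peer polls it with acquire
 * semantics before touching the rest of the cache line.
 */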
void
rte_distributor_request_pkt_v1705(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **oldpkt,
		unsigned int count)
{
	struct rte_distributor_buffer *buf = &(d->bufs[worker_id]);
	unsigned int i;

	volatile int64_t *retptr64;

	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
		rte_distributor_request_pkt_v20(d->d_v20,
			worker_id, oldpkt[0]);
		return;
	}

	retptr64 = &(buf->retptr64[0]);
	/* Spin while handshake bits are set (scheduler clears them).
	 * Sync with distributor on GET_BUF flag.
	 */
	while (unlikely(__atomic_load_n(retptr64, __ATOMIC_ACQUIRE)
			& RTE_DISTRIB_GET_BUF)) {
		rte_pause();
		uint64_t t = rte_rdtsc() + 100;

		while (rte_rdtsc() < t)
			rte_pause();
	}

	/*
	 * OK, if we've got here, then the scheduler has just cleared the
	 * handshake bits. Populate the retptrs with returning packets.
	 */
	for (i = count; i < RTE_DIST_BURST_SIZE; i++)
		buf->retptr64[i] = 0;

	/* Set the Return bit for each packet returned */
	for (i = count; i-- > 0; )
		buf->retptr64[i] =
			(((int64_t)(uintptr_t)(oldpkt[i])) <<
			RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_RETURN_BUF;

	/*
	 * Finally, set the GET_BUF flag to signal to the distributor that
	 * the cache line is ready for processing.
	 * Sync with distributor to release retptrs.
	 */
	__atomic_store_n(retptr64, *retptr64 | RTE_DISTRIB_GET_BUF,
			__ATOMIC_RELEASE);
}
BIND_DEFAULT_SYMBOL(rte_distributor_request_pkt, _v1705, 17.05);
MAP_STATIC_SYMBOL(void rte_distributor_request_pkt(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **oldpkt,
		unsigned int count),
		rte_distributor_request_pkt_v1705);
int
rte_distributor_poll_pkt_v1705(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **pkts)
{
	struct rte_distributor_buffer *buf = &d->bufs[worker_id];
	int64_t ret;
	int count = 0;
	unsigned int i;

	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
		pkts[0] = rte_distributor_poll_pkt_v20(d->d_v20, worker_id);
		return (pkts[0]) ? 1 : 0;
	}

	/* If the GET_BUF bit is still set, the distributor has not refilled
	 * the buffer yet, so return "no packets available".
	 * Sync with distributor to acquire bufptrs.
	 */
	if (__atomic_load_n(&(buf->bufptr64[0]), __ATOMIC_ACQUIRE)
		& RTE_DISTRIB_GET_BUF)
		return -1;

	/* since bufptr64 is signed, this should be an arithmetic shift */
	for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
		if (likely(buf->bufptr64[i] & RTE_DISTRIB_VALID_BUF)) {
			ret = buf->bufptr64[i] >> RTE_DISTRIB_FLAG_BITS;
			pkts[count++] = (struct rte_mbuf *)((uintptr_t)(ret));
		}
	}

	/*
	 * So now we've got the contents of the cache line into an array of
	 * mbuf pointers, so toggle the bit so the scheduler can start working
	 * on the next cache line while we're working.
	 * Sync with distributor on GET_BUF flag. Release bufptrs.
	 */
	__atomic_store_n(&(buf->bufptr64[0]),
		buf->bufptr64[0] | RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);

	return count;
}
BIND_DEFAULT_SYMBOL(rte_distributor_poll_pkt, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_distributor_poll_pkt(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **pkts),
		rte_distributor_poll_pkt_v1705);
int
rte_distributor_get_pkt_v1705(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **pkts,
		struct rte_mbuf **oldpkt, unsigned int return_count)
{
	int count;

	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
		if (return_count <= 1) {
			pkts[0] = rte_distributor_get_pkt_v20(d->d_v20,
				worker_id, oldpkt[0]);
			return (pkts[0]) ? 1 : 0;
		} else
			return -EINVAL;
	}

	rte_distributor_request_pkt(d, worker_id, oldpkt, return_count);

	count = rte_distributor_poll_pkt(d, worker_id, pkts);
	while (count == -1) {
		uint64_t t = rte_rdtsc() + 100;

		while (rte_rdtsc() < t)
			rte_pause();

		count = rte_distributor_poll_pkt(d, worker_id, pkts);
	}
	return count;
}
BIND_DEFAULT_SYMBOL(rte_distributor_get_pkt, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_distributor_get_pkt(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **pkts,
		struct rte_mbuf **oldpkt, unsigned int return_count),
		rte_distributor_get_pkt_v1705);
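
/*
 * Illustrative worker loop (sketch only, not part of the library): a worker
 * thread typically hands back the packets it has just finished while asking
 * for a new burst, e.g.
 *
 *	struct rte_mbuf *pkts[8];   // at least RTE_DIST_BURST_SIZE entries
 *	unsigned int num = 0;
 *	while (!quit) {             // quit is an application-side flag
 *		num = rte_distributor_get_pkt(d, worker_id, pkts, pkts, num);
 *		// ... work on the num mbufs in pkts[] ...
 *	}
 */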
int
rte_distributor_return_pkt_v1705(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **oldpkt, int num)
{
	struct rte_distributor_buffer *buf = &d->bufs[worker_id];
	unsigned int i;

	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
		if (num == 1)
			return rte_distributor_return_pkt_v20(d->d_v20,
				worker_id, oldpkt[0]);
		else
			return -EINVAL;
	}

	/* Sync with distributor to acquire retptrs */
	__atomic_thread_fence(__ATOMIC_ACQUIRE);
	for (i = 0; i < RTE_DIST_BURST_SIZE; i++)
		/* Switch off the return bit first */
		buf->retptr64[i] &= ~RTE_DISTRIB_RETURN_BUF;

	for (i = num; i-- > 0; )
		buf->retptr64[i] = (((int64_t)(uintptr_t)oldpkt[i]) <<
			RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_RETURN_BUF;

	/* Set the GET_BUF bit even if we got no returns.
	 * Sync with distributor on GET_BUF flag. Release retptrs.
	 */
	__atomic_store_n(&(buf->retptr64[0]),
		buf->retptr64[0] | RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);

	return 0;
}
BIND_DEFAULT_SYMBOL(rte_distributor_return_pkt, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_distributor_return_pkt(struct rte_distributor *d,
		unsigned int worker_id, struct rte_mbuf **oldpkt, int num),
		rte_distributor_return_pkt_v1705);
/**** APIs called on distributor core ****/

/* stores a packet returned from a worker inside the returns array */
static void
store_return(uintptr_t oldbuf, struct rte_distributor *d,
		unsigned int *ret_start, unsigned int *ret_count)
{
	if (!oldbuf)
		return;
	/* store returns in a circular buffer */
	d->returns.mbufs[(*ret_start + *ret_count) & RTE_DISTRIB_RETURNS_MASK]
			= (void *)oldbuf;
	*ret_start += (*ret_count == RTE_DISTRIB_RETURNS_MASK);
	*ret_count += (*ret_count != RTE_DISTRIB_RETURNS_MASK);
}
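
/*
 * Note on the index arithmetic above: while the ring still has room,
 * only *ret_count grows; once it reaches RTE_DISTRIB_RETURNS_MASK the
 * count stays fixed and *ret_start advances instead, so the oldest
 * stored return is overwritten by the newest one.
 */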
/*
 * Match the flow_ids (tags) of the incoming packets to the flow_ids
 * of the inflight packets (both inflight on the workers and in each worker
 * backlog). This will then allow us to pin those packets to the relevant
 * workers to give us our atomic flow pinning.
 */
static void
find_match_scalar(struct rte_distributor *d,
			uint16_t *data_ptr,
			uint16_t *output_ptr)
{
	struct rte_distributor_backlog *bl;
	uint16_t i, j, w;

	/*
	 * Function overview:
	 * 1. Loop through all worker ID's
	 * 2. Compare the current inflights to the incoming tags
	 * 3. Compare the current backlog to the incoming tags
	 * 4. Add any matches to the output
	 */

	for (j = 0 ; j < RTE_DIST_BURST_SIZE; j++)
		output_ptr[j] = 0;

	for (i = 0; i < d->num_workers; i++) {
		bl = &d->backlog[i];

		for (j = 0; j < RTE_DIST_BURST_SIZE ; j++)
			for (w = 0; w < RTE_DIST_BURST_SIZE; w++)
				if (d->in_flight_tags[i][j] == data_ptr[w]) {
					output_ptr[w] = i + 1;
					break;
				}
		for (j = 0; j < RTE_DIST_BURST_SIZE; j++)
			for (w = 0; w < RTE_DIST_BURST_SIZE; w++)
				if (bl->tags[j] == data_ptr[w]) {
					output_ptr[w] = i + 1;
					break;
				}
	}

	/*
	 * At this stage, the output contains 8 16-bit values, with
	 * each non-zero value holding the worker ID (plus one) to which
	 * the corresponding flow is pinned.
	 */
}
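
/*
 * Example (illustrative): with RTE_DIST_BURST_SIZE == 8, if incoming tag
 * data_ptr[2] is already in flight on (or backlogged for) worker 0, then
 * output_ptr[2] ends up as 1; slots for unmatched tags stay 0 and are
 * assigned to a worker later by the process function.
 */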
/*
 * When the handshake bits indicate that there are packets coming
 * back from the worker, this function is called to copy and store
 * the valid returned pointers (store_return).
 */
static unsigned int
handle_returns(struct rte_distributor *d, unsigned int wkr)
{
	struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
	uintptr_t oldbuf;
	unsigned int ret_start = d->returns.start,
			ret_count = d->returns.count;
	unsigned int count = 0;
	unsigned int i;

	/* Sync on GET_BUF flag. Acquire retptrs. */
	if (__atomic_load_n(&(buf->retptr64[0]), __ATOMIC_ACQUIRE)
		& RTE_DISTRIB_GET_BUF) {
		for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
			if (buf->retptr64[i] & RTE_DISTRIB_RETURN_BUF) {
				oldbuf = ((uintptr_t)(buf->retptr64[i] >>
					RTE_DISTRIB_FLAG_BITS));
				/* store returns in a circular buffer */
				store_return(oldbuf, d, &ret_start, &ret_count);
				count++;
				buf->retptr64[i] &= ~RTE_DISTRIB_RETURN_BUF;
			}
		}
		d->returns.start = ret_start;
		d->returns.count = ret_count;
		/* Clear for the worker to populate with more returns.
		 * Sync with worker on GET_BUF flag. Release retptrs.
		 */
		__atomic_store_n(&(buf->retptr64[0]), 0, __ATOMIC_RELEASE);
	}

	return count;
}
/*
 * This function releases a burst (cache line) to a worker.
 * It is called from the process function when a cacheline is
 * full to make room for more packets for that worker, or when
 * all packets have been assigned to bursts and need to be flushed
 * to the workers.
 * It also needs to wait for any outstanding packets from the worker
 * before sending out new packets.
 */
static unsigned int
release(struct rte_distributor *d, unsigned int wkr)
{
	struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
	unsigned int i;

	/* Sync with worker on GET_BUF flag */
	while (!(__atomic_load_n(&(d->bufs[wkr].bufptr64[0]), __ATOMIC_ACQUIRE)
		& RTE_DISTRIB_GET_BUF))
		rte_pause();

	handle_returns(d, wkr);

	buf->count = 0;

	for (i = 0; i < d->backlog[wkr].count; i++) {
		d->bufs[wkr].bufptr64[i] = d->backlog[wkr].pkts[i] |
				RTE_DISTRIB_GET_BUF | RTE_DISTRIB_VALID_BUF;
		d->in_flight_tags[wkr][i] = d->backlog[wkr].tags[i];
	}
	buf->count = i;

	for ( ; i < RTE_DIST_BURST_SIZE ; i++) {
		buf->bufptr64[i] = RTE_DISTRIB_GET_BUF;
		d->in_flight_tags[wkr][i] = 0;
	}

	d->backlog[wkr].count = 0;

	/* Clear the GET bit.
	 * Sync with worker on GET_BUF flag. Release bufptrs.
	 */
	__atomic_store_n(&(buf->bufptr64[0]),
		buf->bufptr64[0] & ~RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);

	return buf->count;
}
/* process a set of packets to distribute them to workers */
int
rte_distributor_process_v1705(struct rte_distributor *d,
		struct rte_mbuf **mbufs, unsigned int num_mbufs)
{
	unsigned int next_idx = 0;
	static unsigned int wkr;
	struct rte_mbuf *next_mb = NULL;
	int64_t next_value = 0;
	uint16_t new_tag = 0;
	uint16_t flows[RTE_DIST_BURST_SIZE] __rte_cache_aligned;
	unsigned int i, j, w, wid;

	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
		/* Call the old API */
		return rte_distributor_process_v20(d->d_v20, mbufs, num_mbufs);
	}

	if (unlikely(num_mbufs == 0)) {
		/* Flush out all non-full cache-lines to workers. */
		for (wid = 0 ; wid < d->num_workers; wid++) {
			/* Sync with worker on GET_BUF flag. */
			if (__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
				__ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF) {
				release(d, wid);
				handle_returns(d, wid);
			}
		}
		return 0;
	}

	while (next_idx < num_mbufs) {
		uint16_t matches[RTE_DIST_BURST_SIZE];
		unsigned int pkts;

		/* Sync with worker on GET_BUF flag. */
		if (__atomic_load_n(&(d->bufs[wkr].bufptr64[0]),
			__ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)
			d->bufs[wkr].count = 0;

		if ((num_mbufs - next_idx) < RTE_DIST_BURST_SIZE)
			pkts = num_mbufs - next_idx;
		else
			pkts = RTE_DIST_BURST_SIZE;

		for (i = 0; i < pkts; i++) {
			if (mbufs[next_idx + i]) {
				/* flows have to be non-zero */
				flows[i] = mbufs[next_idx + i]->hash.usr | 1;
			} else
				flows[i] = 0;
		}
		for (; i < RTE_DIST_BURST_SIZE; i++)
			flows[i] = 0;

		switch (d->dist_match_fn) {
		case RTE_DIST_MATCH_VECTOR:
			find_match_vec(d, &flows[0], &matches[0]);
			break;
		default:
			find_match_scalar(d, &flows[0], &matches[0]);
		}

		/*
		 * The matches array now contains the intended worker ID (+1)
		 * of the incoming packets. Any zeroes need to be assigned
		 * workers.
		 */

		for (j = 0; j < pkts; j++) {

			next_mb = mbufs[next_idx++];
			next_value = (((int64_t)(uintptr_t)next_mb) <<
					RTE_DISTRIB_FLAG_BITS);
			/*
			 * The user is advised to set the tag value for each
			 * mbuf before calling rte_distributor_process.
			 * User defined tags are used to identify flows,
			 * or sessions.
			 */
			/* flows MUST be non-zero */
			new_tag = (uint16_t)(next_mb->hash.usr) | 1;

			/*
			 * Uncommenting the next line will cause the find_match
			 * function to be optimized out, making this function
			 * do parallel (non-atomic) distribution
			 */
			/* matches[j] = 0; */

			if (matches[j]) {
				struct rte_distributor_backlog *bl =
						&d->backlog[matches[j]-1];
				if (unlikely(bl->count ==
						RTE_DIST_BURST_SIZE)) {
					release(d, matches[j]-1);
				}

				/* Add to worker that already has flow */
				unsigned int idx = bl->count++;

				bl->tags[idx] = new_tag;
				bl->pkts[idx] = next_value;

			} else {
				struct rte_distributor_backlog *bl =
						&d->backlog[wkr];
				if (unlikely(bl->count ==
						RTE_DIST_BURST_SIZE)) {
					release(d, wkr);
				}

				/* Add to the current worker */
				unsigned int idx = bl->count++;

				bl->tags[idx] = new_tag;
				bl->pkts[idx] = next_value;
				/*
				 * Now that we've just added an unpinned flow
				 * to a worker, we need to ensure that all
				 * other packets with that same flow will go
				 * to the same worker in this burst.
				 */
				for (w = j; w < pkts; w++)
					if (flows[w] == new_tag)
						matches[w] = wkr + 1;
			}
		}
		wkr++;
		if (wkr >= d->num_workers)
			wkr = 0;
	}

	/* Flush out all non-full cache-lines to workers. */
	for (wid = 0 ; wid < d->num_workers; wid++)
		/* Sync with worker on GET_BUF flag. */
		if ((__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
			__ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF))
			release(d, wid);

	return num_mbufs;
}
BIND_DEFAULT_SYMBOL(rte_distributor_process, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_distributor_process(struct rte_distributor *d,
		struct rte_mbuf **mbufs, unsigned int num_mbufs),
		rte_distributor_process_v1705);
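
/*
 * Illustrative distributor-core loop (sketch only, not part of the library):
 * the core that owns the distributor typically feeds it received packets and
 * periodically collects completed ones, e.g.
 *
 *	struct rte_mbuf *bufs[BURST];   // BURST and port are app choices
 *	struct rte_mbuf *done[BURST];
 *	uint16_t nb_rx = rte_eth_rx_burst(port, 0, bufs, BURST);
 *	rte_distributor_process(d, bufs, nb_rx);
 *	int nb_done = rte_distributor_returned_pkts(d, done, BURST);
 *	// ... transmit or free the nb_done mbufs in done[] ...
 */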
/* return to the caller, packets returned from workers */
int
rte_distributor_returned_pkts_v1705(struct rte_distributor *d,
		struct rte_mbuf **mbufs, unsigned int max_mbufs)
{
	struct rte_distributor_returned_pkts *returns = &d->returns;
	unsigned int retval = (max_mbufs < returns->count) ?
			max_mbufs : returns->count;
	unsigned int i;

	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
		/* Call the old API */
		return rte_distributor_returned_pkts_v20(d->d_v20,
				mbufs, max_mbufs);
	}

	for (i = 0; i < retval; i++) {
		unsigned int idx = (returns->start + i) &
				RTE_DISTRIB_RETURNS_MASK;

		mbufs[i] = returns->mbufs[idx];
	}
	returns->start += i;
	returns->count -= i;

	return retval;
}
BIND_DEFAULT_SYMBOL(rte_distributor_returned_pkts, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_distributor_returned_pkts(struct rte_distributor *d,
		struct rte_mbuf **mbufs, unsigned int max_mbufs),
		rte_distributor_returned_pkts_v1705);
/*
 * Return the number of packets in-flight in a distributor, i.e. packets
 * being worked on or queued up in a backlog.
 */
static inline unsigned int
total_outstanding(const struct rte_distributor *d)
{
	unsigned int wkr, total_outstanding = 0;

	for (wkr = 0; wkr < d->num_workers; wkr++)
		total_outstanding += d->backlog[wkr].count;

	return total_outstanding;
}
/*
 * Flush the distributor, so that there are no outstanding packets in flight
 * or queued up.
 */
int
rte_distributor_flush_v1705(struct rte_distributor *d)
{
	unsigned int flushed;
	unsigned int wkr;

	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
		/* Call the old API */
		return rte_distributor_flush_v20(d->d_v20);
	}

	flushed = total_outstanding(d);

	while (total_outstanding(d) > 0)
		rte_distributor_process(d, NULL, 0);

	/* wait 10ms to allow all workers to drain the pkts */
	rte_delay_us(10000);

	/*
	 * Send empty burst to all workers to allow them to exit
	 * gracefully, should they need to.
	 */
	rte_distributor_process(d, NULL, 0);

	for (wkr = 0; wkr < d->num_workers; wkr++)
		handle_returns(d, wkr);

	return flushed;
}
BIND_DEFAULT_SYMBOL(rte_distributor_flush, _v1705, 17.05);
MAP_STATIC_SYMBOL(int rte_distributor_flush(struct rte_distributor *d),
		rte_distributor_flush_v1705);
/* clears the internal returns array in the distributor */
void
rte_distributor_clear_returns_v1705(struct rte_distributor *d)
{
	unsigned int wkr;

	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
		/* Call the old API */
		rte_distributor_clear_returns_v20(d->d_v20);
		return;
	}

	/* throw away returns, so workers can exit */
	for (wkr = 0; wkr < d->num_workers; wkr++)
		/* Sync with worker. Release retptrs. */
		__atomic_store_n(&(d->bufs[wkr].retptr64[0]), 0,
				__ATOMIC_RELEASE);
}
BIND_DEFAULT_SYMBOL(rte_distributor_clear_returns, _v1705, 17.05);
MAP_STATIC_SYMBOL(void rte_distributor_clear_returns(struct rte_distributor *d),
		rte_distributor_clear_returns_v1705);
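
/*
 * Typical shutdown ordering (sketch, based on the calls above): the
 * distributor core calls rte_distributor_flush() to push out and collect
 * everything outstanding, then rte_distributor_clear_returns() so that any
 * worker still blocked handing back packets is released and can exit.
 */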
/* creates a distributor instance */
struct rte_distributor *
rte_distributor_create_v1705(const char *name,
		unsigned int socket_id,
		unsigned int num_workers,
		unsigned int alg_type)
{
	struct rte_distributor *d;
	struct rte_dist_burst_list *dist_burst_list;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	unsigned int i;

	/* TODO Reorganise function properly around RTE_DIST_ALG_SINGLE/BURST */

	/* compilation-time checks */
	RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);

	if (name == NULL || num_workers >=
		(unsigned int)RTE_MIN(RTE_DISTRIB_MAX_WORKERS, RTE_MAX_LCORE)) {
		rte_errno = EINVAL;
		return NULL;
	}

	if (alg_type == RTE_DIST_ALG_SINGLE) {
		d = malloc(sizeof(struct rte_distributor));
		if (d == NULL) {
			rte_errno = ENOMEM;
			return NULL;
		}
		d->d_v20 = rte_distributor_create_v20(name,
				socket_id, num_workers);
		if (d->d_v20 == NULL) {
			free(d);
			/* rte_errno will have been set */
			return NULL;
		}
		d->alg_type = alg_type;
		return d;
	}

	snprintf(mz_name, sizeof(mz_name), RTE_DISTRIB_PREFIX"%s", name);
	mz = rte_memzone_reserve(mz_name, sizeof(*d), socket_id, NO_FLAGS);
	if (mz == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	d = mz->addr;
	strlcpy(d->name, name, sizeof(d->name));
	d->num_workers = num_workers;
	d->alg_type = alg_type;

	d->dist_match_fn = RTE_DIST_MATCH_SCALAR;
#if defined(RTE_ARCH_X86)
	d->dist_match_fn = RTE_DIST_MATCH_VECTOR;
#endif

	/*
	 * Set up the backlog tags so they're pointing at the second cache
	 * line for performance during flow matching
	 */
	for (i = 0 ; i < num_workers ; i++)
		d->backlog[i].tags = &d->in_flight_tags[i][RTE_DIST_BURST_SIZE];

	dist_burst_list = RTE_TAILQ_CAST(rte_dist_burst_tailq.head,
					 rte_dist_burst_list);

	rte_mcfg_tailq_write_lock();
	TAILQ_INSERT_TAIL(dist_burst_list, d, next);
	rte_mcfg_tailq_write_unlock();

	return d;
}
BIND_DEFAULT_SYMBOL(rte_distributor_create, _v1705, 17.05);
MAP_STATIC_SYMBOL(struct rte_distributor *rte_distributor_create(
		const char *name, unsigned int socket_id,
		unsigned int num_workers, unsigned int alg_type),
		rte_distributor_create_v1705);
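
/*
 * Illustrative creation call (sketch only, values are application choices):
 *
 *	struct rte_distributor *d = rte_distributor_create("pkt_dist",
 *			rte_socket_id(), num_worker_lcores,
 *			RTE_DIST_ALG_BURST);
 *	if (d == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create distributor\n");
 */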