EXPORT_MAP := rte_distributor_version.map
# all sources are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) := rte_distributor_v20.c
+SRCS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) := rte_distributor_single.c
SRCS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += rte_distributor.c
ifeq ($(CONFIG_RTE_ARCH_X86),y)
SRCS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += rte_distributor_match_sse.c
* the next cache line to worker 0, we pad this out to three cache lines.
 * Only 64 bits of the memory are actually used, though.
*/
-union rte_distributor_buffer_v20 {
+union rte_distributor_buffer_single {
volatile int64_t bufptr64;
char pad[RTE_CACHE_LINE_SIZE*3];
} __rte_cache_aligned;
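The padding above keeps each worker's slot on its own cache lines so that no two workers false-share; only the first 64 bits carry data. A build-time check along these lines would pin the layout down (a sketch, not part of this patch):

    /* Sketch: the padded slot should span exactly three cache lines. */
    RTE_BUILD_BUG_ON(sizeof(union rte_distributor_buffer_single) !=
    		RTE_CACHE_LINE_SIZE * 3);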
struct rte_mbuf *mbufs[RTE_DISTRIB_MAX_RETURNS];
};
-struct rte_distributor_v20 {
- TAILQ_ENTRY(rte_distributor_v20) next; /**< Next in list. */
+struct rte_distributor_single {
+ TAILQ_ENTRY(rte_distributor_single) next; /**< Next in list. */
char name[RTE_DISTRIBUTOR_NAMESIZE]; /**< Name of the ring. */
unsigned int num_workers; /**< Number of workers polling */
struct rte_distributor_backlog backlog[RTE_DISTRIB_MAX_WORKERS];
- union rte_distributor_buffer_v20 bufs[RTE_DISTRIB_MAX_WORKERS];
+ union rte_distributor_buffer_single bufs[RTE_DISTRIB_MAX_WORKERS];
struct rte_distributor_returned_pkts returns;
};
enum rte_distributor_match_function dist_match_fn;
- struct rte_distributor_v20 *d_v20;
+ struct rte_distributor_single *d_single;
};
void
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-sources = files('rte_distributor.c', 'rte_distributor_v20.c')
+sources = files('rte_distributor.c', 'rte_distributor_single.c')
if arch_subdir == 'x86'
sources += files('rte_distributor_match_sse.c')
else
#include <rte_tailq.h>
#include "rte_distributor.h"
-#include "rte_distributor_v20.h"
+#include "rte_distributor_single.h"
#include "distributor_private.h"
TAILQ_HEAD(rte_dist_burst_list, rte_distributor);
volatile int64_t *retptr64;
if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
- rte_distributor_request_pkt_v20(d->d_v20,
+ rte_distributor_request_pkt_single(d->d_single,
worker_id, oldpkt[0]);
return;
}
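Every burst-API entry point below delegates the same way: when the instance was created with RTE_DIST_ALG_SINGLE, the call falls through to the renamed single-packet implementation. A hypothetical caller opting into that algorithm via the burst create API (a sketch; the name "dist_single" and the worker count are assumptions):

    /* Sketch: burst-API handle backed by the single-packet code path. */
    struct rte_distributor *d = rte_distributor_create("dist_single",
    		rte_socket_id(), 4 /* workers */, RTE_DIST_ALG_SINGLE);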
unsigned int i;
if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
- pkts[0] = rte_distributor_poll_pkt_v20(d->d_v20, worker_id);
+ pkts[0] = rte_distributor_poll_pkt_single(d->d_single,
+ worker_id);
return (pkts[0]) ? 1 : 0;
}
if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
if (return_count <= 1) {
- pkts[0] = rte_distributor_get_pkt_v20(d->d_v20,
+ pkts[0] = rte_distributor_get_pkt_single(d->d_single,
worker_id, oldpkt[0]);
return (pkts[0]) ? 1 : 0;
} else
if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
if (num == 1)
- return rte_distributor_return_pkt_v20(d->d_v20,
+ return rte_distributor_return_pkt_single(d->d_single,
worker_id, oldpkt[0]);
else
return -EINVAL;
if (d->alg_type == RTE_DIST_ALG_SINGLE) {
/* Call the old API */
- return rte_distributor_process_v20(d->d_v20, mbufs, num_mbufs);
+ return rte_distributor_process_single(d->d_single,
+ mbufs, num_mbufs);
}
if (unlikely(num_mbufs == 0)) {
if (d->alg_type == RTE_DIST_ALG_SINGLE) {
/* Call the old API */
- return rte_distributor_returned_pkts_v20(d->d_v20,
+ return rte_distributor_returned_pkts_single(d->d_single,
mbufs, max_mbufs);
}
if (d->alg_type == RTE_DIST_ALG_SINGLE) {
/* Call the old API */
- return rte_distributor_flush_v20(d->d_v20);
+ return rte_distributor_flush_single(d->d_single);
}
flushed = total_outstanding(d);
if (d->alg_type == RTE_DIST_ALG_SINGLE) {
/* Call the old API */
- rte_distributor_clear_returns_v20(d->d_v20);
+ rte_distributor_clear_returns_single(d->d_single);
return;
}
rte_errno = ENOMEM;
return NULL;
}
- d->d_v20 = rte_distributor_create_v20(name,
+ d->d_single = rte_distributor_create_single(name,
socket_id, num_workers);
- if (d->d_v20 == NULL) {
+ if (d->d_single == NULL) {
free(d);
/* rte_errno will have been set */
return NULL;
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <sys/queue.h>
+#include <string.h>
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_errno.h>
+#include <rte_function_versioning.h>
+#include <rte_string_fns.h>
+#include <rte_eal_memconfig.h>
+#include <rte_pause.h>
+#include <rte_tailq.h>
+
+#include "rte_distributor_single.h"
+#include "distributor_private.h"
+
+TAILQ_HEAD(rte_distributor_list, rte_distributor_single);
+
+static struct rte_tailq_elem rte_distributor_tailq = {
+ .name = "RTE_DISTRIBUTOR",
+};
+EAL_REGISTER_TAILQ(rte_distributor_tailq)
+
+/**** APIs called by workers ****/
+
+void
+rte_distributor_request_pkt_single(struct rte_distributor_single *d,
+ unsigned worker_id, struct rte_mbuf *oldpkt)
+{
+ union rte_distributor_buffer_single *buf = &d->bufs[worker_id];
+ int64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
+ | RTE_DISTRIB_GET_BUF;
+ while (unlikely(__atomic_load_n(&buf->bufptr64, __ATOMIC_RELAXED)
+ & RTE_DISTRIB_FLAGS_MASK))
+ rte_pause();
+
+ /* Sync with distributor on GET_BUF flag. */
+ __atomic_store_n(&(buf->bufptr64), req, __ATOMIC_RELEASE);
+}
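As a reading aid for the handshake above (an interpretation of the code, not a public API): bufptr64 packs the mbuf pointer into the high bits and the handshake flags into the low RTE_DISTRIB_FLAG_BITS bits, so a single 64-bit atomic carries both. Given some struct rte_mbuf *pkt:

    /* encode: what the worker publishes when requesting a packet */
    int64_t slot = ((int64_t)(uintptr_t)pkt << RTE_DISTRIB_FLAG_BITS)
    		| RTE_DISTRIB_GET_BUF;
    /* decode: recovering a handed-out pointer (arithmetic shift) */
    struct rte_mbuf *mb = (struct rte_mbuf *)(uintptr_t)
    		(slot >> RTE_DISTRIB_FLAG_BITS);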
+
+struct rte_mbuf *
+rte_distributor_poll_pkt_single(struct rte_distributor_single *d,
+ unsigned worker_id)
+{
+ union rte_distributor_buffer_single *buf = &d->bufs[worker_id];
+ /* Sync with distributor. Acquire bufptr64. */
+ if (__atomic_load_n(&buf->bufptr64, __ATOMIC_ACQUIRE)
+ & RTE_DISTRIB_GET_BUF)
+ return NULL;
+
+ /* since bufptr64 is signed, this should be an arithmetic shift */
+ int64_t ret = buf->bufptr64 >> RTE_DISTRIB_FLAG_BITS;
+ return (struct rte_mbuf *)((uintptr_t)ret);
+}
+
+struct rte_mbuf *
+rte_distributor_get_pkt_single(struct rte_distributor_single *d,
+ unsigned worker_id, struct rte_mbuf *oldpkt)
+{
+ struct rte_mbuf *ret;
+ rte_distributor_request_pkt_single(d, worker_id, oldpkt);
+ while ((ret = rte_distributor_poll_pkt_single(d, worker_id)) == NULL)
+ rte_pause();
+ return ret;
+}
+
+int
+rte_distributor_return_pkt_single(struct rte_distributor_single *d,
+ unsigned worker_id, struct rte_mbuf *oldpkt)
+{
+ union rte_distributor_buffer_single *buf = &d->bufs[worker_id];
+ uint64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
+ | RTE_DISTRIB_RETURN_BUF;
+ /* Sync with distributor on RETURN_BUF flag. */
+ __atomic_store_n(&(buf->bufptr64), req, __ATOMIC_RELEASE);
+ return 0;
+}
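Together, the worker-side calls above support a loop of the following shape (a sketch under assumed names; handle_packet() is a placeholder, not a DPDK function):

    static void
    worker_loop(struct rte_distributor_single *d, unsigned int worker_id)
    {
    	struct rte_mbuf *pkt = rte_distributor_get_pkt_single(d,
    			worker_id, NULL);
    	for (;;) {
    		handle_packet(pkt);	/* placeholder for real work */
    		/* return the finished packet and block for the next one */
    		pkt = rte_distributor_get_pkt_single(d, worker_id, pkt);
    	}
    }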
+
+/**** APIs called on distributor core ***/
+
+/* as name suggests, adds a packet to the backlog for a particular worker */
+static int
+add_to_backlog(struct rte_distributor_backlog *bl, int64_t item)
+{
+ if (bl->count == RTE_DISTRIB_BACKLOG_SIZE)
+ return -1;
+
+ bl->pkts[(bl->start + bl->count++) & (RTE_DISTRIB_BACKLOG_MASK)]
+ = item;
+ return 0;
+}
+
+/* takes the next packet for a worker off the backlog */
+static int64_t
+backlog_pop(struct rte_distributor_backlog *bl)
+{
+ bl->count--;
+ return bl->pkts[bl->start++ & RTE_DISTRIB_BACKLOG_MASK];
+}
+
+/* stores a packet returned from a worker inside the returns array */
+static inline void
+store_return(uintptr_t oldbuf, struct rte_distributor_single *d,
+ unsigned *ret_start, unsigned *ret_count)
+{
+ /* store returns in a circular buffer - code is branch-free */
+ d->returns.mbufs[(*ret_start + *ret_count) & RTE_DISTRIB_RETURNS_MASK]
+ = (void *)oldbuf;
+ *ret_start += (*ret_count == RTE_DISTRIB_RETURNS_MASK) & !!(oldbuf);
+ *ret_count += (*ret_count != RTE_DISTRIB_RETURNS_MASK) & !!(oldbuf);
+}
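A worked reading of the branch-free update above, assuming RTE_DISTRIB_MAX_RETURNS is 128 (mask 127): while the ring has room, each non-NULL oldbuf advances *ret_count; once the count reaches the mask, each further return advances *ret_start instead, overwriting the oldest entry; a NULL oldbuf moves neither counter, so the slot it wrote is simply rewritten next time.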
+
+static inline void
+handle_worker_shutdown(struct rte_distributor_single *d, unsigned int wkr)
+{
+ d->in_flight_tags[wkr] = 0;
+ d->in_flight_bitmask &= ~(1UL << wkr);
+ /* Sync with worker. Release bufptr64. */
+ __atomic_store_n(&(d->bufs[wkr].bufptr64), 0, __ATOMIC_RELEASE);
+ if (unlikely(d->backlog[wkr].count != 0)) {
+ /* On return of a packet, we need to move the
+ * queued packets for this core elsewhere.
+ * Easiest solution is to set things up for
+ * a recursive call. That will cause those
+ * packets to be queued up for the next free
+ * core, i.e. it will return as soon as a
+ * core becomes free to accept the first
+ * packet, as subsequent ones will be added to
+ * the backlog for that core.
+ */
+ struct rte_mbuf *pkts[RTE_DISTRIB_BACKLOG_SIZE];
+ unsigned i;
+ struct rte_distributor_backlog *bl = &d->backlog[wkr];
+
+ for (i = 0; i < bl->count; i++) {
+ unsigned idx = (bl->start + i) &
+ RTE_DISTRIB_BACKLOG_MASK;
+ pkts[i] = (void *)((uintptr_t)(bl->pkts[idx] >>
+ RTE_DISTRIB_FLAG_BITS));
+ }
+ /* recursive call.
+ * Note that the tags were set before first level call
+ * to rte_distributor_process.
+ */
+ rte_distributor_process_single(d, pkts, i);
+ bl->count = bl->start = 0;
+ }
+}
+
+/* This function is called when process() is invoked without any new
+ * packets. It goes through all the workers, collecting returned packets
+ * and handing out any backlog, to perform a partial flush.
+ */
+static int
+process_returns(struct rte_distributor_single *d)
+{
+ unsigned wkr;
+ unsigned flushed = 0;
+ unsigned ret_start = d->returns.start,
+ ret_count = d->returns.count;
+
+ for (wkr = 0; wkr < d->num_workers; wkr++) {
+ uintptr_t oldbuf = 0;
+ /* Sync with worker. Acquire bufptr64. */
+ const int64_t data = __atomic_load_n(&(d->bufs[wkr].bufptr64),
+ __ATOMIC_ACQUIRE);
+
+ if (data & RTE_DISTRIB_GET_BUF) {
+ flushed++;
+ if (d->backlog[wkr].count)
+ /* Sync with worker. Release bufptr64. */
+ __atomic_store_n(&(d->bufs[wkr].bufptr64),
+ backlog_pop(&d->backlog[wkr]),
+ __ATOMIC_RELEASE);
+ else {
+ /* Sync with worker on GET_BUF flag. */
+ __atomic_store_n(&(d->bufs[wkr].bufptr64),
+ RTE_DISTRIB_GET_BUF,
+ __ATOMIC_RELEASE);
+ d->in_flight_tags[wkr] = 0;
+ d->in_flight_bitmask &= ~(1UL << wkr);
+ }
+ oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
+ } else if (data & RTE_DISTRIB_RETURN_BUF) {
+ handle_worker_shutdown(d, wkr);
+ oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
+ }
+
+ store_return(oldbuf, d, &ret_start, &ret_count);
+ }
+
+ d->returns.start = ret_start;
+ d->returns.count = ret_count;
+
+ return flushed;
+}
+
+/* process a set of packets to distribute them to workers */
+int
+rte_distributor_process_single(struct rte_distributor_single *d,
+ struct rte_mbuf **mbufs, unsigned num_mbufs)
+{
+ unsigned next_idx = 0;
+ unsigned wkr = 0;
+ struct rte_mbuf *next_mb = NULL;
+ int64_t next_value = 0;
+ uint32_t new_tag = 0;
+ unsigned ret_start = d->returns.start,
+ ret_count = d->returns.count;
+
+ if (unlikely(num_mbufs == 0))
+ return process_returns(d);
+
+ while (next_idx < num_mbufs || next_mb != NULL) {
+ uintptr_t oldbuf = 0;
+ /* Sync with worker. Acquire bufptr64. */
+ int64_t data = __atomic_load_n(&(d->bufs[wkr].bufptr64),
+ __ATOMIC_ACQUIRE);
+
+ if (!next_mb) {
+ next_mb = mbufs[next_idx++];
+ next_value = (((int64_t)(uintptr_t)next_mb)
+ << RTE_DISTRIB_FLAG_BITS);
+ /*
+			 * The user is advised to set the tag value for each
+			 * mbuf before calling rte_distributor_process().
+			 * User-defined tags are used to identify flows or
+			 * sessions.
+ */
+ new_tag = next_mb->hash.usr;
+
+ /*
+ * Note that if RTE_DISTRIB_MAX_WORKERS is larger than 64
+ * then the size of match has to be expanded.
+ */
+ uint64_t match = 0;
+ unsigned i;
+ /*
+			 * To scan for a match, use "xor" and "not" to get a
+			 * 0/1 value, then shift and OR the results into a
+			 * single "match" variable, where a set bit indicates
+			 * a match for the worker at that bit position.
+ */
+ for (i = 0; i < d->num_workers; i++)
+				/* cast: "!" yields int; shift in 64 bits */
+				match |= ((uint64_t)
+					!(d->in_flight_tags[i] ^ new_tag)
+					<< i);
+
+			/* Only workers marked in-flight can yield a match */
+ match &= d->in_flight_bitmask;
+
+ if (match) {
+ next_mb = NULL;
+ unsigned worker = __builtin_ctzl(match);
+ if (add_to_backlog(&d->backlog[worker],
+ next_value) < 0)
+ next_idx--;
+ }
+ }
+
+ if ((data & RTE_DISTRIB_GET_BUF) &&
+ (d->backlog[wkr].count || next_mb)) {
+
+ if (d->backlog[wkr].count)
+ /* Sync with worker. Release bufptr64. */
+ __atomic_store_n(&(d->bufs[wkr].bufptr64),
+ backlog_pop(&d->backlog[wkr]),
+ __ATOMIC_RELEASE);
+
+ else {
+ /* Sync with worker. Release bufptr64. */
+ __atomic_store_n(&(d->bufs[wkr].bufptr64),
+ next_value,
+ __ATOMIC_RELEASE);
+ d->in_flight_tags[wkr] = new_tag;
+ d->in_flight_bitmask |= (1UL << wkr);
+ next_mb = NULL;
+ }
+ oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
+ } else if (data & RTE_DISTRIB_RETURN_BUF) {
+ handle_worker_shutdown(d, wkr);
+ oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
+ }
+
+ /* store returns in a circular buffer */
+ store_return(oldbuf, d, &ret_start, &ret_count);
+
+ if (++wkr == d->num_workers)
+ wkr = 0;
+ }
+ /* to finish, check all workers for backlog and schedule work for them
+ * if they are ready */
+ for (wkr = 0; wkr < d->num_workers; wkr++)
+ if (d->backlog[wkr].count &&
+ /* Sync with worker. Acquire bufptr64. */
+ (__atomic_load_n(&(d->bufs[wkr].bufptr64),
+ __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)) {
+
+ int64_t oldbuf = d->bufs[wkr].bufptr64 >>
+ RTE_DISTRIB_FLAG_BITS;
+
+ store_return(oldbuf, d, &ret_start, &ret_count);
+
+ /* Sync with worker. Release bufptr64. */
+ __atomic_store_n(&(d->bufs[wkr].bufptr64),
+ backlog_pop(&d->backlog[wkr]),
+ __ATOMIC_RELEASE);
+ }
+
+ d->returns.start = ret_start;
+ d->returns.count = ret_count;
+ return num_mbufs;
+}
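For orientation, the process/returns pair is typically driven from one lcore in a loop like this (a sketch; d is a created instance, and rx_burst() is a placeholder for whatever supplies packets, not a DPDK call):

    struct rte_mbuf *bufs[64], *rets[64];
    unsigned int i, nb;
    int nr;

    for (;;) {
    	nb = rx_burst(bufs, 64);		/* placeholder source */
    	rte_distributor_process_single(d, bufs, nb);
    	nr = rte_distributor_returned_pkts_single(d, rets, 64);
    	for (i = 0; i < (unsigned int)nr; i++)
    		rte_pktmbuf_free(rets[i]);	/* done with returns */
    }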
+
+/* return to the caller, packets returned from workers */
+int
+rte_distributor_returned_pkts_single(struct rte_distributor_single *d,
+ struct rte_mbuf **mbufs, unsigned max_mbufs)
+{
+ struct rte_distributor_returned_pkts *returns = &d->returns;
+ unsigned retval = (max_mbufs < returns->count) ?
+ max_mbufs : returns->count;
+ unsigned i;
+
+ for (i = 0; i < retval; i++) {
+ unsigned idx = (returns->start + i) & RTE_DISTRIB_RETURNS_MASK;
+ mbufs[i] = returns->mbufs[idx];
+ }
+ returns->start += i;
+ returns->count -= i;
+
+ return retval;
+}
+
+/* return the number of packets in-flight in a distributor, i.e. packets
+ * being worked on or queued up in a backlog.
+ */
+static inline unsigned
+total_outstanding(const struct rte_distributor_single *d)
+{
+ unsigned wkr, total_outstanding;
+
+ total_outstanding = __builtin_popcountl(d->in_flight_bitmask);
+
+ for (wkr = 0; wkr < d->num_workers; wkr++)
+ total_outstanding += d->backlog[wkr].count;
+
+ return total_outstanding;
+}
+
+/* flush the distributor, so that there are no outstanding packets in flight or
+ * queued up. */
+int
+rte_distributor_flush_single(struct rte_distributor_single *d)
+{
+ const unsigned flushed = total_outstanding(d);
+
+ while (total_outstanding(d) > 0)
+ rte_distributor_process_single(d, NULL, 0);
+
+ return flushed;
+}
+
+/* clears the internal returns array in the distributor */
+void
+rte_distributor_clear_returns_single(struct rte_distributor_single *d)
+{
+ d->returns.start = d->returns.count = 0;
+#ifndef __OPTIMIZE__
+ memset(d->returns.mbufs, 0, sizeof(d->returns.mbufs));
+#endif
+}
+
+/* creates a distributor instance */
+struct rte_distributor_single *
+rte_distributor_create_single(const char *name,
+ unsigned socket_id,
+ unsigned num_workers)
+{
+ struct rte_distributor_single *d;
+ struct rte_distributor_list *distributor_list;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+
+ /* compilation-time checks */
+ RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0);
+ RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);
+ RTE_BUILD_BUG_ON(RTE_DISTRIB_MAX_WORKERS >
+ sizeof(d->in_flight_bitmask) * CHAR_BIT);
+
+ if (name == NULL || num_workers >= RTE_DISTRIB_MAX_WORKERS) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ snprintf(mz_name, sizeof(mz_name), RTE_DISTRIB_PREFIX"%s", name);
+ mz = rte_memzone_reserve(mz_name, sizeof(*d), socket_id, NO_FLAGS);
+ if (mz == NULL) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ d = mz->addr;
+ strlcpy(d->name, name, sizeof(d->name));
+ d->num_workers = num_workers;
+
+ distributor_list = RTE_TAILQ_CAST(rte_distributor_tailq.head,
+ rte_distributor_list);
+
+ rte_mcfg_tailq_write_lock();
+ TAILQ_INSERT_TAIL(distributor_list, d, next);
+ rte_mcfg_tailq_write_unlock();
+
+ return d;
+}
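A minimal creation sketch against the renamed function (the name "pkt_dist" and the worker count are assumptions; error handling as shown):

    struct rte_distributor_single *ds =
    	rte_distributor_create_single("pkt_dist", rte_socket_id(),
    			4 /* workers */);
    if (ds == NULL)
    	rte_exit(EXIT_FAILURE, "distributor: %s\n",
    			rte_strerror(rte_errno));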
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _RTE_DISTRIB_SINGLE_H_
+#define _RTE_DISTRIB_SINGLE_H_
+
+/**
+ * @file
+ * RTE distributor
+ *
+ * The distributor is a component which is designed to pass packets
+ * one-at-a-time to workers, with dynamic load balancing.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_DISTRIBUTOR_NAMESIZE 32 /**< Length of name for instance */
+
+struct rte_distributor_single;
+struct rte_mbuf;
+
+/**
+ * Function to create a new distributor instance
+ *
+ * Reserves the memory needed for the distributor operation and
+ * initializes the distributor to work with the configured number of workers.
+ *
+ * @param name
+ * The name to be given to the distributor instance.
+ * @param socket_id
+ * The NUMA node on which the memory is to be allocated
+ * @param num_workers
+ * The maximum number of workers that will request packets from this
+ * distributor
+ * @return
+ * The newly created distributor instance
+ */
+struct rte_distributor_single *
+rte_distributor_create_single(const char *name, unsigned int socket_id,
+ unsigned int num_workers);
+
+/* *** APIs to be called on the distributor lcore *** */
+/*
+ * The following APIs are the public APIs which are designed for use on a
+ * single lcore which acts as the distributor lcore for a given distributor
+ * instance. These functions cannot be called on multiple cores simultaneously
+ * without using locking to protect access to the internals of the distributor.
+ *
+ * NOTE: a given lcore cannot act as both a distributor lcore and a worker lcore
+ * for the same distributor instance, otherwise deadlock will result.
+ */
+
+/**
+ * Process a set of packets by distributing them among workers that request
+ * packets. The distributor will ensure that no two packets that have the
+ * same flow id, or tag, in the mbuf will be processed at the same time.
+ *
+ * The user is advised to set the tag for each mbuf before calling this
+ * function; if no tag is set, its value depends on the driver implementation
+ * and configuration.
+ *
+ * This is not multi-thread safe and should only be called on a single lcore.
+ *
+ * @param d
+ * The distributor instance to be used
+ * @param mbufs
+ * The mbufs to be distributed
+ * @param num_mbufs
+ * The number of mbufs in the mbufs array
+ * @return
+ * The number of mbufs processed.
+ */
+int
+rte_distributor_process_single(struct rte_distributor_single *d,
+ struct rte_mbuf **mbufs, unsigned int num_mbufs);
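The tag referred to above lives in the mbuf's user hash field. A hypothetical tagging step before the call (flow_id() is a placeholder):

    for (i = 0; i < num_mbufs; i++)
    	mbufs[i]->hash.usr = flow_id(mbufs[i]);	/* placeholder */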
+
+/**
+ * Get a set of mbufs that have been returned to the distributor by workers
+ *
+ * This should only be called on the same lcore as
+ * rte_distributor_process_single()
+ *
+ * @param d
+ * The distributor instance to be used
+ * @param mbufs
+ * The mbufs pointer array to be filled in
+ * @param max_mbufs
+ * The size of the mbufs array
+ * @return
+ * The number of mbufs returned in the mbufs array.
+ */
+int
+rte_distributor_returned_pkts_single(struct rte_distributor_single *d,
+ struct rte_mbuf **mbufs, unsigned int max_mbufs);
+
+/**
+ * Flush the distributor component, so that there are no in-flight or
+ * backlogged packets awaiting processing
+ *
+ * This should only be called on the same lcore as
+ * rte_distributor_process_single()
+ *
+ * @param d
+ * The distributor instance to be used
+ * @return
+ * The number of queued/in-flight packets that were completed by this call.
+ */
+int
+rte_distributor_flush_single(struct rte_distributor_single *d);
+
+/**
+ * Clears the array of returned packets used as the source for the
+ * rte_distributor_returned_pkts_single() API call.
+ *
+ * This should only be called on the same lcore as
+ * rte_distributor_process_single()
+ *
+ * @param d
+ * The distributor instance to be used
+ */
+void
+rte_distributor_clear_returns_single(struct rte_distributor_single *d);
+
+/* *** APIs to be called on the worker lcores *** */
+/*
+ * The following APIs are the public APIs which are designed for use on
+ * multiple lcores which act as workers for a distributor. Each lcore should use
+ * a unique worker id when requesting packets.
+ *
+ * NOTE: a given lcore cannot act as both a distributor lcore and a worker lcore
+ * for the same distributor instance, otherwise deadlock will result.
+ */
+
+/**
+ * API called by a worker to get a new packet to process. Any previous packet
+ * given to the worker is assumed to have completed processing, and may be
+ * optionally returned to the distributor via the oldpkt parameter.
+ *
+ * @param d
+ * The distributor instance to be used
+ * @param worker_id
+ *   The worker instance number to use - must be less than num_workers passed
+ * at distributor creation time.
+ * @param oldpkt
+ * The previous packet, if any, being processed by the worker
+ *
+ * @return
+ * A new packet to be processed by the worker thread.
+ */
+struct rte_mbuf *
+rte_distributor_get_pkt_single(struct rte_distributor_single *d,
+ unsigned int worker_id, struct rte_mbuf *oldpkt);
+
+/**
+ * API called by a worker to return a completed packet without requesting a
+ * new packet, for example, because a worker thread is shutting down
+ *
+ * @param d
+ * The distributor instance to be used
+ * @param worker_id
+ *   The worker instance number to use - must be less than num_workers passed
+ * at distributor creation time.
+ * @param mbuf
+ *   The previous packet being processed by the worker
+ * @return
+ *   0 on success.
+ */
+int
+rte_distributor_return_pkt_single(struct rte_distributor_single *d,
+ unsigned int worker_id, struct rte_mbuf *mbuf);
+
+/**
+ * API called by a worker to request a new packet to process.
+ * Any previous packet given to the worker is assumed to have completed
+ * processing, and may be optionally returned to the distributor via
+ * the oldpkt parameter.
+ * Unlike rte_distributor_get_pkt_single(), this function does not wait for
+ * a new
+ * packet to be provided by the distributor.
+ *
+ * NOTE: after calling this function, rte_distributor_poll_pkt_single()
+ * should be used to poll for the packet requested. The
+ * rte_distributor_get_pkt_single() API should *not* be used to try to
+ * retrieve the new packet.
+ *
+ * @param d
+ * The distributor instance to be used
+ * @param worker_id
+ *   The worker instance number to use - must be less than num_workers passed
+ * at distributor creation time.
+ * @param oldpkt
+ * The previous packet, if any, being processed by the worker
+ */
+void
+rte_distributor_request_pkt_single(struct rte_distributor_single *d,
+ unsigned int worker_id, struct rte_mbuf *oldpkt);
+
+/**
+ * API called by a worker to check for a new packet that was previously
+ * requested by a call to rte_distributor_request_pkt_single(). It does
+ * not wait
+ * for the new packet to be available, but returns NULL if the request has
+ * not yet been fulfilled by the distributor.
+ *
+ * @param d
+ * The distributor instance to be used
+ * @param worker_id
+ *   The worker instance number to use - must be less than num_workers passed
+ * at distributor creation time.
+ *
+ * @return
+ * A new packet to be processed by the worker thread, or NULL if no
+ * packet is yet available.
+ */
+struct rte_mbuf *
+rte_distributor_poll_pkt_single(struct rte_distributor_single *d,
+ unsigned int worker_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
- */
-
-#include <stdio.h>
-#include <sys/queue.h>
-#include <string.h>
-#include <rte_mbuf.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_errno.h>
-#include <rte_function_versioning.h>
-#include <rte_string_fns.h>
-#include <rte_eal_memconfig.h>
-#include <rte_pause.h>
-#include <rte_tailq.h>
-
-#include "rte_distributor_v20.h"
-#include "distributor_private.h"
-
-TAILQ_HEAD(rte_distributor_list, rte_distributor_v20);
-
-static struct rte_tailq_elem rte_distributor_tailq = {
- .name = "RTE_DISTRIBUTOR",
-};
-EAL_REGISTER_TAILQ(rte_distributor_tailq)
-
-/**** APIs called by workers ****/
-
-void __vsym
-rte_distributor_request_pkt_v20(struct rte_distributor_v20 *d,
- unsigned worker_id, struct rte_mbuf *oldpkt)
-{
- union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
- int64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
- | RTE_DISTRIB_GET_BUF;
- while (unlikely(__atomic_load_n(&buf->bufptr64, __ATOMIC_RELAXED)
- & RTE_DISTRIB_FLAGS_MASK))
- rte_pause();
-
- /* Sync with distributor on GET_BUF flag. */
- __atomic_store_n(&(buf->bufptr64), req, __ATOMIC_RELEASE);
-}
-
-struct rte_mbuf * __vsym
-rte_distributor_poll_pkt_v20(struct rte_distributor_v20 *d,
- unsigned worker_id)
-{
- union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
- /* Sync with distributor. Acquire bufptr64. */
- if (__atomic_load_n(&buf->bufptr64, __ATOMIC_ACQUIRE)
- & RTE_DISTRIB_GET_BUF)
- return NULL;
-
- /* since bufptr64 is signed, this should be an arithmetic shift */
- int64_t ret = buf->bufptr64 >> RTE_DISTRIB_FLAG_BITS;
- return (struct rte_mbuf *)((uintptr_t)ret);
-}
-
-struct rte_mbuf * __vsym
-rte_distributor_get_pkt_v20(struct rte_distributor_v20 *d,
- unsigned worker_id, struct rte_mbuf *oldpkt)
-{
- struct rte_mbuf *ret;
- rte_distributor_request_pkt_v20(d, worker_id, oldpkt);
- while ((ret = rte_distributor_poll_pkt_v20(d, worker_id)) == NULL)
- rte_pause();
- return ret;
-}
-
-int __vsym
-rte_distributor_return_pkt_v20(struct rte_distributor_v20 *d,
- unsigned worker_id, struct rte_mbuf *oldpkt)
-{
- union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
- uint64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
- | RTE_DISTRIB_RETURN_BUF;
- /* Sync with distributor on RETURN_BUF flag. */
- __atomic_store_n(&(buf->bufptr64), req, __ATOMIC_RELEASE);
- return 0;
-}
-
-/**** APIs called on distributor core ***/
-
-/* as name suggests, adds a packet to the backlog for a particular worker */
-static int
-add_to_backlog(struct rte_distributor_backlog *bl, int64_t item)
-{
- if (bl->count == RTE_DISTRIB_BACKLOG_SIZE)
- return -1;
-
- bl->pkts[(bl->start + bl->count++) & (RTE_DISTRIB_BACKLOG_MASK)]
- = item;
- return 0;
-}
-
-/* takes the next packet for a worker off the backlog */
-static int64_t
-backlog_pop(struct rte_distributor_backlog *bl)
-{
- bl->count--;
- return bl->pkts[bl->start++ & RTE_DISTRIB_BACKLOG_MASK];
-}
-
-/* stores a packet returned from a worker inside the returns array */
-static inline void
-store_return(uintptr_t oldbuf, struct rte_distributor_v20 *d,
- unsigned *ret_start, unsigned *ret_count)
-{
- /* store returns in a circular buffer - code is branch-free */
- d->returns.mbufs[(*ret_start + *ret_count) & RTE_DISTRIB_RETURNS_MASK]
- = (void *)oldbuf;
- *ret_start += (*ret_count == RTE_DISTRIB_RETURNS_MASK) & !!(oldbuf);
- *ret_count += (*ret_count != RTE_DISTRIB_RETURNS_MASK) & !!(oldbuf);
-}
-
-static inline void
-handle_worker_shutdown(struct rte_distributor_v20 *d, unsigned int wkr)
-{
- d->in_flight_tags[wkr] = 0;
- d->in_flight_bitmask &= ~(1UL << wkr);
- /* Sync with worker. Release bufptr64. */
- __atomic_store_n(&(d->bufs[wkr].bufptr64), 0, __ATOMIC_RELEASE);
- if (unlikely(d->backlog[wkr].count != 0)) {
- /* On return of a packet, we need to move the
- * queued packets for this core elsewhere.
- * Easiest solution is to set things up for
- * a recursive call. That will cause those
- * packets to be queued up for the next free
- * core, i.e. it will return as soon as a
- * core becomes free to accept the first
- * packet, as subsequent ones will be added to
- * the backlog for that core.
- */
- struct rte_mbuf *pkts[RTE_DISTRIB_BACKLOG_SIZE];
- unsigned i;
- struct rte_distributor_backlog *bl = &d->backlog[wkr];
-
- for (i = 0; i < bl->count; i++) {
- unsigned idx = (bl->start + i) &
- RTE_DISTRIB_BACKLOG_MASK;
- pkts[i] = (void *)((uintptr_t)(bl->pkts[idx] >>
- RTE_DISTRIB_FLAG_BITS));
- }
- /* recursive call.
- * Note that the tags were set before first level call
- * to rte_distributor_process.
- */
- rte_distributor_process_v20(d, pkts, i);
- bl->count = bl->start = 0;
- }
-}
-
-/* this function is called when process() fn is called without any new
- * packets. It goes through all the workers and clears any returned packets
- * to do a partial flush.
- */
-static int
-process_returns(struct rte_distributor_v20 *d)
-{
- unsigned wkr;
- unsigned flushed = 0;
- unsigned ret_start = d->returns.start,
- ret_count = d->returns.count;
-
- for (wkr = 0; wkr < d->num_workers; wkr++) {
- uintptr_t oldbuf = 0;
- /* Sync with worker. Acquire bufptr64. */
- const int64_t data = __atomic_load_n(&(d->bufs[wkr].bufptr64),
- __ATOMIC_ACQUIRE);
-
- if (data & RTE_DISTRIB_GET_BUF) {
- flushed++;
- if (d->backlog[wkr].count)
- /* Sync with worker. Release bufptr64. */
- __atomic_store_n(&(d->bufs[wkr].bufptr64),
- backlog_pop(&d->backlog[wkr]),
- __ATOMIC_RELEASE);
- else {
- /* Sync with worker on GET_BUF flag. */
- __atomic_store_n(&(d->bufs[wkr].bufptr64),
- RTE_DISTRIB_GET_BUF,
- __ATOMIC_RELEASE);
- d->in_flight_tags[wkr] = 0;
- d->in_flight_bitmask &= ~(1UL << wkr);
- }
- oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
- } else if (data & RTE_DISTRIB_RETURN_BUF) {
- handle_worker_shutdown(d, wkr);
- oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
- }
-
- store_return(oldbuf, d, &ret_start, &ret_count);
- }
-
- d->returns.start = ret_start;
- d->returns.count = ret_count;
-
- return flushed;
-}
-
-/* process a set of packets to distribute them to workers */
-int __vsym
-rte_distributor_process_v20(struct rte_distributor_v20 *d,
- struct rte_mbuf **mbufs, unsigned num_mbufs)
-{
- unsigned next_idx = 0;
- unsigned wkr = 0;
- struct rte_mbuf *next_mb = NULL;
- int64_t next_value = 0;
- uint32_t new_tag = 0;
- unsigned ret_start = d->returns.start,
- ret_count = d->returns.count;
-
- if (unlikely(num_mbufs == 0))
- return process_returns(d);
-
- while (next_idx < num_mbufs || next_mb != NULL) {
- uintptr_t oldbuf = 0;
- /* Sync with worker. Acquire bufptr64. */
- int64_t data = __atomic_load_n(&(d->bufs[wkr].bufptr64),
- __ATOMIC_ACQUIRE);
-
- if (!next_mb) {
- next_mb = mbufs[next_idx++];
- next_value = (((int64_t)(uintptr_t)next_mb)
- << RTE_DISTRIB_FLAG_BITS);
- /*
- * User is advocated to set tag value for each
- * mbuf before calling rte_distributor_process.
- * User defined tags are used to identify flows,
- * or sessions.
- */
- new_tag = next_mb->hash.usr;
-
- /*
- * Note that if RTE_DISTRIB_MAX_WORKERS is larger than 64
- * then the size of match has to be expanded.
- */
- uint64_t match = 0;
- unsigned i;
- /*
- * to scan for a match use "xor" and "not" to get a 0/1
- * value, then use shifting to merge to single "match"
- * variable, where a one-bit indicates a match for the
- * worker given by the bit-position
- */
- for (i = 0; i < d->num_workers; i++)
- match |= (!(d->in_flight_tags[i] ^ new_tag)
- << i);
-
- /* Only turned-on bits are considered as match */
- match &= d->in_flight_bitmask;
-
- if (match) {
- next_mb = NULL;
- unsigned worker = __builtin_ctzl(match);
- if (add_to_backlog(&d->backlog[worker],
- next_value) < 0)
- next_idx--;
- }
- }
-
- if ((data & RTE_DISTRIB_GET_BUF) &&
- (d->backlog[wkr].count || next_mb)) {
-
- if (d->backlog[wkr].count)
- /* Sync with worker. Release bufptr64. */
- __atomic_store_n(&(d->bufs[wkr].bufptr64),
- backlog_pop(&d->backlog[wkr]),
- __ATOMIC_RELEASE);
-
- else {
- /* Sync with worker. Release bufptr64. */
- __atomic_store_n(&(d->bufs[wkr].bufptr64),
- next_value,
- __ATOMIC_RELEASE);
- d->in_flight_tags[wkr] = new_tag;
- d->in_flight_bitmask |= (1UL << wkr);
- next_mb = NULL;
- }
- oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
- } else if (data & RTE_DISTRIB_RETURN_BUF) {
- handle_worker_shutdown(d, wkr);
- oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
- }
-
- /* store returns in a circular buffer */
- store_return(oldbuf, d, &ret_start, &ret_count);
-
- if (++wkr == d->num_workers)
- wkr = 0;
- }
- /* to finish, check all workers for backlog and schedule work for them
- * if they are ready */
- for (wkr = 0; wkr < d->num_workers; wkr++)
- if (d->backlog[wkr].count &&
- /* Sync with worker. Acquire bufptr64. */
- (__atomic_load_n(&(d->bufs[wkr].bufptr64),
- __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)) {
-
- int64_t oldbuf = d->bufs[wkr].bufptr64 >>
- RTE_DISTRIB_FLAG_BITS;
-
- store_return(oldbuf, d, &ret_start, &ret_count);
-
- /* Sync with worker. Release bufptr64. */
- __atomic_store_n(&(d->bufs[wkr].bufptr64),
- backlog_pop(&d->backlog[wkr]),
- __ATOMIC_RELEASE);
- }
-
- d->returns.start = ret_start;
- d->returns.count = ret_count;
- return num_mbufs;
-}
-
-/* return to the caller, packets returned from workers */
-int __vsym
-rte_distributor_returned_pkts_v20(struct rte_distributor_v20 *d,
- struct rte_mbuf **mbufs, unsigned max_mbufs)
-{
- struct rte_distributor_returned_pkts *returns = &d->returns;
- unsigned retval = (max_mbufs < returns->count) ?
- max_mbufs : returns->count;
- unsigned i;
-
- for (i = 0; i < retval; i++) {
- unsigned idx = (returns->start + i) & RTE_DISTRIB_RETURNS_MASK;
- mbufs[i] = returns->mbufs[idx];
- }
- returns->start += i;
- returns->count -= i;
-
- return retval;
-}
-
-/* return the number of packets in-flight in a distributor, i.e. packets
- * being worked on or queued up in a backlog.
- */
-static inline unsigned
-total_outstanding(const struct rte_distributor_v20 *d)
-{
- unsigned wkr, total_outstanding;
-
- total_outstanding = __builtin_popcountl(d->in_flight_bitmask);
-
- for (wkr = 0; wkr < d->num_workers; wkr++)
- total_outstanding += d->backlog[wkr].count;
-
- return total_outstanding;
-}
-
-/* flush the distributor, so that there are no outstanding packets in flight or
- * queued up. */
-int __vsym
-rte_distributor_flush_v20(struct rte_distributor_v20 *d)
-{
- const unsigned flushed = total_outstanding(d);
-
- while (total_outstanding(d) > 0)
- rte_distributor_process_v20(d, NULL, 0);
-
- return flushed;
-}
-
-/* clears the internal returns array in the distributor */
-void __vsym
-rte_distributor_clear_returns_v20(struct rte_distributor_v20 *d)
-{
- d->returns.start = d->returns.count = 0;
-#ifndef __OPTIMIZE__
- memset(d->returns.mbufs, 0, sizeof(d->returns.mbufs));
-#endif
-}
-
-/* creates a distributor instance */
-struct rte_distributor_v20 * __vsym
-rte_distributor_create_v20(const char *name,
- unsigned socket_id,
- unsigned num_workers)
-{
- struct rte_distributor_v20 *d;
- struct rte_distributor_list *distributor_list;
- char mz_name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
-
- /* compilation-time checks */
- RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0);
- RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);
- RTE_BUILD_BUG_ON(RTE_DISTRIB_MAX_WORKERS >
- sizeof(d->in_flight_bitmask) * CHAR_BIT);
-
- if (name == NULL || num_workers >= RTE_DISTRIB_MAX_WORKERS) {
- rte_errno = EINVAL;
- return NULL;
- }
-
- snprintf(mz_name, sizeof(mz_name), RTE_DISTRIB_PREFIX"%s", name);
- mz = rte_memzone_reserve(mz_name, sizeof(*d), socket_id, NO_FLAGS);
- if (mz == NULL) {
- rte_errno = ENOMEM;
- return NULL;
- }
-
- d = mz->addr;
- strlcpy(d->name, name, sizeof(d->name));
- d->num_workers = num_workers;
-
- distributor_list = RTE_TAILQ_CAST(rte_distributor_tailq.head,
- rte_distributor_list);
-
- rte_mcfg_tailq_write_lock();
- TAILQ_INSERT_TAIL(distributor_list, d, next);
- rte_mcfg_tailq_write_unlock();
-
- return d;
-}
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
- */
-
-#ifndef _RTE_DISTRIB_V20_H_
-#define _RTE_DISTRIB_V20_H_
-
-/**
- * @file
- * RTE distributor
- *
- * The distributor is a component which is designed to pass packets
- * one-at-a-time to workers, with dynamic load balancing.
- */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define RTE_DISTRIBUTOR_NAMESIZE 32 /**< Length of name for instance */
-
-struct rte_distributor_v20;
-struct rte_mbuf;
-
-/**
- * Function to create a new distributor instance
- *
- * Reserves the memory needed for the distributor operation and
- * initializes the distributor to work with the configured number of workers.
- *
- * @param name
- * The name to be given to the distributor instance.
- * @param socket_id
- * The NUMA node on which the memory is to be allocated
- * @param num_workers
- * The maximum number of workers that will request packets from this
- * distributor
- * @return
- * The newly created distributor instance
- */
-struct rte_distributor_v20 *
-rte_distributor_create_v20(const char *name, unsigned int socket_id,
- unsigned int num_workers);
-
-/* *** APIS to be called on the distributor lcore *** */
-/*
- * The following APIs are the public APIs which are designed for use on a
- * single lcore which acts as the distributor lcore for a given distributor
- * instance. These functions cannot be called on multiple cores simultaneously
- * without using locking to protect access to the internals of the distributor.
- *
- * NOTE: a given lcore cannot act as both a distributor lcore and a worker lcore
- * for the same distributor instance, otherwise deadlock will result.
- */
-
-/**
- * Process a set of packets by distributing them among workers that request
- * packets. The distributor will ensure that no two packets that have the
- * same flow id, or tag, in the mbuf will be processed at the same time.
- *
- * The user is advocated to set tag for each mbuf before calling this function.
- * If user doesn't set the tag, the tag value can be various values depending on
- * driver implementation and configuration.
- *
- * This is not multi-thread safe and should only be called on a single lcore.
- *
- * @param d
- * The distributor instance to be used
- * @param mbufs
- * The mbufs to be distributed
- * @param num_mbufs
- * The number of mbufs in the mbufs array
- * @return
- * The number of mbufs processed.
- */
-int
-rte_distributor_process_v20(struct rte_distributor_v20 *d,
- struct rte_mbuf **mbufs, unsigned int num_mbufs);
-
-/**
- * Get a set of mbufs that have been returned to the distributor by workers
- *
- * This should only be called on the same lcore as rte_distributor_process()
- *
- * @param d
- * The distributor instance to be used
- * @param mbufs
- * The mbufs pointer array to be filled in
- * @param max_mbufs
- * The size of the mbufs array
- * @return
- * The number of mbufs returned in the mbufs array.
- */
-int
-rte_distributor_returned_pkts_v20(struct rte_distributor_v20 *d,
- struct rte_mbuf **mbufs, unsigned int max_mbufs);
-
-/**
- * Flush the distributor component, so that there are no in-flight or
- * backlogged packets awaiting processing
- *
- * This should only be called on the same lcore as rte_distributor_process()
- *
- * @param d
- * The distributor instance to be used
- * @return
- * The number of queued/in-flight packets that were completed by this call.
- */
-int
-rte_distributor_flush_v20(struct rte_distributor_v20 *d);
-
-/**
- * Clears the array of returned packets used as the source for the
- * rte_distributor_returned_pkts() API call.
- *
- * This should only be called on the same lcore as rte_distributor_process()
- *
- * @param d
- * The distributor instance to be used
- */
-void
-rte_distributor_clear_returns_v20(struct rte_distributor_v20 *d);
-
-/* *** APIS to be called on the worker lcores *** */
-/*
- * The following APIs are the public APIs which are designed for use on
- * multiple lcores which act as workers for a distributor. Each lcore should use
- * a unique worker id when requesting packets.
- *
- * NOTE: a given lcore cannot act as both a distributor lcore and a worker lcore
- * for the same distributor instance, otherwise deadlock will result.
- */
-
-/**
- * API called by a worker to get a new packet to process. Any previous packet
- * given to the worker is assumed to have completed processing, and may be
- * optionally returned to the distributor via the oldpkt parameter.
- *
- * @param d
- * The distributor instance to be used
- * @param worker_id
- * The worker instance number to use - must be less that num_workers passed
- * at distributor creation time.
- * @param oldpkt
- * The previous packet, if any, being processed by the worker
- *
- * @return
- * A new packet to be processed by the worker thread.
- */
-struct rte_mbuf *
-rte_distributor_get_pkt_v20(struct rte_distributor_v20 *d,
- unsigned int worker_id, struct rte_mbuf *oldpkt);
-
-/**
- * API called by a worker to return a completed packet without requesting a
- * new packet, for example, because a worker thread is shutting down
- *
- * @param d
- * The distributor instance to be used
- * @param worker_id
- * The worker instance number to use - must be less that num_workers passed
- * at distributor creation time.
- * @param mbuf
- * The previous packet being processed by the worker
- */
-int
-rte_distributor_return_pkt_v20(struct rte_distributor_v20 *d,
- unsigned int worker_id, struct rte_mbuf *mbuf);
-
-/**
- * API called by a worker to request a new packet to process.
- * Any previous packet given to the worker is assumed to have completed
- * processing, and may be optionally returned to the distributor via
- * the oldpkt parameter.
- * Unlike rte_distributor_get_pkt(), this function does not wait for a new
- * packet to be provided by the distributor.
- *
- * NOTE: after calling this function, rte_distributor_poll_pkt() should
- * be used to poll for the packet requested. The rte_distributor_get_pkt()
- * API should *not* be used to try and retrieve the new packet.
- *
- * @param d
- * The distributor instance to be used
- * @param worker_id
- * The worker instance number to use - must be less that num_workers passed
- * at distributor creation time.
- * @param oldpkt
- * The previous packet, if any, being processed by the worker
- */
-void
-rte_distributor_request_pkt_v20(struct rte_distributor_v20 *d,
- unsigned int worker_id, struct rte_mbuf *oldpkt);
-
-/**
- * API called by a worker to check for a new packet that was previously
- * requested by a call to rte_distributor_request_pkt(). It does not wait
- * for the new packet to be available, but returns NULL if the request has
- * not yet been fulfilled by the distributor.
- *
- * @param d
- * The distributor instance to be used
- * @param worker_id
- * The worker instance number to use - must be less that num_workers passed
- * at distributor creation time.
- *
- * @return
- * A new packet to be processed by the worker thread, or NULL if no
- * packet is yet available.
- */
-struct rte_mbuf *
-rte_distributor_poll_pkt_v20(struct rte_distributor_v20 *d,
- unsigned int worker_id);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
-DPDK_2.0 {
- global:
-
- rte_distributor_clear_returns;
- rte_distributor_create;
- rte_distributor_flush;
- rte_distributor_get_pkt;
- rte_distributor_poll_pkt;
- rte_distributor_process;
- rte_distributor_request_pkt;
- rte_distributor_return_pkt;
- rte_distributor_returned_pkts;
-
- local: *;
-};
-
DPDK_17.05 {
global:
rte_distributor_request_pkt;
rte_distributor_return_pkt;
rte_distributor_returned_pkts;
-} DPDK_2.0;
+};