X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_distributor%2Frte_distributor.c;h=208abfb1d6e5a56fadb2b572f4206c80a3f7a03f;hb=c6bc117c99ba750835bcb51f922f3901a7cfa314;hp=75b0d47329b002cb830af180ab207fa0a755acbd;hpb=775003ad2f96c9881a00a6534518a875723978c8;p=dpdk.git

diff --git a/lib/librte_distributor/rte_distributor.c b/lib/librte_distributor/rte_distributor.c
index 75b0d47329..208abfb1d6 100644
--- a/lib/librte_distributor/rte_distributor.c
+++ b/lib/librte_distributor/rte_distributor.c
@@ -1,33 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2017 Intel Corporation. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
  */
 
 #include
@@ -36,16 +8,19 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
-#include
+#include
+
 #include "rte_distributor_private.h"
-#include "rte_distributor_next.h"
+#include "rte_distributor.h"
 #include "rte_distributor_v20.h"
+#include "rte_distributor_v1705.h"
 
-TAILQ_HEAD(rte_dist_burst_list, rte_distributor_v1705);
+TAILQ_HEAD(rte_dist_burst_list, rte_distributor);
 
 static struct rte_tailq_elem rte_dist_burst_tailq = {
 	.name = "RTE_DIST_BURST",
@@ -57,17 +32,17 @@ EAL_REGISTER_TAILQ(rte_dist_burst_tailq)
 /**** Burst Packet APIs called by workers ****/
 
 void
-rte_distributor_request_pkt_v1705(struct rte_distributor_v1705 *d,
+rte_distributor_request_pkt_v1705(struct rte_distributor *d,
 		unsigned int worker_id, struct rte_mbuf **oldpkt,
 		unsigned int count)
 {
-	struct rte_distributor_buffer_v1705 *buf = &(d->bufs[worker_id]);
+	struct rte_distributor_buffer *buf = &(d->bufs[worker_id]);
 	unsigned int i;
 
 	volatile int64_t *retptr64;
 
 	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
-		rte_distributor_request_pkt(d->d_v20,
+		rte_distributor_request_pkt_v20(d->d_v20,
 				worker_id, oldpkt[0]);
 		return;
 	}
@@ -102,18 +77,23 @@ rte_distributor_request_pkt_v1705(struct rte_distributor_v1705 *d,
 	 */
 	*retptr64 |= RTE_DISTRIB_GET_BUF;
 }
+BIND_DEFAULT_SYMBOL(rte_distributor_request_pkt, _v1705, 17.05);
+MAP_STATIC_SYMBOL(void rte_distributor_request_pkt(struct rte_distributor *d,
+		unsigned int worker_id, struct rte_mbuf **oldpkt,
+		unsigned int count),
+		rte_distributor_request_pkt_v1705);
 
 int
-rte_distributor_poll_pkt_v1705(struct rte_distributor_v1705 *d,
+rte_distributor_poll_pkt_v1705(struct rte_distributor *d,
 		unsigned int worker_id, struct rte_mbuf **pkts)
 {
-	struct rte_distributor_buffer_v1705 *buf = &d->bufs[worker_id];
+	struct rte_distributor_buffer *buf = &d->bufs[worker_id];
 	uint64_t ret;
 	int count = 0;
 	unsigned int i;
 
 	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
-		pkts[0] = rte_distributor_poll_pkt(d->d_v20, worker_id);
+		pkts[0] = rte_distributor_poll_pkt_v20(d->d_v20, worker_id);
 		return (pkts[0]) ? 1 : 0;
 	}
 
@@ -138,9 +118,13 @@ rte_distributor_poll_pkt_v1705(struct rte_distributor_v1705 *d,
 
 	return count;
 }
+BIND_DEFAULT_SYMBOL(rte_distributor_poll_pkt, _v1705, 17.05);
+MAP_STATIC_SYMBOL(int rte_distributor_poll_pkt(struct rte_distributor *d,
+		unsigned int worker_id, struct rte_mbuf **pkts),
+		rte_distributor_poll_pkt_v1705);
 
 int
-rte_distributor_get_pkt_v1705(struct rte_distributor_v1705 *d,
+rte_distributor_get_pkt_v1705(struct rte_distributor *d,
 		unsigned int worker_id, struct rte_mbuf **pkts,
 		struct rte_mbuf **oldpkt, unsigned int return_count)
 {
@@ -148,37 +132,42 @@ rte_distributor_get_pkt_v1705(struct rte_distributor_v1705 *d,
 
 	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
 		if (return_count <= 1) {
-			pkts[0] = rte_distributor_get_pkt(d->d_v20,
+			pkts[0] = rte_distributor_get_pkt_v20(d->d_v20,
 				worker_id, oldpkt[0]);
 			return (pkts[0]) ? 1 : 0;
 		} else
 			return -EINVAL;
 	}
 
-	rte_distributor_request_pkt_v1705(d, worker_id, oldpkt, return_count);
+	rte_distributor_request_pkt(d, worker_id, oldpkt, return_count);
 
-	count = rte_distributor_poll_pkt_v1705(d, worker_id, pkts);
+	count = rte_distributor_poll_pkt(d, worker_id, pkts);
 	while (count == -1) {
 		uint64_t t = rte_rdtsc() + 100;
 
 		while (rte_rdtsc() < t)
 			rte_pause();
 
-		count = rte_distributor_poll_pkt_v1705(d, worker_id, pkts);
+		count = rte_distributor_poll_pkt(d, worker_id, pkts);
 	}
 	return count;
 }
+BIND_DEFAULT_SYMBOL(rte_distributor_get_pkt, _v1705, 17.05);
+MAP_STATIC_SYMBOL(int rte_distributor_get_pkt(struct rte_distributor *d,
+		unsigned int worker_id, struct rte_mbuf **pkts,
+		struct rte_mbuf **oldpkt, unsigned int return_count),
+		rte_distributor_get_pkt_v1705);
 
 int
-rte_distributor_return_pkt_v1705(struct rte_distributor_v1705 *d,
+rte_distributor_return_pkt_v1705(struct rte_distributor *d,
 		unsigned int worker_id, struct rte_mbuf **oldpkt, int num)
 {
-	struct rte_distributor_buffer_v1705 *buf = &d->bufs[worker_id];
+	struct rte_distributor_buffer *buf = &d->bufs[worker_id];
 	unsigned int i;
 
 	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
 		if (num == 1)
-			return rte_distributor_return_pkt(d->d_v20,
+			return rte_distributor_return_pkt_v20(d->d_v20,
 				worker_id, oldpkt[0]);
 		else
 			return -EINVAL;
@@ -197,12 +186,16 @@ rte_distributor_return_pkt_v1705(struct rte_distributor_v1705 *d,
 
 	return 0;
 }
+BIND_DEFAULT_SYMBOL(rte_distributor_return_pkt, _v1705, 17.05);
+MAP_STATIC_SYMBOL(int rte_distributor_return_pkt(struct rte_distributor *d,
+		unsigned int worker_id, struct rte_mbuf **oldpkt, int num),
+		rte_distributor_return_pkt_v1705);
 
 /**** APIs called on distributor core ***/
 
 /* stores a packet returned from a worker inside the returns array */
 static inline void
-store_return(uintptr_t oldbuf, struct rte_distributor_v1705 *d,
+store_return(uintptr_t oldbuf, struct rte_distributor *d,
 		unsigned int *ret_start, unsigned int *ret_count)
 {
 	if (!oldbuf)
@@ -221,7 +214,7 @@ store_return(uintptr_t oldbuf, struct rte_distributor_v1705 *d,
  * workers to give us our atomic flow pinning.
  */
 void
-find_match_scalar(struct rte_distributor_v1705 *d,
+find_match_scalar(struct rte_distributor *d,
 			uint16_t *data_ptr,
 			uint16_t *output_ptr)
 {
@@ -270,9 +263,9 @@ find_match_scalar(struct rte_distributor_v1705 *d,
  * the valid returned pointers (store_return).
  */
 static unsigned int
-handle_returns(struct rte_distributor_v1705 *d, unsigned int wkr)
+handle_returns(struct rte_distributor *d, unsigned int wkr)
 {
-	struct rte_distributor_buffer_v1705 *buf = &(d->bufs[wkr]);
+	struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
 	uintptr_t oldbuf;
 	unsigned int ret_start = d->returns.start,
 			ret_count = d->returns.count;
@@ -308,9 +301,9 @@ handle_returns(struct rte_distributor_v1705 *d, unsigned int wkr)
  * before sending out new packets.
  */
 static unsigned int
-release(struct rte_distributor_v1705 *d, unsigned int wkr)
+release(struct rte_distributor *d, unsigned int wkr)
 {
-	struct rte_distributor_buffer_v1705 *buf = &(d->bufs[wkr]);
+	struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
 	unsigned int i;
 
 	while (!(d->bufs[wkr].bufptr64[0] & RTE_DISTRIB_GET_BUF))
@@ -342,7 +335,7 @@ release(struct rte_distributor_v1705 *d, unsigned int wkr)
 
 /* process a set of packets to distribute them to workers */
 int
-rte_distributor_process_v1705(struct rte_distributor_v1705 *d,
+rte_distributor_process_v1705(struct rte_distributor *d,
 		struct rte_mbuf **mbufs, unsigned int num_mbufs)
 {
 	unsigned int next_idx = 0;
@@ -355,13 +348,13 @@ rte_distributor_process_v1705(struct rte_distributor_v1705 *d,
 
 	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
 		/* Call the old API */
-		return rte_distributor_process(d->d_v20, mbufs, num_mbufs);
+		return rte_distributor_process_v20(d->d_v20, mbufs, num_mbufs);
 	}
 
 	if (unlikely(num_mbufs == 0)) {
 		/* Flush out all non-full cache-lines to workers. */
 		for (wid = 0 ; wid < d->num_workers; wid++) {
-			if ((d->bufs[wid].bufptr64[0] & RTE_DISTRIB_GET_BUF)) {
+			if (d->bufs[wid].bufptr64[0] & RTE_DISTRIB_GET_BUF) {
 				release(d, wid);
 				handle_returns(d, wid);
 			}
@@ -391,7 +384,13 @@ rte_distributor_process_v1705(struct rte_distributor_v1705 *d,
 		for (; i < RTE_DIST_BURST_SIZE; i++)
 			flows[i] = 0;
 
-		find_match_scalar(d, &flows[0], &matches[0]);
+		switch (d->dist_match_fn) {
+		case RTE_DIST_MATCH_VECTOR:
+			find_match_vec(d, &flows[0], &matches[0]);
+			break;
+		default:
+			find_match_scalar(d, &flows[0], &matches[0]);
+		}
 
 		/*
 		 * Matches array now contain the intended worker ID (+1) of
@@ -405,7 +404,7 @@ rte_distributor_process_v1705(struct rte_distributor_v1705 *d,
 			next_value = (((int64_t)(uintptr_t)next_mb) <<
 					RTE_DISTRIB_FLAG_BITS);
 			/*
-			 * User is advocated to set tag vaue for each
+			 * User is advocated to set tag value for each
 			 * mbuf before calling rte_distributor_process.
 			 * User defined tags are used to identify flows,
 			 * or sessions.
@@ -415,7 +414,7 @@ rte_distributor_process_v1705(struct rte_distributor_v1705 *d,
 
 			/*
 			 * Uncommenting the next line will cause the find_match
-			 * function to be optimised out, making this function
+			 * function to be optimized out, making this function
 			 * do parallel (non-atomic) distribution
 			 */
 			/* matches[j] = 0; */
@@ -470,10 +469,14 @@ rte_distributor_process_v1705(struct rte_distributor_v1705 *d,
 
 	return num_mbufs;
 }
+BIND_DEFAULT_SYMBOL(rte_distributor_process, _v1705, 17.05);
+MAP_STATIC_SYMBOL(int rte_distributor_process(struct rte_distributor *d,
+		struct rte_mbuf **mbufs, unsigned int num_mbufs),
+		rte_distributor_process_v1705);
 
 /* return to the caller, packets returned from workers */
 int
-rte_distributor_returned_pkts_v1705(struct rte_distributor_v1705 *d,
+rte_distributor_returned_pkts_v1705(struct rte_distributor *d,
 		struct rte_mbuf **mbufs, unsigned int max_mbufs)
 {
 	struct rte_distributor_returned_pkts *returns = &d->returns;
@@ -483,7 +486,7 @@ rte_distributor_returned_pkts_v1705(struct rte_distributor_v1705 *d,
 
 	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
 		/* Call the old API */
-		return rte_distributor_returned_pkts(d->d_v20,
+		return rte_distributor_returned_pkts_v20(d->d_v20,
 				mbufs, max_mbufs);
 	}
 
@@ -498,13 +501,17 @@ rte_distributor_returned_pkts_v1705(struct rte_distributor_v1705 *d,
 
 	return retval;
 }
+BIND_DEFAULT_SYMBOL(rte_distributor_returned_pkts, _v1705, 17.05);
+MAP_STATIC_SYMBOL(int rte_distributor_returned_pkts(struct rte_distributor *d,
+		struct rte_mbuf **mbufs, unsigned int max_mbufs),
+		rte_distributor_returned_pkts_v1705);
 
 /*
  * Return the number of packets in-flight in a distributor, i.e. packets
- * being workered on or queued up in a backlog.
+ * being worked on or queued up in a backlog.
  */
 static inline unsigned int
-total_outstanding(const struct rte_distributor_v1705 *d)
+total_outstanding(const struct rte_distributor *d)
 {
 	unsigned int wkr, total_outstanding = 0;
 
@@ -519,55 +526,64 @@ total_outstanding(const struct rte_distributor_v1705 *d)
  * queued up.
 */
 int
-rte_distributor_flush_v1705(struct rte_distributor_v1705 *d)
+rte_distributor_flush_v1705(struct rte_distributor *d)
 {
-	const unsigned int flushed = total_outstanding(d);
+	unsigned int flushed;
 	unsigned int wkr;
 
 	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
 		/* Call the old API */
-		return rte_distributor_flush(d->d_v20);
+		return rte_distributor_flush_v20(d->d_v20);
 	}
 
+	flushed = total_outstanding(d);
+
 	while (total_outstanding(d) > 0)
-		rte_distributor_process_v1705(d, NULL, 0);
+		rte_distributor_process(d, NULL, 0);
 
 	/*
 	 * Send empty burst to all workers to allow them to exit
 	 * gracefully, should they need to.
 	 */
-	rte_distributor_process_v1705(d, NULL, 0);
+	rte_distributor_process(d, NULL, 0);
 
 	for (wkr = 0; wkr < d->num_workers; wkr++)
 		handle_returns(d, wkr);
 
 	return flushed;
 }
+BIND_DEFAULT_SYMBOL(rte_distributor_flush, _v1705, 17.05);
+MAP_STATIC_SYMBOL(int rte_distributor_flush(struct rte_distributor *d),
+		rte_distributor_flush_v1705);
 
 /* clears the internal returns array in the distributor */
 void
-rte_distributor_clear_returns_v1705(struct rte_distributor_v1705 *d)
+rte_distributor_clear_returns_v1705(struct rte_distributor *d)
 {
 	unsigned int wkr;
 
 	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
 		/* Call the old API */
-		rte_distributor_clear_returns(d->d_v20);
+		rte_distributor_clear_returns_v20(d->d_v20);
+		return;
 	}
 
 	/* throw away returns, so workers can exit */
 	for (wkr = 0; wkr < d->num_workers; wkr++)
 		d->bufs[wkr].retptr64[0] = 0;
 }
+BIND_DEFAULT_SYMBOL(rte_distributor_clear_returns, _v1705, 17.05);
+MAP_STATIC_SYMBOL(void rte_distributor_clear_returns(struct rte_distributor *d),
+		rte_distributor_clear_returns_v1705);
 
 /* creates a distributor instance */
-struct rte_distributor_v1705 *
+struct rte_distributor *
 rte_distributor_create_v1705(const char *name,
 		unsigned int socket_id,
 		unsigned int num_workers,
 		unsigned int alg_type)
 {
-	struct rte_distributor_v1705 *d;
+	struct rte_distributor *d;
 	struct rte_dist_burst_list *dist_burst_list;
 	char mz_name[RTE_MEMZONE_NAMESIZE];
 	const struct rte_memzone *mz;
@@ -580,10 +596,15 @@ rte_distributor_create_v1705(const char *name,
 	RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);
 
 	if (alg_type == RTE_DIST_ALG_SINGLE) {
-		d = malloc(sizeof(struct rte_distributor_v1705));
-		d->d_v20 = rte_distributor_create(name,
+		d = malloc(sizeof(struct rte_distributor));
+		if (d == NULL) {
+			rte_errno = ENOMEM;
+			return NULL;
+		}
+		d->d_v20 = rte_distributor_create_v20(name,
					socket_id, num_workers);
 		if (d->d_v20 == NULL) {
+			free(d);
 			/* rte_errno will have been set */
 			return NULL;
 		}
@@ -604,13 +625,17 @@ rte_distributor_create_v1705(const char *name,
 	}
 
 	d = mz->addr;
-	snprintf(d->name, sizeof(d->name), "%s", name);
+	strlcpy(d->name, name, sizeof(d->name));
 	d->num_workers = num_workers;
 	d->alg_type = alg_type;
 
+	d->dist_match_fn = RTE_DIST_MATCH_SCALAR;
+#if defined(RTE_ARCH_X86)
+	d->dist_match_fn = RTE_DIST_MATCH_VECTOR;
+#endif
 
 	/*
-	 * Set up the backog tags so they're pointing at the second cache
+	 * Set up the backlog tags so they're pointing at the second cache
 	 * line for performance during flow matching
 	 */
 	for (i = 0 ; i < num_workers ; i++)
@@ -626,3 +651,8 @@ rte_distributor_create_v1705(const char *name,
 
 	return d;
 }
+BIND_DEFAULT_SYMBOL(rte_distributor_create, _v1705, 17.05);
+MAP_STATIC_SYMBOL(struct rte_distributor *rte_distributor_create(
+		const char *name, unsigned int socket_id,
+		unsigned int num_workers, unsigned int alg_type),
+		rte_distributor_create_v1705);
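
Usage sketch (not part of the diff above): the hunks rebind the public burst API onto struct rte_distributor and map the _v1705 implementations to the default symbols. The fragment below shows, under assumptions, how a caller drives that API; EAL setup and packet I/O are omitted, and the BURST value and the RTE_DIST_ALG_BURST selector are taken from the distributor headers rather than from this diff, so treat them as assumptions.

/* Minimal sketch of a distributor core and a worker core using the
 * prototypes that appear in the MAP_STATIC_SYMBOL() lines above.
 */
#include <rte_distributor.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>

#define BURST 8	/* assumed to match the library's RTE_DIST_BURST_SIZE */

static struct rte_distributor *dist;

/* Worker lcore: hand back the previous burst, block until a new one arrives. */
static int
lcore_worker(void *arg)
{
	unsigned int worker_id = *(unsigned int *)arg;
	struct rte_mbuf *bufs[BURST];
	unsigned int n = 0;

	for (;;) {
		n = rte_distributor_get_pkt(dist, worker_id, bufs, bufs, n);
		/* ... process n mbufs; flows stay pinned by their tag ... */
	}
	return 0;
}

/* Distributor lcore: tag incoming mbufs and hand them out in bursts. */
static void
distributor_loop(unsigned int num_workers)
{
	struct rte_mbuf *mbufs[BURST];

	dist = rte_distributor_create("dist_example", rte_socket_id(),
			num_workers, RTE_DIST_ALG_BURST);

	for (;;) {
		/* ... fill mbufs[] from an rx queue and set hash.usr tags ... */
		rte_distributor_process(dist, mbufs, BURST);
	}
}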