X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=lib%2Flibrte_distributor%2Frte_distributor_v20.c;h=9566b53f2b39d4f0613fe7aea4819c4e9a1d3f74;hb=e14bc93e8f231455ac7ffb45189f8dedcc45276d;hp=b890947feba6f1a19bd77207dd56b69563087e5f;hpb=73f08e03c964fac87d89df63c208e548ea699f82;p=dpdk.git

diff --git a/lib/librte_distributor/rte_distributor_v20.c b/lib/librte_distributor/rte_distributor_v20.c
index b890947feb..9566b53f2b 100644
--- a/lib/librte_distributor/rte_distributor_v20.c
+++ b/lib/librte_distributor/rte_distributor_v20.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
  */
 
 #include 
@@ -38,82 +9,15 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
-#include "rte_distributor_v20.h"
-
-#define NO_FLAGS 0
-#define RTE_DISTRIB_PREFIX "DT_"
-
-/* we will use the bottom four bits of pointer for flags, shifting out
- * the top four bits to make room (since a 64-bit pointer actually only uses
- * 48 bits). An arithmetic-right-shift will then appropriately restore the
- * original pointer value with proper sign extension into the top bits. */
-#define RTE_DISTRIB_FLAG_BITS 4
-#define RTE_DISTRIB_FLAGS_MASK (0x0F)
-#define RTE_DISTRIB_NO_BUF 0       /**< empty flags: no buffer requested */
-#define RTE_DISTRIB_GET_BUF (1)    /**< worker requests a buffer, returns old */
-#define RTE_DISTRIB_RETURN_BUF (2) /**< worker returns a buffer, no request */
-
-#define RTE_DISTRIB_BACKLOG_SIZE 8
-#define RTE_DISTRIB_BACKLOG_MASK (RTE_DISTRIB_BACKLOG_SIZE - 1)
-
-#define RTE_DISTRIB_MAX_RETURNS 128
-#define RTE_DISTRIB_RETURNS_MASK (RTE_DISTRIB_MAX_RETURNS - 1)
-
-/**
- * Maximum number of workers allowed.
- * Be aware of increasing the limit, becaus it is limited by how we track
- * in-flight tags. See @in_flight_bitmask and @rte_distributor_process
- */
-#define RTE_DISTRIB_MAX_WORKERS 64
-
-/**
- * Buffer structure used to pass the pointer data between cores. This is cache
- * line aligned, but to improve performance and prevent adjacent cache-line
- * prefetches of buffers for other workers, e.g. when worker 1's buffer is on
- * the next cache line to worker 0, we pad this out to three cache lines.
- * Only 64-bits of the memory is actually used though.
- */
-union rte_distributor_buffer {
-	volatile int64_t bufptr64;
-	char pad[RTE_CACHE_LINE_SIZE*3];
-} __rte_cache_aligned;
-
-struct rte_distributor_backlog {
-	unsigned start;
-	unsigned count;
-	int64_t pkts[RTE_DISTRIB_BACKLOG_SIZE];
-};
-
-struct rte_distributor_returned_pkts {
-	unsigned start;
-	unsigned count;
-	struct rte_mbuf *mbufs[RTE_DISTRIB_MAX_RETURNS];
-};
-
-struct rte_distributor {
-	TAILQ_ENTRY(rte_distributor) next;    /**< Next in list. */
+#include 
 
-	char name[RTE_DISTRIBUTOR_NAMESIZE];  /**< Name of the ring. */
-	unsigned num_workers;                 /**< Number of workers polling */
-
-	uint32_t in_flight_tags[RTE_DISTRIB_MAX_WORKERS];
-		/**< Tracks the tag being processed per core */
-	uint64_t in_flight_bitmask;
-		/**< on/off bits for in-flight tags.
-		 * Note that if RTE_DISTRIB_MAX_WORKERS is larger than 64 then
-		 * the bitmask has to expand.
-		 */
-
-	struct rte_distributor_backlog backlog[RTE_DISTRIB_MAX_WORKERS];
-
-	union rte_distributor_buffer bufs[RTE_DISTRIB_MAX_WORKERS];
-
-	struct rte_distributor_returned_pkts returns;
-};
+#include "rte_distributor_v20.h"
+#include "rte_distributor_private.h"
 
-TAILQ_HEAD(rte_distributor_list, rte_distributor);
+TAILQ_HEAD(rte_distributor_list, rte_distributor_v20);
 
 static struct rte_tailq_elem rte_distributor_tailq = {
 	.name = "RTE_DISTRIBUTOR",
@@ -123,22 +27,23 @@ EAL_REGISTER_TAILQ(rte_distributor_tailq)
 
 /**** APIs called by workers ****/
 
 void
-rte_distributor_request_pkt(struct rte_distributor *d,
+rte_distributor_request_pkt_v20(struct rte_distributor_v20 *d,
 		unsigned worker_id, struct rte_mbuf *oldpkt)
 {
-	union rte_distributor_buffer *buf = &d->bufs[worker_id];
+	union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
 	int64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
 			| RTE_DISTRIB_GET_BUF;
 	while (unlikely(buf->bufptr64 & RTE_DISTRIB_FLAGS_MASK))
 		rte_pause();
 	buf->bufptr64 = req;
 }
+VERSION_SYMBOL(rte_distributor_request_pkt, _v20, 2.0);
 
 struct rte_mbuf *
-rte_distributor_poll_pkt(struct rte_distributor *d,
+rte_distributor_poll_pkt_v20(struct rte_distributor_v20 *d,
 		unsigned worker_id)
 {
-	union rte_distributor_buffer *buf = &d->bufs[worker_id];
+	union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
 	if (buf->bufptr64 & RTE_DISTRIB_GET_BUF)
 		return NULL;
 
@@ -146,28 +51,31 @@ rte_distributor_poll_pkt(struct rte_distributor *d,
 	int64_t ret = buf->bufptr64 >> RTE_DISTRIB_FLAG_BITS;
 	return (struct rte_mbuf *)((uintptr_t)ret);
 }
+VERSION_SYMBOL(rte_distributor_poll_pkt, _v20, 2.0);
 
 struct rte_mbuf *
-rte_distributor_get_pkt(struct rte_distributor *d,
+rte_distributor_get_pkt_v20(struct rte_distributor_v20 *d,
 		unsigned worker_id, struct rte_mbuf *oldpkt)
 {
 	struct rte_mbuf *ret;
-	rte_distributor_request_pkt(d, worker_id, oldpkt);
-	while ((ret = rte_distributor_poll_pkt(d, worker_id)) == NULL)
+	rte_distributor_request_pkt_v20(d, worker_id, oldpkt);
+	while ((ret = rte_distributor_poll_pkt_v20(d, worker_id)) == NULL)
 		rte_pause();
 	return ret;
 }
+VERSION_SYMBOL(rte_distributor_get_pkt, _v20, 2.0);
 
 int
-rte_distributor_return_pkt(struct rte_distributor *d,
+rte_distributor_return_pkt_v20(struct rte_distributor_v20 *d,
 		unsigned worker_id, struct rte_mbuf *oldpkt)
 {
-	union rte_distributor_buffer *buf = &d->bufs[worker_id];
+	union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
 	uint64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
 			| RTE_DISTRIB_RETURN_BUF;
 	buf->bufptr64 = req;
 	return 0;
 }
+VERSION_SYMBOL(rte_distributor_return_pkt, _v20, 2.0);
 
 /**** APIs called on distributor core ***/
 
@@ -193,7 +101,7 @@ backlog_pop(struct rte_distributor_backlog *bl)
 
 /* stores a packet returned from a worker inside the returns array */
 static inline void
-store_return(uintptr_t oldbuf, struct rte_distributor *d,
+store_return(uintptr_t oldbuf, struct rte_distributor_v20 *d,
 		unsigned *ret_start, unsigned *ret_count)
 {
 	/* store returns in a circular buffer - code is branch-free */
@@ -204,7 +112,7 @@ store_return(uintptr_t oldbuf, struct rte_distributor *d,
 }
 
 static inline void
-handle_worker_shutdown(struct rte_distributor *d, unsigned wkr)
+handle_worker_shutdown(struct rte_distributor_v20 *d, unsigned int wkr)
 {
 	d->in_flight_tags[wkr] = 0;
 	d->in_flight_bitmask &= ~(1UL << wkr);
@@ -234,7 +142,7 @@ handle_worker_shutdown(struct rte_distributor *d, unsigned wkr)
 		 * Note that the tags were set before first level call
 		 * to rte_distributor_process.
 		 */
-		rte_distributor_process(d, pkts, i);
+		rte_distributor_process_v20(d, pkts, i);
 		bl->count = bl->start = 0;
 	}
 }
@@ -244,7 +152,7 @@ handle_worker_shutdown(struct rte_distributor *d, unsigned wkr)
  * to do a partial flush.
  */
 static int
-process_returns(struct rte_distributor *d)
+process_returns(struct rte_distributor_v20 *d)
 {
 	unsigned wkr;
 	unsigned flushed = 0;
@@ -283,7 +191,7 @@ process_returns(struct rte_distributor *d)
 
 /* process a set of packets to distribute them to workers */
 int
-rte_distributor_process(struct rte_distributor *d,
+rte_distributor_process_v20(struct rte_distributor_v20 *d,
 		struct rte_mbuf **mbufs, unsigned num_mbufs)
 {
 	unsigned next_idx = 0;
@@ -307,7 +215,7 @@ rte_distributor_process(struct rte_distributor *d,
 			next_value = (((int64_t)(uintptr_t)next_mb)
 					<< RTE_DISTRIB_FLAG_BITS);
 			/*
-			 * User is advocated to set tag vaue for each
+			 * User is advocated to set tag value for each
 			 * mbuf before calling rte_distributor_process.
 			 * User defined tags are used to identify flows,
 			 * or sessions.
@@ -384,10 +292,11 @@ rte_distributor_process(struct rte_distributor *d,
 	d->returns.count = ret_count;
 	return num_mbufs;
 }
+VERSION_SYMBOL(rte_distributor_process, _v20, 2.0);
 
 /* return to the caller, packets returned from workers */
 int
-rte_distributor_returned_pkts(struct rte_distributor *d,
+rte_distributor_returned_pkts_v20(struct rte_distributor_v20 *d,
 		struct rte_mbuf **mbufs, unsigned max_mbufs)
 {
 	struct rte_distributor_returned_pkts *returns = &d->returns;
@@ -404,11 +313,13 @@ rte_distributor_returned_pkts(struct rte_distributor *d,
 
 	return retval;
 }
+VERSION_SYMBOL(rte_distributor_returned_pkts, _v20, 2.0);
 
 /* return the number of packets in-flight in a distributor, i.e. packets
- * being workered on or queued up in a backlog. */
+ * being worked on or queued up in a backlog.
+ */
 static inline unsigned
-total_outstanding(const struct rte_distributor *d)
+total_outstanding(const struct rte_distributor_v20 *d)
 {
 	unsigned wkr, total_outstanding;
 
@@ -423,33 +334,35 @@ total_outstanding(const struct rte_distributor *d)
 /* flush the distributor, so that there are no outstanding packets in flight or
  * queued up. */
 int
-rte_distributor_flush(struct rte_distributor *d)
+rte_distributor_flush_v20(struct rte_distributor_v20 *d)
 {
 	const unsigned flushed = total_outstanding(d);
 
 	while (total_outstanding(d) > 0)
-		rte_distributor_process(d, NULL, 0);
+		rte_distributor_process_v20(d, NULL, 0);
 
 	return flushed;
 }
+VERSION_SYMBOL(rte_distributor_flush, _v20, 2.0);
 
 /* clears the internal returns array in the distributor */
 void
-rte_distributor_clear_returns(struct rte_distributor *d)
+rte_distributor_clear_returns_v20(struct rte_distributor_v20 *d)
 {
 	d->returns.start = d->returns.count = 0;
 #ifndef __OPTIMIZE__
 	memset(d->returns.mbufs, 0, sizeof(d->returns.mbufs));
 #endif
 }
+VERSION_SYMBOL(rte_distributor_clear_returns, _v20, 2.0);
 
 /* creates a distributor instance */
-struct rte_distributor *
-rte_distributor_create(const char *name,
+struct rte_distributor_v20 *
+rte_distributor_create_v20(const char *name,
 		unsigned socket_id,
 		unsigned num_workers)
 {
-	struct rte_distributor *d;
+	struct rte_distributor_v20 *d;
 	struct rte_distributor_list *distributor_list;
 	char mz_name[RTE_MEMZONE_NAMESIZE];
 	const struct rte_memzone *mz;
@@ -485,3 +398,4 @@ rte_distributor_create(const char *name,
 
 	return d;
 }
+VERSION_SYMBOL(rte_distributor_create, _v20, 2.0);
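
The comment block dropped in the second hunk above documents the pointer-tagging scheme this file still relies on: the low four bits of the signed 64-bit bufptr64 word carry the request/return flags, the mbuf pointer is shifted left by four bits to make room, and an arithmetic right shift restores the pointer with correct sign extension of a 48-bit canonical address. The standalone C sketch below illustrates that packing; it is not DPDK code, and the names pack_ptr, unpack_ptr, FLAG_BITS and GET_BUF are made up for illustration.

/* Minimal standalone sketch (not DPDK code) of the flag-packing scheme
 * described in the removed comment: the low 4 bits of a signed 64-bit
 * word hold flags, the pointer is shifted left by 4 to make room, and an
 * arithmetic right shift restores the pointer, sign-extending the top
 * bits of a 48-bit canonical address. All names below are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define FLAG_BITS  4
#define FLAGS_MASK 0x0F
#define GET_BUF    1	/* example flag value */

/* pack a pointer and a 4-bit flag field into one signed 64-bit word */
static int64_t
pack_ptr(void *ptr, unsigned int flags)
{
	return (((int64_t)(uintptr_t)ptr) << FLAG_BITS) | (flags & FLAGS_MASK);
}

/* recover the pointer; the >> on a signed value is the arithmetic shift
 * the original comment relies on (true of the compilers DPDK targets) */
static void *
unpack_ptr(int64_t word, unsigned int *flags)
{
	*flags = (unsigned int)(word & FLAGS_MASK);
	return (void *)(uintptr_t)(word >> FLAG_BITS);
}

int
main(void)
{
	int payload = 42;
	unsigned int flags;
	int64_t word = pack_ptr(&payload, GET_BUF);
	int *restored = unpack_ptr(word, &flags);

	printf("flags=%u value=%d\n", flags, *restored);
	return 0;
}

While any of the flag bits are set, the slot counts as busy, which is why rte_distributor_request_pkt_v20() in the diff spins on RTE_DISTRIB_FLAGS_MASK before writing a new request word.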