diff --git a/lib/librte_distributor/rte_distributor.c b/lib/librte_distributor/rte_distributor.c
index c4e31b8c42..7df97df92d 100644
--- a/lib/librte_distributor/rte_distributor.c
+++ b/lib/librte_distributor/rte_distributor.c
@@ -1,33 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2017 Intel Corporation. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
  */
 
 #include <stdio.h>
@@ -36,18 +8,18 @@
 #include <sys/queue.h>
 #include <string.h>
 #include <rte_mbuf.h>
 #include <rte_memory.h>
 #include <rte_cycles.h>
-#include <rte_compat.h>
+#include <rte_function_versioning.h>
 #include <rte_memzone.h>
 #include <rte_errno.h>
 #include <rte_string_fns.h>
 #include <rte_eal_memconfig.h>
 #include <rte_pause.h>
 
-#include "rte_distributor_private.h"
 #include "rte_distributor.h"
 #include "rte_distributor_v20.h"
 #include "rte_distributor_v1705.h"
+#include "distributor_private.h"
 
 TAILQ_HEAD(rte_dist_burst_list, rte_distributor);
 
@@ -77,8 +49,11 @@ rte_distributor_request_pkt_v1705(struct rte_distributor *d,
 	}
 
 	retptr64 = &(buf->retptr64[0]);
-	/* Spin while handshake bits are set (scheduler clears it) */
-	while (unlikely(*retptr64 & RTE_DISTRIB_GET_BUF)) {
+	/* Spin while handshake bits are set (scheduler clears it).
+	 * Sync with distributor on GET_BUF flag.
+	 */
+	while (unlikely(__atomic_load_n(retptr64, __ATOMIC_ACQUIRE)
+			& RTE_DISTRIB_GET_BUF)) {
 		rte_pause();
 		uint64_t t = rte_rdtsc()+100;
 
@@ -103,8 +78,10 @@ rte_distributor_request_pkt_v1705(struct rte_distributor *d,
 	/*
 	 * Finally, set the GET_BUF to signal to distributor that cache
 	 * line is ready for processing
+	 * Sync with distributor to release retptrs
 	 */
-	*retptr64 |= RTE_DISTRIB_GET_BUF;
+	__atomic_store_n(retptr64, *retptr64 | RTE_DISTRIB_GET_BUF,
+			__ATOMIC_RELEASE);
 }
 BIND_DEFAULT_SYMBOL(rte_distributor_request_pkt, _v1705, 17.05);
 MAP_STATIC_SYMBOL(void rte_distributor_request_pkt(struct rte_distributor *d,
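Note: the two hunks above convert the worker-side handshake from plain loads and read-modify-writes to an acquire load on the spin and a release store on the publish. As a minimal, self-contained sketch of that pattern (same GCC __atomic builtins; the single-slot layout and all names here are illustrative, not DPDK code):

#include <stdint.h>
#include <stdio.h>

#define FLAG_READY (1ULL << 0)

static uint64_t slot;	/* low bit = handshake flag, rest = payload */

static void
publish(uint64_t payload)
{
	/* Spin until the peer has consumed the previous value. */
	while (__atomic_load_n(&slot, __ATOMIC_ACQUIRE) & FLAG_READY)
		;	/* real code would call rte_pause() here */
	/* One release store publishes payload and flag together. */
	__atomic_store_n(&slot, (payload << 1) | FLAG_READY, __ATOMIC_RELEASE);
}

static uint64_t
consume(void)
{
	uint64_t v;

	/* Acquire pairs with the publisher's release store. */
	while (!((v = __atomic_load_n(&slot, __ATOMIC_ACQUIRE)) & FLAG_READY))
		;
	/* Clear the flag so the publisher may reuse the slot. */
	__atomic_store_n(&slot, 0, __ATOMIC_RELEASE);
	return v >> 1;
}

int
main(void)
{
	publish(42);
	printf("%llu\n", (unsigned long long)consume());
	return 0;
}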
@@ -126,8 +103,11 @@ rte_distributor_poll_pkt_v1705(struct rte_distributor *d,
 		return (pkts[0]) ? 1 : 0;
 	}
 
-	/* If bit is set, return */
-	if (buf->bufptr64[0] & RTE_DISTRIB_GET_BUF)
+	/* If bit is set, return.
+	 * Sync with distributor to acquire bufptrs.
+	 */
+	if (__atomic_load_n(&(buf->bufptr64[0]), __ATOMIC_ACQUIRE)
+		& RTE_DISTRIB_GET_BUF)
 		return -1;
 
 	/* since bufptr64 is signed, this should be an arithmetic shift */
@@ -142,8 +122,10 @@ rte_distributor_poll_pkt_v1705(struct rte_distributor *d,
 	 * so now we've got the contents of the cacheline into an array of
 	 * mbuf pointers, so toggle the bit so scheduler can start working
 	 * on the next cacheline while we're working.
+	 * Sync with distributor on GET_BUF flag. Release bufptrs.
 	 */
-	buf->bufptr64[0] |= RTE_DISTRIB_GET_BUF;
+	__atomic_store_n(&(buf->bufptr64[0]),
+		buf->bufptr64[0] | RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);
 
 	return count;
 }
@@ -202,6 +184,8 @@ rte_distributor_return_pkt_v1705(struct rte_distributor *d,
 		return -EINVAL;
 	}
 
+	/* Sync with distributor to acquire retptrs */
+	__atomic_thread_fence(__ATOMIC_ACQUIRE);
 	for (i = 0; i < RTE_DIST_BURST_SIZE; i++)
 		/* Switch off the return bit first */
 		buf->retptr64[i] &= ~RTE_DISTRIB_RETURN_BUF;
@@ -210,8 +194,11 @@ rte_distributor_return_pkt_v1705(struct rte_distributor *d,
 		buf->retptr64[i] = (((int64_t)(uintptr_t)oldpkt[i]) <<
 			RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_RETURN_BUF;
 
-	/* set the GET_BUF but even if we got no returns */
-	buf->retptr64[0] |= RTE_DISTRIB_GET_BUF;
+	/* Set the GET_BUF bit even if we got no returns.
+	 * Sync with distributor on GET_BUF flag. Release retptrs.
+	 */
+	__atomic_store_n(&(buf->retptr64[0]),
+		buf->retptr64[0] | RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);
 
 	return 0;
 }
@@ -301,7 +288,9 @@ handle_returns(struct rte_distributor *d, unsigned int wkr)
 	unsigned int count = 0;
 	unsigned int i;
 
-	if (buf->retptr64[0] & RTE_DISTRIB_GET_BUF) {
+	/* Sync on GET_BUF flag. Acquire retptrs. */
+	if (__atomic_load_n(&(buf->retptr64[0]), __ATOMIC_ACQUIRE)
+		& RTE_DISTRIB_GET_BUF) {
 		for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
 			if (buf->retptr64[i] & RTE_DISTRIB_RETURN_BUF) {
 				oldbuf = ((uintptr_t)(buf->retptr64[i] >>
@@ -314,8 +303,10 @@ handle_returns(struct rte_distributor *d, unsigned int wkr)
 		}
 		d->returns.start = ret_start;
 		d->returns.count = ret_count;
-		/* Clear for the worker to populate with more returns */
-		buf->retptr64[0] = 0;
+		/* Clear for the worker to populate with more returns.
+		 * Sync with worker on GET_BUF flag. Release retptrs.
+		 */
+		__atomic_store_n(&(buf->retptr64[0]), 0, __ATOMIC_RELEASE);
 	}
 	return count;
 }
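Note: the return path above relies on one release store to retptr64[0] making the preceding plain stores to retptr64[1..7] visible as a unit, with the reader pairing it with an acquire load (or, in rte_distributor_return_pkt, an acquire fence) before touching the other words. The same publish-a-cache-line idiom in isolation (illustrative sketch only, not DPDK code):

#include <stdint.h>
#include <stddef.h>

#define LINE_WORDS 8
#define GET_FLAG (1ULL << 0)

static uint64_t line[LINE_WORDS];

/* Writer: plain stores to words 1..N-1, then a single release store to
 * word 0 carrying the flag; no store above it may be reordered past it.
 */
static void
publish_line(const uint64_t *vals)
{
	size_t i;

	for (i = 1; i < LINE_WORDS; i++)
		line[i] = vals[i];
	__atomic_store_n(&line[0], vals[0] | GET_FLAG, __ATOMIC_RELEASE);
}

/* Reader: the acquire load of word 0 pairs with the release above, so
 * once the flag is observed set, the plain reads of words 1..N-1 are
 * guaranteed to see the writer's stores.
 */
static int
consume_line(uint64_t *out)
{
	uint64_t w0 = __atomic_load_n(&line[0], __ATOMIC_ACQUIRE);
	size_t i;

	if (!(w0 & GET_FLAG))
		return 0;	/* nothing published yet */
	out[0] = w0 & ~GET_FLAG;
	for (i = 1; i < LINE_WORDS; i++)
		out[i] = line[i];
	/* Hand the line back with a release store so it can be reused. */
	__atomic_store_n(&line[0], 0, __ATOMIC_RELEASE);
	return 1;
}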
@@ -335,7 +326,9 @@ release(struct rte_distributor *d, unsigned int wkr)
 	struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
 	unsigned int i;
 
-	while (!(d->bufs[wkr].bufptr64[0] & RTE_DISTRIB_GET_BUF))
+	/* Sync with worker on GET_BUF flag */
+	while (!(__atomic_load_n(&(d->bufs[wkr].bufptr64[0]), __ATOMIC_ACQUIRE)
+		& RTE_DISTRIB_GET_BUF))
 		rte_pause();
 
 	handle_returns(d, wkr);
@@ -355,8 +348,11 @@ release(struct rte_distributor *d, unsigned int wkr)
 
 	d->backlog[wkr].count = 0;
 
-	/* Clear the GET bit */
-	buf->bufptr64[0] &= ~RTE_DISTRIB_GET_BUF;
+	/* Clear the GET bit.
+	 * Sync with worker on GET_BUF flag. Release bufptrs.
+	 */
+	__atomic_store_n(&(buf->bufptr64[0]),
+		buf->bufptr64[0] & ~RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);
 	return buf->count;
 }
 
@@ -383,7 +379,9 @@ rte_distributor_process_v1705(struct rte_distributor *d,
 	if (unlikely(num_mbufs == 0)) {
 		/* Flush out all non-full cache-lines to workers. */
 		for (wid = 0 ; wid < d->num_workers; wid++) {
-			if ((d->bufs[wid].bufptr64[0] & RTE_DISTRIB_GET_BUF)) {
+			/* Sync with worker on GET_BUF flag. */
+			if (__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
+				__ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF) {
 				release(d, wid);
 				handle_returns(d, wid);
 			}
@@ -395,7 +393,9 @@ rte_distributor_process_v1705(struct rte_distributor *d,
 		uint16_t matches[RTE_DIST_BURST_SIZE];
 		unsigned int pkts;
 
-		if (d->bufs[wkr].bufptr64[0] & RTE_DISTRIB_GET_BUF)
+		/* Sync with worker on GET_BUF flag. */
+		if (__atomic_load_n(&(d->bufs[wkr].bufptr64[0]),
+			__ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)
 			d->bufs[wkr].count = 0;
 
 		if ((num_mbufs - next_idx) < RTE_DIST_BURST_SIZE)
@@ -433,7 +433,7 @@ rte_distributor_process_v1705(struct rte_distributor *d,
 			next_value = (((int64_t)(uintptr_t)next_mb) <<
 					RTE_DISTRIB_FLAG_BITS);
 			/*
-			 * User is advocated to set tag vaue for each
+			 * User is advised to set the tag value for each
 			 * mbuf before calling rte_distributor_process.
 			 * User defined tags are used to identify flows,
 			 * or sessions.
@@ -443,7 +443,7 @@ rte_distributor_process_v1705(struct rte_distributor *d,
 
 			/*
 			 * Uncommenting the next line will cause the find_match
-			 * function to be optimised out, making this function
+			 * function to be optimized out, making this function
 			 * do parallel (non-atomic) distribution
 			 */
 			/* matches[j] = 0; */
@@ -493,7 +493,9 @@ rte_distributor_process_v1705(struct rte_distributor *d,
 
 	/* Flush out all non-full cache-lines to workers. */
 	for (wid = 0 ; wid < d->num_workers; wid++)
-		if ((d->bufs[wid].bufptr64[0] & RTE_DISTRIB_GET_BUF))
+		/* Sync with worker on GET_BUF flag. */
+		if ((__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
+			__ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF))
 			release(d, wid);
 
 	return num_mbufs;
@@ -537,7 +539,7 @@ MAP_STATIC_SYMBOL(int rte_distributor_returned_pkts(struct rte_distributor *d,
 
 /*
  * Return the number of packets in-flight in a distributor, i.e. packets
- * being workered on or queued up in a backlog.
+ * being worked on or queued up in a backlog.
  */
 static inline unsigned int
 total_outstanding(const struct rte_distributor *d)
@@ -570,6 +572,9 @@ rte_distributor_flush_v1705(struct rte_distributor *d)
 	while (total_outstanding(d) > 0)
 		rte_distributor_process(d, NULL, 0);
 
+	/* wait 10ms to allow all workers to drain the pkts */
+	rte_delay_us(10000);
+
 	/*
 	 * Send empty burst to all workers to allow them to exit
 	 * gracefully, should they need to.
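Note: rte_distributor_flush() above can only drain once every worker keeps servicing its cache line and eventually returns what it holds. A worker loop that satisfies that contract, sketched against the public API (the argument struct, burst size, and exit flag are illustrative, not part of the API):

#include <rte_distributor.h>
#include <rte_mbuf.h>

#define BURST 8

struct worker_args {	/* illustrative helper struct */
	struct rte_distributor *d;
	unsigned int id;	/* worker id in [0, num_workers) */
};

static volatile int quit;

static int
lcore_worker(void *arg)
{
	struct worker_args *w = arg;
	struct rte_mbuf *bufs[BURST];
	int n = 0;

	while (!quit) {
		/* Hand back the previous burst and block until the
		 * distributor releases a fresh cache line for us.
		 */
		n = rte_distributor_get_pkt(w->d, w->id, bufs, bufs, n);
		/* ... process the n mbufs in bufs ... */
	}
	/* Return anything still held so flush sees zero outstanding. */
	rte_distributor_return_pkt(w->d, w->id, bufs, n);
	return 0;
}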
@@ -599,7 +604,9 @@ rte_distributor_clear_returns_v1705(struct rte_distributor *d) /* throw away returns, so workers can exit */ for (wkr = 0; wkr < d->num_workers; wkr++) - d->bufs[wkr].retptr64[0] = 0; + /* Sync with worker. Release retptrs. */ + __atomic_store_n(&(d->bufs[wkr].retptr64[0]), 0, + __ATOMIC_RELEASE); } BIND_DEFAULT_SYMBOL(rte_distributor_clear_returns, _v1705, 17.05); MAP_STATIC_SYMBOL(void rte_distributor_clear_returns(struct rte_distributor *d), @@ -624,6 +631,12 @@ rte_distributor_create_v1705(const char *name, RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0); RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0); + if (name == NULL || num_workers >= + (unsigned int)RTE_MIN(RTE_DISTRIB_MAX_WORKERS, RTE_MAX_LCORE)) { + rte_errno = EINVAL; + return NULL; + } + if (alg_type == RTE_DIST_ALG_SINGLE) { d = malloc(sizeof(struct rte_distributor)); if (d == NULL) { @@ -641,11 +654,6 @@ rte_distributor_create_v1705(const char *name, return d; } - if (name == NULL || num_workers >= RTE_DISTRIB_MAX_WORKERS) { - rte_errno = EINVAL; - return NULL; - } - snprintf(mz_name, sizeof(mz_name), RTE_DISTRIB_PREFIX"%s", name); mz = rte_memzone_reserve(mz_name, sizeof(*d), socket_id, NO_FLAGS); if (mz == NULL) { @@ -654,7 +662,7 @@ rte_distributor_create_v1705(const char *name, } d = mz->addr; - snprintf(d->name, sizeof(d->name), "%s", name); + strlcpy(d->name, name, sizeof(d->name)); d->num_workers = num_workers; d->alg_type = alg_type; @@ -664,7 +672,7 @@ rte_distributor_create_v1705(const char *name, #endif /* - * Set up the backog tags so they're pointing at the second cache + * Set up the backlog tags so they're pointing at the second cache * line for performance during flow matching */ for (i = 0 ; i < num_workers ; i++) @@ -674,9 +682,9 @@ rte_distributor_create_v1705(const char *name, rte_dist_burst_list); - rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_write_lock(); TAILQ_INSERT_TAIL(dist_burst_list, d, next); - rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_write_unlock(); return d; }
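Note: with the argument check moved ahead of the algorithm split, both RTE_DIST_ALG_SINGLE and RTE_DIST_ALG_BURST now reject a NULL name or a worker count at or above RTE_MIN(RTE_DISTRIB_MAX_WORKERS, RTE_MAX_LCORE). Creation-side usage under the new check, as a brief sketch (the name and error handling are illustrative):

#include <stdio.h>

#include <rte_distributor.h>
#include <rte_errno.h>
#include <rte_lcore.h>

static struct rte_distributor *
make_distributor(unsigned int num_workers)
{
	struct rte_distributor *d;

	d = rte_distributor_create("pkt_dist", rte_socket_id(),
			num_workers, RTE_DIST_ALG_BURST);
	if (d == NULL)	/* rte_errno is EINVAL for a bad name/num_workers */
		fprintf(stderr, "distributor create failed: %s\n",
				rte_strerror(rte_errno));
	return d;
}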