doc: fix description of versioning macros
diff --git a/lib/librte_distributor/rte_distributor_v20.c b/lib/librte_distributor/rte_distributor_v20.c
index 5be6efd..db6c492 100644
--- a/lib/librte_distributor/rte_distributor_v20.c
+++ b/lib/librte_distributor/rte_distributor_v20.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
  */
 
 #include <stdio.h>
 #include <rte_memory.h>
 #include <rte_memzone.h>
 #include <rte_errno.h>
-#include <rte_compat.h>
+#include <rte_function_versioning.h>
 #include <rte_string_fns.h>
 #include <rte_eal_memconfig.h>
 #include <rte_pause.h>
+#include <rte_tailq.h>
 
 #include "rte_distributor_v20.h"
-#include "rte_distributor_private.h"
+#include "distributor_private.h"
 
 TAILQ_HEAD(rte_distributor_list, rte_distributor_v20);
 
@@ -62,9 +34,12 @@ rte_distributor_request_pkt_v20(struct rte_distributor_v20 *d,
        union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
        int64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
                        | RTE_DISTRIB_GET_BUF;
-       while (unlikely(buf->bufptr64 & RTE_DISTRIB_FLAGS_MASK))
+       while (unlikely(__atomic_load_n(&buf->bufptr64, __ATOMIC_RELAXED)
+                       & RTE_DISTRIB_FLAGS_MASK))
                rte_pause();
-       buf->bufptr64 = req;
+
+       /* Sync with distributor on GET_BUF flag. */
+       __atomic_store_n(&(buf->bufptr64), req, __ATOMIC_RELEASE);
 }
 VERSION_SYMBOL(rte_distributor_request_pkt, _v20, 2.0);
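
This hunk (and the ones below) replaces plain reads and writes of bufptr64 with GCC __atomic builtins, so the handshake on the GET_BUF flag has explicit acquire/release ordering. A minimal standalone sketch of that pairing, using the same builtins; the GET_BUF value, payload and thread setup here are illustrative, not DPDK code:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GET_BUF 1                 /* stand-in for RTE_DISTRIB_GET_BUF */

    static int64_t bufptr64;          /* shared word: payload << 1 | flag */

    static void *worker(void *arg)
    {
            (void)arg;
            /* Publish a request: RELEASE ordering makes the payload bits
             * visible to whoever later observes the flag with ACQUIRE. */
            int64_t req = (42 << 1) | GET_BUF;
            __atomic_store_n(&bufptr64, req, __ATOMIC_RELEASE);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;
            pthread_create(&t, NULL, worker, NULL);

            /* Distributor side: spin until the flag appears; the ACQUIRE
             * load pairs with the worker's RELEASE store, so the payload
             * read from the same word is the one stored above. */
            int64_t data;
            do
                    data = __atomic_load_n(&bufptr64, __ATOMIC_ACQUIRE);
            while (!(data & GET_BUF));

            printf("payload = %lld\n", (long long)(data >> 1));
            pthread_join(t, NULL);
            return 0;
    }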
 
@@ -73,7 +48,9 @@ rte_distributor_poll_pkt_v20(struct rte_distributor_v20 *d,
                unsigned worker_id)
 {
        union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
-       if (buf->bufptr64 & RTE_DISTRIB_GET_BUF)
+       /* Sync with distributor. Acquire bufptr64. */
+       if (__atomic_load_n(&buf->bufptr64, __ATOMIC_ACQUIRE)
+               & RTE_DISTRIB_GET_BUF)
                return NULL;
 
        /* since bufptr64 is signed, this should be an arithmetic shift */
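
The context line above notes that decoding relies on an arithmetic shift: the pointer travels in the upper bits of the signed 64-bit word and the flag bits are shifted away on the worker side. A small self-contained illustration of that packing, with FLAG_BITS and GET_BUF as stand-ins for the real RTE_DISTRIB_* constants:

    #include <stdint.h>
    #include <stdio.h>

    #define FLAG_BITS 2               /* stand-in for RTE_DISTRIB_FLAG_BITS */
    #define GET_BUF   (1 << 0)

    int main(void)
    {
            int buf = 0;              /* pretend mbuf */
            int64_t word = (((int64_t)(uintptr_t)&buf) << FLAG_BITS) | GET_BUF;

            /* Signed >> drops the flag bits and, for the canonical user-space
             * addresses DPDK deals with, reproduces the original pointer. */
            void *decoded = (void *)(uintptr_t)(word >> FLAG_BITS);

            printf("pointers match: %d\n", decoded == (void *)&buf);
            return 0;
    }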
@@ -101,7 +78,8 @@ rte_distributor_return_pkt_v20(struct rte_distributor_v20 *d,
        union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
        uint64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
                        | RTE_DISTRIB_RETURN_BUF;
-       buf->bufptr64 = req;
+       /* Sync with distributor on RETURN_BUF flag. */
+       __atomic_store_n(&(buf->bufptr64), req, __ATOMIC_RELEASE);
        return 0;
 }
 VERSION_SYMBOL(rte_distributor_return_pkt, _v20, 2.0);
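
For context on the rte_compat.h to rte_function_versioning.h change above: VERSION_SYMBOL() keeps this _v20 implementation exported under ABI version 2.0, while a newer implementation is bound as the default symbol elsewhere in the library. A minimal sketch of that pattern, assuming a shared-library build with a matching version map; dummy_func and the 17.05 version are illustrative only:

    #include <rte_function_versioning.h>

    /* Old behaviour, kept for applications linked against ABI 2.0. */
    int dummy_func_v20(int x);
    int dummy_func_v20(int x) { return x; }
    VERSION_SYMBOL(dummy_func, _v20, 2.0);

    /* Current behaviour, bound as the default symbol for new links. */
    int dummy_func_v1705(int x);
    int dummy_func_v1705(int x) { return x + 1; }
    BIND_DEFAULT_SYMBOL(dummy_func, _v1705, 17.05);
    MAP_STATIC_SYMBOL(int dummy_func(int x), dummy_func_v1705);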
@@ -145,7 +123,8 @@ handle_worker_shutdown(struct rte_distributor_v20 *d, unsigned int wkr)
 {
        d->in_flight_tags[wkr] = 0;
        d->in_flight_bitmask &= ~(1UL << wkr);
-       d->bufs[wkr].bufptr64 = 0;
+       /* Sync with worker. Release bufptr64. */
+       __atomic_store_n(&(d->bufs[wkr].bufptr64), 0, __ATOMIC_RELEASE);
        if (unlikely(d->backlog[wkr].count != 0)) {
                /* On return of a packet, we need to move the
                 * queued packets for this core elsewhere.
@@ -189,17 +168,23 @@ process_returns(struct rte_distributor_v20 *d)
                        ret_count = d->returns.count;
 
        for (wkr = 0; wkr < d->num_workers; wkr++) {
-
-               const int64_t data = d->bufs[wkr].bufptr64;
                uintptr_t oldbuf = 0;
+               /* Sync with worker. Acquire bufptr64. */
+               const int64_t data = __atomic_load_n(&(d->bufs[wkr].bufptr64),
+                                                       __ATOMIC_ACQUIRE);
 
                if (data & RTE_DISTRIB_GET_BUF) {
                        flushed++;
                        if (d->backlog[wkr].count)
-                               d->bufs[wkr].bufptr64 =
-                                               backlog_pop(&d->backlog[wkr]);
+                               /* Sync with worker. Release bufptr64. */
+                               __atomic_store_n(&(d->bufs[wkr].bufptr64),
+                                       backlog_pop(&d->backlog[wkr]),
+                                       __ATOMIC_RELEASE);
                        else {
-                               d->bufs[wkr].bufptr64 = RTE_DISTRIB_GET_BUF;
+                               /* Sync with worker on GET_BUF flag. */
+                               __atomic_store_n(&(d->bufs[wkr].bufptr64),
+                                       RTE_DISTRIB_GET_BUF,
+                                       __ATOMIC_RELEASE);
                                d->in_flight_tags[wkr] = 0;
                                d->in_flight_bitmask &= ~(1UL << wkr);
                        }
@@ -235,9 +220,10 @@ rte_distributor_process_v20(struct rte_distributor_v20 *d,
                return process_returns(d);
 
        while (next_idx < num_mbufs || next_mb != NULL) {
-
-               int64_t data = d->bufs[wkr].bufptr64;
                uintptr_t oldbuf = 0;
+               /* Sync with worker. Acquire bufptr64. */
+               int64_t data = __atomic_load_n(&(d->bufs[wkr].bufptr64),
+                                               __ATOMIC_ACQUIRE);
 
                if (!next_mb) {
                        next_mb = mbufs[next_idx++];
@@ -283,11 +269,16 @@ rte_distributor_process_v20(struct rte_distributor_v20 *d,
                                (d->backlog[wkr].count || next_mb)) {
 
                        if (d->backlog[wkr].count)
-                               d->bufs[wkr].bufptr64 =
-                                               backlog_pop(&d->backlog[wkr]);
+                               /* Sync with worker. Release bufptr64. */
+                               __atomic_store_n(&(d->bufs[wkr].bufptr64),
+                                               backlog_pop(&d->backlog[wkr]),
+                                               __ATOMIC_RELEASE);
 
                        else {
-                               d->bufs[wkr].bufptr64 = next_value;
+                               /* Sync with worker. Release bufptr64.  */
+                               __atomic_store_n(&(d->bufs[wkr].bufptr64),
+                                               next_value,
+                                               __ATOMIC_RELEASE);
                                d->in_flight_tags[wkr] = new_tag;
                                d->in_flight_bitmask |= (1UL << wkr);
                                next_mb = NULL;
@@ -308,13 +299,19 @@ rte_distributor_process_v20(struct rte_distributor_v20 *d,
         * if they are ready */
        for (wkr = 0; wkr < d->num_workers; wkr++)
                if (d->backlog[wkr].count &&
-                               (d->bufs[wkr].bufptr64 & RTE_DISTRIB_GET_BUF)) {
+                               /* Sync with worker. Acquire bufptr64. */
+                               (__atomic_load_n(&(d->bufs[wkr].bufptr64),
+                               __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)) {
 
                        int64_t oldbuf = d->bufs[wkr].bufptr64 >>
                                        RTE_DISTRIB_FLAG_BITS;
+
                        store_return(oldbuf, d, &ret_start, &ret_count);
 
-                       d->bufs[wkr].bufptr64 = backlog_pop(&d->backlog[wkr]);
+                       /* Sync with worker. Release bufptr64. */
+                       __atomic_store_n(&(d->bufs[wkr].bufptr64),
+                               backlog_pop(&d->backlog[wkr]),
+                               __ATOMIC_RELEASE);
                }
 
        d->returns.start = ret_start;
@@ -415,15 +412,15 @@ rte_distributor_create_v20(const char *name,
        }
 
        d = mz->addr;
-       snprintf(d->name, sizeof(d->name), "%s", name);
+       strlcpy(d->name, name, sizeof(d->name));
        d->num_workers = num_workers;
 
        distributor_list = RTE_TAILQ_CAST(rte_distributor_tailq.head,
                                          rte_distributor_list);
 
-       rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+       rte_mcfg_tailq_write_lock();
        TAILQ_INSERT_TAIL(distributor_list, d, next);
-       rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+       rte_mcfg_tailq_write_unlock();
 
        return d;
 }
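
The snprintf to strlcpy change in the last hunk keeps the same truncating, always NUL-terminated copy of the name without going through format parsing. A short sketch of the semantics, assuming rte_string_fns.h is available; the buffer size and string are arbitrary:

    #include <stdio.h>
    #include <rte_string_fns.h>       /* rte_strlcpy(); maps strlcpy when libc lacks it */

    int main(void)
    {
            char name[8];

            /* Copies at most sizeof(name) - 1 bytes and NUL-terminates;
             * the return value is the length of the full source string,
             * so truncation can be detected by comparing it to the size. */
            size_t len = rte_strlcpy(name, "distributor_test", sizeof(name));

            printf("stored \"%s\" (source was %zu bytes long)\n", name, len);
            return 0;
    }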