/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2014 Intel Corporation
+ * Copyright(c) 2020 Arm Limited
*/
#include <string.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_malloc.h>
#include <rte_ring.h>
static const int esize[] = {-1, 4, 8, 16, 20};
+/* Wrappers around the zero-copy APIs. The wrappers match
+ * the normal enqueue/dequeue API declarations.
+ */
+/*
+ * Legacy-API-shaped wrapper around the zero-copy bulk enqueue:
+ * reserve space with _start(), copy the object pointers in, then
+ * commit with _finish(). Returns the number of objects enqueued
+ * (n or 0 for the bulk variant), like rte_ring_enqueue_bulk().
+ */
+static unsigned int
+test_ring_enqueue_zc_bulk(struct rte_ring *r, void * const *obj_table,
+	unsigned int n, unsigned int *free_space)
+{
+	/* unsigned int, consistent with the other ZC wrappers (was uint32_t) */
+	unsigned int ret;
+	struct rte_ring_zc_data zcd;
+
+	ret = rte_ring_enqueue_zc_bulk_start(r, n, &zcd, free_space);
+	if (ret != 0) {
+		/* Copy the data to the ring */
+		test_ring_copy_to(&zcd, obj_table, sizeof(void *), ret);
+		rte_ring_enqueue_zc_finish(r, ret);
+	}
+
+	return ret;
+}
+
+/* Zero-copy bulk enqueue of esize-byte elements, presented with the
+ * signature of rte_ring_enqueue_bulk_elem().
+ */
+static unsigned int
+test_ring_enqueue_zc_bulk_elem(struct rte_ring *r, const void *obj_table,
+	unsigned int esize, unsigned int n, unsigned int *free_space)
+{
+	struct rte_ring_zc_data zcd;
+	unsigned int num;
+
+	num = rte_ring_enqueue_zc_bulk_elem_start(r, esize, n, &zcd,
+		free_space);
+	if (num == 0)
+		return 0;
+
+	/* Space reserved - copy the elements in and commit them */
+	test_ring_copy_to(&zcd, obj_table, esize, num);
+	rte_ring_enqueue_zc_finish(r, num);
+
+	return num;
+}
+
+/* Zero-copy burst enqueue of object pointers, presented with the
+ * signature of rte_ring_enqueue_burst().
+ */
+static unsigned int
+test_ring_enqueue_zc_burst(struct rte_ring *r, void * const *obj_table,
+	unsigned int n, unsigned int *free_space)
+{
+	struct rte_ring_zc_data zcd;
+	unsigned int num;
+
+	num = rte_ring_enqueue_zc_burst_start(r, n, &zcd, free_space);
+	if (num == 0)
+		return 0;
+
+	/* Space reserved - copy the pointers in and commit them */
+	test_ring_copy_to(&zcd, obj_table, sizeof(void *), num);
+	rte_ring_enqueue_zc_finish(r, num);
+
+	return num;
+}
+
+/* Zero-copy burst enqueue of esize-byte elements, presented with the
+ * signature of rte_ring_enqueue_burst_elem().
+ */
+static unsigned int
+test_ring_enqueue_zc_burst_elem(struct rte_ring *r, const void *obj_table,
+	unsigned int esize, unsigned int n, unsigned int *free_space)
+{
+	struct rte_ring_zc_data zcd;
+	unsigned int num;
+
+	num = rte_ring_enqueue_zc_burst_elem_start(r, esize, n, &zcd,
+		free_space);
+	if (num == 0)
+		return 0;
+
+	/* Space reserved - copy the elements in and commit them */
+	test_ring_copy_to(&zcd, obj_table, esize, num);
+	rte_ring_enqueue_zc_finish(r, num);
+
+	return num;
+}
+
+/* Zero-copy bulk dequeue of object pointers, presented with the
+ * signature of rte_ring_dequeue_bulk().
+ */
+static unsigned int
+test_ring_dequeue_zc_bulk(struct rte_ring *r, void **obj_table,
+	unsigned int n, unsigned int *available)
+{
+	struct rte_ring_zc_data zcd;
+	unsigned int num;
+
+	num = rte_ring_dequeue_zc_bulk_start(r, n, &zcd, available);
+	if (num == 0)
+		return 0;
+
+	/* Objects reserved - copy the pointers out and release them */
+	test_ring_copy_from(&zcd, obj_table, sizeof(void *), num);
+	rte_ring_dequeue_zc_finish(r, num);
+
+	return num;
+}
+
+/* Zero-copy bulk dequeue of esize-byte elements, presented with the
+ * signature of rte_ring_dequeue_bulk_elem().
+ */
+static unsigned int
+test_ring_dequeue_zc_bulk_elem(struct rte_ring *r, void *obj_table,
+	unsigned int esize, unsigned int n, unsigned int *available)
+{
+	struct rte_ring_zc_data zcd;
+	unsigned int num;
+
+	num = rte_ring_dequeue_zc_bulk_elem_start(r, esize, n, &zcd,
+		available);
+	if (num == 0)
+		return 0;
+
+	/* Elements reserved - copy them out and release them */
+	test_ring_copy_from(&zcd, obj_table, esize, num);
+	rte_ring_dequeue_zc_finish(r, num);
+
+	return num;
+}
+
+/* Zero-copy burst dequeue of object pointers, presented with the
+ * signature of rte_ring_dequeue_burst().
+ */
+static unsigned int
+test_ring_dequeue_zc_burst(struct rte_ring *r, void **obj_table,
+	unsigned int n, unsigned int *available)
+{
+	struct rte_ring_zc_data zcd;
+	unsigned int num;
+
+	num = rte_ring_dequeue_zc_burst_start(r, n, &zcd, available);
+	if (num == 0)
+		return 0;
+
+	/* Objects reserved - copy the pointers out and release them */
+	test_ring_copy_from(&zcd, obj_table, sizeof(void *), num);
+	rte_ring_dequeue_zc_finish(r, num);
+
+	return num;
+}
+
+/* Zero-copy burst dequeue of esize-byte elements, presented with the
+ * signature of rte_ring_dequeue_burst_elem().
+ */
+static unsigned int
+test_ring_dequeue_zc_burst_elem(struct rte_ring *r, void *obj_table,
+	unsigned int esize, unsigned int n, unsigned int *available)
+{
+	struct rte_ring_zc_data zcd;
+	unsigned int num;
+
+	num = rte_ring_dequeue_zc_burst_elem_start(r, esize, n, &zcd,
+		available);
+	if (num == 0)
+		return 0;
+
+	/* Elements reserved - copy them out and release them */
+	test_ring_copy_from(&zcd, obj_table, esize, num);
+	rte_ring_dequeue_zc_finish(r, num);
+
+	return num;
+}
+
static const struct {
const char *desc;
uint32_t api_type;
.felem = rte_ring_dequeue_burst_elem,
},
},
+ {
+ .desc = "SP/SC sync mode (ZC)",
+ .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_SPSC,
+ .create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
+ .enq = {
+ .flegacy = test_ring_enqueue_zc_bulk,
+ .felem = test_ring_enqueue_zc_bulk_elem,
+ },
+ .deq = {
+ .flegacy = test_ring_dequeue_zc_bulk,
+ .felem = test_ring_dequeue_zc_bulk_elem,
+ },
+ },
+ {
+ .desc = "MP_HTS/MC_HTS sync mode (ZC)",
+ .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
+ .create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
+ .enq = {
+ .flegacy = test_ring_enqueue_zc_bulk,
+ .felem = test_ring_enqueue_zc_bulk_elem,
+ },
+ .deq = {
+ .flegacy = test_ring_dequeue_zc_bulk,
+ .felem = test_ring_dequeue_zc_bulk_elem,
+ },
+ },
+ {
+ .desc = "SP/SC sync mode (ZC)",
+ .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_SPSC,
+ .create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
+ .enq = {
+ .flegacy = test_ring_enqueue_zc_burst,
+ .felem = test_ring_enqueue_zc_burst_elem,
+ },
+ .deq = {
+ .flegacy = test_ring_dequeue_zc_burst,
+ .felem = test_ring_dequeue_zc_burst_elem,
+ },
+ },
+ {
+ .desc = "MP_HTS/MC_HTS sync mode (ZC)",
+ .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
+ .create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
+ .enq = {
+ .flegacy = test_ring_enqueue_zc_burst,
+ .felem = test_ring_enqueue_zc_burst_elem,
+ },
+ .deq = {
+ .flegacy = test_ring_dequeue_zc_burst,
+ .felem = test_ring_dequeue_zc_burst_elem,
+ },
+ }
};
static unsigned int
NULL);
}
-static void**
-test_ring_inc_ptr(void **obj, int esize, unsigned int n)
-{
- /* Legacy queue APIs? */
- if (esize == -1)
- return ((void **)obj) + n;
- else
- return (void **)(((uint32_t *)obj) +
- (n * esize / sizeof(uint32_t)));
-}
-
static void
test_ring_mem_init(void *obj, unsigned int count, int esize)
{
ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
test_idx);
TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
- cur_src = test_ring_inc_ptr(cur_src, esize[i], MAX_BULK);
printf("dequeue 1 obj\n");
ret = test_ring_deq_impl(r, cur_dst, esize[i], 1, test_idx);
ret = test_ring_enqueue(rp, cur_src, esize[i], 2,
TEST_RING_THREAD_DEF | TEST_RING_ELEM_BULK);
TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
- cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);
ret = test_ring_dequeue(rp, cur_dst, esize[i], 2,
TEST_RING_THREAD_DEF | TEST_RING_ELEM_BULK);
ret = test_ring_enqueue(exact_sz_r, cur_src, esize[i], 1,
TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
TEST_RING_VERIFY(ret != -ENOBUFS, exact_sz_r, goto test_fail);
- cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);
/* check that dequeue returns the expected number of elements */
ret = test_ring_dequeue(exact_sz_r, cur_dst, esize[i], ring_sz,