/*-
* BSD LICENSE
*
- * Copyright(c) 2017 Cavium networks. All rights reserved.
+ * Copyright(c) 2017 Cavium, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
seqn_list_index = 0;
}
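+/* Ordering helpers: seqn_list_update() records the sequence number of each
+ * event consumed at the final pipeline stage, and seqn_list_check() verifies
+ * that the recorded values form the sequence 0..limit-1, i.e. that ingress
+ * order was preserved.
+ */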
+static inline int
+seqn_list_update(int val)
+{
+ if (seqn_list_index >= NUM_PACKETS)
+ return TEST_FAILED;
+
+ seqn_list[seqn_list_index++] = val;
+ rte_smp_wmb();
+ return TEST_SUCCESS;
+}
+
+static inline int
+seqn_list_check(int limit)
+{
+ int i;
+
+ for (i = 0; i < limit; i++) {
+ if (seqn_list[i] != i) {
+ printf("Seqn mismatch %d %d\n", seqn_list[i], i);
+ return TEST_FAILED;
+ }
+ }
+ return TEST_SUCCESS;
+}
struct test_core_param {
rte_atomic32_t *total_events;
if (evdev < 0) {
printf("%d: Eventdev %s not found - creating.\n",
__LINE__, eventdev_name);
- if (rte_eal_vdev_init(eventdev_name, NULL) < 0) {
+ if (rte_vdev_init(eventdev_name, NULL) < 0) {
printf("Error creating eventdev %s\n", eventdev_name);
return TEST_FAILED;
}
ret = rte_event_dev_configure(evdev, &dev_conf);
TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+
if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
+ if (queue_count > 8) {
+ printf("test expects the unique priority per queue\n");
+ return -ENOTSUP;
+ }
+
/* Configure event queues(0 to n) with
* RTE_EVENT_DEV_PRIORITY_HIGHEST to
* RTE_EVENT_DEV_PRIORITY_LOWEST
*/
uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
- rte_event_queue_count(evdev);
- for (i = 0; i < rte_event_queue_count(evdev); i++) {
+ queue_count;
+ for (i = 0; i < (int)queue_count; i++) {
struct rte_event_queue_conf queue_conf;
ret = rte_event_queue_default_conf_get(evdev, i,
} else {
/* Configure event queues with default priority */
- for (i = 0; i < rte_event_queue_count(evdev); i++) {
+ for (i = 0; i < (int)queue_count; i++) {
ret = rte_event_queue_setup(evdev, i, NULL);
TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", i);
}
}
/* Configure event ports */
- for (i = 0; i < rte_event_port_count(evdev); i++) {
+ uint32_t port_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &port_count), "Port count get failed");
+ for (i = 0; i < (int)port_count; i++) {
ret = rte_event_port_setup(evdev, i, NULL);
TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
}
+static inline int
+eventdev_setup_dequeue_timeout(void)
+{
+ return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
+}
+
static inline void
eventdev_teardown(void)
{
unsigned int i;
int ret;
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+
ret = rte_event_dev_info_get(evdev, &info);
TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
for (i = 0; i < total_events; i++) {
rte_rand() % (RTE_EVENT_TYPE_CPU + 1) /* event_type */,
rte_rand() % 256 /* sub_event_type */,
rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
- rte_rand() % rte_event_queue_count(evdev) /* queue */,
+ rte_rand() % queue_count /* queue */,
0 /* port */,
1 /* events */);
if (ret)
}
/*
- * Inject 0..MAX_EVENTS events over 0..rte_event_queue_count() with modulus
+ * Inject 0..MAX_EVENTS events over 0..queue_count with modulus
* operation
*
* For example, Inject 32 events over 0..7 queues
static int
validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
{
- uint32_t range = MAX_EVENTS / rte_event_queue_count(evdev);
- uint32_t expected_val = (index % range) * rte_event_queue_count(evdev);
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+ uint32_t range = MAX_EVENTS / queue_count;
+ uint32_t expected_val = (index % range) * queue_count;
expected_val += ev->queue_id;
RTE_SET_USED(port);
TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
"seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
ev->mbuf->seqn, index, expected_val, range,
- rte_event_queue_count(evdev), MAX_EVENTS);
+ queue_count, MAX_EVENTS);
return 0;
}
int i, max_evts_roundoff;
/* See validate_queue_priority() comments for priority validate logic */
- max_evts_roundoff = MAX_EVENTS / rte_event_queue_count(evdev);
- max_evts_roundoff *= rte_event_queue_count(evdev);
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+ max_evts_roundoff = MAX_EVENTS / queue_count;
+ max_evts_roundoff *= queue_count;
for (i = 0; i < max_evts_roundoff; i++) {
struct rte_event ev = {.event = 0, .u64 = 0};
TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
m->seqn = i;
- queue = i % rte_event_queue_count(evdev);
+ queue = i % queue_count;
update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
rte_event_enqueue_burst(evdev, 0, &ev, 1);
test_multi_queue_enq_multi_port_deq(void)
{
const unsigned int total_events = MAX_EVENTS;
- uint8_t nr_ports;
+ uint32_t nr_ports;
int ret;
ret = generate_random_events(total_events);
if (ret)
return TEST_FAILED;
- nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &nr_ports), "Port count get failed");
+ nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
if (!nr_ports) {
printf("%s: Not enough ports=%d or workers=%d\n", __func__,
- rte_event_port_count(evdev), rte_lcore_count() - 1);
+ nr_ports, rte_lcore_count() - 1);
return TEST_SUCCESS;
}
{
int i, nr_links, ret;
+ uint32_t port_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &port_count), "Port count get failed");
+
/* Unlink all connections that were created in eventdev_setup */
- for (i = 0; i < rte_event_port_count(evdev); i++) {
+ for (i = 0; i < (int)port_count; i++) {
ret = rte_event_port_unlink(evdev, i, NULL, 0);
TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d", i);
}
- nr_links = RTE_MIN(rte_event_port_count(evdev),
- rte_event_queue_count(evdev));
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+
+ nr_links = RTE_MIN(port_count, queue_count);
const unsigned int total_events = MAX_EVENTS / nr_links;
/* Link queue x to port x and inject events to queue x through port x */
test_queue_to_port_multi_link(void)
{
int ret, port0_events = 0, port1_events = 0;
- uint8_t nr_queues, nr_ports, queue, port;
+ uint8_t queue, port;
+ uint32_t nr_queues = 0;
+ uint32_t nr_ports = 0;
+
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &nr_queues), "Queue count get failed");
- nr_queues = rte_event_queue_count(evdev);
- nr_ports = rte_event_port_count(evdev);
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &nr_ports), "Port count get failed");
if (nr_ports < 2) {
printf("%s: Not enough ports to test ports=%d\n",
return TEST_SUCCESS;
}
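+/* Worker for a two stage, flow (sub_event_type) based pipeline: stage 0
+ * events are forwarded to stage 1 on the same queue with the requested
+ * sched type and a single flow id; stage 1 events are consumed, their mbuf
+ * seqn recorded and the mbuf freed.
+ */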
+static int
+worker_flow_based_pipeline(void *arg)
+{
+ struct test_core_param *param = arg;
+ struct rte_event ev;
+ uint16_t valid_event;
+ uint8_t port = param->port;
+ uint8_t new_sched_type = param->sched_type;
+ rte_atomic32_t *total_events = param->total_events;
+ uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
+ dequeue_tmo_ticks);
+ if (!valid_event)
+ continue;
+
+ /* Events from stage 0 */
+ if (ev.sub_event_type == 0) {
+ /* Move to atomic flow to maintain the ordering */
+ ev.flow_id = 0x2;
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sub_event_type = 1; /* stage 1 */
+ ev.sched_type = new_sched_type;
+ ev.op = RTE_EVENT_OP_FORWARD;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ } else if (ev.sub_event_type == 1) { /* Events from stage 1 */
+ if (seqn_list_update(ev.mbuf->seqn) == TEST_SUCCESS) {
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ } else {
+ printf("Failed to update seqn_list\n");
+ return TEST_FAILED;
+ }
+ } else {
+ printf("Invalid ev.sub_event_type = %d\n",
+ ev.sub_event_type);
+ return TEST_FAILED;
+ }
+ }
+ return 0;
+}
+
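+/* Inject MAX_EVENTS at stage 0 with in_sched_type and let the workers
+ * forward them to stage 1 with out_sched_type; when the egress stage is
+ * atomic and the ingress stage is not parallel, verify that ingress order
+ * was preserved.
+ */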
+static int
+test_multiport_flow_sched_type_test(uint8_t in_sched_type,
+ uint8_t out_sched_type)
+{
+ const unsigned int total_events = MAX_EVENTS;
+ uint32_t nr_ports;
+ int ret;
+
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &nr_ports), "Port count get failed");
+ nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+ if (!nr_ports) {
+ printf("%s: Not enough ports=%d or workers=%d\n", __func__,
+ nr_ports, rte_lcore_count() - 1);
+ return TEST_SUCCESS;
+ }
+
+ /* Inject events with m->seqn ranging from 0 to total_events - 1 */
+ ret = inject_events(
+ 0x1 /* flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ 0 /* sub_event_type (stage 0) */,
+ in_sched_type,
+ 0 /* queue */,
+ 0 /* port */,
+ total_events /* events */);
+ if (ret)
+ return TEST_FAILED;
+
+ ret = launch_workers_and_wait(worker_flow_based_pipeline,
+ worker_flow_based_pipeline,
+ total_events, nr_ports, out_sched_type);
+ if (ret)
+ return TEST_FAILED;
+
+ if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
+ out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ /* Check whether the event order was maintained */
+ return seqn_list_check(total_events);
+ }
+ return TEST_SUCCESS;
+}
+
+/* Multi port ordered to atomic transaction */
+static int
+test_multi_port_flow_ordered_to_atomic(void)
+{
+ /* Ingress event order test */
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_flow_ordered_to_ordered(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_flow_ordered_to_parallel(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_flow_atomic_to_atomic(void)
+{
+ /* Ingress event order test */
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_flow_atomic_to_ordered(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_flow_atomic_to_parallel(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_flow_parallel_to_atomic(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_flow_parallel_to_ordered(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_flow_parallel_to_parallel(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
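+/* Worker for a two stage, queue (group) based pipeline: events from queue 0
+ * are forwarded to queue 1 with the requested sched type and a single flow
+ * id; events from queue 1 are consumed, their mbuf seqn recorded and the
+ * mbuf freed.
+ */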
+static int
+worker_group_based_pipeline(void *arg)
+{
+ struct test_core_param *param = arg;
+ struct rte_event ev;
+ uint16_t valid_event;
+ uint8_t port = param->port;
+ uint8_t new_sched_type = param->sched_type;
+ rte_atomic32_t *total_events = param->total_events;
+ uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
+ dequeue_tmo_ticks);
+ if (!valid_event)
+ continue;
+
+ /* Events from stage 0 (group 0) */
+ if (ev.queue_id == 0) {
+ /* Move to atomic flow to maintain the ordering */
+ ev.flow_id = 0x2;
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sched_type = new_sched_type;
+ ev.queue_id = 1; /* Stage 1 */
+ ev.op = RTE_EVENT_OP_FORWARD;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ } else if (ev.queue_id == 1) { /* Events from stage 1 (group 1) */
+ if (seqn_list_update(ev.mbuf->seqn) == TEST_SUCCESS) {
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ } else {
+ printf("Failed to update seqn_list\n");
+ return TEST_FAILED;
+ }
+ } else {
+ printf("Invalid ev.queue_id = %d\n", ev.queue_id);
+ return TEST_FAILED;
+ }
+ }
+
+ return 0;
+}
+
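+/* Same as the flow based variant above, but the two stages are mapped to
+ * event queues 0 and 1, so the test needs at least two configured queues.
+ */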
+static int
+test_multiport_queue_sched_type_test(uint8_t in_sched_type,
+ uint8_t out_sched_type)
+{
+ const unsigned int total_events = MAX_EVENTS;
+ uint32_t nr_ports;
+ int ret;
+
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &nr_ports), "Port count get failed");
+
+ nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+ if (queue_count < 2 || !nr_ports) {
+ printf("%s: Not enough queues=%d ports=%d or workers=%d\n",
+ __func__, queue_count, nr_ports,
+ rte_lcore_count() - 1);
+ return TEST_SUCCESS;
+ }
+
+ /* Inject events with m->seqn ranging from 0 to total_events - 1 */
+ ret = inject_events(
+ 0x1 /* flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ 0 /* sub_event_type (stage 0) */,
+ in_sched_type,
+ 0 /* queue */,
+ 0 /* port */,
+ total_events /* events */);
+ if (ret)
+ return TEST_FAILED;
+
+ ret = launch_workers_and_wait(worker_group_based_pipeline,
+ worker_group_based_pipeline,
+ total_events, nr_ports, out_sched_type);
+ if (ret)
+ return TEST_FAILED;
+
+ if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
+ out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ /* Check whether the event order was maintained */
+ return seqn_list_check(total_events);
+ }
+ return TEST_SUCCESS;
+}
+
+static int
+test_multi_port_queue_ordered_to_atomic(void)
+{
+ /* Ingress event order test */
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_queue_ordered_to_ordered(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_queue_ordered_to_parallel(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_queue_atomic_to_atomic(void)
+{
+ /* Ingress event order test */
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_queue_atomic_to_ordered(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_queue_atomic_to_parallel(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_queue_parallel_to_atomic(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_queue_parallel_to_ordered(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_queue_parallel_to_parallel(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
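+/* Worker that forwards each event through 256 flow based stages
+ * (sub_event_type 0..255), picking a random sched type at every hop, and
+ * consumes the event at the last stage.
+ */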
+static int
+worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
+{
+ struct test_core_param *param = arg;
+ struct rte_event ev;
+ uint16_t valid_event;
+ uint8_t port = param->port;
+ rte_atomic32_t *total_events = param->total_events;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ if (ev.sub_event_type == 255) { /* last stage */
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ } else {
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sub_event_type++;
+ ev.sched_type =
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
+ ev.op = RTE_EVENT_OP_FORWARD;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ }
+ }
+ return 0;
+}
+
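+/* Inject MAX_EVENTS with a random sched type at stage 0 and run the given
+ * worker on all available ports; the 0xff sched type passed to
+ * launch_workers_and_wait() is a don't-care here since every worker picks
+ * its own random sched type per stage.
+ */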
+static int
+launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
+{
+ uint32_t nr_ports;
+ int ret;
+
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &nr_ports), "Port count get failed");
+ nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+ if (!nr_ports) {
+ printf("%s: Not enough ports=%d or workers=%d\n", __func__,
+ nr_ports, rte_lcore_count() - 1);
+ return TEST_SUCCESS;
+ }
+
+ /* Inject MAX_EVENTS events with m->seqn ranging from 0 to MAX_EVENTS - 1 */
+ ret = inject_events(
+ 0x1 /* flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ 0 /* sub_event_type (stage 0) */,
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
+ 0 /* queue */,
+ 0 /* port */,
+ MAX_EVENTS /* events */);
+ if (ret)
+ return TEST_FAILED;
+
+ return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
+ 0xff /* invalid */);
+}
+
+/* Flow based pipeline with maximum stages with random sched type */
+static int
+test_multi_port_flow_max_stages_random_sched_type(void)
+{
+ return launch_multi_port_max_stages_random_sched_type(
+ worker_flow_based_pipeline_max_stages_rand_sched_type);
+}
+
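+/* Worker that forwards each event through all configured event queues
+ * (queue_id 0..nr_queues - 1), picking a random sched type at every hop,
+ * and consumes the event at the last queue.
+ */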
+static int
+worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
+{
+ struct test_core_param *param = arg;
+ struct rte_event ev;
+ uint16_t valid_event;
+ uint8_t port = param->port;
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+ uint8_t nr_queues = queue_count;
+ rte_atomic32_t *total_events = param->total_events;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ if (ev.queue_id == nr_queues - 1) { /* last stage */
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ } else {
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.queue_id++;
+ ev.sched_type =
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
+ ev.op = RTE_EVENT_OP_FORWARD;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ }
+ }
+ return 0;
+}
+
+/* Queue based pipeline with maximum stages with random sched type */
+static int
+test_multi_port_queue_max_stages_random_sched_type(void)
+{
+ return launch_multi_port_max_stages_random_sched_type(
+ worker_queue_based_pipeline_max_stages_rand_sched_type);
+}
+
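+/* Worker that forwards each event through all configured event queues while
+ * also randomizing the sub_event_type (flow stage) and sched type at every
+ * hop, and consumes the event at the last queue.
+ */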
+static int
+worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
+{
+ struct test_core_param *param = arg;
+ struct rte_event ev;
+ uint16_t valid_event;
+ uint8_t port = param->port;
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+ uint8_t nr_queues = queue_count;
+ rte_atomic32_t *total_events = param->total_events;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ if (ev.queue_id == nr_queues - 1) { /* Last stage */
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ } else {
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.queue_id++;
+ ev.sub_event_type = rte_rand() % 256;
+ ev.sched_type =
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
+ ev.op = RTE_EVENT_OP_FORWARD;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ }
+ }
+ return 0;
+}
+
+/* Queue and flow based pipeline with maximum stages with random sched type */
+static int
+test_multi_port_mixed_max_stages_random_sched_type(void)
+{
+ return launch_multi_port_max_stages_random_sched_type(
+ worker_mixed_pipeline_max_stages_rand_sched_type);
+}
+
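+/* Producer that allocates NUM_PACKETS mbufs, stamps them with increasing
+ * seqn values and enqueues them as new events on queue 0 with the ordered
+ * sched type, all on a single flow.
+ */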
+static int
+worker_ordered_flow_producer(void *arg)
+{
+ struct test_core_param *param = arg;
+ uint8_t port = param->port;
+ struct rte_mbuf *m;
+ int counter = 0;
+
+ while (counter < NUM_PACKETS) {
+ m = rte_pktmbuf_alloc(eventdev_test_mempool);
+ if (m == NULL)
+ continue;
+
+ m->seqn = counter++;
+
+ struct rte_event ev = {.event = 0, .u64 = 0};
+
+ ev.flow_id = 0x1; /* Generate a fat flow */
+ ev.sub_event_type = 0;
+ /* Inject the new event */
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sched_type = RTE_SCHED_TYPE_ORDERED;
+ ev.queue_id = 0;
+ ev.mbuf = m;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ }
+
+ return 0;
+}
+
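+/* Run the ordered flow producer together with the given consumer pipeline
+ * and verify, via seqn_list_check(), that the order in which the producer
+ * enqueued the packets is preserved at the atomic egress stage.
+ */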
+static inline int
+test_producer_consumer_ingress_order_test(int (*fn)(void *))
+{
+ uint32_t nr_ports;
+
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &nr_ports), "Port count get failed");
+ nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+ if (rte_lcore_count() < 3 || nr_ports < 2) {
+ printf("### Not enough cores for %s test.\n", __func__);
+ return TEST_SUCCESS;
+ }
+
+ launch_workers_and_wait(worker_ordered_flow_producer, fn,
+ NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
+ /* Check whether the event order was maintained */
+ return seqn_list_check(NUM_PACKETS);
+}
+
+/* Flow based producer consumer ingress order test */
+static int
+test_flow_producer_consumer_ingress_order_test(void)
+{
+ return test_producer_consumer_ingress_order_test(
+ worker_flow_based_pipeline);
+}
+
+/* Queue based producer consumer ingress order test */
+static int
+test_queue_producer_consumer_ingress_order_test(void)
+{
+ return test_producer_consumer_ingress_order_test(
+ worker_group_based_pipeline);
+}
+
static struct unit_test_suite eventdev_octeontx_testsuite = {
.suite_name = "eventdev octeontx unit test suite",
.setup = testsuite_setup,
test_queue_to_port_single_link),
TEST_CASE_ST(eventdev_setup, eventdev_teardown,
test_queue_to_port_multi_link),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_ordered_to_atomic),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_ordered_to_ordered),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_ordered_to_parallel),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_atomic_to_atomic),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_atomic_to_ordered),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_atomic_to_parallel),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_parallel_to_atomic),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_parallel_to_ordered),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_parallel_to_parallel),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_ordered_to_atomic),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_ordered_to_ordered),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_ordered_to_parallel),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_atomic_to_atomic),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_atomic_to_ordered),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_atomic_to_parallel),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_parallel_to_atomic),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_parallel_to_ordered),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_parallel_to_parallel),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_max_stages_random_sched_type),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_max_stages_random_sched_type),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_mixed_max_stages_random_sched_type),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_flow_producer_consumer_ingress_order_test),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_queue_producer_consumer_ingress_order_test),
+ /* Tests with dequeue timeout */
+ TEST_CASE_ST(eventdev_setup_dequeue_timeout, eventdev_teardown,
+ test_multi_port_flow_ordered_to_atomic),
+ TEST_CASE_ST(eventdev_setup_dequeue_timeout, eventdev_teardown,
+ test_multi_port_queue_ordered_to_atomic),
TEST_CASES_END() /**< NULL terminate unit test array */
}
};