/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Cavium networks. All rights reserved.
+ * Copyright(c) 2016 Cavium, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_eventdev.h>
-#include <rte_cryptodev.h>
+#include <rte_dev.h>
#include "test.h"
if (!count) {
printf("Failed to find a valid event device,"
" testing with event_skeleton device\n");
- return rte_eal_vdev_init("event_skeleton", NULL);
+ return rte_vdev_init("event_skeleton", NULL);
}
return TEST_SUCCESS;
}
return rte_event_dev_configure(TEST_DEV_ID, dev_conf);
}
-static void
-min_dequeue_limit(struct rte_event_dev_config *dev_conf,
- struct rte_event_dev_info *info)
-{
- dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns - 1;
-}
-
static void
max_dequeue_limit(struct rte_event_dev_config *dev_conf,
struct rte_event_dev_info *info)
TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
/* Check limits */
- TEST_ASSERT_EQUAL(-EINVAL,
- test_ethdev_config_run(&dev_conf, &info, min_dequeue_limit),
- "Config negative test failed");
TEST_ASSERT_EQUAL(-EINVAL,
test_ethdev_config_run(&dev_conf, &info, max_dequeue_limit),
"Config negative test failed");
ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, NULL);
TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
- for (i = 0; i < rte_event_queue_count(TEST_DEV_ID); i++) {
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+
+ for (i = 0; i < (int)queue_count; i++) {
ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
&qconf);
TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d info", i);
ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
TEST_ASSERT_SUCCESS(ret, "Failed to setup queue0");
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
- for (i = 0; i < rte_event_queue_count(TEST_DEV_ID); i++) {
+ for (i = 0; i < (int)queue_count; i++) {
ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
}
ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
- TEST_ASSERT_EQUAL(rte_event_queue_count(TEST_DEV_ID),
- info.max_event_queues, "Wrong queue count");
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+ TEST_ASSERT_EQUAL(queue_count, info.max_event_queues,
+ "Wrong queue count");
return TEST_SUCCESS;
}
static int
-test_eventdev_queue_priority(void)
+test_eventdev_queue_attr_priority(void)
{
int i, ret;
struct rte_event_dev_info info;
ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
- for (i = 0; i < rte_event_queue_count(TEST_DEV_ID); i++) {
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+
+ for (i = 0; i < (int)queue_count; i++) {
ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
&qconf);
TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d def conf", i);
TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
}
- for (i = 0; i < rte_event_queue_count(TEST_DEV_ID); i++) {
- priority = rte_event_queue_priority(TEST_DEV_ID, i);
+ for (i = 0; i < (int)queue_count; i++) {
+ uint32_t tmp;
+ TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
+ RTE_EVENT_QUEUE_ATTR_PRIORITY, &tmp),
+ "Queue priority get failed");
+ priority = tmp;
+
if (info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
TEST_ASSERT_EQUAL(priority,
i % RTE_EVENT_DEV_PRIORITY_LOWEST,
return TEST_SUCCESS;
}
+/* Verify that RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS reported by
+ * rte_event_queue_attr_get() matches the nb_atomic_flows value each
+ * queue was configured with. Returns -ENOTSUP (treated as "skip") when
+ * the PMD's default queue config advertises zero atomic flows.
+ */
+static int
+test_eventdev_queue_attr_nb_atomic_flows(void)
+{
+	int i, ret;
+	struct rte_event_dev_info info;
+	struct rte_event_queue_conf qconf;
+	uint32_t nb_atomic_flows;
+
+	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+
+	uint32_t queue_count;
+	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+			    "Queue count get failed");
+
+	/* Use queue 0's default config as the template for every queue */
+	ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get queue 0's def conf");
+
+	if (qconf.nb_atomic_flows == 0)
+		/* Assume PMD doesn't support atomic flows, return early */
+		return -ENOTSUP;
+
+	qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY;
+
+	/* Configure all queues as atomic-only with the same flow count */
+	for (i = 0; i < (int)queue_count; i++) {
+		ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
+		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
+	}
+
+	/* Read the attribute back and confirm it round-trips */
+	for (i = 0; i < (int)queue_count; i++) {
+		TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
+				    RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS,
+				    &nb_atomic_flows),
+				    "Queue nb_atomic_flows get failed");
+
+		TEST_ASSERT_EQUAL(nb_atomic_flows, qconf.nb_atomic_flows,
+				  "Wrong atomic flows value for queue%d", i);
+	}
+
+	return TEST_SUCCESS;
+}
+
+/* Verify that RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES reported
+ * by rte_event_queue_attr_get() matches the nb_atomic_order_sequences
+ * value each queue was configured with. Returns -ENOTSUP (treated as
+ * "skip") when the PMD's default queue config advertises no reordering
+ * support.
+ */
+static int
+test_eventdev_queue_attr_nb_atomic_order_sequences(void)
+{
+	int i, ret;
+	struct rte_event_dev_info info;
+	struct rte_event_queue_conf qconf;
+	uint32_t nb_atomic_order_sequences;
+
+	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+
+	uint32_t queue_count;
+	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+			    "Queue count get failed");
+
+	/* Use queue 0's default config as the template for every queue */
+	ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get queue 0's def conf");
+
+	if (qconf.nb_atomic_order_sequences == 0)
+		/* Assume PMD doesn't support reordering */
+		return -ENOTSUP;
+
+	qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY;
+
+	/* Configure all queues as ordered-only with the same depth */
+	for (i = 0; i < (int)queue_count; i++) {
+		ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
+		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
+	}
+
+	/* Read the attribute back and confirm it round-trips */
+	for (i = 0; i < (int)queue_count; i++) {
+		TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
+				 RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES,
+				 &nb_atomic_order_sequences),
+				 "Queue nb_atomic_order_sequences get failed");
+
+		TEST_ASSERT_EQUAL(nb_atomic_order_sequences,
+				  qconf.nb_atomic_order_sequences,
+				  "Wrong atomic order sequences value for queue%d",
+				  i);
+	}
+
+	return TEST_SUCCESS;
+}
+
+/* Verify that RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG reported by
+ * rte_event_queue_attr_get() matches the event_queue_cfg flags each
+ * queue was set up with (PARALLEL_ONLY is used, as every PMD must
+ * accept it without extra resource requirements).
+ */
+static int
+test_eventdev_queue_attr_event_queue_cfg(void)
+{
+	int i, ret;
+	struct rte_event_dev_info info;
+	struct rte_event_queue_conf qconf;
+	uint32_t event_queue_cfg;
+
+	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+
+	uint32_t queue_count;
+	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+			    "Queue count get failed");
+
+	/* Use queue 0's default config as the template for every queue */
+	ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 def conf");
+
+	qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY;
+
+	for (i = 0; i < (int)queue_count; i++) {
+		ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
+		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
+	}
+
+	/* Read the attribute back and confirm it round-trips */
+	for (i = 0; i < (int)queue_count; i++) {
+		TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
+				    RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG,
+				    &event_queue_cfg),
+				    "Queue event_queue_cfg get failed");
+
+		TEST_ASSERT_EQUAL(event_queue_cfg, qconf.event_queue_cfg,
+				  "Wrong event_queue_cfg value for queue%d",
+				  i);
+	}
+
+	return TEST_SUCCESS;
+}
+
static int
test_eventdev_port_default_conf_get(void)
{
ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, NULL);
TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+ uint32_t port_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &port_count), "Port count get failed");
+
ret = rte_event_port_default_conf_get(TEST_DEV_ID,
- rte_event_port_count(TEST_DEV_ID) + 1, NULL);
+ port_count + 1, NULL);
TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
- for (i = 0; i < rte_event_port_count(TEST_DEV_ID); i++) {
+ for (i = 0; i < (int)port_count; i++) {
ret = rte_event_port_default_conf_get(TEST_DEV_ID, i,
&pconf);
TEST_ASSERT_SUCCESS(ret, "Failed to get port%d info", i);
ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
+ uint32_t port_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &port_count), "Port count get failed");
- for (i = 0; i < rte_event_port_count(TEST_DEV_ID); i++) {
+ for (i = 0; i < (int)port_count; i++) {
ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
}
}
static int
-test_eventdev_dequeue_depth(void)
+test_eventdev_port_attr_dequeue_depth(void)
{
int ret;
struct rte_event_dev_info info;
ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
- TEST_ASSERT_EQUAL(rte_event_port_dequeue_depth(TEST_DEV_ID, 0),
- pconf.dequeue_depth, "Wrong port dequeue depth");
+ uint32_t value;
+ TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
+ RTE_EVENT_PORT_ATTR_DEQ_DEPTH, &value),
+ 0, "Call to get port dequeue depth failed");
+ TEST_ASSERT_EQUAL(value, pconf.dequeue_depth,
+ "Wrong port dequeue depth");
return TEST_SUCCESS;
}
static int
-test_eventdev_enqueue_depth(void)
+test_eventdev_port_attr_enqueue_depth(void)
{
int ret;
struct rte_event_dev_info info;
ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
- TEST_ASSERT_EQUAL(rte_event_port_enqueue_depth(TEST_DEV_ID, 0),
- pconf.enqueue_depth, "Wrong port enqueue depth");
+ uint32_t value;
+ TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
+ RTE_EVENT_PORT_ATTR_ENQ_DEPTH, &value),
+ 0, "Call to get port enqueue depth failed");
+ TEST_ASSERT_EQUAL(value, pconf.enqueue_depth,
+ "Wrong port enqueue depth");
+
+ return TEST_SUCCESS;
+}
+
+/* Verify that RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD reported by
+ * rte_event_port_attr_get() matches the new_event_threshold that
+ * port 0 was set up with.
+ */
+static int
+test_eventdev_port_attr_new_event_threshold(void)
+{
+	int ret;
+	struct rte_event_dev_info info;
+	struct rte_event_port_conf pconf;
+
+	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+
+	/* Set up port 0 with its default config, then read the attr back */
+	ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
+	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
+	TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
+
+	uint32_t value;
+	TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
+			RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD, &value),
+			0, "Call to get port new event threshold failed");
+	/* attr API yields uint32_t; conf field is int32_t, hence the cast */
+	TEST_ASSERT_EQUAL((int32_t) value, pconf.new_event_threshold,
+			"Wrong port new event threshold");
+
+	return TEST_SUCCESS;
+}
ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
- TEST_ASSERT_EQUAL(rte_event_port_count(TEST_DEV_ID),
- info.max_event_ports, "Wrong port count");
+ uint32_t port_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &port_count), "Port count get failed");
+ TEST_ASSERT_EQUAL(port_count, info.max_event_ports, "Wrong port count");
return TEST_SUCCESS;
}
uint64_t timeout_ticks;
ret = rte_event_dequeue_timeout_ticks(TEST_DEV_ID, 100, &timeout_ticks);
- TEST_ASSERT_SUCCESS(ret, "Fail to get timeout_ticks");
+ if (ret != -ENOTSUP)
+ TEST_ASSERT_SUCCESS(ret, "Fail to get timeout_ticks");
- return TEST_SUCCESS;
+ return ret;
}
ret = eventdev_configure_setup();
TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
- for (i = 0; i < rte_event_queue_count(TEST_DEV_ID); i++) {
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+ for (i = 0; i < (int)queue_count; i++) {
ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
}
- for (i = 0; i < rte_event_port_count(TEST_DEV_ID); i++) {
+ uint32_t port_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &port_count), "Port count get failed");
+
+ for (i = 0; i < (int)port_count; i++) {
ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
}
ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
- TEST_ASSERT(ret == rte_event_queue_count(TEST_DEV_ID),
- "Failed to link port, device %d", TEST_DEV_ID);
+ TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
+ TEST_DEV_ID);
ret = rte_event_dev_start(TEST_DEV_ID);
TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);
ret = eventdev_configure_setup();
TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
- for (i = 0; i < rte_event_queue_count(TEST_DEV_ID); i++) {
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+ for (i = 0; i < (int)queue_count; i++) {
ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
}
- for (i = 0; i < rte_event_port_count(TEST_DEV_ID); i++) {
+ uint32_t port_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &port_count), "Port count get failed");
+
+ for (i = 0; i < (int)port_count; i++) {
ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
}
ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
- TEST_ASSERT(ret == rte_event_queue_count(TEST_DEV_ID),
- "Failed to link port, device %d", TEST_DEV_ID);
+ TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
+ TEST_DEV_ID);
ret = rte_event_dev_start(TEST_DEV_ID);
TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);
TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
TEST_DEV_ID);
- nb_queues = rte_event_queue_count(TEST_DEV_ID);
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+ nb_queues = queue_count;
for (i = 0; i < nb_queues; i++) {
queues[i] = i;
priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
TEST_ASSERT(ret >= 0, "Failed to unlink with NULL device%d",
TEST_DEV_ID);
- nb_queues = rte_event_queue_count(TEST_DEV_ID);
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+ nb_queues = queue_count;
for (i = 0; i < nb_queues; i++)
queues[i] = i;
static int
test_eventdev_link_get(void)
{
- int ret, nb_queues, i;
+ int ret, i;
uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
TEST_DEV_ID);
- nb_queues = rte_event_queue_count(TEST_DEV_ID);
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+ const int nb_queues = queue_count;
for (i = 0; i < nb_queues; i++)
queues[i] = i;
TEST_ASSERT(ret == 0, "(%d)Wrong link get=%d", TEST_DEV_ID, ret);
/* link all queues and get the links */
- nb_queues = rte_event_queue_count(TEST_DEV_ID);
for (i = 0; i < nb_queues; i++) {
queues[i] = i;
priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
TEST_DEV_ID, ret);
+
/* 4links and 2 unlinks */
- nb_queues = rte_event_queue_count(TEST_DEV_ID);
if (nb_queues >= 4) {
for (i = 0; i < 4; i++) {
queues[i] = i;
TEST_CASE_ST(eventdev_configure_setup, NULL,
test_eventdev_queue_count),
TEST_CASE_ST(eventdev_configure_setup, NULL,
- test_eventdev_queue_priority),
+ test_eventdev_queue_attr_priority),
+ TEST_CASE_ST(eventdev_configure_setup, NULL,
+ test_eventdev_queue_attr_nb_atomic_flows),
+ TEST_CASE_ST(eventdev_configure_setup, NULL,
+ test_eventdev_queue_attr_nb_atomic_order_sequences),
+ TEST_CASE_ST(eventdev_configure_setup, NULL,
+ test_eventdev_queue_attr_event_queue_cfg),
TEST_CASE_ST(eventdev_configure_setup, NULL,
test_eventdev_port_default_conf_get),
TEST_CASE_ST(eventdev_configure_setup, NULL,
test_eventdev_port_setup),
TEST_CASE_ST(eventdev_configure_setup, NULL,
- test_eventdev_dequeue_depth),
+ test_eventdev_port_attr_dequeue_depth),
+ TEST_CASE_ST(eventdev_configure_setup, NULL,
+ test_eventdev_port_attr_enqueue_depth),
TEST_CASE_ST(eventdev_configure_setup, NULL,
- test_eventdev_enqueue_depth),
+ test_eventdev_port_attr_new_event_threshold),
TEST_CASE_ST(eventdev_configure_setup, NULL,
test_eventdev_port_count),
TEST_CASE_ST(eventdev_configure_setup, NULL,