-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 Cavium, Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
*/
#include <rte_common.h>
#include <rte_memcpy.h>
#include <rte_eventdev.h>
#include <rte_dev.h>
+#include <rte_bus_vdev.h>
#include "test.h"
/* Negative cases */
ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
- qconf.event_queue_cfg = (RTE_EVENT_QUEUE_CFG_ALL_TYPES &
- RTE_EVENT_QUEUE_CFG_TYPE_MASK);
+ qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
qconf.nb_atomic_flows = info.max_event_queue_flows + 1;
ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
qconf.nb_atomic_flows = info.max_event_queue_flows;
- qconf.event_queue_cfg = (RTE_EVENT_QUEUE_CFG_ORDERED_ONLY &
- RTE_EVENT_QUEUE_CFG_TYPE_MASK);
+ qconf.schedule_type = RTE_SCHED_TYPE_ORDERED;
qconf.nb_atomic_order_sequences = info.max_event_queue_flows + 1;
ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
/* Assume PMD doesn't support atomic flows, return early */
return -ENOTSUP;
- qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY;
+ qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
for (i = 0; i < (int)queue_count; i++) {
ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
/* Assume PMD doesn't support reordering */
return -ENOTSUP;
- qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY;
+ qconf.schedule_type = RTE_SCHED_TYPE_ORDERED;
for (i = 0; i < (int)queue_count; i++) {
ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 def conf");
- qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY;
+ qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
for (i = 0; i < (int)queue_count; i++) {
ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
}
static int
-test_eventdev_dequeue_depth(void)
+test_eventdev_port_attr_dequeue_depth(void)
{
int ret;
struct rte_event_dev_info info;
uint32_t value;
TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
RTE_EVENT_PORT_ATTR_DEQ_DEPTH, &value),
- 0, "Call to port dequeue depth failed");
+ 0, "Call to get port dequeue depth failed");
TEST_ASSERT_EQUAL(value, pconf.dequeue_depth,
"Wrong port dequeue depth");
}
static int
-test_eventdev_enqueue_depth(void)
+test_eventdev_port_attr_enqueue_depth(void)
{
int ret;
struct rte_event_dev_info info;
uint32_t value;
TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
RTE_EVENT_PORT_ATTR_ENQ_DEPTH, &value),
- 0, "Call to port enqueue depth failed");
+ 0, "Call to get port enqueue depth failed");
TEST_ASSERT_EQUAL(value, pconf.enqueue_depth,
"Wrong port enqueue depth");
return TEST_SUCCESS;
}
+/* Verify that the NEW_EVENT_THRESHOLD value reported by
+ * rte_event_port_attr_get() matches the new_event_threshold field of
+ * the default port configuration that was used to set up port 0.
+ * Returns TEST_SUCCESS on success; the TEST_ASSERT_* macros return a
+ * failure code on any mismatch or API error.
+ */
+static int
+test_eventdev_port_attr_new_event_threshold(void)
+{
+	int ret;
+	struct rte_event_dev_info info;
+	struct rte_event_port_conf pconf;
+
+	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+
+	/* Set up port 0 with the PMD's default port configuration so the
+	 * attribute read-back below has a known expected value.
+	 */
+	ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
+	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
+	TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
+
+	uint32_t value;
+	TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
+			RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD, &value),
+			0, "Call to get port new event threshold failed");
+	/* attr_get reports the value through a uint32_t; cast back to
+	 * signed before comparing — new_event_threshold is presumably a
+	 * signed (int32_t) field in rte_event_port_conf.
+	 */
+	TEST_ASSERT_EQUAL((int32_t) value, pconf.new_event_threshold,
+			"Wrong port new event threshold");
+
+	return TEST_SUCCESS;
+}
+
static int
test_eventdev_port_count(void)
{
TEST_CASE_ST(eventdev_configure_setup, NULL,
test_eventdev_port_setup),
TEST_CASE_ST(eventdev_configure_setup, NULL,
- test_eventdev_dequeue_depth),
+ test_eventdev_port_attr_dequeue_depth),
+ TEST_CASE_ST(eventdev_configure_setup, NULL,
+ test_eventdev_port_attr_enqueue_depth),
TEST_CASE_ST(eventdev_configure_setup, NULL,
- test_eventdev_enqueue_depth),
+ test_eventdev_port_attr_new_event_threshold),
TEST_CASE_ST(eventdev_configure_setup, NULL,
test_eventdev_port_count),
TEST_CASE_ST(eventdev_configure_setup, NULL,