rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
uint8_t old_nb_queues = dev->data->nb_queues;
- uint8_t *queues_prio;
+ struct rte_event_queue_conf *queues_cfg;
unsigned int i;
RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
dev->data->dev_id);
/* First time configuration */
- if (dev->data->queues_prio == NULL && nb_queues != 0) {
- /* Allocate memory to store queue priority */
- dev->data->queues_prio = rte_zmalloc_socket(
- "eventdev->data->queues_prio",
- sizeof(dev->data->queues_prio[0]) * nb_queues,
+ if (dev->data->queues_cfg == NULL && nb_queues != 0) {
+ /* Allocate memory to store queue configuration */
+ dev->data->queues_cfg = rte_zmalloc_socket(
+ "eventdev->data->queues_cfg",
+ sizeof(dev->data->queues_cfg[0]) * nb_queues,
RTE_CACHE_LINE_SIZE, dev->data->socket_id);
- if (dev->data->queues_prio == NULL) {
+ if (dev->data->queues_cfg == NULL) {
dev->data->nb_queues = 0;
- RTE_EDEV_LOG_ERR("failed to get mem for queue priority,"
+ RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
"nb_queues %u", nb_queues);
return -(ENOMEM);
}
/* Re-configure */
- } else if (dev->data->queues_prio != NULL && nb_queues != 0) {
+ } else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
for (i = nb_queues; i < old_nb_queues; i++)
(*dev->dev_ops->queue_release)(dev, i);
- /* Re allocate memory to store queue priority */
- queues_prio = dev->data->queues_prio;
- queues_prio = rte_realloc(queues_prio,
- sizeof(queues_prio[0]) * nb_queues,
+ /* Re-allocate memory to store queue configuration */
+ queues_cfg = dev->data->queues_cfg;
+ queues_cfg = rte_realloc(queues_cfg,
+ sizeof(queues_cfg[0]) * nb_queues,
RTE_CACHE_LINE_SIZE);
- if (queues_prio == NULL) {
- RTE_EDEV_LOG_ERR("failed to realloc queue priority,"
+ if (queues_cfg == NULL) {
+ RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
" nb_queues %u", nb_queues);
return -(ENOMEM);
}
- dev->data->queues_prio = queues_prio;
+ dev->data->queues_cfg = queues_cfg;
if (nb_queues > old_nb_queues) {
uint8_t new_qs = nb_queues - old_nb_queues;
- memset(queues_prio + old_nb_queues, 0,
- sizeof(queues_prio[0]) * new_qs);
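+ /* rte_realloc() does not zero the newly grown region, so clear the added entries. */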
+ memset(queues_cfg + old_nb_queues, 0,
+ sizeof(queues_cfg[0]) * new_qs);
}
- } else if (dev->data->queues_prio != NULL && nb_queues == 0) {
+ } else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
for (i = nb_queues; i < old_nb_queues; i++)
(*dev->dev_ops->queue_release)(dev, i);
}
queue_conf = &def_conf;
}
- dev->data->queues_prio[queue_id] = queue_conf->priority;
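+ /* Store the whole conf, not just the priority, so that
+  * rte_event_queue_attr_get() can return any configured field,
+  * including defaults applied via def_conf above. */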
+ dev->data->queues_cfg[queue_id] = *queue_conf;
return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
uint32_t *attr_value)
{
+ struct rte_event_queue_conf *conf;
struct rte_eventdev *dev;
if (!attr_value)
return -EINVAL;
RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_eventdevs[dev_id];
if (!is_valid_queue(dev, queue_id)) {
RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
return -EINVAL;
}
+ conf = &dev->data->queues_cfg[queue_id];
+
switch (attr_id) {
case RTE_EVENT_QUEUE_ATTR_PRIORITY:
*attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
- *attr_value = dev->data->queues_prio[queue_id];
+ *attr_value = conf->priority;
+ break;
+ case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
+ *attr_value = conf->nb_atomic_flows;
+ break;
+ case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
+ *attr_value = conf->nb_atomic_order_sequences;
+ break;
+ case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
+ *attr_value = conf->event_queue_cfg;
break;
default:
return -EINVAL;
}
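
For context, a minimal caller-side sketch of the extended getter (not part of the patch). The dump_queue_attrs helper name and the printf reporting are illustrative assumptions; the attribute IDs and the rte_event_queue_attr_get() signature are the ones added above.

#include <inttypes.h>
#include <stdio.h>
#include <rte_eventdev.h>

/* Illustrative only: dump every queue attribute this patch exposes
 * for one queue of an already-configured event device. */
static void
dump_queue_attrs(uint8_t dev_id, uint8_t queue_id)
{
	uint32_t val;

	if (rte_event_queue_attr_get(dev_id, queue_id,
			RTE_EVENT_QUEUE_ATTR_PRIORITY, &val) == 0)
		printf("priority: %" PRIu32 "\n", val);
	if (rte_event_queue_attr_get(dev_id, queue_id,
			RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS, &val) == 0)
		printf("nb_atomic_flows: %" PRIu32 "\n", val);
	if (rte_event_queue_attr_get(dev_id, queue_id,
			RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES, &val) == 0)
		printf("nb_atomic_order_sequences: %" PRIu32 "\n", val);
	if (rte_event_queue_attr_get(dev_id, queue_id,
			RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG, &val) == 0)
		printf("event_queue_cfg: 0x%" PRIx32 "\n", val);
}
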
static int
-test_eventdev_queue_priority(void)
+test_eventdev_queue_attr_priority(void)
{
int i, ret;
struct rte_event_dev_info info;
return TEST_SUCCESS;
}
+static int
+test_eventdev_queue_attr_nb_atomic_flows(void)
+{
+ int i, ret;
+ struct rte_event_dev_info info;
+ struct rte_event_queue_conf qconf;
+ uint32_t nb_atomic_flows;
+
+ ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+
+ ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get queue 0's def conf");
+
+ if (qconf.nb_atomic_flows == 0)
+ /* Assume PMD doesn't support atomic flows, return early */
+ return -ENOTSUP;
+
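+ /* Request an atomic-only queue so nb_atomic_flows is meaningful. */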
+ qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY;
+
+ for (i = 0; i < (int)queue_count; i++) {
+ ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
+ }
+
+ for (i = 0; i < (int)queue_count; i++) {
+ TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
+ RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS,
+ &nb_atomic_flows),
+ "Queue nb_atomic_flows get failed");
+
+ TEST_ASSERT_EQUAL(nb_atomic_flows, qconf.nb_atomic_flows,
+ "Wrong atomic flows value for queue%d", i);
+ }
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_eventdev_queue_attr_nb_atomic_order_sequences(void)
+{
+ int i, ret;
+ struct rte_event_dev_info info;
+ struct rte_event_queue_conf qconf;
+ uint32_t nb_atomic_order_sequences;
+
+ ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+
+ ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get queue 0's def conf");
+
+ if (qconf.nb_atomic_order_sequences == 0)
+ /* Assume PMD doesn't support reordering */
+ return -ENOTSUP;
+
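+ /* Request an ordered-only queue so nb_atomic_order_sequences is meaningful. */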
+ qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY;
+
+ for (i = 0; i < (int)queue_count; i++) {
+ ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
+ }
+
+ for (i = 0; i < (int)queue_count; i++) {
+ TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
+ RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES,
+ &nb_atomic_order_sequences),
+ "Queue nb_atomic_order_sequencess get failed");
+
+ TEST_ASSERT_EQUAL(nb_atomic_order_sequences,
+ qconf.nb_atomic_order_sequences,
+ "Wrong atomic order sequences value for queue%d",
+ i);
+ }
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_eventdev_queue_attr_event_queue_cfg(void)
+{
+ int i, ret;
+ struct rte_event_dev_info info;
+ struct rte_event_queue_conf qconf;
+ uint32_t event_queue_cfg;
+
+ ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+
+ ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 def conf");
+
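+ /* A parallel queue uses neither atomic flows nor order sequences;
+  * this case only checks that the cfg flags round-trip. */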
+ qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY;
+
+ for (i = 0; i < (int)queue_count; i++) {
+ ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
+ }
+
+ for (i = 0; i < (int)queue_count; i++) {
+ TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
+ RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG,
+ &event_queue_cfg),
+ "Queue event_queue_cfg get failed");
+
+ TEST_ASSERT_EQUAL(event_queue_cfg, qconf.event_queue_cfg,
+ "Wrong event_queue_cfg value for queue%d",
+ i);
+ }
+
+ return TEST_SUCCESS;
+}
+
static int
test_eventdev_port_default_conf_get(void)
{
TEST_CASE_ST(eventdev_configure_setup, NULL,
test_eventdev_queue_count),
TEST_CASE_ST(eventdev_configure_setup, NULL,
- test_eventdev_queue_priority),
+ test_eventdev_queue_attr_priority),
+ TEST_CASE_ST(eventdev_configure_setup, NULL,
+ test_eventdev_queue_attr_nb_atomic_flows),
+ TEST_CASE_ST(eventdev_configure_setup, NULL,
+ test_eventdev_queue_attr_nb_atomic_order_sequences),
+ TEST_CASE_ST(eventdev_configure_setup, NULL,
+ test_eventdev_queue_attr_event_queue_cfg),
TEST_CASE_ST(eventdev_configure_setup, NULL,
test_eventdev_port_default_conf_get),
TEST_CASE_ST(eventdev_configure_setup, NULL,