1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Cavium, Inc
7 #include <rte_common.h>
8 #include <rte_hexdump.h>
10 #include <rte_malloc.h>
11 #include <rte_memcpy.h>
13 #ifdef RTE_EXEC_ENV_WINDOWS
/* Windows stub: eventdev common tests are not supported on Windows, so
 * just announce the skip.
 * NOTE(review): fragment — return type, braces and the return value are
 * not visible in this chunk; presumably returns TEST_SKIPPED — confirm. */
15 test_eventdev_common(void)
17 printf("eventdev_common not supported on Windows, skipping test\n");
23 #include <rte_eventdev.h>
25 #include <rte_bus_vdev.h>
/* Suite setup: compile-time check that struct rte_event stays 16 bytes
 * (ABI guarantee), then fall back to instantiating the software
 * "event_skeleton" vdev when no physical/virtual eventdev is present.
 * NOTE(review): the function header and the `if (!count)` guard around
 * the fallback are not visible in this chunk — confirm against the
 * full file. */
32 RTE_BUILD_BUG_ON(sizeof(struct rte_event) != 16);
34 count = rte_event_dev_count();
36 printf("Failed to find a valid event device,"
37 " testing with event_skeleton device\n");
38 return rte_vdev_init("event_skeleton", NULL);
44 testsuite_teardown(void)
/* Verify at least one event device is available after setup. */
49 test_eventdev_count(void)
52 count = rte_event_dev_count();
53 TEST_ASSERT(count > 0, "Invalid eventdev count %" PRIu8, count);
/* Negative test: looking up a bogus driver name must fail (<0). */
58 test_eventdev_get_dev_id(void)
61 ret = rte_event_dev_get_dev_id("not_a_valid_eventdev_driver");
62 TEST_ASSERT_FAIL(ret, "Expected <0 for invalid dev name ret=%d", ret);
/* Socket id of a valid device must not be -EINVAL; an out-of-range
 * device id (RTE_EVENT_MAX_DEVS) must yield exactly -EINVAL. */
67 test_eventdev_socket_id(void)
70 socket_id = rte_event_dev_socket_id(TEST_DEV_ID);
71 TEST_ASSERT(socket_id != -EINVAL, "Failed to get socket_id %d",
73 socket_id = rte_event_dev_socket_id(RTE_EVENT_MAX_DEVS);
74 TEST_ASSERT(socket_id == -EINVAL, "Expected -EINVAL %d", socket_id);
/* info_get: NULL output pointer must return -EINVAL; a valid call must
 * succeed and report at least one port and one queue. */
80 test_eventdev_info_get(void)
83 struct rte_event_dev_info info;
84 ret = rte_event_dev_info_get(TEST_DEV_ID, NULL);
85 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
86 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
87 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
88 TEST_ASSERT(info.max_event_ports > 0,
89 "Not enough event ports %d", info.max_event_ports);
90 TEST_ASSERT(info.max_event_queues > 0,
91 "Not enough event queues %d", info.max_event_queues);
96 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
97 struct rte_event_dev_info *info)
99 memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
100 dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
101 dev_conf->nb_event_ports = info->max_event_ports;
102 dev_conf->nb_event_queues = info->max_event_queues;
103 dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
104 dev_conf->nb_event_port_dequeue_depth =
105 info->max_event_port_dequeue_depth;
106 dev_conf->nb_event_port_enqueue_depth =
107 info->max_event_port_enqueue_depth;
108 dev_conf->nb_event_port_enqueue_depth =
109 info->max_event_port_enqueue_depth;
110 dev_conf->nb_events_limit =
111 info->max_num_events;
/* Helper for the negative configure tests: start from sane defaults,
 * then attempt to configure the device.
 * NOTE(review): the `fn` callback (which perturbs one field past its
 * limit) is never invoked in the lines visible here — a call such as
 * `fn(dev_conf, info);` appears to be missing from this chunk; confirm
 * against the full file, otherwise the negative tests cannot work. */
115 test_ethdev_config_run(struct rte_event_dev_config *dev_conf,
116 struct rte_event_dev_info *info,
117 void (*fn)(struct rte_event_dev_config *dev_conf,
118 struct rte_event_dev_info *info))
120 devconf_set_default_sane_values(dev_conf, info);
122 return rte_event_dev_configure(TEST_DEV_ID, dev_conf);
/* Callback helpers for test_ethdev_config_run(): each one pushes a
 * single rte_event_dev_config field one past the device's advertised
 * maximum so that rte_event_dev_configure() is expected to reject the
 * configuration with -EINVAL. */
126 max_dequeue_limit(struct rte_event_dev_config *dev_conf,
127 struct rte_event_dev_info *info)
129 dev_conf->dequeue_timeout_ns = info->max_dequeue_timeout_ns + 1;
133 max_events_limit(struct rte_event_dev_config *dev_conf,
134 struct rte_event_dev_info *info)
136 dev_conf->nb_events_limit = info->max_num_events + 1;
140 max_event_ports(struct rte_event_dev_config *dev_conf,
141 struct rte_event_dev_info *info)
143 dev_conf->nb_event_ports = info->max_event_ports + 1;
147 max_event_queues(struct rte_event_dev_config *dev_conf,
148 struct rte_event_dev_info *info)
150 dev_conf->nb_event_queues = info->max_event_queues + 1;
154 max_event_queue_flows(struct rte_event_dev_config *dev_conf,
155 struct rte_event_dev_info *info)
157 dev_conf->nb_event_queue_flows = info->max_event_queue_flows + 1;
161 max_event_port_dequeue_depth(struct rte_event_dev_config *dev_conf,
162 struct rte_event_dev_info *info)
164 dev_conf->nb_event_port_dequeue_depth =
165 info->max_event_port_dequeue_depth + 1;
169 max_event_port_enqueue_depth(struct rte_event_dev_config *dev_conf,
170 struct rte_event_dev_info *info)
172 dev_conf->nb_event_port_enqueue_depth =
173 info->max_event_port_enqueue_depth + 1;
/* Exercise rte_event_dev_configure(): NULL config must fail; every
 * over-limit field (via the max_* helpers) must fail with -EINVAL
 * (depth limits only checked when the PMD advertises BURST_MODE);
 * then configure with sane values, re-configure with half the
 * ports/queues (clamped to >= 1), and re-configure back to the max. */
178 test_eventdev_configure(void)
181 struct rte_event_dev_config dev_conf;
182 struct rte_event_dev_info info;
183 ret = rte_event_dev_configure(TEST_DEV_ID, NULL);
184 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
186 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
187 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
190 TEST_ASSERT_EQUAL(-EINVAL,
191 test_ethdev_config_run(&dev_conf, &info, max_dequeue_limit),
192 "Config negative test failed");
193 TEST_ASSERT_EQUAL(-EINVAL,
194 test_ethdev_config_run(&dev_conf, &info, max_events_limit),
195 "Config negative test failed");
196 TEST_ASSERT_EQUAL(-EINVAL,
197 test_ethdev_config_run(&dev_conf, &info, max_event_ports),
198 "Config negative test failed");
199 TEST_ASSERT_EQUAL(-EINVAL,
200 test_ethdev_config_run(&dev_conf, &info, max_event_queues),
201 "Config negative test failed");
202 TEST_ASSERT_EQUAL(-EINVAL,
203 test_ethdev_config_run(&dev_conf, &info, max_event_queue_flows),
204 "Config negative test failed");
206 if (info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) {
207 TEST_ASSERT_EQUAL(-EINVAL,
208 test_ethdev_config_run(&dev_conf, &info,
209 max_event_port_dequeue_depth),
210 "Config negative test failed");
211 TEST_ASSERT_EQUAL(-EINVAL,
212 test_ethdev_config_run(&dev_conf, &info,
213 max_event_port_enqueue_depth),
214 "Config negative test failed");
218 devconf_set_default_sane_values(&dev_conf, &info);
219 ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
220 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
223 devconf_set_default_sane_values(&dev_conf, &info);
224 dev_conf.nb_event_ports = RTE_MAX(info.max_event_ports/2, 1);
225 dev_conf.nb_event_queues = RTE_MAX(info.max_event_queues/2, 1);
226 ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
227 TEST_ASSERT_SUCCESS(ret, "Failed to re configure eventdev");
229 /* re-configure back to max_event_queues and max_event_ports */
230 devconf_set_default_sane_values(&dev_conf, &info);
231 ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
232 TEST_ASSERT_SUCCESS(ret, "Failed to re-configure eventdev");
/* Per-test setup hook: configure the device with its maximum sane
 * values (used as the .setup of most test cases below). */
239 eventdev_configure_setup(void)
242 struct rte_event_dev_config dev_conf;
243 struct rte_event_dev_info info;
245 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
246 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
247 devconf_set_default_sane_values(&dev_conf, &info);
248 ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
249 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
/* queue_default_conf_get: NULL output must return -EINVAL; fetching
 * the default conf must succeed for every configured queue. */
255 test_eventdev_queue_default_conf_get(void)
258 struct rte_event_queue_conf qconf;
260 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, NULL);
261 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
263 uint32_t queue_count;
264 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
265 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
266 "Queue count get failed");
268 for (i = 0; i < (int)queue_count; i++) {
269 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
271 TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d info", i);
/* queue_setup: over-limit nb_atomic_flows / nb_atomic_order_sequences
 * and an out-of-range queue id must all fail with -EINVAL; then set up
 * queue0 with the default conf, and every queue with a NULL conf. */
278 test_eventdev_queue_setup(void)
281 struct rte_event_dev_info info;
282 struct rte_event_queue_conf qconf;
284 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
285 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
288 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
289 TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
290 qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
291 qconf.nb_atomic_flows = info.max_event_queue_flows + 1;
292 ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
293 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
295 qconf.nb_atomic_flows = info.max_event_queue_flows;
296 qconf.schedule_type = RTE_SCHED_TYPE_ORDERED;
297 qconf.nb_atomic_order_sequences = info.max_event_queue_flows + 1;
298 ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
299 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
301 ret = rte_event_queue_setup(TEST_DEV_ID, info.max_event_queues,
303 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
306 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
307 TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
308 ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
309 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue0");
311 uint32_t queue_count;
312 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
313 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
314 "Queue count get failed");
316 for (i = 0; i < (int)queue_count; i++) {
317 ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
318 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
/* After eventdev_configure_setup() (which requests max_event_queues),
 * the QUEUE_COUNT device attribute must equal info.max_event_queues. */
325 test_eventdev_queue_count(void)
328 struct rte_event_dev_info info;
330 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
331 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
333 uint32_t queue_count;
334 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
335 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
336 "Queue count get failed");
337 TEST_ASSERT_EQUAL(queue_count, info.max_event_queues,
338 "Wrong queue count");
/* Set a distinct priority (i % LOWEST) on each queue at setup time,
 * then read it back: PMDs with QUEUE_QOS must return the set value,
 * others must report PRIORITY_NORMAL. */
344 test_eventdev_queue_attr_priority(void)
347 struct rte_event_dev_info info;
348 struct rte_event_queue_conf qconf;
351 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
352 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
354 uint32_t queue_count;
355 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
356 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
357 "Queue count get failed");
359 for (i = 0; i < (int)queue_count; i++) {
360 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
362 TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d def conf", i);
363 qconf.priority = i % RTE_EVENT_DEV_PRIORITY_LOWEST;
364 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
365 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
368 for (i = 0; i < (int)queue_count; i++) {
370 TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
371 RTE_EVENT_QUEUE_ATTR_PRIORITY, &tmp),
372 "Queue priority get failed");
375 if (info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
376 TEST_ASSERT_EQUAL(priority,
377 i % RTE_EVENT_DEV_PRIORITY_LOWEST,
378 "Wrong priority value for queue%d", i);
380 TEST_ASSERT_EQUAL(priority,
381 RTE_EVENT_DEV_PRIORITY_NORMAL,
382 "Wrong priority value for queue%d", i);
/* Runtime priority change (requires RUNTIME_QUEUE_ATTR capability,
 * otherwise the test bails out early): make queue0 LOWEST and queue1
 * HIGHEST priority after setup, enqueue one NEW atomic event per
 * queue, start the device, then dequeue everything and verify events
 * arrive in non-increasing queue-priority order (higher-priority
 * queues drained first). Needs at least 2 queues. */
389 test_eventdev_queue_attr_priority_runtime(void)
391 uint32_t queue_count, queue_req, prio, deq_cnt;
392 struct rte_event_queue_conf qconf;
393 struct rte_event_port_conf pconf;
394 struct rte_event_dev_info info;
395 struct rte_event event = {
396 .op = RTE_EVENT_OP_NEW,
397 .event_type = RTE_EVENT_TYPE_CPU,
398 .sched_type = RTE_SCHED_TYPE_ATOMIC,
403 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
404 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
406 if (!(info.event_dev_cap & RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR))
409 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(
410 TEST_DEV_ID, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
412 "Queue count get failed");
414 /* Need at least 2 queues to test LOW and HIGH priority. */
415 TEST_ASSERT(queue_count > 1, "Not enough event queues, needed 2");
418 for (i = 0; i < (int)queue_count; i++) {
419 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i, &qconf);
420 TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d def conf", i);
421 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
422 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
425 ret = rte_event_queue_attr_set(TEST_DEV_ID, 0,
426 RTE_EVENT_QUEUE_ATTR_PRIORITY,
427 RTE_EVENT_DEV_PRIORITY_LOWEST);
430 TEST_ASSERT_SUCCESS(ret, "Queue0 priority set failed");
432 ret = rte_event_queue_attr_set(TEST_DEV_ID, 1,
433 RTE_EVENT_QUEUE_ATTR_PRIORITY,
434 RTE_EVENT_DEV_PRIORITY_HIGHEST);
437 TEST_ASSERT_SUCCESS(ret, "Queue1 priority set failed");
439 /* Setup event port 0 */
440 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
441 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
442 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
443 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
444 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
445 TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
448 ret = rte_event_dev_start(TEST_DEV_ID);
449 TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);
451 for (i = 0; i < (int)queue_req; i++) {
453 while (rte_event_enqueue_burst(TEST_DEV_ID, 0, &event, 1) != 1)
457 prio = RTE_EVENT_DEV_PRIORITY_HIGHEST;
459 while (deq_cnt < queue_req) {
462 if (rte_event_dequeue_burst(TEST_DEV_ID, 0, &event, 1, 0) == 0)
465 ret = rte_event_queue_attr_get(TEST_DEV_ID, event.queue_id,
466 RTE_EVENT_QUEUE_ATTR_PRIORITY,
471 TEST_ASSERT_SUCCESS(ret, "Queue priority get failed");
472 TEST_ASSERT(queue_prio >= prio,
473 "Received event from a lower priority queue first");
/* Runtime weight attribute round-trip (requires RUNTIME_QUEUE_ATTR,
 * otherwise bails out early): set weight = i % WEIGHT_HIGHEST on each
 * queue via attr_set, read it back via attr_get, and compare. */
482 test_eventdev_queue_attr_weight_runtime(void)
484 struct rte_event_queue_conf qconf;
485 struct rte_event_dev_info info;
486 uint32_t queue_count;
489 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
490 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
492 if (!(info.event_dev_cap & RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR))
495 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(
496 TEST_DEV_ID, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
498 "Queue count get failed");
500 for (i = 0; i < (int)queue_count; i++) {
501 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i, &qconf);
502 TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d def conf", i);
503 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
504 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
507 for (i = 0; i < (int)queue_count; i++) {
511 set_val = i % RTE_EVENT_QUEUE_WEIGHT_HIGHEST;
512 ret = rte_event_queue_attr_set(
513 TEST_DEV_ID, i, RTE_EVENT_QUEUE_ATTR_WEIGHT, set_val);
517 TEST_ASSERT_SUCCESS(ret, "Queue weight set failed");
519 ret = rte_event_queue_attr_get(
520 TEST_DEV_ID, i, RTE_EVENT_QUEUE_ATTR_WEIGHT, &get_val);
524 TEST_ASSERT_SUCCESS(ret, "Queue weight get failed");
525 TEST_ASSERT_EQUAL(get_val, set_val,
526 "Wrong weight value for queue%d", i);
/* Runtime affinity attribute round-trip (requires RUNTIME_QUEUE_ATTR,
 * otherwise bails out early): set affinity = i % AFFINITY_HIGHEST on
 * each queue, read it back, and compare. Mirrors the weight test. */
533 test_eventdev_queue_attr_affinity_runtime(void)
535 struct rte_event_queue_conf qconf;
536 struct rte_event_dev_info info;
537 uint32_t queue_count;
540 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
541 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
543 if (!(info.event_dev_cap & RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR))
546 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(
547 TEST_DEV_ID, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
549 "Queue count get failed");
551 for (i = 0; i < (int)queue_count; i++) {
552 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i, &qconf);
553 TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d def conf", i);
554 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
555 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
558 for (i = 0; i < (int)queue_count; i++) {
562 set_val = i % RTE_EVENT_QUEUE_AFFINITY_HIGHEST;
563 ret = rte_event_queue_attr_set(
564 TEST_DEV_ID, i, RTE_EVENT_QUEUE_ATTR_AFFINITY, set_val);
568 TEST_ASSERT_SUCCESS(ret, "Queue affinity set failed");
570 ret = rte_event_queue_attr_get(
571 TEST_DEV_ID, i, RTE_EVENT_QUEUE_ATTR_AFFINITY, &get_val);
575 TEST_ASSERT_SUCCESS(ret, "Queue affinity get failed");
576 TEST_ASSERT_EQUAL(get_val, set_val,
577 "Wrong affinity value for queue%d", i);
/* Configure every queue as ATOMIC with the default nb_atomic_flows and
 * verify the NB_ATOMIC_FLOWS attribute reads back the same value.
 * Skips (returns early) when the default is 0, i.e. the PMD does not
 * support atomic flows. */
584 test_eventdev_queue_attr_nb_atomic_flows(void)
587 struct rte_event_dev_info info;
588 struct rte_event_queue_conf qconf;
589 uint32_t nb_atomic_flows;
591 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
592 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
594 uint32_t queue_count;
595 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
596 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
597 "Queue count get failed");
599 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
600 TEST_ASSERT_SUCCESS(ret, "Failed to get queue 0's def conf");
602 if (qconf.nb_atomic_flows == 0)
603 /* Assume PMD doesn't support atomic flows, return early */
606 qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
608 for (i = 0; i < (int)queue_count; i++) {
609 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
610 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
613 for (i = 0; i < (int)queue_count; i++) {
614 TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
615 RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS,
617 "Queue nb_atomic_flows get failed");
619 TEST_ASSERT_EQUAL(nb_atomic_flows, qconf.nb_atomic_flows,
620 "Wrong atomic flows value for queue%d", i);
/* Configure every queue as ORDERED with the default
 * nb_atomic_order_sequences and verify the attribute reads back the
 * same value. Skips when the default is 0 (no reordering support). */
627 test_eventdev_queue_attr_nb_atomic_order_sequences(void)
630 struct rte_event_dev_info info;
631 struct rte_event_queue_conf qconf;
632 uint32_t nb_atomic_order_sequences;
634 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
635 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
637 uint32_t queue_count;
638 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
639 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
640 "Queue count get failed");
642 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
643 TEST_ASSERT_SUCCESS(ret, "Failed to get queue 0's def conf");
645 if (qconf.nb_atomic_order_sequences == 0)
646 /* Assume PMD doesn't support reordering */
649 qconf.schedule_type = RTE_SCHED_TYPE_ORDERED;
651 for (i = 0; i < (int)queue_count; i++) {
652 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
653 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
656 for (i = 0; i < (int)queue_count; i++) {
657 TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
658 RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES,
659 &nb_atomic_order_sequences),
660 "Queue nb_atomic_order_sequencess get failed");
662 TEST_ASSERT_EQUAL(nb_atomic_order_sequences,
663 qconf.nb_atomic_order_sequences,
664 "Wrong atomic order sequences value for queue%d",
/* Configure every queue as SINGLE_LINK and verify the
 * EVENT_QUEUE_CFG attribute reads back the configured flags. */
672 test_eventdev_queue_attr_event_queue_cfg(void)
675 struct rte_event_dev_info info;
676 struct rte_event_queue_conf qconf;
677 uint32_t event_queue_cfg;
679 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
680 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
682 uint32_t queue_count;
683 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
684 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
685 "Queue count get failed");
687 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
688 TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 def conf");
690 qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
692 for (i = 0; i < (int)queue_count; i++) {
693 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
694 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
697 for (i = 0; i < (int)queue_count; i++) {
698 TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
699 RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG,
701 "Queue event_queue_cfg get failed");
703 TEST_ASSERT_EQUAL(event_queue_cfg, qconf.event_queue_cfg,
704 "Wrong event_queue_cfg value for queue%d",
/* port_default_conf_get: NULL output and an out-of-range port id must
 * both return -EINVAL; a valid call must succeed for every port. */
712 test_eventdev_port_default_conf_get(void)
715 struct rte_event_port_conf pconf;
717 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, NULL);
718 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
721 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
722 RTE_EVENT_DEV_ATTR_PORT_COUNT,
723 &port_count), "Port count get failed");
725 ret = rte_event_port_default_conf_get(TEST_DEV_ID,
726 port_count + 1, NULL);
727 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
729 for (i = 0; i < (int)port_count; i++) {
730 ret = rte_event_port_default_conf_get(TEST_DEV_ID, i,
732 TEST_ASSERT_SUCCESS(ret, "Failed to get port%d info", i);
/* port_setup: over-limit new_event_threshold / dequeue_depth /
 * enqueue_depth, a DISABLE_IMPL_REL flag on a PMD without the
 * IMPLICIT_RELEASE_DISABLE capability, and an out-of-range port id
 * must all fail with -EINVAL; then set up port0 with the default conf
 * and every port with a NULL conf. */
739 test_eventdev_port_setup(void)
742 struct rte_event_dev_info info;
743 struct rte_event_port_conf pconf;
745 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
746 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
749 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
750 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
751 pconf.new_event_threshold = info.max_num_events + 1;
752 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
753 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
755 pconf.new_event_threshold = info.max_num_events;
756 pconf.dequeue_depth = info.max_event_port_dequeue_depth + 1;
757 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
758 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
760 pconf.dequeue_depth = info.max_event_port_dequeue_depth;
761 pconf.enqueue_depth = info.max_event_port_enqueue_depth + 1;
762 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
763 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
765 if (!(info.event_dev_cap &
766 RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
767 pconf.enqueue_depth = info.max_event_port_enqueue_depth;
768 pconf.event_port_cfg = RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
769 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
770 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
771 pconf.event_port_cfg = 0;
774 ret = rte_event_port_setup(TEST_DEV_ID, info.max_event_ports,
776 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
779 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
780 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
781 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
782 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
785 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
786 RTE_EVENT_DEV_ATTR_PORT_COUNT,
787 &port_count), "Port count get failed");
789 for (i = 0; i < (int)port_count; i++) {
790 ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
791 TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
/* Set up port0 with its default conf, then verify the DEQ_DEPTH port
 * attribute matches the configured dequeue_depth. */
798 test_eventdev_port_attr_dequeue_depth(void)
801 struct rte_event_dev_info info;
802 struct rte_event_port_conf pconf;
804 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
805 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
807 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
808 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
809 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
810 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
813 TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
814 RTE_EVENT_PORT_ATTR_DEQ_DEPTH, &value),
815 0, "Call to get port dequeue depth failed");
816 TEST_ASSERT_EQUAL(value, pconf.dequeue_depth,
817 "Wrong port dequeue depth");
/* Set up port0 with its default conf, then verify the ENQ_DEPTH port
 * attribute matches the configured enqueue_depth. */
823 test_eventdev_port_attr_enqueue_depth(void)
826 struct rte_event_dev_info info;
827 struct rte_event_port_conf pconf;
829 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
830 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
832 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
833 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
834 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
835 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
838 TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
839 RTE_EVENT_PORT_ATTR_ENQ_DEPTH, &value),
840 0, "Call to get port enqueue depth failed");
841 TEST_ASSERT_EQUAL(value, pconf.enqueue_depth,
842 "Wrong port enqueue depth");
/* Set up port0 with its default conf, then verify the
 * NEW_EVENT_THRESHOLD port attribute matches the configured value
 * (cast to int32_t because the conf field is signed). */
848 test_eventdev_port_attr_new_event_threshold(void)
851 struct rte_event_dev_info info;
852 struct rte_event_port_conf pconf;
854 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
855 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
857 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
858 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
859 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
860 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
863 TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
864 RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD, &value),
865 0, "Call to get port new event threshold failed");
866 TEST_ASSERT_EQUAL((int32_t) value, pconf.new_event_threshold,
867 "Wrong port new event threshold");
/* After eventdev_configure_setup() (which requests max_event_ports),
 * the PORT_COUNT device attribute must equal info.max_event_ports. */
873 test_eventdev_port_count(void)
876 struct rte_event_dev_info info;
878 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
879 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
882 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
883 RTE_EVENT_DEV_ATTR_PORT_COUNT,
884 &port_count), "Port count get failed");
885 TEST_ASSERT_EQUAL(port_count, info.max_event_ports, "Wrong port count");
/* Convert a 100ns timeout into device ticks; must succeed on a
 * configured device.
 * NOTE(review): the full file likely also accepts -ENOTSUP here for
 * PMDs without timeout support — the handling lines are not visible
 * in this chunk; confirm. */
891 test_eventdev_timeout_ticks(void)
894 uint64_t timeout_ticks;
896 ret = rte_event_dequeue_timeout_ticks(TEST_DEV_ID, 100, &timeout_ticks);
898 TEST_ASSERT_SUCCESS(ret, "Fail to get timeout_ticks");
/* Full lifecycle smoke test: configure, set up all queues and ports
 * with NULL (default) confs, link port0 to every queue, start the
 * device, then stop it. */
905 test_eventdev_start_stop(void)
909 ret = eventdev_configure_setup();
910 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
912 uint32_t queue_count;
913 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
914 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
915 "Queue count get failed");
916 for (i = 0; i < (int)queue_count; i++) {
917 ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
918 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
922 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
923 RTE_EVENT_DEV_ATTR_PORT_COUNT,
924 &port_count), "Port count get failed");
926 for (i = 0; i < (int)port_count; i++) {
927 ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
928 TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
931 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
932 TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
935 ret = rte_event_dev_start(TEST_DEV_ID);
936 TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);
938 rte_event_dev_stop(TEST_DEV_ID);
/* Setup hook for the link/unlink tests: configure the device, set up
 * all queues and ports with default confs, link port0 to every queue
 * (NULL link == link all), and start the device. Paired with
 * eventdev_stop_device() as the teardown. */
944 eventdev_setup_device(void)
948 ret = eventdev_configure_setup();
949 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
951 uint32_t queue_count;
952 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
953 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
954 "Queue count get failed");
955 for (i = 0; i < (int)queue_count; i++) {
956 ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
957 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
961 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
962 RTE_EVENT_DEV_ATTR_PORT_COUNT,
963 &port_count), "Port count get failed");
965 for (i = 0; i < (int)port_count; i++) {
966 ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
967 TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
970 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
971 TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
974 ret = rte_event_dev_start(TEST_DEV_ID);
975 TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);
/* Teardown hook: stop the device started by eventdev_setup_device(). */
981 eventdev_stop_device(void)
983 rte_event_dev_stop(TEST_DEV_ID);
/* port_link: a NULL queue list links all queues (ret >= 0); an
 * explicit list of every queue at NORMAL priority must report exactly
 * nb_queues links established. */
987 test_eventdev_link(void)
989 int ret, nb_queues, i;
990 uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
991 uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
993 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
994 TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
997 uint32_t queue_count;
998 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
999 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
1000 "Queue count get failed");
1001 nb_queues = queue_count;
1002 for (i = 0; i < nb_queues; i++) {
1004 priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
1007 ret = rte_event_port_link(TEST_DEV_ID, 0, queues,
1008 priorities, nb_queues);
1009 TEST_ASSERT(ret == nb_queues, "Failed to link(device%d) ret=%d",
1011 return TEST_SUCCESS;
/* port_unlink: a NULL queue list is accepted (ret >= 0); after
 * re-linking all queues, unlinking an explicit list of every queue
 * must report exactly nb_queues unlinks. */
1015 test_eventdev_unlink(void)
1017 int ret, nb_queues, i;
1018 uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
1020 ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
1021 TEST_ASSERT(ret >= 0, "Failed to unlink with NULL device%d",
1024 uint32_t queue_count;
1025 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
1026 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
1027 "Queue count get failed");
1028 nb_queues = queue_count;
1029 for (i = 0; i < nb_queues; i++)
1032 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
1033 TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
1036 ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, nb_queues);
1037 TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
1039 return TEST_SUCCESS;
/* port_links_get coverage: after unlinking everything, links_get must
 * report 0; after linking all queues it must report nb_queues; after
 * linking a single queue it must report 1; and with >= 4 queues,
 * linking queues 0-3 at priority 0x40 and unlinking the first two must
 * leave exactly links {2, 3} with priority 0x40. */
1043 test_eventdev_link_get(void)
1046 uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
1047 uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
1049 /* link all queues */
1050 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
1051 TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
1054 uint32_t queue_count;
1055 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
1056 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
1057 "Queue count get failed");
1058 const int nb_queues = queue_count;
1059 for (i = 0; i < nb_queues; i++)
1062 ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, nb_queues);
1063 TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
1066 ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
1067 TEST_ASSERT(ret == 0, "(%d)Wrong link get=%d", TEST_DEV_ID, ret);
1069 /* link all queues and get the links */
1070 for (i = 0; i < nb_queues; i++) {
1072 priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
1074 ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities,
1076 TEST_ASSERT(ret == nb_queues, "Failed to link(device%d) ret=%d",
1078 ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
1079 TEST_ASSERT(ret == nb_queues, "(%d)Wrong link get ret=%d expected=%d",
1080 TEST_DEV_ID, ret, nb_queues);
1082 ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
1083 TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
1085 /* link just one queue */
1087 priorities[0] = RTE_EVENT_DEV_PRIORITY_NORMAL;
1089 ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities, 1);
1090 TEST_ASSERT(ret == 1, "Failed to link(device%d) ret=%d",
1092 ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
1093 TEST_ASSERT(ret == 1, "(%d)Wrong link get ret=%d expected=%d",
1094 TEST_DEV_ID, ret, 1);
1095 /* unlink the queue */
1096 ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
1097 TEST_ASSERT(ret == 1, "Failed to unlink(device%d) ret=%d",
1100 /* 4links and 2 unlinks */
1101 if (nb_queues >= 4) {
1102 for (i = 0; i < 4; i++) {
1104 priorities[i] = 0x40;
1106 ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities,
1108 TEST_ASSERT(ret == 4, "Failed to link(device%d) ret=%d",
1111 for (i = 0; i < 2; i++)
1114 ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, 2);
1115 TEST_ASSERT(ret == 2, "Failed to unlink(device%d) ret=%d",
1117 ret = rte_event_port_links_get(TEST_DEV_ID, 0,
1118 queues, priorities);
1119 TEST_ASSERT(ret == 2, "(%d)Wrong link get ret=%d expected=%d",
1120 TEST_DEV_ID, ret, 2);
1121 TEST_ASSERT(queues[0] == 2, "ret=%d expected=%d", ret, 2);
1122 TEST_ASSERT(priorities[0] == 0x40, "ret=%d expected=%d",
1124 TEST_ASSERT(queues[1] == 3, "ret=%d expected=%d", ret, 3);
1125 TEST_ASSERT(priorities[1] == 0x40, "ret=%d expected=%d",
1129 return TEST_SUCCESS;
/* Stop then close the device; returns rte_event_dev_close()'s result
 * directly (0 on success). */
1133 test_eventdev_close(void)
1135 rte_event_dev_stop(TEST_DEV_ID);
1136 return rte_event_dev_close(TEST_DEV_ID);
/* Test-suite table: each TEST_CASE_ST entry is (setup, teardown, test).
 * Most cases use eventdev_configure_setup() to (re)configure the
 * device first; the link/unlink/close cases use eventdev_setup_device()
 * which also starts the device. NULL-terminated by TEST_CASES_END. */
1139 static struct unit_test_suite eventdev_common_testsuite = {
1140 .suite_name = "eventdev common code unit test suite",
1141 .setup = testsuite_setup,
1142 .teardown = testsuite_teardown,
1143 .unit_test_cases = {
1144 TEST_CASE_ST(NULL, NULL,
1145 test_eventdev_count),
1146 TEST_CASE_ST(NULL, NULL,
1147 test_eventdev_get_dev_id),
1148 TEST_CASE_ST(NULL, NULL,
1149 test_eventdev_socket_id),
1150 TEST_CASE_ST(NULL, NULL,
1151 test_eventdev_info_get),
1152 TEST_CASE_ST(NULL, NULL,
1153 test_eventdev_configure),
1154 TEST_CASE_ST(eventdev_configure_setup, NULL,
1155 test_eventdev_queue_default_conf_get),
1156 TEST_CASE_ST(eventdev_configure_setup, NULL,
1157 test_eventdev_queue_setup),
1158 TEST_CASE_ST(eventdev_configure_setup, NULL,
1159 test_eventdev_queue_count),
1160 TEST_CASE_ST(eventdev_configure_setup, NULL,
1161 test_eventdev_queue_attr_priority),
1162 TEST_CASE_ST(eventdev_configure_setup, eventdev_stop_device,
1163 test_eventdev_queue_attr_priority_runtime),
1164 TEST_CASE_ST(eventdev_configure_setup, NULL,
1165 test_eventdev_queue_attr_weight_runtime),
1166 TEST_CASE_ST(eventdev_configure_setup, NULL,
1167 test_eventdev_queue_attr_affinity_runtime),
1168 TEST_CASE_ST(eventdev_configure_setup, NULL,
1169 test_eventdev_queue_attr_nb_atomic_flows),
1170 TEST_CASE_ST(eventdev_configure_setup, NULL,
1171 test_eventdev_queue_attr_nb_atomic_order_sequences),
1172 TEST_CASE_ST(eventdev_configure_setup, NULL,
1173 test_eventdev_queue_attr_event_queue_cfg),
1174 TEST_CASE_ST(eventdev_configure_setup, NULL,
1175 test_eventdev_port_default_conf_get),
1176 TEST_CASE_ST(eventdev_configure_setup, NULL,
1177 test_eventdev_port_setup),
1178 TEST_CASE_ST(eventdev_configure_setup, NULL,
1179 test_eventdev_port_attr_dequeue_depth),
1180 TEST_CASE_ST(eventdev_configure_setup, NULL,
1181 test_eventdev_port_attr_enqueue_depth),
1182 TEST_CASE_ST(eventdev_configure_setup, NULL,
1183 test_eventdev_port_attr_new_event_threshold),
1184 TEST_CASE_ST(eventdev_configure_setup, NULL,
1185 test_eventdev_port_count),
1186 TEST_CASE_ST(eventdev_configure_setup, NULL,
1187 test_eventdev_timeout_ticks),
1188 TEST_CASE_ST(NULL, NULL,
1189 test_eventdev_start_stop),
1190 TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
1191 test_eventdev_link),
1192 TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
1193 test_eventdev_unlink),
1194 TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
1195 test_eventdev_link_get),
1196 TEST_CASE_ST(eventdev_setup_device, NULL,
1197 test_eventdev_close),
1198 TEST_CASES_END() /**< NULL terminate unit test array */
/* Non-Windows entry point: run the whole common test suite. */
1203 test_eventdev_common(void)
1205 return unit_test_suite_runner(&eventdev_common_testsuite);
/* Run a PMD's built-in selftest: create the vdev if the named driver
 * is not already probed, then invoke rte_event_dev_selftest() on it.
 * NOTE(review): the check of `ret` that maps a failed vdev_init to the
 * TEST_SKIPPED return is not visible in this chunk — confirm. */
1209 test_eventdev_selftest_impl(const char *pmd, const char *opts)
1213 if (rte_event_dev_get_dev_id(pmd) == -ENODEV)
1214 ret = rte_vdev_init(pmd, opts);
1216 return TEST_SKIPPED;
1218 return rte_event_dev_selftest(rte_event_dev_get_dev_id(pmd));
/* Thin per-PMD wrappers around test_eventdev_selftest_impl(), one per
 * supported event driver. */
1222 test_eventdev_selftest_sw(void)
1224 return test_eventdev_selftest_impl("event_sw", "");
1228 test_eventdev_selftest_octeontx(void)
1230 return test_eventdev_selftest_impl("event_octeontx", "");
1234 test_eventdev_selftest_dpaa2(void)
1236 return test_eventdev_selftest_impl("event_dpaa2", "");
1240 test_eventdev_selftest_dlb2(void)
1242 return test_eventdev_selftest_impl("dlb2_event", "");
1246 test_eventdev_selftest_cn9k(void)
1248 return test_eventdev_selftest_impl("event_cn9k", "");
1252 test_eventdev_selftest_cn10k(void)
1254 return test_eventdev_selftest_impl("event_cn10k", "");
1257 #endif /* !RTE_EXEC_ENV_WINDOWS */
/* The common suite is registered on every platform (its Windows build
 * is the skip stub above); the PMD selftests exist only off-Windows. */
1259 REGISTER_TEST_COMMAND(eventdev_common_autotest, test_eventdev_common);
1261 #ifndef RTE_EXEC_ENV_WINDOWS
1262 REGISTER_TEST_COMMAND(eventdev_selftest_sw, test_eventdev_selftest_sw);
1263 REGISTER_TEST_COMMAND(eventdev_selftest_octeontx,
1264 test_eventdev_selftest_octeontx);
1265 REGISTER_TEST_COMMAND(eventdev_selftest_dpaa2, test_eventdev_selftest_dpaa2);
1266 REGISTER_TEST_COMMAND(eventdev_selftest_dlb2, test_eventdev_selftest_dlb2);
1267 REGISTER_TEST_COMMAND(eventdev_selftest_cn9k, test_eventdev_selftest_cn9k);
1268 REGISTER_TEST_COMMAND(eventdev_selftest_cn10k, test_eventdev_selftest_cn10k);
1270 #endif /* !RTE_EXEC_ENV_WINDOWS */