/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */
#include <stdio.h>

#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_eventdev.h>
#include <rte_bus_vdev.h>
21 RTE_BUILD_BUG_ON(sizeof(struct rte_event) != 16);
23 count = rte_event_dev_count();
25 printf("Failed to find a valid event device,"
26 " testing with event_skeleton device\n");
27 return rte_vdev_init("event_skeleton", NULL);
33 testsuite_teardown(void)
38 test_eventdev_count(void)
41 count = rte_event_dev_count();
42 TEST_ASSERT(count > 0, "Invalid eventdev count %" PRIu8, count);
47 test_eventdev_get_dev_id(void)
50 ret = rte_event_dev_get_dev_id("not_a_valid_eventdev_driver");
51 TEST_ASSERT_FAIL(ret, "Expected <0 for invalid dev name ret=%d", ret);
56 test_eventdev_socket_id(void)
59 socket_id = rte_event_dev_socket_id(TEST_DEV_ID);
60 TEST_ASSERT(socket_id != -EINVAL, "Failed to get socket_id %d",
62 socket_id = rte_event_dev_socket_id(RTE_EVENT_MAX_DEVS);
63 TEST_ASSERT(socket_id == -EINVAL, "Expected -EINVAL %d", socket_id);
69 test_eventdev_info_get(void)
72 struct rte_event_dev_info info;
73 ret = rte_event_dev_info_get(TEST_DEV_ID, NULL);
74 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
75 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
76 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
77 TEST_ASSERT(info.max_event_ports > 0,
78 "Not enough event ports %d", info.max_event_ports);
79 TEST_ASSERT(info.max_event_queues > 0,
80 "Not enough event queues %d", info.max_event_queues);
85 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
86 struct rte_event_dev_info *info)
88 memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
89 dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
90 dev_conf->nb_event_ports = info->max_event_ports;
91 dev_conf->nb_event_queues = info->max_event_queues;
92 dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
93 dev_conf->nb_event_port_dequeue_depth =
94 info->max_event_port_dequeue_depth;
95 dev_conf->nb_event_port_enqueue_depth =
96 info->max_event_port_enqueue_depth;
97 dev_conf->nb_event_port_enqueue_depth =
98 info->max_event_port_enqueue_depth;
99 dev_conf->nb_events_limit =
100 info->max_num_events;
104 test_ethdev_config_run(struct rte_event_dev_config *dev_conf,
105 struct rte_event_dev_info *info,
106 void (*fn)(struct rte_event_dev_config *dev_conf,
107 struct rte_event_dev_info *info))
109 devconf_set_default_sane_values(dev_conf, info);
111 return rte_event_dev_configure(TEST_DEV_ID, dev_conf);
115 max_dequeue_limit(struct rte_event_dev_config *dev_conf,
116 struct rte_event_dev_info *info)
118 dev_conf->dequeue_timeout_ns = info->max_dequeue_timeout_ns + 1;
122 max_events_limit(struct rte_event_dev_config *dev_conf,
123 struct rte_event_dev_info *info)
125 dev_conf->nb_events_limit = info->max_num_events + 1;
129 max_event_ports(struct rte_event_dev_config *dev_conf,
130 struct rte_event_dev_info *info)
132 dev_conf->nb_event_ports = info->max_event_ports + 1;
136 max_event_queues(struct rte_event_dev_config *dev_conf,
137 struct rte_event_dev_info *info)
139 dev_conf->nb_event_queues = info->max_event_queues + 1;
143 max_event_queue_flows(struct rte_event_dev_config *dev_conf,
144 struct rte_event_dev_info *info)
146 dev_conf->nb_event_queue_flows = info->max_event_queue_flows + 1;
150 max_event_port_dequeue_depth(struct rte_event_dev_config *dev_conf,
151 struct rte_event_dev_info *info)
153 dev_conf->nb_event_port_dequeue_depth =
154 info->max_event_port_dequeue_depth + 1;
158 max_event_port_enqueue_depth(struct rte_event_dev_config *dev_conf,
159 struct rte_event_dev_info *info)
161 dev_conf->nb_event_port_enqueue_depth =
162 info->max_event_port_enqueue_depth + 1;
167 test_eventdev_configure(void)
170 struct rte_event_dev_config dev_conf;
171 struct rte_event_dev_info info;
172 ret = rte_event_dev_configure(TEST_DEV_ID, NULL);
173 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
175 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
176 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
179 TEST_ASSERT_EQUAL(-EINVAL,
180 test_ethdev_config_run(&dev_conf, &info, max_dequeue_limit),
181 "Config negative test failed");
182 TEST_ASSERT_EQUAL(-EINVAL,
183 test_ethdev_config_run(&dev_conf, &info, max_events_limit),
184 "Config negative test failed");
185 TEST_ASSERT_EQUAL(-EINVAL,
186 test_ethdev_config_run(&dev_conf, &info, max_event_ports),
187 "Config negative test failed");
188 TEST_ASSERT_EQUAL(-EINVAL,
189 test_ethdev_config_run(&dev_conf, &info, max_event_queues),
190 "Config negative test failed");
191 TEST_ASSERT_EQUAL(-EINVAL,
192 test_ethdev_config_run(&dev_conf, &info, max_event_queue_flows),
193 "Config negative test failed");
195 if (info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) {
196 TEST_ASSERT_EQUAL(-EINVAL,
197 test_ethdev_config_run(&dev_conf, &info,
198 max_event_port_dequeue_depth),
199 "Config negative test failed");
200 TEST_ASSERT_EQUAL(-EINVAL,
201 test_ethdev_config_run(&dev_conf, &info,
202 max_event_port_enqueue_depth),
203 "Config negative test failed");
207 devconf_set_default_sane_values(&dev_conf, &info);
208 ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
209 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
212 devconf_set_default_sane_values(&dev_conf, &info);
213 dev_conf.nb_event_ports = RTE_MAX(info.max_event_ports/2, 1);
214 dev_conf.nb_event_queues = RTE_MAX(info.max_event_queues/2, 1);
215 ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
216 TEST_ASSERT_SUCCESS(ret, "Failed to re configure eventdev");
218 /* re-configure back to max_event_queues and max_event_ports */
219 devconf_set_default_sane_values(&dev_conf, &info);
220 ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
221 TEST_ASSERT_SUCCESS(ret, "Failed to re-configure eventdev");
228 eventdev_configure_setup(void)
231 struct rte_event_dev_config dev_conf;
232 struct rte_event_dev_info info;
234 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
235 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
236 devconf_set_default_sane_values(&dev_conf, &info);
237 ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
238 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
244 test_eventdev_queue_default_conf_get(void)
247 struct rte_event_queue_conf qconf;
249 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, NULL);
250 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
252 uint32_t queue_count;
253 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
254 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
255 "Queue count get failed");
257 for (i = 0; i < (int)queue_count; i++) {
258 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
260 TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d info", i);
267 test_eventdev_queue_setup(void)
270 struct rte_event_dev_info info;
271 struct rte_event_queue_conf qconf;
273 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
274 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
277 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
278 TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
279 qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
280 qconf.nb_atomic_flows = info.max_event_queue_flows + 1;
281 ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
282 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
284 qconf.nb_atomic_flows = info.max_event_queue_flows;
285 qconf.schedule_type = RTE_SCHED_TYPE_ORDERED;
286 qconf.nb_atomic_order_sequences = info.max_event_queue_flows + 1;
287 ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
288 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
290 ret = rte_event_queue_setup(TEST_DEV_ID, info.max_event_queues,
292 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
295 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
296 TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
297 ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
298 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue0");
300 uint32_t queue_count;
301 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
302 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
303 "Queue count get failed");
305 for (i = 0; i < (int)queue_count; i++) {
306 ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
307 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
314 test_eventdev_queue_count(void)
317 struct rte_event_dev_info info;
319 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
320 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
322 uint32_t queue_count;
323 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
324 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
325 "Queue count get failed");
326 TEST_ASSERT_EQUAL(queue_count, info.max_event_queues,
327 "Wrong queue count");
333 test_eventdev_queue_attr_priority(void)
336 struct rte_event_dev_info info;
337 struct rte_event_queue_conf qconf;
340 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
341 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
343 uint32_t queue_count;
344 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
345 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
346 "Queue count get failed");
348 for (i = 0; i < (int)queue_count; i++) {
349 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
351 TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d def conf", i);
352 qconf.priority = i % RTE_EVENT_DEV_PRIORITY_LOWEST;
353 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
354 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
357 for (i = 0; i < (int)queue_count; i++) {
359 TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
360 RTE_EVENT_QUEUE_ATTR_PRIORITY, &tmp),
361 "Queue priority get failed");
364 if (info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
365 TEST_ASSERT_EQUAL(priority,
366 i % RTE_EVENT_DEV_PRIORITY_LOWEST,
367 "Wrong priority value for queue%d", i);
369 TEST_ASSERT_EQUAL(priority,
370 RTE_EVENT_DEV_PRIORITY_NORMAL,
371 "Wrong priority value for queue%d", i);
378 test_eventdev_queue_attr_nb_atomic_flows(void)
381 struct rte_event_dev_info info;
382 struct rte_event_queue_conf qconf;
383 uint32_t nb_atomic_flows;
385 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
386 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
388 uint32_t queue_count;
389 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
390 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
391 "Queue count get failed");
393 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
394 TEST_ASSERT_SUCCESS(ret, "Failed to get queue 0's def conf");
396 if (qconf.nb_atomic_flows == 0)
397 /* Assume PMD doesn't support atomic flows, return early */
400 qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
402 for (i = 0; i < (int)queue_count; i++) {
403 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
404 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
407 for (i = 0; i < (int)queue_count; i++) {
408 TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
409 RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS,
411 "Queue nb_atomic_flows get failed");
413 TEST_ASSERT_EQUAL(nb_atomic_flows, qconf.nb_atomic_flows,
414 "Wrong atomic flows value for queue%d", i);
421 test_eventdev_queue_attr_nb_atomic_order_sequences(void)
424 struct rte_event_dev_info info;
425 struct rte_event_queue_conf qconf;
426 uint32_t nb_atomic_order_sequences;
428 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
429 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
431 uint32_t queue_count;
432 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
433 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
434 "Queue count get failed");
436 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
437 TEST_ASSERT_SUCCESS(ret, "Failed to get queue 0's def conf");
439 if (qconf.nb_atomic_order_sequences == 0)
440 /* Assume PMD doesn't support reordering */
443 qconf.schedule_type = RTE_SCHED_TYPE_ORDERED;
445 for (i = 0; i < (int)queue_count; i++) {
446 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
447 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
450 for (i = 0; i < (int)queue_count; i++) {
451 TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
452 RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES,
453 &nb_atomic_order_sequences),
454 "Queue nb_atomic_order_sequencess get failed");
456 TEST_ASSERT_EQUAL(nb_atomic_order_sequences,
457 qconf.nb_atomic_order_sequences,
458 "Wrong atomic order sequences value for queue%d",
466 test_eventdev_queue_attr_event_queue_cfg(void)
469 struct rte_event_dev_info info;
470 struct rte_event_queue_conf qconf;
471 uint32_t event_queue_cfg;
473 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
474 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
476 uint32_t queue_count;
477 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
478 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
479 "Queue count get failed");
481 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
482 TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 def conf");
484 qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
486 for (i = 0; i < (int)queue_count; i++) {
487 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
488 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
491 for (i = 0; i < (int)queue_count; i++) {
492 TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
493 RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG,
495 "Queue event_queue_cfg get failed");
497 TEST_ASSERT_EQUAL(event_queue_cfg, qconf.event_queue_cfg,
498 "Wrong event_queue_cfg value for queue%d",
506 test_eventdev_port_default_conf_get(void)
509 struct rte_event_port_conf pconf;
511 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, NULL);
512 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
515 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
516 RTE_EVENT_DEV_ATTR_PORT_COUNT,
517 &port_count), "Port count get failed");
519 ret = rte_event_port_default_conf_get(TEST_DEV_ID,
520 port_count + 1, NULL);
521 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
523 for (i = 0; i < (int)port_count; i++) {
524 ret = rte_event_port_default_conf_get(TEST_DEV_ID, i,
526 TEST_ASSERT_SUCCESS(ret, "Failed to get port%d info", i);
533 test_eventdev_port_setup(void)
536 struct rte_event_dev_info info;
537 struct rte_event_port_conf pconf;
539 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
540 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
543 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
544 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
545 pconf.new_event_threshold = info.max_num_events + 1;
546 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
547 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
549 pconf.new_event_threshold = info.max_num_events;
550 pconf.dequeue_depth = info.max_event_port_dequeue_depth + 1;
551 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
552 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
554 pconf.dequeue_depth = info.max_event_port_dequeue_depth;
555 pconf.enqueue_depth = info.max_event_port_enqueue_depth + 1;
556 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
557 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
559 if (!(info.event_dev_cap &
560 RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
561 pconf.enqueue_depth = info.max_event_port_enqueue_depth;
562 pconf.event_port_cfg = RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
563 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
564 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
565 pconf.event_port_cfg = 0;
568 ret = rte_event_port_setup(TEST_DEV_ID, info.max_event_ports,
570 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
573 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
574 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
575 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
576 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
579 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
580 RTE_EVENT_DEV_ATTR_PORT_COUNT,
581 &port_count), "Port count get failed");
583 for (i = 0; i < (int)port_count; i++) {
584 ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
585 TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
592 test_eventdev_port_attr_dequeue_depth(void)
595 struct rte_event_dev_info info;
596 struct rte_event_port_conf pconf;
598 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
599 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
601 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
602 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
603 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
604 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
607 TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
608 RTE_EVENT_PORT_ATTR_DEQ_DEPTH, &value),
609 0, "Call to get port dequeue depth failed");
610 TEST_ASSERT_EQUAL(value, pconf.dequeue_depth,
611 "Wrong port dequeue depth");
617 test_eventdev_port_attr_enqueue_depth(void)
620 struct rte_event_dev_info info;
621 struct rte_event_port_conf pconf;
623 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
624 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
626 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
627 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
628 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
629 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
632 TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
633 RTE_EVENT_PORT_ATTR_ENQ_DEPTH, &value),
634 0, "Call to get port enqueue depth failed");
635 TEST_ASSERT_EQUAL(value, pconf.enqueue_depth,
636 "Wrong port enqueue depth");
642 test_eventdev_port_attr_new_event_threshold(void)
645 struct rte_event_dev_info info;
646 struct rte_event_port_conf pconf;
648 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
649 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
651 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
652 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
653 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
654 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
657 TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
658 RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD, &value),
659 0, "Call to get port new event threshold failed");
660 TEST_ASSERT_EQUAL((int32_t) value, pconf.new_event_threshold,
661 "Wrong port new event threshold");
667 test_eventdev_port_count(void)
670 struct rte_event_dev_info info;
672 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
673 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
676 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
677 RTE_EVENT_DEV_ATTR_PORT_COUNT,
678 &port_count), "Port count get failed");
679 TEST_ASSERT_EQUAL(port_count, info.max_event_ports, "Wrong port count");
685 test_eventdev_timeout_ticks(void)
688 uint64_t timeout_ticks;
690 ret = rte_event_dequeue_timeout_ticks(TEST_DEV_ID, 100, &timeout_ticks);
692 TEST_ASSERT_SUCCESS(ret, "Fail to get timeout_ticks");
699 test_eventdev_start_stop(void)
703 ret = eventdev_configure_setup();
704 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
706 uint32_t queue_count;
707 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
708 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
709 "Queue count get failed");
710 for (i = 0; i < (int)queue_count; i++) {
711 ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
712 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
716 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
717 RTE_EVENT_DEV_ATTR_PORT_COUNT,
718 &port_count), "Port count get failed");
720 for (i = 0; i < (int)port_count; i++) {
721 ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
722 TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
725 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
726 TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
729 ret = rte_event_dev_start(TEST_DEV_ID);
730 TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);
732 rte_event_dev_stop(TEST_DEV_ID);
738 eventdev_setup_device(void)
742 ret = eventdev_configure_setup();
743 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
745 uint32_t queue_count;
746 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
747 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
748 "Queue count get failed");
749 for (i = 0; i < (int)queue_count; i++) {
750 ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
751 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
755 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
756 RTE_EVENT_DEV_ATTR_PORT_COUNT,
757 &port_count), "Port count get failed");
759 for (i = 0; i < (int)port_count; i++) {
760 ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
761 TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
764 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
765 TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
768 ret = rte_event_dev_start(TEST_DEV_ID);
769 TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);
775 eventdev_stop_device(void)
777 rte_event_dev_stop(TEST_DEV_ID);
781 test_eventdev_link(void)
783 int ret, nb_queues, i;
784 uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
785 uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
787 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
788 TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
791 uint32_t queue_count;
792 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
793 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
794 "Queue count get failed");
795 nb_queues = queue_count;
796 for (i = 0; i < nb_queues; i++) {
798 priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
801 ret = rte_event_port_link(TEST_DEV_ID, 0, queues,
802 priorities, nb_queues);
803 TEST_ASSERT(ret == nb_queues, "Failed to link(device%d) ret=%d",
809 test_eventdev_unlink(void)
811 int ret, nb_queues, i;
812 uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
814 ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
815 TEST_ASSERT(ret >= 0, "Failed to unlink with NULL device%d",
818 uint32_t queue_count;
819 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
820 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
821 "Queue count get failed");
822 nb_queues = queue_count;
823 for (i = 0; i < nb_queues; i++)
826 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
827 TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
830 ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, nb_queues);
831 TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
837 test_eventdev_link_get(void)
840 uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
841 uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
843 /* link all queues */
844 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
845 TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
848 uint32_t queue_count;
849 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
850 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
851 "Queue count get failed");
852 const int nb_queues = queue_count;
853 for (i = 0; i < nb_queues; i++)
856 ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, nb_queues);
857 TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
860 ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
861 TEST_ASSERT(ret == 0, "(%d)Wrong link get=%d", TEST_DEV_ID, ret);
863 /* link all queues and get the links */
864 for (i = 0; i < nb_queues; i++) {
866 priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
868 ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities,
870 TEST_ASSERT(ret == nb_queues, "Failed to link(device%d) ret=%d",
872 ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
873 TEST_ASSERT(ret == nb_queues, "(%d)Wrong link get ret=%d expected=%d",
874 TEST_DEV_ID, ret, nb_queues);
876 ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
877 TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
879 /* link just one queue */
881 priorities[0] = RTE_EVENT_DEV_PRIORITY_NORMAL;
883 ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities, 1);
884 TEST_ASSERT(ret == 1, "Failed to link(device%d) ret=%d",
886 ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
887 TEST_ASSERT(ret == 1, "(%d)Wrong link get ret=%d expected=%d",
888 TEST_DEV_ID, ret, 1);
889 /* unlink the queue */
890 ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
891 TEST_ASSERT(ret == 1, "Failed to unlink(device%d) ret=%d",
894 /* 4links and 2 unlinks */
895 if (nb_queues >= 4) {
896 for (i = 0; i < 4; i++) {
898 priorities[i] = 0x40;
900 ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities,
902 TEST_ASSERT(ret == 4, "Failed to link(device%d) ret=%d",
905 for (i = 0; i < 2; i++)
908 ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, 2);
909 TEST_ASSERT(ret == 2, "Failed to unlink(device%d) ret=%d",
911 ret = rte_event_port_links_get(TEST_DEV_ID, 0,
913 TEST_ASSERT(ret == 2, "(%d)Wrong link get ret=%d expected=%d",
914 TEST_DEV_ID, ret, 2);
915 TEST_ASSERT(queues[0] == 2, "ret=%d expected=%d", ret, 2);
916 TEST_ASSERT(priorities[0] == 0x40, "ret=%d expected=%d",
918 TEST_ASSERT(queues[1] == 3, "ret=%d expected=%d", ret, 3);
919 TEST_ASSERT(priorities[1] == 0x40, "ret=%d expected=%d",
927 test_eventdev_close(void)
929 rte_event_dev_stop(TEST_DEV_ID);
930 return rte_event_dev_close(TEST_DEV_ID);
933 static struct unit_test_suite eventdev_common_testsuite = {
934 .suite_name = "eventdev common code unit test suite",
935 .setup = testsuite_setup,
936 .teardown = testsuite_teardown,
938 TEST_CASE_ST(NULL, NULL,
939 test_eventdev_count),
940 TEST_CASE_ST(NULL, NULL,
941 test_eventdev_get_dev_id),
942 TEST_CASE_ST(NULL, NULL,
943 test_eventdev_socket_id),
944 TEST_CASE_ST(NULL, NULL,
945 test_eventdev_info_get),
946 TEST_CASE_ST(NULL, NULL,
947 test_eventdev_configure),
948 TEST_CASE_ST(eventdev_configure_setup, NULL,
949 test_eventdev_queue_default_conf_get),
950 TEST_CASE_ST(eventdev_configure_setup, NULL,
951 test_eventdev_queue_setup),
952 TEST_CASE_ST(eventdev_configure_setup, NULL,
953 test_eventdev_queue_count),
954 TEST_CASE_ST(eventdev_configure_setup, NULL,
955 test_eventdev_queue_attr_priority),
956 TEST_CASE_ST(eventdev_configure_setup, NULL,
957 test_eventdev_queue_attr_nb_atomic_flows),
958 TEST_CASE_ST(eventdev_configure_setup, NULL,
959 test_eventdev_queue_attr_nb_atomic_order_sequences),
960 TEST_CASE_ST(eventdev_configure_setup, NULL,
961 test_eventdev_queue_attr_event_queue_cfg),
962 TEST_CASE_ST(eventdev_configure_setup, NULL,
963 test_eventdev_port_default_conf_get),
964 TEST_CASE_ST(eventdev_configure_setup, NULL,
965 test_eventdev_port_setup),
966 TEST_CASE_ST(eventdev_configure_setup, NULL,
967 test_eventdev_port_attr_dequeue_depth),
968 TEST_CASE_ST(eventdev_configure_setup, NULL,
969 test_eventdev_port_attr_enqueue_depth),
970 TEST_CASE_ST(eventdev_configure_setup, NULL,
971 test_eventdev_port_attr_new_event_threshold),
972 TEST_CASE_ST(eventdev_configure_setup, NULL,
973 test_eventdev_port_count),
974 TEST_CASE_ST(eventdev_configure_setup, NULL,
975 test_eventdev_timeout_ticks),
976 TEST_CASE_ST(NULL, NULL,
977 test_eventdev_start_stop),
978 TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
980 TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
981 test_eventdev_unlink),
982 TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
983 test_eventdev_link_get),
984 TEST_CASE_ST(eventdev_setup_device, NULL,
985 test_eventdev_close),
986 TEST_CASES_END() /**< NULL terminate unit test array */
991 test_eventdev_common(void)
993 return unit_test_suite_runner(&eventdev_common_testsuite);
997 test_eventdev_selftest_impl(const char *pmd, const char *opts)
1001 if (rte_event_dev_get_dev_id(pmd) == -ENODEV)
1002 ret = rte_vdev_init(pmd, opts);
1004 return TEST_SKIPPED;
1006 return rte_event_dev_selftest(rte_event_dev_get_dev_id(pmd));
/* Selftest wrapper for the SW eventdev PMD. */
static int
test_eventdev_selftest_sw(void)
{
	return test_eventdev_selftest_impl("event_sw", "");
}
/* Selftest wrapper for the OCTEON TX eventdev PMD. */
static int
test_eventdev_selftest_octeontx(void)
{
	return test_eventdev_selftest_impl("event_octeontx", "");
}
/* Selftest wrapper for the OCTEON TX2 eventdev PMD. */
static int
test_eventdev_selftest_octeontx2(void)
{
	return test_eventdev_selftest_impl("event_octeontx2", "");
}
/* Selftest wrapper for the DPAA2 eventdev PMD. */
static int
test_eventdev_selftest_dpaa2(void)
{
	return test_eventdev_selftest_impl("event_dpaa2", "");
}
/* Selftest wrapper for the DLB eventdev PMD. */
static int
test_eventdev_selftest_dlb(void)
{
	return test_eventdev_selftest_impl("dlb_event", "");
}
/* Selftest wrapper for the DLB2 eventdev PMD. */
static int
test_eventdev_selftest_dlb2(void)
{
	return test_eventdev_selftest_impl("dlb2_event", "");
}
/* Register the test commands with the test framework. */
REGISTER_TEST_COMMAND(eventdev_common_autotest, test_eventdev_common);
REGISTER_TEST_COMMAND(eventdev_selftest_sw, test_eventdev_selftest_sw);
REGISTER_TEST_COMMAND(eventdev_selftest_octeontx,
		test_eventdev_selftest_octeontx);
REGISTER_TEST_COMMAND(eventdev_selftest_octeontx2,
		test_eventdev_selftest_octeontx2);
REGISTER_TEST_COMMAND(eventdev_selftest_dpaa2, test_eventdev_selftest_dpaa2);
REGISTER_TEST_COMMAND(eventdev_selftest_dlb, test_eventdev_selftest_dlb);
REGISTER_TEST_COMMAND(eventdev_selftest_dlb2, test_eventdev_selftest_dlb2);