1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Cavium, Inc
7 #include <rte_common.h>
8 #include <rte_hexdump.h>
10 #include <rte_malloc.h>
11 #include <rte_memcpy.h>
13 #ifdef RTE_EXEC_ENV_WINDOWS
15 test_eventdev_common(void)
17 printf("eventdev_common not supported on Windows, skipping test\n");
23 #include <rte_eventdev.h>
25 #include <rte_bus_vdev.h>
/*
 * testsuite_setup() fragment (intervening lines elided in this view):
 * compile-time check that struct rte_event stays 16 bytes, then — when
 * no event device was probed — fall back to creating the software
 * "event_skeleton" vdev so the suite can still run.
 */
32 RTE_BUILD_BUG_ON(sizeof(struct rte_event) != 16);
34 count = rte_event_dev_count();
36 printf("Failed to find a valid event device,"
37 " testing with event_skeleton device\n");
38 return rte_vdev_init("event_skeleton", NULL);
/* Suite teardown hook; body elided in this view. */
44 testsuite_teardown(void)
/* Verify rte_event_dev_count() reports at least one event device. */
49 test_eventdev_count(void)
52 count = rte_event_dev_count();
53 TEST_ASSERT(count > 0, "Invalid eventdev count %" PRIu8, count);
/* Looking up a bogus driver name must fail with a negative return. */
58 test_eventdev_get_dev_id(void)
61 ret = rte_event_dev_get_dev_id("not_a_valid_eventdev_driver");
62 TEST_ASSERT_FAIL(ret, "Expected <0 for invalid dev name ret=%d", ret);
/*
 * Socket-id query: a valid device id must not yield -EINVAL, while an
 * out-of-range id (RTE_EVENT_MAX_DEVS) must yield exactly -EINVAL.
 */
67 test_eventdev_socket_id(void)
70 socket_id = rte_event_dev_socket_id(TEST_DEV_ID);
71 TEST_ASSERT(socket_id != -EINVAL, "Failed to get socket_id %d",
73 socket_id = rte_event_dev_socket_id(RTE_EVENT_MAX_DEVS);
74 TEST_ASSERT(socket_id == -EINVAL, "Expected -EINVAL %d", socket_id);
/*
 * Info query: a NULL output pointer must be rejected with -EINVAL; a
 * successful query must advertise at least one port and one queue.
 */
80 test_eventdev_info_get(void)
83 struct rte_event_dev_info info;
84 ret = rte_event_dev_info_get(TEST_DEV_ID, NULL);
85 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
86 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
87 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
88 TEST_ASSERT(info.max_event_ports > 0,
89 "Not enough event ports %d", info.max_event_ports);
90 TEST_ASSERT(info.max_event_queues > 0,
91 "Not enough event queues %d", info.max_event_queues);
/*
 * Zero @dev_conf, then request the device's advertised maxima (ports,
 * queues, flows, port depths, events limit) with the minimum supported
 * dequeue timeout — a configuration every conforming PMD must accept.
 * Fix: the assignment of nb_event_port_enqueue_depth appeared twice
 * back to back (content lines 106-109); the redundant copy is removed.
 */
96 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
97 struct rte_event_dev_info *info)
99 memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
100 dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
101 dev_conf->nb_event_ports = info->max_event_ports;
102 dev_conf->nb_event_queues = info->max_event_queues;
103 dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
104 dev_conf->nb_event_port_dequeue_depth =
105 info->max_event_port_dequeue_depth;
106 dev_conf->nb_event_port_enqueue_depth =
107 info->max_event_port_enqueue_depth;
110 dev_conf->nb_events_limit =
111 info->max_num_events;
/*
 * Negative-config driver: build a sane default config, mutate it via
 * @fn, then attempt rte_event_dev_configure() and return its result.
 * NOTE(review): the call to fn(dev_conf, info) is elided in this view —
 * confirm it runs between the default setup and the configure call.
 */
115 test_ethdev_config_run(struct rte_event_dev_config *dev_conf,
116 struct rte_event_dev_info *info,
117 void (*fn)(struct rte_event_dev_config *dev_conf,
118 struct rte_event_dev_info *info))
120 devconf_set_default_sane_values(dev_conf, info);
122 return rte_event_dev_configure(TEST_DEV_ID, dev_conf);
/*
 * Config-mutator helpers for test_ethdev_config_run(): each one bumps a
 * single resource one past the device's advertised maximum so that the
 * subsequent configure attempt must fail with -EINVAL.
 */
126 max_dequeue_limit(struct rte_event_dev_config *dev_conf,
127 struct rte_event_dev_info *info)
129 dev_conf->dequeue_timeout_ns = info->max_dequeue_timeout_ns + 1;
133 max_events_limit(struct rte_event_dev_config *dev_conf,
134 struct rte_event_dev_info *info)
136 dev_conf->nb_events_limit = info->max_num_events + 1;
140 max_event_ports(struct rte_event_dev_config *dev_conf,
141 struct rte_event_dev_info *info)
143 dev_conf->nb_event_ports = info->max_event_ports + 1;
147 max_event_queues(struct rte_event_dev_config *dev_conf,
148 struct rte_event_dev_info *info)
150 dev_conf->nb_event_queues = info->max_event_queues + 1;
154 max_event_queue_flows(struct rte_event_dev_config *dev_conf,
155 struct rte_event_dev_info *info)
157 dev_conf->nb_event_queue_flows = info->max_event_queue_flows + 1;
161 max_event_port_dequeue_depth(struct rte_event_dev_config *dev_conf,
162 struct rte_event_dev_info *info)
164 dev_conf->nb_event_port_dequeue_depth =
165 info->max_event_port_dequeue_depth + 1;
169 max_event_port_enqueue_depth(struct rte_event_dev_config *dev_conf,
170 struct rte_event_dev_info *info)
172 dev_conf->nb_event_port_enqueue_depth =
173 info->max_event_port_enqueue_depth + 1;
/*
 * Device configuration test: NULL config and each over-limit resource
 * must be rejected with -EINVAL; a sane config must succeed, and the
 * device must accept reconfiguration at half and at full resources.
 */
178 test_eventdev_configure(void)
181 struct rte_event_dev_config dev_conf;
182 struct rte_event_dev_info info;
183 ret = rte_event_dev_configure(TEST_DEV_ID, NULL);
184 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
186 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
187 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
/* Negative tests: each mutator exceeds one advertised limit. */
190 TEST_ASSERT_EQUAL(-EINVAL,
191 test_ethdev_config_run(&dev_conf, &info, max_dequeue_limit),
192 "Config negative test failed");
193 TEST_ASSERT_EQUAL(-EINVAL,
194 test_ethdev_config_run(&dev_conf, &info, max_events_limit),
195 "Config negative test failed");
196 TEST_ASSERT_EQUAL(-EINVAL,
197 test_ethdev_config_run(&dev_conf, &info, max_event_ports),
198 "Config negative test failed");
199 TEST_ASSERT_EQUAL(-EINVAL,
200 test_ethdev_config_run(&dev_conf, &info, max_event_queues),
201 "Config negative test failed");
202 TEST_ASSERT_EQUAL(-EINVAL,
203 test_ethdev_config_run(&dev_conf, &info, max_event_queue_flows),
204 "Config negative test failed");
/* Port depth limits only apply to burst-capable devices. */
206 if (info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) {
207 TEST_ASSERT_EQUAL(-EINVAL,
208 test_ethdev_config_run(&dev_conf, &info,
209 max_event_port_dequeue_depth),
210 "Config negative test failed");
211 TEST_ASSERT_EQUAL(-EINVAL,
212 test_ethdev_config_run(&dev_conf, &info,
213 max_event_port_enqueue_depth),
214 "Config negative test failed");
/* Positive path: full, half, then full resources again. */
218 devconf_set_default_sane_values(&dev_conf, &info);
219 ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
220 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
223 devconf_set_default_sane_values(&dev_conf, &info);
224 dev_conf.nb_event_ports = RTE_MAX(info.max_event_ports/2, 1);
225 dev_conf.nb_event_queues = RTE_MAX(info.max_event_queues/2, 1);
226 ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
227 TEST_ASSERT_SUCCESS(ret, "Failed to re configure eventdev");
229 /* re-configure back to max_event_queues and max_event_ports */
230 devconf_set_default_sane_values(&dev_conf, &info);
231 ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
232 TEST_ASSERT_SUCCESS(ret, "Failed to re-configure eventdev");
/*
 * Per-test setup: configure the device with default sane (maximum)
 * values so queue/port tests start from a known state.
 */
239 eventdev_configure_setup(void)
242 struct rte_event_dev_config dev_conf;
243 struct rte_event_dev_info info;
245 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
246 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
247 devconf_set_default_sane_values(&dev_conf, &info);
248 ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
249 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
/*
 * Default queue config: NULL output must fail with -EINVAL, and the
 * default config of every configured queue must be retrievable.
 */
255 test_eventdev_queue_default_conf_get(void)
258 struct rte_event_queue_conf qconf;
260 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, NULL);
261 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
263 uint32_t queue_count;
264 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
265 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
266 "Queue count get failed");
268 for (i = 0; i < (int)queue_count; i++) {
269 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
271 TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d info", i);
/*
 * Queue setup: over-limit atomic flows / order sequences and an
 * out-of-range queue id must all fail with -EINVAL; a default config
 * and a NULL config must succeed for every queue.
 */
278 test_eventdev_queue_setup(void)
281 struct rte_event_dev_info info;
282 struct rte_event_queue_conf qconf;
284 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
285 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
/* Negative: nb_atomic_flows one past the advertised max. */
288 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
289 TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
290 qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
291 qconf.nb_atomic_flows = info.max_event_queue_flows + 1;
292 ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
293 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
/* Negative: ordered sequences one past the advertised max. */
295 qconf.nb_atomic_flows = info.max_event_queue_flows;
296 qconf.schedule_type = RTE_SCHED_TYPE_ORDERED;
297 qconf.nb_atomic_order_sequences = info.max_event_queue_flows + 1;
298 ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
299 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
/* Negative: queue id equal to max_event_queues is out of range. */
301 ret = rte_event_queue_setup(TEST_DEV_ID, info.max_event_queues,
303 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
/* Positive: default config, then NULL config for every queue. */
306 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
307 TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
308 ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
309 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue0");
311 uint32_t queue_count;
312 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
313 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
314 "Queue count get failed");
316 for (i = 0; i < (int)queue_count; i++) {
317 ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
318 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
/*
 * After default configuration the QUEUE_COUNT attribute must equal the
 * device's advertised max_event_queues.
 */
325 test_eventdev_queue_count(void)
328 struct rte_event_dev_info info;
330 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
331 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
333 uint32_t queue_count;
334 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
335 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
336 "Queue count get failed");
337 TEST_ASSERT_EQUAL(queue_count, info.max_event_queues,
338 "Wrong queue count");
/*
 * Queue priority attribute: set a distinct priority per queue, then
 * read it back. QOS-capable devices must return the value set; others
 * must report PRIORITY_NORMAL.
 */
344 test_eventdev_queue_attr_priority(void)
347 struct rte_event_dev_info info;
348 struct rte_event_queue_conf qconf;
351 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
352 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
354 uint32_t queue_count;
355 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
356 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
357 "Queue count get failed");
359 for (i = 0; i < (int)queue_count; i++) {
360 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
362 TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d def conf", i);
363 qconf.priority = i % RTE_EVENT_DEV_PRIORITY_LOWEST;
364 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
365 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
368 for (i = 0; i < (int)queue_count; i++) {
370 TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
371 RTE_EVENT_QUEUE_ATTR_PRIORITY, &tmp),
372 "Queue priority get failed");
/* NOTE(review): assignment of `priority` from `tmp` is elided here. */
375 if (info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
376 TEST_ASSERT_EQUAL(priority,
377 i % RTE_EVENT_DEV_PRIORITY_LOWEST,
378 "Wrong priority value for queue%d", i);
380 TEST_ASSERT_EQUAL(priority,
381 RTE_EVENT_DEV_PRIORITY_NORMAL,
382 "Wrong priority value for queue%d", i);
/*
 * nb_atomic_flows attribute: configure every queue as ATOMIC with the
 * default flow count, then verify the attribute reads back the same
 * value. Skips early when the PMD reports zero atomic flows.
 */
389 test_eventdev_queue_attr_nb_atomic_flows(void)
392 struct rte_event_dev_info info;
393 struct rte_event_queue_conf qconf;
394 uint32_t nb_atomic_flows;
396 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
397 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
399 uint32_t queue_count;
400 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
401 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
402 "Queue count get failed");
404 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
405 TEST_ASSERT_SUCCESS(ret, "Failed to get queue 0's def conf");
407 if (qconf.nb_atomic_flows == 0)
408 /* Assume PMD doesn't support atomic flows, return early */
411 qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
413 for (i = 0; i < (int)queue_count; i++) {
414 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
415 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
418 for (i = 0; i < (int)queue_count; i++) {
419 TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
420 RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS,
422 "Queue nb_atomic_flows get failed");
424 TEST_ASSERT_EQUAL(nb_atomic_flows, qconf.nb_atomic_flows,
425 "Wrong atomic flows value for queue%d", i);
/*
 * nb_atomic_order_sequences attribute: configure every queue as
 * ORDERED with the default sequence count, then verify the attribute
 * reads back the same value. Skips when the PMD reports zero.
 */
432 test_eventdev_queue_attr_nb_atomic_order_sequences(void)
435 struct rte_event_dev_info info;
436 struct rte_event_queue_conf qconf;
437 uint32_t nb_atomic_order_sequences;
439 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
440 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
442 uint32_t queue_count;
443 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
444 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
445 "Queue count get failed");
447 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
448 TEST_ASSERT_SUCCESS(ret, "Failed to get queue 0's def conf");
450 if (qconf.nb_atomic_order_sequences == 0)
451 /* Assume PMD doesn't support reordering */
454 qconf.schedule_type = RTE_SCHED_TYPE_ORDERED;
456 for (i = 0; i < (int)queue_count; i++) {
457 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
458 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
461 for (i = 0; i < (int)queue_count; i++) {
462 TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
463 RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES,
464 &nb_atomic_order_sequences),
465 "Queue nb_atomic_order_sequencess get failed");
467 TEST_ASSERT_EQUAL(nb_atomic_order_sequences,
468 qconf.nb_atomic_order_sequences,
469 "Wrong atomic order sequences value for queue%d",
/*
 * event_queue_cfg attribute: configure every queue as SINGLE_LINK and
 * verify the attribute reads back the configured flags.
 */
477 test_eventdev_queue_attr_event_queue_cfg(void)
480 struct rte_event_dev_info info;
481 struct rte_event_queue_conf qconf;
482 uint32_t event_queue_cfg;
484 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
485 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
487 uint32_t queue_count;
488 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
489 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
490 "Queue count get failed");
492 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
493 TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 def conf");
495 qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
497 for (i = 0; i < (int)queue_count; i++) {
498 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
499 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
502 for (i = 0; i < (int)queue_count; i++) {
503 TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
504 RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG,
506 "Queue event_queue_cfg get failed");
508 TEST_ASSERT_EQUAL(event_queue_cfg, qconf.event_queue_cfg,
509 "Wrong event_queue_cfg value for queue%d",
/*
 * Default port config: NULL output and an out-of-range port id must
 * fail with -EINVAL; every configured port's default must be readable.
 */
517 test_eventdev_port_default_conf_get(void)
520 struct rte_event_port_conf pconf;
522 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, NULL);
523 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
526 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
527 RTE_EVENT_DEV_ATTR_PORT_COUNT,
528 &port_count), "Port count get failed");
/* port_count + 1 is deliberately out of range. */
530 ret = rte_event_port_default_conf_get(TEST_DEV_ID,
531 port_count + 1, NULL);
532 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
534 for (i = 0; i < (int)port_count; i++) {
535 ret = rte_event_port_default_conf_get(TEST_DEV_ID, i,
537 TEST_ASSERT_SUCCESS(ret, "Failed to get port%d info", i);
/*
 * Port setup: each over-limit field (new_event_threshold, dequeue and
 * enqueue depth), an unsupported implicit-release disable flag, and an
 * out-of-range port id must fail with -EINVAL; default and NULL
 * configs must succeed for every port.
 */
544 test_eventdev_port_setup(void)
547 struct rte_event_dev_info info;
548 struct rte_event_port_conf pconf;
550 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
551 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
554 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
555 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
556 pconf.new_event_threshold = info.max_num_events + 1;
557 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
558 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
560 pconf.new_event_threshold = info.max_num_events;
561 pconf.dequeue_depth = info.max_event_port_dequeue_depth + 1;
562 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
563 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
565 pconf.dequeue_depth = info.max_event_port_dequeue_depth;
566 pconf.enqueue_depth = info.max_event_port_enqueue_depth + 1;
567 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
568 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
/* Disable-implicit-release must be rejected when not capable. */
570 if (!(info.event_dev_cap &
571 RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
572 pconf.enqueue_depth = info.max_event_port_enqueue_depth;
573 pconf.event_port_cfg = RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
574 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
575 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
576 pconf.event_port_cfg = 0;
579 ret = rte_event_port_setup(TEST_DEV_ID, info.max_event_ports,
581 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
/* Positive path: default config on port0, then NULL on all ports. */
584 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
585 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
586 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
587 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
590 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
591 RTE_EVENT_DEV_ATTR_PORT_COUNT,
592 &port_count), "Port count get failed");
594 for (i = 0; i < (int)port_count; i++) {
595 ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
596 TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
/*
 * Port attribute round-trip: setup port0 with its default config, then
 * verify DEQ_DEPTH reads back the configured dequeue_depth.
 */
603 test_eventdev_port_attr_dequeue_depth(void)
606 struct rte_event_dev_info info;
607 struct rte_event_port_conf pconf;
609 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
610 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
612 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
613 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
614 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
615 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
618 TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
619 RTE_EVENT_PORT_ATTR_DEQ_DEPTH, &value),
620 0, "Call to get port dequeue depth failed");
621 TEST_ASSERT_EQUAL(value, pconf.dequeue_depth,
622 "Wrong port dequeue depth");
/*
 * Port attribute round-trip: setup port0 with its default config, then
 * verify ENQ_DEPTH reads back the configured enqueue_depth.
 */
628 test_eventdev_port_attr_enqueue_depth(void)
631 struct rte_event_dev_info info;
632 struct rte_event_port_conf pconf;
634 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
635 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
637 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
638 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
639 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
640 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
643 TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
644 RTE_EVENT_PORT_ATTR_ENQ_DEPTH, &value),
645 0, "Call to get port enqueue depth failed");
646 TEST_ASSERT_EQUAL(value, pconf.enqueue_depth,
647 "Wrong port enqueue depth");
/*
 * Port attribute round-trip: setup port0 with its default config, then
 * verify NEW_EVENT_THRESHOLD reads back the configured threshold.
 */
653 test_eventdev_port_attr_new_event_threshold(void)
656 struct rte_event_dev_info info;
657 struct rte_event_port_conf pconf;
659 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
660 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
662 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
663 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
664 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
665 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
668 TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
669 RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD, &value),
670 0, "Call to get port new event threshold failed");
/* Cast: attr value is unsigned, threshold is signed in the conf. */
671 TEST_ASSERT_EQUAL((int32_t) value, pconf.new_event_threshold,
672 "Wrong port new event threshold");
/*
 * After default configuration the PORT_COUNT attribute must equal the
 * device's advertised max_event_ports.
 */
678 test_eventdev_port_count(void)
681 struct rte_event_dev_info info;
683 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
684 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
687 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
688 RTE_EVENT_DEV_ATTR_PORT_COUNT,
689 &port_count), "Port count get failed");
690 TEST_ASSERT_EQUAL(port_count, info.max_event_ports, "Wrong port count");
/*
 * Convert a 100 ns timeout to device ticks; only the call's success is
 * checked (the tick value itself is device-specific).
 */
696 test_eventdev_timeout_ticks(void)
699 uint64_t timeout_ticks;
701 ret = rte_event_dequeue_timeout_ticks(TEST_DEV_ID, 100, &timeout_ticks);
703 TEST_ASSERT_SUCCESS(ret, "Fail to get timeout_ticks");
/*
 * Full lifecycle: configure, set up all queues and ports with defaults,
 * link port0 to every queue, start the device, then stop it.
 */
710 test_eventdev_start_stop(void)
714 ret = eventdev_configure_setup();
715 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
717 uint32_t queue_count;
718 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
719 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
720 "Queue count get failed");
721 for (i = 0; i < (int)queue_count; i++) {
722 ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
723 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
727 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
728 RTE_EVENT_DEV_ATTR_PORT_COUNT,
729 &port_count), "Port count get failed");
731 for (i = 0; i < (int)port_count; i++) {
732 ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
733 TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
/* NULL links mean "link all queues"; return is the link count. */
736 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
737 TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
740 ret = rte_event_dev_start(TEST_DEV_ID);
741 TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);
743 rte_event_dev_stop(TEST_DEV_ID);
/*
 * Shared setup for link/unlink tests: configure, set up all queues and
 * ports with defaults, link port0 to every queue, and start the device
 * (left running; paired with eventdev_stop_device()).
 */
749 eventdev_setup_device(void)
753 ret = eventdev_configure_setup();
754 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
756 uint32_t queue_count;
757 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
758 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
759 "Queue count get failed");
760 for (i = 0; i < (int)queue_count; i++) {
761 ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
762 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
766 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
767 RTE_EVENT_DEV_ATTR_PORT_COUNT,
768 &port_count), "Port count get failed");
770 for (i = 0; i < (int)port_count; i++) {
771 ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
772 TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
775 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
776 TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
779 ret = rte_event_dev_start(TEST_DEV_ID);
780 TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);
/* Teardown counterpart of eventdev_setup_device(): stop the device. */
786 eventdev_stop_device(void)
788 rte_event_dev_stop(TEST_DEV_ID);
/*
 * Link test: a NULL link (all queues) must succeed, then an explicit
 * link of every queue at NORMAL priority must report nb_queues links.
 */
792 test_eventdev_link(void)
794 int ret, nb_queues, i;
795 uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
796 uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
798 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
799 TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
802 uint32_t queue_count;
803 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
804 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
805 "Queue count get failed");
806 nb_queues = queue_count;
/* NOTE(review): queues[i] = i assignment is elided in this view. */
807 for (i = 0; i < nb_queues; i++) {
809 priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
812 ret = rte_event_port_link(TEST_DEV_ID, 0, queues,
813 priorities, nb_queues);
814 TEST_ASSERT(ret == nb_queues, "Failed to link(device%d) ret=%d",
/*
 * Unlink test: a NULL unlink must succeed, then after relinking all
 * queues an explicit unlink must report nb_queues unlinked.
 */
820 test_eventdev_unlink(void)
822 int ret, nb_queues, i;
823 uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
825 ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
826 TEST_ASSERT(ret >= 0, "Failed to unlink with NULL device%d",
829 uint32_t queue_count;
830 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
831 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
832 "Queue count get failed");
833 nb_queues = queue_count;
/* NOTE(review): loop body (queues[i] = i) is elided in this view. */
834 for (i = 0; i < nb_queues; i++)
837 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
838 TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
841 ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, nb_queues);
842 TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
/*
 * links_get test: verify the reported link list after (a) unlinking
 * everything, (b) linking all queues, (c) linking a single queue, and
 * (d) linking four queues then unlinking two — checking both the
 * returned count and the surviving queue ids/priorities.
 */
848 test_eventdev_link_get(void)
851 uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
852 uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
854 /* link all queues */
855 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
856 TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
859 uint32_t queue_count;
860 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
861 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
862 "Queue count get failed");
863 const int nb_queues = queue_count;
864 for (i = 0; i < nb_queues; i++)
867 ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, nb_queues);
868 TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
/* With nothing linked, links_get must report zero links. */
871 ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
872 TEST_ASSERT(ret == 0, "(%d)Wrong link get=%d", TEST_DEV_ID, ret);
874 /* link all queues and get the links */
875 for (i = 0; i < nb_queues; i++) {
877 priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
879 ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities,
881 TEST_ASSERT(ret == nb_queues, "Failed to link(device%d) ret=%d",
883 ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
884 TEST_ASSERT(ret == nb_queues, "(%d)Wrong link get ret=%d expected=%d",
885 TEST_DEV_ID, ret, nb_queues);
887 ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
888 TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
890 /* link just one queue */
892 priorities[0] = RTE_EVENT_DEV_PRIORITY_NORMAL;
894 ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities, 1);
895 TEST_ASSERT(ret == 1, "Failed to link(device%d) ret=%d",
897 ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
898 TEST_ASSERT(ret == 1, "(%d)Wrong link get ret=%d expected=%d",
899 TEST_DEV_ID, ret, 1);
900 /* unlink the queue */
901 ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
902 TEST_ASSERT(ret == 1, "Failed to unlink(device%d) ret=%d",
905 /* 4links and 2 unlinks */
906 if (nb_queues >= 4) {
907 for (i = 0; i < 4; i++) {
909 priorities[i] = 0x40;
911 ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities,
913 TEST_ASSERT(ret == 4, "Failed to link(device%d) ret=%d",
916 for (i = 0; i < 2; i++)
919 ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, 2);
920 TEST_ASSERT(ret == 2, "Failed to unlink(device%d) ret=%d",
922 ret = rte_event_port_links_get(TEST_DEV_ID, 0,
924 TEST_ASSERT(ret == 2, "(%d)Wrong link get ret=%d expected=%d",
925 TEST_DEV_ID, ret, 2);
/* Queues 2 and 3 at priority 0x40 must be the surviving links. */
926 TEST_ASSERT(queues[0] == 2, "ret=%d expected=%d", ret, 2);
927 TEST_ASSERT(priorities[0] == 0x40, "ret=%d expected=%d",
929 TEST_ASSERT(queues[1] == 3, "ret=%d expected=%d", ret, 3);
930 TEST_ASSERT(priorities[1] == 0x40, "ret=%d expected=%d",
/* Stop then close the device; returns the close result. */
938 test_eventdev_close(void)
940 rte_event_dev_stop(TEST_DEV_ID);
941 return rte_event_dev_close(TEST_DEV_ID);
/*
 * Test-suite table: suite-wide setup/teardown plus the ordered list of
 * unit tests. Tests needing a configured device use
 * eventdev_configure_setup; link tests use the full setup/stop pair.
 */
944 static struct unit_test_suite eventdev_common_testsuite = {
945 .suite_name = "eventdev common code unit test suite",
946 .setup = testsuite_setup,
947 .teardown = testsuite_teardown,
949 TEST_CASE_ST(NULL, NULL,
950 test_eventdev_count),
951 TEST_CASE_ST(NULL, NULL,
952 test_eventdev_get_dev_id),
953 TEST_CASE_ST(NULL, NULL,
954 test_eventdev_socket_id),
955 TEST_CASE_ST(NULL, NULL,
956 test_eventdev_info_get),
957 TEST_CASE_ST(NULL, NULL,
958 test_eventdev_configure),
959 TEST_CASE_ST(eventdev_configure_setup, NULL,
960 test_eventdev_queue_default_conf_get),
961 TEST_CASE_ST(eventdev_configure_setup, NULL,
962 test_eventdev_queue_setup),
963 TEST_CASE_ST(eventdev_configure_setup, NULL,
964 test_eventdev_queue_count),
965 TEST_CASE_ST(eventdev_configure_setup, NULL,
966 test_eventdev_queue_attr_priority),
967 TEST_CASE_ST(eventdev_configure_setup, NULL,
968 test_eventdev_queue_attr_nb_atomic_flows),
969 TEST_CASE_ST(eventdev_configure_setup, NULL,
970 test_eventdev_queue_attr_nb_atomic_order_sequences),
971 TEST_CASE_ST(eventdev_configure_setup, NULL,
972 test_eventdev_queue_attr_event_queue_cfg),
973 TEST_CASE_ST(eventdev_configure_setup, NULL,
974 test_eventdev_port_default_conf_get),
975 TEST_CASE_ST(eventdev_configure_setup, NULL,
976 test_eventdev_port_setup),
977 TEST_CASE_ST(eventdev_configure_setup, NULL,
978 test_eventdev_port_attr_dequeue_depth),
979 TEST_CASE_ST(eventdev_configure_setup, NULL,
980 test_eventdev_port_attr_enqueue_depth),
981 TEST_CASE_ST(eventdev_configure_setup, NULL,
982 test_eventdev_port_attr_new_event_threshold),
983 TEST_CASE_ST(eventdev_configure_setup, NULL,
984 test_eventdev_port_count),
985 TEST_CASE_ST(eventdev_configure_setup, NULL,
986 test_eventdev_timeout_ticks),
987 TEST_CASE_ST(NULL, NULL,
988 test_eventdev_start_stop),
/* NOTE(review): test name for this entry is elided in this view. */
989 TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
991 TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
992 test_eventdev_unlink),
993 TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
994 test_eventdev_link_get),
995 TEST_CASE_ST(eventdev_setup_device, NULL,
996 test_eventdev_close),
997 TEST_CASES_END() /**< NULL terminate unit test array */
/* Entry point: run the common-code unit-test suite. */
1002 test_eventdev_common(void)
1004 return unit_test_suite_runner(&eventdev_common_testsuite);
/*
 * Run a PMD's built-in selftest, creating it as a vdev if not probed.
 * NOTE(review): the failure check between the vdev init and the skip
 * return is elided in this view — confirm TEST_SKIPPED is returned
 * only when rte_vdev_init() fails.
 */
1008 test_eventdev_selftest_impl(const char *pmd, const char *opts)
1012 if (rte_event_dev_get_dev_id(pmd) == -ENODEV)
1013 ret = rte_vdev_init(pmd, opts);
1015 return TEST_SKIPPED;
1017 return rte_event_dev_selftest(rte_event_dev_get_dev_id(pmd));
/* Per-PMD selftest wrappers, one per supported event device driver. */
1021 test_eventdev_selftest_sw(void)
1023 return test_eventdev_selftest_impl("event_sw", "");
1027 test_eventdev_selftest_octeontx(void)
1029 return test_eventdev_selftest_impl("event_octeontx", "");
1033 test_eventdev_selftest_dpaa2(void)
1035 return test_eventdev_selftest_impl("event_dpaa2", "");
1039 test_eventdev_selftest_dlb2(void)
1041 return test_eventdev_selftest_impl("dlb2_event", "");
1045 test_eventdev_selftest_cn9k(void)
1047 return test_eventdev_selftest_impl("event_cn9k", "");
1051 test_eventdev_selftest_cn10k(void)
1053 return test_eventdev_selftest_impl("event_cn10k", "");
1056 #endif /* !RTE_EXEC_ENV_WINDOWS */
/*
 * Test-runner registrations. The common suite registers everywhere
 * (its Windows variant only prints a skip message); the PMD selftests
 * are registered on non-Windows builds only.
 */
1058 REGISTER_TEST_COMMAND(eventdev_common_autotest, test_eventdev_common);
1060 #ifndef RTE_EXEC_ENV_WINDOWS
1061 REGISTER_TEST_COMMAND(eventdev_selftest_sw, test_eventdev_selftest_sw);
1062 REGISTER_TEST_COMMAND(eventdev_selftest_octeontx,
1063 test_eventdev_selftest_octeontx);
1064 REGISTER_TEST_COMMAND(eventdev_selftest_dpaa2, test_eventdev_selftest_dpaa2);
1065 REGISTER_TEST_COMMAND(eventdev_selftest_dlb2, test_eventdev_selftest_dlb2);
1066 REGISTER_TEST_COMMAND(eventdev_selftest_cn9k, test_eventdev_selftest_cn9k);
1067 REGISTER_TEST_COMMAND(eventdev_selftest_cn10k, test_eventdev_selftest_cn10k);
1069 #endif /* !RTE_EXEC_ENV_WINDOWS */