/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Cavium networks. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_eventdev.h>
#include <rte_cryptodev.h>

#include "test.h"
48 RTE_BUILD_BUG_ON(sizeof(struct rte_event) != 16);
50 count = rte_event_dev_count();
52 printf("Failed to find a valid event device,"
53 " testing with event_skeleton device\n");
54 return rte_eal_vdev_init("event_skeleton", NULL);
60 testsuite_teardown(void)
65 test_eventdev_count(void)
68 count = rte_event_dev_count();
69 TEST_ASSERT(count > 0, "Invalid eventdev count %" PRIu8, count);
74 test_eventdev_get_dev_id(void)
77 ret = rte_event_dev_get_dev_id("not_a_valid_eventdev_driver");
78 TEST_ASSERT_FAIL(ret, "Expected <0 for invalid dev name ret=%d", ret);
83 test_eventdev_socket_id(void)
86 socket_id = rte_event_dev_socket_id(TEST_DEV_ID);
87 TEST_ASSERT(socket_id != -EINVAL, "Failed to get socket_id %d",
89 socket_id = rte_event_dev_socket_id(RTE_EVENT_MAX_DEVS);
90 TEST_ASSERT(socket_id == -EINVAL, "Expected -EINVAL %d", socket_id);
96 test_eventdev_info_get(void)
99 struct rte_event_dev_info info;
100 ret = rte_event_dev_info_get(TEST_DEV_ID, NULL);
101 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
102 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
103 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
104 TEST_ASSERT(info.max_event_ports > 0,
105 "Not enough event ports %d", info.max_event_ports);
106 TEST_ASSERT(info.max_event_queues > 0,
107 "Not enough event queues %d", info.max_event_queues);
112 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
113 struct rte_event_dev_info *info)
115 memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
116 dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
117 dev_conf->nb_event_ports = info->max_event_ports;
118 dev_conf->nb_event_queues = info->max_event_queues;
119 dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
120 dev_conf->nb_event_port_dequeue_depth =
121 info->max_event_port_dequeue_depth;
122 dev_conf->nb_event_port_enqueue_depth =
123 info->max_event_port_enqueue_depth;
124 dev_conf->nb_event_port_enqueue_depth =
125 info->max_event_port_enqueue_depth;
126 dev_conf->nb_events_limit =
127 info->max_num_events;
131 test_ethdev_config_run(struct rte_event_dev_config *dev_conf,
132 struct rte_event_dev_info *info,
133 void (*fn)(struct rte_event_dev_config *dev_conf,
134 struct rte_event_dev_info *info))
136 devconf_set_default_sane_values(dev_conf, info);
138 return rte_event_dev_configure(TEST_DEV_ID, dev_conf);
142 min_dequeue_limit(struct rte_event_dev_config *dev_conf,
143 struct rte_event_dev_info *info)
145 dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns - 1;
149 max_dequeue_limit(struct rte_event_dev_config *dev_conf,
150 struct rte_event_dev_info *info)
152 dev_conf->dequeue_timeout_ns = info->max_dequeue_timeout_ns + 1;
156 max_events_limit(struct rte_event_dev_config *dev_conf,
157 struct rte_event_dev_info *info)
159 dev_conf->nb_events_limit = info->max_num_events + 1;
163 max_event_ports(struct rte_event_dev_config *dev_conf,
164 struct rte_event_dev_info *info)
166 dev_conf->nb_event_ports = info->max_event_ports + 1;
170 max_event_queues(struct rte_event_dev_config *dev_conf,
171 struct rte_event_dev_info *info)
173 dev_conf->nb_event_queues = info->max_event_queues + 1;
177 max_event_queue_flows(struct rte_event_dev_config *dev_conf,
178 struct rte_event_dev_info *info)
180 dev_conf->nb_event_queue_flows = info->max_event_queue_flows + 1;
184 max_event_port_dequeue_depth(struct rte_event_dev_config *dev_conf,
185 struct rte_event_dev_info *info)
187 dev_conf->nb_event_port_dequeue_depth =
188 info->max_event_port_dequeue_depth + 1;
192 max_event_port_enqueue_depth(struct rte_event_dev_config *dev_conf,
193 struct rte_event_dev_info *info)
195 dev_conf->nb_event_port_enqueue_depth =
196 info->max_event_port_enqueue_depth + 1;
201 test_eventdev_configure(void)
204 struct rte_event_dev_config dev_conf;
205 struct rte_event_dev_info info;
206 ret = rte_event_dev_configure(TEST_DEV_ID, NULL);
207 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
209 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
210 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
213 TEST_ASSERT_EQUAL(-EINVAL,
214 test_ethdev_config_run(&dev_conf, &info, min_dequeue_limit),
215 "Config negative test failed");
216 TEST_ASSERT_EQUAL(-EINVAL,
217 test_ethdev_config_run(&dev_conf, &info, max_dequeue_limit),
218 "Config negative test failed");
219 TEST_ASSERT_EQUAL(-EINVAL,
220 test_ethdev_config_run(&dev_conf, &info, max_events_limit),
221 "Config negative test failed");
222 TEST_ASSERT_EQUAL(-EINVAL,
223 test_ethdev_config_run(&dev_conf, &info, max_event_ports),
224 "Config negative test failed");
225 TEST_ASSERT_EQUAL(-EINVAL,
226 test_ethdev_config_run(&dev_conf, &info, max_event_queues),
227 "Config negative test failed");
228 TEST_ASSERT_EQUAL(-EINVAL,
229 test_ethdev_config_run(&dev_conf, &info, max_event_queue_flows),
230 "Config negative test failed");
231 TEST_ASSERT_EQUAL(-EINVAL,
232 test_ethdev_config_run(&dev_conf, &info,
233 max_event_port_dequeue_depth),
234 "Config negative test failed");
235 TEST_ASSERT_EQUAL(-EINVAL,
236 test_ethdev_config_run(&dev_conf, &info,
237 max_event_port_enqueue_depth),
238 "Config negative test failed");
241 devconf_set_default_sane_values(&dev_conf, &info);
242 ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
243 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
246 devconf_set_default_sane_values(&dev_conf, &info);
247 dev_conf.nb_event_ports = RTE_MAX(info.max_event_ports/2, 1);
248 dev_conf.nb_event_queues = RTE_MAX(info.max_event_queues/2, 1);
249 ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
250 TEST_ASSERT_SUCCESS(ret, "Failed to re configure eventdev");
252 /* re-configure back to max_event_queues and max_event_ports */
253 devconf_set_default_sane_values(&dev_conf, &info);
254 ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
255 TEST_ASSERT_SUCCESS(ret, "Failed to re-configure eventdev");
262 eventdev_configure_setup(void)
265 struct rte_event_dev_config dev_conf;
266 struct rte_event_dev_info info;
268 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
269 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
270 devconf_set_default_sane_values(&dev_conf, &info);
271 ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
272 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
278 test_eventdev_queue_default_conf_get(void)
281 struct rte_event_queue_conf qconf;
283 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, NULL);
284 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
286 for (i = 0; i < rte_event_queue_count(TEST_DEV_ID); i++) {
287 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
289 TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d info", i);
296 test_eventdev_queue_setup(void)
299 struct rte_event_dev_info info;
300 struct rte_event_queue_conf qconf;
302 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
303 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
306 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
307 TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
308 qconf.event_queue_cfg = (RTE_EVENT_QUEUE_CFG_ALL_TYPES &
309 RTE_EVENT_QUEUE_CFG_TYPE_MASK);
310 qconf.nb_atomic_flows = info.max_event_queue_flows + 1;
311 ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
312 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
314 qconf.nb_atomic_flows = info.max_event_queue_flows;
315 qconf.event_queue_cfg = (RTE_EVENT_QUEUE_CFG_ORDERED_ONLY &
316 RTE_EVENT_QUEUE_CFG_TYPE_MASK);
317 qconf.nb_atomic_order_sequences = info.max_event_queue_flows + 1;
318 ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
319 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
321 ret = rte_event_queue_setup(TEST_DEV_ID, info.max_event_queues,
323 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
326 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
327 TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
328 ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
329 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue0");
332 for (i = 0; i < rte_event_queue_count(TEST_DEV_ID); i++) {
333 ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
334 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
341 test_eventdev_queue_count(void)
344 struct rte_event_dev_info info;
346 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
347 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
349 TEST_ASSERT_EQUAL(rte_event_queue_count(TEST_DEV_ID),
350 info.max_event_queues, "Wrong queue count");
356 test_eventdev_queue_priority(void)
359 struct rte_event_dev_info info;
360 struct rte_event_queue_conf qconf;
363 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
364 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
366 for (i = 0; i < rte_event_queue_count(TEST_DEV_ID); i++) {
367 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
369 TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d def conf", i);
370 qconf.priority = i % RTE_EVENT_DEV_PRIORITY_LOWEST;
371 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
372 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
375 for (i = 0; i < rte_event_queue_count(TEST_DEV_ID); i++) {
376 priority = rte_event_queue_priority(TEST_DEV_ID, i);
377 if (info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
378 TEST_ASSERT_EQUAL(priority,
379 i % RTE_EVENT_DEV_PRIORITY_LOWEST,
380 "Wrong priority value for queue%d", i);
382 TEST_ASSERT_EQUAL(priority,
383 RTE_EVENT_DEV_PRIORITY_NORMAL,
384 "Wrong priority value for queue%d", i);
391 test_eventdev_port_default_conf_get(void)
394 struct rte_event_port_conf pconf;
396 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, NULL);
397 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
399 ret = rte_event_port_default_conf_get(TEST_DEV_ID,
400 rte_event_port_count(TEST_DEV_ID) + 1, NULL);
401 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
403 for (i = 0; i < rte_event_port_count(TEST_DEV_ID); i++) {
404 ret = rte_event_port_default_conf_get(TEST_DEV_ID, i,
406 TEST_ASSERT_SUCCESS(ret, "Failed to get port%d info", i);
413 test_eventdev_port_setup(void)
416 struct rte_event_dev_info info;
417 struct rte_event_port_conf pconf;
419 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
420 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
423 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
424 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
425 pconf.new_event_threshold = info.max_num_events + 1;
426 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
427 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
429 pconf.new_event_threshold = info.max_num_events;
430 pconf.dequeue_depth = info.max_event_port_dequeue_depth + 1;
431 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
432 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
434 pconf.dequeue_depth = info.max_event_port_dequeue_depth;
435 pconf.enqueue_depth = info.max_event_port_enqueue_depth + 1;
436 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
437 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
439 ret = rte_event_port_setup(TEST_DEV_ID, info.max_event_ports,
441 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
444 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
445 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
446 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
447 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
450 for (i = 0; i < rte_event_port_count(TEST_DEV_ID); i++) {
451 ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
452 TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
459 test_eventdev_dequeue_depth(void)
462 struct rte_event_dev_info info;
463 struct rte_event_port_conf pconf;
465 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
466 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
468 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
469 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
470 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
471 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
473 TEST_ASSERT_EQUAL(rte_event_port_dequeue_depth(TEST_DEV_ID, 0),
474 pconf.dequeue_depth, "Wrong port dequeue depth");
480 test_eventdev_enqueue_depth(void)
483 struct rte_event_dev_info info;
484 struct rte_event_port_conf pconf;
486 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
487 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
489 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
490 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
491 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
492 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
494 TEST_ASSERT_EQUAL(rte_event_port_enqueue_depth(TEST_DEV_ID, 0),
495 pconf.enqueue_depth, "Wrong port enqueue depth");
501 test_eventdev_port_count(void)
504 struct rte_event_dev_info info;
506 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
507 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
509 TEST_ASSERT_EQUAL(rte_event_port_count(TEST_DEV_ID),
510 info.max_event_ports, "Wrong port count");
516 test_eventdev_timeout_ticks(void)
519 uint64_t timeout_ticks;
521 ret = rte_event_dequeue_timeout_ticks(TEST_DEV_ID, 100, &timeout_ticks);
523 TEST_ASSERT_SUCCESS(ret, "Fail to get timeout_ticks");
530 test_eventdev_start_stop(void)
534 ret = eventdev_configure_setup();
535 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
537 for (i = 0; i < rte_event_queue_count(TEST_DEV_ID); i++) {
538 ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
539 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
542 for (i = 0; i < rte_event_port_count(TEST_DEV_ID); i++) {
543 ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
544 TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
547 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
548 TEST_ASSERT(ret == rte_event_queue_count(TEST_DEV_ID),
549 "Failed to link port, device %d", TEST_DEV_ID);
551 ret = rte_event_dev_start(TEST_DEV_ID);
552 TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);
554 rte_event_dev_stop(TEST_DEV_ID);
560 eventdev_setup_device(void)
564 ret = eventdev_configure_setup();
565 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
567 for (i = 0; i < rte_event_queue_count(TEST_DEV_ID); i++) {
568 ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
569 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
572 for (i = 0; i < rte_event_port_count(TEST_DEV_ID); i++) {
573 ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
574 TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
577 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
578 TEST_ASSERT(ret == rte_event_queue_count(TEST_DEV_ID),
579 "Failed to link port, device %d", TEST_DEV_ID);
581 ret = rte_event_dev_start(TEST_DEV_ID);
582 TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);
588 eventdev_stop_device(void)
590 rte_event_dev_stop(TEST_DEV_ID);
594 test_eventdev_link(void)
596 int ret, nb_queues, i;
597 uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
598 uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
600 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
601 TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
604 nb_queues = rte_event_queue_count(TEST_DEV_ID);
605 for (i = 0; i < nb_queues; i++) {
607 priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
610 ret = rte_event_port_link(TEST_DEV_ID, 0, queues,
611 priorities, nb_queues);
612 TEST_ASSERT(ret == nb_queues, "Failed to link(device%d) ret=%d",
618 test_eventdev_unlink(void)
620 int ret, nb_queues, i;
621 uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
623 ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
624 TEST_ASSERT(ret >= 0, "Failed to unlink with NULL device%d",
627 nb_queues = rte_event_queue_count(TEST_DEV_ID);
628 for (i = 0; i < nb_queues; i++)
632 ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, nb_queues);
633 TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
639 test_eventdev_link_get(void)
641 int ret, nb_queues, i;
642 uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
643 uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
645 /* link all queues */
646 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
647 TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
650 nb_queues = rte_event_queue_count(TEST_DEV_ID);
651 for (i = 0; i < nb_queues; i++)
654 ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, nb_queues);
655 TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
658 ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
659 TEST_ASSERT(ret == 0, "(%d)Wrong link get=%d", TEST_DEV_ID, ret);
661 /* link all queues and get the links */
662 nb_queues = rte_event_queue_count(TEST_DEV_ID);
663 for (i = 0; i < nb_queues; i++) {
665 priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
667 ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities,
669 TEST_ASSERT(ret == nb_queues, "Failed to link(device%d) ret=%d",
671 ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
672 TEST_ASSERT(ret == nb_queues, "(%d)Wrong link get ret=%d expected=%d",
673 TEST_DEV_ID, ret, nb_queues);
675 ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
676 TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
678 /* link just one queue */
680 priorities[0] = RTE_EVENT_DEV_PRIORITY_NORMAL;
682 ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities, 1);
683 TEST_ASSERT(ret == 1, "Failed to link(device%d) ret=%d",
685 ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
686 TEST_ASSERT(ret == 1, "(%d)Wrong link get ret=%d expected=%d",
687 TEST_DEV_ID, ret, 1);
689 ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
690 TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
692 /* 4links and 2 unlinks */
693 nb_queues = rte_event_queue_count(TEST_DEV_ID);
694 if (nb_queues >= 4) {
695 for (i = 0; i < 4; i++) {
697 priorities[i] = 0x40;
699 ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities,
701 TEST_ASSERT(ret == 4, "Failed to link(device%d) ret=%d",
704 for (i = 0; i < 2; i++)
707 ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, 2);
708 TEST_ASSERT(ret == 2, "Failed to unlink(device%d) ret=%d",
710 ret = rte_event_port_links_get(TEST_DEV_ID, 0,
712 TEST_ASSERT(ret == 2, "(%d)Wrong link get ret=%d expected=%d",
713 TEST_DEV_ID, ret, 2);
714 TEST_ASSERT(queues[0] == 2, "ret=%d expected=%d", ret, 2);
715 TEST_ASSERT(priorities[0] == 0x40, "ret=%d expected=%d",
717 TEST_ASSERT(queues[1] == 3, "ret=%d expected=%d", ret, 3);
718 TEST_ASSERT(priorities[1] == 0x40, "ret=%d expected=%d",
726 test_eventdev_close(void)
728 rte_event_dev_stop(TEST_DEV_ID);
729 return rte_event_dev_close(TEST_DEV_ID);
732 static struct unit_test_suite eventdev_common_testsuite = {
733 .suite_name = "eventdev common code unit test suite",
734 .setup = testsuite_setup,
735 .teardown = testsuite_teardown,
737 TEST_CASE_ST(NULL, NULL,
738 test_eventdev_count),
739 TEST_CASE_ST(NULL, NULL,
740 test_eventdev_get_dev_id),
741 TEST_CASE_ST(NULL, NULL,
742 test_eventdev_socket_id),
743 TEST_CASE_ST(NULL, NULL,
744 test_eventdev_info_get),
745 TEST_CASE_ST(NULL, NULL,
746 test_eventdev_configure),
747 TEST_CASE_ST(eventdev_configure_setup, NULL,
748 test_eventdev_queue_default_conf_get),
749 TEST_CASE_ST(eventdev_configure_setup, NULL,
750 test_eventdev_queue_setup),
751 TEST_CASE_ST(eventdev_configure_setup, NULL,
752 test_eventdev_queue_count),
753 TEST_CASE_ST(eventdev_configure_setup, NULL,
754 test_eventdev_queue_priority),
755 TEST_CASE_ST(eventdev_configure_setup, NULL,
756 test_eventdev_port_default_conf_get),
757 TEST_CASE_ST(eventdev_configure_setup, NULL,
758 test_eventdev_port_setup),
759 TEST_CASE_ST(eventdev_configure_setup, NULL,
760 test_eventdev_dequeue_depth),
761 TEST_CASE_ST(eventdev_configure_setup, NULL,
762 test_eventdev_enqueue_depth),
763 TEST_CASE_ST(eventdev_configure_setup, NULL,
764 test_eventdev_port_count),
765 TEST_CASE_ST(eventdev_configure_setup, NULL,
766 test_eventdev_timeout_ticks),
767 TEST_CASE_ST(NULL, NULL,
768 test_eventdev_start_stop),
769 TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
771 TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
772 test_eventdev_unlink),
773 TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
774 test_eventdev_link_get),
775 TEST_CASE_ST(eventdev_setup_device, NULL,
776 test_eventdev_close),
777 TEST_CASES_END() /**< NULL terminate unit test array */
782 test_eventdev_common(void)
784 return unit_test_suite_runner(&eventdev_common_testsuite);
787 REGISTER_TEST_COMMAND(eventdev_common_autotest, test_eventdev_common);