/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Cavium, Inc. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Standard headers for printf(), memset() and the PRIu8 format macro used below. */
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_eventdev.h>
#include <rte_bus_vdev.h>	/* rte_vdev_init() */

#include "test.h"		/* DPDK unit test harness: TEST_ASSERT*, unit_test_suite */

/* Identifier of the event device under test (the first probed device). */
#define TEST_DEV_ID	0

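/*
 * Suite setup: sanity-check the 16-byte rte_event layout and, when no event
 * device is present, create the "event_skeleton" vdev so the common-code
 * tests still have a device to run against.
 */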
static int
testsuite_setup(void)
{
    RTE_BUILD_BUG_ON(sizeof(struct rte_event) != 16);
    uint8_t count;

    count = rte_event_dev_count();
    if (!count) {
        printf("Failed to find a valid event device,"
            " testing with event_skeleton device\n");
        return rte_vdev_init("event_skeleton", NULL);
    }
    return TEST_SUCCESS;
}

static void
testsuite_teardown(void)
{
}

static int
test_eventdev_count(void)
{
    uint8_t count;

    count = rte_event_dev_count();
    TEST_ASSERT(count > 0, "Invalid eventdev count %" PRIu8, count);
    return TEST_SUCCESS;
}

static int
test_eventdev_get_dev_id(void)
{
    int ret;

    ret = rte_event_dev_get_dev_id("not_a_valid_eventdev_driver");
    TEST_ASSERT_FAIL(ret, "Expected <0 for invalid dev name ret=%d", ret);
    return TEST_SUCCESS;
}

static int
test_eventdev_socket_id(void)
{
    int socket_id;

    socket_id = rte_event_dev_socket_id(TEST_DEV_ID);
    TEST_ASSERT(socket_id != -EINVAL, "Failed to get socket_id %d",
            socket_id);
    socket_id = rte_event_dev_socket_id(RTE_EVENT_MAX_DEVS);
    TEST_ASSERT(socket_id == -EINVAL, "Expected -EINVAL %d", socket_id);

    return TEST_SUCCESS;
}

static int
test_eventdev_info_get(void)
{
    int ret;
    struct rte_event_dev_info info;

    ret = rte_event_dev_info_get(TEST_DEV_ID, NULL);
    TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
    ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
    TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
    TEST_ASSERT(info.max_event_ports > 0,
            "Not enough event ports %d", info.max_event_ports);
    TEST_ASSERT(info.max_event_queues > 0,
            "Not enough event queues %d", info.max_event_queues);

    return TEST_SUCCESS;
}

static void
devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
            struct rte_event_dev_info *info)
{
    memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
    dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
    dev_conf->nb_event_ports = info->max_event_ports;
    dev_conf->nb_event_queues = info->max_event_queues;
    dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
    dev_conf->nb_event_port_dequeue_depth =
            info->max_event_port_dequeue_depth;
    dev_conf->nb_event_port_enqueue_depth =
            info->max_event_port_enqueue_depth;
    dev_conf->nb_events_limit = info->max_num_events;
}

static int
test_eventdev_config_run(struct rte_event_dev_config *dev_conf,
        struct rte_event_dev_info *info,
        void (*fn)(struct rte_event_dev_config *dev_conf,
            struct rte_event_dev_info *info))
{
    devconf_set_default_sane_values(dev_conf, info);
    fn(dev_conf, info);

    return rte_event_dev_configure(TEST_DEV_ID, dev_conf);
}

static void
max_dequeue_limit(struct rte_event_dev_config *dev_conf,
        struct rte_event_dev_info *info)
{
    dev_conf->dequeue_timeout_ns = info->max_dequeue_timeout_ns + 1;
}

static void
max_events_limit(struct rte_event_dev_config *dev_conf,
        struct rte_event_dev_info *info)
{
    dev_conf->nb_events_limit = info->max_num_events + 1;
}

static void
max_event_ports(struct rte_event_dev_config *dev_conf,
        struct rte_event_dev_info *info)
{
    dev_conf->nb_event_ports = info->max_event_ports + 1;
}

static void
max_event_queues(struct rte_event_dev_config *dev_conf,
        struct rte_event_dev_info *info)
{
    dev_conf->nb_event_queues = info->max_event_queues + 1;
}

static void
max_event_queue_flows(struct rte_event_dev_config *dev_conf,
        struct rte_event_dev_info *info)
{
    dev_conf->nb_event_queue_flows = info->max_event_queue_flows + 1;
}

static void
max_event_port_dequeue_depth(struct rte_event_dev_config *dev_conf,
        struct rte_event_dev_info *info)
{
    dev_conf->nb_event_port_dequeue_depth =
            info->max_event_port_dequeue_depth + 1;
}

static void
max_event_port_enqueue_depth(struct rte_event_dev_config *dev_conf,
        struct rte_event_dev_info *info)
{
    dev_conf->nb_event_port_enqueue_depth =
            info->max_event_port_enqueue_depth + 1;
}

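/*
 * Negative configuration cases are driven through the callbacks above; the
 * positive cases then configure with sane maximum values, reconfigure with
 * half the ports/queues, and finally reconfigure back to the maximums.
 */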
static int
test_eventdev_configure(void)
{
    int ret;
    struct rte_event_dev_config dev_conf;
    struct rte_event_dev_info info;

    ret = rte_event_dev_configure(TEST_DEV_ID, NULL);
    TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

    ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
    TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

    /* Check limits */
    TEST_ASSERT_EQUAL(-EINVAL,
        test_eventdev_config_run(&dev_conf, &info, max_dequeue_limit),
        "Config negative test failed");
    TEST_ASSERT_EQUAL(-EINVAL,
        test_eventdev_config_run(&dev_conf, &info, max_events_limit),
        "Config negative test failed");
    TEST_ASSERT_EQUAL(-EINVAL,
        test_eventdev_config_run(&dev_conf, &info, max_event_ports),
        "Config negative test failed");
    TEST_ASSERT_EQUAL(-EINVAL,
        test_eventdev_config_run(&dev_conf, &info, max_event_queues),
        "Config negative test failed");
    TEST_ASSERT_EQUAL(-EINVAL,
        test_eventdev_config_run(&dev_conf, &info, max_event_queue_flows),
        "Config negative test failed");
    TEST_ASSERT_EQUAL(-EINVAL,
        test_eventdev_config_run(&dev_conf, &info,
            max_event_port_dequeue_depth),
        "Config negative test failed");
    TEST_ASSERT_EQUAL(-EINVAL,
        test_eventdev_config_run(&dev_conf, &info,
            max_event_port_enqueue_depth),
        "Config negative test failed");

    /* Positive case */
    devconf_set_default_sane_values(&dev_conf, &info);
    ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
    TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

    /* Re-configure with fewer event queues and ports */
    devconf_set_default_sane_values(&dev_conf, &info);
    dev_conf.nb_event_ports = RTE_MAX(info.max_event_ports/2, 1);
    dev_conf.nb_event_queues = RTE_MAX(info.max_event_queues/2, 1);
    ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
    TEST_ASSERT_SUCCESS(ret, "Failed to re-configure eventdev");

    /* Re-configure back to max_event_queues and max_event_ports */
    devconf_set_default_sane_values(&dev_conf, &info);
    ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
    TEST_ASSERT_SUCCESS(ret, "Failed to re-configure eventdev");

    return TEST_SUCCESS;
}

static int
eventdev_configure_setup(void)
{
    int ret;
    struct rte_event_dev_config dev_conf;
    struct rte_event_dev_info info;

    ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
    TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
    devconf_set_default_sane_values(&dev_conf, &info);
    ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
    TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

    return TEST_SUCCESS;
}

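/*
 * Queue-level tests: eventdev_configure_setup() is the per-case fixture
 * that leaves the device configured but not started.
 */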
static int
test_eventdev_queue_default_conf_get(void)
{
    int i, ret;
    struct rte_event_queue_conf qconf;

    ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, NULL);
    TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

    uint32_t queue_count;
    TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
            RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
            "Queue count get failed");

    for (i = 0; i < (int)queue_count; i++) {
        ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
                &qconf);
        TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d info", i);
    }

    return TEST_SUCCESS;
}

static int
test_eventdev_queue_setup(void)
{
    int i, ret;
    struct rte_event_dev_info info;
    struct rte_event_queue_conf qconf;

    ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
    TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

    /* Negative cases */
    ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
    TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
    qconf.event_queue_cfg = (RTE_EVENT_QUEUE_CFG_ALL_TYPES &
            RTE_EVENT_QUEUE_CFG_TYPE_MASK);
    qconf.nb_atomic_flows = info.max_event_queue_flows + 1;
    ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
    TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

    qconf.nb_atomic_flows = info.max_event_queue_flows;
    qconf.event_queue_cfg = (RTE_EVENT_QUEUE_CFG_ORDERED_ONLY &
            RTE_EVENT_QUEUE_CFG_TYPE_MASK);
    qconf.nb_atomic_order_sequences = info.max_event_queue_flows + 1;
    ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
    TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

    ret = rte_event_queue_setup(TEST_DEV_ID, info.max_event_queues,
            &qconf);
    TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

    /* Positive case */
    ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
    TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
    ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
    TEST_ASSERT_SUCCESS(ret, "Failed to setup queue0");

    uint32_t queue_count;
    TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
            RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
            "Queue count get failed");

    for (i = 0; i < (int)queue_count; i++) {
        ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
        TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
    }

    return TEST_SUCCESS;
}

static int
test_eventdev_queue_count(void)
{
    int ret;
    struct rte_event_dev_info info;

    ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
    TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

    uint32_t queue_count;
    TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
            RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
            "Queue count get failed");
    TEST_ASSERT_EQUAL(queue_count, info.max_event_queues,
            "Wrong queue count");

    return TEST_SUCCESS;
}

static int
test_eventdev_queue_priority(void)
{
    int i, ret;
    uint8_t priority;
    struct rte_event_dev_info info;
    struct rte_event_queue_conf qconf;

    ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
    TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

    uint32_t queue_count;
    TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
            RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
            "Queue count get failed");

    for (i = 0; i < (int)queue_count; i++) {
        ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
                &qconf);
        TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d def conf", i);
        qconf.priority = i % RTE_EVENT_DEV_PRIORITY_LOWEST;
        ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
        TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
    }

    for (i = 0; i < (int)queue_count; i++) {
        priority = rte_event_queue_priority(TEST_DEV_ID, i);

        if (info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
            TEST_ASSERT_EQUAL(priority,
                i % RTE_EVENT_DEV_PRIORITY_LOWEST,
                "Wrong priority value for queue%d", i);
        else
            TEST_ASSERT_EQUAL(priority,
                RTE_EVENT_DEV_PRIORITY_NORMAL,
                "Wrong priority value for queue%d", i);
    }

    return TEST_SUCCESS;
}

static int
test_eventdev_port_default_conf_get(void)
{
    int i, ret;
    struct rte_event_port_conf pconf;

    ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, NULL);
    TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

    uint32_t port_count;
    TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
            RTE_EVENT_DEV_ATTR_PORT_COUNT,
            &port_count), "Port count get failed");

    ret = rte_event_port_default_conf_get(TEST_DEV_ID,
            port_count + 1, NULL);
    TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

    for (i = 0; i < (int)port_count; i++) {
        ret = rte_event_port_default_conf_get(TEST_DEV_ID, i,
                &pconf);
        TEST_ASSERT_SUCCESS(ret, "Failed to get port%d info", i);
    }

    return TEST_SUCCESS;
}

static int
test_eventdev_port_setup(void)
{
    int i, ret;
    struct rte_event_dev_info info;
    struct rte_event_port_conf pconf;

    ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
    TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

    /* Negative cases */
    ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
    TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
    pconf.new_event_threshold = info.max_num_events + 1;
    ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
    TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

    pconf.new_event_threshold = info.max_num_events;
    pconf.dequeue_depth = info.max_event_port_dequeue_depth + 1;
    ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
    TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

    pconf.dequeue_depth = info.max_event_port_dequeue_depth;
    pconf.enqueue_depth = info.max_event_port_enqueue_depth + 1;
    ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
    TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

    ret = rte_event_port_setup(TEST_DEV_ID, info.max_event_ports,
            &pconf);
    TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

    /* Positive case */
    ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
    TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
    ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
    TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");

    uint32_t port_count;
    TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
            RTE_EVENT_DEV_ATTR_PORT_COUNT,
            &port_count), "Port count get failed");

    for (i = 0; i < (int)port_count; i++) {
        ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
        TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
    }

    return TEST_SUCCESS;
}

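/*
 * The next two cases set up port0 from its default configuration and read
 * the dequeue/enqueue depth back through rte_event_port_attr_get().
 */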
static int
test_eventdev_dequeue_depth(void)
{
    int ret;
    struct rte_event_dev_info info;
    struct rte_event_port_conf pconf;

    ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
    TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

    ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
    TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
    ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
    TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");

    uint32_t value;
    TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
            RTE_EVENT_PORT_ATTR_DEQ_DEPTH, &value),
            0, "Call to port dequeue depth failed");
    TEST_ASSERT_EQUAL(value, pconf.dequeue_depth,
            "Wrong port dequeue depth");

    return TEST_SUCCESS;
}

static int
test_eventdev_enqueue_depth(void)
{
    int ret;
    struct rte_event_dev_info info;
    struct rte_event_port_conf pconf;

    ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
    TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

    ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
    TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
    ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
    TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");

    uint32_t value;
    TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
            RTE_EVENT_PORT_ATTR_ENQ_DEPTH, &value),
            0, "Call to port enqueue depth failed");
    TEST_ASSERT_EQUAL(value, pconf.enqueue_depth,
            "Wrong port enqueue depth");

    return TEST_SUCCESS;
}

static int
test_eventdev_port_count(void)
{
    int ret;
    struct rte_event_dev_info info;

    ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
    TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

    uint32_t port_count;
    TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
            RTE_EVENT_DEV_ATTR_PORT_COUNT,
            &port_count), "Port count get failed");
    TEST_ASSERT_EQUAL(port_count, info.max_event_ports, "Wrong port count");

    return TEST_SUCCESS;
}

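/* Convert a 100 ns dequeue timeout into device-specific timeout ticks. */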
static int
test_eventdev_timeout_ticks(void)
{
    int ret;
    uint64_t timeout_ticks;

    ret = rte_event_dequeue_timeout_ticks(TEST_DEV_ID, 100, &timeout_ticks);
    TEST_ASSERT_SUCCESS(ret, "Fail to get timeout_ticks");

    return TEST_SUCCESS;
}

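/*
 * Full bring-up sequence: configure, set up all queues and ports with
 * default configurations, link port0 to every queue, then start and stop
 * the device.
 */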
static int
test_eventdev_start_stop(void)
{
    int i, ret;

    ret = eventdev_configure_setup();
    TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

    uint32_t queue_count;
    TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
            RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
            "Queue count get failed");
    for (i = 0; i < (int)queue_count; i++) {
        ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
        TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
    }

    uint32_t port_count;
    TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
            RTE_EVENT_DEV_ATTR_PORT_COUNT,
            &port_count), "Port count get failed");

    for (i = 0; i < (int)port_count; i++) {
        ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
        TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
    }

    ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
    TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
            TEST_DEV_ID);

    ret = rte_event_dev_start(TEST_DEV_ID);
    TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);

    rte_event_dev_stop(TEST_DEV_ID);
    return TEST_SUCCESS;
}

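/*
 * eventdev_setup_device()/eventdev_stop_device() repeat the bring-up above
 * and serve as the per-case setup/teardown fixtures for the link tests.
 */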
static int
eventdev_setup_device(void)
{
    int i, ret;

    ret = eventdev_configure_setup();
    TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

    uint32_t queue_count;
    TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
            RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
            "Queue count get failed");
    for (i = 0; i < (int)queue_count; i++) {
        ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
        TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
    }

    uint32_t port_count;
    TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
            RTE_EVENT_DEV_ATTR_PORT_COUNT,
            &port_count), "Port count get failed");

    for (i = 0; i < (int)port_count; i++) {
        ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
        TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
    }

    ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
    TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
            TEST_DEV_ID);

    ret = rte_event_dev_start(TEST_DEV_ID);
    TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);

    return TEST_SUCCESS;
}

static void
eventdev_stop_device(void)
{
    rte_event_dev_stop(TEST_DEV_ID);
}

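/* Link tests: a NULL queue list links (or unlinks) all queues of the device. */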
static int
test_eventdev_link(void)
{
    int ret, nb_queues, i;
    uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
    uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];

    ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
    TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
            TEST_DEV_ID);

    uint32_t queue_count;
    TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
            RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
            "Queue count get failed");
    nb_queues = queue_count;
    for (i = 0; i < nb_queues; i++) {
        queues[i] = i;
        priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
    }

    ret = rte_event_port_link(TEST_DEV_ID, 0, queues,
            priorities, nb_queues);
    TEST_ASSERT(ret == nb_queues, "Failed to link(device%d) ret=%d",
            TEST_DEV_ID, ret);
    return TEST_SUCCESS;
}

static int
test_eventdev_unlink(void)
{
    int ret, nb_queues, i;
    uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];

    ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
    TEST_ASSERT(ret >= 0, "Failed to unlink with NULL device%d",
            TEST_DEV_ID);

    uint32_t queue_count;
    TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
            RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
            "Queue count get failed");
    nb_queues = queue_count;
    for (i = 0; i < nb_queues; i++)
        queues[i] = i;

    ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, nb_queues);
    TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
            TEST_DEV_ID, ret);
    return TEST_SUCCESS;
}

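/*
 * links_get test: link all queues, unlink them, link a single queue, and
 * finally (when at least 4 queues exist) link 4 queues with priority 0x40
 * and unlink 2 of them, verifying the remaining links and priorities.
 */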
static int
test_eventdev_link_get(void)
{
    int ret, i;
    uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
    uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];

    /* link all queues */
    ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
    TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
            TEST_DEV_ID);

    uint32_t queue_count;
    TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
            RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
            "Queue count get failed");
    const int nb_queues = queue_count;
    for (i = 0; i < nb_queues; i++)
        queues[i] = i;

    ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, nb_queues);
    TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
            TEST_DEV_ID, ret);

    ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
    TEST_ASSERT(ret == 0, "(%d)Wrong link get=%d", TEST_DEV_ID, ret);

    /* link all queues and get the links */
    for (i = 0; i < nb_queues; i++) {
        queues[i] = i;
        priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
    }
    ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities,
            nb_queues);
    TEST_ASSERT(ret == nb_queues, "Failed to link(device%d) ret=%d",
            TEST_DEV_ID, ret);
    ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
    TEST_ASSERT(ret == nb_queues, "(%d)Wrong link get ret=%d expected=%d",
            TEST_DEV_ID, ret, nb_queues);

    ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
    TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
            TEST_DEV_ID, ret);

    /* link just one queue */
    queues[0] = 0;
    priorities[0] = RTE_EVENT_DEV_PRIORITY_NORMAL;

    ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities, 1);
    TEST_ASSERT(ret == 1, "Failed to link(device%d) ret=%d",
            TEST_DEV_ID, ret);
    ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
    TEST_ASSERT(ret == 1, "(%d)Wrong link get ret=%d expected=%d",
            TEST_DEV_ID, ret, 1);

    ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
    TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
            TEST_DEV_ID, ret);

    /* 4 links and 2 unlinks */
    if (nb_queues >= 4) {
        for (i = 0; i < 4; i++) {
            queues[i] = i;
            priorities[i] = 0x40;
        }
        ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities,
                4);
        TEST_ASSERT(ret == 4, "Failed to link(device%d) ret=%d",
                TEST_DEV_ID, ret);

        for (i = 0; i < 2; i++)
            queues[i] = i;

        ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, 2);
        TEST_ASSERT(ret == 2, "Failed to unlink(device%d) ret=%d",
                TEST_DEV_ID, ret);
        ret = rte_event_port_links_get(TEST_DEV_ID, 0,
                queues, priorities);
        TEST_ASSERT(ret == 2, "(%d)Wrong link get ret=%d expected=%d",
                TEST_DEV_ID, ret, 2);
        TEST_ASSERT(queues[0] == 2, "ret=%d expected=%d", ret, 2);
        TEST_ASSERT(priorities[0] == 0x40, "ret=%d expected=%d",
                priorities[0], 0x40);
        TEST_ASSERT(queues[1] == 3, "ret=%d expected=%d", ret, 3);
        TEST_ASSERT(priorities[1] == 0x40, "ret=%d expected=%d",
                priorities[1], 0x40);
    }

    return TEST_SUCCESS;
}

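/* The device must be stopped before rte_event_dev_close() is called. */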
static int
test_eventdev_close(void)
{
    rte_event_dev_stop(TEST_DEV_ID);
    return rte_event_dev_close(TEST_DEV_ID);
}

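/*
 * Test case table: TEST_CASE_ST() pairs each case with an optional per-case
 * setup/teardown fixture; suite-wide setup/teardown handle vdev creation.
 */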
static struct unit_test_suite eventdev_common_testsuite = {
    .suite_name = "eventdev common code unit test suite",
    .setup = testsuite_setup,
    .teardown = testsuite_teardown,
    .unit_test_cases = {
        TEST_CASE_ST(NULL, NULL,
            test_eventdev_count),
        TEST_CASE_ST(NULL, NULL,
            test_eventdev_get_dev_id),
        TEST_CASE_ST(NULL, NULL,
            test_eventdev_socket_id),
        TEST_CASE_ST(NULL, NULL,
            test_eventdev_info_get),
        TEST_CASE_ST(NULL, NULL,
            test_eventdev_configure),
        TEST_CASE_ST(eventdev_configure_setup, NULL,
            test_eventdev_queue_default_conf_get),
        TEST_CASE_ST(eventdev_configure_setup, NULL,
            test_eventdev_queue_setup),
        TEST_CASE_ST(eventdev_configure_setup, NULL,
            test_eventdev_queue_count),
        TEST_CASE_ST(eventdev_configure_setup, NULL,
            test_eventdev_queue_priority),
        TEST_CASE_ST(eventdev_configure_setup, NULL,
            test_eventdev_port_default_conf_get),
        TEST_CASE_ST(eventdev_configure_setup, NULL,
            test_eventdev_port_setup),
        TEST_CASE_ST(eventdev_configure_setup, NULL,
            test_eventdev_dequeue_depth),
        TEST_CASE_ST(eventdev_configure_setup, NULL,
            test_eventdev_enqueue_depth),
        TEST_CASE_ST(eventdev_configure_setup, NULL,
            test_eventdev_port_count),
        TEST_CASE_ST(eventdev_configure_setup, NULL,
            test_eventdev_timeout_ticks),
        TEST_CASE_ST(NULL, NULL,
            test_eventdev_start_stop),
        TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
            test_eventdev_link),
        TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
            test_eventdev_unlink),
        TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
            test_eventdev_link_get),
        TEST_CASE_ST(eventdev_setup_device, NULL,
            test_eventdev_close),
        TEST_CASES_END() /**< NULL terminate unit test array */
    }
};

static int
test_eventdev_common(void)
{
    return unit_test_suite_runner(&eventdev_common_testsuite);
}

REGISTER_TEST_COMMAND(eventdev_common_autotest, test_eventdev_common);
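/*
 * Usage note (assumption; the exact binary name depends on the DPDK version):
 * run the DPDK test application (e.g. app/test or dpdk-test) and enter
 * "eventdev_common_autotest" at the RTE>> prompt to execute this suite.
 */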