1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2020 Intel Corporation
10 #include <sys/queue.h>
12 #include <rte_memory.h>
13 #include <rte_memzone.h>
14 #include <rte_launch.h>
16 #include <rte_lcore.h>
17 #include <rte_debug.h>
18 #include <rte_cycles.h>
19 #include <rte_eventdev.h>
20 #include <rte_mempool.h>
24 #include "rte_pmd_dlb.h"
28 #define DEFAULT_NUM_SEQ_NUMS 32
/* Shared mbuf pool; created once on first selftest run (see the pool
 * creation near the bottom of this file) and reused by every test via
 * init()/struct test.
 */
30 static struct rte_mempool *eventdev_func_mempool;
34 struct rte_mempool *mbuf_pool;
38 /* initialization and config */
/* init() - Configure the event device for one test run.
 *
 * Zeroes *t, attaches the shared mbuf pool, queries the device's
 * capabilities, and configures evdev with the requested queue/port
 * counts, using the device-reported maxima for flows, event limit,
 * dequeue/enqueue depths and timeout.
 * (Braces/error-return lines are elided from this listing; only
 * comments are added here.)
 */
40 init(struct test *t, int nb_queues, int nb_ports)
42 struct rte_event_dev_config config = {0};
43 struct rte_event_dev_info info;
46 memset(t, 0, sizeof(*t));
48 t->mbuf_pool = eventdev_func_mempool;
50 if (rte_event_dev_info_get(evdev, &info)) {
51 printf("%d: Error querying device info\n", __LINE__);
/* Size everything but the queue/port counts from the device maxima. */
55 config.nb_event_queues = nb_queues;
56 config.nb_event_ports = nb_ports;
57 config.nb_event_queue_flows = info.max_event_queue_flows;
58 config.nb_events_limit = info.max_num_events;
59 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
60 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
61 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
/* Per-dequeue timeout: each dequeue_burst call supplies its own timeout. */
62 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
64 ret = rte_event_dev_configure(evdev, &config);
66 printf("%d: Error configuring device\n", __LINE__);
/* create_ports() - Set up num_ports event ports (IDs 0..num_ports-1)
 * using each port's default configuration. Fails (elided return paths)
 * if num_ports exceeds MAX_PORTS or any per-port call errors.
 */
72 create_ports(int num_ports)
76 if (num_ports > MAX_PORTS)
79 for (i = 0; i < num_ports; i++) {
80 struct rte_event_port_conf conf;
82 if (rte_event_port_default_conf_get(evdev, i, &conf)) {
83 printf("%d: Error querying default port conf\n",
88 if (rte_event_port_setup(evdev, i, &conf) < 0) {
89 printf("%d: Error setting up port %d\n", __LINE__, i);
/* create_lb_qids() - Create num_qids load-balanced queues with the given
 * schedule type ("flags"), continuing from t->nb_qids so repeated calls
 * append queues. Updates t->nb_qids on success.
 */
98 create_lb_qids(struct test *t, int num_qids, uint32_t flags)
102 for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
103 struct rte_event_queue_conf conf;
105 if (rte_event_queue_default_conf_get(evdev, i, &conf)) {
106 printf("%d: Error querying default queue conf\n",
111 conf.schedule_type = flags;
/* PARALLEL queues do not maintain order state; ordered/atomic queues
 * get a fixed sequence-number budget.
 */
113 if (conf.schedule_type == RTE_SCHED_TYPE_PARALLEL)
114 conf.nb_atomic_order_sequences = 0;
116 conf.nb_atomic_order_sequences = DEFAULT_NUM_SEQ_NUMS;
118 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
119 printf("%d: error creating qid %d\n", __LINE__, i);
124 t->nb_qids += num_qids;
125 if (t->nb_qids > MAX_QIDS)
/* create_atomic_qids() - Convenience wrapper: create num_qids
 * load-balanced queues with atomic scheduling.
 */
132 create_atomic_qids(struct test *t, int num_qids)
134 return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
/* Tail of the per-test cleanup helper (function header elided from this
 * listing): stop the device, then close it and propagate close()'s
 * return value.
 */
141 rte_event_dev_stop(evdev);
142 return rte_event_dev_close(evdev);
/* enqueue_timeout() - Retry a single-event enqueue on port_id for up to
 * tmo_us microseconds (busy-poll on the TSC), giving up early on any
 * enqueue error other than a full device.
 */
146 enqueue_timeout(uint8_t port_id, struct rte_event *ev, uint64_t tmo_us)
148 const uint64_t start = rte_get_timer_cycles();
/* Convert the microsecond budget into timer cycles. */
149 const uint64_t ticks = (tmo_us * rte_get_timer_hz()) / 1E6;
151 while ((rte_get_timer_cycles() - start) < ticks) {
152 if (rte_event_enqueue_burst(evdev, port_id, ev, 1) == 1)
/* NOTE(review): rte_errno is conventionally a POSITIVE errno value, so
 * comparing against -ENOSPC likely never matches and any failure breaks
 * out immediately instead of retrying on ENOSPC — confirm intent.
 */
155 if (rte_errno != -ENOSPC)
/* flush() - dev_stop_flush callback: free the mbuf carried by each event
 * drained at rte_event_dev_stop() time, returning it to the pool so
 * test_stop_flush() can verify the pool's available count is restored.
 */
163 flush(uint8_t id __rte_unused, struct rte_event event, void *arg __rte_unused)
165 rte_pktmbuf_free(event.mbuf);
/* test_stop_flush() - Verify the dev_stop flush callback frees every
 * in-flight event's mbuf: fill both a linked and an unlinked queue past
 * the port's dequeue depth, register flush(), stop the device (elided
 * lines), and check the mempool's available count is unchanged.
 */
169 test_stop_flush(struct test *t) /* test to check we can properly flush events */
172 uint32_t dequeue_depth;
173 unsigned int i, count;
176 ev.op = RTE_EVENT_OP_NEW;
/* 2 atomic queues, 1 port. */
178 if (init(t, 2, 1) < 0 ||
179 create_ports(1) < 0 ||
180 create_atomic_qids(t, 2) < 0) {
181 printf("%d: Error initializing device\n", __LINE__);
/* NULL queue list => link all queues; expect both links to succeed. */
185 if (rte_event_port_link(evdev, 0, NULL, NULL, 0) != 2) {
186 printf("%d: Error linking queues to the port\n", __LINE__);
190 if (rte_event_dev_start(evdev) < 0) {
191 printf("%d: Error with start call\n", __LINE__);
195 /* Unlink queue 1 so the PMD's stop callback has to cleanup an unlinked
200 if (rte_event_port_unlink(evdev, 0, &queue_id, 1) != 1) {
201 printf("%d: Error unlinking queue 1 from port\n", __LINE__);
/* Snapshot the pool level before allocating event mbufs. */
206 count = rte_mempool_avail_count(t->mbuf_pool);
208 printf("%d: mbuf_pool is NULL\n", __LINE__);
212 if (rte_event_port_attr_get(evdev,
214 RTE_EVENT_PORT_ATTR_DEQ_DEPTH,
216 printf("%d: Error retrieveing dequeue depth\n", __LINE__);
220 /* Send QEs to queue 0 */
/* dequeue_depth + 1 events guarantees at least one stays queued. */
221 for (i = 0; i < dequeue_depth + 1; i++) {
222 ev.mbuf = rte_pktmbuf_alloc(t->mbuf_pool);
224 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
226 if (enqueue_timeout(0, &ev, 1000)) {
227 printf("%d: Error enqueuing events\n", __LINE__);
232 /* Send QEs to queue 1 */
233 for (i = 0; i < dequeue_depth + 1; i++) {
234 ev.mbuf = rte_pktmbuf_alloc(t->mbuf_pool);
236 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
238 if (enqueue_timeout(0, &ev, 1000)) {
239 printf("%d: Error enqueuing events\n", __LINE__);
244 /* Now the DLB is scheduling events from the port to the IQ, and at
245 * least one event should be remaining in each queue.
248 if (rte_event_dev_stop_flush_callback_register(evdev, flush, NULL)) {
249 printf("%d: Error installing the flush callback\n", __LINE__);
/* Stop (elided) must have invoked flush() on every event, returning all
 * mbufs to the pool.
 */
255 if (count != rte_mempool_avail_count(t->mbuf_pool)) {
256 printf("%d: Error executing the flush callback\n", __LINE__);
/* Unregister so later tests stop the device without the callback. */
260 if (rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL)) {
261 printf("%d: Error uninstalling the flush callback\n", __LINE__);
/* test_single_link() - Verify single-link (directed) resource accounting:
 * with nb_single_link_event_port_queues = 1, exactly one directed port
 * and one directed queue may be created; a second of each must fail; and
 * directed resources may only link to each other.
 */
272 test_single_link(void)
274 struct rte_event_dev_config config = {0};
275 struct rte_event_queue_conf queue_conf;
276 struct rte_event_port_conf port_conf;
277 struct rte_event_dev_info info;
281 if (rte_event_dev_info_get(evdev, &info)) {
282 printf("%d: Error querying device info\n", __LINE__);
/* 2 ports/queues total, of which exactly 1 pair is single-link. */
286 config.nb_event_queues = 2;
287 config.nb_event_ports = 2;
288 config.nb_single_link_event_port_queues = 1;
289 config.nb_event_queue_flows = info.max_event_queue_flows;
290 config.nb_events_limit = info.max_num_events;
291 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
292 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
293 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
294 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
296 ret = rte_event_dev_configure(evdev, &config);
298 printf("%d: Error configuring device\n", __LINE__);
302 /* Create a directed port */
303 if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
304 printf("%d: Error querying default port conf\n", __LINE__);
308 port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;
310 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
311 printf("%d: port 0 setup expected to succeed\n", __LINE__);
315 /* Attempt to create another directed port */
/* Only 1 single-link port was budgeted, so this must fail. */
316 if (rte_event_port_setup(evdev, 1, &port_conf) == 0) {
317 printf("%d: port 1 setup expected to fail\n", __LINE__);
321 port_conf.event_port_cfg = 0;
323 /* Create a load-balanced port */
324 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
325 printf("%d: port 1 setup expected to succeed\n", __LINE__);
329 /* Create a directed queue */
330 if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
331 printf("%d: Error querying default queue conf\n", __LINE__);
335 queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
337 if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
338 printf("%d: queue 0 setup expected to succeed\n", __LINE__);
342 /* Attempt to create another directed queue */
343 if (rte_event_queue_setup(evdev, 1, &queue_conf) == 0) {
344 printf("%d: queue 1 setup expected to fail\n", __LINE__);
348 /* Create a load-balanced queue */
349 queue_conf.event_queue_cfg = 0;
351 if (rte_event_queue_setup(evdev, 1, &queue_conf) < 0) {
352 printf("%d: queue 1 setup expected to succeed\n", __LINE__);
356 /* Attempt to link directed and load-balanced resources */
/* Directed port -> LB queue (and vice versa below) must be rejected. */
358 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) == 1) {
359 printf("%d: port 0 link expected to fail\n", __LINE__);
364 if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) == 1) {
365 printf("%d: port 1 link expected to fail\n", __LINE__);
369 /* Link ports to queues */
371 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
372 printf("%d: port 0 link expected to succeed\n", __LINE__);
377 if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
378 printf("%d: port 1 link expected to succeed\n", __LINE__);
382 return rte_event_dev_close(evdev);
/* Error path (elided labels): close the device before returning. */
385 rte_event_dev_close(evdev);
389 #define NUM_LDB_PORTS 64
390 #define NUM_LDB_QUEUES 128
/* Body of test_info_get() (function header elided from this listing):
 * verify that info_get reports only load-balanced ports/queues, both
 * before and after the device is configured with additional directed
 * (single-link) resources.
 */
395 struct rte_event_dev_config config = {0};
396 struct rte_event_dev_info info;
399 if (rte_event_dev_info_get(evdev, &info)) {
400 printf("%d: Error querying device info\n", __LINE__);
/* Pre-configure: counts must equal the LDB-only totals. */
404 if (info.max_event_ports != NUM_LDB_PORTS) {
405 printf("%d: Got %u ports, expected %u\n",
406 __LINE__, info.max_event_ports, NUM_LDB_PORTS);
410 if (info.max_event_queues != NUM_LDB_QUEUES) {
411 printf("%d: Got %u queues, expected %u\n",
412 __LINE__, info.max_event_queues, NUM_LDB_QUEUES);
/* Configure with half the ports as single-link, which also consumes
 * extra (directed) queues beyond the LDB count.
 */
416 config.nb_event_ports = info.max_event_ports;
417 config.nb_event_queues = NUM_LDB_QUEUES + info.max_event_ports / 2;
418 config.nb_single_link_event_port_queues = info.max_event_ports / 2;
419 config.nb_event_queue_flows = info.max_event_queue_flows;
420 config.nb_events_limit = info.max_num_events;
421 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
422 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
423 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
424 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
426 ret = rte_event_dev_configure(evdev, &config);
428 printf("%d: Error configuring device\n", __LINE__);
432 if (rte_event_dev_info_get(evdev, &info)) {
433 printf("%d: Error querying device info\n", __LINE__);
437 /* The DLB PMD only reports load-balanced ports and queues in its
438 * info_get function. Confirm that these values don't include the
439 * directed port or queue counts.
442 if (info.max_event_ports != NUM_LDB_PORTS) {
443 printf("%d: Got %u ports, expected %u\n",
444 __LINE__, info.max_event_ports, NUM_LDB_PORTS);
448 if (info.max_event_queues != NUM_LDB_QUEUES) {
449 printf("%d: Got %u queues, expected %u\n",
450 __LINE__, info.max_event_queues, NUM_LDB_QUEUES);
454 ret = rte_event_dev_close(evdev);
456 printf("rte_event_dev_close err %d\n", ret);
/* Error path (elided labels): close the device before returning. */
463 rte_event_dev_close(evdev);
/* test_reconfiguration_link() - Exercise stop/reconfigure/restart cycles:
 * LDB-only config, partial port/queue re-setup after reconfigure (PMD
 * must reconfigure the untouched resources), the same for a directed
 * config, a mixed 5-port/5-queue config, and finally growing the port
 * count by one across a reconfigure.
 */
468 test_reconfiguration_link(void)
470 struct rte_event_dev_config config = {0};
471 struct rte_event_queue_conf queue_conf;
472 struct rte_event_port_conf port_conf;
473 struct rte_event_dev_info info;
477 if (rte_event_dev_info_get(evdev, &info)) {
478 printf("%d: Error querying device info\n", __LINE__);
482 config.nb_event_queues = 2;
483 config.nb_event_ports = 2;
484 config.nb_single_link_event_port_queues = 0;
485 config.nb_event_queue_flows = info.max_event_queue_flows;
486 config.nb_events_limit = info.max_num_events;
487 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
488 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
489 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
490 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
492 /* Configure the device with 2 LDB ports and 2 LDB queues */
493 ret = rte_event_dev_configure(evdev, &config);
495 printf("%d: Error configuring device\n", __LINE__);
499 /* Configure the ports and queues */
500 if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
501 printf("%d: Error querying default port conf\n", __LINE__);
505 for (i = 0; i < 2; i++) {
506 if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
507 printf("%d: port %d setup expected to succeed\n",
513 if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
514 printf("%d: Error querying default queue conf\n", __LINE__);
518 for (i = 0; i < 2; i++) {
519 if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
520 printf("%d: queue %d setup expected to succeed\n",
526 /* Link P0->Q0 and P1->Q1 */
527 for (i = 0; i < 2; i++) {
530 if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
531 printf("%d: port %d link expected to succeed\n",
537 /* Start the device */
538 if (rte_event_dev_start(evdev) < 0) {
539 printf("%d: device start failed\n", __LINE__);
543 /* Stop the device */
544 rte_event_dev_stop(evdev);
546 /* Reconfigure device */
547 ret = rte_event_dev_configure(evdev, &config);
549 printf("%d: Error re-configuring device\n", __LINE__);
553 /* Configure P1 and Q1, leave P0 and Q0 to be configured by the PMD. */
554 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
555 printf("%d: port 1 setup expected to succeed\n",
560 if (rte_event_queue_setup(evdev, 1, &queue_conf) < 0) {
561 printf("%d: queue 1 setup expected to succeed\n",
566 /* Link P0->Q0 and Q1 */
567 for (i = 0; i < 2; i++) {
570 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
571 printf("%d: P0->Q%d link expected to succeed\n",
577 /* Link P1->Q0 and Q1 */
578 for (i = 0; i < 2; i++) {
581 if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
582 printf("%d: P1->Q%d link expected to succeed\n",
588 /* Start the device */
589 if (rte_event_dev_start(evdev) < 0) {
590 printf("%d: device start failed\n", __LINE__);
594 /* Stop the device */
595 rte_event_dev_stop(evdev);
597 /* Configure device with 2 DIR ports and 2 DIR queues */
598 config.nb_single_link_event_port_queues = 2;
600 ret = rte_event_dev_configure(evdev, &config);
602 printf("%d: Error configuring device\n", __LINE__);
606 /* Configure the ports and queues */
607 port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;
609 for (i = 0; i < 2; i++) {
610 if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
611 printf("%d: port %d setup expected to succeed\n",
617 queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
619 for (i = 0; i < 2; i++) {
620 if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
621 printf("%d: queue %d setup expected to succeed\n",
627 /* Link P0->Q0 and P1->Q1 */
628 for (i = 0; i < 2; i++) {
631 if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
632 printf("%d: port %d link expected to succeed\n",
638 /* Start the device */
639 if (rte_event_dev_start(evdev) < 0) {
640 printf("%d: device start failed\n", __LINE__);
644 /* Stop the device */
645 rte_event_dev_stop(evdev);
647 /* Reconfigure device */
648 ret = rte_event_dev_configure(evdev, &config);
650 printf("%d: Error re-configuring device\n", __LINE__);
654 /* Configure P1 and Q0, leave P0 and Q1 to be configured by the PMD. */
655 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
656 printf("%d: port 1 setup expected to succeed\n",
661 if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
/* NOTE(review): message says "queue 1" but this sets up queue 0 —
 * copy/paste in the error string; confirm and fix the literal.
 */
662 printf("%d: queue 1 setup expected to succeed\n",
670 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
671 printf("%d: P0->Q%d link expected to succeed\n",
679 if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
680 printf("%d: P1->Q%d link expected to succeed\n",
685 /* Start the device */
686 if (rte_event_dev_start(evdev) < 0) {
687 printf("%d: device start failed\n", __LINE__);
691 rte_event_dev_stop(evdev);
/* Mixed config: 4 LDB + 1 directed port/queue pair. */
693 config.nb_event_queues = 5;
694 config.nb_event_ports = 5;
695 config.nb_single_link_event_port_queues = 1;
697 ret = rte_event_dev_configure(evdev, &config);
699 printf("%d: Error re-configuring device\n", __LINE__);
/* First nb_event_queues-1 resources are load-balanced... */
703 for (i = 0; i < config.nb_event_queues - 1; i++) {
704 port_conf.event_port_cfg = 0;
705 queue_conf.event_queue_cfg = 0;
707 if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
708 printf("%d: port %d setup expected to succeed\n",
713 if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
714 printf("%d: queue %d setup expected to succeed\n",
721 if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
722 printf("%d: P%d->Q%d link expected to succeed\n",
/* ...and the last one (loop index i after the loop) is single-link. */
728 port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;
729 queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
731 if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
732 printf("%d: port %d setup expected to succeed\n",
737 if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
738 printf("%d: queue %d setup expected to succeed\n",
745 if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
746 printf("%d: P%d->Q%d link expected to succeed\n",
751 /* Start the device */
752 if (rte_event_dev_start(evdev) < 0) {
753 printf("%d: device start failed\n", __LINE__);
757 /* Stop the device */
758 rte_event_dev_stop(evdev);
760 config.nb_event_ports += 1;
762 /* Reconfigure device with 1 more load-balanced port */
763 ret = rte_event_dev_configure(evdev, &config);
765 printf("%d: Error re-configuring device\n", __LINE__);
769 port_conf.event_port_cfg = 0;
771 /* Configure the new port */
772 if (rte_event_port_setup(evdev, config.nb_event_ports - 1,
774 printf("%d: port 1 setup expected to succeed\n",
779 /* Start the device */
780 if (rte_event_dev_start(evdev) < 0) {
781 printf("%d: device start failed\n", __LINE__);
/* test_load_balanced_traffic() - Smoke-test the LDB datapath on a single
 * port/queue pair: enqueue one NEW event, dequeue and FORWARD it, then
 * dequeue and RELEASE it.
 */
794 test_load_balanced_traffic(void)
797 struct rte_event_dev_config config = {0};
798 struct rte_event_queue_conf queue_conf;
799 struct rte_event_port_conf port_conf;
800 struct rte_event_dev_info info;
805 if (rte_event_dev_info_get(evdev, &info)) {
806 printf("%d: Error querying device info\n", __LINE__);
810 config.nb_event_queues = 1;
811 config.nb_event_ports = 1;
812 config.nb_single_link_event_port_queues = 0;
813 config.nb_event_queue_flows = info.max_event_queue_flows;
814 config.nb_events_limit = info.max_num_events;
815 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
816 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
817 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
818 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
820 /* Configure the device with 1 LDB port and queue */
821 ret = rte_event_dev_configure(evdev, &config);
823 printf("%d: Error configuring device\n", __LINE__);
827 /* Configure the ports and queues */
828 if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
829 printf("%d: Error querying default port conf\n", __LINE__);
833 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
834 printf("%d: port 0 setup expected to succeed\n",
839 if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
840 printf("%d: Error querying default queue conf\n", __LINE__);
844 if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
845 printf("%d: queue 0 setup expected to succeed\n",
853 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
854 printf("%d: port 0 link expected to succeed\n",
859 /* Start the device */
860 if (rte_event_dev_start(evdev) < 0) {
861 printf("%d: device start failed\n", __LINE__);
865 /* Enqueue 1 NEW event */
866 ev.op = RTE_EVENT_OP_NEW;
867 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
872 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
873 printf("%d: NEW enqueue expected to succeed\n",
878 /* Dequeue and enqueue 1 FORWARD event */
/* Effectively-infinite per-dequeue timeout (device uses per-dequeue
 * timeouts, see event_dev_cfg above).
 */
879 timeout = 0xFFFFFFFFF;
880 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
881 printf("%d: event dequeue expected to succeed\n",
886 ev.op = RTE_EVENT_OP_FORWARD;
888 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
/* NOTE(review): this enqueues a FORWARD op but the message says "NEW" —
 * copy/paste in the error string (same below for RELEASE).
 */
889 printf("%d: NEW enqueue expected to succeed\n",
894 /* Dequeue and enqueue 1 RELEASE operation */
895 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
896 printf("%d: event dequeue expected to succeed\n",
901 ev.op = RTE_EVENT_OP_RELEASE;
903 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
904 printf("%d: NEW enqueue expected to succeed\n",
/* test_directed_traffic() - Smoke-test the directed (single-link)
 * datapath on one port/queue pair: NEW -> dequeue/FORWARD ->
 * dequeue/RELEASE, also checking the dequeued event's queue_id.
 */
918 test_directed_traffic(void)
921 struct rte_event_dev_config config = {0};
922 struct rte_event_queue_conf queue_conf;
923 struct rte_event_port_conf port_conf;
924 struct rte_event_dev_info info;
929 if (rte_event_dev_info_get(evdev, &info)) {
930 printf("%d: Error querying device info\n", __LINE__);
934 config.nb_event_queues = 1;
935 config.nb_event_ports = 1;
936 config.nb_single_link_event_port_queues = 1;
937 config.nb_event_queue_flows = info.max_event_queue_flows;
938 config.nb_events_limit = info.max_num_events;
939 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
940 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
941 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
942 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
944 /* Configure the device with 1 DIR port and queue */
945 ret = rte_event_dev_configure(evdev, &config);
947 printf("%d: Error configuring device\n", __LINE__);
951 /* Configure the ports and queues */
952 if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
953 printf("%d: Error querying default port conf\n", __LINE__);
/* NOTE(review): assigns the QUEUE flag macro to the PORT cfg field —
 * RTE_EVENT_PORT_CFG_SINGLE_LINK is the intended macro (cf.
 * test_single_link above). Confirm the two macros' values before
 * assuming this is benign.
 */
957 port_conf.event_port_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
959 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
960 printf("%d: port 0 setup expected to succeed\n",
965 if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
966 printf("%d: Error querying default queue conf\n", __LINE__);
970 queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
972 if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
973 printf("%d: queue 0 setup expected to succeed\n",
981 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
982 printf("%d: port 0 link expected to succeed\n",
987 /* Start the device */
988 if (rte_event_dev_start(evdev) < 0) {
989 printf("%d: device start failed\n", __LINE__);
993 /* Enqueue 1 NEW event */
994 ev.op = RTE_EVENT_OP_NEW;
999 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1000 printf("%d: NEW enqueue expected to succeed\n",
1005 /* Dequeue and enqueue 1 FORWARD event */
1006 timeout = 0xFFFFFFFFF;
1007 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1008 printf("%d: event dequeue expected to succeed\n",
/* Directed traffic must come back on the queue it was sent to. */
1013 if (ev.queue_id != 0) {
1014 printf("%d: invalid dequeued event queue ID (%d)\n",
1015 __LINE__, ev.queue_id);
1019 ev.op = RTE_EVENT_OP_FORWARD;
1021 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1022 printf("%d: NEW enqueue expected to succeed\n",
1027 /* Dequeue and enqueue 1 RELEASE operation */
1028 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1029 printf("%d: event dequeue expected to succeed\n",
1034 ev.op = RTE_EVENT_OP_RELEASE;
1036 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1037 printf("%d: NEW enqueue expected to succeed\n",
/* test_deferred_sched() - Verify DEFERRED_POP token-pop mode: with two
 * ports sharing one parallel queue, port 0 (dequeue_depth 1) holds its
 * tokens, so after its initial dequeues no further events are scheduled
 * to it; port 1 must be able to drain all the rest.
 */
1051 test_deferred_sched(void)
1054 struct rte_event_dev_config config = {0};
1055 struct rte_event_queue_conf queue_conf;
1056 struct rte_event_port_conf port_conf;
1057 struct rte_event_dev_info info;
1058 const int num_events = 128;
1059 struct rte_event ev;
1063 if (rte_event_dev_info_get(evdev, &info)) {
1064 printf("%d: Error querying device info\n", __LINE__);
1068 config.nb_event_queues = 1;
1069 config.nb_event_ports = 2;
1070 config.nb_single_link_event_port_queues = 0;
1071 config.nb_event_queue_flows = info.max_event_queue_flows;
1072 config.nb_events_limit = info.max_num_events;
1073 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
1074 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
1075 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
1076 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
1078 /* Configure the device with 2 LDB ports and 1 queue */
1079 ret = rte_event_dev_configure(evdev, &config);
1081 printf("%d: Error configuring device\n", __LINE__);
/* Both ports use the DLB-specific deferred token-pop mode. */
1085 ret = rte_pmd_dlb_set_token_pop_mode(evdev, 0, DEFERRED_POP);
1087 printf("%d: Error setting deferred scheduling\n", __LINE__);
1091 ret = rte_pmd_dlb_set_token_pop_mode(evdev, 1, DEFERRED_POP);
1093 printf("%d: Error setting deferred scheduling\n", __LINE__);
1097 /* Configure the ports and queues */
1098 if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
1099 printf("%d: Error querying default port conf\n", __LINE__);
/* Minimal CQ depth so the port saturates after very few events. */
1103 port_conf.dequeue_depth = 1;
1105 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1106 printf("%d: port 0 setup expected to succeed\n",
1111 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
1112 printf("%d: port 1 setup expected to succeed\n",
1117 if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
1118 printf("%d: Error querying default queue conf\n", __LINE__);
1122 queue_conf.schedule_type = RTE_SCHED_TYPE_PARALLEL;
1123 queue_conf.nb_atomic_order_sequences = 0;
1125 if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
1126 printf("%d: queue 0 setup expected to succeed\n",
1131 /* Link P0->Q0 and P1->Q0 */
1134 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1135 printf("%d: port 0 link expected to succeed\n",
1140 if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
1141 printf("%d: port 1 link expected to succeed\n",
1146 /* Start the device */
1147 if (rte_event_dev_start(evdev) < 0) {
1148 printf("%d: device start failed\n", __LINE__);
1152 /* Enqueue 128 NEW events */
1153 ev.op = RTE_EVENT_OP_NEW;
1154 ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
1159 for (i = 0; i < num_events; i++) {
1160 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1161 printf("%d: NEW enqueue expected to succeed\n",
1167 /* Dequeue two events from port 0 (dequeue_depth * 2 due to the
1168 * reserved token scheme)
1170 timeout = 0xFFFFFFFFF;
1171 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1172 printf("%d: event dequeue expected to succeed\n",
1177 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1178 printf("%d: event dequeue expected to succeed\n",
1183 /* Dequeue (and release) all other events from port 1. Deferred
1184 * scheduling ensures no other events are scheduled to port 0 without a
1185 * subsequent rte_event_dequeue_burst() call.
1187 for (i = 0; i < num_events - 2; i++) {
1188 if (rte_event_dequeue_burst(evdev, 1, &ev, 1, timeout) != 1) {
1189 printf("%d: event dequeue expected to succeed\n",
1194 ev.op = RTE_EVENT_OP_RELEASE;
1196 if (rte_event_enqueue_burst(evdev, 1, &ev, 1) != 1) {
1197 printf("%d: RELEASE enqueue expected to succeed\n",
/* test_delayed_pop() - Verify DELAYED_POP token-pop mode: tokens are
 * popped only after dequeue_depth releases, so releasing one fewer than
 * dequeue_depth must leave the CQ starved (dequeue returns 0), and the
 * final release must unblock scheduling of the next batch.
 */
1212 test_delayed_pop(void)
1215 struct rte_event_dev_config config = {0};
1216 struct rte_event_queue_conf queue_conf;
1217 struct rte_event_port_conf port_conf;
1218 struct rte_event_dev_info info;
1219 int ret, i, num_events;
1220 struct rte_event ev;
1223 if (rte_event_dev_info_get(evdev, &info)) {
1224 printf("%d: Error querying device info\n", __LINE__);
1228 config.nb_event_queues = 1;
1229 config.nb_event_ports = 1;
1230 config.nb_single_link_event_port_queues = 0;
1231 config.nb_event_queue_flows = info.max_event_queue_flows;
1232 config.nb_events_limit = info.max_num_events;
1233 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
1234 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
1235 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
1236 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
1238 /* Configure the device with 1 LDB port and queue */
1239 ret = rte_event_dev_configure(evdev, &config);
1241 printf("%d: Error configuring device\n", __LINE__);
1245 ret = rte_pmd_dlb_set_token_pop_mode(evdev, 0, DELAYED_POP);
1247 printf("%d: Error setting deferred scheduling\n", __LINE__);
1251 /* Configure the ports and queues */
1252 if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
1253 printf("%d: Error querying default port conf\n", __LINE__);
/* Implicit releases disabled: the test controls releases explicitly. */
1257 port_conf.dequeue_depth = 16;
1258 port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
1260 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1261 printf("%d: port 0 setup expected to succeed\n",
1266 if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
1267 printf("%d: Error querying default queue conf\n", __LINE__);
1271 if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
1272 printf("%d: queue 0 setup expected to succeed\n",
1280 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1281 printf("%d: port 0 link expected to succeed\n",
1286 /* Start the device */
1287 if (rte_event_dev_start(evdev) < 0) {
1288 printf("%d: device start failed\n", __LINE__);
1292 num_events = 2 * port_conf.dequeue_depth;
1294 /* Enqueue 2 * dequeue_depth NEW events. Due to the PMD's reserved
1295 * token scheme, the port will initially behave as though its
1296 * dequeue_depth is twice the requested size.
1298 ev.op = RTE_EVENT_OP_NEW;
1299 ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
1304 for (i = 0; i < num_events; i++) {
1305 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1306 printf("%d: NEW enqueue expected to succeed\n",
1312 /* Flush these events out of the CQ */
1313 timeout = 0xFFFFFFFFF;
1315 for (i = 0; i < num_events; i++) {
1316 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1317 printf("%d: event dequeue expected to succeed\n",
1323 ev.op = RTE_EVENT_OP_RELEASE;
1325 for (i = 0; i < num_events; i++) {
1326 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1327 printf("%d: RELEASE enqueue expected to succeed\n",
1333 /* Enqueue 2 * dequeue_depth NEW events again */
1334 ev.op = RTE_EVENT_OP_NEW;
1335 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
1340 for (i = 0; i < num_events; i++) {
1341 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1342 printf("%d: NEW enqueue expected to succeed\n",
1348 /* Dequeue dequeue_depth events but only release dequeue_depth - 1.
1349 * Delayed pop won't perform the pop and no more events will be
1352 for (i = 0; i < port_conf.dequeue_depth; i++) {
1353 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1354 printf("%d: event dequeue expected to succeed\n",
1360 ev.op = RTE_EVENT_OP_RELEASE;
1362 for (i = 0; i < port_conf.dequeue_depth - 1; i++) {
1363 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1364 printf("%d: RELEASE enqueue expected to succeed\n",
/* With one release outstanding, the CQ must be empty: expect failure
 * (timeout presumably shortened on an elided line — confirm).
 */
1372 ret = rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout);
1374 printf("%d: event dequeue expected to fail (ret = %d)\n",
1379 /* Release one more event. This will trigger the token pop, and
1380 * another batch of events will be scheduled to the device.
1382 ev.op = RTE_EVENT_OP_RELEASE;
1384 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1385 printf("%d: RELEASE enqueue expected to succeed\n",
1390 timeout = 0xFFFFFFFFF;
1392 for (i = 0; i < port_conf.dequeue_depth; i++) {
1393 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1394 printf("%d: event dequeue expected to succeed\n",
/* Body of the selftest driver (function header elided from this
 * listing): lazily create the shared mbuf pool, then run each test in
 * sequence, reporting the first failure.
 */
1414 /* Only create mbuf pool once, reuse for each test run */
1415 if (!eventdev_func_mempool) {
1416 eventdev_func_mempool =
1417 rte_pktmbuf_pool_create("EVENTDEV_DLB_SA_MBUF_POOL",
1418 (1 << 12), /* 4k buffers */
1419 32 /*MBUF_CACHE_SIZE*/,
1421 512, /* use very small mbufs */
1423 if (!eventdev_func_mempool) {
1424 printf("ERROR creating mempool\n");
1428 t.mbuf_pool = eventdev_func_mempool;
1430 printf("*** Running Stop Flush test...\n");
1431 ret = test_stop_flush(&t);
1433 printf("ERROR - Stop Flush test FAILED.\n");
1437 printf("*** Running Single Link test...\n");
1438 ret = test_single_link();
1440 printf("ERROR - Single Link test FAILED.\n");
1445 printf("*** Running Info Get test...\n");
1446 ret = test_info_get();
/* NOTE(review): failure message says "Stop Flush" but this is the Info
 * Get test — copy/paste in the string literal; fix the message.
 */
1448 printf("ERROR - Stop Flush test FAILED.\n");
1452 printf("*** Running Reconfiguration Link test...\n");
1453 ret = test_reconfiguration_link();
1455 printf("ERROR - Reconfiguration Link test FAILED.\n");
1460 printf("*** Running Load-Balanced Traffic test...\n");
1461 ret = test_load_balanced_traffic();
1463 printf("ERROR - Load-Balanced Traffic test FAILED.\n");
1468 printf("*** Running Directed Traffic test...\n");
1469 ret = test_directed_traffic();
1471 printf("ERROR - Directed Traffic test FAILED.\n");
1476 printf("*** Running Deferred Scheduling test...\n");
1477 ret = test_deferred_sched();
1479 printf("ERROR - Deferred Scheduling test FAILED.\n");
1484 printf("*** Running Delayed Pop test...\n");
1485 ret = test_delayed_pop();
1487 printf("ERROR - Delayed Pop test FAILED.\n");
/* test_dlb_eventdev() - Entry point: iterate over all probed event
 * devices, run the selftest on every DLB instance, and print a summary
 * of found/skipped/passed/failed counts.
 */
1499 test_dlb_eventdev(void)
1501 const char *dlb_eventdev_name = "dlb_event";
1502 uint8_t num_evdevs = rte_event_dev_count();
1504 int found = 0, skipped = 0, passed = 0, failed = 0;
1505 struct rte_event_dev_info info;
1507 for (i = 0; found + skipped < num_evdevs && i < RTE_EVENT_MAX_DEVS;
1509 ret = rte_event_dev_info_get(i, &info);
1513 /* skip non-dlb event devices */
/* NOTE(review): sizeof(*info.driver_name) is sizeof(char) == 1, so this
 * strncmp compares only the FIRST character — any driver starting with
 * 'd' matches. Likely intended strlen(dlb_eventdev_name) (or
 * strcmp/strstr); confirm and fix.
 */
1514 if (strncmp(info.driver_name, dlb_eventdev_name,
1515 sizeof(*info.driver_name)) != 0) {
1520 evdev = rte_event_dev_get_dev_id(info.driver_name);
1522 printf("Could not get dev_id for eventdev with name %s, i=%d\n",
1523 info.driver_name, i);
1528 printf("Running selftest on eventdev %s\n", info.driver_name);
1529 ret = do_selftest();
1532 printf("Selftest passed for eventdev %s\n",
1536 printf("Selftest failed for eventdev %s, err=%d\n",
1537 info.driver_name, ret);
1541 printf("Ran selftest on %d eventdevs, %d skipped, %d passed, %d failed\n",
1542 found, skipped, passed, failed);