1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2020 Intel Corporation
10 #include <sys/queue.h>
12 #include <rte_memory.h>
13 #include <rte_memzone.h>
14 #include <rte_launch.h>
16 #include <rte_per_lcore.h>
17 #include <rte_lcore.h>
18 #include <rte_debug.h>
19 #include <rte_ethdev.h>
20 #include <rte_cycles.h>
21 #include <rte_eventdev.h>
22 #include <rte_pause.h>
24 #include "dlb2_priv.h"
25 #include "rte_pmd_dlb2.h"
29 #define DEFAULT_NUM_SEQ_NUMS 64
31 static struct rte_mempool *eventdev_func_mempool;
35 struct rte_mempool *mbuf_pool;
39 /* initialization and config */
/* Configure the event device under test (global 'evdev') with the requested
 * queue and port counts, sizing every other config field from the device's
 * advertised maxima. Resets *t and points its mbuf_pool at the shared
 * selftest mempool. Returns rte_event_dev_configure()'s result on the path
 * visible here; early error-return lines fall outside this visible chunk.
 */
41 init(struct test *t, int nb_queues, int nb_ports)
43 struct rte_event_dev_config config = {0};
44 struct rte_event_dev_info info;
47 memset(t, 0, sizeof(*t));
/* Reuse the process-wide mempool created once in do_selftest(). */
49 t->mbuf_pool = eventdev_func_mempool;
51 if (rte_event_dev_info_get(evdev, &info)) {
52 printf("%d: Error querying device info\n", __LINE__);
56 config.nb_event_queues = nb_queues;
57 config.nb_event_ports = nb_ports;
/* Size all limits to the device maxima so individual tests never hit an
 * artificial resource cap.
 */
58 config.nb_event_queue_flows = info.max_event_queue_flows;
59 config.nb_events_limit = info.max_num_events;
60 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
61 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
62 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
63 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
65 ret = rte_event_dev_configure(evdev, &config);
67 printf("%d: Error configuring device\n", __LINE__);
/* Set up event ports 0..num_ports-1 on 'evdev' using each port's default
 * configuration; rejects requests above MAX_PORTS. Error-return and closing
 * lines fall outside this visible chunk.
 */
73 create_ports(int num_ports)
77 if (num_ports > MAX_PORTS)
80 for (i = 0; i < num_ports; i++) {
81 struct rte_event_port_conf conf;
83 if (rte_event_port_default_conf_get(evdev, i, &conf)) {
84 printf("%d: Error querying default port conf\n",
89 if (rte_event_port_setup(evdev, i, &conf) < 0) {
90 printf("%d: Error setting up port %d\n", __LINE__, i);
/* Create 'num_qids' load-balanced queues of schedule type 'flags', starting
 * at the next free queue ID (t->nb_qids), and advance t->nb_qids. Parallel
 * queues must not request ordered sequence numbers, hence the
 * nb_atomic_order_sequences special case. Error-return lines fall outside
 * this visible chunk.
 */
99 create_lb_qids(struct test *t, int num_qids, uint32_t flags)
103 for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
104 struct rte_event_queue_conf conf;
106 if (rte_event_queue_default_conf_get(evdev, i, &conf)) {
107 printf("%d: Error querying default queue conf\n",
112 conf.schedule_type = flags;
/* Parallel queues have no ordering state; all others get the default
 * sequence-number allocation.
 */
114 if (conf.schedule_type == RTE_SCHED_TYPE_PARALLEL)
115 conf.nb_atomic_order_sequences = 0;
117 conf.nb_atomic_order_sequences = DEFAULT_NUM_SEQ_NUMS;
119 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
120 printf("%d: error creating qid %d\n", __LINE__, i);
125 t->nb_qids += num_qids;
126 if (t->nb_qids > MAX_QIDS)
/* Convenience wrapper: create 'num_qids' atomic load-balanced queues. */
133 create_atomic_qids(struct test *t, int num_qids)
135 return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
/* Teardown fragment (enclosing function header is outside this visible
 * chunk): stop the device, then close it and report a close failure.
 */
144 rte_event_dev_stop(evdev);
145 ret = rte_event_dev_close(evdev);
148 printf("%d: rte_event_dev_close failed, ret = %d\n",
/* Busy-wait (on the TSC) retrying a single-event enqueue on 'port_id' until
 * it succeeds or 'tmo_us' microseconds elapse. Success/error return lines
 * fall outside this visible chunk.
 */
153 enqueue_timeout(uint8_t port_id, struct rte_event *ev, uint64_t tmo_us)
155 const uint64_t start = rte_get_timer_cycles();
/* us -> TSC ticks; 1E6 is a double, so this computes in floating point. */
156 const uint64_t ticks = (tmo_us * rte_get_timer_hz()) / 1E6;
158 while ((rte_get_timer_cycles() - start) < ticks) {
159 if (rte_event_enqueue_burst(evdev, port_id, ev, 1) == 1)
/* NOTE(review): treats a full port (back-pressure) as retryable and anything
 * else as fatal. Assumes the enqueue path stores the *negative* errno
 * (-ENOSPC) in rte_errno — verify against this PMD's enqueue implementation.
 */
162 if (rte_errno != -ENOSPC) {
163 printf("enqueue_burst returned rte_errno %d\n",
168 printf("%s time out\n", __func__);
/* Stop-flush callback: free the mbuf of each event still held by the device
 * when it is stopped, so the mempool's available count is restored.
 */
173 flush(uint8_t id __rte_unused, struct rte_event event, void *arg __rte_unused)
175 rte_pktmbuf_free(event.mbuf);
/* Verify the stop-flush mechanism: fill two atomic queues (one of which is
 * then unlinked) past the port's dequeue depth, register the flush()
 * callback, stop the device, and confirm every in-flight mbuf was returned
 * to the pool (available count unchanged vs. the pre-enqueue snapshot).
 * Several error-return/cleanup lines fall outside this visible chunk.
 */
179 test_stop_flush(struct test *t) /* test to check we can properly flush events */
182 uint32_t dequeue_depth;
183 unsigned int i, count;
186 ev.op = RTE_EVENT_OP_NEW;
188 if (init(t, 2, 1) < 0 ||
189 create_ports(1) < 0 ||
190 create_atomic_qids(t, 2) < 0) {
191 printf("%d: Error initializing device\n", __LINE__);
/* NULL/0 links the port to all configured queues; expect both to link. */
195 if (rte_event_port_link(evdev, 0, NULL, NULL, 0) != 2) {
196 printf("%d: Error linking queues to the port\n", __LINE__);
200 if (rte_event_dev_start(evdev) < 0) {
201 printf("%d: Error with start call\n", __LINE__);
205 /* Unlink queue 1 so the PMD's stop callback has to cleanup an unlinked
210 if (rte_event_port_unlink(evdev, 0, &queue_id, 1) != 1) {
211 printf("%d: Error unlinking queue 1 from port\n", __LINE__);
/* Snapshot the pool level; it must match again after the flush. */
216 count = rte_mempool_avail_count(t->mbuf_pool);
218 printf("%d: mbuf_pool is NULL\n", __LINE__);
222 if (rte_event_port_attr_get(evdev,
224 RTE_EVENT_PORT_ATTR_DEQ_DEPTH,
226 printf("%d: Error retrieveing dequeue depth\n", __LINE__);
230 /* Send QEs to queue 0 */
231 for (i = 0; i < dequeue_depth + 1; i++) {
232 ev.mbuf = rte_pktmbuf_alloc(t->mbuf_pool);
234 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
236 if (enqueue_timeout(0, &ev, 1000)) {
237 printf("%d: Error enqueuing events\n", __LINE__);
242 /* Send QEs to queue 1 */
243 for (i = 0; i < dequeue_depth + 1; i++) {
244 ev.mbuf = rte_pktmbuf_alloc(t->mbuf_pool);
246 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
248 if (enqueue_timeout(0, &ev, 1000)) {
249 printf("%d: Error enqueuing events\n", __LINE__);
254 /* Now the DLB is scheduling events from the port to the IQ, and at
255 * least one event should be remaining in each queue.
258 if (rte_event_dev_stop_flush_callback_register(evdev, flush, NULL)) {
259 printf("%d: Error installing the flush callback\n", __LINE__);
/* After stop, flush() must have freed every outstanding mbuf. */
265 if (count != rte_mempool_avail_count(t->mbuf_pool)) {
266 printf("%d: Error executing the flush callback\n", __LINE__);
270 if (rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL)) {
271 printf("%d: Error uninstalling the flush callback\n", __LINE__);
/* Exercise single-link (directed) resource accounting: with only one
 * single-link port/queue pair budgeted, creating a second directed port or
 * queue must fail, mismatched directed<->load-balanced links must fail, and
 * matched links must succeed. Error-return/cleanup lines fall outside this
 * visible chunk.
 */
282 test_single_link(void)
284 struct rte_event_dev_config config = {0};
285 struct rte_event_queue_conf queue_conf;
286 struct rte_event_port_conf port_conf;
287 struct rte_event_dev_info info;
291 if (rte_event_dev_info_get(evdev, &info)) {
292 printf("%d: Error querying device info\n", __LINE__);
/* Budget exactly one single-link (directed) port/queue pair. */
296 config.nb_event_queues = 2;
297 config.nb_event_ports = 2;
298 config.nb_single_link_event_port_queues = 1;
299 config.nb_event_queue_flows = info.max_event_queue_flows;
300 config.nb_events_limit = info.max_num_events;
301 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
302 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
303 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
304 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
306 ret = rte_event_dev_configure(evdev, &config);
308 printf("%d: Error configuring device\n", __LINE__);
312 /* Create a directed port */
313 if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
314 printf("%d: Error querying default port conf\n", __LINE__);
318 port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;
320 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
321 printf("%d: port 0 setup expected to succeed\n", __LINE__);
325 /* Attempt to create another directed port */
326 if (rte_event_port_setup(evdev, 1, &port_conf) == 0) {
327 printf("%d: port 1 setup expected to fail\n", __LINE__);
331 port_conf.event_port_cfg = 0;
333 /* Create a load-balanced port */
334 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
335 printf("%d: port 1 setup expected to succeed\n", __LINE__);
339 /* Create a directed queue */
340 if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
341 printf("%d: Error querying default queue conf\n", __LINE__);
345 queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
347 if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
348 printf("%d: queue 0 setup expected to succeed\n", __LINE__);
352 /* Attempt to create another directed queue */
353 if (rte_event_queue_setup(evdev, 1, &queue_conf) == 0) {
354 printf("%d: queue 1 setup expected to fail\n", __LINE__);
358 /* Create a load-balanced queue */
359 queue_conf.event_queue_cfg = 0;
361 if (rte_event_queue_setup(evdev, 1, &queue_conf) < 0) {
362 printf("%d: queue 1 setup expected to succeed\n", __LINE__);
366 /* Attempt to link directed and load-balanced resources */
368 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) == 1) {
369 printf("%d: port 0 link expected to fail\n", __LINE__);
374 if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) == 1) {
375 printf("%d: port 1 link expected to fail\n", __LINE__);
379 /* Link ports to queues */
381 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
382 printf("%d: port 0 link expected to succeed\n", __LINE__);
387 if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
388 printf("%d: port 1 link expected to succeed\n", __LINE__);
392 ret = rte_event_dev_close(evdev);
394 printf("%d: rte_event_dev_close failed, ret = %d\n",
400 ret = rte_event_dev_close(evdev);
402 printf("%d: rte_event_dev_close failed, ret = %d\n",
/* NOTE(review): these constants appear to encode DLB2 hardware LDB
 * port/queue counts — confirm against the device datasheet/PMD.
 */
408 #define NUM_LDB_PORTS 64
409 #define NUM_LDB_QUEUES 32
/* test_info_get (function header outside this visible chunk): confirm that
 * rte_event_dev_info_get() reports only load-balanced resources both before
 * and after configuring the device with additional directed (single-link)
 * ports/queues. Error-return/cleanup lines fall outside this visible chunk.
 */
414 struct rte_event_dev_config config = {0};
415 struct rte_event_dev_info info;
418 if (rte_event_dev_info_get(evdev, &info)) {
419 printf("%d: Error querying device info\n", __LINE__);
423 if (info.max_event_ports != NUM_LDB_PORTS) {
424 printf("%d: Got %u ports, expected %u\n",
425 __LINE__, info.max_event_ports, NUM_LDB_PORTS);
429 if (info.max_event_queues != NUM_LDB_QUEUES) {
430 printf("%d: Got %u queues, expected %u\n",
431 __LINE__, info.max_event_queues, NUM_LDB_QUEUES);
/* Configure every LDB port plus half as many directed port/queue pairs. */
435 config.nb_event_ports = info.max_event_ports;
436 config.nb_event_queues = NUM_LDB_QUEUES + info.max_event_ports / 2;
437 config.nb_single_link_event_port_queues = info.max_event_ports / 2;
438 config.nb_event_queue_flows = info.max_event_queue_flows;
439 config.nb_events_limit = info.max_num_events;
440 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
441 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
442 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
443 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
445 ret = rte_event_dev_configure(evdev, &config);
447 printf("%d: Error configuring device\n", __LINE__);
451 if (rte_event_dev_info_get(evdev, &info)) {
452 printf("%d: Error querying device info\n", __LINE__);
456 /* The DLB2 PMD only reports load-balanced ports and queues in its
457 * info_get function. Confirm that these values don't include the
458 * directed port or queue counts.
461 if (info.max_event_ports != NUM_LDB_PORTS) {
462 printf("%d: Got %u ports, expected %u\n",
463 __LINE__, info.max_event_ports, NUM_LDB_PORTS);
467 if (info.max_event_queues != NUM_LDB_QUEUES) {
468 printf("%d: Got %u queues, expected %u\n",
469 __LINE__, info.max_event_queues, NUM_LDB_QUEUES);
473 ret = rte_event_dev_close(evdev);
475 printf("%d: rte_event_dev_close failed, ret = %d\n",
482 ret = rte_event_dev_close(evdev);
484 printf("%d: rte_event_dev_close failed, ret = %d\n",
/* Exercise stop/reconfigure/restart cycles: LDB-only config, DIR-only
 * config, a mixed config, and finally growing the port count by one — in
 * each case re-setting up only a subset of resources so the PMD must
 * reconfigure the rest implicitly. Fixes two copy-paste error messages
 * below ("queue 1" on a queue-0 setup, "port 1" on the last-port setup).
 * Error-return/cleanup lines fall outside this visible chunk.
 */
491 test_reconfiguration_link(void)
493 struct rte_event_dev_config config = {0};
494 struct rte_event_queue_conf queue_conf;
495 struct rte_event_port_conf port_conf;
496 struct rte_event_dev_info info;
500 if (rte_event_dev_info_get(evdev, &info)) {
501 printf("%d: Error querying device info\n", __LINE__);
505 config.nb_event_queues = 2;
506 config.nb_event_ports = 2;
507 config.nb_single_link_event_port_queues = 0;
508 config.nb_event_queue_flows = info.max_event_queue_flows;
509 config.nb_events_limit = info.max_num_events;
510 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
511 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
512 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
513 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
515 /* Configure the device with 2 LDB ports and 2 LDB queues */
516 ret = rte_event_dev_configure(evdev, &config);
518 printf("%d: Error configuring device\n", __LINE__);
522 /* Configure the ports and queues */
523 if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
524 printf("%d: Error querying default port conf\n", __LINE__);
528 for (i = 0; i < 2; i++) {
529 if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
530 printf("%d: port %d setup expected to succeed\n",
536 if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
537 printf("%d: Error querying default queue conf\n", __LINE__);
541 for (i = 0; i < 2; i++) {
542 if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
543 printf("%d: queue %d setup expected to succeed\n",
549 /* Link P0->Q0 and P1->Q1 */
550 for (i = 0; i < 2; i++) {
553 if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
554 printf("%d: port %d link expected to succeed\n",
560 /* Start the device */
561 if (rte_event_dev_start(evdev) < 0) {
562 printf("%d: device start failed\n", __LINE__);
566 /* Stop the device */
567 rte_event_dev_stop(evdev);
569 /* Reconfigure device */
570 ret = rte_event_dev_configure(evdev, &config);
572 printf("%d: Error re-configuring device\n", __LINE__);
576 /* Configure P1 and Q1, leave P0 and Q0 to be configured by the PMD. */
577 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
578 printf("%d: port 1 setup expected to succeed\n",
583 if (rte_event_queue_setup(evdev, 1, &queue_conf) < 0) {
584 printf("%d: queue 1 setup expected to succeed\n",
589 /* Link P0->Q0 and Q1 */
590 for (i = 0; i < 2; i++) {
593 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
594 printf("%d: P0->Q%d link expected to succeed\n",
600 /* Link P1->Q0 and Q1 */
601 for (i = 0; i < 2; i++) {
604 if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
605 printf("%d: P1->Q%d link expected to succeed\n",
611 /* Start the device */
612 if (rte_event_dev_start(evdev) < 0) {
613 printf("%d: device start failed\n", __LINE__);
617 /* Stop the device */
618 rte_event_dev_stop(evdev);
620 /* Configure device with 2 DIR ports and 2 DIR queues */
621 config.nb_single_link_event_port_queues = 2;
623 ret = rte_event_dev_configure(evdev, &config);
625 printf("%d: Error configuring device\n", __LINE__);
629 /* Configure the ports and queues */
630 port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;
632 for (i = 0; i < 2; i++) {
633 if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
634 printf("%d: port %d setup expected to succeed\n",
640 queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
642 for (i = 0; i < 2; i++) {
643 if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
644 printf("%d: queue %d setup expected to succeed\n",
650 /* Link P0->Q0 and P1->Q1 */
651 for (i = 0; i < 2; i++) {
654 if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
655 printf("%d: port %d link expected to succeed\n",
661 /* Start the device */
662 if (rte_event_dev_start(evdev) < 0) {
663 printf("%d: device start failed\n", __LINE__);
667 /* Stop the device */
668 rte_event_dev_stop(evdev);
670 /* Reconfigure device */
671 ret = rte_event_dev_configure(evdev, &config);
673 printf("%d: Error re-configuring device\n", __LINE__);
677 /* Configure P1 and Q0, leave P0 and Q1 to be configured by the PMD. */
678 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
679 printf("%d: port 1 setup expected to succeed\n",
684 if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
/* Fixed copy-paste: this path sets up queue 0, not queue 1. */
685 printf("%d: queue 0 setup expected to succeed\n",
693 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
694 printf("%d: P0->Q%d link expected to succeed\n",
702 if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
703 printf("%d: P1->Q%d link expected to succeed\n",
708 /* Start the device */
709 if (rte_event_dev_start(evdev) < 0) {
710 printf("%d: device start failed\n", __LINE__);
714 rte_event_dev_stop(evdev);
/* Mixed config: 4 LDB + 1 DIR port/queue pairs. */
716 config.nb_event_queues = 5;
717 config.nb_event_ports = 5;
718 config.nb_single_link_event_port_queues = 1;
720 ret = rte_event_dev_configure(evdev, &config);
722 printf("%d: Error re-configuring device\n", __LINE__);
726 for (i = 0; i < config.nb_event_queues - 1; i++) {
727 port_conf.event_port_cfg = 0;
728 queue_conf.event_queue_cfg = 0;
730 if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
731 printf("%d: port %d setup expected to succeed\n",
736 if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
737 printf("%d: queue %d setup expected to succeed\n",
744 if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
745 printf("%d: P%d->Q%d link expected to succeed\n",
/* The last port/queue pair is directed. */
751 port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;
752 queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
754 if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
755 printf("%d: port %d setup expected to succeed\n",
760 if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
761 printf("%d: queue %d setup expected to succeed\n",
768 if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
769 printf("%d: P%d->Q%d link expected to succeed\n",
774 /* Start the device */
775 if (rte_event_dev_start(evdev) < 0) {
776 printf("%d: device start failed\n", __LINE__);
780 /* Stop the device */
781 rte_event_dev_stop(evdev);
783 config.nb_event_ports += 1;
785 /* Reconfigure device with 1 more load-balanced port */
786 ret = rte_event_dev_configure(evdev, &config);
788 printf("%d: Error re-configuring device\n", __LINE__);
792 port_conf.event_port_cfg = 0;
794 /* Configure the new port */
795 if (rte_event_port_setup(evdev, config.nb_event_ports - 1,
/* Fixed copy-paste: this sets up the newly added last port, not port 1. */
797 printf("%d: last port setup expected to succeed\n",
802 /* Start the device */
803 if (rte_event_dev_start(evdev) < 0) {
804 printf("%d: device start failed\n", __LINE__);
/* Smoke-test the NEW -> FORWARD -> RELEASE event lifecycle through a single
 * load-balanced (atomic) port/queue pair. Fixes two copy-paste error
 * messages that said "NEW enqueue" on the FORWARD and RELEASE enqueue
 * failure paths. Error-return/cleanup lines fall outside this visible chunk.
 */
817 test_load_balanced_traffic(void)
820 struct rte_event_dev_config config = {0};
821 struct rte_event_queue_conf queue_conf;
822 struct rte_event_port_conf port_conf;
823 struct rte_event_dev_info info;
828 if (rte_event_dev_info_get(evdev, &info)) {
829 printf("%d: Error querying device info\n", __LINE__);
833 config.nb_event_queues = 1;
834 config.nb_event_ports = 1;
835 config.nb_single_link_event_port_queues = 0;
836 config.nb_event_queue_flows = info.max_event_queue_flows;
837 config.nb_events_limit = info.max_num_events;
838 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
839 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
840 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
841 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
843 /* Configure the device with 1 LDB port and queue */
844 ret = rte_event_dev_configure(evdev, &config);
846 printf("%d: Error configuring device\n", __LINE__);
850 /* Configure the ports and queues */
851 if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
852 printf("%d: Error querying default port conf\n", __LINE__);
856 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
857 printf("%d: port 0 setup expected to succeed\n",
862 if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
863 printf("%d: Error querying default queue conf\n", __LINE__);
867 if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
868 printf("%d: queue 0 setup expected to succeed\n",
876 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
877 printf("%d: port 0 link expected to succeed\n",
882 /* Start the device */
883 if (rte_event_dev_start(evdev) < 0) {
884 printf("%d: device start failed\n", __LINE__);
888 /* Enqueue 1 NEW event */
889 ev.op = RTE_EVENT_OP_NEW;
890 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
895 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
896 printf("%d: NEW enqueue expected to succeed\n",
901 /* Dequeue and enqueue 1 FORWARD event */
902 timeout = 0xFFFFFFFFF;
903 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
904 printf("%d: event dequeue expected to succeed\n",
909 ev.op = RTE_EVENT_OP_FORWARD;
911 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
/* Fixed copy-paste: this enqueue is a FORWARD op, not NEW. */
912 printf("%d: FORWARD enqueue expected to succeed\n",
917 /* Dequeue and enqueue 1 RELEASE operation */
918 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
919 printf("%d: event dequeue expected to succeed\n",
924 ev.op = RTE_EVENT_OP_RELEASE;
926 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
/* Fixed copy-paste: this enqueue is a RELEASE op, not NEW. */
927 printf("%d: RELEASE enqueue expected to succeed\n",
/* Smoke-test the NEW -> FORWARD -> RELEASE event lifecycle through a single
 * directed (single-link) port/queue pair, additionally checking that the
 * dequeued event carries the expected queue ID. Fixes the port config to
 * use the port-flag namespace (RTE_EVENT_PORT_CFG_SINGLE_LINK) instead of
 * the queue flag that was assigned to event_port_cfg, and two copy-paste
 * "NEW enqueue" error messages on the FORWARD/RELEASE paths. Error-return
 * and cleanup lines fall outside this visible chunk.
 */
941 test_directed_traffic(void)
944 struct rte_event_dev_config config = {0};
945 struct rte_event_queue_conf queue_conf;
946 struct rte_event_port_conf port_conf;
947 struct rte_event_dev_info info;
952 if (rte_event_dev_info_get(evdev, &info)) {
953 printf("%d: Error querying device info\n", __LINE__);
957 config.nb_event_queues = 1;
958 config.nb_event_ports = 1;
959 config.nb_single_link_event_port_queues = 1;
960 config.nb_event_queue_flows = info.max_event_queue_flows;
961 config.nb_events_limit = info.max_num_events;
962 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
963 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
964 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
965 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
967 /* Configure the device with 1 DIR port and queue */
968 ret = rte_event_dev_configure(evdev, &config);
970 printf("%d: Error configuring device\n", __LINE__);
974 /* Configure the ports and queues */
975 if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
976 printf("%d: Error querying default port conf\n", __LINE__);
/* Fixed: port config must use the PORT flag, not the QUEUE flag (the two
 * happen to share a value, but the queue flag is the wrong namespace here).
 */
980 port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;
982 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
983 printf("%d: port 0 setup expected to succeed\n",
988 if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
989 printf("%d: Error querying default queue conf\n", __LINE__);
993 queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
995 if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
996 printf("%d: queue 0 setup expected to succeed\n",
1004 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1005 printf("%d: port 0 link expected to succeed\n",
1010 /* Start the device */
1011 if (rte_event_dev_start(evdev) < 0) {
1012 printf("%d: device start failed\n", __LINE__);
1016 /* Enqueue 1 NEW event */
1017 ev.op = RTE_EVENT_OP_NEW;
1022 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1023 printf("%d: NEW enqueue expected to succeed\n",
1028 /* Dequeue and enqueue 1 FORWARD event */
1029 timeout = 0xFFFFFFFFF;
1030 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1031 printf("%d: event dequeue expected to succeed\n",
1036 if (ev.queue_id != 0) {
1037 printf("%d: invalid dequeued event queue ID (%d)\n",
1038 __LINE__, ev.queue_id);
1042 ev.op = RTE_EVENT_OP_FORWARD;
1044 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
/* Fixed copy-paste: this enqueue is a FORWARD op, not NEW. */
1045 printf("%d: FORWARD enqueue expected to succeed\n",
1050 /* Dequeue and enqueue 1 RELEASE operation */
1051 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1052 printf("%d: event dequeue expected to succeed\n",
1057 ev.op = RTE_EVENT_OP_RELEASE;
1059 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
/* Fixed copy-paste: this enqueue is a RELEASE op, not NEW. */
1060 printf("%d: RELEASE enqueue expected to succeed\n",
/* Verify deferred-pop token scheduling: with both ports in DEFERRED_POP mode
 * and a dequeue depth of 1, enqueue 128 parallel events, hold one on port 0,
 * and drain/release the remainder via port 1 — deferred scheduling must
 * prevent further events from landing on port 0 until it dequeues again.
 * Error-return/cleanup lines fall outside this visible chunk.
 */
1074 test_deferred_sched(void)
1077 struct rte_event_dev_config config = {0};
1078 struct rte_event_queue_conf queue_conf;
1079 struct rte_event_port_conf port_conf;
1080 struct rte_event_dev_info info;
1081 const int num_events = 128;
1082 struct rte_event ev;
1086 if (rte_event_dev_info_get(evdev, &info)) {
1087 printf("%d: Error querying device info\n", __LINE__);
1091 config.nb_event_queues = 1;
1092 config.nb_event_ports = 2;
1093 config.nb_single_link_event_port_queues = 0;
1094 config.nb_event_queue_flows = info.max_event_queue_flows;
1095 config.nb_events_limit = info.max_num_events;
1096 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
1097 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
1098 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
1099 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
1101 /* Configure the device with 2 LDB ports and 1 queue */
1102 ret = rte_event_dev_configure(evdev, &config);
1104 printf("%d: Error configuring device\n", __LINE__);
/* Put both ports in deferred-pop mode (DLB2 PMD-specific API). */
1108 ret = rte_pmd_dlb2_set_token_pop_mode(evdev, 0, DEFERRED_POP);
1110 printf("%d: Error setting deferred scheduling\n", __LINE__);
1114 ret = rte_pmd_dlb2_set_token_pop_mode(evdev, 1, DEFERRED_POP);
1116 printf("%d: Error setting deferred scheduling\n", __LINE__);
1120 /* Configure the ports and queues */
1121 if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
1122 printf("%d: Error querying default port conf\n", __LINE__);
/* Depth 1 so holding a single CQ entry exercises the deferred pop. */
1126 port_conf.dequeue_depth = 1;
1128 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1129 printf("%d: port 0 setup expected to succeed\n",
1134 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
1135 printf("%d: port 1 setup expected to succeed\n",
1140 if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
1141 printf("%d: Error querying default queue conf\n", __LINE__);
1145 queue_conf.schedule_type = RTE_SCHED_TYPE_PARALLEL;
1146 queue_conf.nb_atomic_order_sequences = 0;
1148 if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
1149 printf("%d: queue 0 setup expected to succeed\n",
1154 /* Link P0->Q0 and P1->Q0 */
1157 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1158 printf("%d: port 0 link expected to succeed\n",
1163 if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
1164 printf("%d: port 1 link expected to succeed\n",
1169 /* Start the device */
1170 if (rte_event_dev_start(evdev) < 0) {
1171 printf("%d: device start failed\n", __LINE__);
1175 /* Enqueue 128 NEW events */
1176 ev.op = RTE_EVENT_OP_NEW;
1177 ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
1182 for (i = 0; i < num_events; i++) {
1183 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1184 printf("%d: NEW enqueue expected to succeed\n",
1190 /* Dequeue one event from port 0 */
1191 timeout = 0xFFFFFFFFF;
1192 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1193 printf("%d: event dequeue expected to succeed\n",
1198 /* Dequeue (and release) all other events from port 1. Deferred
1199 * scheduling ensures no other events are scheduled to port 0 without a
1200 * subsequent rte_event_dequeue_burst() call.
1202 for (i = 0; i < num_events - 1; i++) {
1203 if (rte_event_dequeue_burst(evdev, 1, &ev, 1, timeout) != 1) {
1204 printf("%d: event dequeue expected to succeed\n",
1209 ev.op = RTE_EVENT_OP_RELEASE;
1211 if (rte_event_enqueue_burst(evdev, 1, &ev, 1) != 1) {
1212 printf("%d: RELEASE enqueue expected to succeed\n",
/* Verify delayed-pop token handling: with implicit releases disabled and
 * port 0 in DELAYED_POP mode, dequeue a full CQ's worth of events but
 * release one fewer — the token pop must not occur, so the next dequeue must
 * fail; releasing the final event triggers the pop and the second batch
 * becomes schedulable. Error-return/cleanup lines fall outside this visible
 * chunk.
 */
1227 test_delayed_pop(void)
1230 struct rte_event_dev_config config = {0};
1231 struct rte_event_queue_conf queue_conf;
1232 struct rte_event_port_conf port_conf;
1233 struct rte_event_dev_info info;
1234 int ret, i, num_events;
1235 struct rte_event ev;
1238 if (rte_event_dev_info_get(evdev, &info)) {
1239 printf("%d: Error querying device info\n", __LINE__);
1243 config.nb_event_queues = 1;
1244 config.nb_event_ports = 1;
1245 config.nb_single_link_event_port_queues = 0;
1246 config.nb_event_queue_flows = info.max_event_queue_flows;
1247 config.nb_events_limit = info.max_num_events;
1248 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
1249 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
1250 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
1251 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
1253 /* Configure the device with 1 LDB port and queue */
1254 ret = rte_event_dev_configure(evdev, &config);
1256 printf("%d: Error configuring device\n", __LINE__);
/* DLB2 PMD-specific API: pop tokens only when the app releases events. */
1260 ret = rte_pmd_dlb2_set_token_pop_mode(evdev, 0, DELAYED_POP);
1262 printf("%d: Error setting deferred scheduling\n", __LINE__);
1266 /* Configure the ports and queues */
1267 if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
1268 printf("%d: Error querying default port conf\n", __LINE__);
/* Explicit releases are required for delayed pop to be observable. */
1272 port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
1274 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1275 printf("%d: port 0 setup expected to succeed\n",
1280 if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
1281 printf("%d: Error querying default queue conf\n", __LINE__);
1285 if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
1286 printf("%d: queue 0 setup expected to succeed\n",
1294 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1295 printf("%d: port 0 link expected to succeed\n",
1300 /* Start the device */
1301 if (rte_event_dev_start(evdev) < 0) {
1302 printf("%d: device start failed\n", __LINE__);
1306 num_events = 2 * port_conf.dequeue_depth;
1308 /* Enqueue 2 * dequeue_depth NEW events */
1309 ev.op = RTE_EVENT_OP_NEW;
1310 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
1315 for (i = 0; i < num_events; i++) {
1316 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1317 printf("%d: NEW enqueue expected to succeed\n",
1323 /* Dequeue dequeue_depth events but only release dequeue_depth - 1.
1324 * Delayed pop won't perform the pop and no more events will be
1327 timeout = 0xFFFFFFFFF;
1329 for (i = 0; i < port_conf.dequeue_depth; i++) {
1330 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1331 printf("%d: event dequeue expected to succeed\n",
1337 ev.op = RTE_EVENT_OP_RELEASE;
1339 for (i = 0; i < port_conf.dequeue_depth - 1; i++) {
1340 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1341 printf("%d: RELEASE enqueue expected to succeed\n",
/* One token still outstanding: this dequeue is expected to come up empty. */
1349 ret = rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout);
1351 printf("%d: event dequeue expected to fail (ret = %d)\n",
1356 /* Release one more event. This will trigger the token pop, and
1357 * dequeue_depth more events will be scheduled to the device.
1359 ev.op = RTE_EVENT_OP_RELEASE;
1361 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1362 printf("%d: RELEASE enqueue expected to succeed\n",
1367 timeout = 0xFFFFFFFFF;
1369 for (i = 0; i < port_conf.dequeue_depth; i++) {
1370 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1371 printf("%d: event dequeue expected to succeed\n",
/* do_selftest body fragment (function header is outside this visible
 * chunk): lazily create the shared mbuf pool, then run each sub-test in
 * sequence, printing a banner before and an error after each. Fixes the
 * copy-paste failure message after test_info_get(), which reported "Stop
 * Flush test FAILED". Control-flow/return lines fall outside this chunk.
 */
1391 /* Only create mbuf pool once, reuse for each test run */
1392 if (!eventdev_func_mempool) {
1393 eventdev_func_mempool =
1394 rte_pktmbuf_pool_create("EVENTDEV_DLB2_ST_POOL",
1395 (1 << 12), /* 4k buffers */
1396 32 /*MBUF_CACHE_SIZE*/,
1398 512, /* use very small mbufs */
1400 if (!eventdev_func_mempool) {
1401 printf("ERROR creating mempool\n");
1405 t.mbuf_pool = eventdev_func_mempool;
1407 printf("*** Running Stop Flush test...\n");
1408 ret = test_stop_flush(&t);
1410 printf("ERROR - Stop Flush test FAILED.\n");
1414 printf("*** Running Single Link test...\n");
1415 ret = test_single_link();
1417 printf("ERROR - Single Link test FAILED.\n");
1422 printf("*** Running Info Get test...\n");
1423 ret = test_info_get();
/* Fixed copy-paste: this is the Info Get test's failure path. */
1425 printf("ERROR - Info Get test FAILED.\n");
1429 printf("*** Running Reconfiguration Link test...\n");
1430 ret = test_reconfiguration_link();
1432 printf("ERROR - Reconfiguration Link test FAILED.\n");
1437 printf("*** Running Load-Balanced Traffic test...\n");
1438 ret = test_load_balanced_traffic();
1440 printf("ERROR - Load-Balanced Traffic test FAILED.\n");
1445 printf("*** Running Directed Traffic test...\n");
1446 ret = test_directed_traffic();
1448 printf("ERROR - Directed Traffic test FAILED.\n");
1453 printf("*** Running Deferred Scheduling test...\n");
1454 ret = test_deferred_sched();
1456 printf("ERROR - Deferred Scheduling test FAILED.\n");
1461 printf("*** Running Delayed Pop test...\n");
1462 ret = test_delayed_pop();
1464 printf("ERROR - Delayed Pop test FAILED.\n");
/* Selftest entry point: scan the eventdevs, run do_selftest() on each DLB2
 * device, and print per-device plus summary results. Fixes the driver-name
 * match: strncmp's length was sizeof(*info.driver_name) == sizeof(char) == 1,
 * which compared only the first character ('d') and would match unrelated
 * drivers; use the prefix length instead (string.h is already required for
 * the existing strncmp call). The function's tail (final return) is outside
 * this visible chunk.
 */
1476 test_dlb2_eventdev(void)
1478 const char *dlb2_eventdev_name = "dlb2_event";
1479 uint8_t num_evdevs = rte_event_dev_count();
1481 int found = 0, skipped = 0, passed = 0, failed = 0;
1482 struct rte_event_dev_info info;
1484 for (i = 0; found + skipped < num_evdevs && i < RTE_EVENT_MAX_DEVS;
1486 ret = rte_event_dev_info_get(i, &info);
1490 /* skip non-dlb2 event devices */
1491 if (strncmp(info.driver_name, dlb2_eventdev_name,
1492 strlen(dlb2_eventdev_name)) != 0) {
1497 evdev = rte_event_dev_get_dev_id(info.driver_name);
1499 printf("Could not get dev_id for eventdev with name %s, i=%d\n",
1500 info.driver_name, i);
1505 printf("Running selftest on eventdev %s\n", info.driver_name);
1506 ret = do_selftest();
1509 printf("Selftest passed for eventdev %s\n",
1513 printf("Selftest failed for eventdev %s, err=%d\n",
1514 info.driver_name, ret);
1518 printf("Ran selftest on %d eventdevs, %d skipped, %d passed, %d failed\n",
1519 found, skipped, passed, failed);