1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2020 Intel Corporation
10 #include <sys/queue.h>
12 #include <rte_memory.h>
13 #include <rte_memzone.h>
14 #include <rte_launch.h>
16 #include <rte_lcore.h>
17 #include <rte_debug.h>
18 #include <rte_cycles.h>
19 #include <rte_eventdev.h>
20 #include <rte_mempool.h>
24 #include "rte_pmd_dlb.h"
28 #define DEFAULT_NUM_SEQ_NUMS 32
/* Process-wide mbuf pool: created once in do_selftest() and reused across
 * every subsequent selftest invocation (see "Only create mbuf pool once").
 */
30 static struct rte_mempool *eventdev_func_mempool;
/* NOTE(review): extraction gap — this field almost certainly belongs inside
 * the 'struct test' definition whose braces are not visible in this chunk
 * (init() and the tests access it as t->mbuf_pool). Confirm against the
 * full file.
 */
34 struct rte_mempool *mbuf_pool;
38 /* initialization and config */
/*
 * init() - reset the test context and configure the event device with
 * nb_queues load-balanced queues and nb_ports ports, sizing every other
 * config field to the device's advertised maximum so individual tests do
 * not hit artificial resource caps.
 *
 * Returns the result of rte_event_dev_configure() on the visible path;
 * error branches print the failing __LINE__ (and, per the file's style,
 * presumably return -1 — the return statements are missing from this
 * extraction, along with the 'int ret;' declaration and closing braces).
 */
40 init(struct test *t, int nb_queues, int nb_ports)
42 struct rte_event_dev_config config = {0};
43 struct rte_event_dev_info info;
46 memset(t, 0, sizeof(*t));
48 t->mbuf_pool = eventdev_func_mempool;
50 if (rte_event_dev_info_get(evdev, &info)) {
51 printf("%d: Error querying device info\n", __LINE__);
55 config.nb_event_queues = nb_queues;
56 config.nb_event_ports = nb_ports;
/* Use the device maxima for all remaining limits. */
57 config.nb_event_queue_flows = info.max_event_queue_flows;
58 config.nb_events_limit = info.max_num_events;
59 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
60 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
61 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
62 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
64 ret = rte_event_dev_configure(evdev, &config);
66 printf("%d: Error configuring device\n", __LINE__);
/*
 * create_ports() - set up 'num_ports' event ports (IDs 0..num_ports-1)
 * using each port's default configuration. Fails if num_ports exceeds
 * MAX_PORTS or any query/setup call fails.
 * NOTE(review): extraction gaps — return statements and closing braces
 * are missing from this chunk; code kept verbatim.
 */
72 create_ports(int num_ports)
76 if (num_ports > MAX_PORTS)
79 for (i = 0; i < num_ports; i++) {
80 struct rte_event_port_conf conf;
82 if (rte_event_port_default_conf_get(evdev, i, &conf)) {
83 printf("%d: Error querying default port conf\n",
88 if (rte_event_port_setup(evdev, i, &conf) < 0) {
89 printf("%d: Error setting up port %d\n", __LINE__, i);
/*
 * create_lb_qids() - create 'num_qids' load-balanced queues starting at
 * the next free queue ID (t->nb_qids), all with schedule type 'flags'.
 * Parallel queues get zero ordered sequence numbers; other types get
 * DEFAULT_NUM_SEQ_NUMS. Advances t->nb_qids and checks it against
 * MAX_QIDS afterwards.
 * NOTE(review): extraction gaps — returns/braces missing; kept verbatim.
 */
98 create_lb_qids(struct test *t, int num_qids, uint32_t flags)
102 for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
103 struct rte_event_queue_conf conf;
105 if (rte_event_queue_default_conf_get(evdev, i, &conf)) {
106 printf("%d: Error querying default queue conf\n",
111 conf.schedule_type = flags;
/* Parallel queues need no reorder resources. */
113 if (conf.schedule_type == RTE_SCHED_TYPE_PARALLEL)
114 conf.nb_atomic_order_sequences = 0;
116 conf.nb_atomic_order_sequences = DEFAULT_NUM_SEQ_NUMS;
118 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
119 printf("%d: error creating qid %d\n", __LINE__, i);
124 t->nb_qids += num_qids;
125 if (t->nb_qids > MAX_QIDS)
/* Convenience wrapper: create 'num_qids' atomic load-balanced queues. */
132 create_atomic_qids(struct test *t, int num_qids)
134 return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
/* NOTE(review): body of the (header-missing) cleanup helper — stop the
 * device, then close it and propagate the close result.
 */
141 rte_event_dev_stop(evdev);
142 return rte_event_dev_close(evdev);
/*
 * enqueue_timeout() - retry a single-event enqueue on 'port_id' for up to
 * 'tmo_us' microseconds; succeeds as soon as one enqueue lands.
 */
146 enqueue_timeout(uint8_t port_id, struct rte_event *ev, uint64_t tmo_us)
148 const uint64_t start = rte_get_timer_cycles();
/* 1E6 is a double literal, so this computes in floating point before
 * truncating back to uint64_t — functional, but integer /1000000 would
 * avoid the conversion.
 */
149 const uint64_t ticks = (tmo_us * rte_get_timer_hz()) / 1E6;
151 while ((rte_get_timer_cycles() - start) < ticks) {
152 if (rte_event_enqueue_burst(evdev, port_id, ev, 1) == 1)
/* NOTE(review): compares rte_errno against a NEGATIVE errno. The eventdev
 * enqueue API of this era documented rte_errno = -ENOSPC on backpressure,
 * which this matches, but later DPDK uses positive errnos — confirm the
 * convention for the targeted release.
 */
155 if (rte_errno != -ENOSPC)
/* Device-stop flush callback: free the mbuf carried by each event that is
 * drained when the device stops (registered in test_stop_flush()).
 */
163 flush(uint8_t id __rte_unused, struct rte_event event, void *arg __rte_unused)
165 rte_pktmbuf_free(event.mbuf);
/*
 * test_stop_flush() - verify the PMD invokes the stop-flush callback for
 * events left in both a linked queue (0) and an unlinked queue (1) when the
 * device stops: fill both queues past the port's dequeue depth, register
 * flush() which frees each drained mbuf, stop the device, then check the
 * mempool's available count returned to its pre-enqueue value.
 * NOTE(review): extraction gaps — 'struct rte_event ev', 'queue_id',
 * returns and braces are missing from this chunk; code kept verbatim.
 */
169 test_stop_flush(struct test *t) /* test to check we can properly flush events */
172 uint32_t dequeue_depth;
173 unsigned int i, count;
176 ev.op = RTE_EVENT_OP_NEW;
178 if (init(t, 2, 1) < 0 ||
179 create_ports(1) < 0 ||
180 create_atomic_qids(t, 2) < 0) {
181 printf("%d: Error initializing device\n", __LINE__);
/* NULL/0 links port 0 to all (both) queues. */
185 if (rte_event_port_link(evdev, 0, NULL, NULL, 0) != 2) {
186 printf("%d: Error linking queues to the port\n", __LINE__);
190 if (rte_event_dev_start(evdev) < 0) {
191 printf("%d: Error with start call\n", __LINE__);
195 /* Unlink queue 1 so the PMD's stop callback has to cleanup an unlinked
200 if (rte_event_port_unlink(evdev, 0, &queue_id, 1) != 1) {
201 printf("%d: Error unlinking queue 1 from port\n", __LINE__);
/* Baseline mempool count; compared again after the stop-flush ran. */
205 count = rte_mempool_avail_count(t->mbuf_pool);
207 if (rte_event_port_attr_get(evdev,
209 RTE_EVENT_PORT_ATTR_DEQ_DEPTH,
211 printf("%d: Error retrieveing dequeue depth\n", __LINE__);
215 /* Send QEs to queue 0 */
216 for (i = 0; i < dequeue_depth + 1; i++) {
217 ev.mbuf = rte_pktmbuf_alloc(t->mbuf_pool);
219 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
221 if (enqueue_timeout(0, &ev, 1000)) {
222 printf("%d: Error enqueuing events\n", __LINE__);
227 /* Send QEs to queue 1 */
228 for (i = 0; i < dequeue_depth + 1; i++) {
229 ev.mbuf = rte_pktmbuf_alloc(t->mbuf_pool);
231 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
233 if (enqueue_timeout(0, &ev, 1000)) {
234 printf("%d: Error enqueuing events\n", __LINE__);
239 /* Now the DLB is scheduling events from the port to the IQ, and at
240 * least one event should be remaining in each queue.
243 if (rte_event_dev_stop_flush_callback_register(evdev, flush, NULL)) {
244 printf("%d: Error installing the flush callback\n", __LINE__);
/* All flushed mbufs must have been freed back to the pool. */
250 if (count != rte_mempool_avail_count(t->mbuf_pool)) {
251 printf("%d: Error executing the flush callback\n", __LINE__);
255 if (rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL)) {
256 printf("%d: Error uninstalling the flush callback\n", __LINE__);
/*
 * test_single_link() - exercise single-link (directed) resource limits:
 * with nb_single_link_event_port_queues = 1, exactly one directed port and
 * one directed queue may be created (a second of each must fail), ports
 * and queues of mismatched types must refuse to link, and matched pairs
 * (DIR port 0 <-> DIR queue 0, LDB port 1 <-> LDB queue 1) must link.
 * NOTE(review): extraction gaps — 'queue_id' assignments between the link
 * calls, returns and braces are missing; code kept verbatim.
 */
267 test_single_link(void)
269 struct rte_event_dev_config config = {0};
270 struct rte_event_queue_conf queue_conf;
271 struct rte_event_port_conf port_conf;
272 struct rte_event_dev_info info;
276 if (rte_event_dev_info_get(evdev, &info)) {
277 printf("%d: Error querying device info\n", __LINE__);
281 config.nb_event_queues = 2;
282 config.nb_event_ports = 2;
283 config.nb_single_link_event_port_queues = 1;
284 config.nb_event_queue_flows = info.max_event_queue_flows;
285 config.nb_events_limit = info.max_num_events;
286 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
287 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
288 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
289 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
291 ret = rte_event_dev_configure(evdev, &config);
293 printf("%d: Error configuring device\n", __LINE__);
297 /* Create a directed port */
298 if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
299 printf("%d: Error querying default port conf\n", __LINE__);
303 port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;
305 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
306 printf("%d: port 0 setup expected to succeed\n", __LINE__);
310 /* Attempt to create another directed port */
/* Only 1 single-link port was reserved, so this must fail. */
311 if (rte_event_port_setup(evdev, 1, &port_conf) == 0) {
312 printf("%d: port 1 setup expected to fail\n", __LINE__);
316 port_conf.event_port_cfg = 0;
318 /* Create a load-balanced port */
319 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
320 printf("%d: port 1 setup expected to succeed\n", __LINE__);
324 /* Create a directed queue */
325 if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
326 printf("%d: Error querying default queue conf\n", __LINE__);
330 queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
332 if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
333 printf("%d: queue 0 setup expected to succeed\n", __LINE__);
337 /* Attempt to create another directed queue */
338 if (rte_event_queue_setup(evdev, 1, &queue_conf) == 0) {
339 printf("%d: queue 1 setup expected to fail\n", __LINE__);
343 /* Create a load-balanced queue */
344 queue_conf.event_queue_cfg = 0;
346 if (rte_event_queue_setup(evdev, 1, &queue_conf) < 0) {
347 printf("%d: queue 1 setup expected to succeed\n", __LINE__);
351 /* Attempt to link directed and load-balanced resources */
353 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) == 1) {
354 printf("%d: port 0 link expected to fail\n", __LINE__);
359 if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) == 1) {
360 printf("%d: port 1 link expected to fail\n", __LINE__);
364 /* Link ports to queues */
366 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
367 printf("%d: port 0 link expected to succeed\n", __LINE__);
372 if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
373 printf("%d: port 1 link expected to succeed\n", __LINE__);
377 return rte_event_dev_close(evdev);
380 rte_event_dev_close(evdev);
384 #define NUM_LDB_PORTS 64
385 #define NUM_LDB_QUEUES 128
/*
 * test_info_get() - verify that rte_event_dev_info_get() reports only
 * load-balanced resources (64 ports / 128 queues on this device), both
 * before configuration and after configuring a mix that also includes
 * directed ports/queues (nb_single_link_event_port_queues).
 * NOTE(review): the function signature line and several returns/braces are
 * missing from this extraction; the name is taken from the do_selftest()
 * caller. Code kept verbatim.
 */
390 struct rte_event_dev_config config = {0};
391 struct rte_event_dev_info info;
394 if (rte_event_dev_info_get(evdev, &info)) {
395 printf("%d: Error querying device info\n", __LINE__);
399 if (info.max_event_ports != NUM_LDB_PORTS) {
400 printf("%d: Got %u ports, expected %u\n",
401 __LINE__, info.max_event_ports, NUM_LDB_PORTS);
405 if (info.max_event_queues != NUM_LDB_QUEUES) {
406 printf("%d: Got %u queues, expected %u\n",
407 __LINE__, info.max_event_queues, NUM_LDB_QUEUES);
/* Configure with extra directed resources: half the ports as
 * single-link, and one directed queue per single-link port.
 */
411 config.nb_event_ports = info.max_event_ports;
412 config.nb_event_queues = NUM_LDB_QUEUES + info.max_event_ports / 2;
413 config.nb_single_link_event_port_queues = info.max_event_ports / 2;
414 config.nb_event_queue_flows = info.max_event_queue_flows;
415 config.nb_events_limit = info.max_num_events;
416 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
417 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
418 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
419 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
421 ret = rte_event_dev_configure(evdev, &config);
423 printf("%d: Error configuring device\n", __LINE__);
427 if (rte_event_dev_info_get(evdev, &info)) {
428 printf("%d: Error querying device info\n", __LINE__);
432 /* The DLB PMD only reports load-balanced ports and queues in its
433 * info_get function. Confirm that these values don't include the
434 * directed port or queue counts.
437 if (info.max_event_ports != NUM_LDB_PORTS) {
438 printf("%d: Got %u ports, expected %u\n",
439 __LINE__, info.max_event_ports, NUM_LDB_PORTS);
443 if (info.max_event_queues != NUM_LDB_QUEUES) {
444 printf("%d: Got %u queues, expected %u\n",
445 __LINE__, info.max_event_queues, NUM_LDB_QUEUES);
449 ret = rte_event_dev_close(evdev);
451 printf("rte_event_dev_close err %d\n", ret);
458 rte_event_dev_close(evdev);
/*
 * test_reconfiguration_link() - run the device through several
 * configure/setup/link/start/stop cycles (LDB-only, DIR-only, mixed, and
 * grow-by-one-port), verifying that resources left unconfigured across a
 * reconfigure are re-applied by the PMD and that all links succeed.
 *
 * Fixes in this revision (error messages only, no logic change):
 *  - the queue-0 setup failure message wrongly said "queue 1";
 *  - the last-port setup failure message wrongly said "port 1".
 * NOTE(review): extraction gaps — 'queue_id' assignments, returns and
 * braces are missing from this chunk; all other code kept verbatim.
 */
463 test_reconfiguration_link(void)
465 struct rte_event_dev_config config = {0};
466 struct rte_event_queue_conf queue_conf;
467 struct rte_event_port_conf port_conf;
468 struct rte_event_dev_info info;
472 if (rte_event_dev_info_get(evdev, &info)) {
473 printf("%d: Error querying device info\n", __LINE__);
477 config.nb_event_queues = 2;
478 config.nb_event_ports = 2;
479 config.nb_single_link_event_port_queues = 0;
480 config.nb_event_queue_flows = info.max_event_queue_flows;
481 config.nb_events_limit = info.max_num_events;
482 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
483 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
484 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
485 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
487 /* Configure the device with 2 LDB ports and 2 LDB queues */
488 ret = rte_event_dev_configure(evdev, &config);
490 printf("%d: Error configuring device\n", __LINE__);
494 /* Configure the ports and queues */
495 if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
496 printf("%d: Error querying default port conf\n", __LINE__);
500 for (i = 0; i < 2; i++) {
501 if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
502 printf("%d: port %d setup expected to succeed\n",
508 if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
509 printf("%d: Error querying default queue conf\n", __LINE__);
513 for (i = 0; i < 2; i++) {
514 if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
515 printf("%d: queue %d setup expected to succeed\n",
521 /* Link P0->Q0 and P1->Q1 */
522 for (i = 0; i < 2; i++) {
525 if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
526 printf("%d: port %d link expected to succeed\n",
532 /* Start the device */
533 if (rte_event_dev_start(evdev) < 0) {
534 printf("%d: device start failed\n", __LINE__);
538 /* Stop the device */
539 rte_event_dev_stop(evdev);
541 /* Reconfigure device */
542 ret = rte_event_dev_configure(evdev, &config);
544 printf("%d: Error re-configuring device\n", __LINE__);
548 /* Configure P1 and Q1, leave P0 and Q0 to be configured by the PMD. */
549 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
550 printf("%d: port 1 setup expected to succeed\n",
555 if (rte_event_queue_setup(evdev, 1, &queue_conf) < 0) {
556 printf("%d: queue 1 setup expected to succeed\n",
561 /* Link P0->Q0 and Q1 */
562 for (i = 0; i < 2; i++) {
565 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
566 printf("%d: P0->Q%d link expected to succeed\n",
572 /* Link P1->Q0 and Q1 */
573 for (i = 0; i < 2; i++) {
576 if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
577 printf("%d: P1->Q%d link expected to succeed\n",
583 /* Start the device */
584 if (rte_event_dev_start(evdev) < 0) {
585 printf("%d: device start failed\n", __LINE__);
589 /* Stop the device */
590 rte_event_dev_stop(evdev);
592 /* Configure device with 2 DIR ports and 2 DIR queues */
593 config.nb_single_link_event_port_queues = 2;
595 ret = rte_event_dev_configure(evdev, &config);
597 printf("%d: Error configuring device\n", __LINE__);
601 /* Configure the ports and queues */
602 port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;
604 for (i = 0; i < 2; i++) {
605 if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
606 printf("%d: port %d setup expected to succeed\n",
612 queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
614 for (i = 0; i < 2; i++) {
615 if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
616 printf("%d: queue %d setup expected to succeed\n",
622 /* Link P0->Q0 and P1->Q1 */
623 for (i = 0; i < 2; i++) {
626 if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
627 printf("%d: port %d link expected to succeed\n",
633 /* Start the device */
634 if (rte_event_dev_start(evdev) < 0) {
635 printf("%d: device start failed\n", __LINE__);
639 /* Stop the device */
640 rte_event_dev_stop(evdev);
642 /* Reconfigure device */
643 ret = rte_event_dev_configure(evdev, &config);
645 printf("%d: Error re-configuring device\n", __LINE__);
649 /* Configure P1 and Q0, leave P0 and Q1 to be configured by the PMD. */
650 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
651 printf("%d: port 1 setup expected to succeed\n",
656 if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
/* Fixed copy-paste: this path sets up queue 0, not queue 1. */
657 printf("%d: queue 0 setup expected to succeed\n",
665 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
666 printf("%d: P0->Q%d link expected to succeed\n",
674 if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
675 printf("%d: P1->Q%d link expected to succeed\n",
680 /* Start the device */
681 if (rte_event_dev_start(evdev) < 0) {
682 printf("%d: device start failed\n", __LINE__);
686 rte_event_dev_stop(evdev);
688 config.nb_event_queues = 5;
689 config.nb_event_ports = 5;
690 config.nb_single_link_event_port_queues = 1;
692 ret = rte_event_dev_configure(evdev, &config);
694 printf("%d: Error re-configuring device\n", __LINE__);
698 for (i = 0; i < config.nb_event_queues - 1; i++) {
699 port_conf.event_port_cfg = 0;
700 queue_conf.event_queue_cfg = 0;
702 if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
703 printf("%d: port %d setup expected to succeed\n",
708 if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
709 printf("%d: queue %d setup expected to succeed\n",
716 if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
717 printf("%d: P%d->Q%d link expected to succeed\n",
723 port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;
724 queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
726 if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
727 printf("%d: port %d setup expected to succeed\n",
732 if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
733 printf("%d: queue %d setup expected to succeed\n",
740 if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
741 printf("%d: P%d->Q%d link expected to succeed\n",
746 /* Start the device */
747 if (rte_event_dev_start(evdev) < 0) {
748 printf("%d: device start failed\n", __LINE__);
752 /* Stop the device */
753 rte_event_dev_stop(evdev);
755 config.nb_event_ports += 1;
757 /* Reconfigure device with 1 more load-balanced port */
758 ret = rte_event_dev_configure(evdev, &config);
760 printf("%d: Error re-configuring device\n", __LINE__);
764 port_conf.event_port_cfg = 0;
766 /* Configure the new port */
767 if (rte_event_port_setup(evdev, config.nb_event_ports - 1,
/* Fixed copy-paste: this sets up the newly-added last port, not port 1. */
769 printf("%d: new port setup expected to succeed\n",
774 /* Start the device */
775 if (rte_event_dev_start(evdev) < 0) {
776 printf("%d: device start failed\n", __LINE__);
/*
 * test_load_balanced_traffic() - smoke-test the LDB datapath with one port
 * and one atomic queue: enqueue a NEW event, dequeue it and FORWARD it,
 * dequeue again and RELEASE it.
 *
 * Fixes in this revision (error messages only, no logic change): the
 * FORWARD- and RELEASE-enqueue failure messages both wrongly said "NEW
 * enqueue" (copy-paste from the first enqueue).
 * NOTE(review): extraction gaps — 'ev'/'queue_id'/'timeout' declarations,
 * returns and braces are missing; all other code kept verbatim.
 */
789 test_load_balanced_traffic(void)
792 struct rte_event_dev_config config = {0};
793 struct rte_event_queue_conf queue_conf;
794 struct rte_event_port_conf port_conf;
795 struct rte_event_dev_info info;
800 if (rte_event_dev_info_get(evdev, &info)) {
801 printf("%d: Error querying device info\n", __LINE__);
805 config.nb_event_queues = 1;
806 config.nb_event_ports = 1;
807 config.nb_single_link_event_port_queues = 0;
808 config.nb_event_queue_flows = info.max_event_queue_flows;
809 config.nb_events_limit = info.max_num_events;
810 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
811 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
812 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
813 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
815 /* Configure the device with 1 LDB port and queue */
816 ret = rte_event_dev_configure(evdev, &config);
818 printf("%d: Error configuring device\n", __LINE__);
822 /* Configure the ports and queues */
823 if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
824 printf("%d: Error querying default port conf\n", __LINE__);
828 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
829 printf("%d: port 0 setup expected to succeed\n",
834 if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
835 printf("%d: Error querying default queue conf\n", __LINE__);
839 if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
840 printf("%d: queue 0 setup expected to succeed\n",
848 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
849 printf("%d: port 0 link expected to succeed\n",
854 /* Start the device */
855 if (rte_event_dev_start(evdev) < 0) {
856 printf("%d: device start failed\n", __LINE__);
860 /* Enqueue 1 NEW event */
861 ev.op = RTE_EVENT_OP_NEW;
862 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
867 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
868 printf("%d: NEW enqueue expected to succeed\n",
873 /* Dequeue and enqueue 1 FORWARD event */
874 timeout = 0xFFFFFFFFF;
875 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
876 printf("%d: event dequeue expected to succeed\n",
881 ev.op = RTE_EVENT_OP_FORWARD;
883 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
/* Fixed copy-paste: this is the FORWARD enqueue, not NEW. */
884 printf("%d: FORWARD enqueue expected to succeed\n",
889 /* Dequeue and enqueue 1 RELEASE operation */
890 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
891 printf("%d: event dequeue expected to succeed\n",
896 ev.op = RTE_EVENT_OP_RELEASE;
898 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
/* Fixed copy-paste: this is the RELEASE enqueue, not NEW. */
899 printf("%d: RELEASE enqueue expected to succeed\n",
/*
 * test_directed_traffic() - smoke-test the directed (single-link) datapath
 * with one DIR port and one DIR queue: NEW -> dequeue/FORWARD ->
 * dequeue/RELEASE, also checking the dequeued event's queue ID.
 *
 * Fixes in this revision:
 *  - port_conf.event_port_cfg was assigned the QUEUE flag
 *    RTE_EVENT_QUEUE_CFG_SINGLE_LINK; the port flag is
 *    RTE_EVENT_PORT_CFG_SINGLE_LINK (the two enums are distinct
 *    namespaces and are not guaranteed to share values);
 *  - FORWARD/RELEASE enqueue failure messages wrongly said "NEW enqueue".
 * NOTE(review): extraction gaps — 'ev'/'queue_id'/'timeout' declarations,
 * returns and braces are missing; all other code kept verbatim.
 */
913 test_directed_traffic(void)
916 struct rte_event_dev_config config = {0};
917 struct rte_event_queue_conf queue_conf;
918 struct rte_event_port_conf port_conf;
919 struct rte_event_dev_info info;
924 if (rte_event_dev_info_get(evdev, &info)) {
925 printf("%d: Error querying device info\n", __LINE__);
929 config.nb_event_queues = 1;
930 config.nb_event_ports = 1;
931 config.nb_single_link_event_port_queues = 1;
932 config.nb_event_queue_flows = info.max_event_queue_flows;
933 config.nb_events_limit = info.max_num_events;
934 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
935 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
936 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
937 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
939 /* Configure the device with 1 DIR port and queue */
940 ret = rte_event_dev_configure(evdev, &config);
942 printf("%d: Error configuring device\n", __LINE__);
946 /* Configure the ports and queues */
947 if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
948 printf("%d: Error querying default port conf\n", __LINE__);
/* Fixed: use the PORT single-link flag, not the queue flag. */
952 port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;
954 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
955 printf("%d: port 0 setup expected to succeed\n",
960 if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
961 printf("%d: Error querying default queue conf\n", __LINE__);
965 queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
967 if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
968 printf("%d: queue 0 setup expected to succeed\n",
976 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
977 printf("%d: port 0 link expected to succeed\n",
982 /* Start the device */
983 if (rte_event_dev_start(evdev) < 0) {
984 printf("%d: device start failed\n", __LINE__);
988 /* Enqueue 1 NEW event */
989 ev.op = RTE_EVENT_OP_NEW;
994 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
995 printf("%d: NEW enqueue expected to succeed\n",
1000 /* Dequeue and enqueue 1 FORWARD event */
1001 timeout = 0xFFFFFFFFF;
1002 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1003 printf("%d: event dequeue expected to succeed\n",
1008 if (ev.queue_id != 0) {
1009 printf("%d: invalid dequeued event queue ID (%d)\n",
1010 __LINE__, ev.queue_id);
1014 ev.op = RTE_EVENT_OP_FORWARD;
1016 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
/* Fixed copy-paste: this is the FORWARD enqueue, not NEW. */
1017 printf("%d: FORWARD enqueue expected to succeed\n",
1022 /* Dequeue and enqueue 1 RELEASE operation */
1023 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1024 printf("%d: event dequeue expected to succeed\n",
1029 ev.op = RTE_EVENT_OP_RELEASE;
1031 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
/* Fixed copy-paste: this is the RELEASE enqueue, not NEW. */
1032 printf("%d: RELEASE enqueue expected to succeed\n",
/*
 * test_deferred_sched() - verify DEFERRED_POP token pop mode: with two LDB
 * ports (dequeue_depth 1) linked to one parallel queue, enqueue 128 NEW
 * events, dequeue two from port 0 (one extra due to the PMD's reserved
 * token scheme), then drain-and-RELEASE the remaining events on port 1 —
 * deferred scheduling must prevent any further events from landing on
 * port 0 until it dequeues again.
 * NOTE(review): extraction gaps — 'timeout'/'queue_id'/'i' declarations,
 * returns and braces are missing; code kept verbatim.
 */
1046 test_deferred_sched(void)
1049 struct rte_event_dev_config config = {0};
1050 struct rte_event_queue_conf queue_conf;
1051 struct rte_event_port_conf port_conf;
1052 struct rte_event_dev_info info;
1053 const int num_events = 128;
1054 struct rte_event ev;
1058 if (rte_event_dev_info_get(evdev, &info)) {
1059 printf("%d: Error querying device info\n", __LINE__);
1063 config.nb_event_queues = 1;
1064 config.nb_event_ports = 2;
1065 config.nb_single_link_event_port_queues = 0;
1066 config.nb_event_queue_flows = info.max_event_queue_flows;
1067 config.nb_events_limit = info.max_num_events;
1068 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
1069 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
1070 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
1071 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
1073 /* Configure the device with 2 LDB ports and 1 queue */
1074 ret = rte_event_dev_configure(evdev, &config);
1076 printf("%d: Error configuring device\n", __LINE__);
/* Token pop mode must be set after configure but before port setup. */
1080 ret = rte_pmd_dlb_set_token_pop_mode(evdev, 0, DEFERRED_POP);
1082 printf("%d: Error setting deferred scheduling\n", __LINE__);
1086 ret = rte_pmd_dlb_set_token_pop_mode(evdev, 1, DEFERRED_POP);
1088 printf("%d: Error setting deferred scheduling\n", __LINE__);
1092 /* Configure the ports and queues */
1093 if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
1094 printf("%d: Error querying default port conf\n", __LINE__);
1098 port_conf.dequeue_depth = 1;
1100 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1101 printf("%d: port 0 setup expected to succeed\n",
1106 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
1107 printf("%d: port 1 setup expected to succeed\n",
1112 if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
1113 printf("%d: Error querying default queue conf\n", __LINE__);
1117 queue_conf.schedule_type = RTE_SCHED_TYPE_PARALLEL;
1118 queue_conf.nb_atomic_order_sequences = 0;
1120 if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
1121 printf("%d: queue 0 setup expected to succeed\n",
1126 /* Link P0->Q0 and P1->Q0 */
1129 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1130 printf("%d: port 0 link expected to succeed\n",
1135 if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
1136 printf("%d: port 1 link expected to succeed\n",
1141 /* Start the device */
1142 if (rte_event_dev_start(evdev) < 0) {
1143 printf("%d: device start failed\n", __LINE__);
1147 /* Enqueue 128 NEW events */
1148 ev.op = RTE_EVENT_OP_NEW;
1149 ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
1154 for (i = 0; i < num_events; i++) {
1155 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1156 printf("%d: NEW enqueue expected to succeed\n",
1162 /* Dequeue two events from port 0 (dequeue_depth * 2 due to the
1163 * reserved token scheme)
1165 timeout = 0xFFFFFFFFF;
1166 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1167 printf("%d: event dequeue expected to succeed\n",
1172 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1173 printf("%d: event dequeue expected to succeed\n",
1178 /* Dequeue (and release) all other events from port 1. Deferred
1179 * scheduling ensures no other events are scheduled to port 0 without a
1180 * subsequent rte_event_dequeue_burst() call.
1182 for (i = 0; i < num_events - 2; i++) {
1183 if (rte_event_dequeue_burst(evdev, 1, &ev, 1, timeout) != 1) {
1184 printf("%d: event dequeue expected to succeed\n",
1189 ev.op = RTE_EVENT_OP_RELEASE;
1191 if (rte_event_enqueue_burst(evdev, 1, &ev, 1) != 1) {
1192 printf("%d: RELEASE enqueue expected to succeed\n",
/*
 * test_delayed_pop() - verify DELAYED_POP token pop mode on a single LDB
 * port (dequeue_depth 16, implicit release disabled): the pop only occurs
 * after dequeue_depth releases, so after dequeuing dequeue_depth events and
 * releasing only dequeue_depth - 1, a further dequeue must time out; the
 * final release triggers the pop and the next batch becomes schedulable.
 * NOTE(review): extraction gaps — 'timeout'/'queue_id' declarations,
 * returns and braces are missing; code kept verbatim.
 */
1207 test_delayed_pop(void)
1210 struct rte_event_dev_config config = {0};
1211 struct rte_event_queue_conf queue_conf;
1212 struct rte_event_port_conf port_conf;
1213 struct rte_event_dev_info info;
1214 int ret, i, num_events;
1215 struct rte_event ev;
1218 if (rte_event_dev_info_get(evdev, &info)) {
1219 printf("%d: Error querying device info\n", __LINE__);
1223 config.nb_event_queues = 1;
1224 config.nb_event_ports = 1;
1225 config.nb_single_link_event_port_queues = 0;
1226 config.nb_event_queue_flows = info.max_event_queue_flows;
1227 config.nb_events_limit = info.max_num_events;
1228 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
1229 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
1230 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
1231 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
1233 /* Configure the device with 1 LDB port and queue */
1234 ret = rte_event_dev_configure(evdev, &config);
1236 printf("%d: Error configuring device\n", __LINE__);
1240 ret = rte_pmd_dlb_set_token_pop_mode(evdev, 0, DELAYED_POP);
1242 printf("%d: Error setting deferred scheduling\n", __LINE__);
1246 /* Configure the ports and queues */
1247 if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
1248 printf("%d: Error querying default port conf\n", __LINE__);
/* Delayed pop requires the app to release explicitly. */
1252 port_conf.dequeue_depth = 16;
1253 port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
1255 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1256 printf("%d: port 0 setup expected to succeed\n",
1261 if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
1262 printf("%d: Error querying default queue conf\n", __LINE__);
1266 if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
1267 printf("%d: queue 0 setup expected to succeed\n",
1275 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1276 printf("%d: port 0 link expected to succeed\n",
1281 /* Start the device */
1282 if (rte_event_dev_start(evdev) < 0) {
1283 printf("%d: device start failed\n", __LINE__);
1287 num_events = 2 * port_conf.dequeue_depth;
1289 /* Enqueue 2 * dequeue_depth NEW events. Due to the PMD's reserved
1290 * token scheme, the port will initially behave as though its
1291 * dequeue_depth is twice the requested size.
1293 ev.op = RTE_EVENT_OP_NEW;
1294 ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
1299 for (i = 0; i < num_events; i++) {
1300 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1301 printf("%d: NEW enqueue expected to succeed\n",
1307 /* Flush these events out of the CQ */
1308 timeout = 0xFFFFFFFFF;
1310 for (i = 0; i < num_events; i++) {
1311 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1312 printf("%d: event dequeue expected to succeed\n",
1318 ev.op = RTE_EVENT_OP_RELEASE;
1320 for (i = 0; i < num_events; i++) {
1321 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1322 printf("%d: RELEASE enqueue expected to succeed\n",
1328 /* Enqueue 2 * dequeue_depth NEW events again */
1329 ev.op = RTE_EVENT_OP_NEW;
1330 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
1335 for (i = 0; i < num_events; i++) {
1336 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1337 printf("%d: NEW enqueue expected to succeed\n",
1343 /* Dequeue dequeue_depth events but only release dequeue_depth - 1.
1344 * Delayed pop won't perform the pop and no more events will be
1347 for (i = 0; i < port_conf.dequeue_depth; i++) {
1348 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1349 printf("%d: event dequeue expected to succeed\n",
1355 ev.op = RTE_EVENT_OP_RELEASE;
1357 for (i = 0; i < port_conf.dequeue_depth - 1; i++) {
1358 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1359 printf("%d: RELEASE enqueue expected to succeed\n",
/* With one release outstanding, the pop hasn't fired: expect timeout. */
1367 ret = rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout);
1369 printf("%d: event dequeue expected to fail (ret = %d)\n",
1374 /* Release one more event. This will trigger the token pop, and
1375 * another batch of events will be scheduled to the device.
1377 ev.op = RTE_EVENT_OP_RELEASE;
1379 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1380 printf("%d: RELEASE enqueue expected to succeed\n",
1385 timeout = 0xFFFFFFFFF;
1387 for (i = 0; i < port_conf.dequeue_depth; i++) {
1388 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1389 printf("%d: event dequeue expected to succeed\n",
/*
 * do_selftest() body - lazily create the shared mbuf pool, then run each
 * selftest in sequence, printing a banner before and a FAILED message after
 * any test that returns nonzero.
 *
 * Fix in this revision (message only, no logic change): the Info Get
 * failure path printed "Stop Flush test FAILED" (copy-paste).
 * NOTE(review): the function header, 'struct test t', 'int ret' and the
 * return/brace lines are missing from this extraction; the name is taken
 * from the test_dlb_eventdev() caller. All other code kept verbatim.
 */
1409 /* Only create mbuf pool once, reuse for each test run */
1410 if (!eventdev_func_mempool) {
1411 eventdev_func_mempool =
1412 rte_pktmbuf_pool_create("EVENTDEV_DLB_SA_MBUF_POOL",
1413 (1 << 12), /* 4k buffers */
1414 32 /*MBUF_CACHE_SIZE*/,
1416 512, /* use very small mbufs */
1418 if (!eventdev_func_mempool) {
1419 printf("ERROR creating mempool\n");
1423 t.mbuf_pool = eventdev_func_mempool;
1425 printf("*** Running Stop Flush test...\n");
1426 ret = test_stop_flush(&t);
1428 printf("ERROR - Stop Flush test FAILED.\n");
1432 printf("*** Running Single Link test...\n");
1433 ret = test_single_link();
1435 printf("ERROR - Single Link test FAILED.\n");
1440 printf("*** Running Info Get test...\n");
1441 ret = test_info_get();
/* Fixed copy-paste: this is the Info Get test's failure path. */
1443 printf("ERROR - Info Get test FAILED.\n");
1447 printf("*** Running Reconfiguration Link test...\n");
1448 ret = test_reconfiguration_link();
1450 printf("ERROR - Reconfiguration Link test FAILED.\n");
1455 printf("*** Running Load-Balanced Traffic test...\n");
1456 ret = test_load_balanced_traffic();
1458 printf("ERROR - Load-Balanced Traffic test FAILED.\n");
1463 printf("*** Running Directed Traffic test...\n");
1464 ret = test_directed_traffic();
1466 printf("ERROR - Directed Traffic test FAILED.\n");
1471 printf("*** Running Deferred Scheduling test...\n");
1472 ret = test_deferred_sched();
1474 printf("ERROR - Deferred Scheduling test FAILED.\n");
1479 printf("*** Running Delayed Pop test...\n");
1480 ret = test_delayed_pop();
1482 printf("ERROR - Delayed Pop test FAILED.\n");
/*
 * test_dlb_eventdev() - entry point: iterate all event devices, run
 * do_selftest() on every device whose driver name matches "dlb_event",
 * and report found/skipped/passed/failed totals.
 *
 * Fix in this revision: the driver-name match used
 * strncmp(..., sizeof(*info.driver_name)) — sizeof(char) == 1 — so it
 * compared only the FIRST character and would match any driver starting
 * with 'd'. Compare the full prefix with strlen(dlb_eventdev_name)
 * (strncmp/strlen are declared in <string.h>, already required by the
 * original code's strncmp use).
 * NOTE(review): extraction gaps — 'int i, ret' and several braces/returns
 * are missing; all other code kept verbatim.
 */
1494 test_dlb_eventdev(void)
1496 const char *dlb_eventdev_name = "dlb_event";
1497 uint8_t num_evdevs = rte_event_dev_count();
1499 int found = 0, skipped = 0, passed = 0, failed = 0;
1500 struct rte_event_dev_info info;
1502 for (i = 0; found + skipped < num_evdevs && i < RTE_EVENT_MAX_DEVS;
1504 ret = rte_event_dev_info_get(i, &info);
1508 /* skip non-dlb event devices */
1509 if (strncmp(info.driver_name, dlb_eventdev_name,
1510 strlen(dlb_eventdev_name)) != 0) {
1515 evdev = rte_event_dev_get_dev_id(info.driver_name);
1517 printf("Could not get dev_id for eventdev with name %s, i=%d\n",
1518 info.driver_name, i);
1523 printf("Running selftest on eventdev %s\n", info.driver_name);
1524 ret = do_selftest();
1527 printf("Selftest passed for eventdev %s\n",
1531 printf("Selftest failed for eventdev %s, err=%d\n",
1532 info.driver_name, ret);
1536 printf("Ran selftest on %d eventdevs, %d skipped, %d passed, %d failed\n",
1537 found, skipped, passed, failed);