1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2020 Intel Corporation
10 #include <sys/queue.h>
12 #include <rte_memory.h>
13 #include <rte_memzone.h>
14 #include <rte_launch.h>
16 #include <rte_per_lcore.h>
17 #include <rte_lcore.h>
18 #include <rte_debug.h>
19 #include <rte_ethdev.h>
20 #include <rte_cycles.h>
21 #include <rte_eventdev.h>
22 #include <rte_pause.h>
24 #include "dlb2_priv.h"
25 #include "rte_pmd_dlb2.h"
/* Default number of atomic-order sequence numbers given to each
 * non-parallel load-balanced queue (see create_lb_qids()).
 */
29 #define DEFAULT_NUM_SEQ_NUMS 64
/* Shared mbuf pool, created once in do_selftest() and reused across runs. */
31 static struct rte_mempool *eventdev_func_mempool;
/* NOTE(review): the line below appears to be a member of a 'struct test'
 * definition whose surrounding lines are elided in this extract — confirm.
 */
35 struct rte_mempool *mbuf_pool;
39 /* initialization and config */
/* Reset the test context and configure the event device with nb_queues
 * queues and nb_ports ports, using the device's advertised maxima for the
 * remaining config fields. Returns 0 on success, negative on failure.
 * NOTE(review): some original lines (return type, error returns, braces)
 * are elided in this extract — see the gaps in the inline numbering.
 */
41 init(struct test *t, int nb_queues, int nb_ports)
43 struct rte_event_dev_config config = {0};
44 struct rte_event_dev_info info;
47 memset(t, 0, sizeof(*t));
/* Attach the process-wide mbuf pool created by do_selftest() */
49 t->mbuf_pool = eventdev_func_mempool;
51 if (rte_event_dev_info_get(evdev, &info)) {
52 printf("%d: Error querying device info\n", __LINE__);
56 config.nb_event_queues = nb_queues;
57 config.nb_event_ports = nb_ports;
/* Size everything else to the device maxima reported by info_get */
58 config.nb_event_queue_flows = info.max_event_queue_flows;
59 config.nb_events_limit = info.max_num_events;
60 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
61 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
62 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
63 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
65 ret = rte_event_dev_configure(evdev, &config);
67 printf("%d: Error configuring device\n", __LINE__);
/* Set up num_ports event ports (IDs 0..num_ports-1) with their default
 * configuration. Fails if num_ports exceeds MAX_PORTS or any setup fails.
 */
73 create_ports(int num_ports)
77 if (num_ports > MAX_PORTS)
80 for (i = 0; i < num_ports; i++) {
81 struct rte_event_port_conf conf;
83 if (rte_event_port_default_conf_get(evdev, i, &conf)) {
84 printf("%d: Error querying default port conf\n",
89 if (rte_event_port_setup(evdev, i, &conf) < 0) {
90 printf("%d: Error setting up port %d\n", __LINE__, i);
/* Create num_qids load-balanced queues of the given schedule type ('flags'),
 * starting at the test's next free queue ID (t->nb_qids). PARALLEL queues
 * get no atomic-order sequence numbers; other types get
 * DEFAULT_NUM_SEQ_NUMS. Updates t->nb_qids and bounds it by MAX_QIDS.
 */
99 create_lb_qids(struct test *t, int num_qids, uint32_t flags)
103 for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
104 struct rte_event_queue_conf conf;
106 if (rte_event_queue_default_conf_get(evdev, i, &conf)) {
107 printf("%d: Error querying default queue conf\n",
112 conf.schedule_type = flags;
114 if (conf.schedule_type == RTE_SCHED_TYPE_PARALLEL)
115 conf.nb_atomic_order_sequences = 0;
/* NOTE(review): the 'else' line between these branches is elided here */
117 conf.nb_atomic_order_sequences = DEFAULT_NUM_SEQ_NUMS;
119 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
120 printf("%d: error creating qid %d\n", __LINE__, i);
125 t->nb_qids += num_qids;
126 if (t->nb_qids > MAX_QIDS)
/* Convenience wrapper: create num_qids atomic load-balanced queues. */
133 create_atomic_qids(struct test *t, int num_qids)
135 return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
/* NOTE(review): fragment of a teardown helper whose signature is elided in
 * this extract: stop the device, then close it and report close failures.
 */
144 rte_event_dev_stop(evdev);
145 ret = rte_event_dev_close(evdev);
148 printf("%d: rte_event_dev_close failed, ret = %d\n",
/* Repeatedly try to enqueue one event on port_id until it succeeds or
 * tmo_us microseconds elapse. Returns 0 on success, nonzero on timeout or
 * hard error. Note: the eventdev enqueue API documents rte_errno being set
 * to *negative* values, hence the comparison against -ENOSPC below —
 * backpressure (-ENOSPC) is retried, any other error aborts immediately.
 */
153 enqueue_timeout(uint8_t port_id, struct rte_event *ev, uint64_t tmo_us)
155 const uint64_t start = rte_get_timer_cycles();
/* Convert the microsecond budget into TSC ticks */
156 const uint64_t ticks = (tmo_us * rte_get_timer_hz()) / 1E6;
158 while ((rte_get_timer_cycles() - start) < ticks) {
159 if (rte_event_enqueue_burst(evdev, port_id, ev, 1) == 1)
162 if (rte_errno != -ENOSPC) {
163 printf("enqueue_burst returned rte_errno %d\n",
168 printf("%s time out\n", __func__);
/* Device-stop flush callback: frees the mbuf carried by each event still
 * held by the device when rte_event_dev_stop() is called.
 */
173 flush(uint8_t id __rte_unused, struct rte_event event, void *arg __rte_unused)
175 rte_pktmbuf_free(event.mbuf);
/* Verify stop-flush handling: overfill two atomic queues past the port's
 * dequeue depth, register the flush() callback, stop the device, and check
 * that the mempool's available count returns to its pre-enqueue value
 * (i.e. every in-flight mbuf was handed to the callback and freed).
 */
179 test_stop_flush(struct test *t) /* test to check we can properly flush events */
182 uint32_t dequeue_depth;
183 unsigned int i, count;
186 ev.op = RTE_EVENT_OP_NEW;
188 if (init(t, 2, 1) < 0 ||
189 create_ports(1) < 0 ||
190 create_atomic_qids(t, 2) < 0) {
191 printf("%d: Error initializing device\n", __LINE__);
/* NULL/0 links the port to all queues; both must link */
195 if (rte_event_port_link(evdev, 0, NULL, NULL, 0) != 2) {
196 printf("%d: Error linking queues to the port\n", __LINE__);
200 if (rte_event_dev_start(evdev) < 0) {
201 printf("%d: Error with start call\n", __LINE__);
205 /* Unlink queue 1 so the PMD's stop callback has to cleanup an unlinked
210 if (rte_event_port_unlink(evdev, 0, &queue_id, 1) != 1) {
211 printf("%d: Error unlinking queue 1 from port\n", __LINE__);
/* Baseline free-mbuf count, compared again after the flush */
215 count = rte_mempool_avail_count(t->mbuf_pool);
217 if (rte_event_port_attr_get(evdev,
219 RTE_EVENT_PORT_ATTR_DEQ_DEPTH,
/* FIXME: typo in the message below — "retrieveing" -> "retrieving" */
221 printf("%d: Error retrieveing dequeue depth\n", __LINE__);
225 /* Send QEs to queue 0 */
226 for (i = 0; i < dequeue_depth + 1; i++) {
227 ev.mbuf = rte_pktmbuf_alloc(t->mbuf_pool);
229 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
231 if (enqueue_timeout(0, &ev, 1000)) {
232 printf("%d: Error enqueuing events\n", __LINE__);
237 /* Send QEs to queue 1 */
238 for (i = 0; i < dequeue_depth + 1; i++) {
239 ev.mbuf = rte_pktmbuf_alloc(t->mbuf_pool);
241 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
243 if (enqueue_timeout(0, &ev, 1000)) {
244 printf("%d: Error enqueuing events\n", __LINE__);
249 /* Now the DLB is scheduling events from the port to the IQ, and at
250 * least one event should be remaining in each queue.
253 if (rte_event_dev_stop_flush_callback_register(evdev, flush, NULL)) {
254 printf("%d: Error installing the flush callback\n", __LINE__);
/* All flushed mbufs must have been freed back to the pool */
260 if (count != rte_mempool_avail_count(t->mbuf_pool)) {
261 printf("%d: Error executing the flush callback\n", __LINE__);
265 if (rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL)) {
266 printf("%d: Error uninstalling the flush callback\n", __LINE__);
/* Verify single-link (directed) resource accounting: with
 * nb_single_link_event_port_queues = 1, exactly one directed port and one
 * directed queue may be created — a second of either must fail — and a
 * directed port can only be linked to a directed queue (and an LDB port
 * only to an LDB queue).
 */
277 test_single_link(void)
279 struct rte_event_dev_config config = {0};
280 struct rte_event_queue_conf queue_conf;
281 struct rte_event_port_conf port_conf;
282 struct rte_event_dev_info info;
286 if (rte_event_dev_info_get(evdev, &info)) {
287 printf("%d: Error querying device info\n", __LINE__);
291 config.nb_event_queues = 2;
292 config.nb_event_ports = 2;
/* Reserve exactly one directed port/queue pair */
293 config.nb_single_link_event_port_queues = 1;
294 config.nb_event_queue_flows = info.max_event_queue_flows;
295 config.nb_events_limit = info.max_num_events;
296 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
297 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
298 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
299 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
301 ret = rte_event_dev_configure(evdev, &config);
303 printf("%d: Error configuring device\n", __LINE__);
307 /* Create a directed port */
308 if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
309 printf("%d: Error querying default port conf\n", __LINE__);
313 port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;
315 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
316 printf("%d: port 0 setup expected to succeed\n", __LINE__);
320 /* Attempt to create another directed port */
321 if (rte_event_port_setup(evdev, 1, &port_conf) == 0) {
322 printf("%d: port 1 setup expected to fail\n", __LINE__);
326 port_conf.event_port_cfg = 0;
328 /* Create a load-balanced port */
329 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
330 printf("%d: port 1 setup expected to succeed\n", __LINE__);
334 /* Create a directed queue */
335 if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
336 printf("%d: Error querying default queue conf\n", __LINE__);
340 queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
342 if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
343 printf("%d: queue 0 setup expected to succeed\n", __LINE__);
347 /* Attempt to create another directed queue */
348 if (rte_event_queue_setup(evdev, 1, &queue_conf) == 0) {
349 printf("%d: queue 1 setup expected to fail\n", __LINE__);
353 /* Create a load-balanced queue */
354 queue_conf.event_queue_cfg = 0;
356 if (rte_event_queue_setup(evdev, 1, &queue_conf) < 0) {
357 printf("%d: queue 1 setup expected to succeed\n", __LINE__);
361 /* Attempt to link directed and load-balanced resources */
363 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) == 1) {
364 printf("%d: port 0 link expected to fail\n", __LINE__);
369 if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) == 1) {
370 printf("%d: port 1 link expected to fail\n", __LINE__);
374 /* Link ports to queues */
376 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
377 printf("%d: port 0 link expected to succeed\n", __LINE__);
382 if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
383 printf("%d: port 1 link expected to succeed\n", __LINE__);
/* Success path: close the (never-started) device */
387 ret = rte_event_dev_close(evdev);
389 printf("%d: rte_event_dev_close failed, ret = %d\n",
/* NOTE(review): second close below is the shared error-path epilogue */
395 ret = rte_event_dev_close(evdev);
397 printf("%d: rte_event_dev_close failed, ret = %d\n",
/* Expected DLB2 load-balanced resource counts reported by info_get */
403 #define NUM_LDB_PORTS 64
404 #define NUM_LDB_QUEUES 32
/* Verify rte_event_dev_info_get() reports only load-balanced ports/queues,
 * both before and after configuring the device with additional directed
 * (single-link) port/queue pairs.
 * NOTE(review): the function's signature line is elided in this extract.
 */
409 struct rte_event_dev_config config = {0};
410 struct rte_event_dev_info info;
413 if (rte_event_dev_info_get(evdev, &info)) {
414 printf("%d: Error querying device info\n", __LINE__);
418 if (info.max_event_ports != NUM_LDB_PORTS) {
419 printf("%d: Got %u ports, expected %u\n",
420 __LINE__, info.max_event_ports, NUM_LDB_PORTS);
424 if (info.max_event_queues != NUM_LDB_QUEUES) {
425 printf("%d: Got %u queues, expected %u\n",
426 __LINE__, info.max_event_queues, NUM_LDB_QUEUES);
430 config.nb_event_ports = info.max_event_ports;
/* Half the ports become directed, each paired with a directed queue */
431 config.nb_event_queues = NUM_LDB_QUEUES + info.max_event_ports / 2;
432 config.nb_single_link_event_port_queues = info.max_event_ports / 2;
433 config.nb_event_queue_flows = info.max_event_queue_flows;
434 config.nb_events_limit = info.max_num_events;
435 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
436 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
437 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
438 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
440 ret = rte_event_dev_configure(evdev, &config);
442 printf("%d: Error configuring device\n", __LINE__);
446 if (rte_event_dev_info_get(evdev, &info)) {
447 printf("%d: Error querying device info\n", __LINE__);
451 /* The DLB2 PMD only reports load-balanced ports and queues in its
452 * info_get function. Confirm that these values don't include the
453 * directed port or queue counts.
456 if (info.max_event_ports != NUM_LDB_PORTS) {
457 printf("%d: Got %u ports, expected %u\n",
458 __LINE__, info.max_event_ports, NUM_LDB_PORTS);
462 if (info.max_event_queues != NUM_LDB_QUEUES) {
463 printf("%d: Got %u queues, expected %u\n",
464 __LINE__, info.max_event_queues, NUM_LDB_QUEUES);
/* Success path close, followed by the error-path close epilogue */
468 ret = rte_event_dev_close(evdev);
470 printf("%d: rte_event_dev_close failed, ret = %d\n",
477 ret = rte_event_dev_close(evdev);
479 printf("%d: rte_event_dev_close failed, ret = %d\n",
/* Exercise stop/reconfigure/restart cycles: reconfigure with LDB resources
 * (leaving some ports/queues for the PMD to implicitly reconfigure), then
 * with directed resources, then with a mixed 5-port/5-queue setup, and
 * finally grow the port count by one — verifying setup and link succeed at
 * every stage.
 */
486 test_reconfiguration_link(void)
488 struct rte_event_dev_config config = {0};
489 struct rte_event_queue_conf queue_conf;
490 struct rte_event_port_conf port_conf;
491 struct rte_event_dev_info info;
495 if (rte_event_dev_info_get(evdev, &info)) {
496 printf("%d: Error querying device info\n", __LINE__);
500 config.nb_event_queues = 2;
501 config.nb_event_ports = 2;
502 config.nb_single_link_event_port_queues = 0;
503 config.nb_event_queue_flows = info.max_event_queue_flows;
504 config.nb_events_limit = info.max_num_events;
505 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
506 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
507 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
508 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
510 /* Configure the device with 2 LDB ports and 2 LDB queues */
511 ret = rte_event_dev_configure(evdev, &config);
513 printf("%d: Error configuring device\n", __LINE__);
517 /* Configure the ports and queues */
518 if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
519 printf("%d: Error querying default port conf\n", __LINE__);
523 for (i = 0; i < 2; i++) {
524 if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
525 printf("%d: port %d setup expected to succeed\n",
531 if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
532 printf("%d: Error querying default queue conf\n", __LINE__);
536 for (i = 0; i < 2; i++) {
537 if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
538 printf("%d: queue %d setup expected to succeed\n",
544 /* Link P0->Q0 and P1->Q1 */
545 for (i = 0; i < 2; i++) {
548 if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
549 printf("%d: port %d link expected to succeed\n",
555 /* Start the device */
556 if (rte_event_dev_start(evdev) < 0) {
557 printf("%d: device start failed\n", __LINE__);
561 /* Stop the device */
562 rte_event_dev_stop(evdev);
564 /* Reconfigure device */
565 ret = rte_event_dev_configure(evdev, &config);
567 printf("%d: Error re-configuring device\n", __LINE__);
571 /* Configure P1 and Q1, leave P0 and Q0 to be configured by the PMD. */
572 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
573 printf("%d: port 1 setup expected to succeed\n",
578 if (rte_event_queue_setup(evdev, 1, &queue_conf) < 0) {
579 printf("%d: queue 1 setup expected to succeed\n",
584 /* Link P0->Q0 and Q1 */
585 for (i = 0; i < 2; i++) {
588 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
589 printf("%d: P0->Q%d link expected to succeed\n",
595 /* Link P1->Q0 and Q1 */
596 for (i = 0; i < 2; i++) {
599 if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
600 printf("%d: P1->Q%d link expected to succeed\n",
606 /* Start the device */
607 if (rte_event_dev_start(evdev) < 0) {
608 printf("%d: device start failed\n", __LINE__);
612 /* Stop the device */
613 rte_event_dev_stop(evdev);
615 /* Configure device with 2 DIR ports and 2 DIR queues */
616 config.nb_single_link_event_port_queues = 2;
618 ret = rte_event_dev_configure(evdev, &config);
620 printf("%d: Error configuring device\n", __LINE__);
624 /* Configure the ports and queues */
625 port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;
627 for (i = 0; i < 2; i++) {
628 if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
629 printf("%d: port %d setup expected to succeed\n",
635 queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
637 for (i = 0; i < 2; i++) {
638 if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
639 printf("%d: queue %d setup expected to succeed\n",
645 /* Link P0->Q0 and P1->Q1 */
646 for (i = 0; i < 2; i++) {
649 if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
650 printf("%d: port %d link expected to succeed\n",
656 /* Start the device */
657 if (rte_event_dev_start(evdev) < 0) {
658 printf("%d: device start failed\n", __LINE__);
662 /* Stop the device */
663 rte_event_dev_stop(evdev);
665 /* Reconfigure device */
666 ret = rte_event_dev_configure(evdev, &config);
668 printf("%d: Error re-configuring device\n", __LINE__);
672 /* Configure P1 and Q0, leave P0 and Q1 to be configured by the PMD. */
673 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
674 printf("%d: port 1 setup expected to succeed\n",
679 if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
/* FIXME: copy-paste error — message says "queue 1" but queue 0 is set up */
680 printf("%d: queue 1 setup expected to succeed\n",
688 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
689 printf("%d: P0->Q%d link expected to succeed\n",
697 if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
698 printf("%d: P1->Q%d link expected to succeed\n",
703 /* Start the device */
704 if (rte_event_dev_start(evdev) < 0) {
705 printf("%d: device start failed\n", __LINE__);
709 rte_event_dev_stop(evdev);
/* Mixed config: 4 LDB + 1 DIR port/queue */
711 config.nb_event_queues = 5;
712 config.nb_event_ports = 5;
713 config.nb_single_link_event_port_queues = 1;
715 ret = rte_event_dev_configure(evdev, &config);
717 printf("%d: Error re-configuring device\n", __LINE__);
721 for (i = 0; i < config.nb_event_queues - 1; i++) {
722 port_conf.event_port_cfg = 0;
723 queue_conf.event_queue_cfg = 0;
725 if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
726 printf("%d: port %d setup expected to succeed\n",
731 if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
732 printf("%d: queue %d setup expected to succeed\n",
739 if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
740 printf("%d: P%d->Q%d link expected to succeed\n",
/* The final port/queue pair is directed */
746 port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;
747 queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
749 if (rte_event_port_setup(evdev, i, &port_conf) < 0) {
750 printf("%d: port %d setup expected to succeed\n",
755 if (rte_event_queue_setup(evdev, i, &queue_conf) < 0) {
756 printf("%d: queue %d setup expected to succeed\n",
763 if (rte_event_port_link(evdev, i, &queue_id, NULL, 1) != 1) {
764 printf("%d: P%d->Q%d link expected to succeed\n",
769 /* Start the device */
770 if (rte_event_dev_start(evdev) < 0) {
771 printf("%d: device start failed\n", __LINE__);
775 /* Stop the device */
776 rte_event_dev_stop(evdev);
778 config.nb_event_ports += 1;
780 /* Reconfigure device with 1 more load-balanced port */
781 ret = rte_event_dev_configure(evdev, &config);
783 printf("%d: Error re-configuring device\n", __LINE__);
787 port_conf.event_port_cfg = 0;
789 /* Configure the new port */
790 if (rte_event_port_setup(evdev, config.nb_event_ports - 1,
/* FIXME: message hard-codes "port 1" but the port configured above is
 * nb_event_ports - 1; use the actual port ID in the message.
 */
792 printf("%d: port 1 setup expected to succeed\n",
797 /* Start the device */
798 if (rte_event_dev_start(evdev) < 0) {
799 printf("%d: device start failed\n", __LINE__);
/* Smoke-test load-balanced traffic: one LDB port linked to one atomic LDB
 * queue, exercising the NEW -> FORWARD -> RELEASE event lifecycle.
 */
812 test_load_balanced_traffic(void)
815 struct rte_event_dev_config config = {0};
816 struct rte_event_queue_conf queue_conf;
817 struct rte_event_port_conf port_conf;
818 struct rte_event_dev_info info;
823 if (rte_event_dev_info_get(evdev, &info)) {
824 printf("%d: Error querying device info\n", __LINE__);
828 config.nb_event_queues = 1;
829 config.nb_event_ports = 1;
830 config.nb_single_link_event_port_queues = 0;
831 config.nb_event_queue_flows = info.max_event_queue_flows;
832 config.nb_events_limit = info.max_num_events;
833 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
834 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
835 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
836 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
838 /* Configure the device with 1 LDB port and queue */
839 ret = rte_event_dev_configure(evdev, &config);
841 printf("%d: Error configuring device\n", __LINE__);
845 /* Configure the ports and queues */
846 if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
847 printf("%d: Error querying default port conf\n", __LINE__);
851 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
852 printf("%d: port 0 setup expected to succeed\n",
857 if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
858 printf("%d: Error querying default queue conf\n", __LINE__);
862 if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
863 printf("%d: queue 0 setup expected to succeed\n",
871 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
872 printf("%d: port 0 link expected to succeed\n",
877 /* Start the device */
878 if (rte_event_dev_start(evdev) < 0) {
879 printf("%d: device start failed\n", __LINE__);
883 /* Enqueue 1 NEW event */
884 ev.op = RTE_EVENT_OP_NEW;
885 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
890 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
891 printf("%d: NEW enqueue expected to succeed\n",
896 /* Dequeue and enqueue 1 FORWARD event */
897 timeout = 0xFFFFFFFFF;
898 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
899 printf("%d: event dequeue expected to succeed\n",
904 ev.op = RTE_EVENT_OP_FORWARD;
906 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
/* FIXME: copy-paste — this enqueue is a FORWARD op, not NEW */
907 printf("%d: NEW enqueue expected to succeed\n",
912 /* Dequeue and enqueue 1 RELEASE operation */
913 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
914 printf("%d: event dequeue expected to succeed\n",
919 ev.op = RTE_EVENT_OP_RELEASE;
921 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
/* FIXME: copy-paste — this enqueue is a RELEASE op, not NEW */
922 printf("%d: NEW enqueue expected to succeed\n",
936 test_directed_traffic(void)
939 struct rte_event_dev_config config = {0};
940 struct rte_event_queue_conf queue_conf;
941 struct rte_event_port_conf port_conf;
942 struct rte_event_dev_info info;
947 if (rte_event_dev_info_get(evdev, &info)) {
948 printf("%d: Error querying device info\n", __LINE__);
952 config.nb_event_queues = 1;
953 config.nb_event_ports = 1;
954 config.nb_single_link_event_port_queues = 1;
955 config.nb_event_queue_flows = info.max_event_queue_flows;
956 config.nb_events_limit = info.max_num_events;
957 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
958 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
959 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
960 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
962 /* Configure the device with 1 DIR port and queue */
963 ret = rte_event_dev_configure(evdev, &config);
965 printf("%d: Error configuring device\n", __LINE__);
969 /* Configure the ports and queues */
970 if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
971 printf("%d: Error querying default port conf\n", __LINE__);
975 port_conf.event_port_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
977 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
978 printf("%d: port 0 setup expected to succeed\n",
983 if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
984 printf("%d: Error querying default queue conf\n", __LINE__);
988 queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
990 if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
991 printf("%d: queue 0 setup expected to succeed\n",
999 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1000 printf("%d: port 0 link expected to succeed\n",
1005 /* Start the device */
1006 if (rte_event_dev_start(evdev) < 0) {
1007 printf("%d: device start failed\n", __LINE__);
1011 /* Enqueue 1 NEW event */
1012 ev.op = RTE_EVENT_OP_NEW;
1017 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1018 printf("%d: NEW enqueue expected to succeed\n",
1023 /* Dequeue and enqueue 1 FORWARD event */
1024 timeout = 0xFFFFFFFFF;
1025 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1026 printf("%d: event dequeue expected to succeed\n",
1031 if (ev.queue_id != 0) {
1032 printf("%d: invalid dequeued event queue ID (%d)\n",
1033 __LINE__, ev.queue_id);
1037 ev.op = RTE_EVENT_OP_FORWARD;
1039 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1040 printf("%d: NEW enqueue expected to succeed\n",
1045 /* Dequeue and enqueue 1 RELEASE operation */
1046 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1047 printf("%d: event dequeue expected to succeed\n",
1052 ev.op = RTE_EVENT_OP_RELEASE;
1054 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1055 printf("%d: NEW enqueue expected to succeed\n",
/* Verify deferred scheduling (DEFERRED_POP token pop mode): with two LDB
 * ports on one parallel queue and port 0's dequeue depth set to 1, dequeue
 * one event on port 0 and drain the rest via port 1 — deferred scheduling
 * must prevent further events reaching port 0 until it dequeues again.
 */
1069 test_deferred_sched(void)
1072 struct rte_event_dev_config config = {0};
1073 struct rte_event_queue_conf queue_conf;
1074 struct rte_event_port_conf port_conf;
1075 struct rte_event_dev_info info;
1076 const int num_events = 128;
1077 struct rte_event ev;
1081 if (rte_event_dev_info_get(evdev, &info)) {
1082 printf("%d: Error querying device info\n", __LINE__);
1086 config.nb_event_queues = 1;
1087 config.nb_event_ports = 2;
1088 config.nb_single_link_event_port_queues = 0;
1089 config.nb_event_queue_flows = info.max_event_queue_flows;
1090 config.nb_events_limit = info.max_num_events;
1091 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
1092 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
1093 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
1094 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
1096 /* Configure the device with 2 LDB ports and 1 queue */
1097 ret = rte_event_dev_configure(evdev, &config);
1099 printf("%d: Error configuring device\n", __LINE__);
/* Both ports use the DLB2-specific deferred token-pop mode */
1103 ret = rte_pmd_dlb2_set_token_pop_mode(evdev, 0, DEFERRED_POP);
1105 printf("%d: Error setting deferred scheduling\n", __LINE__);
1109 ret = rte_pmd_dlb2_set_token_pop_mode(evdev, 1, DEFERRED_POP);
1111 printf("%d: Error setting deferred scheduling\n", __LINE__);
1115 /* Configure the ports and queues */
1116 if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
1117 printf("%d: Error querying default port conf\n", __LINE__);
/* Depth 1 so a single undequeued event stalls port 0's scheduling */
1121 port_conf.dequeue_depth = 1;
1123 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1124 printf("%d: port 0 setup expected to succeed\n",
1129 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
1130 printf("%d: port 1 setup expected to succeed\n",
1135 if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
1136 printf("%d: Error querying default queue conf\n", __LINE__);
1140 queue_conf.schedule_type = RTE_SCHED_TYPE_PARALLEL;
1141 queue_conf.nb_atomic_order_sequences = 0;
1143 if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
1144 printf("%d: queue 0 setup expected to succeed\n",
1149 /* Link P0->Q0 and P1->Q0 */
1152 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1153 printf("%d: port 0 link expected to succeed\n",
1158 if (rte_event_port_link(evdev, 1, &queue_id, NULL, 1) != 1) {
1159 printf("%d: port 1 link expected to succeed\n",
1164 /* Start the device */
1165 if (rte_event_dev_start(evdev) < 0) {
1166 printf("%d: device start failed\n", __LINE__);
1170 /* Enqueue 128 NEW events */
1171 ev.op = RTE_EVENT_OP_NEW;
1172 ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
1177 for (i = 0; i < num_events; i++) {
1178 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1179 printf("%d: NEW enqueue expected to succeed\n",
1185 /* Dequeue one event from port 0 */
1186 timeout = 0xFFFFFFFFF;
1187 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1188 printf("%d: event dequeue expected to succeed\n",
1193 /* Dequeue (and release) all other events from port 1. Deferred
1194 * scheduling ensures no other events are scheduled to port 0 without a
1195 * subsequent rte_event_dequeue_burst() call.
1197 for (i = 0; i < num_events - 1; i++) {
1198 if (rte_event_dequeue_burst(evdev, 1, &ev, 1, timeout) != 1) {
1199 printf("%d: event dequeue expected to succeed\n",
1204 ev.op = RTE_EVENT_OP_RELEASE;
1206 if (rte_event_enqueue_burst(evdev, 1, &ev, 1) != 1) {
1207 printf("%d: RELEASE enqueue expected to succeed\n",
/* Verify delayed token pop (DELAYED_POP mode with implicit release
 * disabled): after dequeuing a full dequeue_depth batch, releasing fewer
 * than dequeue_depth - 1 events must NOT trigger a token pop, so no new
 * events are scheduled; the final release triggers the pop and the
 * remaining events become dequeueable.
 */
1222 test_delayed_pop(void)
1225 struct rte_event_dev_config config = {0};
1226 struct rte_event_queue_conf queue_conf;
1227 struct rte_event_port_conf port_conf;
1228 struct rte_event_dev_info info;
1229 int ret, i, num_events;
1230 struct rte_event ev;
1233 if (rte_event_dev_info_get(evdev, &info)) {
1234 printf("%d: Error querying device info\n", __LINE__);
1238 config.nb_event_queues = 1;
1239 config.nb_event_ports = 1;
1240 config.nb_single_link_event_port_queues = 0;
1241 config.nb_event_queue_flows = info.max_event_queue_flows;
1242 config.nb_events_limit = info.max_num_events;
1243 config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
1244 config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
1245 config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
1246 config.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
1248 /* Configure the device with 1 LDB port and queue */
1249 ret = rte_event_dev_configure(evdev, &config);
1251 printf("%d: Error configuring device\n", __LINE__);
1255 ret = rte_pmd_dlb2_set_token_pop_mode(evdev, 0, DELAYED_POP);
/* FIXME: message says "deferred scheduling" but this sets DELAYED_POP */
1257 printf("%d: Error setting deferred scheduling\n", __LINE__);
1261 /* Configure the ports and queues */
1262 if (rte_event_port_default_conf_get(evdev, 0, &port_conf)) {
1263 printf("%d: Error querying default port conf\n", __LINE__);
/* Delayed pop requires explicit releases */
1267 port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
1269 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1270 printf("%d: port 0 setup expected to succeed\n",
1275 if (rte_event_queue_default_conf_get(evdev, 0, &queue_conf)) {
1276 printf("%d: Error querying default queue conf\n", __LINE__);
1280 if (rte_event_queue_setup(evdev, 0, &queue_conf) < 0) {
1281 printf("%d: queue 0 setup expected to succeed\n",
1289 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1290 printf("%d: port 0 link expected to succeed\n",
1295 /* Start the device */
1296 if (rte_event_dev_start(evdev) < 0) {
1297 printf("%d: device start failed\n", __LINE__);
1301 num_events = 2 * port_conf.dequeue_depth;
1303 /* Enqueue 2 * dequeue_depth NEW events */
1304 ev.op = RTE_EVENT_OP_NEW;
1305 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
1310 for (i = 0; i < num_events; i++) {
1311 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1312 printf("%d: NEW enqueue expected to succeed\n",
1318 /* Dequeue dequeue_depth events but only release dequeue_depth - 2.
1319 * Delayed pop won't perform the pop and no more events will be
1322 timeout = 0xFFFFFFFFF;
1324 for (i = 0; i < port_conf.dequeue_depth; i++) {
1325 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1326 printf("%d: event dequeue expected to succeed\n",
1332 ev.op = RTE_EVENT_OP_RELEASE;
1334 for (i = 0; i < port_conf.dequeue_depth - 2; i++) {
1335 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1336 printf("%d: RELEASE enqueue expected to succeed\n",
/* Not enough releases yet — the next dequeue must come back empty */
1344 ret = rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout);
1346 printf("%d: event dequeue expected to fail (ret = %d)\n",
1351 /* Release one more event. This will trigger the token pop, and
1352 * dequeue_depth - 1 more events will be scheduled to the device.
1354 ev.op = RTE_EVENT_OP_RELEASE;
1356 if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
1357 printf("%d: RELEASE enqueue expected to succeed\n",
1362 timeout = 0xFFFFFFFFF;
1364 for (i = 0; i < port_conf.dequeue_depth - 1; i++) {
1365 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 1) {
1366 printf("%d: event dequeue expected to succeed\n",
/* All scheduled events consumed — a further dequeue must return 0 */
1374 if (rte_event_dequeue_burst(evdev, 0, &ev, 1, timeout) != 0) {
1375 printf("%d: event dequeue expected to fail\n",
/* NOTE(review): fragment of do_selftest() — its signature and some lines
 * are elided in this extract. Lazily creates the shared mbuf pool, then
 * runs each sub-test in sequence, bailing out on the first failure.
 */
1394 /* Only create mbuf pool once, reuse for each test run */
1395 if (!eventdev_func_mempool) {
1396 eventdev_func_mempool =
1397 rte_pktmbuf_pool_create("EVENTDEV_DLB2_ST_POOL",
1398 (1 << 12), /* 4k buffers */
1399 32 /*MBUF_CACHE_SIZE*/,
1401 512, /* use very small mbufs */
1403 if (!eventdev_func_mempool) {
1404 printf("ERROR creating mempool\n");
1408 t.mbuf_pool = eventdev_func_mempool;
1410 printf("*** Running Stop Flush test...\n");
1411 ret = test_stop_flush(&t);
1413 printf("ERROR - Stop Flush test FAILED.\n");
1417 printf("*** Running Single Link test...\n");
1418 ret = test_single_link();
1420 printf("ERROR - Single Link test FAILED.\n");
1425 printf("*** Running Info Get test...\n");
1426 ret = test_info_get();
/* FIXME: copy-paste error — this is the Info Get test, not Stop Flush */
1428 printf("ERROR - Stop Flush test FAILED.\n");
1432 printf("*** Running Reconfiguration Link test...\n");
1433 ret = test_reconfiguration_link();
1435 printf("ERROR - Reconfiguration Link test FAILED.\n");
1440 printf("*** Running Load-Balanced Traffic test...\n");
1441 ret = test_load_balanced_traffic();
1443 printf("ERROR - Load-Balanced Traffic test FAILED.\n");
1448 printf("*** Running Directed Traffic test...\n");
1449 ret = test_directed_traffic();
1451 printf("ERROR - Directed Traffic test FAILED.\n");
1456 printf("*** Running Deferred Scheduling test...\n");
1457 ret = test_deferred_sched();
1459 printf("ERROR - Deferred Scheduling test FAILED.\n");
1464 printf("*** Running Delayed Pop test...\n");
1465 ret = test_delayed_pop();
1467 printf("ERROR - Delayed Pop test FAILED.\n");
1479 test_dlb2_eventdev(void)
1481 const char *dlb2_eventdev_name = "dlb2_event";
1482 uint8_t num_evdevs = rte_event_dev_count();
1484 int found = 0, skipped = 0, passed = 0, failed = 0;
1485 struct rte_event_dev_info info;
1487 for (i = 0; found + skipped < num_evdevs && i < RTE_EVENT_MAX_DEVS;
1489 ret = rte_event_dev_info_get(i, &info);
1493 /* skip non-dlb2 event devices */
1494 if (strncmp(info.driver_name, dlb2_eventdev_name,
1495 sizeof(*info.driver_name)) != 0) {
1500 evdev = rte_event_dev_get_dev_id(info.driver_name);
1502 printf("Could not get dev_id for eventdev with name %s, i=%d\n",
1503 info.driver_name, i);
1508 printf("Running selftest on eventdev %s\n", info.driver_name);
1509 ret = do_selftest();
1512 printf("Selftest passed for eventdev %s\n",
1516 printf("Selftest failed for eventdev %s, err=%d\n",
1517 info.driver_name, ret);
1521 printf("Ran selftest on %d eventdevs, %d skipped, %d passed, %d failed\n",
1522 found, skipped, passed, failed);