1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
5 #include <rte_common.h>
6 #include <rte_mempool.h>
8 #include <rte_ethdev.h>
9 #include <rte_eventdev.h>
10 #include <rte_bus_vdev.h>
12 #include <rte_event_eth_rx_adapter.h>
16 #define MAX_NUM_RX_QUEUE 64
17 #define NB_MBUFS (8192 * num_ports * MAX_NUM_RX_QUEUE)
18 #define MBUF_CACHE_SIZE 512
19 #define MBUF_PRIV_SIZE 0
20 #define TEST_INST_ID 0
22 #define TEST_ETHDEV_ID 0
/*
 * Shared state for the Rx-adapter test suite.
 * NOTE(review): this is a partial, line-numbered listing; original lines
 * 27 and 30-31 are elided. Line 27 presumably declares the `caps` field
 * read elsewhere as default_params.caps (likely uint32_t) -- confirm
 * against the full source.
 */
24 struct event_eth_rx_adapter_test_params {
25 struct rte_mempool *mp; /* mbuf pool shared by all test ports */
26 uint16_t rx_rings, tx_rings; /* queue counts chosen in port_init_common() */
28 int rx_intr_port_inited; /* nonzero once an intr-capable port was found */
29 uint16_t rx_intr_port; /* port id used by the Rx-interrupt tests */
/* Single global instance; re-zeroed by the teardown helpers. */
32 static struct event_eth_rx_adapter_test_params default_params;
/* Track vdevs created in setup so teardown destroys only what it made. */
33 static bool event_dev_created;
34 static bool eth_dev_created;
/*
 * Common ethdev bring-up for a single port: configure, set up Rx/Tx
 * queues backed by @mp, start the port, print its MAC and enable
 * promiscuous mode. Records the chosen queue counts in default_params.
 * Returns 0 on success, negative on failure (error paths elided in this
 * listing -- each retval is presumably checked on the missing lines).
 */
37 port_init_common(uint16_t port, const struct rte_eth_conf *port_conf,
38 struct rte_mempool *mp)
40 const uint16_t rx_ring_size = 512, tx_ring_size = 512;
43 struct rte_eth_dev_info dev_info;
/* Reject invalid port ids up front. */
45 if (!rte_eth_dev_is_valid_port(port))
/* First configure with 0 queues so dev_info reflects device limits. */
48 retval = rte_eth_dev_configure(port, 0, 0, port_conf);
50 retval = rte_eth_dev_info_get(port, &dev_info);
/* Clamp Rx queue count to the device maximum (upper bound elided,
 * presumably MAX_NUM_RX_QUEUE). */
54 default_params.rx_rings = RTE_MIN(dev_info.max_rx_queues,
56 default_params.tx_rings = 1;
58 /* Configure the Ethernet device. */
59 retval = rte_eth_dev_configure(port, default_params.rx_rings,
60 default_params.tx_rings, port_conf);
/* One Rx queue per ring, all drawing mbufs from the shared pool. */
64 for (q = 0; q < default_params.rx_rings; q++) {
65 retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
66 rte_eth_dev_socket_id(port), NULL, mp);
71 /* Allocate and set up 1 TX queue per Ethernet port. */
72 for (q = 0; q < default_params.tx_rings; q++) {
73 retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
74 rte_eth_dev_socket_id(port), NULL);
79 /* Start the Ethernet port. */
80 retval = rte_eth_dev_start(port);
84 /* Display the port MAC address. */
85 struct rte_ether_addr addr;
86 retval = rte_eth_macaddr_get(port, &addr);
89 printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
90 " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
92 addr.addr_bytes[0], addr.addr_bytes[1],
93 addr.addr_bytes[2], addr.addr_bytes[3],
94 addr.addr_bytes[4], addr.addr_bytes[5]);
96 /* Enable RX in promiscuous mode for the Ethernet device. */
97 retval = rte_eth_promiscuous_enable(port);
/*
 * Initialize @port for the Rx-interrupt tests: same bring-up as
 * port_init() but with a port config that (per the elided lines,
 * presumably intr_conf.rxq = 1) enables Rx queue interrupts.
 */
105 port_init_rx_intr(uint16_t port, struct rte_mempool *mp)
107 static const struct rte_eth_conf port_conf_default = {
109 .mq_mode = ETH_MQ_RX_NONE,
116 return port_init_common(port, &port_conf_default, mp);
/*
 * Initialize @port with the default (polled, no multi-queue) config
 * and the shared mbuf pool @mp. Thin wrapper over port_init_common().
 */
120 port_init(uint16_t port, struct rte_mempool *mp)
122 static const struct rte_eth_conf port_conf_default = {
124 .mq_mode = ETH_MQ_RX_NONE,
128 return port_init_common(port, &port_conf_default, mp);
/*
 * Create the shared mbuf pool and bring up every ethdev in
 * Rx-interrupt mode. The first port whose adapter caps lack
 * INTERNAL_PORT (i.e. one serviced by the SW adapter, which can use
 * Rx interrupts) is recorded in default_params and kept running;
 * other ports are stopped again (per the visible control flow --
 * the elided lines around 159-160 presumably `return 0` for the
 * kept port before the stop).
 */
132 init_port_rx_intr(int num_ports)
138 default_params.mp = rte_pktmbuf_pool_create("packet_pool",
142 RTE_MBUF_DEFAULT_BUF_SIZE,
144 if (!default_params.mp)
147 RTE_ETH_FOREACH_DEV(portid) {
148 retval = port_init_rx_intr(portid, default_params.mp);
151 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, portid,
152 &default_params.caps);
155 if (!(default_params.caps &
156 RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
157 default_params.rx_intr_port_inited = 1;
158 default_params.rx_intr_port = portid;
161 rte_eth_dev_stop(portid);
/*
 * Bring up all ethdevs in polled mode. Reuses the "packet_pool"
 * mempool if a previous call already created it (lookup first),
 * otherwise creates it; this lets the function be called again after
 * adapter_multi_eth_add_del() adds extra vdev ports.
 */
167 init_ports(int num_ports)
172 struct rte_mempool *ptr = rte_mempool_lookup("packet_pool");
/* Pool not found: create it (condition line elided in this listing). */
175 default_params.mp = rte_pktmbuf_pool_create("packet_pool",
179 RTE_MBUF_DEFAULT_BUF_SIZE,
/* Pool already exists: reuse it. */
182 default_params.mp = ptr;
184 if (!default_params.mp)
187 RTE_ETH_FOREACH_DEV(portid) {
188 retval = port_init(portid, default_params.mp);
/*
 * Suite setup for the polled-mode tests: ensure an event device exists
 * (creating event_skeleton if none), configure it to the device maxima,
 * ensure an ethdev exists (creating net_null if none), initialize all
 * ports, and cache the adapter capabilities for TEST_ETHDEV_ID.
 */
197 testsuite_setup(void)
201 struct rte_event_dev_info dev_info;
203 count = rte_event_dev_count();
/* No physical/vdev event device present: fall back to the skeleton
 * PMD so the suite can still run. */
205 printf("Failed to find a valid event device,"
206 " testing with event_skeleton device\n")
207 err = rte_vdev_init("event_skeleton", NULL);
208 TEST_ASSERT(err == 0, "Failed to create event_skeleton. err=%d",
/* Remember we created it so teardown can uninit it. */
210 event_dev_created = true;
213 struct rte_event_dev_config config = {
214 .nb_event_queues = 1,
/* Size the config to the device's advertised limits. */
218 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
219 config.nb_event_queue_flows = dev_info.max_event_queue_flows;
220 config.nb_event_port_dequeue_depth =
221 dev_info.max_event_port_dequeue_depth;
222 config.nb_event_port_enqueue_depth =
223 dev_info.max_event_port_enqueue_depth;
224 config.nb_events_limit =
225 dev_info.max_num_events;
226 err = rte_event_dev_configure(TEST_DEV_ID, &config);
227 TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
/* Same fallback for ethdevs: create a net_null vdev if none exist. */
230 count = rte_eth_dev_count_total();
232 printf("Testing with net_null device\n");
233 err = rte_vdev_init("net_null", NULL);
234 TEST_ASSERT(err == 0, "Failed to create net_null. err=%d",
236 eth_dev_created = true;
240 * eth devices like octeontx use event device to receive packets
241 * so rte_eth_dev_start invokes rte_event_dev_start internally, so
242 * call init_ports after rte_event_dev_configure
244 err = init_ports(rte_eth_dev_count_total());
245 TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
/* Cache adapter caps; tests branch on them (e.g. OVERRIDE_FLOW_ID). */
247 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
248 &default_params.caps);
249 TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n",
/*
 * Suite setup for the Rx-interrupt tests. Mirrors testsuite_setup()
 * but initializes ports via init_port_rx_intr() and only fetches
 * adapter caps when an interrupt-capable port was actually found
 * (otherwise the single test in this suite is skipped).
 */
256 testsuite_setup_rx_intr(void)
260 struct rte_event_dev_info dev_info;
262 count = rte_event_dev_count();
/* Fall back to the skeleton event PMD when no event device exists. */
264 printf("Failed to find a valid event device,"
265 " testing with event_skeleton device\n");
266 err = rte_vdev_init("event_skeleton", NULL);
267 TEST_ASSERT(err == 0, "Failed to create event_skeleton. err=%d",
269 event_dev_created = true;
272 struct rte_event_dev_config config = {
273 .nb_event_queues = 1,
/* Configure the event device to its advertised maxima. */
277 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
278 config.nb_event_queue_flows = dev_info.max_event_queue_flows;
279 config.nb_event_port_dequeue_depth =
280 dev_info.max_event_port_dequeue_depth;
281 config.nb_event_port_enqueue_depth =
282 dev_info.max_event_port_enqueue_depth;
283 config.nb_events_limit =
284 dev_info.max_num_events;
286 err = rte_event_dev_configure(TEST_DEV_ID, &config);
287 TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
/* Fall back to net_null when no ethdev exists. */
290 count = rte_eth_dev_count_total();
292 printf("Testing with net_null device\n");
293 err = rte_vdev_init("net_null", NULL);
294 TEST_ASSERT(err == 0, "Failed to create net_null. err=%d",
296 eth_dev_created = true;
300 * eth devices like octeontx use event device to receive packets
301 * so rte_eth_dev_start invokes rte_event_dev_start internally, so
302 * call init_ports after rte_event_dev_configure
304 err = init_port_rx_intr(rte_eth_dev_count_total());
305 TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
/* No SW-adapter-serviced port found: nothing to test (early return
 * on the elided line 308, presumably). */
307 if (!default_params.rx_intr_port_inited)
310 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID,
311 default_params.rx_intr_port,
312 &default_params.caps);
313 TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n", err);
/*
 * Undo testsuite_setup(): stop every port, destroy the vdevs this
 * suite created (and only those), free the mempool and reset the
 * shared test state.
 */
319 testsuite_teardown(void)
/* Stop all ports (stop call on the elided line 324, presumably). */
323 RTE_ETH_FOREACH_DEV(i)
326 if (eth_dev_created) {
327 err = rte_vdev_uninit("net_null");
/* Uninit failure is only logged; teardown continues regardless. */
329 printf("Failed to delete net_null. err=%d", err);
330 eth_dev_created = false;
333 rte_mempool_free(default_params.mp);
334 if (event_dev_created) {
335 err = rte_vdev_uninit("event_skeleton");
337 printf("Failed to delete event_skeleton. err=%d", err);
338 event_dev_created = false;
/* Reset shared state so a later suite starts from a clean slate. */
341 memset(&default_params, 0, sizeof(default_params));
/*
 * Undo testsuite_setup_rx_intr(): if an interrupt port was set up,
 * stop it, destroy any vdevs this suite created, free the mempool and
 * reset the shared test state. No-op (early return, elided) when no
 * interrupt port was initialized.
 */
345 testsuite_teardown_rx_intr(void)
348 if (!default_params.rx_intr_port_inited)
351 rte_eth_dev_stop(default_params.rx_intr_port);
352 if (eth_dev_created) {
353 err = rte_vdev_uninit("net_null");
/* Uninit failure is only logged; teardown continues regardless. */
355 printf("Failed to delete net_null. err=%d", err);
356 eth_dev_created = false;
358 rte_mempool_free(default_params.mp);
359 if (event_dev_created) {
360 err = rte_vdev_uninit("event_skeleton");
362 printf("Failed to delete event_skeleton. err=%d", err);
363 event_dev_created = false;
366 memset(&default_params, 0, sizeof(default_params));
/* NOTE(review): the enclosing function signatures are elided in this
 * listing; by the suite table below, this is adapter_create() (per-test
 * setup) followed by adapter_free() (per-test teardown). */
/* adapter_create: build a port config from the event device's maxima
 * and create Rx adapter instance TEST_INST_ID on TEST_DEV_ID. */
373 struct rte_event_dev_info dev_info;
374 struct rte_event_port_conf rx_p_conf;
376 memset(&rx_p_conf, 0, sizeof(rx_p_conf));
378 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
379 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
381 rx_p_conf.new_event_threshold = dev_info.max_num_events;
382 rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
383 rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
384 err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
386 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* adapter_free: release the adapter instance created above. */
394 rte_event_eth_rx_adapter_free(TEST_INST_ID);
/*
 * Exercise create/free error paths: NULL config rejected (-EINVAL,
 * per the elided NULL argument on line ~409, presumably), duplicate
 * create rejected (-EEXIST), double free rejected (-EINVAL), and free
 * of a never-created instance id rejected (-EINVAL).
 */
398 adapter_create_free(void)
402 struct rte_event_port_conf rx_p_conf = {
405 .new_event_threshold = 1200,
408 err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
410 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/* Valid create succeeds. */
412 err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
414 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Creating the same instance id again must fail. */
416 err = rte_event_eth_rx_adapter_create(TEST_INST_ID,
417 TEST_DEV_ID, &rx_p_conf);
418 TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err);
420 err = rte_event_eth_rx_adapter_free(TEST_INST_ID);
421 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Double free must fail. */
423 err = rte_event_eth_rx_adapter_free(TEST_INST_ID);
424 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
/* Freeing an instance id that was never created must fail. */
426 err = rte_event_eth_rx_adapter_free(1);
427 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
/*
 * Exercise queue add/del: invalid eth port id rejected; with the
 * MULTI_EVENTQ capability single-queue add/del works; adding all
 * queues (queue id -1) works; a bogus queue id is rejected; and
 * add/del against an invalid adapter instance id (1) is rejected.
 * Queue-id arguments are on elided lines -- presumably 0 or -1.
 */
433 adapter_queue_add_del(void)
439 struct rte_event_eth_rx_adapter_queue_conf queue_config;
441 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
443 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
446 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
449 queue_config.rx_queue_flags = 0;
/* Only request flow-id override when the adapter supports it. */
450 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
452 queue_config.rx_queue_flags =
453 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
455 queue_config.ev = ev;
456 queue_config.servicing_weight = 1;
/* Out-of-range eth port id (== total count) must be rejected. */
458 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
459 rte_eth_dev_count_total(),
461 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/* Per-queue add/del requires the MULTI_EVENTQ capability. */
463 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
464 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
467 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
469 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
471 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
473 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
477 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
479 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
482 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Invalid queue id must be rejected even with MULTI_EVENTQ. */
484 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
488 TEST_ASSERT(err == -EINVAL, "Expected EINVAL got %d", err);
490 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
493 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
495 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
497 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
499 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
501 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
503 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
505 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Adapter instance id 1 was never created: both ops must fail. */
508 err = rte_event_eth_rx_adapter_queue_add(1, TEST_ETHDEV_ID, -1,
510 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
512 err = rte_event_eth_rx_adapter_queue_del(1, TEST_ETHDEV_ID, -1);
513 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/*
 * Stress the adapter with the maximum number of eth ports: create
 * net_null vdevs until RTE_MAX_ETHPORTS is reached, initialize them,
 * add a queue from every port to the adapter, delete them all, then
 * destroy the extra vdevs. Several loop initializers and argument
 * lists fall on elided lines.
 */
519 adapter_multi_eth_add_del(void)
524 uint16_t port_index, port_index_base, drv_id = 0;
525 char driver_name[50];
527 struct rte_event_eth_rx_adapter_queue_conf queue_config;
530 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
533 queue_config.rx_queue_flags = 0;
534 queue_config.ev = ev;
535 queue_config.servicing_weight = 1;
537 /* stop eth devices for existing */
539 for (; port_index < rte_eth_dev_count_total(); port_index += 1)
540 rte_eth_dev_stop(port_index);
542 /* add the max port for rx_adapter */
543 port_index = rte_eth_dev_count_total();
/* Remember where the new vdevs start so they can be deleted later. */
544 port_index_base = port_index;
545 for (; port_index < RTE_MAX_ETHPORTS; port_index += 1) {
546 snprintf(driver_name, sizeof(driver_name), "%s%u", "net_null",
548 err = rte_vdev_init(driver_name, NULL);
549 TEST_ASSERT(err == 0, "Failed driver %s got %d",
/* Re-run port init so the newly created vdevs are configured too. */
554 err = init_ports(rte_eth_dev_count_total());
555 TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
557 /* eth_rx_adapter_queue_add for n ports */
559 for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
560 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
563 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
566 /* eth_rx_adapter_queue_del n ports */
568 for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
569 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
571 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
574 /* delete vdev ports */
575 for (drv_id = 0, port_index = port_index_base;
576 port_index < RTE_MAX_ETHPORTS;
577 drv_id += 1, port_index += 1) {
578 snprintf(driver_name, sizeof(driver_name), "%s%u", "net_null",
580 err = rte_vdev_uninit(driver_name);
581 TEST_ASSERT(err == 0, "Failed driver %s got %d",
/*
 * Exercise interrupt-mode queue add/del on the recorded interrupt
 * port: servicing_weight == 0 selects interrupt mode. Covers single
 * intr queue, all intr queues, switching an intr queue to poll mode
 * (weight 1), and the corresponding deletes. Skipped (early return,
 * elided) when no interrupt-capable port was initialized.
 */
589 adapter_intr_queue_add_del(void)
595 struct rte_event_eth_rx_adapter_queue_conf queue_config;
597 if (!default_params.rx_intr_port_inited)
600 eth_port = default_params.rx_intr_port;
601 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, eth_port, &cap);
602 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
605 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
608 queue_config.rx_queue_flags = 0;
609 queue_config.ev = ev;
611 /* weight = 0 => interrupt mode */
612 queue_config.servicing_weight = 0;
/* Add a single interrupt-mode queue (queue id on elided line). */
614 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
616 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
619 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Add all queues of the port in interrupt mode. */
623 queue_config.servicing_weight = 0;
624 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
628 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
630 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
632 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
635 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
638 /* del remaining queues */
639 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
642 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Re-add in interrupt mode, then flip to poll mode below. */
645 queue_config.servicing_weight = 0;
646 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
650 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
652 /* intr -> poll mode queue */
653 queue_config.servicing_weight = 1;
655 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
656 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
660 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
663 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
667 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Clean up: delete the queues added above. */
670 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
673 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/*
 * Exercise start/stop: start and stop work both with and without a
 * queue attached, and both calls reject an adapter instance id (1)
 * that was never created.
 */
679 adapter_start_stop(void)
685 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
688 struct rte_event_eth_rx_adapter_queue_conf queue_config;
690 queue_config.rx_queue_flags = 0;
/* Honor the flow-id override capability cached in setup. */
691 if (default_params.caps &
692 RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
694 queue_config.rx_queue_flags =
695 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
698 queue_config.ev = ev;
699 queue_config.servicing_weight = 1;
/* Attach all queues (queue id argument on elided line 702). */
701 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, TEST_ETHDEV_ID,
703 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
705 err = rte_event_eth_rx_adapter_start(TEST_INST_ID);
706 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
708 err = rte_event_eth_rx_adapter_stop(TEST_INST_ID);
709 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
711 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID, TEST_ETHDEV_ID,
713 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Start/stop must also succeed with no queues attached. */
715 err = rte_event_eth_rx_adapter_start(TEST_INST_ID);
716 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
718 err = rte_event_eth_rx_adapter_stop(TEST_INST_ID);
719 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Instance id 1 was never created: both calls must fail. */
721 err = rte_event_eth_rx_adapter_start(1);
722 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
724 err = rte_event_eth_rx_adapter_stop(1);
725 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/* NOTE(review): the function signature is elided; by the suite table
 * below this is adapter_stats(). Checks that stats_get rejects a NULL
 * stats pointer and an unknown instance id, and succeeds otherwise. */
734 struct rte_event_eth_rx_adapter_stats stats;
736 err = rte_event_eth_rx_adapter_stats_get(TEST_INST_ID, NULL);
737 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
739 err = rte_event_eth_rx_adapter_stats_get(TEST_INST_ID, &stats);
740 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Instance id 1 was never created: must fail. */
742 err = rte_event_eth_rx_adapter_stats_get(1, &stats);
743 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/*
 * Polled-mode test suite: adapter_create/adapter_free wrap each case
 * as per-test setup/teardown (except adapter_create_free, which
 * manages its own adapter).
 */
748 static struct unit_test_suite event_eth_rx_tests = {
749 .suite_name = "rx event eth adapter test suite",
750 .setup = testsuite_setup,
751 .teardown = testsuite_teardown,
753 TEST_CASE_ST(NULL, NULL, adapter_create_free),
754 TEST_CASE_ST(adapter_create, adapter_free,
755 adapter_queue_add_del),
756 TEST_CASE_ST(adapter_create, adapter_free,
757 adapter_multi_eth_add_del),
758 TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop),
759 TEST_CASE_ST(adapter_create, adapter_free, adapter_stats),
760 TEST_CASES_END() /**< NULL terminate unit test array */
/*
 * Rx-interrupt test suite: a single case run only when setup found an
 * interrupt-capable port.
 * NOTE(review): suite_name duplicates the polled suite's name --
 * presumably intentional in the original, but worth confirming.
 */
764 static struct unit_test_suite event_eth_rx_intr_tests = {
765 .suite_name = "rx event eth adapter test suite",
766 .setup = testsuite_setup_rx_intr,
767 .teardown = testsuite_teardown_rx_intr,
769 TEST_CASE_ST(adapter_create, adapter_free,
770 adapter_intr_queue_add_del),
771 TEST_CASES_END() /**< NULL terminate unit test array */
/* Entry point for the polled-mode suite. */
776 test_event_eth_rx_adapter_common(void)
778 return unit_test_suite_runner(&event_eth_rx_tests);
/* Entry point for the Rx-interrupt suite. */
782 test_event_eth_rx_intr_adapter_common(void)
784 return unit_test_suite_runner(&event_eth_rx_intr_tests);
/* Register both suites with the DPDK test framework command table. */
787 REGISTER_TEST_COMMAND(event_eth_rx_adapter_autotest,
788 test_event_eth_rx_adapter_common);
789 REGISTER_TEST_COMMAND(event_eth_rx_intr_adapter_autotest,
790 test_event_eth_rx_intr_adapter_common);