1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
5 #include <rte_common.h>
6 #include <rte_mempool.h>
8 #include <rte_ethdev.h>
9 #include <rte_eventdev.h>
10 #include <rte_bus_vdev.h>
12 #include <rte_event_eth_rx_adapter.h>
16 #define MAX_NUM_RX_QUEUE 64
17 #define NB_MBUFS (8192 * num_ports * MAX_NUM_RX_QUEUE)
18 #define MBUF_CACHE_SIZE 512
19 #define MBUF_PRIV_SIZE 0
20 #define TEST_INST_ID 0
22 #define TEST_ETHDEV_ID 0
/* Shared state for the Rx adapter test suites.
 * NOTE(review): this view of the file is truncated — a "caps" member is
 * referenced later (rte_event_eth_rx_adapter_caps_get) but is not visible
 * in this struct fragment; confirm against the full source. */
24 struct event_eth_rx_adapter_test_params {
/* mbuf pool backing every Rx queue created by the tests */
25 struct rte_mempool *mp;
/* number of Rx/Tx queues configured per port in port_init_common() */
26 uint16_t rx_rings, tx_rings;
/* set to 1 once a port without the INTERNAL_PORT capability is found */
28 int rx_intr_port_inited;
/* port id used by the interrupt-mode (rx_intr) test suite */
29 uint16_t rx_intr_port;

32 static struct event_eth_rx_adapter_test_params default_params;
/* true when testsuite setup created the event_skeleton vdev (so teardown
 * knows to uninit it) */
33 static bool event_dev_created;
/* true when testsuite setup created the net_null vdev */
34 static bool eth_dev_created;
/* Common port bring-up for both test suites: configure the port with the
 * given conf, set up Rx/Tx queues fed from @mp, start the port and enable
 * promiscuous mode.
 * NOTE(review): interior lines (return-value checks, braces, the second
 * RTE_MIN() argument) are missing from this truncated view; comments below
 * describe only the visible calls. */
37 port_init_common(uint16_t port, const struct rte_eth_conf *port_conf,
38 struct rte_mempool *mp)
40 const uint16_t rx_ring_size = 512, tx_ring_size = 512;
43 struct rte_eth_dev_info dev_info;
/* reject invalid port ids up front */
45 if (!rte_eth_dev_is_valid_port(port))
/* configure with 0 queues first so dev_info can be queried */
48 retval = rte_eth_dev_configure(port, 0, 0, port_conf);
50 retval = rte_eth_dev_info_get(port, &dev_info);
/* clamp the Rx queue count to what the device supports */
54 default_params.rx_rings = RTE_MIN(dev_info.max_rx_queues,
56 default_params.tx_rings = 1;
58 /* Configure the Ethernet device. */
59 retval = rte_eth_dev_configure(port, default_params.rx_rings,
60 default_params.tx_rings, port_conf);
/* Set up the Rx queues, all drawing mbufs from the same pool. */
64 for (q = 0; q < default_params.rx_rings; q++) {
65 retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
66 rte_eth_dev_socket_id(port), NULL, mp);
71 /* Allocate and set up 1 TX queue per Ethernet port. */
72 for (q = 0; q < default_params.tx_rings; q++) {
73 retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
74 rte_eth_dev_socket_id(port), NULL);
79 /* Start the Ethernet port. */
80 retval = rte_eth_dev_start(port);
84 /* Display the port MAC address. */
85 struct rte_ether_addr addr;
86 retval = rte_eth_macaddr_get(port, &addr);
89 printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
90 " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
91 (unsigned int)port, RTE_ETHER_ADDR_BYTES(&addr));
93 /* Enable RX in promiscuous mode for the Ethernet device. */
94 retval = rte_eth_promiscuous_enable(port);
/* Port init used by the Rx-interrupt suite: no multi-queue Rx mode.
 * NOTE(review): the conf initializer is truncated here — upstream also sets
 * intr_conf.rxq; confirm against the full source. */
102 port_init_rx_intr(uint16_t port, struct rte_mempool *mp)
104 static const struct rte_eth_conf port_conf_default = {
106 .mq_mode = ETH_MQ_RX_NONE,
113 return port_init_common(port, &port_conf_default, mp);
/* Default (polling-mode) port init: plain config with no multi-queue Rx. */
117 port_init(uint16_t port, struct rte_mempool *mp)
119 static const struct rte_eth_conf port_conf_default = {
121 .mq_mode = ETH_MQ_RX_NONE,
125 return port_init_common(port, &port_conf_default, mp);
/* Create the test mempool and bring up every port in Rx-interrupt mode.
 * Records the first port WITHOUT the INTERNAL_PORT adapter capability in
 * default_params.rx_intr_port — that port is the one the interrupt tests
 * target — and stops the remaining ports. */
129 init_port_rx_intr(int num_ports)
135 default_params.mp = rte_pktmbuf_pool_create("packet_pool",
139 RTE_MBUF_DEFAULT_BUF_SIZE,
141 if (!default_params.mp)
144 RTE_ETH_FOREACH_DEV(portid) {
145 retval = port_init_rx_intr(portid, default_params.mp);
148 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, portid,
149 &default_params.caps);
/* a port serviced by a software poll thread (no internal event port)
 * is usable for interrupt-mode testing */
152 if (!(default_params.caps &
153 RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
154 default_params.rx_intr_port_inited = 1;
155 default_params.rx_intr_port = portid;
/* ports not selected above are stopped again */
158 retval = rte_eth_dev_stop(portid);
159 TEST_ASSERT(retval == 0, "Failed to stop port %u: %d\n",
/* Create (or reuse, if it already exists from a prior run) the test mempool
 * and initialize every ethdev port in polling mode. */
166 init_ports(int num_ports)
/* reuse an existing "packet_pool" so repeated setup calls don't fail */
171 struct rte_mempool *ptr = rte_mempool_lookup("packet_pool");
174 default_params.mp = rte_pktmbuf_pool_create("packet_pool",
178 RTE_MBUF_DEFAULT_BUF_SIZE,
181 default_params.mp = ptr;
183 if (!default_params.mp)
186 RTE_ETH_FOREACH_DEV(portid) {
187 retval = port_init(portid, default_params.mp);
/* Suite setup for the polling-mode tests: ensure an event device exists
 * (creating event_skeleton if none), configure it from its advertised
 * maxima, ensure an ethdev exists (creating net_null if none), init the
 * ports, then cache the adapter capabilities for TEST_ETHDEV_ID. */
196 testsuite_setup(void)
200 struct rte_event_dev_info dev_info;
202 count = rte_event_dev_count();
/* no real event device present: fall back to the skeleton vdev */
204 printf("Failed to find a valid event device,"
205 " testing with event_skeleton device\n");
206 err = rte_vdev_init("event_skeleton", NULL);
207 TEST_ASSERT(err == 0, "Failed to create event_skeleton. err=%d",
209 event_dev_created = true;
212 struct rte_event_dev_config config = {
213 .nb_event_queues = 1,
/* size the remaining config fields from the device's reported limits */
217 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
218 config.nb_event_queue_flows = dev_info.max_event_queue_flows;
219 config.nb_event_port_dequeue_depth =
220 dev_info.max_event_port_dequeue_depth;
221 config.nb_event_port_enqueue_depth =
222 dev_info.max_event_port_enqueue_depth;
223 config.nb_events_limit =
224 dev_info.max_num_events;
225 err = rte_event_dev_configure(TEST_DEV_ID, &config);
226 TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
/* no physical ethdev: use the net_null vdev instead */
229 count = rte_eth_dev_count_total();
231 printf("Testing with net_null device\n");
232 err = rte_vdev_init("net_null", NULL);
233 TEST_ASSERT(err == 0, "Failed to create net_null. err=%d",
235 eth_dev_created = true;
239 * eth devices like octeontx use event device to receive packets
240 * so rte_eth_dev_start invokes rte_event_dev_start internally, so
241 * call init_ports after rte_event_dev_configure
243 err = init_ports(rte_eth_dev_count_total());
244 TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
/* cache adapter caps once; individual tests branch on them */
246 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
247 &default_params.caps);
248 TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n",
/* Suite setup for the Rx-interrupt tests. Mirrors testsuite_setup() but
 * initializes ports via init_port_rx_intr() and caches the capabilities of
 * the recorded interrupt-capable port instead of TEST_ETHDEV_ID. */
255 testsuite_setup_rx_intr(void)
259 struct rte_event_dev_info dev_info;
261 count = rte_event_dev_count();
/* no real event device present: fall back to the skeleton vdev */
263 printf("Failed to find a valid event device,"
264 " testing with event_skeleton device\n");
265 err = rte_vdev_init("event_skeleton", NULL);
266 TEST_ASSERT(err == 0, "Failed to create event_skeleton. err=%d",
268 event_dev_created = true;
271 struct rte_event_dev_config config = {
272 .nb_event_queues = 1,
/* size the remaining config fields from the device's reported limits */
276 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
277 config.nb_event_queue_flows = dev_info.max_event_queue_flows;
278 config.nb_event_port_dequeue_depth =
279 dev_info.max_event_port_dequeue_depth;
280 config.nb_event_port_enqueue_depth =
281 dev_info.max_event_port_enqueue_depth;
282 config.nb_events_limit =
283 dev_info.max_num_events;
285 err = rte_event_dev_configure(TEST_DEV_ID, &config);
286 TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
/* no physical ethdev: use the net_null vdev instead */
289 count = rte_eth_dev_count_total();
291 printf("Testing with net_null device\n");
292 err = rte_vdev_init("net_null", NULL);
293 TEST_ASSERT(err == 0, "Failed to create net_null. err=%d",
295 eth_dev_created = true;
299 * eth devices like octeontx use event device to receive packets
300 * so rte_eth_dev_start invokes rte_event_dev_start internally, so
301 * call init_ports after rte_event_dev_configure
303 err = init_port_rx_intr(rte_eth_dev_count_total());
304 TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
/* nothing more to do if no interrupt-capable port was found */
306 if (!default_params.rx_intr_port_inited)
309 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID,
310 default_params.rx_intr_port,
311 &default_params.caps);
312 TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n", err);
/* Tear down everything testsuite_setup() created: stop ports, remove the
 * net_null and event_skeleton vdevs if we created them, free the mempool
 * and reset the shared test state. */
318 testsuite_teardown(void)
322 RTE_ETH_FOREACH_DEV(i)
/* only uninit vdevs this suite created itself */
325 if (eth_dev_created) {
326 err = rte_vdev_uninit("net_null");
328 printf("Failed to delete net_null. err=%d", err);
329 eth_dev_created = false;
332 rte_mempool_free(default_params.mp);
333 if (event_dev_created) {
334 err = rte_vdev_uninit("event_skeleton");
336 printf("Failed to delete event_skeleton. err=%d", err);
337 event_dev_created = false;
/* reset shared state so the other suite starts clean */
340 memset(&default_params, 0, sizeof(default_params));
/* Teardown for the Rx-interrupt suite: stop the interrupt test port and
 * release the vdevs/mempool, mirroring testsuite_teardown(). */
344 testsuite_teardown_rx_intr(void)
/* nothing was set up if no interrupt-capable port was found */
347 if (!default_params.rx_intr_port_inited)
/* NOTE(review): rte_eth_dev_stop() return value is ignored here although
 * it is checked elsewhere in this file — consider checking it too */
350 rte_eth_dev_stop(default_params.rx_intr_port);
351 if (eth_dev_created) {
352 err = rte_vdev_uninit("net_null");
354 printf("Failed to delete net_null. err=%d", err);
355 eth_dev_created = false;
357 rte_mempool_free(default_params.mp);
358 if (event_dev_created) {
359 err = rte_vdev_uninit("event_skeleton");
361 printf("Failed to delete event_skeleton. err=%d", err);
362 event_dev_created = false;
365 memset(&default_params, 0, sizeof(default_params));
/* Fragment of adapter_create() (per-test setup): build an event port conf
 * from the device maxima and create adapter instance TEST_INST_ID.
 * NOTE(review): the function signatures are missing from this truncated
 * view; the final line below belongs to adapter_free() (per-test teardown,
 * which simply frees the instance). */
372 struct rte_event_dev_info dev_info;
373 struct rte_event_port_conf rx_p_conf;
375 memset(&rx_p_conf, 0, sizeof(rx_p_conf));
377 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
378 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* size the adapter's event port to the device's maximum depths */
380 rx_p_conf.new_event_threshold = dev_info.max_num_events;
381 rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
382 rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
383 err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
385 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* adapter_free(): release the instance created above */
393 rte_event_eth_rx_adapter_free(TEST_INST_ID);
/* Test create/free error paths: a NULL conf (visible call passes only two
 * args in this truncated view) must fail, duplicate create returns -EEXIST,
 * double free and freeing an unknown instance return -EINVAL. */
397 adapter_create_free(void)
401 struct rte_event_port_conf rx_p_conf = {
404 .new_event_threshold = 1200,
/* first create call is expected to be rejected with -EINVAL */
407 err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
409 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/* valid create succeeds */
411 err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
413 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* creating the same instance id again must fail */
415 err = rte_event_eth_rx_adapter_create(TEST_INST_ID,
416 TEST_DEV_ID, &rx_p_conf);
417 TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err);
419 err = rte_event_eth_rx_adapter_free(TEST_INST_ID);
420 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* double free must fail */
422 err = rte_event_eth_rx_adapter_free(TEST_INST_ID);
423 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
/* freeing a never-created instance must fail */
425 err = rte_event_eth_rx_adapter_free(1);
426 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
/* Exercise queue add/del: invalid port id, single-queue vs all-queue (-1)
 * add/del, and invalid adapter id. Branches on the MULTI_EVENTQ capability.
 * NOTE(review): several call argument lines are missing from this
 * truncated view. */
432 adapter_queue_add_del(void)
438 struct rte_event_eth_rx_adapter_queue_conf queue_config;
440 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
442 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
445 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
448 queue_config.rx_queue_flags = 0;
/* request flow-id override only when the adapter supports it */
449 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
451 queue_config.rx_queue_flags =
452 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
454 queue_config.ev = ev;
455 queue_config.servicing_weight = 1;
/* out-of-range eth port id must be rejected */
457 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
458 rte_eth_dev_count_total(),
460 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/* per-queue add/del is only valid with the MULTI_EVENTQ capability */
462 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
463 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
466 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
468 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
470 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
472 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
476 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
478 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
481 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* without MULTI_EVENTQ, a specific-queue add must fail ... */
483 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
487 TEST_ASSERT(err == -EINVAL, "Expected EINVAL got %d", err);
/* ... while the all-queues form is accepted */
489 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
492 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
494 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
496 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
498 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
500 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
502 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
504 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* unknown adapter id must be rejected for both add and del */
507 err = rte_event_eth_rx_adapter_queue_add(1, TEST_ETHDEV_ID, -1,
509 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
511 err = rte_event_eth_rx_adapter_queue_del(1, TEST_ETHDEV_ID, -1);
512 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/* Scale test: create net_null vdevs up to RTE_MAX_ETHPORTS, add a queue
 * from every port to the adapter, delete them all, then uninit the vdevs.
 * NOTE(review): loop-initializer lines and some call arguments are missing
 * from this truncated view. */
518 adapter_multi_eth_add_del(void)
523 uint16_t port_index, port_index_base, drv_id = 0;
524 char driver_name[50];
526 struct rte_event_eth_rx_adapter_queue_conf queue_config;
529 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
532 queue_config.rx_queue_flags = 0;
533 queue_config.ev = ev;
534 queue_config.servicing_weight = 1;
536 /* stop eth devices for existing */
538 for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
539 err = rte_eth_dev_stop(port_index);
540 TEST_ASSERT(err == 0, "Failed to stop port %u: %d\n",
544 /* add the max port for rx_adapter */
545 port_index = rte_eth_dev_count_total();
546 port_index_base = port_index;
/* create net_null<N> vdevs until the ethdev table is full */
547 for (; port_index < RTE_MAX_ETHPORTS; port_index += 1) {
548 snprintf(driver_name, sizeof(driver_name), "%s%u", "net_null",
550 err = rte_vdev_init(driver_name, NULL);
551 TEST_ASSERT(err == 0, "Failed driver %s got %d",
556 err = init_ports(rte_eth_dev_count_total());
557 TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
559 /* eth_rx_adapter_queue_add for n ports */
561 for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
562 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
565 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
568 /* eth_rx_adapter_queue_del n ports */
570 for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
571 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
573 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
576 /* delete vdev ports */
577 for (drv_id = 0, port_index = port_index_base;
578 port_index < RTE_MAX_ETHPORTS;
579 drv_id += 1, port_index += 1) {
580 snprintf(driver_name, sizeof(driver_name), "%s%u", "net_null",
582 err = rte_vdev_uninit(driver_name);
583 TEST_ASSERT(err == 0, "Failed driver %s got %d",
/* Interrupt-mode queue add/del on the recorded rx_intr_port: a servicing
 * weight of 0 selects interrupt mode; the test also converts an interrupt
 * queue to poll mode (weight 1) and back. Skipped when no suitable port
 * was found during setup. */
591 adapter_intr_queue_add_del(void)
597 struct rte_event_eth_rx_adapter_queue_conf queue_config;
599 if (!default_params.rx_intr_port_inited)
602 eth_port = default_params.rx_intr_port;
603 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, eth_port, &cap);
604 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
607 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
610 queue_config.rx_queue_flags = 0;
611 queue_config.ev = ev;
613 /* weight = 0 => interrupt mode */
614 queue_config.servicing_weight = 0;
/* add a single interrupt-mode queue when per-queue adds are supported */
616 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
618 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
621 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
625 queue_config.servicing_weight = 0;
626 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
630 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
632 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
634 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
637 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
640 /* del remaining queues */
641 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
644 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
647 queue_config.servicing_weight = 0;
648 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
652 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
654 /* intr -> poll mode queue */
655 queue_config.servicing_weight = 1;
657 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
658 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
662 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
665 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
669 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
672 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
675 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Start/stop the adapter with and without queues attached, and verify an
 * unknown adapter id is rejected with -EINVAL. */
681 adapter_start_stop(void)
687 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
690 struct rte_event_eth_rx_adapter_queue_conf queue_config;
692 queue_config.rx_queue_flags = 0;
/* request flow-id override only when the cached caps allow it */
693 if (default_params.caps &
694 RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
696 queue_config.rx_queue_flags =
697 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
700 queue_config.ev = ev;
701 queue_config.servicing_weight = 1;
703 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, TEST_ETHDEV_ID,
705 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* start/stop with a queue attached */
707 err = rte_event_eth_rx_adapter_start(TEST_INST_ID);
708 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
710 err = rte_event_eth_rx_adapter_stop(TEST_INST_ID);
711 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
713 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID, TEST_ETHDEV_ID,
715 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* start/stop must also work with no queues attached */
717 err = rte_event_eth_rx_adapter_start(TEST_INST_ID);
718 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
720 err = rte_event_eth_rx_adapter_stop(TEST_INST_ID);
721 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* unknown adapter id is rejected */
723 err = rte_event_eth_rx_adapter_start(1);
724 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
726 err = rte_event_eth_rx_adapter_stop(1);
727 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/* Fragment of adapter_stats(): NULL stats pointer and unknown adapter id
 * must return -EINVAL; a valid query succeeds.
 * NOTE(review): the function signature line is missing from this
 * truncated view. */
736 struct rte_event_eth_rx_adapter_stats stats;
738 err = rte_event_eth_rx_adapter_stats_get(TEST_INST_ID, NULL);
739 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
741 err = rte_event_eth_rx_adapter_stats_get(TEST_INST_ID, &stats);
742 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
744 err = rte_event_eth_rx_adapter_stats_get(1, &stats);
745 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/* Polling-mode test suite: adapter_create/adapter_free wrap each case as
 * per-test setup/teardown (adapter_create_free manages its own instance). */
750 static struct unit_test_suite event_eth_rx_tests = {
751 .suite_name = "rx event eth adapter test suite",
752 .setup = testsuite_setup,
753 .teardown = testsuite_teardown,
755 TEST_CASE_ST(NULL, NULL, adapter_create_free),
756 TEST_CASE_ST(adapter_create, adapter_free,
757 adapter_queue_add_del),
758 TEST_CASE_ST(adapter_create, adapter_free,
759 adapter_multi_eth_add_del),
760 TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop),
761 TEST_CASE_ST(adapter_create, adapter_free, adapter_stats),
762 TEST_CASES_END() /**< NULL terminate unit test array */
/* Interrupt-mode test suite: uses the rx_intr setup/teardown pair and runs
 * only the interrupt queue add/del case. */
766 static struct unit_test_suite event_eth_rx_intr_tests = {
767 .suite_name = "rx event eth adapter test suite",
768 .setup = testsuite_setup_rx_intr,
769 .teardown = testsuite_teardown_rx_intr,
771 TEST_CASE_ST(adapter_create, adapter_free,
772 adapter_intr_queue_add_del),
773 TEST_CASES_END() /**< NULL terminate unit test array */
/* Entry points registered with the test framework: run each suite. */
778 test_event_eth_rx_adapter_common(void)
780 return unit_test_suite_runner(&event_eth_rx_tests);
784 test_event_eth_rx_intr_adapter_common(void)
786 return unit_test_suite_runner(&event_eth_rx_intr_tests);
/* expose the suites as "event_eth_rx_adapter_autotest" and
 * "event_eth_rx_intr_adapter_autotest" test commands */
789 REGISTER_TEST_COMMAND(event_eth_rx_adapter_autotest,
790 test_event_eth_rx_adapter_common);
791 REGISTER_TEST_COMMAND(event_eth_rx_intr_adapter_autotest,
792 test_event_eth_rx_intr_adapter_common);