1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
5 #include <rte_common.h>
6 #include <rte_mempool.h>
8 #include <rte_ethdev.h>
9 #include <rte_eventdev.h>
10 #include <rte_bus_vdev.h>
12 #include <rte_event_eth_rx_adapter.h>
/* Test configuration constants for the Rx adapter test suite. */
16 #define MAX_NUM_RX_QUEUE 64
/* NB_MBUFS expands a local `num_ports` at its point of use (see init_ports). */
17 #define NB_MBUFS (8192 * num_ports * MAX_NUM_RX_QUEUE)
18 #define MBUF_CACHE_SIZE 512
19 #define MBUF_PRIV_SIZE 0
/* Rx adapter instance id and ethdev port id exercised by the tests.
 * NOTE(review): TEST_DEV_ID is used below but its #define is on a line
 * elided from this view -- confirm it is defined nearby.
 */
20 #define TEST_INST_ID 0
22 #define TEST_ETHDEV_ID 0
/* Global state shared by the setup/teardown hooks and the test cases:
 * the common mbuf pool, the ring counts chosen in port_init_common(),
 * and the port (if any) selected for the Rx-interrupt tests.
 * NOTE(review): the `caps` member referenced below (default_params.caps)
 * is declared on a line elided from this view.
 */
24 struct event_eth_rx_adapter_test_params {
25 struct rte_mempool *mp;
26 uint16_t rx_rings, tx_rings;
/* Non-zero once a port suitable for interrupt-mode tests was found. */
28 int rx_intr_port_inited;
29 uint16_t rx_intr_port;
/* Single shared instance of the test parameters, zero-initialized. */
32 static struct event_eth_rx_adapter_test_params default_params;
/*
 * Bring up one ethdev port for the tests: validate the port id, pick the
 * Rx/Tx ring counts from the device capabilities, set up all Rx/Tx queues
 * backed by @mp, start the port, print its MAC and enable promiscuous mode.
 * NOTE(review): the return-type line, error-return paths and closing
 * braces are elided from this view.
 */
35 port_init_common(uint16_t port, const struct rte_eth_conf *port_conf,
36 struct rte_mempool *mp)
38 const uint16_t rx_ring_size = 512, tx_ring_size = 512;
41 struct rte_eth_dev_info dev_info;
43 if (!rte_eth_dev_is_valid_port(port))
/* Initial configure with zero queues; the real queue counts are chosen
 * next from dev_info and applied by a second configure call below. */
46 retval = rte_eth_dev_configure(port, 0, 0, port_conf);
48 rte_eth_dev_info_get(port, &dev_info);
/* Cap the Rx ring count at the device maximum (the other RTE_MIN
 * operand is on an elided line -- presumably MAX_NUM_RX_QUEUE). */
50 default_params.rx_rings = RTE_MIN(dev_info.max_rx_queues,
52 default_params.tx_rings = 1;
54 /* Configure the Ethernet device. */
55 retval = rte_eth_dev_configure(port, default_params.rx_rings,
56 default_params.tx_rings, port_conf);
/* Allocate and set up the Rx queues, all fed from the shared pool. */
60 for (q = 0; q < default_params.rx_rings; q++) {
61 retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
62 rte_eth_dev_socket_id(port), NULL, mp);
67 /* Allocate and set up 1 TX queue per Ethernet port. */
68 for (q = 0; q < default_params.tx_rings; q++) {
69 retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
70 rte_eth_dev_socket_id(port), NULL);
75 /* Start the Ethernet port. */
76 retval = rte_eth_dev_start(port);
80 /* Display the port MAC address. */
81 struct rte_ether_addr addr;
82 rte_eth_macaddr_get(port, &addr);
83 printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
84 " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
86 addr.addr_bytes[0], addr.addr_bytes[1],
87 addr.addr_bytes[2], addr.addr_bytes[3],
88 addr.addr_bytes[4], addr.addr_bytes[5]);
90 /* Enable RX in promiscuous mode for the Ethernet device. */
91 rte_eth_promiscuous_enable(port);
/* Initialize @port with a configuration intended for Rx-interrupt tests.
 * NOTE(review): the intr_conf/rxq flag presumably set in this config is
 * on an elided line -- only mq_mode is visible here.
 */
97 port_init_rx_intr(uint16_t port, struct rte_mempool *mp)
99 static const struct rte_eth_conf port_conf_default = {
101 .mq_mode = ETH_MQ_RX_NONE,
108 return port_init_common(port, &port_conf_default, mp);
/* Initialize @port with the default (polling-mode) port configuration. */
112 port_init(uint16_t port, struct rte_mempool *mp)
114 static const struct rte_eth_conf port_conf_default = {
116 .mq_mode = ETH_MQ_RX_NONE,
120 return port_init_common(port, &port_conf_default, mp);
/*
 * Create the shared mbuf pool and initialize every ethdev port in
 * Rx-interrupt mode.  Records the first port whose adapter caps lack
 * RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT (i.e. a port the SW
 * service-based adapter would drive) as the interrupt-test port.
 * NOTE(review): pool-size arguments and error-return paths are on
 * elided lines.
 */
124 init_port_rx_intr(int num_ports)
130 default_params.mp = rte_pktmbuf_pool_create("packet_pool",
134 RTE_MBUF_DEFAULT_BUF_SIZE,
136 if (!default_params.mp)
139 RTE_ETH_FOREACH_DEV(portid) {
140 retval = port_init_rx_intr(portid, default_params.mp);
141 retval = port_init_rx_intr(portid, default_params.mp);
143 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, portid,
144 &default_params.caps);
147 if (!(default_params.caps &
148 RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
149 default_params.rx_intr_port_inited = 1;
150 default_params.rx_intr_port = portid;
/* Ports not selected for the interrupt tests are stopped again.
 * NOTE(review): the surrounding control flow is partially elided. */
153 rte_eth_dev_stop(portid);
/*
 * Initialize all ethdev ports in polling mode, reusing the existing
 * "packet_pool" if a previous run already created it, otherwise
 * creating a fresh one.
 * NOTE(review): pool-size arguments and error-return paths are on
 * elided lines.
 */
159 init_ports(int num_ports)
/* Reuse a previously created pool rather than failing on a duplicate name. */
164 struct rte_mempool *ptr = rte_mempool_lookup("packet_pool");
167 default_params.mp = rte_pktmbuf_pool_create("packet_pool",
171 RTE_MBUF_DEFAULT_BUF_SIZE,
174 default_params.mp = ptr;
176 if (!default_params.mp)
179 RTE_ETH_FOREACH_DEV(portid) {
180 retval = port_init(portid, default_params.mp);
/*
 * Suite setup: ensure an event device exists (falling back to the
 * event_skeleton vdev), configure it to its advertised maximums,
 * initialize the ethdev ports, and cache the adapter capabilities
 * for TEST_ETHDEV_ID in default_params.caps.
 */
189 testsuite_setup(void)
193 struct rte_event_dev_info dev_info;
195 count = rte_event_dev_count();
197 printf("Failed to find a valid event device,"
198 " testing with event_skeleton device\n");
199 rte_vdev_init("event_skeleton", NULL);
202 struct rte_event_dev_config config = {
203 .nb_event_queues = 1,
/* Size the remaining config fields from the device's advertised limits. */
207 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
208 config.nb_event_queue_flows = dev_info.max_event_queue_flows;
209 config.nb_event_port_dequeue_depth =
210 dev_info.max_event_port_dequeue_depth;
211 config.nb_event_port_enqueue_depth =
212 dev_info.max_event_port_enqueue_depth;
213 config.nb_events_limit =
214 dev_info.max_num_events;
215 err = rte_event_dev_configure(TEST_DEV_ID, &config);
216 TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
220 * eth devices like octeontx use event device to receive packets
221 * so rte_eth_dev_start invokes rte_event_dev_start internally, so
222 * call init_ports after rte_event_dev_configure
224 err = init_ports(rte_eth_dev_count_total());
225 TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
227 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
228 &default_params.caps);
229 TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n",
/*
 * Interrupt-suite setup: same event-device bring-up as testsuite_setup(),
 * but ports are initialized in Rx-interrupt mode and the cached caps are
 * those of the selected interrupt-test port.  Returns early (on an elided
 * line) when no suitable interrupt port was found.
 */
236 testsuite_setup_rx_intr(void)
240 struct rte_event_dev_info dev_info;
242 count = rte_event_dev_count();
244 printf("Failed to find a valid event device,"
245 " testing with event_skeleton device\n");
246 rte_vdev_init("event_skeleton", NULL);
249 struct rte_event_dev_config config = {
250 .nb_event_queues = 1,
/* Size the remaining config fields from the device's advertised limits. */
254 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
255 config.nb_event_queue_flows = dev_info.max_event_queue_flows;
256 config.nb_event_port_dequeue_depth =
257 dev_info.max_event_port_dequeue_depth;
258 config.nb_event_port_enqueue_depth =
259 dev_info.max_event_port_enqueue_depth;
260 config.nb_events_limit =
261 dev_info.max_num_events;
263 err = rte_event_dev_configure(TEST_DEV_ID, &config);
264 TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
268 * eth devices like octeontx use event device to receive packets
269 * so rte_eth_dev_start invokes rte_event_dev_start internally, so
270 * call init_ports after rte_event_dev_configure
272 err = init_port_rx_intr(rte_eth_dev_count_total());
273 TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
275 if (!default_params.rx_intr_port_inited)
278 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID,
279 default_params.rx_intr_port,
280 &default_params.caps);
281 TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n", err);
/* Suite teardown: iterate all ports (per-port action on an elided line,
 * presumably rte_eth_dev_stop) and release the shared mbuf pool. */
287 testsuite_teardown(void)
290 RTE_ETH_FOREACH_DEV(i)
293 rte_mempool_free(default_params.mp);
/* Interrupt-suite teardown: nothing to undo if no interrupt port was
 * initialized; otherwise stop that port and free the shared pool. */
297 testsuite_teardown_rx_intr(void)
299 if (!default_params.rx_intr_port_inited)
302 rte_eth_dev_stop(default_params.rx_intr_port);
303 rte_mempool_free(default_params.mp);
/* (signature on an elided line -- presumably adapter_create, the setup
 * hook used by the suites below)
 * Create Rx adapter TEST_INST_ID on TEST_DEV_ID with an event-port
 * configuration sized from the device's advertised maximums.
 */
310 struct rte_event_dev_info dev_info;
311 struct rte_event_port_conf rx_p_conf;
313 memset(&rx_p_conf, 0, sizeof(rx_p_conf));
315 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
316 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
318 rx_p_conf.new_event_threshold = dev_info.max_num_events;
319 rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
320 rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
321 err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
323 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* (enclosing function elided -- presumably adapter_free, the teardown
 * hook used by the suites below): release the test adapter instance. */
331 rte_event_eth_rx_adapter_free(TEST_INST_ID);
/*
 * Exercise adapter create/free error handling: an invalid create
 * (conf argument on an elided line), a valid create, a duplicate
 * create (-EEXIST), a valid free, a double free (-EINVAL) and a
 * free of a never-created instance id (-EINVAL).
 */
335 adapter_create_free(void)
339 struct rte_event_port_conf rx_p_conf = {
342 .new_event_threshold = 1200,
345 err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
347 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
349 err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
351 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Creating the same instance id again must be rejected. */
353 err = rte_event_eth_rx_adapter_create(TEST_INST_ID,
354 TEST_DEV_ID, &rx_p_conf);
355 TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err);
357 err = rte_event_eth_rx_adapter_free(TEST_INST_ID);
358 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Double free and free of an unknown instance id must both fail. */
360 err = rte_event_eth_rx_adapter_free(TEST_INST_ID);
361 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
363 err = rte_event_eth_rx_adapter_free(1);
364 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
/*
 * Exercise queue add/del on the adapter: invalid port id, per-queue
 * add/del when the MULTI_EVENTQ capability is present, the -1
 * "all queues" form, and invalid adapter ids.  Several queue-id
 * arguments are on elided lines.
 */
370 adapter_queue_add_del(void)
376 struct rte_event_eth_rx_adapter_queue_conf queue_config;
378 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
380 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
383 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
386 queue_config.rx_queue_flags = 0;
/* Only request flow-id override when the adapter supports it. */
387 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
389 queue_config.rx_queue_flags =
390 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
392 queue_config.ev = ev;
393 queue_config.servicing_weight = 1;
/* Port id one past the last valid port must be rejected. */
395 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
396 rte_eth_dev_count_total(),
398 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
400 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
401 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
404 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
406 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
408 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
410 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
414 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
416 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
419 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Without MULTI_EVENTQ a specific queue id is invalid (elided arg). */
421 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
425 TEST_ASSERT(err == -EINVAL, "Expected EINVAL got %d", err);
427 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
430 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
432 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
434 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
436 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
438 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
440 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
442 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Unknown adapter id (1) must fail for both add and del. */
445 err = rte_event_eth_rx_adapter_queue_add(1, TEST_ETHDEV_ID, -1,
447 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
449 err = rte_event_eth_rx_adapter_queue_del(1, TEST_ETHDEV_ID, -1);
450 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/*
 * Scale test: stop the existing ports, create net_null vdevs until
 * RTE_MAX_ETHPORTS ports exist, re-init all ports, then add and
 * delete an Rx queue on the adapter for every port.  Queue-id and
 * error-path arguments are on elided lines.
 */
456 adapter_multi_eth_add_del(void)
461 uint16_t port_index, drv_id = 0;
462 char driver_name[50];
464 struct rte_event_eth_rx_adapter_queue_conf queue_config;
467 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
470 queue_config.rx_queue_flags = 0;
471 queue_config.ev = ev;
472 queue_config.servicing_weight = 1;
474 /* stop eth devices for existing */
476 for (; port_index < rte_eth_dev_count_total(); port_index += 1)
477 rte_eth_dev_stop(port_index);
479 /* add the max port for rx_adapter */
480 port_index = rte_eth_dev_count_total();
481 for (; port_index < RTE_MAX_ETHPORTS; port_index += 1) {
482 snprintf(driver_name, sizeof(driver_name), "%s%u", "net_null",
484 err = rte_vdev_init(driver_name, NULL);
485 TEST_ASSERT(err == 0, "Failed driver %s got %d",
490 err = init_ports(rte_eth_dev_count_total());
491 TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
493 /* eth_rx_adapter_queue_add for n ports */
495 for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
496 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
499 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
502 /* eth_rx_adapter_queue_del n ports */
504 for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
505 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
507 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/*
 * Exercise interrupt-mode queue add/del on the selected interrupt port:
 * servicing_weight == 0 requests interrupt mode; the test also switches
 * a queue from interrupt to poll mode (weight 1).  Skips (on an elided
 * line) when no interrupt-capable port was found during setup.
 */
514 adapter_intr_queue_add_del(void)
520 struct rte_event_eth_rx_adapter_queue_conf queue_config;
522 if (!default_params.rx_intr_port_inited)
525 eth_port = default_params.rx_intr_port;
526 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, eth_port, &cap);
527 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
530 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
533 queue_config.rx_queue_flags = 0;
534 queue_config.ev = ev;
536 /* weight = 0 => interrupt mode */
537 queue_config.servicing_weight = 0;
539 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
541 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
544 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
548 queue_config.servicing_weight = 0;
549 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
553 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
555 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
557 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
560 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
563 /* del remaining queues */
564 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
567 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
570 queue_config.servicing_weight = 0;
571 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
575 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
577 /* intr -> poll mode queue */
578 queue_config.servicing_weight = 1;
580 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
581 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
585 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
588 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
592 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
595 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
598 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/*
 * Exercise adapter start/stop: with a queue attached, with no queue
 * attached, and with an unknown adapter id (1), which must fail.
 */
604 adapter_start_stop(void)
610 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
613 struct rte_event_eth_rx_adapter_queue_conf queue_config;
615 queue_config.rx_queue_flags = 0;
/* Only request flow-id override when the cached caps allow it. */
616 if (default_params.caps &
617 RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
619 queue_config.rx_queue_flags =
620 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
623 queue_config.ev = ev;
624 queue_config.servicing_weight = 1;
626 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, TEST_ETHDEV_ID,
628 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
630 err = rte_event_eth_rx_adapter_start(TEST_INST_ID);
631 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
633 err = rte_event_eth_rx_adapter_stop(TEST_INST_ID);
634 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
636 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID, TEST_ETHDEV_ID,
638 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Start/stop must also succeed with no queues attached. */
640 err = rte_event_eth_rx_adapter_start(TEST_INST_ID);
641 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
643 err = rte_event_eth_rx_adapter_stop(TEST_INST_ID);
644 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
646 err = rte_event_eth_rx_adapter_start(1);
647 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
649 err = rte_event_eth_rx_adapter_stop(1);
650 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/* (signature on an elided line -- presumably adapter_stats, registered
 * in the suite below)
 * Exercise stats retrieval: NULL stats pointer (-EINVAL), a valid get,
 * and an unknown adapter id (-EINVAL).
 */
659 struct rte_event_eth_rx_adapter_stats stats;
661 err = rte_event_eth_rx_adapter_stats_get(TEST_INST_ID, NULL);
662 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
664 err = rte_event_eth_rx_adapter_stats_get(TEST_INST_ID, &stats);
665 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
667 err = rte_event_eth_rx_adapter_stats_get(1, &stats);
668 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/* Main (polling-mode) test suite: each case runs between the
 * adapter_create/adapter_free setup-teardown pair except the
 * create/free test itself. */
673 static struct unit_test_suite event_eth_rx_tests = {
674 .suite_name = "rx event eth adapter test suite",
675 .setup = testsuite_setup,
676 .teardown = testsuite_teardown,
678 TEST_CASE_ST(NULL, NULL, adapter_create_free),
679 TEST_CASE_ST(adapter_create, adapter_free,
680 adapter_queue_add_del),
681 TEST_CASE_ST(adapter_create, adapter_free,
682 adapter_multi_eth_add_del),
683 TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop),
684 TEST_CASE_ST(adapter_create, adapter_free, adapter_stats),
685 TEST_CASES_END() /**< NULL terminate unit test array */
/* Rx-interrupt test suite.
 * NOTE(review): suite_name is identical to event_eth_rx_tests above,
 * which makes runner output ambiguous -- consider a distinct name
 * (string literal left unchanged here). */
689 static struct unit_test_suite event_eth_rx_intr_tests = {
690 .suite_name = "rx event eth adapter test suite",
691 .setup = testsuite_setup_rx_intr,
692 .teardown = testsuite_teardown_rx_intr,
694 TEST_CASE_ST(adapter_create, adapter_free,
695 adapter_intr_queue_add_del),
696 TEST_CASES_END() /**< NULL terminate unit test array */
/* Entry point for the polling-mode suite. */
701 test_event_eth_rx_adapter_common(void)
703 return unit_test_suite_runner(&event_eth_rx_tests);
/* Entry point for the Rx-interrupt suite. */
707 test_event_eth_rx_intr_adapter_common(void)
709 return unit_test_suite_runner(&event_eth_rx_intr_tests);
/* Register both suites with the DPDK test framework. */
712 REGISTER_TEST_COMMAND(event_eth_rx_adapter_autotest,
713 test_event_eth_rx_adapter_common);
714 REGISTER_TEST_COMMAND(event_eth_rx_intr_adapter_autotest,
715 test_event_eth_rx_intr_adapter_common);