1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
5 #include <rte_common.h>
6 #include <rte_mempool.h>
8 #include <rte_ethdev.h>
9 #include <rte_eventdev.h>
10 #include <rte_bus_vdev.h>
12 #include <rte_event_eth_rx_adapter.h>
16 #define MAX_NUM_RX_QUEUE 64
17 #define NB_MBUFS (8192 * num_ports * MAX_NUM_RX_QUEUE)
18 #define MBUF_CACHE_SIZE 512
19 #define MBUF_PRIV_SIZE 0
20 #define TEST_INST_ID 0
22 #define TEST_ETHDEV_ID 0
/*
 * Shared state for the Rx event adapter test suites.
 * NOTE(review): this listing has interior lines elided — the struct's
 * closing brace is not visible, and a `caps` member (used widely below,
 * e.g. `default_params.caps`) does not appear here; confirm against the
 * full source.
 */
24 struct event_eth_rx_adapter_test_params {
/* mbuf pool shared by every test port */
25 struct rte_mempool *mp;
/* per-port queue counts chosen by port_init_common() */
26 uint16_t rx_rings, tx_rings;
/* set when a port without the INTERNAL_PORT cap was found (intr tests) */
28 int rx_intr_port_inited;
/* ethdev port id used by the interrupt-mode tests */
29 uint16_t rx_intr_port;
/* single global instance shared by all tests in this file */
32 static struct event_eth_rx_adapter_test_params default_params;
/*
 * Common ethdev bring-up used by both test setups: configure the port,
 * set up Rx/Tx queues backed by `mp`, start the port, print its MAC and
 * enable promiscuous mode.
 * NOTE(review): the return-type line, every `if (retval ...) return`
 * error check, loop-closing braces and the final `return 0;` are elided
 * from this listing — do not assume this compiles as shown.
 */
35 port_init_common(uint16_t port, const struct rte_eth_conf *port_conf,
36 struct rte_mempool *mp)
38 const uint16_t rx_ring_size = 512, tx_ring_size = 512;
41 struct rte_eth_dev_info dev_info;
/* reject invalid port ids before touching the device */
43 if (!rte_eth_dev_is_valid_port(port))
/* first configure with 0 queues so dev_info reflects this config */
46 retval = rte_eth_dev_configure(port, 0, 0, port_conf);
48 retval = rte_eth_dev_info_get(port, &dev_info);
/* clamp Rx queue count to what the device supports
 * (second RTE_MIN argument elided in this listing — presumably
 * MAX_NUM_RX_QUEUE; confirm) */
52 default_params.rx_rings = RTE_MIN(dev_info.max_rx_queues,
54 default_params.tx_rings = 1;
56 /* Configure the Ethernet device. */
57 retval = rte_eth_dev_configure(port, default_params.rx_rings,
58 default_params.tx_rings, port_conf);
/* one Rx queue per ring, all drawing mbufs from the shared pool */
62 for (q = 0; q < default_params.rx_rings; q++) {
63 retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
64 rte_eth_dev_socket_id(port), NULL, mp);
69 /* Allocate and set up 1 TX queue per Ethernet port. */
70 for (q = 0; q < default_params.tx_rings; q++) {
71 retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
72 rte_eth_dev_socket_id(port), NULL);
77 /* Start the Ethernet port. */
78 retval = rte_eth_dev_start(port);
82 /* Display the port MAC address. */
83 struct rte_ether_addr addr;
84 rte_eth_macaddr_get(port, &addr);
85 printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
86 " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
88 addr.addr_bytes[0], addr.addr_bytes[1],
89 addr.addr_bytes[2], addr.addr_bytes[3],
90 addr.addr_bytes[4], addr.addr_bytes[5]);
92 /* Enable RX in promiscuous mode for the Ethernet device. */
93 rte_eth_promiscuous_enable(port);
/*
 * Port init variant for the Rx-interrupt tests; delegates to
 * port_init_common() with a local default config.
 * NOTE(review): lines elided here — the rxmode block presumably also
 * sets `.intr_conf.rxq = 1` (not visible); confirm against full source.
 */
99 port_init_rx_intr(uint16_t port, struct rte_mempool *mp)
101 static const struct rte_eth_conf port_conf_default = {
103 .mq_mode = ETH_MQ_RX_NONE,
110 return port_init_common(port, &port_conf_default, mp);
/*
 * Default (poll-mode) port init; delegates to port_init_common() with
 * a plain ETH_MQ_RX_NONE configuration.
 */
114 port_init(uint16_t port, struct rte_mempool *mp)
116 static const struct rte_eth_conf port_conf_default = {
118 .mq_mode = ETH_MQ_RX_NONE,
122 return port_init_common(port, &port_conf_default, mp);
/*
 * Create the shared mbuf pool and initialize every ethdev in interrupt
 * mode, then record the first port that does NOT have the
 * INTERNAL_PORT capability as the interrupt-test port; all other ports
 * are stopped again.
 * NOTE(review): pool-size arguments, error checks and closing braces
 * are elided in this listing.
 */
126 init_port_rx_intr(int num_ports)
132 default_params.mp = rte_pktmbuf_pool_create("packet_pool",
136 RTE_MBUF_DEFAULT_BUF_SIZE,
138 if (!default_params.mp)
141 RTE_ETH_FOREACH_DEV(portid) {
142 retval = port_init_rx_intr(portid, default_params.mp);
/* query how this port pairs with the event device */
145 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, portid,
146 &default_params.caps);
/* interrupt-mode servicing only applies to ports the adapter
 * polls in software, i.e. without an internal event port */
149 if (!(default_params.caps &
150 RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
151 default_params.rx_intr_port_inited = 1;
152 default_params.rx_intr_port = portid;
/* ports not selected for the intr test are stopped */
155 rte_eth_dev_stop(portid);
/*
 * Initialize all ethdev ports in poll mode, reusing the "packet_pool"
 * mempool if it already exists (this function can be re-entered, e.g.
 * from adapter_multi_eth_add_del()).
 * NOTE(review): the lookup/create branch structure and error checks are
 * partially elided in this listing.
 */
161 init_ports(int num_ports)
/* reuse an existing pool from a previous call if present */
166 struct rte_mempool *ptr = rte_mempool_lookup("packet_pool");
169 default_params.mp = rte_pktmbuf_pool_create("packet_pool",
173 RTE_MBUF_DEFAULT_BUF_SIZE,
176 default_params.mp = ptr;
178 if (!default_params.mp)
181 RTE_ETH_FOREACH_DEV(portid) {
182 retval = port_init(portid, default_params.mp);
/*
 * Suite setup for the poll-mode tests: ensure an event device exists
 * (creating event_skeleton if none), configure it to the device's
 * advertised maxima, init all ethdev ports, then cache the adapter
 * capabilities for TEST_ETHDEV_ID in default_params.caps.
 * NOTE(review): the `if (!count)` guard around the vdev fallback and
 * several asserts are elided from this listing.
 */
191 testsuite_setup(void)
195 struct rte_event_dev_info dev_info;
197 count = rte_event_dev_count();
199 printf("Failed to find a valid event device,"
200 " testing with event_skeleton device\n");
/* fall back to the software skeleton event PMD */
201 rte_vdev_init("event_skeleton", NULL);
204 struct rte_event_dev_config config = {
205 .nb_event_queues = 1,
/* size the config from the device's reported limits */
209 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
210 config.nb_event_queue_flows = dev_info.max_event_queue_flows;
211 config.nb_event_port_dequeue_depth =
212 dev_info.max_event_port_dequeue_depth;
213 config.nb_event_port_enqueue_depth =
214 dev_info.max_event_port_enqueue_depth;
215 config.nb_events_limit =
216 dev_info.max_num_events;
217 err = rte_event_dev_configure(TEST_DEV_ID, &config);
218 TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
222 * eth devices like octeontx use event device to receive packets
223 * so rte_eth_dev_start invokes rte_event_dev_start internally, so
224 * call init_ports after rte_event_dev_configure
226 err = init_ports(rte_eth_dev_count_total());
227 TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
229 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
230 &default_params.caps);
231 TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n",
/*
 * Suite setup for the Rx-interrupt tests. Mirrors testsuite_setup()
 * but initializes ports via init_port_rx_intr() and caches the caps of
 * the selected interrupt-capable port. Skips cap query when no
 * suitable port was found (rx_intr_port_inited == 0).
 * NOTE(review): guards and some asserts are elided in this listing.
 */
238 testsuite_setup_rx_intr(void)
242 struct rte_event_dev_info dev_info;
244 count = rte_event_dev_count();
246 printf("Failed to find a valid event device,"
247 " testing with event_skeleton device\n");
248 rte_vdev_init("event_skeleton", NULL);
251 struct rte_event_dev_config config = {
252 .nb_event_queues = 1,
256 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
257 config.nb_event_queue_flows = dev_info.max_event_queue_flows;
258 config.nb_event_port_dequeue_depth =
259 dev_info.max_event_port_dequeue_depth;
260 config.nb_event_port_enqueue_depth =
261 dev_info.max_event_port_enqueue_depth;
262 config.nb_events_limit =
263 dev_info.max_num_events;
265 err = rte_event_dev_configure(TEST_DEV_ID, &config);
266 TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
270 * eth devices like octeontx use event device to receive packets
271 * so rte_eth_dev_start invokes rte_event_dev_start internally, so
272 * call init_ports after rte_event_dev_configure
274 err = init_port_rx_intr(rte_eth_dev_count_total());
275 TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
/* nothing more to do if no interrupt-capable port exists */
277 if (!default_params.rx_intr_port_inited)
280 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID,
281 default_params.rx_intr_port,
282 &default_params.caps);
283 TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n", err);
/*
 * Suite teardown: stop every ethdev port and release the shared mbuf
 * pool. NOTE(review): the per-port stop call inside the FOREACH body
 * is elided in this listing.
 */
289 testsuite_teardown(void)
292 RTE_ETH_FOREACH_DEV(i)
295 rte_mempool_free(default_params.mp);
/*
 * Interrupt-suite teardown: only the single interrupt-test port was
 * left running, so stop it (if one was selected) and free the pool.
 */
299 testsuite_teardown_rx_intr(void)
301 if (!default_params.rx_intr_port_inited)
304 rte_eth_dev_stop(default_params.rx_intr_port);
305 rte_mempool_free(default_params.mp);
/*
 * NOTE(review): the signature line is elided here; per the suite table
 * below this is presumably `adapter_create`, the per-test setup that
 * creates adapter instance TEST_INST_ID with a port config derived
 * from the event device's maxima. Confirm against full source.
 */
312 struct rte_event_dev_info dev_info;
313 struct rte_event_port_conf rx_p_conf;
315 memset(&rx_p_conf, 0, sizeof(rx_p_conf));
317 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
318 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* size the adapter's event port at the device limits */
320 rx_p_conf.new_event_threshold = dev_info.max_num_events;
321 rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
322 rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
323 err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
325 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Per-test teardown (presumably `adapter_free`; signature elided):
 * destroy the adapter instance created by adapter_create(). */
333 rte_event_eth_rx_adapter_free(TEST_INST_ID);
/*
 * Exercise create/free error paths: NULL conf -> -EINVAL, valid
 * create -> 0, duplicate create -> -EEXIST, free -> 0, double free and
 * free of an unknown instance id -> -EINVAL.
 * NOTE(review): the conf argument of the first create call (expected
 * NULL) is on an elided line.
 */
337 adapter_create_free(void)
341 struct rte_event_port_conf rx_p_conf = {
344 .new_event_threshold = 1200,
347 err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
349 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
351 err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
353 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* creating the same instance id twice must fail */
355 err = rte_event_eth_rx_adapter_create(TEST_INST_ID,
356 TEST_DEV_ID, &rx_p_conf);
357 TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err);
359 err = rte_event_eth_rx_adapter_free(TEST_INST_ID);
360 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* double free must fail */
362 err = rte_event_eth_rx_adapter_free(TEST_INST_ID);
363 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
/* freeing a never-created instance id must fail */
365 err = rte_event_eth_rx_adapter_free(1);
366 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
/*
 * Exercise queue add/del: invalid eth port id -> -EINVAL; with the
 * MULTI_EVENTQ cap, add/del a single queue and all queues (-1); without
 * it, a specific queue id is rejected and only -1 (all queues) works;
 * finally, calls on a non-existent adapter instance -> -EINVAL.
 * NOTE(review): many queue-id arguments and closing braces of the
 * if/else branches are on elided lines; the branch boundaries shown
 * here are approximate.
 */
372 adapter_queue_add_del(void)
378 struct rte_event_eth_rx_adapter_queue_conf queue_config;
380 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
382 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
385 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
388 queue_config.rx_queue_flags = 0;
/* only set the flow-id override flag if the PMD supports it */
389 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
391 queue_config.rx_queue_flags =
392 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
394 queue_config.ev = ev;
395 queue_config.servicing_weight = 1;
/* out-of-range port id must be rejected */
397 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
398 rte_eth_dev_count_total(),
400 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
402 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
403 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
406 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
408 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
410 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
412 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
416 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
418 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
421 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
423 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
427 TEST_ASSERT(err == -EINVAL, "Expected EINVAL got %d", err);
429 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
432 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
434 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
436 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
438 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
440 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
442 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
444 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* unknown adapter instance id must be rejected for add and del */
447 err = rte_event_eth_rx_adapter_queue_add(1, TEST_ETHDEV_ID, -1,
449 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
451 err = rte_event_eth_rx_adapter_queue_del(1, TEST_ETHDEV_ID, -1);
452 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/*
 * Stress the adapter with the maximum number of eth ports: stop the
 * existing devices, create net_null vdevs until RTE_MAX_ETHPORTS is
 * reached, re-run init_ports(), then add and delete an adapter queue
 * for every port.
 * NOTE(review): the loop-index resets (`port_index = 0`) before the
 * add/del loops, drv_id increments, and some call arguments are on
 * elided lines.
 */
458 adapter_multi_eth_add_del(void)
463 uint16_t port_index, drv_id = 0;
464 char driver_name[50];
466 struct rte_event_eth_rx_adapter_queue_conf queue_config;
469 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
472 queue_config.rx_queue_flags = 0;
473 queue_config.ev = ev;
474 queue_config.servicing_weight = 1;
476 /* stop eth devices for existing */
478 for (; port_index < rte_eth_dev_count_total(); port_index += 1)
479 rte_eth_dev_stop(port_index);
481 /* add the max port for rx_adapter */
482 port_index = rte_eth_dev_count_total();
483 for (; port_index < RTE_MAX_ETHPORTS; port_index += 1) {
484 snprintf(driver_name, sizeof(driver_name), "%s%u", "net_null",
486 err = rte_vdev_init(driver_name, NULL);
487 TEST_ASSERT(err == 0, "Failed driver %s got %d",
/* re-initialize all ports, including the new net_null vdevs */
492 err = init_ports(rte_eth_dev_count_total());
493 TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
495 /* eth_rx_adapter_queue_add for n ports */
497 for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
498 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
501 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
504 /* eth_rx_adapter_queue_del n ports */
506 for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
507 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
509 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/*
 * Exercise interrupt-mode queues (servicing_weight == 0) on the port
 * selected by init_port_rx_intr(): add/del a single interrupt queue
 * and all queues, then convert an interrupt queue to poll mode by
 * re-adding it with servicing_weight == 1. Skipped when no
 * interrupt-capable port exists.
 * NOTE(review): queue-id arguments and several branch-closing braces
 * are on elided lines.
 */
516 adapter_intr_queue_add_del(void)
522 struct rte_event_eth_rx_adapter_queue_conf queue_config;
/* nothing to test without an interrupt-capable port */
524 if (!default_params.rx_intr_port_inited)
527 eth_port = default_params.rx_intr_port;
528 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, eth_port, &cap);
529 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
532 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
535 queue_config.rx_queue_flags = 0;
536 queue_config.ev = ev;
538 /* weight = 0 => interrupt mode */
539 queue_config.servicing_weight = 0;
541 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
543 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
546 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
550 queue_config.servicing_weight = 0;
551 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
555 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
557 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
559 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
562 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
565 /* del remaining queues */
566 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
569 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
572 queue_config.servicing_weight = 0;
573 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
577 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
579 /* intr -> poll mode queue */
580 queue_config.servicing_weight = 1;
582 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
583 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
587 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
590 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
594 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
597 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
600 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/*
 * Exercise adapter start/stop: start and stop with a queue configured,
 * then with no queues, then verify start/stop of a non-existent
 * adapter instance returns -EINVAL.
 * NOTE(review): queue-id arguments and some declarations are on elided
 * lines in this listing.
 */
606 adapter_start_stop(void)
612 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
615 struct rte_event_eth_rx_adapter_queue_conf queue_config;
617 queue_config.rx_queue_flags = 0;
618 if (default_params.caps &
619 RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
621 queue_config.rx_queue_flags =
622 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
625 queue_config.ev = ev;
626 queue_config.servicing_weight = 1;
628 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, TEST_ETHDEV_ID,
630 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
632 err = rte_event_eth_rx_adapter_start(TEST_INST_ID);
633 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
635 err = rte_event_eth_rx_adapter_stop(TEST_INST_ID);
636 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
638 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID, TEST_ETHDEV_ID,
640 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* start/stop must also succeed with no queues configured */
642 err = rte_event_eth_rx_adapter_start(TEST_INST_ID);
643 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
645 err = rte_event_eth_rx_adapter_stop(TEST_INST_ID);
646 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* unknown adapter instance id must be rejected */
648 err = rte_event_eth_rx_adapter_start(1);
649 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
651 err = rte_event_eth_rx_adapter_stop(1);
652 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/*
 * NOTE(review): signature line elided; per the suite table this is
 * presumably `adapter_stats`. Verifies stats_get rejects a NULL stats
 * pointer and an unknown instance id, and succeeds for a valid call.
 */
661 struct rte_event_eth_rx_adapter_stats stats;
663 err = rte_event_eth_rx_adapter_stats_get(TEST_INST_ID, NULL);
664 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
666 err = rte_event_eth_rx_adapter_stats_get(TEST_INST_ID, &stats);
667 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
669 err = rte_event_eth_rx_adapter_stats_get(1, &stats);
670 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/*
 * Poll-mode test suite: adapter lifecycle, queue add/del, multi-port
 * add/del, start/stop and stats, each bracketed by adapter
 * create/free. NOTE(review): the `.unit_test_cases = {` line appears
 * to be elided from this listing.
 */
675 static struct unit_test_suite event_eth_rx_tests = {
676 .suite_name = "rx event eth adapter test suite",
677 .setup = testsuite_setup,
678 .teardown = testsuite_teardown,
680 TEST_CASE_ST(NULL, NULL, adapter_create_free),
681 TEST_CASE_ST(adapter_create, adapter_free,
682 adapter_queue_add_del),
683 TEST_CASE_ST(adapter_create, adapter_free,
684 adapter_multi_eth_add_del),
685 TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop),
686 TEST_CASE_ST(adapter_create, adapter_free, adapter_stats),
687 TEST_CASES_END() /**< NULL terminate unit test array */
/*
 * Interrupt-mode test suite: a single interrupt queue add/del case,
 * using the rx-intr variants of setup/teardown.
 */
691 static struct unit_test_suite event_eth_rx_intr_tests = {
692 .suite_name = "rx event eth adapter test suite",
693 .setup = testsuite_setup_rx_intr,
694 .teardown = testsuite_teardown_rx_intr,
696 TEST_CASE_ST(adapter_create, adapter_free,
697 adapter_intr_queue_add_del),
698 TEST_CASES_END() /**< NULL terminate unit test array */
/* Entry point for the poll-mode suite runner. */
703 test_event_eth_rx_adapter_common(void)
705 return unit_test_suite_runner(&event_eth_rx_tests);
/* Entry point for the interrupt-mode suite runner. */
709 test_event_eth_rx_intr_adapter_common(void)
711 return unit_test_suite_runner(&event_eth_rx_intr_tests);
/* Register both suites with the DPDK test harness command table. */
714 REGISTER_TEST_COMMAND(event_eth_rx_adapter_autotest,
715 test_event_eth_rx_adapter_common);
716 REGISTER_TEST_COMMAND(event_eth_rx_intr_adapter_autotest,
717 test_event_eth_rx_intr_adapter_common);