1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
 */
5 #include <rte_common.h>
6 #include <rte_mempool.h>
8 #include <rte_ethdev.h>
9 #include <rte_eventdev.h>
10 #include <rte_bus_vdev.h>
12 #include <rte_event_eth_rx_adapter.h>
/* Mempool sizing: NB_MBUFS scales with a num_ports variable in scope at
 * the expansion site and with the per-port Rx queue maximum. */
16 #define MAX_NUM_RX_QUEUE 64
17 #define NB_MBUFS (8192 * num_ports * MAX_NUM_RX_QUEUE)
18 #define MBUF_CACHE_SIZE 512
19 #define MBUF_PRIV_SIZE 0
/* Fixed ids used throughout the tests: adapter instance 0, ethdev port 0. */
20 #define TEST_INST_ID 0
22 #define TEST_ETHDEV_ID 0
/* Test-suite state shared between setup/teardown and the test cases.
 * NOTE(review): a `caps` member is referenced elsewhere in this file
 * (default_params.caps) but is not visible in this view — the struct
 * body appears truncated; confirm against the full source. */
24 struct event_eth_rx_adapter_test_params {
25 struct rte_mempool *mp; /* mbuf pool backing every Rx queue */
26 uint16_t rx_rings, tx_rings; /* queue counts chosen in port_init_common() */
28 int rx_intr_port_inited; /* set once an intr-capable port is found */
29 uint16_t rx_intr_port; /* port id used by the Rx-interrupt tests */
/* Single shared instance of the above. */
32 static struct event_eth_rx_adapter_test_params default_params;
/*
 * Bring up one ethdev for the tests: configure it, create Rx queues fed
 * from @mp and Tx queues, start the port, print its MAC address and
 * enable promiscuous mode.
 * NOTE(review): error-handling lines between the calls are elided in
 * this view; each `retval` is presumably checked before continuing —
 * confirm against the full source.
 */
35 port_init_common(uint16_t port, const struct rte_eth_conf *port_conf,
36 struct rte_mempool *mp)
38 const uint16_t rx_ring_size = 512, tx_ring_size = 512;
41 struct rte_eth_dev_info dev_info;
43 if (!rte_eth_dev_is_valid_port(port))
/* Probe configure with zero queues first, then query device limits. */
46 retval = rte_eth_dev_configure(port, 0, 0, port_conf);
48 retval = rte_eth_dev_info_get(port, &dev_info);
/* Clamp the Rx queue count to what the device supports; one Tx queue. */
52 default_params.rx_rings = RTE_MIN(dev_info.max_rx_queues,
54 default_params.tx_rings = 1;
56 /* Configure the Ethernet device. */
57 retval = rte_eth_dev_configure(port, default_params.rx_rings,
58 default_params.tx_rings, port_conf);
/* One Rx queue per ring; default queue conf, mbufs drawn from @mp. */
62 for (q = 0; q < default_params.rx_rings; q++) {
63 retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
64 rte_eth_dev_socket_id(port), NULL, mp);
69 /* Allocate and set up 1 TX queue per Ethernet port. */
70 for (q = 0; q < default_params.tx_rings; q++) {
71 retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
72 rte_eth_dev_socket_id(port), NULL);
77 /* Start the Ethernet port. */
78 retval = rte_eth_dev_start(port);
82 /* Display the port MAC address. */
83 struct rte_ether_addr addr;
84 retval = rte_eth_macaddr_get(port, &addr);
87 printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
88 " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
90 addr.addr_bytes[0], addr.addr_bytes[1],
91 addr.addr_bytes[2], addr.addr_bytes[3],
92 addr.addr_bytes[4], addr.addr_bytes[5]);
94 /* Enable RX in promiscuous mode for the Ethernet device. */
95 retval = rte_eth_promiscuous_enable(port);
/*
 * Port init variant for the Rx-interrupt tests; delegates to
 * port_init_common() with a default conf using ETH_MQ_RX_NONE.
 * NOTE(review): the Rx interrupt enable flag (intr_conf.rxq) is likely
 * set in the elided conf initializer lines — confirm in full source.
 */
103 port_init_rx_intr(uint16_t port, struct rte_mempool *mp)
105 static const struct rte_eth_conf port_conf_default = {
107 .mq_mode = ETH_MQ_RX_NONE,
114 return port_init_common(port, &port_conf_default, mp);
/* Default (poll-mode) port init: ETH_MQ_RX_NONE conf handed to
 * port_init_common(). */
118 port_init(uint16_t port, struct rte_mempool *mp)
120 static const struct rte_eth_conf port_conf_default = {
122 .mq_mode = ETH_MQ_RX_NONE,
126 return port_init_common(port, &port_conf_default, mp);
/*
 * Create the shared "packet_pool" mempool and init every ethdev in
 * Rx-interrupt mode.  Records the first port whose adapter caps lack
 * RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT (i.e. one serviced by the
 * SW adapter path) in default_params.rx_intr_port; other ports are
 * stopped again.
 */
130 init_port_rx_intr(int num_ports)
136 default_params.mp = rte_pktmbuf_pool_create("packet_pool",
140 RTE_MBUF_DEFAULT_BUF_SIZE,
142 if (!default_params.mp)
145 RTE_ETH_FOREACH_DEV(portid) {
146 retval = port_init_rx_intr(portid, default_params.mp);
149 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, portid,
150 &default_params.caps);
153 if (!(default_params.caps &
154 RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
/* First non-internal-port device: use it for the intr tests. */
155 default_params.rx_intr_port_inited = 1;
156 default_params.rx_intr_port = portid;
159 rte_eth_dev_stop(portid);
/*
 * Create the "packet_pool" mempool — or reuse it if a previous run
 * already created it (rte_mempool_lookup) — then run port_init() on
 * every ethdev.
 */
165 init_ports(int num_ports)
170 struct rte_mempool *ptr = rte_mempool_lookup("packet_pool");
173 default_params.mp = rte_pktmbuf_pool_create("packet_pool",
177 RTE_MBUF_DEFAULT_BUF_SIZE,
/* Pool already existed: reuse the looked-up instance. */
180 default_params.mp = ptr;
182 if (!default_params.mp)
185 RTE_ETH_FOREACH_DEV(portid) {
186 retval = port_init(portid, default_params.mp);
/*
 * Suite setup: ensure an event device exists (falling back to the
 * event_skeleton vdev), configure it with one event queue sized from
 * the device's advertised maxima, init the ethdev ports, then cache
 * the adapter capabilities for TEST_ETHDEV_ID in default_params.caps.
 */
195 testsuite_setup(void)
199 struct rte_event_dev_info dev_info;
201 count = rte_event_dev_count();
203 printf("Failed to find a valid event device,"
204 " testing with event_skeleton device\n");
205 rte_vdev_init("event_skeleton", NULL);
208 struct rte_event_dev_config config = {
209 .nb_event_queues = 1,
/* Size the eventdev config from the device's advertised maxima. */
213 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
214 config.nb_event_queue_flows = dev_info.max_event_queue_flows;
215 config.nb_event_port_dequeue_depth =
216 dev_info.max_event_port_dequeue_depth;
217 config.nb_event_port_enqueue_depth =
218 dev_info.max_event_port_enqueue_depth;
219 config.nb_events_limit =
220 dev_info.max_num_events;
221 err = rte_event_dev_configure(TEST_DEV_ID, &config);
222 TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
226 * eth devices like octeontx use event device to receive packets
227 * so rte_eth_dev_start invokes rte_event_dev_start internally, so
228 * call init_ports after rte_event_dev_configure
230 err = init_ports(rte_eth_dev_count_total());
231 TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
233 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
234 &default_params.caps);
235 TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n",
/*
 * Rx-interrupt suite setup: same eventdev bring-up as
 * testsuite_setup(), but ports are initialized through
 * init_port_rx_intr(); the caps query targets the recorded
 * rx_intr_port and is skipped when none was found.
 */
242 testsuite_setup_rx_intr(void)
246 struct rte_event_dev_info dev_info;
248 count = rte_event_dev_count();
250 printf("Failed to find a valid event device,"
251 " testing with event_skeleton device\n");
252 rte_vdev_init("event_skeleton", NULL);
255 struct rte_event_dev_config config = {
256 .nb_event_queues = 1,
/* Size the eventdev config from the device's advertised maxima. */
260 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
261 config.nb_event_queue_flows = dev_info.max_event_queue_flows;
262 config.nb_event_port_dequeue_depth =
263 dev_info.max_event_port_dequeue_depth;
264 config.nb_event_port_enqueue_depth =
265 dev_info.max_event_port_enqueue_depth;
266 config.nb_events_limit =
267 dev_info.max_num_events;
269 err = rte_event_dev_configure(TEST_DEV_ID, &config);
270 TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
274 * eth devices like octeontx use event device to receive packets
275 * so rte_eth_dev_start invokes rte_event_dev_start internally, so
276 * call init_ports after rte_event_dev_configure
278 err = init_port_rx_intr(rte_eth_dev_count_total());
279 TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
/* No interrupt-capable port found: nothing more to prepare. */
281 if (!default_params.rx_intr_port_inited)
284 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID,
285 default_params.rx_intr_port,
286 &default_params.caps);
287 TEST_ASSERT(err == 0, "Failed to get adapter cap err %d", err);
/* Suite teardown: stop every ethdev and free the shared mbuf pool. */
293 testsuite_teardown(void)
296 RTE_ETH_FOREACH_DEV(i)
299 rte_mempool_free(default_params.mp);
/* Rx-interrupt suite teardown: stop the intr test port (only if one
 * was initialized) and free the mbuf pool. */
303 testsuite_teardown_rx_intr(void)
305 if (!default_params.rx_intr_port_inited)
308 rte_eth_dev_stop(default_params.rx_intr_port);
309 rte_mempool_free(default_params.mp);
/*
 * Per-test setup (adapter_create): create adapter TEST_INST_ID on
 * TEST_DEV_ID with an event port config sized from the eventdev's
 * maxima.  (Function headers are elided in this view.)
 */
316 struct rte_event_dev_info dev_info;
317 struct rte_event_port_conf rx_p_conf;
319 memset(&rx_p_conf, 0, sizeof(rx_p_conf));
321 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
322 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
324 rx_p_conf.new_event_threshold = dev_info.max_num_events;
325 rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
326 rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
327 err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
329 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Per-test teardown (adapter_free): destroy the adapter instance. */
337 rte_event_eth_rx_adapter_free(TEST_INST_ID);
/*
 * Exercise create/free error paths: NULL conf -> -EINVAL, duplicate
 * create -> -EEXIST, double free and unknown instance id -> -EINVAL.
 */
341 adapter_create_free(void)
345 struct rte_event_port_conf rx_p_conf = {
348 .new_event_threshold = 1200,
/* create with NULL port conf (args elided in this view) */
351 err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
353 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
355 err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
357 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* second create of the same instance must fail */
359 err = rte_event_eth_rx_adapter_create(TEST_INST_ID,
360 TEST_DEV_ID, &rx_p_conf);
361 TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err);
363 err = rte_event_eth_rx_adapter_free(TEST_INST_ID);
364 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* double free and free of a never-created instance */
366 err = rte_event_eth_rx_adapter_free(TEST_INST_ID);
367 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
369 err = rte_event_eth_rx_adapter_free(1);
370 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
/*
 * Queue add/del coverage on the default ethdev: invalid port id,
 * single-queue vs all-queues (-1) forms gated on the MULTI_EVENTQ
 * capability, and unknown adapter instance ids.
 */
376 adapter_queue_add_del(void)
382 struct rte_event_eth_rx_adapter_queue_conf queue_config;
384 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
386 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
389 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
392 queue_config.rx_queue_flags = 0;
/* Use a caller-supplied flow id only if the PMD can override it. */
393 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
395 queue_config.rx_queue_flags =
396 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
398 queue_config.ev = ev;
399 queue_config.servicing_weight = 1;
/* Out-of-range ethdev id must be rejected. */
401 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
402 rte_eth_dev_count_total(),
404 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/* With MULTI_EVENTQ: single-queue and all-queues forms both succeed. */
406 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
407 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
410 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
412 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
414 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
416 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
420 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
422 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
425 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Without MULTI_EVENTQ: only the all-queues (-1) form is accepted. */
427 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
431 TEST_ASSERT(err == -EINVAL, "Expected EINVAL got %d", err);
433 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
436 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
438 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
440 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
442 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
444 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
446 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
448 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Unknown adapter instance id must be rejected for add and del. */
451 err = rte_event_eth_rx_adapter_queue_add(1, TEST_ETHDEV_ID, -1,
453 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
455 err = rte_event_eth_rx_adapter_queue_del(1, TEST_ETHDEV_ID, -1);
456 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/*
 * Fill the system up to RTE_MAX_ETHPORTS with net_null vdevs, re-init
 * all ports, then add and delete adapter queues across every port.
 */
462 adapter_multi_eth_add_del(void)
467 uint16_t port_index, drv_id = 0;
468 char driver_name[50];
470 struct rte_event_eth_rx_adapter_queue_conf queue_config;
473 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
476 queue_config.rx_queue_flags = 0;
477 queue_config.ev = ev;
478 queue_config.servicing_weight = 1;
480 /* stop eth devices for existing */
482 for (; port_index < rte_eth_dev_count_total(); port_index += 1)
483 rte_eth_dev_stop(port_index);
485 /* add the max port for rx_adapter */
486 port_index = rte_eth_dev_count_total();
487 for (; port_index < RTE_MAX_ETHPORTS; port_index += 1) {
/* vdev name e.g. "net_null0", "net_null1", ... (arg elided here) */
488 snprintf(driver_name, sizeof(driver_name), "%s%u", "net_null",
490 err = rte_vdev_init(driver_name, NULL);
491 TEST_ASSERT(err == 0, "Failed driver %s got %d",
/* Re-init all ports, including the freshly created vdevs. */
496 err = init_ports(rte_eth_dev_count_total());
497 TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
499 /* eth_rx_adapter_queue_add for n ports */
501 for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
502 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
505 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
508 /* eth_rx_adapter_queue_del n ports */
510 for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
511 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
513 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/*
 * Interrupt-mode queue coverage: servicing_weight == 0 selects
 * interrupt mode; also converts intr queues back to poll mode by
 * re-adding them with weight 1.  Returns early when setup found no
 * interrupt-capable port.
 */
520 adapter_intr_queue_add_del(void)
526 struct rte_event_eth_rx_adapter_queue_conf queue_config;
528 if (!default_params.rx_intr_port_inited)
531 eth_port = default_params.rx_intr_port;
532 err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, eth_port, &cap);
533 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
536 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
539 queue_config.rx_queue_flags = 0;
540 queue_config.ev = ev;
542 /* weight = 0 => interrupt mode */
543 queue_config.servicing_weight = 0;
/* Add single intr queue(s) when multiple event queues are supported. */
545 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
547 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
550 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
554 queue_config.servicing_weight = 0;
555 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
559 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
561 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
563 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
566 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
569 /* del remaining queues */
570 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
573 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Re-add in interrupt mode ... */
576 queue_config.servicing_weight = 0;
577 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
581 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
583 /* intr -> poll mode queue */
584 queue_config.servicing_weight = 1;
586 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
587 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
591 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
594 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
598 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* Final cleanup of all queues. */
601 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
604 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/*
 * Start/stop the adapter with and without queues attached, and verify
 * that an unknown instance id is rejected for both operations.
 */
610 adapter_start_stop(void)
616 ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
619 struct rte_event_eth_rx_adapter_queue_conf queue_config;
621 queue_config.rx_queue_flags = 0;
622 if (default_params.caps &
623 RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
625 queue_config.rx_queue_flags =
626 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
629 queue_config.ev = ev;
630 queue_config.servicing_weight = 1;
632 err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, TEST_ETHDEV_ID,
634 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
636 err = rte_event_eth_rx_adapter_start(TEST_INST_ID);
637 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
639 err = rte_event_eth_rx_adapter_stop(TEST_INST_ID);
640 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
642 err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID, TEST_ETHDEV_ID,
644 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* start/stop must also succeed with no queues attached */
646 err = rte_event_eth_rx_adapter_start(TEST_INST_ID);
647 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
649 err = rte_event_eth_rx_adapter_stop(TEST_INST_ID);
650 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
/* unknown instance id is rejected */
652 err = rte_event_eth_rx_adapter_start(1);
653 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
655 err = rte_event_eth_rx_adapter_stop(1);
656 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/* Stats query checks (adapter_stats; function header elided in this
 * view): NULL stats pointer and unknown instance id -> -EINVAL,
 * valid query -> 0. */
665 struct rte_event_eth_rx_adapter_stats stats;
667 err = rte_event_eth_rx_adapter_stats_get(TEST_INST_ID, NULL);
668 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
670 err = rte_event_eth_rx_adapter_stats_get(TEST_INST_ID, &stats);
671 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
673 err = rte_event_eth_rx_adapter_stats_get(1, &stats);
674 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
/* Poll-mode suite: adapter_create/adapter_free wrap each case that
 * needs a live adapter instance. */
679 static struct unit_test_suite event_eth_rx_tests = {
680 .suite_name = "rx event eth adapter test suite",
681 .setup = testsuite_setup,
682 .teardown = testsuite_teardown,
684 TEST_CASE_ST(NULL, NULL, adapter_create_free),
685 TEST_CASE_ST(adapter_create, adapter_free,
686 adapter_queue_add_del),
687 TEST_CASE_ST(adapter_create, adapter_free,
688 adapter_multi_eth_add_del),
689 TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop),
690 TEST_CASE_ST(adapter_create, adapter_free, adapter_stats),
691 TEST_CASES_END() /**< NULL terminate unit test array */
/* Rx-interrupt suite.  Its name is made distinct from the poll-mode
 * suite above (both previously read "rx event eth adapter test suite"),
 * so test-runner output unambiguously identifies which suite ran. */
695 static struct unit_test_suite event_eth_rx_intr_tests = {
696 .suite_name = "rx intr event eth adapter test suite",
697 .setup = testsuite_setup_rx_intr,
698 .teardown = testsuite_teardown_rx_intr,
700 TEST_CASE_ST(adapter_create, adapter_free,
701 adapter_intr_queue_add_del),
702 TEST_CASES_END() /**< NULL terminate unit test array */
/* Entry point registered for the poll-mode adapter autotest. */
707 test_event_eth_rx_adapter_common(void)
709 return unit_test_suite_runner(&event_eth_rx_tests);
/* Entry point registered for the Rx-interrupt adapter autotest. */
713 test_event_eth_rx_intr_adapter_common(void)
715 return unit_test_suite_runner(&event_eth_rx_intr_tests);
/* Register both suites with the DPDK test command framework. */
718 REGISTER_TEST_COMMAND(event_eth_rx_adapter_autotest,
719 test_event_eth_rx_adapter_common);
720 REGISTER_TEST_COMMAND(event_eth_rx_intr_adapter_autotest,
721 test_event_eth_rx_intr_adapter_common);